From 0acc125e1688a83c66542f19519045ee2f6eadf6 Mon Sep 17 00:00:00 2001
From: Samuel Garcia
Date: Fri, 8 Sep 2023 10:28:21 +0200
Subject: [PATCH 001/115] Start GroundTruthStudy refactoring.

---
 .../comparison/groundtruthstudy.py            |  66 ++++++++-
 .../comparison/tests/test_groundtruthstudy.py | 128 ++++++++++++------
 2 files changed, 152 insertions(+), 42 deletions(-)

diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py
index 7b146f07bc..12588019ba 100644
--- a/src/spikeinterface/comparison/groundtruthstudy.py
+++ b/src/spikeinterface/comparison/groundtruthstudy.py
@@ -22,8 +22,72 @@
     collect_run_times,
 )

-
 class GroundTruthStudy:
+    """
+    This class is a helper to run any comparison on several "cases" over several ground-truth datasets.
+
+    "Cases" can be:
+      * several sorters, to compare sorters against each other
+      * the same sorter with different parameters
+      * different comparison parameters
+      * any combination of these
+
+    For enough flexibility, a case key can be a tuple, so that complexity can be varied along several
+    "axes" (parameters or sorters).
+
+    A ground-truth dataset needs a recording + a sorting. These can come from a MEArec file or from the
+    internal generator :py:func:`generate_ground_truth_recording()`.
+
+    This GroundTruthStudy has been refactored in version 0.100 to be more flexible than previous versions.
+    Folder structures are not backward compatible.
+    """
+
+    def __init__(self, study_folder=None):
+        # import pandas as pd
+
+        self.study_folder = Path(study_folder)
+
+        # self.computed_names = None
+        # self.recording_names = None
+        # self.cases_names = None
+
+        self.datasets = {}
+        self.cases = {}
+
+        # self.rec_names = None
+        # self.sorter_names = None
+
+        self.scan_folder()
+
+        # self.comparisons = None
+        # self.exhaustive_gt = None
+
+    @classmethod
+    def create(cls, study_folder, datasets={}, cases={}):
+        pass
+
+    def __repr__(self):
+        t = f"GroundTruthStudy {self.study_folder.stem} \n"
+        t += f"  recordings: {len(self.rec_names)} {self.rec_names}\n"
+        if len(self.sorter_names):
+            t += "  cases: {} {}\n".format(len(self.sorter_names), self.sorter_names)
+
+        return t
+
+    def scan_folder(self):
+        self.rec_names = get_rec_names(self.study_folder)
+        # scan computed names
+        self.computed_names = list(iter_computed_names(self.study_folder))  # list of pairs (rec_name, sorter_name)
+        self.sorter_names = np.unique([e for _, e in iter_computed_names(self.study_folder)]).tolist()
+        self._is_scanned = True
+
+
+
+
+
+class OLDGroundTruthStudy:
     def __init__(self, study_folder=None):
         import pandas as pd

diff --git a/src/spikeinterface/comparison/tests/test_groundtruthstudy.py b/src/spikeinterface/comparison/tests/test_groundtruthstudy.py
index 70f8a63c8c..f28d901075 100644
--- a/src/spikeinterface/comparison/tests/test_groundtruthstudy.py
+++ b/src/spikeinterface/comparison/tests/test_groundtruthstudy.py
@@ -3,16 +3,18 @@
 import pytest
 from pathlib import Path

-from spikeinterface.extractors import toy_example
+# from spikeinterface.extractors import toy_example
+from spikeinterface import generate_ground_truth_recording
+from spikeinterface.preprocessing import bandpass_filter
 from spikeinterface.sorters import installed_sorters
 from spikeinterface.comparison import GroundTruthStudy

-try:
-    import tridesclous
+# try:
+#     import tridesclous

-    HAVE_TDC = True
-except ImportError:
-    HAVE_TDC = False
+#     HAVE_TDC = True
+# except ImportError:
+#     HAVE_TDC = False


 if hasattr(pytest, "global_test_folder"):
@@ -27,61 +29,105 @@ def setup_module():
     if study_folder.is_dir():
shutil.rmtree(study_folder) - _setup_comparison_study() + create_study(study_folder) -def _setup_comparison_study(): - rec0, gt_sorting0 = toy_example(num_channels=4, duration=30, seed=0, num_segments=1) - rec1, gt_sorting1 = toy_example(num_channels=32, duration=30, seed=0, num_segments=1) +def simple_preprocess(rec): + return bandpass_filter(rec) - gt_dict = { + +def create_study(study_folder): + rec0, gt_sorting0 = generate_ground_truth_recording(num_channels=4, durations=[30.], seed=42) + rec1, gt_sorting1 = generate_ground_truth_recording(num_channels=4, durations=[30.], seed=91) + + datasets = { "toy_tetrode": (rec0, gt_sorting0), "toy_probe32": (rec1, gt_sorting1), + "toy_probe32_preprocess": (simple_preprocess(rec1), gt_sorting1), } - study = GroundTruthStudy.create(study_folder, gt_dict) + + # cases can also be generated via simple loops + cases = { + # + ("tdc2", "no-preprocess", "tetrode"): { + "label": "tridesclous2 without preprocessing and standard params", + "dataset": "toy_tetrode", + "run_sorter_params": { + + }, + "comparison_params": { + + }, + }, + # + ("tdc2", "with-preprocess", "probe32"): { + "label": "tridesclous2 with preprocessing standar params", + "dataset": "toy_probe32_preprocess", + "run_sorter_params": { + + }, + "comparison_params": { + + }, + }, + # + ("sc2", "no-preprocess", "tetrode"): { + "label": "spykingcircus2 without preprocessing standar params", + "dataset": "toy_tetrode", + "run_sorter_params": { + + }, + "comparison_params": { + + }, + }, + } + + study = GroundTruthStudy.create(study_folder, datasets=datasets, cases=cases) + print(study) -@pytest.mark.skipif(not HAVE_TDC, reason="Test requires Python package 'tridesclous'") -def test_run_study_sorters(): - study = GroundTruthStudy(study_folder) - sorter_list = [ - "tridesclous", - ] - print( - f"\n#################################\nINSTALLED SORTERS\n#################################\n" - f"{installed_sorters()}" - ) - study.run_sorters(sorter_list) +# @pytest.mark.skipif(not HAVE_TDC, reason="Test requires Python package 'tridesclous'") +# def test_run_study_sorters(): +# study = GroundTruthStudy(study_folder) +# sorter_list = [ +# "tridesclous", +# ] +# print( +# f"\n#################################\nINSTALLED SORTERS\n#################################\n" +# f"{installed_sorters()}" +# ) +# study.run_sorters(sorter_list) -@pytest.mark.skipif(not HAVE_TDC, reason="Test requires Python package 'tridesclous'") -def test_extract_sortings(): - study = GroundTruthStudy(study_folder) +# @pytest.mark.skipif(not HAVE_TDC, reason="Test requires Python package 'tridesclous'") +# def test_extract_sortings(): +# study = GroundTruthStudy(study_folder) - study.copy_sortings() +# study.copy_sortings() - for rec_name in study.rec_names: - gt_sorting = study.get_ground_truth(rec_name) +# for rec_name in study.rec_names: +# gt_sorting = study.get_ground_truth(rec_name) - for rec_name in study.rec_names: - metrics = study.get_metrics(rec_name=rec_name) +# for rec_name in study.rec_names: +# metrics = study.get_metrics(rec_name=rec_name) - snr = study.get_units_snr(rec_name=rec_name) +# snr = study.get_units_snr(rec_name=rec_name) - study.copy_sortings() +# study.copy_sortings() - run_times = study.aggregate_run_times() +# run_times = study.aggregate_run_times() - study.run_comparisons(exhaustive_gt=True) +# study.run_comparisons(exhaustive_gt=True) - perf = study.aggregate_performance_by_unit() +# perf = study.aggregate_performance_by_unit() - count_units = study.aggregate_count_units() - dataframes = 
study.aggregate_dataframes() - print(dataframes) +# count_units = study.aggregate_count_units() +# dataframes = study.aggregate_dataframes() +# print(dataframes) if __name__ == "__main__": - # setup_module() + setup_module() # test_run_study_sorters() - test_extract_sortings() + # test_extract_sortings() From 462961ff8321c1a060705f27005f38dfd6ef3a66 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Fri, 8 Sep 2023 13:44:05 +0200 Subject: [PATCH 002/115] new GroundTruthStudy wip --- .../comparison/groundtruthstudy.py | 153 +++++++++++++++--- .../comparison/tests/test_groundtruthstudy.py | 23 ++- 2 files changed, 146 insertions(+), 30 deletions(-) diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py index 12588019ba..fc4de5a18d 100644 --- a/src/spikeinterface/comparison/groundtruthstudy.py +++ b/src/spikeinterface/comparison/groundtruthstudy.py @@ -1,26 +1,32 @@ from pathlib import Path import shutil +import json +import pickle import numpy as np from spikeinterface.core import load_extractor -from spikeinterface.extractors import NpzSortingExtractor -from spikeinterface.sorters import sorter_dict, run_sorters +from spikeinterface.core.core_tools import SIJsonEncoder + +from spikeinterface.sorters import run_sorter_jobs, read_sorter_folder from spikeinterface import WaveformExtractor from spikeinterface.qualitymetrics import compute_quality_metrics from .paircomparisons import compare_sorter_to_ground_truth -from .studytools import ( - setup_comparison_study, - get_rec_names, - get_recordings, - iter_working_folder, - iter_computed_names, - iter_computed_sorting, - collect_run_times, -) +# from .studytools import ( +# setup_comparison_study, +# get_rec_names, +# get_recordings, +# iter_working_folder, +# iter_computed_names, +# iter_computed_sorting, +# collect_run_times, +# ) + + +_key_separator = " ## " class GroundTruthStudy: """ @@ -44,10 +50,10 @@ class GroundTruthStudy: """ - def __init__(self, study_folder=None): + def __init__(self, study_folder): # import pandas as pd - self.study_folder = Path(study_folder) + self.folder = Path(study_folder) # self.computed_names = None # self.recording_names = None @@ -66,22 +72,121 @@ def __init__(self, study_folder=None): @classmethod def create(cls, study_folder, datasets={}, cases={}): - pass + study_folder = Path(study_folder) + study_folder.mkdir(exist_ok=False, parents=True) + + (study_folder / "datasets").mkdir() + (study_folder / "datasets/recordings").mkdir() + (study_folder / "datasets/gt_sortings").mkdir() + (study_folder / "sorters").mkdir() + (study_folder / "sortings").mkdir() + + for key, (rec, gt_sorting) in datasets.items(): + assert "/" not in key + assert "\\" not in key + + # rec are pickle + rec.dump_to_pickle(study_folder / f"datasets/recordings/{key}.pickle") + + # sorting are pickle + saved as NumpyFolderSorting + gt_sorting.dump_to_pickle(study_folder / f"datasets/gt_sortings/{key}.pickle") + gt_sorting.save(format="numpy_folder", folder=study_folder / f"datasets/gt_sortings/{key}") + + + # (study_folder / "cases.jon").write_text( + # json.dumps(cases, indent=4, cls=SIJsonEncoder), + # encoding="utf8", + # ) + # cases is dump to a pickle file, json is not possible because of tuple key + (study_folder / "cases.pickle").write_bytes(pickle.dumps(cases)) + + return cls(study_folder) + + + def scan_folder(self): + if not (self.folder / "datasets").exists(): + raise ValueError(f"This is folder is not a {self.folder} GroundTruthStudy") + + for rec_file in 
(self.folder / "datasets/recordings").glob("*.pickle"): + key = rec_file.stem + rec = load_extractor(rec_file) + gt_sorting = load_extractor(self.folder / f"datasets/gt_sortings/{key}") + self.datasets[key] = (rec, gt_sorting) + + with open(self.folder / "cases.pickle", "rb") as f: + self.cases = pickle.load(f) def __repr__(self): - t = f"GroundTruthStudy {self.study_folder.stem} \n" - t += f" recordings: {len(self.rec_names)} {self.rec_names}\n" - if len(self.sorter_names): - t += " cases: {} {}\n".format(len(self.sorter_names), self.sorter_names) + t = f"GroundTruthStudy {self.folder.stem} \n" + t += f" datasets: {len(self.datasets)} {list(self.datasets.keys())}\n" + t += f" cases: {len(self.cases)} {list(self.cases.keys())}\n" return t - def scan_folder(self): - self.rec_names = get_rec_names(self.study_folder) - # scan computed names - self.computed_names = list(iter_computed_names(self.study_folder)) # list of pair (rec_name, sorter_name) - self.sorter_names = np.unique([e for _, e in iter_computed_names(self.study_folder)]).tolist() - self._is_scanned = True + def key_to_str(self, key): + if isinstance(key, str): + return key + elif isinstance(key, tuple): + return _key_separator.join(key) + else: + raise ValueError("Keys for cases must str or tuple") + + def run_sorters(self, case_keys=None, engine='loop', engine_kwargs={}, keep=True, verbose=False): + """ + + """ + if case_keys is None: + case_keys = self.cases.keys() + + job_list = [] + for key in case_keys: + sorting_folder = self.folder / "sortings" / self.key_to_str(key) + sorting_exists = sorting_folder.exists() + + sorter_folder = self.folder / "sorters" / self.key_to_str(key) + sorter_folder_exists = sorting_folder.exists() + + if keep: + if sorting_exists: + continue + if sorter_folder_exists: + # the sorter folder exists but havent been copied to sortings folder + sorting = read_sorter_folder(sorter_folder, raise_error=False) + if sorting is not None: + # save and skip + sorting.save(format="numpy_folder", folder=sorting_folder) + continue + + params = self.cases[key]["run_sorter_params"].copy() + # this ensure that sorter_name is given + recording, _ = self.datasets[self.cases[key]["dataset"]] + sorter_name = params.pop("sorter_name") + job = dict(sorter_name=sorter_name, + recording=recording, + output_folder=sorter_folder) + job.update(params) + job_list.append(job) + + run_sorter_jobs(job_list, engine=engine, engine_kwargs=engine_kwargs, return_output=False) + + # TODO create a list in laucher for engine blocking and non-blocking + if engine not in ("slurm", ): + self.copy_sortings(case_keys) + + def copy_sortings(self, case_keys=None): + if case_keys is None: + case_keys = self.cases.keys() + + for key in case_keys: + sorting_folder = self.folder / "sortings" / self.key_to_str(key) + sorter_folder = self.folder / "sorters" / self.key_to_str(key) + + sorting = read_sorter_folder(sorter_folder, raise_error=False) + if sorting is not None: + sorting.save(format="numpy_folder", folder=sorting_folder) + + def run_comparisons(self): + pass diff --git a/src/spikeinterface/comparison/tests/test_groundtruthstudy.py b/src/spikeinterface/comparison/tests/test_groundtruthstudy.py index f28d901075..15ba7db2ab 100644 --- a/src/spikeinterface/comparison/tests/test_groundtruthstudy.py +++ b/src/spikeinterface/comparison/tests/test_groundtruthstudy.py @@ -25,18 +25,19 @@ study_folder = cache_folder / "test_groundtruthstudy/" +print(study_folder.absolute()) def setup_module(): if study_folder.is_dir(): shutil.rmtree(study_folder) - 
create_study(study_folder) + create_a_study(study_folder) def simple_preprocess(rec): return bandpass_filter(rec) -def create_study(study_folder): +def create_a_study(study_folder): rec0, gt_sorting0 = generate_ground_truth_recording(num_channels=4, durations=[30.], seed=42) rec1, gt_sorting1 = generate_ground_truth_recording(num_channels=4, durations=[30.], seed=91) @@ -53,7 +54,7 @@ def create_study(study_folder): "label": "tridesclous2 without preprocessing and standard params", "dataset": "toy_tetrode", "run_sorter_params": { - + "sorter_name": "tridesclous2", }, "comparison_params": { @@ -64,7 +65,7 @@ def create_study(study_folder): "label": "tridesclous2 with preprocessing standar params", "dataset": "toy_probe32_preprocess", "run_sorter_params": { - + "sorter_name": "tridesclous2", }, "comparison_params": { @@ -75,7 +76,7 @@ def create_study(study_folder): "label": "spykingcircus2 without preprocessing standar params", "dataset": "toy_tetrode", "run_sorter_params": { - + "sorter_name": "spykingcircus2", }, "comparison_params": { @@ -87,6 +88,13 @@ def create_study(study_folder): print(study) + +def test_GroundTruthStudy(): + study = GroundTruthStudy(study_folder) + print(study) + + study.run_sorters(verbose=True) + # @pytest.mark.skipif(not HAVE_TDC, reason="Test requires Python package 'tridesclous'") # def test_run_study_sorters(): # study = GroundTruthStudy(study_folder) @@ -128,6 +136,9 @@ def create_study(study_folder): if __name__ == "__main__": - setup_module() + # setup_module() + test_GroundTruthStudy() + + # test_run_study_sorters() # test_extract_sortings() From e0af88dbae3593a62372706ed842cde3b1736464 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Fri, 8 Sep 2023 20:32:11 +0200 Subject: [PATCH 003/115] Make internal sorters able to be run with none dumpable to json recording. 
--- src/spikeinterface/comparison/groundtruthstudy.py | 2 +- .../comparison/tests/test_groundtruthstudy.py | 4 ++-- src/spikeinterface/core/base.py | 6 ++++-- src/spikeinterface/sorters/internal/si_based.py | 14 +++++++++++--- .../sorters/internal/spyking_circus2.py | 4 +--- .../sorters/internal/tridesclous2.py | 4 +--- 6 files changed, 20 insertions(+), 14 deletions(-) diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py index fc4de5a18d..2eeb697980 100644 --- a/src/spikeinterface/comparison/groundtruthstudy.py +++ b/src/spikeinterface/comparison/groundtruthstudy.py @@ -105,7 +105,7 @@ def create(cls, study_folder, datasets={}, cases={}): def scan_folder(self): if not (self.folder / "datasets").exists(): - raise ValueError(f"This is folder is not a {self.folder} GroundTruthStudy") + raise ValueError(f"This is folder is not a GroundTruthStudy : {self.folder.absolute()}") for rec_file in (self.folder / "datasets/recordings").glob("*.pickle"): key = rec_file.stem diff --git a/src/spikeinterface/comparison/tests/test_groundtruthstudy.py b/src/spikeinterface/comparison/tests/test_groundtruthstudy.py index 15ba7db2ab..169c5a12bb 100644 --- a/src/spikeinterface/comparison/tests/test_groundtruthstudy.py +++ b/src/spikeinterface/comparison/tests/test_groundtruthstudy.py @@ -85,7 +85,7 @@ def create_a_study(study_folder): } study = GroundTruthStudy.create(study_folder, datasets=datasets, cases=cases) - print(study) + # print(study) @@ -136,7 +136,7 @@ def test_GroundTruthStudy(): if __name__ == "__main__": - # setup_module() + setup_module() test_GroundTruthStudy() diff --git a/src/spikeinterface/core/base.py b/src/spikeinterface/core/base.py index 87c0805630..4f6043f16e 100644 --- a/src/spikeinterface/core/base.py +++ b/src/spikeinterface/core/base.py @@ -425,14 +425,15 @@ def from_dict(dictionary: dict, base_folder: Optional[Union[Path, str]] = None) extractor: RecordingExtractor or SortingExtractor The loaded extractor object """ - if dictionary["relative_paths"]: + # for pickle dump relative_path was not in the dict, this ensure compatibility + if dictionary.get("relative_paths", False): assert base_folder is not None, "When relative_paths=True, need to provide base_folder" dictionary = _make_paths_absolute(dictionary, base_folder) extractor = _load_extractor_from_dict(dictionary) folder_metadata = dictionary.get("folder_metadata", None) if folder_metadata is not None: folder_metadata = Path(folder_metadata) - if dictionary["relative_paths"]: + if dictionary.get("relative_paths", False): folder_metadata = base_folder / folder_metadata extractor.load_metadata_from_folder(folder_metadata) return extractor @@ -622,6 +623,7 @@ def dump_to_pickle( include_annotations=True, include_properties=include_properties, folder_metadata=folder_metadata, + relative_to=None, recursive=False, ) file_path = self._get_file_path(file_path, [".pkl", ".pickle"]) diff --git a/src/spikeinterface/sorters/internal/si_based.py b/src/spikeinterface/sorters/internal/si_based.py index 1496ffbbd1..ee5dcbea0d 100644 --- a/src/spikeinterface/sorters/internal/si_based.py +++ b/src/spikeinterface/sorters/internal/si_based.py @@ -1,4 +1,4 @@ -from spikeinterface.core import load_extractor +from spikeinterface.core import load_extractor, NumpyRecording from spikeinterface.sorters import BaseSorter @@ -14,8 +14,16 @@ def is_installed(cls): @classmethod def _setup_recording(cls, recording, output_folder, params, verbose): - # nothing to do here because the 
spikeinterface_recording.json is here anyway - pass + # Some recording not json serializable but they can be saved to pickle + # * NoiseGeneratorRecording or InjectTemplatesRecording: we force a pickle because this is light + # * for NumpyRecording (this is a bit crazy because it flush the entire buffer!!) + if recording.check_if_dumpable() and not isinstance(recording, NumpyRecording): + rec_file = output_folder.parent / "spikeinterface_recording.pickle" + recording.dump_to_pickle(rec_file) + # TODO (hard) : find a solution for NumpyRecording without any dump + # this will need an internal API change I think + # because the run_sorter is from the "folder" (because of container mainly and also many other reasons) + # and not from the recording itself @classmethod def _get_result_from_folder(cls, output_folder): diff --git a/src/spikeinterface/sorters/internal/spyking_circus2.py b/src/spikeinterface/sorters/internal/spyking_circus2.py index 9de2762562..72171cd5b5 100644 --- a/src/spikeinterface/sorters/internal/spyking_circus2.py +++ b/src/spikeinterface/sorters/internal/spyking_circus2.py @@ -54,9 +54,7 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): job_kwargs["verbose"] = verbose job_kwargs["progress_bar"] = verbose - recording = load_extractor( - sorter_output_folder.parent / "spikeinterface_recording.json", base_folder=sorter_output_folder.parent - ) + recording = load_extractor(sorter_output_folder.parent / "spikeinterface_recording.pickle") sampling_rate = recording.get_sampling_frequency() num_channels = recording.get_num_channels() diff --git a/src/spikeinterface/sorters/internal/tridesclous2.py b/src/spikeinterface/sorters/internal/tridesclous2.py index 42f51d3a77..7cbf01cf68 100644 --- a/src/spikeinterface/sorters/internal/tridesclous2.py +++ b/src/spikeinterface/sorters/internal/tridesclous2.py @@ -49,9 +49,7 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): import hdbscan - recording_raw = load_extractor( - sorter_output_folder.parent / "spikeinterface_recording.json", base_folder=sorter_output_folder.parent - ) + recording_raw = load_extractor(sorter_output_folder.parent / "spikeinterface_recording.pickle") num_chans = recording_raw.get_num_channels() sampling_frequency = recording_raw.get_sampling_frequency() From 9905bf59fc4447e5f80bbf5acadb71f692337982 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Fri, 8 Sep 2023 21:24:24 +0200 Subject: [PATCH 004/115] wip --- src/spikeinterface/comparison/groundtruthstudy.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py index 2eeb697980..d760703ea1 100644 --- a/src/spikeinterface/comparison/groundtruthstudy.py +++ b/src/spikeinterface/comparison/groundtruthstudy.py @@ -165,6 +165,8 @@ def run_sorters(self, case_keys=None, engine='loop', engine_kwargs={}, keep=True recording=recording, output_folder=sorter_folder) job.update(params) + # the verbose is overwritten and global to all run_sorters + job["verbose"] = verbose job_list.append(job) run_sorter_jobs(job_list, engine=engine, engine_kwargs=engine_kwargs, return_output=False) From 98fa0f81b280ef79c691444d0d3999abb2c9a160 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Sat, 9 Sep 2023 08:57:29 +0200 Subject: [PATCH 005/115] gt_study wip --- .../comparison/groundtruthstudy.py | 59 ++++++++++++++----- .../comparison/tests/test_groundtruthstudy.py | 12 +++- 2 files changed, 53 insertions(+), 18 deletions(-) diff --git 
a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py index d760703ea1..3debced277 100644 --- a/src/spikeinterface/comparison/groundtruthstudy.py +++ b/src/spikeinterface/comparison/groundtruthstudy.py @@ -5,7 +5,7 @@ import numpy as np -from spikeinterface.core import load_extractor +from spikeinterface.core import load_extractor, extract_waveforms from spikeinterface.core.core_tools import SIJsonEncoder from spikeinterface.sorters import run_sorter_jobs, read_sorter_folder @@ -13,7 +13,7 @@ from spikeinterface import WaveformExtractor from spikeinterface.qualitymetrics import compute_quality_metrics -from .paircomparisons import compare_sorter_to_ground_truth +from .paircomparisons import compare_sorter_to_ground_truth, GroundTruthComparison # from .studytools import ( # setup_comparison_study, @@ -51,25 +51,15 @@ class GroundTruthStudy: """ def __init__(self, study_folder): - # import pandas as pd - self.folder = Path(study_folder) - # self.computed_names = None - # self.recording_names = None - # self.cases_names = None - self.datasets = {} self.cases = {} - - # self.rec_names = None - # self.sorter_names = None + self.sortings = {} + self.comparisons = {} self.scan_folder() - # self.comparisons = None - # self.exhaustive_gt = None - @classmethod def create(cls, study_folder, datasets={}, cases={}): study_folder = Path(study_folder) @@ -116,10 +106,26 @@ def scan_folder(self): with open(self.folder / "cases.pickle", "rb") as f: self.cases = pickle.load(f) + self.comparisons = {k: None for k in self.cases} + + self.sortings = {} + for key in self.cases: + sorting_folder = self.folder / "sortings" / self.key_to_str(key) + print(sorting_folder) + print(sorting_folder.is_dir()) + if sorting_folder.exists(): + sorting = load_extractor(sorting_folder) + else: + sorting = None + self.sortings[key] = sorting + + def __repr__(self): t = f"GroundTruthStudy {self.folder.stem} \n" t += f" datasets: {len(self.datasets)} {list(self.datasets.keys())}\n" t += f" cases: {len(self.cases)} {list(self.cases.keys())}\n" + num_computed = sum([1 for sorting in self.sortings.values() if sorting is not None]) + t += f" computed: {num_computed}\n" return t @@ -187,10 +193,31 @@ def copy_sortings(self, case_keys=None): if sorting is not None: sorting.save(format="numpy_folder", folder=sorting_folder) - def run_comparisons(self): - pass + def run_comparisons(self, case_keys=None, comparison_class=GroundTruthComparison, **kwargs): + if case_keys is None: + case_keys = self.cases.keys() + + for key in case_keys: + dataset_key = self.cases[key]["dataset"] + _, gt_sorting = self.datasets[dataset_key] + sorting = self.sortings[key] + comp = comparison_class(gt_sorting, sorting, **kwargs) + self.comparisons[key] = comp + def extract_waveforms_gt(self, case_keys=None, **extract_kwargs): + + if case_keys is None: + case_keys = self.cases.keys() + + base_folder = self.folder / "waveforms" + base_folder.mkdir(exist_ok=True) + + for key in case_keys: + dataset_key = self.cases[key]["dataset"] + recording, gt_sorting = self.datasets[dataset_key] + wf_folder = base_folder / self.key_to_str(key) + we = extract_waveforms(recording, gt_sorting, folder=wf_folder) diff --git a/src/spikeinterface/comparison/tests/test_groundtruthstudy.py b/src/spikeinterface/comparison/tests/test_groundtruthstudy.py index 169c5a12bb..9aaa742184 100644 --- a/src/spikeinterface/comparison/tests/test_groundtruthstudy.py +++ b/src/spikeinterface/comparison/tests/test_groundtruthstudy.py @@ -93,7 +93,15 
@@ def test_GroundTruthStudy(): study = GroundTruthStudy(study_folder) print(study) - study.run_sorters(verbose=True) + # study.run_sorters(verbose=True) + + # print(study.sortings) + + # print(study.comparisons) + # study.run_comparisons() + # print(study.comparisons) + + study.extract_waveforms_gt(n_jobs=-1) # @pytest.mark.skipif(not HAVE_TDC, reason="Test requires Python package 'tridesclous'") # def test_run_study_sorters(): @@ -136,7 +144,7 @@ def test_GroundTruthStudy(): if __name__ == "__main__": - setup_module() + # setup_module() test_GroundTruthStudy() From f0940a5265d9f1db235dc4db66af15e0b513fc51 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Sat, 9 Sep 2023 18:19:18 +0200 Subject: [PATCH 006/115] gt study wip --- .../comparison/groundtruthstudy.py | 200 +++++++++++++++++- .../comparison/tests/test_groundtruthstudy.py | 48 +++-- 2 files changed, 224 insertions(+), 24 deletions(-) diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py index 3debced277..9eb771b71a 100644 --- a/src/spikeinterface/comparison/groundtruthstudy.py +++ b/src/spikeinterface/comparison/groundtruthstudy.py @@ -1,11 +1,12 @@ from pathlib import Path import shutil +import os import json import pickle import numpy as np -from spikeinterface.core import load_extractor, extract_waveforms +from spikeinterface.core import load_extractor, extract_waveforms, load_waveforms from spikeinterface.core.core_tools import SIJsonEncoder from spikeinterface.sorters import run_sorter_jobs, read_sorter_folder @@ -26,7 +27,16 @@ # ) +# TODO : save comparison in folders +# TODO : find a way to set level names + + + +# This is to separate names when the key are tuples when saving folders _key_separator = " ## " +# This would be more funny +# _key_separator = " (°_°) " + class GroundTruthStudy: """ @@ -70,6 +80,10 @@ def create(cls, study_folder, datasets={}, cases={}): (study_folder / "datasets/gt_sortings").mkdir() (study_folder / "sorters").mkdir() (study_folder / "sortings").mkdir() + (study_folder / "sortings" / "run_logs").mkdir() + (study_folder / "metrics").mkdir() + + for key, (rec, gt_sorting) in datasets.items(): assert "/" not in key @@ -111,8 +125,6 @@ def scan_folder(self): self.sortings = {} for key in self.cases: sorting_folder = self.folder / "sortings" / self.key_to_str(key) - print(sorting_folder) - print(sorting_folder.is_dir()) if sorting_folder.exists(): sorting = load_extractor(sorting_folder) else: @@ -160,9 +172,13 @@ def run_sorters(self, case_keys=None, engine='loop', engine_kwargs={}, keep=True sorting = read_sorter_folder(sorter_folder, raise_error=False) if sorting is not None: # save and skip - sorting.save(format="numpy_folder", folder=sorting_folder) + self.copy_sortings(case_keys=[key]) continue - + + if sorting_exists: + # TODO : delete sorting + log + pass + params = self.cases[key]["run_sorter_params"].copy() # this ensure that sorter_name is given recording, _ = self.datasets[self.cases[key]["dataset"]] @@ -181,17 +197,29 @@ def run_sorters(self, case_keys=None, engine='loop', engine_kwargs={}, keep=True if engine not in ("slurm", ): self.copy_sortings(case_keys) - def copy_sortings(self, case_keys=None): + def copy_sortings(self, case_keys=None, force=True): if case_keys is None: case_keys = self.cases.keys() for key in case_keys: sorting_folder = self.folder / "sortings" / self.key_to_str(key) sorter_folder = self.folder / "sorters" / self.key_to_str(key) + log_file = self.folder / "sortings" / "run_logs" / 
f"{self.key_to_str(key)}.json" sorting = read_sorter_folder(sorter_folder, raise_error=False) if sorting is not None: - sorting.save(format="numpy_folder", folder=sorting_folder) + if sorting_folder.exists(): + if force: + # TODO delete folder + log + shutil.rmtree(sorting_folder) + else: + continue + + sorting = sorting.save(format="numpy_folder", folder=sorting_folder) + self.sortings[key] = sorting + + # copy logs + shutil.copyfile(sorter_folder / "spikeinterface_log.json", log_file) def run_comparisons(self, case_keys=None, comparison_class=GroundTruthComparison, **kwargs): @@ -202,9 +230,29 @@ def run_comparisons(self, case_keys=None, comparison_class=GroundTruthComparison dataset_key = self.cases[key]["dataset"] _, gt_sorting = self.datasets[dataset_key] sorting = self.sortings[key] + if sorting is None: + self.comparisons[key] = None + continue comp = comparison_class(gt_sorting, sorting, **kwargs) self.comparisons[key] = comp + def get_run_times(self, case_keys=None): + import pandas as pd + if case_keys is None: + case_keys = self.cases.keys() + + log_folder = self.folder / "sortings" / "run_logs" + + run_times = {} + for key in case_keys: + log_file = log_folder / f"{self.key_to_str(key)}.json" + with open(log_file, mode="r") as logfile: + log = json.load(logfile) + run_time = log.get("run_time", None) + run_times[key] = run_time + + return pd.Series(run_times, name="run_time") + def extract_waveforms_gt(self, case_keys=None, **extract_kwargs): if case_keys is None: @@ -219,6 +267,144 @@ def extract_waveforms_gt(self, case_keys=None, **extract_kwargs): wf_folder = base_folder / self.key_to_str(key) we = extract_waveforms(recording, gt_sorting, folder=wf_folder) + def get_waveform_extractor(self, key): + # some recording are not dumpable to json and the waveforms extactor need it! 
+ # so we load it with and put after + we = load_waveforms(self.folder / "waveforms" / self.key_to_str(key), with_recording=False) + dataset_key = self.cases[key]["dataset"] + recording, _ = self.datasets[dataset_key] + we.set_recording(recording) + return we + + def get_templates(self, key, mode="mean"): + we = self.get_waveform_extractor(key) + templates = we.get_all_templates(mode=mode) + return templates + + def compute_metrics(self, case_keys=None, metric_names=["snr", "firing_rate"], force=False): + if case_keys is None: + case_keys = self.cases.keys() + + for key in case_keys: + filename = self.folder / "metrics" / f"{self.key_to_str(key)}.txt" + if filename.exists(): + if force: + os.remove(filename) + else: + continue + + we = self.get_waveform_extractor(key) + metrics = compute_quality_metrics(we, metric_names=metric_names) + metrics.to_csv(filename, sep="\t", index=True) + + def get_metrics(self, key): + import pandas as pd + filename = self.folder / "metrics" / f"{self.key_to_str(key)}.txt" + if not filename.exists(): + return + metrics = pd.read_csv(filename, sep="\t", index_col=0) + dataset_key = self.cases[key]["dataset"] + recording, gt_sorting = self.datasets[dataset_key] + metrics.index = gt_sorting.unit_ids + return metrics + + def get_units_snr(self, key): + """ + """ + return self.get_metrics(key)["snr"] + + def aggregate_performance_by_unit(self, case_keys=None): + + import pandas as pd + + if case_keys is None: + case_keys = self.cases.keys() + + perf_by_unit = [] + for key in case_keys: + comp = self.comparisons.get(key, None) + assert comp is not None, "You need to do study.run_comparisons() first" + + perf = comp.get_performance(method="by_unit", output="pandas") + if isinstance(key, str): + cols = ["level0"] + perf["level0"] = key + + elif isinstance(key, tuple): + cols = [f'level{i}' for i in range(len(key))] + for col, k in zip(cols, key): + perf[col] = k + + perf = perf.reset_index() + perf_by_unit.append(perf) + + + + perf_by_unit = pd.concat(perf_by_unit) + perf_by_unit = perf_by_unit.set_index(cols) + + return perf_by_unit + + # def aggregate_count_units(self, well_detected_score=None, redundant_score=None, overmerged_score=None): + + def aggregate_count_units( + self, case_keys=None, well_detected_score=None, redundant_score=None, overmerged_score=None + ): + + import pandas as pd + + if case_keys is None: + case_keys = self.cases.keys() + + perf_by_unit = [] + for key in case_keys: + comp = self.comparisons.get(key, None) + assert comp is not None, "You need to do study.run_comparisons() first" + + + + # assert self.comparisons is not None, "run_comparisons first" + + # import pandas as pd + + # index = pd.MultiIndex.from_tuples(self.computed_names, names=["rec_name", "sorter_name"]) + + # count_units = pd.DataFrame( + # index=index, + # columns=["num_gt", "num_sorter", "num_well_detected", "num_redundant", "num_overmerged"], + # dtype=int, + # ) + + # if self.exhaustive_gt: + # count_units["num_false_positive"] = pd.Series(dtype=int) + # count_units["num_bad"] = pd.Series(dtype=int) + + # for rec_name, sorter_name, sorting in iter_computed_sorting(self.study_folder): + # gt_sorting = self.get_ground_truth(rec_name) + # comp = self.comparisons[(rec_name, sorter_name)] + + # count_units.loc[(rec_name, sorter_name), "num_gt"] = len(gt_sorting.get_unit_ids()) + # count_units.loc[(rec_name, sorter_name), "num_sorter"] = len(sorting.get_unit_ids()) + # count_units.loc[(rec_name, sorter_name), "num_well_detected"] = comp.count_well_detected_units( + # 
well_detected_score + # ) + # if self.exhaustive_gt: + # count_units.loc[(rec_name, sorter_name), "num_overmerged"] = comp.count_overmerged_units( + # overmerged_score + # ) + # count_units.loc[(rec_name, sorter_name), "num_redundant"] = comp.count_redundant_units(redundant_score) + # count_units.loc[(rec_name, sorter_name), "num_false_positive"] = comp.count_false_positive_units( + # redundant_score + # ) + # count_units.loc[(rec_name, sorter_name), "num_bad"] = comp.count_bad_units() + + # return count_units + + + + + + class OLDGroundTruthStudy: diff --git a/src/spikeinterface/comparison/tests/test_groundtruthstudy.py b/src/spikeinterface/comparison/tests/test_groundtruthstudy.py index 9aaa742184..3593b0b05f 100644 --- a/src/spikeinterface/comparison/tests/test_groundtruthstudy.py +++ b/src/spikeinterface/comparison/tests/test_groundtruthstudy.py @@ -71,17 +71,17 @@ def create_a_study(study_folder): }, }, - # - ("sc2", "no-preprocess", "tetrode"): { - "label": "spykingcircus2 without preprocessing standar params", - "dataset": "toy_tetrode", - "run_sorter_params": { - "sorter_name": "spykingcircus2", - }, - "comparison_params": { - - }, - }, + # we comment this at the moement because SC2 is quite slow for testing + # ("sc2", "no-preprocess", "tetrode"): { + # "label": "spykingcircus2 without preprocessing standar params", + # "dataset": "toy_tetrode", + # "run_sorter_params": { + # "sorter_name": "spykingcircus2", + # }, + # "comparison_params": { + + # }, + # }, } study = GroundTruthStudy.create(study_folder, datasets=datasets, cases=cases) @@ -93,16 +93,30 @@ def test_GroundTruthStudy(): study = GroundTruthStudy(study_folder) print(study) - # study.run_sorters(verbose=True) + study.run_sorters(verbose=True) - # print(study.sortings) + print(study.sortings) - # print(study.comparisons) - # study.run_comparisons() - # print(study.comparisons) + print(study.comparisons) + study.run_comparisons() + print(study.comparisons) study.extract_waveforms_gt(n_jobs=-1) + study.compute_metrics() + + for key in study.cases: + metrics = study.get_metrics(key) + print(metrics) + + study.aggregate_performance_by_unit() + + +# perf = study.aggregate_performance_by_unit() +# count_units = study.aggregate_count_units() + + + # @pytest.mark.skipif(not HAVE_TDC, reason="Test requires Python package 'tridesclous'") # def test_run_study_sorters(): # study = GroundTruthStudy(study_folder) @@ -144,7 +158,7 @@ def test_GroundTruthStudy(): if __name__ == "__main__": - # setup_module() + setup_module() test_GroundTruthStudy() From b0267dcd72b69c0c1982d57200381c9ab6c1ec0f Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Sun, 10 Sep 2023 21:45:40 +0200 Subject: [PATCH 007/115] Add levels concept in GTStudy --- .../comparison/groundtruthstudy.py | 83 ++++++++++++++++--- .../comparison/tests/test_groundtruthstudy.py | 3 +- 2 files changed, 74 insertions(+), 12 deletions(-) diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py index 9eb771b71a..76c019f6b9 100644 --- a/src/spikeinterface/comparison/groundtruthstudy.py +++ b/src/spikeinterface/comparison/groundtruthstudy.py @@ -71,7 +71,29 @@ def __init__(self, study_folder): self.scan_folder() @classmethod - def create(cls, study_folder, datasets={}, cases={}): + def create(cls, study_folder, datasets={}, cases={}, levels=None): + + # check that cases keys are homogeneous + key0 = list(cases.keys())[0] + if isinstance(key0, str): + assert all(isinstance(key, str) for key in cases.keys()), "Keys for cases are 
not homogeneous" + if levels is None: + levels = "level0" + else: + assert isinstance(levels, str) + elif isinstance(key0, tuple): + assert all(isinstance(key, tuple) for key in cases.keys()), "Keys for cases are not homogeneous" + num_levels = len(key0) + assert all(len(key) == num_levels for key in cases.keys()), "Keys for cases are not homogeneous, tuple negth differ" + if levels is None: + levels = [f"level{i}" for i in range(num_levels)] + else: + levels = list(levels) + assert len(levels) == num_levels + else: + raise ValueError("Keys for cases must str or tuple") + + study_folder = Path(study_folder) study_folder.mkdir(exist_ok=False, parents=True) @@ -97,6 +119,10 @@ def create(cls, study_folder, datasets={}, cases={}): gt_sorting.save(format="numpy_folder", folder=study_folder / f"datasets/gt_sortings/{key}") + info = {} + info["levels"] = levels + (study_folder / "info.json").write_text(json.dumps(info, indent=4), encoding="utf8") + # (study_folder / "cases.jon").write_text( # json.dumps(cases, indent=4, cls=SIJsonEncoder), # encoding="utf8", @@ -111,6 +137,12 @@ def scan_folder(self): if not (self.folder / "datasets").exists(): raise ValueError(f"This is folder is not a GroundTruthStudy : {self.folder.absolute()}") + with open(self.folder / "info.json", "r") as f: + self.info = json.load(f) + if isinstance(self.levels, list): + # because tuple caoont be stored in json + self.levels = tuple(self.info["levels"]) + for rec_file in (self.folder / "datasets/recordings").glob("*.pickle"): key = rec_file.stem rec = load_extractor(rec_file) @@ -327,12 +359,9 @@ def aggregate_performance_by_unit(self, case_keys=None): perf = comp.get_performance(method="by_unit", output="pandas") if isinstance(key, str): - cols = ["level0"] - perf["level0"] = key - + perf[self.levels] = key elif isinstance(key, tuple): - cols = [f'level{i}' for i in range(len(key))] - for col, k in zip(cols, key): + for col, k in zip(self.levels, key): perf[col] = k perf = perf.reset_index() @@ -341,7 +370,7 @@ def aggregate_performance_by_unit(self, case_keys=None): perf_by_unit = pd.concat(perf_by_unit) - perf_by_unit = perf_by_unit.set_index(cols) + perf_by_unit = perf_by_unit.set_index(self.levels) return perf_by_unit @@ -354,18 +383,50 @@ def aggregate_count_units( import pandas as pd if case_keys is None: - case_keys = self.cases.keys() + case_keys = list(self.cases.keys()) + + if isinstance(case_keys[0], str): + index = pd.Index(case_keys, name=self.levels) + else: + index = pd.MultiIndex.from_tuples(case_keys, names=self.levels) + + + columns = ["num_gt", "num_sorter", "num_well_detected", "num_redundant", "num_overmerged"] + comp = self.comparisons[case_keys[0]] + if comp.exhaustive_gt: + columns.extend(["num_false_positive", "num_bad"]) + count_units = pd.DataFrame(index=index, columns=columns, dtype=int) + - perf_by_unit = [] for key in case_keys: comp = self.comparisons.get(key, None) assert comp is not None, "You need to do study.run_comparisons() first" + gt_sorting = comp.sorting1 + sorting = comp.sorting2 + + count_units.loc[key, "num_gt"] = len(gt_sorting.get_unit_ids()) + count_units.loc[key, "num_sorter"] = len(sorting.get_unit_ids()) + count_units.loc[key, "num_well_detected"] = comp.count_well_detected_units( + well_detected_score + ) + if comp.exhaustive_gt: + count_units.loc[key, "num_overmerged"] = comp.count_overmerged_units( + overmerged_score + ) + count_units.loc[key, "num_redundant"] = comp.count_redundant_units(redundant_score) + count_units.loc[key, "num_false_positive"] = 

diff --git a/src/spikeinterface/comparison/tests/test_groundtruthstudy.py b/src/spikeinterface/comparison/tests/test_groundtruthstudy.py
index 3593b0b05f..5c5af476e4 100644
--- a/src/spikeinterface/comparison/tests/test_groundtruthstudy.py
+++ b/src/spikeinterface/comparison/tests/test_groundtruthstudy.py
@@ -84,7 +84,7 @@ def create_a_study(study_folder):
         # },
     }

-    study = GroundTruthStudy.create(study_folder, datasets=datasets, cases=cases)
+    study = GroundTruthStudy.create(study_folder, datasets=datasets, cases=cases, levels=["sorter_name", "processing", "probe_type"])
     # print(study)

From 4f73dd1cd5f7990ace9f6f8d962b218f406e4692 Mon Sep 17 00:00:00 2001
From: Alessio Buccino
Date: Mon, 11 Sep 2023 16:37:59 +0200
Subject: [PATCH 008/115] WIP: firing_range and amplitude_spread

---
 doc/modules/qualitymetrics/firing_range.rst   | 48 ++++++++++
 .../qualitymetrics/misc_metrics.py            | 94 ++++++++++++++++++-
 .../qualitymetrics/quality_metric_list.py     |  2 +
 3 files changed, 143 insertions(+), 1 deletion(-)
 create mode 100644 doc/modules/qualitymetrics/firing_range.rst

diff --git a/doc/modules/qualitymetrics/firing_range.rst b/doc/modules/qualitymetrics/firing_range.rst
new file mode 100644
index 0000000000..fd8f79682c
--- /dev/null
+++ b/doc/modules/qualitymetrics/firing_range.rst
@@ -0,0 +1,48 @@
+Firing range (:code:`firing_range`)
+===================================
+
+
+Calculation
+-----------
+
+The firing range indicates the spread of a unit's firing rate across the recording. It is computed by
+taking the difference between the 95th and the 5th percentiles of the firing rates computed over short time bins (e.g. 10 s).
+
+
+
+Expectation and use
+-------------------
+
+Both very high and very low firing rates can indicate errors.
+Highly contaminated units (type I error) may have high firing rates as a result of the inclusion of other neurons' spikes.
+Low firing rate units are likely to be incomplete (type II error), although this is not always the case (some neurons have highly selective firing patterns).
+The firing rate is expected to be approximately log-normally distributed [Buzsáki]_.
+
+Example code
+------------
+
+.. code-block:: python
+
+    import spikeinterface.qualitymetrics as qm
+
+    # Make recording, sorting and wvf_extractor object for your data.
+    firing_range = qm.compute_firing_ranges(wvf_extractor)
+    # firing_range is a dict containing the units' IDs as keys,
+    # and their firing ranges across segments as values (in Hz).
+
+References
+----------
+
+.. autofunction:: spikeinterface.qualitymetrics.misc_metrics.compute_firing_ranges
+
+
+Links to original implementations
+---------------------------------
+
+* From the `AllenSDK `_
+
+Literature
+----------
+
+Unknown origin.
+Widely discussed, e.g. in [Buzsáki]_.
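As a cross-check of the "Calculation" section above, here is a minimal self-contained NumPy sketch of the same quantity (illustrative only; the spike train, duration and bin size below are made up):

    import numpy as np

    # Illustrative sketch of the firing-range computation described above:
    # firing rates in non-overlapping bins, then 95th minus 5th percentile.
    rng = np.random.default_rng(0)
    spike_times_s = np.sort(rng.uniform(0, 300.0, size=1500))  # fake spike train
    bin_size_s = 10.0
    edges = np.arange(0.0, 300.0 + bin_size_s, bin_size_s)
    counts, _ = np.histogram(spike_times_s, bins=edges)
    rates = counts / bin_size_s
    firing_range = np.percentile(rates, 95) - np.percentile(rates, 5)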
diff --git a/src/spikeinterface/qualitymetrics/misc_metrics.py b/src/spikeinterface/qualitymetrics/misc_metrics.py
index ee28485983..9be9a32ff6 100644
--- a/src/spikeinterface/qualitymetrics/misc_metrics.py
+++ b/src/spikeinterface/qualitymetrics/misc_metrics.py
@@ -563,7 +563,99 @@ def compute_synchrony_metrics(waveform_extractor, synchrony_sizes=(2, 4, 8), **k
     return synchrony_metrics

-_default_params["synchrony_metrics"] = dict(synchrony_sizes=(0, 2, 4))
+_default_params["synchrony"] = dict(synchrony_sizes=(0, 2, 4))
+
+
+def compute_firing_ranges(waveform_extractor, bin_size_s=5, quantiles=(0.05, 0.95), unit_ids=None):
+    """
+    Compute the firing range, i.e. the range between the lower and upper quantiles of the firing rate
+    distribution computed in non-overlapping time bins.
+
+    Parameters
+    ----------
+    waveform_extractor : WaveformExtractor
+        The waveform extractor object.
+    bin_size_s : float, default: 5
+        The size of the bin in seconds.
+    quantiles : tuple, default: (0.05, 0.95)
+        The quantiles to compute.
+    unit_ids : list or None
+        The unit ids to compute the metric for. If None, all units are used.
+
+    Returns
+    -------
+    firing_ranges : dict
+        The firing range for each unit.
+    """
+    sampling_frequency = waveform_extractor.sampling_frequency
+    bin_size_samples = int(bin_size_s * sampling_frequency)
+    sorting = waveform_extractor.sorting
+    if unit_ids is None:
+        unit_ids = sorting.unit_ids
+
+    # for each segment, we compute the firing rate histogram and we concatenate them
+    firing_rate_histograms = {unit_id: np.array([], dtype=float) for unit_id in sorting.unit_ids}
+    for segment_index in range(waveform_extractor.get_num_segments()):
+        num_samples = waveform_extractor.get_num_samples(segment_index)
+        edges = np.arange(0, num_samples + 1, bin_size_samples)
+
+        for unit_id in unit_ids:
+            spike_times = sorting.get_unit_spike_train(unit_id=unit_id, segment_index=segment_index)
+            spike_counts, _ = np.histogram(spike_times, bins=edges)
+            firing_rates = spike_counts / bin_size_s
+            firing_rate_histograms[unit_id] = np.concatenate((firing_rate_histograms[unit_id], firing_rates))
+
+    # finally we compute the quantiles
+    firing_ranges = {}
+    for unit_id in unit_ids:
+        firing_ranges[unit_id] = np.quantile(firing_rate_histograms[unit_id], quantiles[1]) - np.quantile(
+            firing_rate_histograms[unit_id], quantiles[0]
+        )
+
+    return firing_ranges
+
+
+_default_params["firing_range"] = dict(bin_size_s=5, quantiles=(0.05, 0.95))
+
+
+def compute_amplitude_spreads(
+    waveform_extractor, spikes_bin_size=50, amplitude_extension="spike_amplitudes", unit_ids=None
+):
+    """Calculate the spread of spike amplitudes, computed as the median variance of the amplitudes
+    within bins of a fixed number of spikes (suggested by S. Musall, 2023).
+
+    Parameters
+    ----------
+    waveform_extractor : WaveformExtractor
+        The waveform extractor object.
+    spikes_bin_size : int, default: 50
+        The number of spikes per bin.
+    amplitude_extension : str, default: "spike_amplitudes"
+        The extension to load the amplitudes from (they don't need to be in physical units).
+    unit_ids : list or None
+        The unit ids to compute the metric for. If None, all units are used.
+
+    Returns
+    -------
+    amplitude_spreads : dict
+        The amplitude spread for each unit.
+    """
+    sorting = waveform_extractor.sorting
+    spikes = sorting.to_spike_vector()
+    num_spikes = sorting.count_num_spikes_per_unit()
+    if unit_ids is None:
+        unit_ids = sorting.unit_ids
+
+    if waveform_extractor.is_extension(amplitude_extension):
+        sac = waveform_extractor.load_extension(amplitude_extension)
+        amps = sac.get_data(outputs="concatenated")
+    else:
+        warnings.warn(f"compute_amplitude_spreads requires the {amplitude_extension} extension. Returning NaN for all units.")
+        empty_dict = {unit_id: np.nan for unit_id in unit_ids}
+        return empty_dict
+
+    all_unit_ids = list(sorting.unit_ids)
+    amplitude_spreads = {}
+    for unit_id in unit_ids:
+        amps_unit = amps[spikes["unit_index"] == all_unit_ids.index(unit_id)]
+        if num_spikes[unit_id] < spikes_bin_size:
+            # not enough spikes for a full bin: use the variance of all amplitudes
+            amp_spread = np.var(amps_unit)
+        else:
+            amp_spread = []
+            for i in range(0, num_spikes[unit_id], spikes_bin_size):
+                amp_spread.append(np.var(amps_unit[i : i + spikes_bin_size]))
+            amp_spread = np.median(amp_spread)
+        amplitude_spreads[unit_id] = amp_spread
+
+    return amplitude_spreads
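For orientation while these metrics are still WIP, a minimal usage sketch (illustrative only, not part of the patch; it assumes an existing WaveformExtractor named "we" and that spike amplitudes are computed first, with example parameter values):

    from spikeinterface.postprocessing import compute_spike_amplitudes
    from spikeinterface.qualitymetrics.misc_metrics import compute_firing_ranges, compute_amplitude_spreads

    compute_spike_amplitudes(we)  # needed by compute_amplitude_spreads
    firing_ranges = compute_firing_ranges(we, bin_size_s=5)  # dict: unit_id -> firing range (Hz)
    amplitude_spreads = compute_amplitude_spreads(we, spikes_bin_size=50)  # dict: unit_id -> median variance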


 def compute_amplitude_cutoffs(
diff --git a/src/spikeinterface/qualitymetrics/quality_metric_list.py b/src/spikeinterface/qualitymetrics/quality_metric_list.py
index 90dbb47a3a..917927f44a 100644
--- a/src/spikeinterface/qualitymetrics/quality_metric_list.py
+++ b/src/spikeinterface/qualitymetrics/quality_metric_list.py
@@ -12,6 +12,7 @@
     compute_amplitude_medians,
     compute_drift_metrics,
     compute_synchrony_metrics,
+    compute_firing_ranges,
 )

 from .pca_metrics import (
@@ -41,5 +42,6 @@
     "amplitude_cutoff": compute_amplitude_cutoffs,
     "amplitude_median": compute_amplitude_medians,
     "synchrony": compute_synchrony_metrics,
+    "firing_range": compute_firing_ranges,
     "drift": compute_drift_metrics,
 }

From 0750638eb13030b22ad30b9db94fa968a60c7fa2 Mon Sep 17 00:00:00 2001
From: Samuel Garcia
Date: Mon, 11 Sep 2023 16:56:23 +0200
Subject: [PATCH 009/115] wip gtstudy

---
 .../comparison/groundtruthstudy.py            | 53 ++-----------------
 .../comparison/tests/test_groundtruthstudy.py |  2 +-
 src/spikeinterface/widgets/widget_list.py     |  3 ++
 3 files changed, 9 insertions(+), 49 deletions(-)

diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py
index 76c019f6b9..049c97c234 100644
--- a/src/spikeinterface/comparison/groundtruthstudy.py
+++ b/src/spikeinterface/comparison/groundtruthstudy.py
@@ -139,9 +139,11 @@ def scan_folder(self):

         with open(self.folder / "info.json", "r") as f:
             self.info = json.load(f)
-        if isinstance(self.levels, list):
-            # because a tuple cannot be stored in json
-            self.levels = tuple(self.info["levels"])
+
+        self.levels = self.info["levels"]
+        # if isinstance(self.levels, list):
+        #     # because a tuple cannot be stored in json
+        #     self.levels = tuple(self.info["levels"])

             )
             count_units.loc[key, "num_bad"] = comp.count_bad_units()

-        # count_units = pd.concat(count_units)
-        # count_units = count_units.set_index(cols)
-
         return count_units

-        count_units = []
-
-        # index = pd.MultiIndex.from_tuples(self.computed_names, names=["rec_name", "sorter_name"])
-
-        # count_units = pd.DataFrame(
-        #     index=index,
-        #     columns=["num_gt", "num_sorter", "num_well_detected", "num_redundant", "num_overmerged"],
-        #     dtype=int,
-        # )
-
-        # if self.exhaustive_gt:
-        #     count_units["num_false_positive"] = pd.Series(dtype=int)
-        #     count_units["num_bad"] = pd.Series(dtype=int)
-
-        # for rec_name, sorter_name, sorting in iter_computed_sorting(self.study_folder):
-        #     gt_sorting = self.get_ground_truth(rec_name)
-        #     comp = self.comparisons[(rec_name, sorter_name)]
-
-        #     count_units.loc[(rec_name, sorter_name), "num_gt"] = len(gt_sorting.get_unit_ids())
-        #     count_units.loc[(rec_name, sorter_name), "num_sorter"] = len(sorting.get_unit_ids())
-        #     count_units.loc[(rec_name, sorter_name), "num_well_detected"] = comp.count_well_detected_units(
-        #         well_detected_score
-        #     )
-        #     if self.exhaustive_gt:
-        #         count_units.loc[(rec_name, sorter_name), "num_overmerged"] = 
comp.count_overmerged_units( - # overmerged_score - # ) - # count_units.loc[(rec_name, sorter_name), "num_redundant"] = comp.count_redundant_units(redundant_score) - # count_units.loc[(rec_name, sorter_name), "num_false_positive"] = comp.count_false_positive_units( - # redundant_score - # ) - # count_units.loc[(rec_name, sorter_name), "num_bad"] = comp.count_bad_units() - - # return count_units - - - - - diff --git a/src/spikeinterface/comparison/tests/test_groundtruthstudy.py b/src/spikeinterface/comparison/tests/test_groundtruthstudy.py index 5c5af476e4..1da79b9efe 100644 --- a/src/spikeinterface/comparison/tests/test_groundtruthstudy.py +++ b/src/spikeinterface/comparison/tests/test_groundtruthstudy.py @@ -162,6 +162,6 @@ def test_GroundTruthStudy(): setup_module() test_GroundTruthStudy() - # test_run_study_sorters() # test_extract_sortings() + diff --git a/src/spikeinterface/widgets/widget_list.py b/src/spikeinterface/widgets/widget_list.py index f3c640ff16..1e9d5301cf 100644 --- a/src/spikeinterface/widgets/widget_list.py +++ b/src/spikeinterface/widgets/widget_list.py @@ -20,6 +20,7 @@ from .unit_templates import UnitTemplatesWidget from .unit_waveforms_density_map import UnitWaveformDensityMapWidget from .unit_waveforms import UnitWaveformsWidget +from .gtstudy import StudyRunTimesWidget widget_list = [ @@ -41,6 +42,7 @@ UnitTemplatesWidget, UnitWaveformDensityMapWidget, UnitWaveformsWidget, + StudyRunTimesWidget, ] @@ -88,6 +90,7 @@ plot_unit_templates = UnitTemplatesWidget plot_unit_waveforms_density_map = UnitWaveformDensityMapWidget plot_unit_waveforms = UnitWaveformsWidget +plot_study_run_times = StudyRunTimesWidget def plot_timeseries(*args, **kwargs): From ee2eb2f04d5c17817fcb9f014f9814f5192cb624 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Tue, 12 Sep 2023 14:23:00 +0200 Subject: [PATCH 010/115] STart porting matplotlib widgets related to ground truth study. 
--- .../comparison/groundtruthstudy.py | 4 +- .../comparison/tests/test_groundtruthstudy.py | 48 +---- src/spikeinterface/widgets/gtstudy.py | 192 ++++++++++++++++++ src/spikeinterface/widgets/widget_list.py | 6 +- 4 files changed, 201 insertions(+), 49 deletions(-) create mode 100644 src/spikeinterface/widgets/gtstudy.py diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py index 049c97c234..d936c50e5e 100644 --- a/src/spikeinterface/comparison/groundtruthstudy.py +++ b/src/spikeinterface/comparison/groundtruthstudy.py @@ -347,7 +347,7 @@ def get_units_snr(self, key): """ return self.get_metrics(key)["snr"] - def aggregate_performance_by_unit(self, case_keys=None): + def get_performance_by_unit(self, case_keys=None): import pandas as pd @@ -375,7 +375,7 @@ def aggregate_performance_by_unit(self, case_keys=None): perf_by_unit = perf_by_unit.set_index(self.levels) return perf_by_unit - def aggregate_count_units( + def get_count_units( self, case_keys=None, well_detected_score=None, redundant_score=None, overmerged_score=None ): diff --git a/src/spikeinterface/comparison/tests/test_groundtruthstudy.py b/src/spikeinterface/comparison/tests/test_groundtruthstudy.py index 1da79b9efe..52d5c73d3b 100644 --- a/src/spikeinterface/comparison/tests/test_groundtruthstudy.py +++ b/src/spikeinterface/comparison/tests/test_groundtruthstudy.py @@ -109,54 +109,10 @@ def test_GroundTruthStudy(): metrics = study.get_metrics(key) print(metrics) - study.aggregate_performance_by_unit() - study.aggregate_count_units() + study.get_performance_by_unit() + study.get_count_units() -# perf = study.aggregate_performance_by_unit() -# count_units = study.aggregate_count_units() - - - -# @pytest.mark.skipif(not HAVE_TDC, reason="Test requires Python package 'tridesclous'") -# def test_run_study_sorters(): -# study = GroundTruthStudy(study_folder) -# sorter_list = [ -# "tridesclous", -# ] -# print( -# f"\n#################################\nINSTALLED SORTERS\n#################################\n" -# f"{installed_sorters()}" -# ) -# study.run_sorters(sorter_list) - - -# @pytest.mark.skipif(not HAVE_TDC, reason="Test requires Python package 'tridesclous'") -# def test_extract_sortings(): -# study = GroundTruthStudy(study_folder) - -# study.copy_sortings() - -# for rec_name in study.rec_names: -# gt_sorting = study.get_ground_truth(rec_name) - -# for rec_name in study.rec_names: -# metrics = study.get_metrics(rec_name=rec_name) - -# snr = study.get_units_snr(rec_name=rec_name) - -# study.copy_sortings() - -# run_times = study.aggregate_run_times() - -# study.run_comparisons(exhaustive_gt=True) - -# perf = study.aggregate_performance_by_unit() - -# count_units = study.aggregate_count_units() -# dataframes = study.aggregate_dataframes() -# print(dataframes) - if __name__ == "__main__": setup_module() diff --git a/src/spikeinterface/widgets/gtstudy.py b/src/spikeinterface/widgets/gtstudy.py new file mode 100644 index 0000000000..aa1a80c3d3 --- /dev/null +++ b/src/spikeinterface/widgets/gtstudy.py @@ -0,0 +1,192 @@ +import numpy as np + +from .base import BaseWidget, to_attr +from .utils import get_unit_colors + +from ..core import ChannelSparsity +from ..core.waveform_extractor import WaveformExtractor +from ..core.basesorting import BaseSorting + + +class StudyRunTimesWidget(BaseWidget): + """ + Plot sorter run times for a GroundTruthStudy + + + Parameters + ---------- + study: GroundTruthStudy + A study object. 
+ case_keys: list or None + A selection of cases to plot, if None, then all. + + """ + + def __init__( + self, + study, + case_keys=None, + backend=None, + **backend_kwargs, + ): + + if case_keys is None: + case_keys = list(study.cases.keys()) + + plot_data = dict( + study=study, + run_times=study.get_run_times(), + case_keys=case_keys, + ) + + BaseWidget.__init__(self, plot_data, backend=backend, **backend_kwargs) + + def plot_matplotlib(self, data_plot, **backend_kwargs): + import matplotlib.pyplot as plt + from .utils_matplotlib import make_mpl_figure + + dp = to_attr(data_plot) + + self.figure, self.axes, self.ax = make_mpl_figure(**backend_kwargs) + + for i, key in enumerate(dp.case_keys): + label = dp.study.cases[key]["label"] + rt = dp.run_times.loc[key] + self.ax.bar(i, rt, width=0.8, label=label) + + self.ax.legend() + + + +# TODO : plot optionally average on some levels using group by +class StudyUnitCountsWidget(BaseWidget): + """ + Plot unit counts for a study: "num_well_detected", "num_false_positive", "num_redundant", "num_overmerged" + + + Parameters + ---------- + study: GroundTruthStudy + A study object. + case_keys: list or None + A selection of cases to plot, if None, then all. + + """ + + def __init__( + self, + study, + case_keys=None, + backend=None, + **backend_kwargs, + ): + + if case_keys is None: + case_keys = list(study.cases.keys()) + + plot_data = dict( + study=study, + count_units = study.get_count_units(case_keys=case_keys), + case_keys=case_keys, + ) + + BaseWidget.__init__(self, plot_data, backend=backend, **backend_kwargs) + + def plot_matplotlib(self, data_plot, **backend_kwargs): + import matplotlib.pyplot as plt + from .utils_matplotlib import make_mpl_figure + from .utils import get_some_colors + + dp = to_attr(data_plot) + + self.figure, self.axes, self.ax = make_mpl_figure(**backend_kwargs) + + columns = dp.count_units.columns.tolist() + columns.remove("num_gt") + columns.remove("num_sorter") + + ncol = len(columns) + + colors = get_some_colors(columns, color_engine="auto", + map_name="hot") + colors["num_well_detected"] = "green" + + xticklabels = [] + for i, key in enumerate(dp.case_keys): + for c, col in enumerate(columns): + x = i + 1 + c / (ncol + 1) + y = dp.count_units.loc[key, col] + if not "well_detected" in col: + y = -y + + if i == 0: + label = col.replace("num_", "").replace("_", " ").title() + else: + label = None + + self.ax.bar([x], [y], width=1 / (ncol + 2), label=label, color=colors[col]) + + xticklabels.append(dp.study.cases[key]["label"]) + + self.ax.set_xticks(np.arange(len(dp.case_keys)) + 1) + self.ax.set_xticklabels(xticklabels) + self.ax.legend() + + +# TODO : plot optionally average on some levels using group by +class StudyPerformances(BaseWidget): + """ + Plot performances over case for a study. + + + Parameters + ---------- + study: GroundTruthStudy + A study object. + mode: str + Which mode in "swarm" + case_keys: list or None + A selection of cases to plot, if None, then all. 
+ + """ + + def __init__( + self, + study, + mode="swarm", + case_keys=None, + backend=None, + **backend_kwargs, + ): + + if case_keys is None: + case_keys = list(study.cases.keys()) + + plot_data = dict( + study=study, + perfs=study.get_performance_by_unit(case_keys=case_keys), + mode=mode, + case_keys=case_keys, + ) + + BaseWidget.__init__(self, plot_data, backend=backend, **backend_kwargs) + + def plot_matplotlib(self, data_plot, **backend_kwargs): + import matplotlib.pyplot as plt + from .utils_matplotlib import make_mpl_figure + from .utils import get_some_colors + + import pandas as pd + import seaborn as sns + + dp = to_attr(data_plot) + perfs = dp.perfs + + self.figure, self.axes, self.ax = make_mpl_figure(**backend_kwargs) + + if dp.mode == "swarm": + levels = perfs.index.names + df = pd.melt(perfs.reset_index(), id_vars=levels, var_name='Metric', value_name='Score', + value_vars=('accuracy','precision', 'recall')) + df['x'] = df.apply(lambda r: ' '.join([r[col] for col in levels]), axis=1) + sns.swarmplot(data=df, x='x', y='Score', hue='Metric', dodge=True) diff --git a/src/spikeinterface/widgets/widget_list.py b/src/spikeinterface/widgets/widget_list.py index 1e9d5301cf..4bc91e0737 100644 --- a/src/spikeinterface/widgets/widget_list.py +++ b/src/spikeinterface/widgets/widget_list.py @@ -20,7 +20,7 @@ from .unit_templates import UnitTemplatesWidget from .unit_waveforms_density_map import UnitWaveformDensityMapWidget from .unit_waveforms import UnitWaveformsWidget -from .gtstudy import StudyRunTimesWidget +from .gtstudy import StudyRunTimesWidget, StudyUnitCountsWidget, StudyPerformances widget_list = [ @@ -43,6 +43,8 @@ UnitWaveformDensityMapWidget, UnitWaveformsWidget, StudyRunTimesWidget, + StudyUnitCountsWidget, + StudyPerformances ] @@ -91,6 +93,8 @@ plot_unit_waveforms_density_map = UnitWaveformDensityMapWidget plot_unit_waveforms = UnitWaveformsWidget plot_study_run_times = StudyRunTimesWidget +plot_study_unit_counts = StudyUnitCountsWidget +plot_study_performances = StudyPerformances def plot_timeseries(*args, **kwargs): From b91ff2e774de0b2ee04f1ed6e075962e1c30d468 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Tue, 12 Sep 2023 15:24:44 +0200 Subject: [PATCH 011/115] Add amplitude_spread --- .../qualitymetrics/amplitude_spread.rst | 48 ++++++++++++++ doc/modules/qualitymetrics/drift.rst | 1 + doc/modules/qualitymetrics/firing_range.rst | 24 +++---- .../qualitymetrics/misc_metrics.py | 66 ++++++++++++++----- .../qualitymetrics/quality_metric_list.py | 2 + .../tests/test_metrics_functions.py | 26 +++++++- 6 files changed, 132 insertions(+), 35 deletions(-) create mode 100644 doc/modules/qualitymetrics/amplitude_spread.rst diff --git a/doc/modules/qualitymetrics/amplitude_spread.rst b/doc/modules/qualitymetrics/amplitude_spread.rst new file mode 100644 index 0000000000..0ae0761265 --- /dev/null +++ b/doc/modules/qualitymetrics/amplitude_spread.rst @@ -0,0 +1,48 @@ +Amplitude spread (:code:`amplitude_spread`) +=========================================== + + +Calculation +----------- + +The amplitude spread is a measure of the amplitude variability. +It is computed the ratio between the standard deviation and the amplitude mean (aka coefficient of variation). +To obtain a better estimate of this measure, it is first computed separately for several bins of a prefixed number of spikes +(e.g 100) and then the median of these values is taken. 
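+As a rough sketch (hypothetical helper name, not part of the API), the per-unit
+computation can be thought of as follows, where ``amps`` is the 1d array of
+amplitudes of a single unit:
+
+.. code-block:: python
+
+    import numpy as np
+
+    def binned_amplitude_spread(amps, num_spikes_per_bin=100):
+        # sketch of the estimate described above, not the actual implementation
+        # the overall amplitude mean normalizes each bin's standard deviation
+        mean_amp = np.abs(np.mean(amps))
+        cvs = [
+            np.std(amps[i : i + num_spikes_per_bin]) / mean_amp
+            for i in range(0, len(amps), num_spikes_per_bin)
+        ]
+        # the median over bins is robust to occasional outlier bins
+        return float(np.median(cvs))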
+ +The computation requires either spike amplitudes (see :py:func:`~spikeinterface.postprocessing.compute_spike_amplitudes()`) +or amplitude scalings (see :py:func:`~spikeinterface.postprocessing.compute_amplitude_scalings()`) to be pre-computed. + + +Expectation and use +------------------- + +Very high levels of amplitude_spread ranges, outside of a physiolocigal range, might indicate noise contamination. + + +Example code +------------ + +.. code-block:: python + + import spikeinterface.qualitymetrics as qm + + # Make recording, sorting and wvf_extractor object for your data. + # It is required to run `compute_spike_amplitudes(wvf_extractor)` or + # `compute_amplitude_scalings(wvf_extractor)` (if missing, values will be NaN) + amplitude_spread = qm.compute_amplitude_spreads(wvf_extractor, amplitude_extension='spike_amplitudes') + # amplitude_spread is a dict containing the units' IDs as keys, + # and their amplitude_spread (as a coefficient of variation). + + + +References +---------- + +.. autofunction:: spikeinterface.qualitymetrics.misc_metrics.compute_amplitude_spreads + + +Literature +---------- + +Designed by Simon Musall and adapted to SpikeInterface by Alessio Buccino. diff --git a/doc/modules/qualitymetrics/drift.rst b/doc/modules/qualitymetrics/drift.rst index 0a852f80af..4e78150ba7 100644 --- a/doc/modules/qualitymetrics/drift.rst +++ b/doc/modules/qualitymetrics/drift.rst @@ -42,6 +42,7 @@ Example code import spikeinterface.qualitymetrics as qm + # Make recording, sorting and wvf_extractor object for your data. # It is required to run `compute_spike_locations(wvf_extractor)` # (if missing, values will be NaN) drift_ptps, drift_stds, drift_mads = qm.compute_drift_metrics(wvf_extractor, peak_sign="neg") diff --git a/doc/modules/qualitymetrics/firing_range.rst b/doc/modules/qualitymetrics/firing_range.rst index fd8f79682c..0d17eedc13 100644 --- a/doc/modules/qualitymetrics/firing_range.rst +++ b/doc/modules/qualitymetrics/firing_range.rst @@ -5,7 +5,7 @@ Firing range (:code:`firing_range`) Calculation ----------- -The firing range indicates the spread of the firing range of a unit across the recording. It is computed by +The firing range indicates the dispersion of the firing rate of a unit across the recording. It is computed by taking the difference between the 95-th and 5th percentiles firing rates computed over short time bins (e.g. 10 s). @@ -13,10 +13,8 @@ taking the difference between the 95-th and 5th percentiles firing rates compute Expectation and use ------------------- -Both very high and very low firing rates can indicate errors. -Highly contaminated units (type I error) may have high firing rates as a result of the inclusion of other neurons' spikes. -Low firing rate units are likely to be incomplete (type II error), although this is not always the case (some neurons have highly selective firing patterns). -The firing rate is expected to be approximately log-normally distributed [Buzsáki]_. +Very high levels of firing ranges, outside of a physiolocigal range, might indicate noise contamination. + Example code ------------ @@ -26,23 +24,17 @@ Example code import spikeinterface.qualitymetrics as qm # Make recording, sorting and wvf_extractor object for your data. - firing_rate = qm.compute_firing_ranges(wvf_extractor) - # firing_rate is a dict containing the units' IDs as keys, - # and their firing rates across segments as values (in Hz).
+ firing_range = qm.compute_firing_ranges(wvf_extractor) + # firing_range is a dict containing the units' IDs as keys, + # and their firing_range as values (in Hz). References ---------- -.. autofunction:: spikeinterface.qualitymetrics.misc_metrics.compute_firing_rates - - -Links to original implementations ---------------------------------- +.. autofunction:: spikeinterface.qualitymetrics.misc_metrics.compute_firing_ranges -* From the `AllenSDK `_ Literature ---------- -Unknown origin. -Widely discussed eg: [Buzsáki]_. +Designed by Simon Musall and adapted to SpikeInterface by Alessio Buccino. diff --git a/src/spikeinterface/qualitymetrics/misc_metrics.py b/src/spikeinterface/qualitymetrics/misc_metrics.py index 9be9a32ff6..6c237ee720 100644 --- a/src/spikeinterface/qualitymetrics/misc_metrics.py +++ b/src/spikeinterface/qualitymetrics/misc_metrics.py @@ -567,8 +567,7 @@ def compute_synchrony_metrics(waveform_extractor, synchrony_sizes=(2, 4, 8), **k def compute_firing_ranges(waveform_extractor, bin_size_s=5, quantiles=(0.05, 0.95), unit_ids=None): - """ - Compute firing range, the range between the 5th and 95th quantiles of the firing rates distribution + """Calculate firing range, the range between the 5th and 95th quantiles of the firing rates distribution computed in non-overlapping time bins. Parameters @@ -579,11 +578,17 @@ def compute_firing_ranges(waveform_extractor, bin_size_s=5, quantiles=(0.05, 0.9 The size of the bin in seconds. quantiles : tuple, default: (0.05, 0.95) The quantiles to compute. + unit_ids : list or None + List of unit ids to compute the firing range. If None, all units are used. Returns ------- firing_ranges : dict The firing range for each unit. + + Notes + ----- + Designed by Simon Musall and ported to SpikeInterface by Alessio Buccino. """ sampling_frequency = waveform_extractor.sampling_frequency bin_size_samples = int(bin_size_s * sampling_frequency) @@ -601,7 +606,7 @@ def compute_firing_ranges(waveform_extractor, bin_size_s=5, quantiles=(0.05, 0.9 spike_times = sorting.get_unit_spike_train(unit_id=unit_id, segment_index=segment_index) spike_counts, _ = np.histogram(spike_times, bins=edges) firing_rates = spike_counts / bin_size_s - firing_rate_histograms[unit_id] = np.concatentate((firing_rate_histograms[unit_id], firing_rates)) + firing_rate_histograms[unit_id] = np.concatenate((firing_rate_histograms[unit_id], firing_rates)) # finally we compute the percentiles firing_ranges = {} @@ -616,20 +621,37 @@ def compute_firing_ranges(waveform_extractor, bin_size_s=5, quantiles=(0.05, 0.9 _default_params["firing_range"] = dict(bin_size_s=5, quantiles=(0.05, 0.95)) -# TODO: docs def compute_amplitude_spreads( - waveform_extractor, spikes_bin_size=50, amplitude_extension="spike_amplitudes", unit_ids=None + waveform_extractor, num_spikes_per_bin=100, amplitude_extension="spike_amplitudes", unit_ids=None ): - """Calculate mean spread of spike amplitudes within defined bins of AP events + """Calculate spread of spike amplitudes within defined bins of spike events. + The spread is the median coefficient of variation (the standard deviation of each bin divided by the overall amplitude mean) + computed over bins of `num_spikes_per_bin` spikes. - S Musall 2023 + Parameters + ---------- + waveform_extractor : WaveformExtractor + The waveform extractor object. + num_spikes_per_bin : int, default: 100 + The number of spikes per bin. + amplitude_extension : str, default: 'spike_amplitudes' + The name of the extension to load the amplitudes from. 'spike_amplitudes' or 'amplitude_scalings'.
+ unit_ids : list or None + List of unit ids to compute the amplitude spread. If None, all units are used. - Input: - ------ - amplitudes : numpy.ndarray - Array of amplitudes (don't need to be in physical units) + Returns + ------- + amplitude_spreads : dict + The amplitude spread for each unit. + Notes + ----- + Designed by Simon Musall and ported to SpikeInterface by Alessio Buccino. """ + assert amplitude_extension in ( + "spike_amplitudes", + "amplitude_scalings", + ), "Invalid amplitude_extension. It can be either 'spike_amplitudes' or 'amplitude_scalings'" sorting = waveform_extractor.sorting spikes = sorting.to_spike_vector() num_spikes = sorting.count_num_spikes_per_unit() @@ -639,23 +661,31 @@ def compute_amplitude_spreads( if waveform_extractor.is_extension(amplitude_extension): sac = waveform_extractor.load_extension(amplitude_extension) amps = sac.get_data(outputs="concatenated") + if amplitude_extension == "spike_amplitudes": + amps = np.concatenate(amps) else: warnings.warn(f"compute_amplitude_spreads() requires the {amplitude_extension} extension. Returning NaN values.") empty_dict = {unit_id: np.nan for unit_id in unit_ids} return empty_dict all_unit_ids = list(sorting.unit_ids) + amplitude_spreads = {} for unit_id in unit_ids: amps_unit = amps[spikes["unit_index"] == all_unit_ids.index(unit_id)] - if num_spikes[unit_id] < spikes_bin_size: - amp_spread = np.var(amps_unit) + amp_mean = np.abs(np.mean(amps_unit)) + if num_spikes[unit_id] < num_spikes_per_bin: + amp_spread = np.std(amps_unit) / amp_mean else: - amp_spread = [] - for i in range(0, num_spikes[unit_id], spikes_bin_size): - amp_spread.append(np.var(amps_unit[i : i + spikes_bin_size])) - amp_spread = np.median(amp_spread) + amp_spreads = [] + for i in range(0, num_spikes[unit_id], num_spikes_per_bin): + amp_spreads.append(np.std(amps_unit[i : i + num_spikes_per_bin]) / amp_mean) + amp_spread = np.median(amp_spreads) + amplitude_spreads[unit_id] = amp_spread + + return amplitude_spreads + - return amp_spread +_default_params["amplitude_spread"] = dict(num_spikes_per_bin=100, amplitude_extension="spike_amplitudes") def compute_amplitude_cutoffs( diff --git a/src/spikeinterface/qualitymetrics/quality_metric_list.py b/src/spikeinterface/qualitymetrics/quality_metric_list.py index 917927f44a..ee25ce64fd 100644 --- a/src/spikeinterface/qualitymetrics/quality_metric_list.py +++ b/src/spikeinterface/qualitymetrics/quality_metric_list.py @@ -13,6 +13,7 @@ compute_drift_metrics, compute_synchrony_metrics, compute_firing_ranges, + compute_amplitude_spreads, ) from .pca_metrics import ( @@ -41,6 +42,7 @@ "sliding_rp_violation": compute_sliding_rp_violations, "amplitude_cutoff": compute_amplitude_cutoffs, "amplitude_median": compute_amplitude_medians, + "amplitude_spread": compute_amplitude_spreads, "synchrony": compute_synchrony_metrics, "firing_range": compute_firing_ranges, "drift": compute_drift_metrics, diff --git a/src/spikeinterface/qualitymetrics/tests/test_metrics_functions.py b/src/spikeinterface/qualitymetrics/tests/test_metrics_functions.py index d927d64c4f..a570b75b52 100644 --- a/src/spikeinterface/qualitymetrics/tests/test_metrics_functions.py +++ b/src/spikeinterface/qualitymetrics/tests/test_metrics_functions.py @@ -12,6 +12,7 @@ compute_principal_components, compute_spike_locations, compute_spike_amplitudes, + compute_amplitude_scalings, ) from spikeinterface.qualitymetrics import ( @@ -31,6 +32,8 @@ compute_drift_metrics, compute_amplitude_medians, compute_synchrony_metrics, + compute_firing_ranges, + compute_amplitude_spreads, ) @@ -212,6 +215,12 @@ def
test_calculate_firing_rate_num_spikes(waveform_extractor_simple): # np.testing.assert_array_equal(list(num_spikes_gt.values()), list(num_spikes.values())) +def test_calculate_firing_range(waveform_extractor_simple): + we = waveform_extractor_simple + firing_ranges = compute_firing_ranges(we) + print(firing_ranges) + + def test_calculate_amplitude_cutoff(waveform_extractor_simple): we = waveform_extractor_simple spike_amps = compute_spike_amplitudes(we) @@ -234,6 +243,19 @@ def test_calculate_amplitude_median(waveform_extractor_simple): # assert np.allclose(list(amp_medians_gt.values()), list(amp_medians.values()), rtol=0.05) +def test_calculate_amplitude_spread(waveform_extractor_simple): + we = waveform_extractor_simple + spike_amps = compute_spike_amplitudes(we) + amp_spreads = compute_amplitude_spreads(we, num_spikes_per_bin=20) + print(amp_spreads) + + amps_scalings = compute_amplitude_scalings(we) + amp_spreads_scalings = compute_amplitude_spreads( + we, num_spikes_per_bin=20, amplitude_extension="amplitude_scalings" + ) + print(amp_spreads_scalings) + + def test_calculate_snrs(waveform_extractor_simple): we = waveform_extractor_simple snrs = compute_snrs(we) @@ -358,4 +380,6 @@ def test_calculate_drift_metrics(waveform_extractor_simple): # test_calculate_isi_violations(we) # test_calculate_sliding_rp_violations(we) # test_calculate_drift_metrics(we) - test_synchrony_metrics(we) + # test_synchrony_metrics(we) + test_calculate_firing_range(we) + test_calculate_amplitude_spread(we) From d80341ca2cd84852988cc5704bafc1c0a6d16540 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Tue, 12 Sep 2023 18:16:26 +0200 Subject: [PATCH 012/115] remove gtstudy widgets from legacy and port some of them in the API. --- .../comparison/tests/test_groundtruthstudy.py | 15 +- .../widgets/_legacy_mpl_widgets/__init__.py | 16 - .../widgets/_legacy_mpl_widgets/gtstudy.py | 574 ------------------ src/spikeinterface/widgets/gtstudy.py | 60 ++ src/spikeinterface/widgets/widget_list.py | 6 +- 5 files changed, 66 insertions(+), 605 deletions(-) delete mode 100644 src/spikeinterface/widgets/_legacy_mpl_widgets/gtstudy.py diff --git a/src/spikeinterface/comparison/tests/test_groundtruthstudy.py b/src/spikeinterface/comparison/tests/test_groundtruthstudy.py index 52d5c73d3b..a75ac272be 100644 --- a/src/spikeinterface/comparison/tests/test_groundtruthstudy.py +++ b/src/spikeinterface/comparison/tests/test_groundtruthstudy.py @@ -1,20 +1,11 @@ -import importlib import shutil import pytest from pathlib import Path -# from spikeinterface.extractors import toy_example from spikeinterface import generate_ground_truth_recording from spikeinterface.preprocessing import bandpass_filter -from spikeinterface.sorters import installed_sorters from spikeinterface.comparison import GroundTruthStudy -# try: -# import tridesclous - -# HAVE_TDC = True -# except ImportError: -# HAVE_TDC = False if hasattr(pytest, "global_test_folder"): @@ -71,7 +62,7 @@ def create_a_study(study_folder): }, }, - # we comment this at the moment because SC2 is quite slow for testing + # we comment this at the moment because SC2 is quite slow for testing # ("sc2", "no-preprocess", "tetrode"): { # "label": "spykingcircus2 without preprocessing standar params", # "dataset": "toy_tetrode", @@ -118,6 +109,4 @@ def test_GroundTruthStudy(): setup_module() test_GroundTruthStudy() - # test_run_study_sorters() - # test_extract_sortings() - + \ No newline at end of file diff --git a/src/spikeinterface/widgets/_legacy_mpl_widgets/__init__.py
b/src/spikeinterface/widgets/_legacy_mpl_widgets/__init__.py index c0dcd7ea6e..bf28c891f5 100644 --- a/src/spikeinterface/widgets/_legacy_mpl_widgets/__init__.py +++ b/src/spikeinterface/widgets/_legacy_mpl_widgets/__init__.py @@ -41,22 +41,6 @@ from .sortingperformance import plot_sorting_performance -# ground truth study (=comparison over sorter) -from .gtstudy import ( - StudyComparisonRunTimesWidget, - plot_gt_study_run_times, - StudyComparisonUnitCountsWidget, - StudyComparisonUnitCountsAveragesWidget, - plot_gt_study_unit_counts, - plot_gt_study_unit_counts_averages, - plot_gt_study_performances, - plot_gt_study_performances_averages, - StudyComparisonPerformancesWidget, - StudyComparisonPerformancesAveragesWidget, - plot_gt_study_performances_by_template_similarity, - StudyComparisonPerformancesByTemplateSimilarity, -) - # ground truth comparions (=comparison over sorter) from .gtcomparison import ( plot_gt_performances, diff --git a/src/spikeinterface/widgets/_legacy_mpl_widgets/gtstudy.py b/src/spikeinterface/widgets/_legacy_mpl_widgets/gtstudy.py deleted file mode 100644 index 573221f528..0000000000 --- a/src/spikeinterface/widgets/_legacy_mpl_widgets/gtstudy.py +++ /dev/null @@ -1,574 +0,0 @@ -""" -Various widgets on top of GroundTruthStudy to summary results: - * run times - * performances - * count units -""" -import numpy as np - - -from .basewidget import BaseWidget - - -class StudyComparisonRunTimesWidget(BaseWidget): - """ - Plot run times for a study. - - Parameters - ---------- - study: GroundTruthStudy - The study object to consider - ax: matplotlib ax - The ax to be used. If not given a figure is created - color: - - - """ - - def __init__(self, study, color="#F7DC6F", ax=None): - from matplotlib import pyplot as plt - import pandas as pd - - self.study = study - self.color = color - - BaseWidget.__init__(self, ax=ax) - - def plot(self): - study = self.study - ax = self.ax - - all_run_times = study.aggregate_run_times() - av_run_times = all_run_times.reset_index().groupby("sorter_name")["run_time"].mean() - - if len(study.rec_names) == 1: - # no errors bars - yerr = None - else: - # errors bars across recording - yerr = all_run_times.reset_index().groupby("sorter_name")["run_time"].std() - - sorter_names = av_run_times.index - - x = np.arange(sorter_names.size) + 1 - ax.bar(x, av_run_times.values, width=0.8, color=self.color, yerr=yerr) - ax.set_ylabel("run times (s)") - ax.set_xticks(x) - ax.set_xticklabels(sorter_names, rotation=45) - ax.set_xlim(0, sorter_names.size + 1) - - -def plot_gt_study_run_times(*args, **kwargs): - W = StudyComparisonRunTimesWidget(*args, **kwargs) - W.plot() - return W - - -plot_gt_study_run_times.__doc__ = StudyComparisonRunTimesWidget.__doc__ - - -class StudyComparisonUnitCountsAveragesWidget(BaseWidget): - """ - Plot averages over found units for a study. - - Parameters - ---------- - study: GroundTruthStudy - The study object to consider - ax: matplotlib ax - The ax to be used. 
If not given a figure is created - cmap_name - log_scale: if the y-axis should be displayed as log scaled - - """ - - def __init__(self, study, cmap_name="Set2", log_scale=False, ax=None): - from matplotlib import pyplot as plt - import pandas as pd - - self.study = study - self.cmap_name = cmap_name - self.log_scale = log_scale - - BaseWidget.__init__(self, ax=ax) - - def plot(self): - study = self.study - ax = self.ax - - count_units = study.aggregate_count_units() - - if study.exhaustive_gt: - columns = ["num_well_detected", "num_false_positive", "num_redundant", "num_overmerged"] - else: - columns = ["num_well_detected", "num_redundant", "num_overmerged"] - ncol = len(columns) - - df = count_units.reset_index() - - m = df.groupby("sorter_name")[columns].mean() - - cmap = plt.get_cmap(self.cmap_name, 4) - - if len(study.rec_names) == 1: - # no errors bars - stds = None - else: - # errors bars across recording - stds = df.groupby("sorter_name")[columns].std() - - sorter_names = m.index - clean_labels = [col.replace("num_", "").replace("_", " ").title() for col in columns] - - for c, col in enumerate(columns): - x = np.arange(sorter_names.size) + 1 + c / (ncol + 2) - if stds is None: - yerr = None - else: - yerr = stds[col].values - ax.bar(x, m[col].values, yerr=yerr, width=1 / (ncol + 2), color=cmap(c), label=clean_labels[c]) - - ax.legend() - if self.log_scale: - ax.set_yscale("log") - - ax.set_xticks(np.arange(sorter_names.size) + 1) - ax.set_xticklabels(sorter_names, rotation=45) - ax.set_ylabel("# units") - ax.set_xlim(0, sorter_names.size + 1) - - if count_units["num_gt"].unique().size == 1: - num_gt = count_units["num_gt"].unique()[0] - ax.axhline(num_gt, ls="--", color="k") - - -class StudyComparisonUnitCountsWidget(BaseWidget): - """ - Plot averages over found units for a study. - - Parameters - ---------- - study: GroundTruthStudy - The study object to consider - ax: matplotlib ax - The ax to be used. 
If not given a figure is created - cmap_name - log_scale: if the y-axis should be displayed as log scaled - - """ - - def __init__(self, study, cmap_name="Set2", log_scale=False, ax=None): - from matplotlib import pyplot as plt - import pandas as pd - - self.study = study - self.cmap_name = cmap_name - self.log_scale = log_scale - - num_rec = len(study.rec_names) - if ax is None: - fig, axes = plt.subplots(ncols=1, nrows=num_rec, squeeze=False) - else: - axes = np.array([ax]).T - - BaseWidget.__init__(self, axes=axes) - - def plot(self): - study = self.study - ax = self.ax - - import seaborn as sns - - study = self.study - - count_units = study.aggregate_count_units() - count_units = count_units.reset_index() - - if study.exhaustive_gt: - columns = ["num_well_detected", "num_false_positive", "num_redundant", "num_overmerged"] - else: - columns = ["num_well_detected", "num_redundant", "num_overmerged"] - - ncol = len(columns) - cmap = plt.get_cmap(self.cmap_name, 4) - - for r, rec_name in enumerate(study.rec_names): - ax = self.axes[r, 0] - ax.set_title(rec_name) - df = count_units.loc[count_units["rec_name"] == rec_name, :] - m = df.groupby("sorter_name")[columns].mean() - sorter_names = m.index - clean_labels = [col.replace("num_", "").replace("_", " ").title() for col in columns] - - for c, col in enumerate(columns): - x = np.arange(sorter_names.size) + 1 + c / (ncol + 2) - ax.bar(x, m[col].values, width=1 / (ncol + 2), color=cmap(c), label=clean_labels[c]) - - if r == 0: - ax.legend() - - if self.log_scale: - ax.set_yscale("log") - - if r == len(study.rec_names) - 1: - ax.set_xticks(np.arange(sorter_names.size) + 1) - ax.set_xticklabels(sorter_names, rotation=45) - ax.set_ylabel("# units") - ax.set_xlim(0, sorter_names.size + 1) - - if count_units["num_gt"].unique().size == 1: - num_gt = count_units["num_gt"].unique()[0] - ax.axhline(num_gt, ls="--", color="k") - - -def plot_gt_study_unit_counts_averages(*args, **kwargs): - W = StudyComparisonUnitCountsAveragesWidget(*args, **kwargs) - W.plot() - return W - - -plot_gt_study_unit_counts_averages.__doc__ = StudyComparisonUnitCountsAveragesWidget.__doc__ - - -def plot_gt_study_unit_counts(*args, **kwargs): - W = StudyComparisonUnitCountsWidget(*args, **kwargs) - W.plot() - return W - - -plot_gt_study_unit_counts.__doc__ = StudyComparisonUnitCountsWidget.__doc__ - - -class StudyComparisonPerformancesWidget(BaseWidget): - """ - Plot run times for a study. - - Parameters - ---------- - study: GroundTruthStudy - The study object to consider - ax: matplotlib ax - The ax to be used. 
If not given a figure is created - cmap_name - - """ - - def __init__(self, study, palette="Set1", ax=None): - from matplotlib import pyplot as plt - import pandas as pd - - self.study = study - self.palette = palette - - num_rec = len(study.rec_names) - if ax is None: - fig, axes = plt.subplots(ncols=1, nrows=num_rec, squeeze=False) - else: - axes = np.array([ax]).T - - BaseWidget.__init__(self, axes=axes) - - def plot(self, average=False): - import seaborn as sns - - study = self.study - - sns.set_palette(sns.color_palette(self.palette)) - - perf_by_units = study.aggregate_performance_by_unit() - perf_by_units = perf_by_units.reset_index() - - for r, rec_name in enumerate(study.rec_names): - ax = self.axes[r, 0] - ax.set_title(rec_name) - df = perf_by_units.loc[perf_by_units["rec_name"] == rec_name, :] - df = pd.melt( - df, - id_vars="sorter_name", - var_name="Metric", - value_name="Score", - value_vars=("accuracy", "precision", "recall"), - ).sort_values("sorter_name") - sns.swarmplot( - data=df, x="sorter_name", y="Score", hue="Metric", dodge=True, s=3, ax=ax - ) # order=sorter_list, - # ~ ax.set_xticklabels(sorter_names_short, rotation=30, ha='center') - # ~ ax.legend(bbox_to_anchor=(1.0, 1), loc=2, borderaxespad=0., frameon=False, fontsize=8, markerscale=0.5) - - ax.set_ylim(0, 1.05) - ax.set_ylabel(f"Perfs for {rec_name}") - if r < len(study.rec_names) - 1: - ax.set_xlabel("") - ax.set(xticklabels=[]) - - -class StudyComparisonTemplateSimilarityWidget(BaseWidget): - """ - Plot run times for a study. - - Parameters - ---------- - study: GroundTruthStudy - The study object to consider - ax: matplotlib ax - The ax to be used. If not given a figure is created - cmap_name - - """ - - def __init__(self, study, cmap_name="Set1", ax=None): - from matplotlib import pyplot as plt - import pandas as pd - - self.study = study - self.cmap_name = cmap_name - - BaseWidget.__init__(self, ax=ax) - - def plot(self): - import seaborn as sns - - study = self.study - ax = self.ax - - perf_by_units = study.aggregate_performance_by_unit() - perf_by_units = perf_by_units.reset_index() - - columns = ["accuracy", "precision", "recall"] - to_agg = {} - ncol = len(columns) - - for column in columns: - perf_by_units[column] = pd.to_numeric(perf_by_units[column], downcast="float") - to_agg[column] = ["mean"] - - data = perf_by_units.groupby(["sorter_name", "rec_name"]).agg(to_agg) - - m = data.groupby("sorter_name").mean() - - cmap = plt.get_cmap(self.cmap_name, 4) - - if len(study.rec_names) == 1: - # no errors bars - stds = None - else: - # errors bars across recording - stds = data.groupby("sorter_name").std() - - sorter_names = m.index - clean_labels = [col.replace("num_", "").replace("_", " ").title() for col in columns] - - width = 1 / (ncol + 2) - - for c, col in enumerate(columns): - x = np.arange(sorter_names.size) + 1 + c / (ncol + 2) - if stds is None: - yerr = None - else: - yerr = stds[col].values - ax.bar(x, m[col].values.flatten(), yerr=yerr.flatten(), width=width, color=cmap(c), label=clean_labels[c]) - - ax.legend() - - ax.set_xticks(np.arange(sorter_names.size) + 1 + width) - ax.set_xticklabels(sorter_names, rotation=45) - ax.set_ylabel("metric") - ax.set_xlim(0, sorter_names.size + 1) - - -class StudyComparisonPerformancesAveragesWidget(BaseWidget): - """ - Plot run times for a study. - - Parameters - ---------- - study: GroundTruthStudy - The study object to consider - ax: matplotlib ax - The ax to be used. 
If not given a figure is created - cmap_name - - """ - - def __init__(self, study, cmap_name="Set1", ax=None): - from matplotlib import pyplot as plt - import pandas as pd - - self.study = study - self.cmap_name = cmap_name - - BaseWidget.__init__(self, ax=ax) - - def plot(self): - import seaborn as sns - - study = self.study - ax = self.ax - - perf_by_units = study.aggregate_performance_by_unit() - perf_by_units = perf_by_units.reset_index() - - columns = ["accuracy", "precision", "recall"] - to_agg = {} - ncol = len(columns) - - for column in columns: - perf_by_units[column] = pd.to_numeric(perf_by_units[column], downcast="float") - to_agg[column] = ["mean"] - - data = perf_by_units.groupby(["sorter_name", "rec_name"]).agg(to_agg) - - m = data.groupby("sorter_name").mean() - - cmap = plt.get_cmap(self.cmap_name, 4) - - if len(study.rec_names) == 1: - # no errors bars - stds = None - else: - # errors bars across recording - stds = data.groupby("sorter_name").std() - - sorter_names = m.index - clean_labels = [col.replace("num_", "").replace("_", " ").title() for col in columns] - - width = 1 / (ncol + 2) - - for c, col in enumerate(columns): - x = np.arange(sorter_names.size) + 1 + c / (ncol + 2) - if stds is None: - yerr = None - else: - yerr = stds[col].values - ax.bar(x, m[col].values.flatten(), yerr=yerr.flatten(), width=width, color=cmap(c), label=clean_labels[c]) - - ax.legend() - - ax.set_xticks(np.arange(sorter_names.size) + 1 + width) - ax.set_xticklabels(sorter_names, rotation=45) - ax.set_ylabel("metric") - ax.set_xlim(0, sorter_names.size + 1) - - -class StudyComparisonPerformancesByTemplateSimilarity(BaseWidget): - """ - Plot run times for a study. - - Parameters - ---------- - study: GroundTruthStudy - The study object to consider - ax: matplotlib ax - The ax to be used. 
If not given a figure is created - cmap_name - - """ - - def __init__(self, study, cmap_name="Set1", ax=None, ylim=(0.6, 1), show_legend=True): - from matplotlib import pyplot as plt - import pandas as pd - - self.study = study - self.cmap_name = cmap_name - self.show_legend = show_legend - self.ylim = ylim - - BaseWidget.__init__(self, ax=ax) - - def plot(self): - import sklearn - - cmap = plt.get_cmap(self.cmap_name, len(self.study.sorter_names)) - colors = [cmap(i) for i in range(len(self.study.sorter_names))] - - flat_templates_gt = {} - for rec_name in self.study.rec_names: - waveform_folder = self.study.study_folder / "waveforms" / f"waveforms_GroundTruth_{rec_name}" - if not waveform_folder.is_dir(): - self.study.compute_waveforms(rec_name) - - templates = self.study.get_templates(rec_name) - flat_templates_gt[rec_name] = templates.reshape(templates.shape[0], -1) - - all_results = {} - - for sorter_name in self.study.sorter_names: - all_results[sorter_name] = {"similarity": [], "accuracy": []} - - for rec_name in self.study.rec_names: - try: - waveform_folder = self.study.study_folder / "waveforms" / f"waveforms_{sorter_name}_{rec_name}" - if not waveform_folder.is_dir(): - self.study.compute_waveforms(rec_name, sorter_name) - templates = self.study.get_templates(rec_name, sorter_name) - flat_templates = templates.reshape(templates.shape[0], -1) - similarity_matrix = sklearn.metrics.pairwise.cosine_similarity( - flat_templates_gt[rec_name], flat_templates - ) - - comp = self.study.comparisons[(rec_name, sorter_name)] - - for i, u1 in enumerate(comp.sorting1.unit_ids): - u2 = comp.best_match_12[u1] - if u2 != -1: - all_results[sorter_name]["similarity"] += [ - similarity_matrix[comp.sorting1.id_to_index(u1), comp.sorting2.id_to_index(u2)] - ] - all_results[sorter_name]["accuracy"] += [comp.agreement_scores.at[u1, u2]] - except Exception: - pass - - all_results[sorter_name]["similarity"] = np.array(all_results[sorter_name]["similarity"]) - all_results[sorter_name]["accuracy"] = np.array(all_results[sorter_name]["accuracy"]) - - from matplotlib.patches import Ellipse - - similarity_means = [all_results[sorter_name]["similarity"].mean() for sorter_name in self.study.sorter_names] - similarity_stds = [all_results[sorter_name]["similarity"].std() for sorter_name in self.study.sorter_names] - - accuracy_means = [all_results[sorter_name]["accuracy"].mean() for sorter_name in self.study.sorter_names] - accuracy_stds = [all_results[sorter_name]["accuracy"].std() for sorter_name in self.study.sorter_names] - - scount = 0 - for x, y, i, j in zip(similarity_means, accuracy_means, similarity_stds, accuracy_stds): - e = Ellipse((x, y), i, j) - e.set_alpha(0.2) - e.set_facecolor(colors[scount]) - self.ax.add_artist(e) - self.ax.scatter([x], [y], c=colors[scount], label=self.study.sorter_names[scount]) - scount += 1 - - self.ax.set_ylabel("accuracy") - self.ax.set_xlabel("cosine similarity") - if self.ylim is not None: - self.ax.set_ylim(self.ylim) - - if self.show_legend: - self.ax.legend() - - -def plot_gt_study_performances(*args, **kwargs): - W = StudyComparisonPerformancesWidget(*args, **kwargs) - W.plot() - return W - - -plot_gt_study_performances.__doc__ = StudyComparisonPerformancesWidget.__doc__ - - -def plot_gt_study_performances_averages(*args, **kwargs): - W = StudyComparisonPerformancesAveragesWidget(*args, **kwargs) - W.plot() - return W - - -plot_gt_study_performances_averages.__doc__ = StudyComparisonPerformancesAveragesWidget.__doc__ - - -def 
plot_gt_study_performances_by_template_similarity(*args, **kwargs): - W = StudyComparisonPerformancesByTemplateSimilarity(*args, **kwargs) - W.plot() - return W - - -plot_gt_study_performances_by_template_similarity.__doc__ = StudyComparisonPerformancesByTemplateSimilarity.__doc__ diff --git a/src/spikeinterface/widgets/gtstudy.py b/src/spikeinterface/widgets/gtstudy.py index aa1a80c3d3..304cf1a44a 100644 --- a/src/spikeinterface/widgets/gtstudy.py +++ b/src/spikeinterface/widgets/gtstudy.py @@ -190,3 +190,63 @@ def plot_matplotlib(self, data_plot, **backend_kwargs): value_vars=('accuracy','precision', 'recall')) df['x'] = df.apply(lambda r: ' '.join([r[col] for col in levels]), axis=1) sns.swarmplot(data=df, x='x', y='Score', hue='Metric', dodge=True) + + + +class StudyPerformancesVsMetrics(BaseWidget): + """ + Plot performances vs a metric (snr for instance) over cases for a study. + + + Parameters + ---------- + study: GroundTruthStudy + A study object. + metric_name: str + The name of the unit metric to plot on the x-axis ("snr" for instance). + performance_name: str + The name of the performance measure to plot on the y-axis ("accuracy" for instance). + case_keys: list or None + A selection of cases to plot, if None, then all. + + """ + + def __init__( + self, + study, + metric_name="snr", + performance_name="accuracy", + case_keys=None, + backend=None, + **backend_kwargs, + ): + + if case_keys is None: + case_keys = list(study.cases.keys()) + + plot_data = dict( + study=study, + metric_name=metric_name, + performance_name=performance_name, + case_keys=case_keys, + ) + + BaseWidget.__init__(self, plot_data, backend=backend, **backend_kwargs) + + def plot_matplotlib(self, data_plot, **backend_kwargs): + import matplotlib.pyplot as plt + from .utils_matplotlib import make_mpl_figure + from .utils import get_some_colors + + dp = to_attr(data_plot) + self.figure, self.axes, self.ax = make_mpl_figure(**backend_kwargs) + + + study = dp.study + perfs = study.get_performance_by_unit(case_keys=dp.case_keys) + + for key in dp.case_keys: + x = study.get_metrics(key)[dp.metric_name].values + y = perfs.xs(key)[dp.performance_name].values + label = dp.study.cases[key]["label"] + self.ax.scatter(x, y, label=label) + + self.ax.legend() \ No newline at end of file diff --git a/src/spikeinterface/widgets/widget_list.py b/src/spikeinterface/widgets/widget_list.py index 4bc91e0737..3a1bdd12dc 100644 --- a/src/spikeinterface/widgets/widget_list.py +++ b/src/spikeinterface/widgets/widget_list.py @@ -20,7 +20,7 @@ from .unit_templates import UnitTemplatesWidget from .unit_waveforms_density_map import UnitWaveformDensityMapWidget from .unit_waveforms import UnitWaveformsWidget -from .gtstudy import StudyRunTimesWidget, StudyUnitCountsWidget, StudyPerformances +from .gtstudy import StudyRunTimesWidget, StudyUnitCountsWidget, StudyPerformances, StudyPerformancesVsMetrics widget_list = [ @@ -43,6 +43,8 @@ UnitWaveformDensityMapWidget, UnitWaveformsWidget, StudyRunTimesWidget, StudyUnitCountsWidget, - StudyPerformances + StudyPerformances, + StudyPerformancesVsMetrics ] @@ -95,6 +96,7 @@ plot_study_run_times = StudyRunTimesWidget plot_study_unit_counts = StudyUnitCountsWidget plot_study_performances = StudyPerformances +plot_study_performances_vs_metrics = StudyPerformancesVsMetrics def plot_timeseries(*args, **kwargs): From f97f76a7948f87cdf6873ce0a0b378f1120040b7 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Tue, 12 Sep 2023 18:23:43 +0200 Subject: [PATCH 013/115] Clean --- .../comparison/groundtruthstudy.py | 340 +----------------- 1 file changed, 10 insertions(+), 330 deletions(-) diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py
index d936c50e5e..8d43fb5f0c 100644 --- a/src/spikeinterface/comparison/groundtruthstudy.py +++ b/src/spikeinterface/comparison/groundtruthstudy.py @@ -16,19 +16,10 @@ from .paircomparisons import compare_sorter_to_ground_truth, GroundTruthComparison -# from .studytools import ( -# setup_comparison_study, -# get_rec_names, -# get_recordings, -# iter_working_folder, -# iter_computed_names, -# iter_computed_sorting, -# collect_run_times, -# ) - -# TODO : save comparison in folders -# TODO : find a way to set level names +# TODO : save comparison in folders when the Comparison object will be able to serialize +# TODO ??: make an internal optional binary copy when running several external sorters +# on the same dataset, to avoid multiple binary saves? even when the recording is float32 (KS needs int16) @@ -48,17 +39,16 @@ class GroundTruthStudy: * parameters of comparisons * any combination of theses - For enough flexibility cases key can be a tuple so that we can varify complexity along several - "axis" (paremeters or sorter) + For enough flexibility, case keys can be tuples so that we can vary complexity along several + "levels" or "axes" (parameters or sorters). + + Generated dataframes will optionally have an index with several levels. - Ground truth dataset need recording+sorting. This can be from meraec file or from the internal generator + Ground truth datasets need recording+sorting. This can be from a MEArec file or from the internal generator :py:fun:`generate_ground_truth_recording()` This GroundTruthStudy have been refactor in version 0.100 to be more flexible than previous versions. - Folders structures are not backward compatible. + Folder structures are not backward compatible at all. """ def __init__(self, study_folder): self.folder = Path(study_folder) @@ -105,8 +95,6 @@ def create(cls, study_folder, datasets={}, cases={}, levels=None): (study_folder / "sortings" / "run_logs").mkdir() (study_folder / "metrics").mkdir() - - for key, (rec, gt_sorting) in datasets.items(): assert "/" not in key assert "\\" not in key @@ -341,7 +329,7 @@ def get_metrics(self, key): recording, gt_sorting = self.datasets[dataset_key] metrics.index = gt_sorting.unit_ids return metrics - + def get_units_snr(self, key): """ """ @@ -369,8 +357,6 @@ def get_performance_by_unit(self, case_keys=None): perf = perf.reset_index() perf_by_unit.append(perf) - - perf_by_unit = pd.concat(perf_by_unit) perf_by_unit = perf_by_unit.set_index(self.levels) return perf_by_unit @@ -421,309 +407,3 @@ def get_count_units( return count_units - - - - -class OLDGroundTruthStudy: - def __init__(self, study_folder=None): - import pandas as pd - - self.study_folder = Path(study_folder) - self._is_scanned = False - self.computed_names = None - self.rec_names = None - self.sorter_names = None - - self.scan_folder() - - self.comparisons = None - self.exhaustive_gt = None - - def __repr__(self): - t = "Ground truth study\n" - t += " " + str(self.study_folder) + "\n" - t += " recordings: {} {}\n".format(len(self.rec_names), self.rec_names) - if len(self.sorter_names): - t += " sorters: {} {}\n".format(len(self.sorter_names), self.sorter_names) - - return t - - def scan_folder(self): - self.rec_names = get_rec_names(self.study_folder) - # scan computed names - self.computed_names = list(iter_computed_names(self.study_folder)) # list of pair (rec_name, sorter_name) - self.sorter_names = np.unique([e for _, e in iter_computed_names(self.study_folder)]).tolist() - self._is_scanned = True - - @classmethod - def create(cls, study_folder,
gt_dict, **job_kwargs): - setup_comparison_study(study_folder, gt_dict, **job_kwargs) - return cls(study_folder) - - def run_sorters(self, sorter_list, mode_if_folder_exists="keep", remove_sorter_folders=False, **kwargs): - sorter_folders = self.study_folder / "sorter_folders" - recording_dict = get_recordings(self.study_folder) - - run_sorters( - sorter_list, - recording_dict, - sorter_folders, - with_output=False, - mode_if_folder_exists=mode_if_folder_exists, - **kwargs, - ) - - # results are copied so the heavy sorter_folders can be removed - self.copy_sortings() - - if remove_sorter_folders: - shutil.rmtree(self.study_folder / "sorter_folders") - - def _check_rec_name(self, rec_name): - if not self._is_scanned: - self.scan_folder() - if len(self.rec_names) > 1 and rec_name is None: - raise Exception("Pass 'rec_name' parameter to select which recording to use.") - elif len(self.rec_names) == 1: - rec_name = self.rec_names[0] - else: - rec_name = self.rec_names[self.rec_names.index(rec_name)] - return rec_name - - def get_ground_truth(self, rec_name=None): - rec_name = self._check_rec_name(rec_name) - sorting = load_extractor(self.study_folder / "ground_truth" / rec_name) - return sorting - - def get_recording(self, rec_name=None): - rec_name = self._check_rec_name(rec_name) - rec = load_extractor(self.study_folder / "raw_files" / rec_name) - return rec - - def get_sorting(self, sort_name, rec_name=None): - rec_name = self._check_rec_name(rec_name) - - selected_sorting = None - if sort_name in self.sorter_names: - for r_name, sorter_name, sorting in iter_computed_sorting(self.study_folder): - if sort_name == sorter_name and r_name == rec_name: - selected_sorting = sorting - return selected_sorting - - def copy_sortings(self): - sorter_folders = self.study_folder / "sorter_folders" - sorting_folders = self.study_folder / "sortings" - log_olders = self.study_folder / "sortings" / "run_log" - - log_olders.mkdir(parents=True, exist_ok=True) - - for rec_name, sorter_name, output_folder in iter_working_folder(sorter_folders): - SorterClass = sorter_dict[sorter_name] - fname = rec_name + "[#]" + sorter_name - npz_filename = sorting_folders / (fname + ".npz") - - try: - sorting = SorterClass.get_result_from_folder(output_folder) - NpzSortingExtractor.write_sorting(sorting, npz_filename) - except: - if npz_filename.is_file(): - npz_filename.unlink() - if (output_folder / "spikeinterface_log.json").is_file(): - shutil.copyfile( - output_folder / "spikeinterface_log.json", sorting_folders / "run_log" / (fname + ".json") - ) - - self.scan_folder() - - def run_comparisons(self, exhaustive_gt=False, **kwargs): - self.comparisons = {} - for rec_name, sorter_name, sorting in iter_computed_sorting(self.study_folder): - gt_sorting = self.get_ground_truth(rec_name) - sc = compare_sorter_to_ground_truth(gt_sorting, sorting, exhaustive_gt=exhaustive_gt, **kwargs) - self.comparisons[(rec_name, sorter_name)] = sc - self.exhaustive_gt = exhaustive_gt - - def aggregate_run_times(self): - return collect_run_times(self.study_folder) - - def aggregate_performance_by_unit(self): - assert self.comparisons is not None, "run_comparisons first" - - perf_by_unit = [] - for rec_name, sorter_name, sorting in iter_computed_sorting(self.study_folder): - comp = self.comparisons[(rec_name, sorter_name)] - - perf = comp.get_performance(method="by_unit", output="pandas") - perf["rec_name"] = rec_name - perf["sorter_name"] = sorter_name - perf = perf.reset_index() - perf_by_unit.append(perf) - - import pandas as pd - - 
perf_by_unit = pd.concat(perf_by_unit) - perf_by_unit = perf_by_unit.set_index(["rec_name", "sorter_name", "gt_unit_id"]) - - return perf_by_unit - - def aggregate_count_units(self, well_detected_score=None, redundant_score=None, overmerged_score=None): - assert self.comparisons is not None, "run_comparisons first" - - import pandas as pd - - index = pd.MultiIndex.from_tuples(self.computed_names, names=["rec_name", "sorter_name"]) - - count_units = pd.DataFrame( - index=index, - columns=["num_gt", "num_sorter", "num_well_detected", "num_redundant", "num_overmerged"], - dtype=int, - ) - - if self.exhaustive_gt: - count_units["num_false_positive"] = pd.Series(dtype=int) - count_units["num_bad"] = pd.Series(dtype=int) - - for rec_name, sorter_name, sorting in iter_computed_sorting(self.study_folder): - gt_sorting = self.get_ground_truth(rec_name) - comp = self.comparisons[(rec_name, sorter_name)] - - count_units.loc[(rec_name, sorter_name), "num_gt"] = len(gt_sorting.get_unit_ids()) - count_units.loc[(rec_name, sorter_name), "num_sorter"] = len(sorting.get_unit_ids()) - count_units.loc[(rec_name, sorter_name), "num_well_detected"] = comp.count_well_detected_units( - well_detected_score - ) - if self.exhaustive_gt: - count_units.loc[(rec_name, sorter_name), "num_overmerged"] = comp.count_overmerged_units( - overmerged_score - ) - count_units.loc[(rec_name, sorter_name), "num_redundant"] = comp.count_redundant_units(redundant_score) - count_units.loc[(rec_name, sorter_name), "num_false_positive"] = comp.count_false_positive_units( - redundant_score - ) - count_units.loc[(rec_name, sorter_name), "num_bad"] = comp.count_bad_units() - - return count_units - - def aggregate_dataframes(self, copy_into_folder=True, **karg_thresh): - dataframes = {} - dataframes["run_times"] = self.aggregate_run_times().reset_index() - perfs = self.aggregate_performance_by_unit() - - dataframes["perf_by_unit"] = perfs.reset_index() - dataframes["count_units"] = self.aggregate_count_units(**karg_thresh).reset_index() - - if copy_into_folder: - tables_folder = self.study_folder / "tables" - tables_folder.mkdir(parents=True, exist_ok=True) - - for name, df in dataframes.items(): - df.to_csv(str(tables_folder / (name + ".csv")), sep="\t", index=False) - - return dataframes - - def get_waveform_extractor(self, rec_name, sorter_name=None): - rec = self.get_recording(rec_name) - - if sorter_name is None: - name = "GroundTruth" - sorting = self.get_ground_truth(rec_name) - else: - assert sorter_name in self.sorter_names - name = sorter_name - sorting = self.get_sorting(sorter_name, rec_name) - - waveform_folder = self.study_folder / "waveforms" / f"waveforms_{name}_{rec_name}" - - if waveform_folder.is_dir(): - we = WaveformExtractor.load(waveform_folder) - else: - we = WaveformExtractor.create(rec, sorting, waveform_folder) - return we - - def compute_waveforms( - self, - rec_name, - sorter_name=None, - ms_before=3.0, - ms_after=4.0, - max_spikes_per_unit=500, - n_jobs=-1, - total_memory="1G", - ): - we = self.get_waveform_extractor(rec_name, sorter_name) - we.set_params(ms_before=ms_before, ms_after=ms_after, max_spikes_per_unit=max_spikes_per_unit) - we.run_extract_waveforms(n_jobs=n_jobs, total_memory=total_memory) - - def get_templates(self, rec_name, sorter_name=None, mode="median"): - """ - Get template for a given recording. - - If sorter_name=None then template are from the ground truth. 
- - """ - we = self.get_waveform_extractor(rec_name, sorter_name=sorter_name) - templates = we.get_all_templates(mode=mode) - return templates - - def compute_metrics( - self, - rec_name, - metric_names=["snr"], - ms_before=3.0, - ms_after=4.0, - max_spikes_per_unit=500, - n_jobs=-1, - total_memory="1G", - ): - we = self.get_waveform_extractor(rec_name) - we.set_params(ms_before=ms_before, ms_after=ms_after, max_spikes_per_unit=max_spikes_per_unit) - we.run_extract_waveforms(n_jobs=n_jobs, total_memory=total_memory) - - # metrics - metrics = compute_quality_metrics(we, metric_names=metric_names) - folder = self.study_folder / "metrics" - folder.mkdir(exist_ok=True) - filename = folder / f"metrics _{rec_name}.txt" - metrics.to_csv(filename, sep="\t", index=True) - - return metrics - - def get_metrics(self, rec_name=None, **metric_kwargs): - """ - Load or compute units metrics for a given recording. - """ - rec_name = self._check_rec_name(rec_name) - metrics_folder = self.study_folder / "metrics" - metrics_folder.mkdir(parents=True, exist_ok=True) - - filename = self.study_folder / "metrics" / f"metrics _{rec_name}.txt" - import pandas as pd - - if filename.is_file(): - metrics = pd.read_csv(filename, sep="\t", index_col=0) - gt_sorting = self.get_ground_truth(rec_name) - metrics.index = gt_sorting.unit_ids - else: - metrics = self.compute_metrics(rec_name, **metric_kwargs) - - metrics.index.name = "unit_id" - # add rec name columns - metrics["rec_name"] = rec_name - - return metrics - - def get_units_snr(self, rec_name=None, **metric_kwargs): - """ """ - metric = self.get_metrics(rec_name=rec_name, **metric_kwargs) - return metric["snr"] - - def concat_all_snr(self): - metrics = [] - for rec_name in self.rec_names: - df = self.get_metrics(rec_name) - df = df.reset_index() - metrics.append(df) - metrics = pd.concat(metrics) - metrics = metrics.set_index(["rec_name", "unit_id"]) - return metrics["snr"] From ba2e961bd9b26fd7acc226183b19bc5b3a85401b Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Wed, 13 Sep 2023 14:58:14 +0200 Subject: [PATCH 014/115] small fix --- src/spikeinterface/comparison/groundtruthstudy.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py index 8d43fb5f0c..9f0039b9cb 100644 --- a/src/spikeinterface/comparison/groundtruthstudy.py +++ b/src/spikeinterface/comparison/groundtruthstudy.py @@ -228,7 +228,12 @@ def copy_sortings(self, case_keys=None, force=True): sorter_folder = self.folder / "sorters" / self.key_to_str(key) log_file = self.folder / "sortings" / "run_logs" / f"{self.key_to_str(key)}.json" - sorting = read_sorter_folder(sorter_folder, raise_error=False) + + if (sorter_folder / "spikeinterface_log.json").exists(): + sorting = read_sorter_folder(sorter_folder, raise_error=False) + else: + sorting = None + if sorting is not None: if sorting_folder.exists(): if force: From 9b5b28b9b6cf0b7d7e313d12cf2015253087f032 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Wed, 13 Sep 2023 15:03:57 +0200 Subject: [PATCH 015/115] small fix --- src/spikeinterface/widgets/gtstudy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/spikeinterface/widgets/gtstudy.py b/src/spikeinterface/widgets/gtstudy.py index 304cf1a44a..bc2c1246b7 100644 --- a/src/spikeinterface/widgets/gtstudy.py +++ b/src/spikeinterface/widgets/gtstudy.py @@ -35,7 +35,7 @@ def __init__( plot_data = dict( study=study, - run_times=study.get_run_times(), + 
run_times=study.get_run_times(case_keys), case_keys=case_keys, ) From 0d87ea07eab0baa02ee34915d96be8a6c623b222 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Thu, 14 Sep 2023 10:11:38 +0200 Subject: [PATCH 016/115] Update doc/modules/qualitymetrics/amplitude_spread.rst Co-authored-by: Zach McKenzie <92116279+zm711@users.noreply.github.com> --- doc/modules/qualitymetrics/amplitude_spread.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/modules/qualitymetrics/amplitude_spread.rst b/doc/modules/qualitymetrics/amplitude_spread.rst index 0ae0761265..cc79ebbe1d 100644 --- a/doc/modules/qualitymetrics/amplitude_spread.rst +++ b/doc/modules/qualitymetrics/amplitude_spread.rst @@ -6,9 +6,9 @@ Calculation ----------- The amplitude spread is a measure of the amplitude variability. -It is computed the ratio between the standard deviation and the amplitude mean (aka coefficient of variation). +It is computed as the ratio between the standard deviation and the amplitude mean (aka the coefficient of variation). To obtain a better estimate of this measure, it is first computed separately for several bins of a prefixed number of spikes -(e.g 100) and then the median of these values is taken. +(e.g. 100) and then the median of these values is taken. The computation requires either spike amplitudes (see :py:func:`~spikeinterface.postprocessing.compute_spike_amplitudes()`) or amplitude scalings (see :py:func:`~spikeinterface.postprocessing.compute_amplitude_scalings()`) to be pre-computed. From 2513a0e14cb5144c1747aa21cda9670c39449b80 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Thu, 14 Sep 2023 10:11:44 +0200 Subject: [PATCH 017/115] Update doc/modules/qualitymetrics/firing_range.rst Co-authored-by: Zach McKenzie <92116279+zm711@users.noreply.github.com> --- doc/modules/qualitymetrics/firing_range.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/modules/qualitymetrics/firing_range.rst b/doc/modules/qualitymetrics/firing_range.rst index 0d17eedc13..1b82c7540f 100644 --- a/doc/modules/qualitymetrics/firing_range.rst +++ b/doc/modules/qualitymetrics/firing_range.rst @@ -13,7 +13,7 @@ taking the difference between the 95-th and 5th percentiles firing rates compute Expectation and use ------------------- -Very high levels of firing ranges, outside of a physiolocigal range, might indicate noise contamination. +Very high levels of firing ranges, outside of a physiological range, might indicate noise contamination. Example code From 78959e349b3783e77a3eca2a18967140909ba619 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Thu, 14 Sep 2023 10:11:52 +0200 Subject: [PATCH 018/115] Update doc/modules/qualitymetrics/amplitude_spread.rst Co-authored-by: Zach McKenzie <92116279+zm711@users.noreply.github.com> --- doc/modules/qualitymetrics/amplitude_spread.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/modules/qualitymetrics/amplitude_spread.rst b/doc/modules/qualitymetrics/amplitude_spread.rst index cc79ebbe1d..bdd23892c5 100644 --- a/doc/modules/qualitymetrics/amplitude_spread.rst +++ b/doc/modules/qualitymetrics/amplitude_spread.rst @@ -17,7 +17,7 @@ or amplitude scalings (see :py:func:`~spikeinterface.postprocessing.compute_ampl Expectation and use ------------------- -Very high levels of amplitude_spread ranges, outside of a physiolocigal range, might indicate noise contamination. +Very high levels of amplitude_spread ranges, outside of a physiological range, might indicate noise contamination. 
Example code From a311455f34bb4fbe085b9191cd61b91e6efbb14a Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Thu, 14 Sep 2023 10:12:13 +0200 Subject: [PATCH 019/115] Update doc/modules/qualitymetrics/firing_range.rst Co-authored-by: Zach McKenzie <92116279+zm711@users.noreply.github.com> --- doc/modules/qualitymetrics/firing_range.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/modules/qualitymetrics/firing_range.rst b/doc/modules/qualitymetrics/firing_range.rst index 1b82c7540f..3fd3d53573 100644 --- a/doc/modules/qualitymetrics/firing_range.rst +++ b/doc/modules/qualitymetrics/firing_range.rst @@ -6,7 +6,7 @@ Calculation ----------- The firing range indicates the dispersion of the firing rate of a unit across the recording. It is computed by -taking the difference between the 95-th and 5th percentiles firing rates computed over short time bins (e.g. 10 s). +taking the difference between the 95th percentile's firing rate and the 5th percentile's firing rate computed over short time bins (e.g. 10 s). From dcf2935acffb6d0634ba210fa6a590597173eabb Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Thu, 14 Sep 2023 10:30:45 +0200 Subject: [PATCH 020/115] quantile -> percentile --- src/spikeinterface/qualitymetrics/misc_metrics.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/spikeinterface/qualitymetrics/misc_metrics.py b/src/spikeinterface/qualitymetrics/misc_metrics.py index 6c237ee720..541d201c5e 100644 --- a/src/spikeinterface/qualitymetrics/misc_metrics.py +++ b/src/spikeinterface/qualitymetrics/misc_metrics.py @@ -566,8 +566,8 @@ def compute_synchrony_metrics(waveform_extractor, synchrony_sizes=(2, 4, 8), **k _default_params["synchrony"] = dict(synchrony_sizes=(0, 2, 4)) -def compute_firing_ranges(waveform_extractor, bin_size_s=5, quantiles=(0.05, 0.95), unit_ids=None): - """Calculate firing range, the range between the 5th and 95th quantiles of the firing rates distribution +def compute_firing_ranges(waveform_extractor, bin_size_s=5, percentiles=(0.05, 0.95), unit_ids=None): + """Calculate firing range, the range between the 5th and 95th percentiles of the firing rates distribution computed in non-overlapping time bins. Parameters @@ -576,8 +576,8 @@ def compute_firing_ranges(waveform_extractor, bin_size_s=5, quantiles=(0.05, 0.9 The waveform extractor object. bin_size_s : float, default: 5 The size of the bin in seconds. - quantiles : tuple, default: (0.05, 0.95) - The quantiles to compute. + percentiles : tuple, default: (0.05, 0.95) + The percentiles to compute. unit_ids : list or None List of unit ids to compute the firing range. If None, all units are used. 
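# --- Illustrative sketch (not lines from this patch) --------------------------
# The hunks above and below rename `quantiles` to `percentiles`. In one place,
# the computation they touch amounts to the following standalone version.
# `spike_times_s` and `total_duration_s` are assumed placeholder inputs; note
# that numpy.percentile expects percentile values on a 0-100 scale, so this
# sketch passes (5, 95) for the 5th and 95th percentiles.
import numpy as np


def firing_range_sketch(spike_times_s, total_duration_s, bin_size_s=5, percentiles=(5, 95)):
    # Spike counts in non-overlapping bins of bin_size_s seconds, as rates in Hz.
    edges = np.arange(0, total_duration_s + bin_size_s, bin_size_s)
    counts, _ = np.histogram(spike_times_s, bins=edges)
    rates = counts / bin_size_s
    # The firing range is the distance between the high and low percentiles
    # of the binned firing-rate distribution.
    return np.percentile(rates, percentiles[1]) - np.percentile(rates, percentiles[0])
# -------------------------------------------------------------------------------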
@@ -611,14 +611,14 @@ def compute_firing_ranges(waveform_extractor, bin_size_s=5, quantiles=(0.05, 0.9 # finally we compute the percentiles firing_ranges = {} for unit_id in unit_ids: - firing_ranges[unit_id] = np.percentile(firing_rate_histograms[unit_id], quantiles[1]) - np.percentile( - firing_rate_histograms[unit_id], quantiles[0] + firing_ranges[unit_id] = np.percentile(firing_rate_histograms[unit_id], percentiles[1]) - np.percentile( + firing_rate_histograms[unit_id], percentiles[0] ) return firing_ranges -_default_params["firing_range"] = dict(bin_size_s=5, quantiles=(0.05, 0.95)) +_default_params["firing_range"] = dict(bin_size_s=5, percentiles=(0.05, 0.95)) def compute_amplitude_spreads( From 34a8df2e4db5c412b8a699057b05d44d699a8c40 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Thu, 14 Sep 2023 14:10:28 +0200 Subject: [PATCH 021/115] Modify amplitude_spread implementations and docs --- ...{amplitude_spread.rst => amplitude_cv.rst} | 27 ++++-- doc/modules/qualitymetrics/amplitudes.png | Bin 0 -> 214334 bytes .../qualitymetrics/misc_metrics.py | 90 +++++++++++++----- .../qualitymetrics/quality_metric_list.py | 4 +- .../tests/test_metrics_functions.py | 21 ++-- 5 files changed, 98 insertions(+), 44 deletions(-) rename doc/modules/qualitymetrics/{amplitude_spread.rst => amplitude_cv.rst} (50%) create mode 100644 doc/modules/qualitymetrics/amplitudes.png diff --git a/doc/modules/qualitymetrics/amplitude_spread.rst b/doc/modules/qualitymetrics/amplitude_cv.rst similarity index 50% rename from doc/modules/qualitymetrics/amplitude_spread.rst rename to doc/modules/qualitymetrics/amplitude_cv.rst index bdd23892c5..981813ef09 100644 --- a/doc/modules/qualitymetrics/amplitude_spread.rst +++ b/doc/modules/qualitymetrics/amplitude_cv.rst @@ -1,14 +1,15 @@ -Amplitude spread (:code:`amplitude_spread`) -=========================================== +Amplitude CV (:code:`amplitude_cv_median`, :code:`amplitude_cv_range`) +====================================================================== Calculation ----------- -The amplitude spread is a measure of the amplitude variability. -It is computed as the ratio between the standard deviation and the amplitude mean (aka the coefficient of variation). -To obtain a better estimate of this measure, it is first computed separately for several bins of a prefixed number of spikes -(e.g. 100) and then the median of these values is taken. +The amplitude CV (coefficient of variation) is a measure of the amplitude variability. +It is computed as the ratio between the standard deviation and the amplitude mean. +To obtain a better estimate of this measure, it is first computed separately for several temporal bins. +Out of these values, the median and the range (percentile distance, by default between the +5th and 95th percentiles) are computed. The computation requires either spike amplitudes (see :py:func:`~spikeinterface.postprocessing.compute_spike_amplitudes()`) or amplitude scalings (see :py:func:`~spikeinterface.postprocessing.compute_amplitude_scalings()`) to be pre-computed. @@ -17,7 +18,13 @@ or amplitude scalings (see :py:func:`~spikeinterface.postprocessing.compute_ampl Expectation and use ------------------- -Very high levels of amplitude_spread ranges, outside of a physiological range, might indicate noise contamination. +The amplitude CV median is expected to be relatively low for well-isolated units, indicating a "stereotypical" spike shape. 
+
+The amplitude CV range can be high in the presence of noise contamination, due to amplitude outliers like in
+the example below.
+
+.. image:: amplitudes.png
+    :width: 600
 
 
 Example code
@@ -30,9 +37,9 @@ Example code
 
     # Make recording, sorting and wvf_extractor object for your data.
     # It is required to run `compute_spike_amplitudes(wvf_extractor)` or
    # `compute_amplitude_scalings(wvf_extractor)` (if missing, values will be NaN)
-    amplitude_spread = qm.compute_firing_ranges(wvf_extractor, amplitude_extension='spike_amplitudes')
-    # amplitude_spread is a dict containing the units' IDs as keys,
-    # and their amplitude_spread (in units of standard deviation).
+    amplitude_cv_median, amplitude_cv_range = qm.compute_amplitude_cv_metrics(wvf_extractor)
+    # amplitude_cv_median and amplitude_cv_range are dicts containing the unit ids as keys,
+    # and their amplitude_cv metrics as values.
 
 
 
diff --git a/doc/modules/qualitymetrics/amplitudes.png b/doc/modules/qualitymetrics/amplitudes.png
new file mode 100644
index 0000000000000000000000000000000000000000..0ee4dd1edacb27e459b5601eb313aec675fbf890
GIT binary patch
literal 214334
[... 214334 bytes of base85-encoded PNG image data omitted ...]
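The metric this patch documents and implements is a coefficient of variation (standard deviation
divided by mean) of the spike amplitudes, computed per temporal bin and then summarized by its
median and by a percentile range across bins. The following minimal standalone sketch writes that
out; it is not the library implementation, and `spike_times_s`, `amps`, `total_duration_s`, and
the bin-size and percentile defaults are all assumed placeholders.

import numpy as np


def amplitude_cv_sketch(spike_times_s, amps, total_duration_s, bin_size_s=30, percentiles=(5, 95)):
    # One coefficient of variation (std / mean) per non-overlapping temporal bin.
    edges = np.arange(0, total_duration_s + bin_size_s, bin_size_s)
    cvs = []
    for start, stop in zip(edges[:-1], edges[1:]):
        in_bin = amps[(spike_times_s >= start) & (spike_times_s < stop)]
        if in_bin.size > 1 and in_bin.mean() != 0:
            cvs.append(in_bin.std() / in_bin.mean())
    cvs = np.asarray(cvs)
    # The median CV tracks how stereotypical the amplitudes are; the percentile
    # range across bins is inflated by outlier-heavy bins (noise contamination).
    cv_median = np.median(cvs)
    cv_range = np.percentile(cvs, percentiles[1]) - np.percentile(cvs, percentiles[0])
    return cv_median, cv_range

A well-isolated unit with stable amplitudes yields a low cv_median, while a handful of outlier
bins shows up as a large cv_range, which is the behavior the "Expectation and use" section above
describes.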
z9$dhSu`Z*!x#)p{1jF(e`Jxa!=@%u$KPfms$`896hB`Y3WyOt>c*%|S|3et%M2nk0 z-5QvYgVOXD?$1j#? zX~QO1z?MuhS6I}BY1v3%u*vaAKilks*+=V-$mwjU6a!GPC*=1JmP!{h;Ro=D@Mzm^ zUal|`S>|(}2~h156a*E42U!`X?}j7Ir}a9g(8%7{y@4Kn*R?Rcv_I^FOh|PME)ttR z@J39q;*+}9rZ80;9sSZ9qhtNpgWk~ihw1|VEnW463n#Y|YeK_NpPR{=Am)KNa+_G9 zVZVi-(_|oIyT2G^v2zfUtEeQ`d-?y7hX_E4p+^(t+KKNH+KA&ZtE~t&#O+ERcr4h> zA0}k~1_;U#`?J_n4c~7K@j9OyVgchz?=stcJhk%*yXI<7W@!8I zgjzSfRA*lc+c?YOdm+(@6zoWzup!}N7 zB4wQ{|NZa-K(J#=(wj*aS6bi`b8{u{zSw)BTgiktLNUrOw6ggv*KK)Uv^khLveQ^t z2s8VX=fj!HQh<&0ol|Xp*{HUwi|KsaocRve&)-G~=KOByI(-5xnCFSyafZ*3@6uaz zLAYo8h}-#dRe+%~4mdF)$y63p>WRI@6Q&TwlNA*EZk>`4^PRS~ zw!$*XSobezdV+#NI6e?!!rc%^NxjD+@b?N>Fy6$(4rb-UhPGCJqJuWQ)YP)0NJvP2 zr&}Tj^JTw#_b&M7%E#ShZ6gz7+EvW;vnT+$=Jx6f32WeWK|zTH21GdU5NnosbZf$L7YKDeO6ox)wjWHu#78faeXCWD0 zBhL742vjDY$3+yez`RqY61X&m(l}%N%dPF*)3SALW{|8+(D5KluedL>me}mI=LY~1 z7VK(-5Y~CKrj=ID`g?==SzUYxCz+|sE5&b$A(0b`=Sl-5*H>3x?&j6650~*0YHO(q zOGHJhO>6+k5HL~bcwIhsc6Nd@H#jP)??tO#zaWk$4XOM0vc8B}#yjSb?%)a}Ih51T zuz{f=MDR%}D#oA*<*0&jG~x9)s@2i^S_g>g$b6p;A#FKV__zMAzlNL~AMPe-d_PJR zW1j-E`WQXcz+GQo8`S})_Xr4nNzKjMy5Laz#H!TLw;Lam5JXIg`R@et5zLnLE1|0~ zoq#0sfyyZOTrdt!1noYq4|FEWgM)Vc27B%Fx<6xLVq8Skn~8{s;E|EV3I-KjGil7t zqt!G6HwO{2S<2EqJUq|{2#`|q{)@%wu^1o1Dah)6Qc}xob`F%}Mb9qashF^)KHUHQ z1G1s#=XFXqO2Erur1kGH7c;6hfqcWm|M;6TWqofqP;`NrTVP!zc7P8C4o(`9Ro1*( zm3Bg6v!wq1-ennpO0ZD4(CB$8-0i}`!cH41A|fK^ObjS0;-nrR{0`$yZF0{9hqJO1 z8Qu?*E{tJ`kz{+i{|aL{cG=ufi7rQlG*Y*CcJ^p#ofup~JQ#2$Tl=Q2HiwxCjd zediPfJf&YXmhbgAIg{ydmK(gfZ)H#0c_=A;am|n$6|jK_UW*)O55@Mvx#XM}&GR4Np)l(9s24Bw&S=is`-C}IisL`q>IPP$W~ z#iGGOV~EgDv+liY#dBMIn|^w28?!Ip9;R+CYrS2x@Wh2kd%NezoDz-VQ zj&yT%)o7cdaMw2*f>n={mY0{0jg9>pzY~tLv9Y1u;Fj=wX(yT=7kG1oX*6Ggn~+#^ zlH_*G^#HSLv+H)FEh(99jp6C)<~CU&4E2%2w4(>8*COY8BP82B7HTRX-gTPTH)raO zj8Ss2#Jc(V&Obkc3E^OjuK)SD%_RZ?mpw_Ckf7coCBa}8hid3E`??6J99B}f`jYYr+bUm1R>c~3 zZFs1iHHV&kd?0eRHf@4GJ9>Jmtgy4NV7&1-Hn`l-1SBMEX8#5O}96OyC;_qF7uxuPQ49}YFu%U*3xoS7N=!3A^k$fD89)Y1M3O#YRW zl;l|%RLZrOgH~-y+Jr&=_A@Xr5M!nyn`SN|NFHH%U>?+BHYB8kLA?xQaPjLqJ3DnP z?3!0FzK=JD_B)S`9-b{`Xdk5%&8Urqy4`NRjvx2=GwM1#5m`+V3!|A%7NDPOrYb6G z3}hlX&Vv7XWP^(+Mf&}PK#uZYA&Hurv3sf8U*DgzWf7Nycf!^d z8Xto$vxZBdqUz$q2G$6QJ%Znnx_iwd-tBYm4L3C~CC8$clHW5-Z1jA2d7(xZBhcg@ z%$V~<+7Ti|Z)s_zW*z6Gx%CCch%9W94J*QwiR|8{C(>X?{KGE@E$Pkr1Yct|C4NV= zL&&fcrR%C@Y{!11npv}5F&^Kj`|^mT!c&0%Q3}exto&L+*Noap_|yEH+9I!tfxA-r zd(n4?Gv)FkQ+ehqaj~%#OD85JRVd@A8Gnjn<`u9HgsAqi!Ww- zw?8qg^BQWVFq+CIsaEH=X-M(*gX9N;_IM*kbBlY$wm%`KX~il3MMwx=t$D)7#Deuo zjp~IqZ({G43(5~1X&h-0A>BdM9wQ%ThgL(tZ{h|9cFBH-{>_d*CZO(EonCXFo4m8k zC-j{6(N=YJ}m%?=Ogq5YZKO*!h!Fj%RJomqxp*UTLR` zzDoJYRp;+(2^2Rr=-QkqIMR^wrwTL8;o&{ntnkzB&h}1V5RYqiWl{O$KBFd(^JshY0s6sAaY0HwS%fhtuSUCHUHZ}V(Fh6 zi?RE6FU*?{D#b1aGZN;>G?3;6$vD)KlEcYdF>hWE2}hiWB+<)N2r<^5SKe`~&$Svw|}|8eYHFUp6G?CQPkQ&yw6IQf|H zleuNRkDvbJ=)5)}L z&U(9a(xk~InMxv6GLIV;p;m1b9cqKoeqF2kj-yJnJl+T#!JTaCzrt{&Jid!R@xR1# zqmd3LQ;-?k=~|VrG@4b;N;fYRSzIRjPufhbOavBcBwuhk#!pw&n;ocsurM;&c0!IC zORhpn&ELbVd35(?G((tq1=O4OJQpg>{FH06^_LAL85Y4)*j?K05}EU4dG_?&uJG6) zTYe4&FW&m8>3d(Kkzi{wW1Zz}dR|%#8fLc;R8l=UDuY(9)JU)WVu9lGU5sm!yIalA zzWVl;>?;)9Z6iFlatV98qUa8xRqZzIA3l&So+~^!txuUB1y-tccyqeCWsjDCbJhnz zeTeo*{IS|+k2#7C6RST~3he?(W#%o}VqaTbVh$J?IWsuc`0dClk*n$V+UP&|P$OnV zf88`ro(-t8a(V7fIOcIvMsc=#*>wmt+B3)CI7E_`bB!}r%he=nU7|L|uU#*M{V-F` zDDh26qu#7~;KR?q&2DZk4YM40i|k5>#yVpybOFLNYea&-1!;Glh_&m&6V|-)1*WK( zW29l_Mt&uK2t@wiGh(v!-|D`|ok=vt=hgS&X^YtMc-f_2ZZc)&3=O4=X#Hz!d@L)! 
z*QNz{adVefDGlp_;JZyvkNGz_>3=Z8x_&Z8}I9 zv}um*b5G@~;MM6VQQfeYb*@j62pyhz=HCPSu(|2>|M%FWxk z)8#BVg!6GFegzbsAS(!x{aTL2NJ1egKWbu^iANgOJosCiPp{WbBwSJ0-R2yGf^@VR zA$Ha(Z)--KMa(DqP1_zJER4(h0OQlZDr%Nn>?$Eo^5Su)@z8vgce`YHAuhqN){tJ^ zut14e`?gWt@hbhBG0r>)JnU{`y@Jlbw{XnaPI*Vx940^;nhH z6i)gjfm+VI3{QNxWOq>E)^Xgv$^sAw7Y{xY^tAE+Xp zH8!jAwAXp4HYou=k?(;hGY16dii*STNAwH*yIs@hq$%x&0o2q>lQ4+tO?KpI&L=Dp z+3u9#&RVNlZg-pAKieD!WQJkFqs)d08QTg@PyC>u$Ptk7oTCUjEw1*U(1@^m!Yl;m z_E$gW>|&`#&hdEGP3zE*`i(7qOrGB=O(;z#lK#v6Q*v|gCBW8i-ino$hLe_QSx z{wqB|OQk#~ulMsO;7pKfGHM4QQcg~B$7IS)iAUUDFZ`I3+_(8W&rAmw>#J^tN&nVLb%fc=(#LMrb>A~q zu+=IS_o2<@LM*{h_8^M(QrZA1kfEmMnGhe3l_#5K;<}bSmGA2L;uezD!9cAEdFgFc z)-E>EF~#AcAqG>Jg4^J^uOM{z=k#Rp^5ti{l1H_2{R0lPubl);7K27tV+q};Zhy|? zMl@G#Z^ZY??i5NyBZy0#o@o|gyGm9r-v0Z|87X8b#KCw}*XONcW-f>M}S0?k%Q4(uP%JS>K zxXJlRIYZ+gM>HQN(9xIOkJLr{k1C$#nXMBOr1NL89p7J~c39U>36ck>?TxTs3{OLo z5_u&LH8wWc8R<3mT73yz-`mQ?=Va{-->uH_-kp)x$^ImlA6HWnX8uHdLX5R8|2vEL zOOxZyP7v{=>$0Pa%%6&ejw2o>e@1fmULsIO;N;e&vAO}fh5K}SPxg+4^tJZqpf=b?eO%DHWlCUNh0gBVfqL?!`;&1fDSLH!$Y;*>DiXt&c z^uyUg|EMI~Rvw2>)C`f&IU+6mtsf>!X@`it=Kr~CQGW9IH&5|LZ;+zBA{qqP%v9!# zjTy2rm+vcXs-AZlYzdaBm48#TNPK-;uEz;@>a9lzTZij40NY6woACBU~l zPf@-}Rf=z*p+x!kHTc*udo+?BU^XCF`!I;R-`ts(Eop z8l@n}^=qhc4#d|!i9}DQ<_sZ0!QShWAw*O@g&mWlEn@F!{43~!tuuKSQ!4O=!Esh$ z_)7X~ru68R5QNsweQ5%x3+etqWu_MYW?q60UwGg+%Wsy7URj40ZlNB<) z3kuh}T2F{dt=Dc<2>jATBV%ZMmtm2K_J>D(z_Ju&fkvJE&iQ9S@#33v$6+=fU3H0&kSos|Tja!+KPf>Y zoqEc<$Eag?l-b&Ze7>`6v5nhUpd2bKz2Lst5JK#*wCc&%w8}HT_h8m6y>WiD)yDOM*R_!@4VVk(tTln}@ z9+KR*U}J8M&~-WE({^^0m6g}4RKwQUe*6-!edPmZ>*jnT8+Vc)3vB!=dPrF0W?4+` zy!YlW>8SXjxh4&|GY0LiAYA}~F(xLX^w3%Q#|R5v<%v@>9}Z(&Mp{@6-_R9C#gbL9 zT}$2wQy&=I?P9Y@<|yZ6F%?*nPGDE2N6E&OEa?w>`LD*ED<-kGb&S892cqCfT+GRj z=E}-XNZWxe>@1_uL~+&rm}`k$cLNe5>ySTJeDh?dCmN1u9Iwb43umHI4DtEJW5&^= z=UKu$L1SsGFUaZQBE8)uLrF?2Q zd`M*__^_nTTSrBC&3KS`XjI!<=A5829?$D&Ggii-vUt<>yTXQ|0iCy9zdqT4S8z&a zuAve*=%9%n`$f!z;ZAY1apNtxtvIRQoq~kGWNK7rawCe$uzmq=s+>_?uDQ=@;hSdd z=QHtlYGu*}?E<>GARdBs)wa3{ill&R>7@RcMk+D%8-4Z8&Z+!^Wu6cUWc*}@vtD#` zZALn#(y1JH$&sz(>StpDl>FVMeo%I~UPl&M@5N%nq~XL456zWkC7{5eOh}eHoirw9 zMPVv(2A1Pu*}P7Yvk#OlGPpJ#>Tb11@rfkOmO4I#W&{6q(QVCF$2fi{Xs{SQESBPz zkETg0yEK>y@Uu7~U=~Xr^FGE%^tX6uI2Rb#FMKs$B|t^xRkO3#QKSy_kNLFUtD)_H z_11XA5ShW7KS29A;m%H%NGS-Jl>X@RPQs4MJpcY;9&$eGB(Bb7Ca(Rvmw_m890-<}NAnMOG*AQivR5Ftd#0iBwhjGIgko?g$HX@zO z%6{vQb`99RgnfWJKka&B+PHv8d~u?t4C;5gad{h{ETUTif2n@6;+$6!4ic>Np~26( zn5&f%9KTE@;;GD$s!~(Dcu1jq-1{PH$*$ih`x)uBwP(qP-1Hc!m?H3*?tBYs^1m59 zL}|I*Znp*?(zoO9@Ac8z?_CZqH+sB1oylldjBk%XRqWl{uB?G&Jly=kq3TtfF5XBK z8Wy?RouA3p`aJ+9KbeK1gu~?xt8X;3md(hNzrO~=^pu@>LAzPLSs3VVnswj!ORy~$ zYlm#|6;<(ebD#GTM$+hpl(kHsl!VXl?hf0jhpM#P9QI0K5Y2C1!S_+dtxOfc;~h*- zdOeBF?{xI*U!<~?;Uz$c)ciF;GxmHrr{@scJ#a=Ws5`=RV8aD}Ff~`J`nw;?8yvpf zT6!J_bsnI!rr$}aI+zi`5{t~@l_;S=damR>TL(V{P3X(ZbM@aPGMl}fYR8(oC86ei z`;hI)c|3?k#5Vf6TGMjbaj4YyX3W@Fo6U8Xp0$i9{cP`!@_sGk`GQ!qquk{hc3%EM zDX$smw1K{n_-Pw+rdm8@2ybDbh9IBJrs9&?H)j&# zsIeRm7jo7>@}MdRKc_&8vqnV;gPw}(?u66hexyE@ME3?kflD;wkT={-0wm#_Lxb?B z$UC`WZt`P)iOaShRLb@db{d7e+s)@>iXT(Gc`X`kF?`}@ftl3+Yu?(eDaywhi=}_U zP{jmydsS7}u!_PP##6aDRk2D3ctsO>X${>xY$MBW*a->gTv;ZA9|UuLpz2oiM|+UX zXbQaOfK!p)qou{=^mTzf5tC4}hG~SjG}G+8;`9V36;ovUdnnlRz(7>Rn7aM}7BP0K zQqxI^c*+t~-0QC+Eka%f*>Zt|HHTX7tcf{ldKcPrySg9l@vgW^k;}Gsix!S+jYF)a zqX}sR|MaM{!aW=yQKRSb!uuKh76eZ~R;b4lMgEE|3#gFR^{nibDjI{yefPe;bHcS?+^)6Ch~!pX=Q(z z*zJjsE3@$9nXs5vx0KBPaEeuS;<*MH2uFWEm#d4u(TM*kM+$GbM&t^JoXHF!9rviqn=yVJ4TPLI@d@LIbYZ02wmMfw+{c378W|R9$8O0^VXY>)R)t7CRAT# zINKc`bFb81h9WTRZtoM|gcn#p<0&eClUfv$jH)e+4C#+yDf6cg9*?BK-yk75em2FH 
zm57`agEV_GcZnhb)rQ~Bs~#S{82R7yS+Yx28#49;a28O!^cU%1rGMp(>fX3tBD&Rl zJ`$fu72(0dn2yssIjDBhq)mgRZA%mK_U5pBO1QFvWdT|l8X7|N-a4pkYZodSLKKdZ zWifLT3fqYZ=tt>QtXTI72)p^A#Vqg5FnX=vpUcUw)B{ZwF^ix0YRZPXp_Nhb1CCd9 z?83q2xiaMHju*N^YS;|%IX)}gCfc}4QaM{B;9G3qFZI7{I#ZCwp|-yyg3S6oGD426 z=r3xdZ`5&`cHouPmmxk@>qPmYjKG9>w# zM49H|a$7Irz=b}PFbEaYUBQ1$M(z!VRORyWr>HAo?pEDsfu|hEkk32k)+#*T-sO;f zb}_va{W#HXXp0rrj#R|@oABr<7{zcgS9YyP^P|x5(tOY7<)eD?Nod_RoeFd z%?wLpk>?YfmBpAt6$_H%H0QJ3!c{KdOMmW(~*!%n$3cY5#Px0_NE9T|^bh%76P{XO)uEeD)(*Y1%B_h@dGi(0_Yh zo&ApxKylL-9*~P4Oneiok2N^$b(XzJ^G3Y!^JiyM{QajDcT%>X>wU6y8!%)ulEi{az-&1N<>u~Mu=9_*niv9h2Z8;a2 zrx-EKP5QZs);wX2btTtKUWofXmF-CLm`M1B8ni zvih2@?G7P&cPC)!#_AF-sx=GN#0u&&|Fn3QKQ(as^2D9$Hv4T<(_n5Q_n6!3zf}y< z@}6@~d&-cUIBbi#i>b)=WC0Vl1-0^+)D%fm8>WN=fW0M5v|8cMG?aFnxRDUUo6bFg zSM{~fFH@5D726yh`rdUP zR2bv?_k;N4;1Lrgvjdr(5-r^36*-^s@>8bjA|5KP*wn!+U{iH;67j|1au0X~xVteM z@1uPdInofoJnl_v$lIbAABzIegi?QNeo%(Wn)iPLM*p%97qFZOcVsI-eb&d+UmJ)^ zZ?)4?vAKqm0F%sm2WASwMr53al=O0?%~f48YDr}+{6qtrL;|Gavrow)u46S$VsxLz(*;GsNV{|cB@>8on4RHS6` z8Je?WKZ`$cmx~Dvl~VPnOPmixzPP(!z6p9_*UP-Zd2FfA$NgENB@fC%sAh+{aM2>_ z*Rk*%fE;z0#JS+bWBy1e(NQu}{H{EXCmlogrj!}qGBw@n%>8)SHrM2|6aQ1qxGvoxuXVKPn zBDuCukJS!4QaTNat?T-3j{8DyPbLR)%K(y|X=>(kye8Ga8t#QdzxiLy-9iH>*9&isz!y6q`%JTB>=7_Y$&q>9d1&X_wXKBf8w)hUSdTcYRKJX2{ep(A`aUC7rrSPR!EOI~;gSUeixyt-7(yC^ z9DHmK9BQqMx?P3LSY~H86-y~RG9QCbTWZ2pKIVD=@HF@7c4v#&YrM7D%7gFB`d-sY zdh5=L1OGelDD4{>a=3Jnjw13X-nfT65as=wu$pVRq4l#81Q+v^&YOrVr{nd%GsO1y zSG5l9dqicr&g8@%dqa%(4SK_Va(jz4$*5p*bS+! z+-CxeR(qhiEG|GbCHCwCPdIj#cjTw9Qq{7F{yD)|Zogi&AUsW0X%8@h&Hc!BT$4C6 zWbS@XD@1?(z_7Q=Vsrb#lR34WNdI7Z@j27&i7>8=sUVE(45$50U`XIqea?%Afwl(L zs`bz1<+YLMSmyoX{rR@m&orZr-(@4HBwhBZFOVHg@xQ8DD54ZE{n!+%4Tv0t(7JFrPzohmfW`8y{DpvVl(I-{}5 z?>zkfNXp$oG3(H9;*pr<{O`XlMQy`z7% zRrVz>z{kRpHi)R#vz7%GR2dh8sqAH#Xc5d(6MN}C`vw0@?u1m+jOXEz%evXwk(Az0 zU{qO#i%?V|THmmbop^KI?{Coy8SAYbY2*4(2|DbHRV59oa1uLG_eJ!q zR|fgV=oAkSp@sD)!c!=qqygHBg@c>6SmfLn2@MB**-h-N%8mL+PFd|ZMznHPe_Ovh zq4RXbxcu~d)x^?TZ$lCXi)#4v`VyXb2akQZkwcY~`<1k>wBgJj^11l>*EgmDY1~5? 
zID7Uto=*jrGQLz&X9DgUkcew;h(7-)Wbe1SQO|ELSxU7aMsjl-uMWhDP<9ar;n27e zVn(O)Ge1}9^51$liTBl_#(H@yVagva<~F?;l%o(9;1bYX7!QGN`dUwc>(zW-cJbt( z2JNudhM20wbw0?7Jt~jMGyadJs}8H`3$`F2(g^%Sy1SI_knZm8Zjc7)2I=nZ?iT5i zZjkQoe%trG_aA&*?mcJUefF$bYt77QnJx|Kc#pPdboJGszRKIVf8|R}W%FwxVTXyg& zLx-+xcw6!El(IX0A{dmKDPMiO6|Mff_Vn1%y7E5j^%}kDdf(hFk;AZ_gL}zhWfau9 z@1XEmwW==K9t~56g|!O+2!iv0zrcDa1!kvPLDg55p!xd)Acdt>;%a8@kDG2Fz6V4TVKwdM1M zQw?u@A}_A*q+qnkQAg@nq@SSaI4BCt*O>PL2&w#K2Xp!+p#N*VtXQ@TKSVDu#ZR9K ztY#D194yAo6J$)JL`dmqbZFd30V@|6KALNj!3ki5z&t)->}Q;j-Z+Y{*^-#Gm0CUI z3uc&E-AN-l4ju{|_B}pIIkrpLA%9vDoH%d01K_y7*!vxlP15sb0tx8e)on6gU+@%* zBbK5_R&lul04gC-q4R+r|H9vN@Dv&LS1Gzmzx~lEzE}^TX>StAV{l|-j!SXmzw9s8 zaW^>>XqkAmWYi{wX&?!7Q*mNVHQ*52b!7ih5EuMmIe*n6pU(%g>~cwpB9pFnz@e!L zm|UXn^=L_s`SZ)`Y{78o%yeGqfexrxm$0sA1`ss#<&x+D%lR*V0la{#+iC9%3Nlgm zM0DoK^2KI4tfi;BdEMe?Kmilm(Y?KY?d){2olj&V@X@-9N}1pp5GI3*`qBE4@m5tw z01|+H(*FK;v(ce%Fg7@h0d<(Ql$6wkI(B%4h&aPQM4ulf8CV|(22{e<&W3=P9I9ZO z^Hzf>0hz?s&fVDUN&DLk+hwsmo(SUURG?^~6d5yn$orckCi_49BG<>lApV3(rzV5u z43T^-+rIk8?{GQeBtRRD$7YV-)!zV-U$L1QlNuA+yFo}#J=|w(^mWcQF=-1>?Yr7- z98S9tr%hc{z@r}F0Drl z`0Tj2qyfN39Cga;$O}Auf*bwJgH6D|&|v+>?esI8gV8qcSwUP~oAa7h z>}Kj@i!)8Ci15bNjxYdvOaoCHB?r@3OC4!8-W{*|p4wujK%GPX^gLkf1T)QD?8MIQ zbAbjco5eaP=L^`Z6}J&O78^;D^5K{LnG$7c$g8(Dzb*4U7X6^%w{T{l(NGBnU;gq( zx$Kr?IEd#iqO|A55nN-??&vpkSrIsk!n?RWCoH$2{4Ve%5Z`1+E2sU}jvLK~cLh5; z|7HsP%+99XnTA$+umlurmc58vN^4xU&R6UVO^S_!gPz&;BMayNCpA*6uy*dz6Y~E?jEj{c6#RlH zhW4E)!-zRr<{yqicoA;;%2ct|@+j&CqL~ihH=K_Yv^0kA2MFeGisV%lmf)Z_aYPl_ zijiY(z5z!ULlqBaR&#MN_Ru#v?&I(Mw@2XsH~YT60+u;q8tb6dBTPc#15&AAQa-72 zRO>Ktj_C6D_RDjvf3>?s!LYz^5OK7grAm-8u<5eC6(B{Dvv2?+Vg`ynTok11^{!RuwW!CKwH~Kx;`u~tb_$j zNWF(;^NlB~BMySZ1vtWeE41^bg**wF0;Nx^#5LfzbZkq-;ZAyzBXH`@;<(LV4jsmfE={_Z5ut2u?3t_h-ZsX zC~X-)3v*qthJJjQ1{nf=DsyO&>3FdUW-OZj_F<)VYie&$QqsX(2^+pg{BjUtgH>5! 
zB=#g3RcP76L%gyI>PL;FgFMkX&r>aDe8jU!f8Z0*#F?*mn)PaWhNKLK%*y&CgObD91>+3fyr?@Oz&zfA^-d-9wW%0VYi)Cs%zk4TU zwG_`fmWGb`-#>tqqI$hjs(N*$+)=NumfUJX#aQ5|VA-h@%U+6ObHz?iO+{30qm6#j zKf0GmauoJT*xCA$qksZZ6krsv=#pmnEby;UhP>re6z`(?;+v|o@@hp=a zRZ!81N-Bc2Ww?*#jN+^}6BsF!h6AMYvoOzU(%i@H1r?9zEWOG?zAW!O?Q!i<1rid0 zSdQq`uL%V-s;P62f<>}S)-#j|TpHnzL{mNMfF~EQFQJ+GqZSmmH|=&j&h>e&12!`g zpSf5!3#{XAABW?NeHGq_Mlc@C&VLDj8~ai!YpZj=Pr{?hXQ#HyR8L*>(ZB6`0U+Ce zZssH=rgwP0LHhgmj98)8r=m_)x z+zJ=VA=K6=ZZPCkx1Y5M|Dt<8rwM2;i3b0Y=S#g*TNQ@`U|s+Q6N^oIm|-2^NytcN zRAHk^OH7yIG4jDzeGO4Th?u)fwZZGjJaLe@J1atZ~GCP!4Qy9&r&v-B&K)y+J__at@!h#~5 ziP#ObHK55`71+D+$2D)CrX2V3PFlufgU1DjKo}KTW#GbL9edSLtANsHTDK zNzrw1-8ZR-!jVGOq;f?RLf2~1!0pVuB?|a_rG8ZBGs^SU4kBC@T{{y~rSjnj!~fiTp9b+HtY_)V9qgwAvDfFg#viv#y_hvck?@ zkYUf82*0uiLd#=v7D*?e=zfgKlDRuXDE&hbW>agU;s^Ml{Ea*VFp zKY8I`xr79-NNSeKWSIO*_k^tU!p1Od>CLyPAPmXOy9HWuay#0w)agqn5l&!2U_5{) zj8ruAk@yk;_3*~oDCIOFsGUK}{4cZlG?or2re(JN+PLa8L8X3ruw7iJ#k164D8^pdY6icp1FEmp-wg)Hd&|ehG=L*o8jVuqw(u|aQ(~7d~jYuU|_sd53)D6RA z-m!a|64%)b|JF|v7dGIo5imCWO<)o`bg11_;@%ty$@ z`ig#tK3kf;;>Lt6aT@-#fBZo*soCfGo_geH=U@mNNGZJTPP2icOPKq}2`h4V?_r^9 zZ!((7yt0(Nz;jWOl2Yi8V5okF%QyZ$60^LPJTQ1}XUjB`DEAG5MPX5bt}Q)H^F&8L zKuj-t+;Ztd1!hUHBWvw_HO^^FZ;7eYd{j}vLHoS+8APz45UP++Q3&CJz|V9Dp(6C2 z%7x8(Ly7D>sS-O*p4Gp4L@zX)+y-F=<4p#`$#)tU=%`KSg|9Kg#XX68ICOTSrQ^Ls8P zUDh4xB{xTsdEGH0Nk~GYYZ}R$qAbY~z9qdg6H;4kZD0)H%#fM0UHsM0_bOM_a{pfV zJ1*}q$kcP|%Oe2mlEUiFOq6;zz!o*xA-iGzZ$Fwc#{aQ%Dyph^za|Y6NJa)EVYYuR zJtAL$h{M%1K=>OpEt}(Fi39hgO!@)~Rw@QcDuMCVQACZQlRj3zi%4K#oZAlLz|^=e zB^n;SKv%p`DzjPk)$czyRXG_a-?_$c*b3G2L|TtCA--boh$F#$379G-sFs^SH^7$E zUwg+cOof>+o+bdzXUi8RakF9iy>0ckX(=Jg)jJv>ivb5DK)t)CF(k^B1zF#p``DJ7 z96PFiREmqQFj>Ttq3EUUj?t~-Rn2@XIqO>N%!u?k%`UEw2cA}kyTOk(9lG927nr(l zk7Xqc*rGzUmrrNxo{xxRY$G48??_72nfj6TCyLiPlzZlh8{s^n@a8iRHQnd^mN|?A zk1gkuUw(z-aykZqvDp7!h<>-?Px#^A0%g5jh7Wg{;M!`ajh*FADU5Ku+4hV~Yj{%M zFaUYv28Hi@`4!65&HYQYBxe8JneAo3pE1jDm+OS@s-k zNnpExT+E_HjKb{c_tNP%Pbovsjt&m@+w?;!SiPpj(9n>-0PR*>xLRU-3Ic4p@%w1I zlh0AMxQ57HamEqBtDumhQt64i#{kq+g^by3o%V$*<5zJ7p-}J^)=()GG}g9K1IN;b zw$;k75f;c7a=-5Kjm+r{lW|q(Byb0&6KVA?{}L(ND7;DkW{cX83R+x%s=QbN_?w%D zTZ*C`c=pLBysi1D#muZacV&F*4rR+6)U9nAjsjn2R-WmAL?AilEAP40|4*X2%NMgipK+NYBe1poj?YnweXPM|JxnEdN%M2ANU&C3iEF%JYxMxSXEz?PN`l-Cg2LjS zhghr4yI08t!iV6_Ghpd8epLCerf%mY@g*3ep!!miDpdbR6@y@#%X9^jN^@$~gom#6 zUtlOqe3l`O)wfwOjF!(ok0`@$og%uP>+o2tx5Pv)imU#q2NJ=m%9M*pL#gK-Z(d#@ zi{|x}jeo;CE*4F-#>dFW8T}{*x|!f|B?To7*jlaB_#Xwr)JH~(I&m&G+7h_2*~n24{)D8W zQm*uA_y?&0#@5o${xm%Uj7lchJVo|K`BX$YN@`y8+uQq3GFJpJ_}@`*WqA6cDZ_wg zEM_^&wRZTXrf?8gbX>ZYx+m*Q%d`-B3W`p6z0+}o16OEi08A+Y+<^_~LfDzXsc>L- zz9`FAMzjM!Cf&^+AH|~Tshr4^5Xe*(3!$G~J_(N|(i<`ivxivHI4qmvADfD6naLTa zW{}6Woxk#ui%e8mETmDzEBCAK?rrss)jE7?e-(*5+^RfPc?2Cwak)GazebNX&A`yr zb+mDCM@5HjjNRxKUsh{>d)LXkM&D^k%5id_Z+Z@eGF2oPjC~hAeXw1p>%GxY)wy}u zz)gHOYrMNqBZbXeyyT|8LO^XtG2eTn=EzKa(&Y8#o#}amN3Yc)X zJBqZkw-HT~j#WY_UG$ z?ObUU-XUc<1?CQUkmkGF3{##PJ|(>R0tjEPD8>X2UF^&9;a@(BqYV<@k^s-wqa2be{O0dM*PuWfN@&HO&e!f zBsXD2q;~y9`$Mj>=lmNi1_tJ|-XFbmWLRQWXaNVT@53r?#v}(A-6q>9?{2+XgEV)! 
z^MkHc`s#iHD77peC*7_oI_DMDR8$nI$Xp{1dThQj@)S+{m9U}DM&)Rm*-5qQ|6KDN z$HM(`+QunZd`VB!rd^a6`uup>)@tcxx>k0nlrq>yw^ZWwjl0X|+_!|5WN)150=7cq zntAK-&h%uW%Wi^6QZB^vKg%xrx}Tt?>U5T%gh+PDMZ>M1a(}i4$_Jl20O0gMREfx( zyHE1>YZkw#IIYHULuoZJWRzq`MUSeP~zeqgpJRpI2e_y{i3;nq5SCv)VBo%>&)?S zHQMT`@a4U|7o%u*q0_MXs>9#b!+JaI16YfCd9>(}YqE*Ug9*NoBGD-?nIgfxT@;0W zIl3Y<=MZeh_@h8~+m#f<0Y?455Rcab6-x=j4zRo)6z#-{oCz8J6leD8y@fW&^e>NysH#e zhy2bpOoo#8*`-DiF^V%Iz5Wh@9B23(e!dx8^Qr4)jn$hN34}w=Ngt(zh78Qf1iQQ+ zcCg((KP!Ib5dM$y1s{*7X3m0xQb2;!>1?6qSv5|0YP#3onSpzvdpyH=9h}3mdt9gS z^HOkB6`Duv!Z{X|U+Bv3b2*#D4fb++E58=JH-K;Tq4_d=m}fe{s5eK8ZhAUeV`$E0 z&S?DKPV1g>UTz;}VR;gefsiY85EKPlqazxaE-wC>S#%;_^6mhIbxxWjDlo5e;7@>?l_<*+vfVm$ZY;S2`(}$ zmCMRO*xFBv_pRR>B*nklV~JZ~b8N`C$jwO^b6dt~^Y9pQjvdY9*l+cd%gVz5QE>CL zPtvLNM*_aFqM&C%7cd{E8;LSFYFifPay4g8uEEmFOs@Fhw0<>_K{Lc3`0M@h)VA+1 zppDCsc6hmqdb7jg@D96$P5|47Ei9Z&##J7=2&%_~EpmF`wB7$EjrEBAD8e&LuY-3E z+IVNDK7(1^)XBc-6ScdBfOrixTlx0o87#`>VUat0K#z9og(&&_Jr>&Yf2^>(WWw5D)czMTU??m2YH3W(o z3+JRGb>t2Ug1YClWN6f0hp;o<_wW?mEIAoOxS#DJ*@Yb)92Tnep+v82li7uvIH0QZ&bg}E{gcvwuXN!(>+%e=gmtC!kbq$M^% zAeiYAdoLWY{Z#@6oOdPe@`i@hGLO`W~+3Z3NhOGD2ozllWjGcBm z`hPbcl|Z9@dojRCjk0V`V5Vm4e)$W_>CaNC9CAf3#*anx44@427Q#%G^ z?KnvUd0HSAE^jq0M5bNXtp5HoPC_$tu66@MFsHdL5fG%m{r4BhzgY8KeE-zMvkUb0 zHUWw2JeyOb7dvX6j4g9>(!M@>GPe243>6A&SlP_Y?jp;76->8T+(QF=uf_Cwxs(z) zTEyHeUf9vAZWIvUeR2&A3=$Uif}M`~ZJ$u|xPw)e+U)(s3Zt=wt$KmTuq2Fx^t5v` zAKGFZ;9gaE@5evzJ5dUYGk?iyrR(`7iK;{ZSXIGKnCow^!3VDu9=CR6Y^r4CbDMEw z6F{~REIht~pR;-%x*nZhZmOHw_yXt8(6rwYtCbL!z=1RneZgTYu1hBZ?VPW9xe4BXNpIs(2WV_gfiFm1x^zX0iTo5g7iAAS!yqfS4M zx7qhz>zt&&6@lSXSicyOu3onEF?5$xZI7Eg(*%uSbMk&jhN4EUk_BF6q;nYP?Tx1S@y>` zAI(_Z4D;BQ>&m(b2R`z`SZ{7Fcx(n3uU9PIqXU3Yq_6fFll675Hkxk(Pzp{`QvZS? zP*A>j9sNmR8dYz^T_k`JKsCbSPb52BcQ{dknOzbqE@#k~@}|qo{F)IJN|$_o|ITfT zvXqe&HXDVlIWbn9XCHZ9jA2XrW?~kO)%;X(c$?gG4ZagjZTx2 z$C`Z{6-Tb92J$+15}dcxFUfz_;`vzq9c0>EMZzif6LI>9I8XeR!IqTv-+x-8LmQFK zN3-kkw{2d7dbqii6K+oK!6QHEE(cGVrgMaIQ@uLxB2v=r1#|LQKH#JYmf5~2g3E}? 
zl8qbSBE*5+%FRvwY7$|5yRZ3llc(+3o?5WDVaIqjt2%42go=8MADibtMm{O<;hin_ zBjvyPSD-58RNDV@2^Q&3f0#Qmlm=yi_Hmf0Z4KwkC(C3!(zkl{6iILFc7L2+x=Z44 z@2{t+Z~k;4SmG_Q;A>e4ztdhOZ8^*laK7yRpv*R-ca+KF*A8x7kT^iC&1 znnKqZs-eUg@8X>2dhC%Q#X>p#b3*8aMpG2g`6|Qn7Zf|e*{HPAYB19PFvz{*d7>pa7{7t=2!{Z- zaR&w4lIR1KCf&V(VNZyK4CU#ypR^KMP-i+>4u7iqzp;gnlh|*Rpv&~)iQxPKh0m_8 zT+0Hg2d>toU~X%AVm=ARWz{^zOQ(jZB~_+-nx4Y≪7gP>~iN#rHNOs?ge9LVji( zo*e>+o+5*A7^` zGNu|d*;q+rj5a{j%2{oLTy^RK9dLk9lC`8dj>K52y}e#o+G-!lYlxZ%x_JPg$yDuV zs-y8zbE;ET%~0m@xmQPPUi&28LlBL>5@!8z6%d>MA_-^jV}{C$s+^V-$n*adib>dH zp5oK#Di;u0QT}7WQ87+LdW}|~E7vKjwDiMAGOM^M?H3wr#pZ%No#0~tWW^z6#MHh zYo0fx&kq6aE@l%;y4UJ5Wd6}NIZ)3YTQ%e%<*Kp+9f!y&+~PopZ$?s{{ADp#4r&7O z1^idI|^FHyVNs`&JY0 z%Yms==5IfvCSv#GHR zr4;f#o3>4`9LZ%BIR0G!2~dU$dc*Fby9%6j;vOe?r%W+N*9 zO)(g2;x8F#F*-JD^qABg3~QpR9Rw6juio|jCX-AqR*zfm8!%kqY7*7ed<E(;{wiq@db_C(lwmbXD0YBC>)v*jg>!i_+6V{PCwV@Pv zQL;*01T-Nn*pn#_>)LTDEU|`|Mis+0P7lSv-qE`$e*=moC(t!7^<#Yep|KlmTzo?WP#f%baaCH9*vd*QN6)fo^MbAD4z!l zwcBV%{&Yy=tgeG|K+5Zo3L6DSrcOTZ9|2ov)%pfKw?be-Nwgy)Z#3l>2z_%bK4Ct+ z;B%SpE(;7UkmM7{lh^n^%q24mR=0=R?WpSr_72 z&R6OdEe^`hoXuzHOq-s+oONk-ikkDC)X5YLrbfOLZO{F`7XXLD=OXurEx!{cxmgb~ zG8lzI|G0t~mpkD8t6|F1FioMN9SH_67aeH-LKoQo1EU0Qbsz51c30OBTWHGi5%Js8 zZJbOB^BuLH0|;H`lXvT?>MAavX76W9$4=iw7_JT~wBjIr^Py?+PqIg{c`fyFcojoM zRA^^j=BNlIr28pUr%1_pliFJ|0)n$gK}AvPr<%$%^opSye`CMw|By*W_@qZ=&cJ+C~4Ef9li4XN&?|!PFoc`}&OQYdHZtrjLcHYWG~@AQOo zEUEp`hM>P;tH;xAbGv0 zh1urm`zdXTd>dZ{L0gk8ilrn%hgLxLc>Z4>Tj^5X8&7u{+Hv0};K>tm`@ zNx0>;*&g2=S)*h49G*#D_74}6tNakxBBAw>q@&Nc6f=1F=Ok2}VtnE1fAP89;3uo3 zgDtIazllT(4DNrm3lA4PZhwf)6bdvpPJZbL{_vR<7qU0VaDZ+7S4LcSK#9sjohi4b zeBNxB5lZj;N|_)mYgkCxx)bNi0;xEfHCZgb!wm_h!&MqW<*4DXhPtxw1l8+G2t10Q zy&Ff>>Er%8UsJ5*Po%Y(ya`eYpYT3jOf{DT{;ba-OaEw9PCMD*<$-ue)bW8?q{!}U z_U^w~VsZ&G1Ec23$O8f@B_(D3jl_zDw<|aZrI7L{l7XmnUZjF=OKqa1nJ>BdC(Yhy zJGnq2@8zZAdfK0IPpJG2YKRWs3x;9JJu??C5kn^BaVIU| z(!u!FMgb~y6e#we_^Sw7o<4BO=#H(o!&u(3WOWZr_XS=$VGcg~uC_s;F26w&bqLQ? zNn-HwUgEXCM+$PDSfjnc!z?5xkKJ-R%BgZ|)ckgd=kI~(%a7&F>98@Yfivo!l-VENie(4H>Q--TV?NP%{h1Opc$9pRNPFvO3R02p}|RX!gxi zML1x|i1O$3vtI5=atk{xk)ne@cip1H)t z1t4wNj-Nc9ay6D`kV}i)H(wN6md`gqLUwu<-i>JCWe8~%BZ4^q`AIIb29% z%&ehY^eIEZ_=&&2{l*-f(iBbwK0WTy0%LCX3ZT!5M_c=~xq3Y9-j#6Qebx!LK7b?N>wS6h$JlSgyoc`U8{`_q0! 
zL3uY4%7^ZP?d3rlYJ=r5p%Nq5mX;JOR(h=;yct||>u^uMrhfU6Z_j2iH*;|rO1-@< zJl@O~yQ&D&$i9QfO0%E*9ZGlmuI39QnF;)P&K7Sr1gJpuwLT*`4|&W#LZR|TbS z32{7RlODnrU|RdkykOoem=KQGr6bY(+ZRfxzsBcC#WguDZ(>v=Yj}o*nZIn>mlQ6H zAYkp7;=`d(ZDl9qY(s;YQklHAW_Z462_ZirIkiG#mRU9qZ_ zc0`OnDkUwit{td}LNIwE&=85tsWf}ev>FSMD3jr*kA}7L&C4t1hB<4DutJWq+I5N) z9CPQDbSd0eaj-}h@>n~l_^3Z%Q&OyIQP2y}EKSF@joT4!u6k3zzyMXZaIBHfcz%KAVz=gBfv23&SGc-7N2sEK97?0)Q3Z!q@rpvBEKJ|i-<)qRGI zm6)ar!cACMj*Vpl^g6fUc7XY`WU@JWb9BFVYu$*455_^#v8gH`w|}rWI{Iq2 z@y(1`V}A%Ezq#^;XcBm^7HW*X(jo>lL3Y1p`Sg2aoduV+Vjq$t)gr*#(SQA@7>wBe z0|r5z=dQfg4*|G;;l4U*W^-n+34dj&RvGMqlG#wb*R$UFX>_vx651o; zm_!fbl1Mr>wI9Hb4Sw zVcJZYsio*>uKze})hJF&EzU=+&s3pgcTcNvTR0~_-(@4?gRn($;VvuD-{TZO1)u*- zL;4G8lx=-;T-qA^w2`9!vixDJe9cE zVXU3J>>eZ^Z|Vo!HGTMwb!u>BZf|4H>Kf|Of14g z7Z+WV6Z+=-q4jJmhA(R+#xOl_aE^64aW{Mo1XFvC$25OF-+!eEj>RwVBNe~xu2mHK zAfwg%qqLQCPR4fY1d74MWsd33TK{_j2F?CXir@2?!LnI}+e8DIAcmZcC;M_?VWkTf zcD9=o7T$wrome&_B%4sH*;$f7^5ppM#-YU345Z5Lt{i2t9a|;s3b-}{ViNTSRRwjp zNEQFXB~GRJxm@wQp6OV$=S(}SDkPlumIw&98IEi=zb_p3+-hqK(^!iw&G@xRC=_YM zYa0!Uv-r>y@%6?)50}NOr=1bCFE+SZcz88;1BUUc{|Qt-n?^+T?za!LxFxeX6&F{1 zdwI4y-Ibm*Wd1XPkX4}trz+~U)_2&-`Fl*ad$=7OT=x_QtgkIzIHYiiP+4wW!F5dI zT3t~D>@VkMrx`cJzW+oJd2{{k}Wo?!zcI9^F#9&iEn~jTP9% zY)@vKlOl2f^?#e7vpw70h!XYO>GjR^?_%ST7q}G9BBS)S^;ebSh1~J!i4o6!&yB3fhAPT8hBg;@$fJ{TY+l}dzCkEetJ88 zBH*-ox|^($#2o77ec!Xj%MQSu=$fL}hMuLr{l$eD0dB;I%V}=Nu543NO%Dz_IXEPM zflwEZ6C=Foj9)M=m5x|dR4p~laA1mz7|(w58HRSXCC;=h<8hCG7MBlSU~MgzCpvoF zTl1GVJUgf|-nCizy*7dywB~9@z+{F^^CPy*%Vg}=7KNfV;lKf0y2P2BLYYG%;704f zn#z-&aj_e4(d_*Od4%{iUsbl9%@jGh^5i%YC;6r)m{JjNX1%+A%}ikx(aGpnB;G-G z%8-bo{fo7~F1D)O&v=J8I_x*zX-UZJMxtn1TCRZZk&Al~c-eDuPUZ>9vxLr^)E9MN z`MwG=iU0X-Ank?suA?3GBWa9W2(-7f*qp+^g12 zymdYqE?l4X!n5KT&V2AB%hz(5SZ@yn^H?IHqk9LFJ3(MT66NB$cXcKFN)~7>7t560 z{Hrc?Ty22AY?fGn_-VB-Uo+QoBK*66Z?oeym(lK?V8tZi?*Uke0<`~>l?S~aF|0kk z-qeoPQx+WN?1T18bmtbKPcMReoI!3DgHgm<15+;q z(`o;{^{qr{b2O8OvZS~>!Q<}l*+D3H;3zA~g#^s8zP-E%X|X7H#i%L;gqco&^AJ9k z5Oj|Bf6#0^_ZGF@L|bSy`nvS^KtxR|#VVObL`q6AO+h*;w7d2A@|0`I;(XKma885) z@ArupBI`|-`xfL+Vq!9hh*XuHYPC4RUa?y5 zj$--QD0sc2p{HN%jHKU~(kct8HXK5;O6I~mp^!27@52q}cnJf~A1`A>)dGc3|D$=Z z^$U)<4f>;J8j>JE%zbV)MvzdZ=BA?l7E;AfPWP~;2AWgcZ=W=MF&@*HJY!gqu#gWy z_Xh#UO4UwQaTWKTbo*C%l!D>$=&U92DIbJu`W-YC-%ALhdk(?z2~yxU-;Z8T5>af* z&JC2yT>Ng>x*Ydp^yFkYssmB6R|U=O>2y#{6I_7-n`d4gqz~2fYZAF<^Bx<1Ka3hZ z$Pt6Wuh?wA(ewsBwu=o7Pf>i$<@rtZFhct)x%TjL8J@?Qx_SKkM@3!wObpDerht)m zJQVweq0@_u3GhQD=C$>>Z+w5ZjBU4G+#JC^jpk=y$omx9&rd1Xi2EGIvgrymL7z44 zKAWiL#O}TDT}}?M8-!>F|Es3(JSYhorSh1P=>xUA=Ey9D@l!Cx1Qs?R3cr_L>a$jx z%f#Pg@0$eGFTA|?(Y1A;6|4TU!v=BJC|VbX;|r%r6s|AiA^S%&@&DFl?_zA3veKsv!+$%7@qYx}BSpPeOPC`D8=*C;IB=?v-e6#Y?|`^ca#&H>J;Uzc*802Pk8p=s zCXDAB3dXA|eN*GWOH*YCG19_(y;)O_;>RC)9ZqZMUm(-g)E2DXpJN85BV{DUL5XT= z3PHnOu`hgQ(2YXbo$#?y-#CcUoAqbtlW)Uy4;l!o!9MqNt=u5>`0V z{Pm=Jz&oY)+5#>yq@nUJPaFO){j)`6&bDSsSFFt~&8{?XJTUV9L4R1daWyJ3p;DYB za!|&mq6q}$jmfZQEq;;?C03k#{z?}pXVZiL4{i_L^(jvqI787Wv^xcbm+HSe_U=IGhB@`8sx!H{J8b#-Kax1eaD2wA(SPL@$0KSu10#Qf zw`Z8ev5111=Bp%^e22^rzuD zMD}Sa7V3e^kh4rBv=Z_owA+0|(rPC=Ewa2j?dj|BW#dghIY#AF@oet(OOSTYbTetE z{GE#iVMlcEiD8o2pO%w40U=T){A*3kLYu{Zmg~rX%z+Q|b4jk!f>1-ln`Pf_d{~{2Za0}swchJ&}va#ue0VWZo zKR9E~{uClTwN&N^*afdy?sdVP?vmF*l3=7l3xs@d&p19-^Y5J-E>+(msVWX(xo+0+ zL5!#E$-&Tr>s!|5Y)J809sqgO zA0BFdWooh-Mlg%0Gs=qz@9c;*XGENLLj=G;S9_{D3G8I`+K?)Y7*)sw!$vcy?+jQ) z4bdTPXHOp>+KihuXnddidvY2DKF7}4$p~}1pH{Brue%Q8joHaA`K5O=u{}RO9Q%vu zX;4A+DAnTPAOp3?GQ^w7h?%iDoTeqsT48ZYHLiDNv3TgsN1!!gg#-;rTtAXr zGsQ!UN=cmuB$6pi%pE#`uST#sI{J9`2OteNg95t4Zj=9*<|m;%80qCIY`k<9 zoW(&c+l~#2m{c-Z?;Y-c0AC-VP5{@lKUg5K(fzCYn-V;LCK7PIGsP;sLL~t{N4v&R 
ze{jmk^lWUE%7kyj6-t=H%93j;jyRr|rfeB?2<(hMY}v*UeV{RZoSL1yXkis%Afw3p zrX1MA(Tch11$%|G;RJgKJe$Zp72Gjr+eXlcr6h15v=%mb!H0@%zSjN8k8#5=e{$RYLABKTK&RC7Dov+0P>C z%-o`lkL_NQYU;SRC#NRYyGkRR&o2VI{C4*_BY!NZ1P#qad127JuEJ4Y5=_5KNU47_ zK3Jpn2l^Fu^s|=`{lOSvCH%FJO~K@qBuEz-rmZ|R18Ym#VyLdYSB|#ZnM0>oOQUh^ z+zIFt-`<%JdmU0SJh8xcGT(2{9bAHweEqR)NV94j%jGNj>kciNIXT1TjyM7Xn;rI$ zAHSbkop(%x`#38ql_>pMo&4CBkWlWyh2z(26}0b^tN1BMN(27PyAd|a`wfcgrz=q* z?r))s7SnhMGIy8b5FcpuL_^|&7UzKEi`OS572#G-rXZBw3el2g!C;q#ob%b*WYG^4UGdunL@->Kj zS5A!jq^pn{3r8zg<=woALxOFmE3%&t>jQU3=7yD#kTJ~6F>L?Krj%T3?w}0 z>AX7EKj!xQ44@@@r&n5!qQrX+7?{sZ4rC)FyH>W*US%qizenTG@#;Kbj#4abOoHxq z$9vM{^GK_3(nfa%x+V+`#xvw*XMW5~;tx7`#1KxZ5L@wwfOPL~!_8SvaXBtTsDDyb zNI&E>9$BfxcTadXt>iJQ7~1_wJTMuI*jAonDM)mYBe~h)Kb%h+r^gXcM{~sFjA+`w zdWF_&#ErC?_07m^U`(`q&&zj3T6PN1e^jN$$XfE@IImo^CTfKDJouv9!IRB z(O*?Qxw>AW1wo=3&l=f1Uj8?_-j227lv~pH>V0p1q_Wnm-FC)zf8TbNZTN_xw6KS1 zIoCb4l71-$`3B*;DYebk+0@1bP*a4vsgx3_cbBL7^;1rV2MC&w#?%Q;1dhkJr|r76 zh7^e>kDqCZx`LFey67!ne{kAoNe2_v6b-_|tbOLg2gr%?A?@uwVq^=ZFOhcbOPZ}2 z3gkf~SHZU)1WoLvHa1>P2WX7G4?V+9{3fSg<-bal@-b`B#Y<2dspd`o`W?W>Djo|) zJ|jPlrvBR}78b;hosFINUmbVfWq)Jg)46T5T7?x7-5NaLbK8lV5*S;AhuEQGwx4aY z!g!Caum={Cs!TPnGAG8%W_@s+{bo|s)%FeZ{wT;bW^6QcFEFEl#Se__e7&YpvzCT< z=p(y-zK65obp7*}n<)T5X7=wq|t4-RODzHBg$kb=mL-ATal13s7Oi92tQ#_YJa@q_NZOo zsEP{GJpQcljE$318HzU&GG{~xp$6;lv)5;I&@UwbDIpqc^}95-Arrw}KTAz?co^Ez zQRrZ_$3HGa2D!O^ifxR1Cip9Do4=!AB1*^CG<)tL2Qg^FJICpOHo3RhbK{eW-@Tj< zG1eZgnE+YtZ+RC5(%hb#BhBkhOaUn)QX8J_qf*LisI!(e|dq&*haRucL5)4-NZXexW2YUT0zC;HE_Xae@?qEtFKR{#s0%Lu_a~w zf()r#eP&qA$;p}zFrWHPbG*#Ph`s*zFXF{TA@6@zHUl7g==kBz;{UA(vGqatEhF~u zRGwvC|J4f6Ob@;!-2b(ghVQeQ;nlq_z>F9 zM^$J+<(qlBO2WRhI)7=Iq$`;9=|x-|M2VK>fAFwQ@jG z>eFm*i0ysYBVh}Sy$-R-4tQCX&e7~qfM2ANI!C*s$n!}Ho`$QEc=A=>0Qv%h7YmLC%mGQx^keGI%mkc#2mQi$MRa~ENd%zy6qVNd2zS(VEVGhV8c!rZN1 z9tr|KkYdJM5z8eLuD4*glM6FH!N<>i@~kwYef5TNXnsehaQH_7#EcJpof zhg*p`LO@lw@8WQ6L%qdfm&;IWZ(RE=BrG_MQ*(g5w_3l<4=32D<#(eVCNdl@EWJlS zvSj=D&OhbHx7`G$og4!LG4aI1^h8dyhM=vv4}6DBY|O*rQ{GY^A89lv^uxe7?i|;| zc~{rnE%HebrHW{5Er%dL2+@_2(O?h=O%(D?Wl4Zu`i7YjLPEeSDuT3?nvom~2NaCH z6sXUlx*3`O_RdW+5854?sb(j3C zm`nqd-wR~~kRAce76pYa)?opU4-C#j4#URa;GCJBMhr_9EltFfoX#$ud+++d_@DUH z+9(nje^pjpCFLnf`3-{t85;JVwD*xog_)F>+(kNu--B>fS!pj!F@@%=u@YBFNiSE` zs?ZB-gOF<1VD^73fQZ<_<7MZR$N@v6bQAF_4ei#?D4e>-L$%L1q4_FekekRzrs?Rh z@q(Gpm=t7j@2#yfF1Ms}x3ZGIgkg$&OsiZ3uH3S#*8}b#-dui%vQwXWKYLn(qPXsJD&OSxpk8t=#XSa~fI2Itdha$k|f2m;F!;(wn zvP14YlK4L0BAw9J&S`19{rv+F>$h2IiGZ)uvpO+x`0f{kjqTUcFUs>5nB4jr2it-9 z_FKtpUq$_WT-?IK@5l@I`|s2kO{`6n9jwk*%7F!??CySdID!Rq{fn-O%Ifg%--e9u z#24MSfWH;C9qp`>kx2#4B=qLyvhtcD{-&#`aR%nll+I2waY`z!ll3?h-s>S9KT~Sf zjkXXe96D&oi_25%x(>o#lH<$1QbuOxCP_ThxIrGbvt&bQ4BBUI`0o3+Do2;%7GgaM zLqC=r+6wRD?d5m+H#Ww7%V^B@mlGksS0L}T9;>VdUFAXIPxhOJ5^JwZ>bk5 z1s=WU4(i9Cxi1jPv9W>kJ1WHO;rG@_Fx}uFkkBO}cQGHrV`rl+dnZ0>;5Pp2*K4(?O&Zk4FNrF;!6~0I*-Dc^(Ii#88ym1 zchbIyLGypm{SXHJy{&#d;p^&;Fyc%Z?))Z#KIo^z*fCd`=~=(O88e3)B?=9vuz~1c z8eWh47t`;`4pHT%fzhf)wvT=3w?ge<<$59$lDW^M z57*kyZwa0Q9yck0p_$)QL1K|W9L7thY_)J~cBviVcv9)#Uk;oVk(QKyi8~M>Jom;} zm_vgh%>kDzB4dI1o;(%!tR z`aCQKh$Ku*^k3v%9Vmi23dK{uGsf9>=R_oIK6bxA_bg)k{HgR&rR&MX!#x~C1dVzu z0ukY#KxkcHNV%b3ZaIujU!i$E)(zC`Upl(It8=((%TkmrwTYHYljPjkSYl#)?l;Vg z6%}4n?O93j_kcn1c|@dH?@mzvf=na!`!Z|VumYIRmyWX6IV*y@K6r=F?mSi4+zr zgRX8QBJx8XZCWI%P0a6QJ&;U^ma9@$>+|~7`x0fF^M&l$K2DUvwK}~D*RaBsm?PfY z^d3`(?;KJD6f_s)>KVg8`s=2WB!DUOW{Lar5(m#VeEyZ4m^H)DAor=#npt zK&BH@7{9@!ps38^M4*1S;eOyp{rAEF!$^>U6&h)5hpkJW_r!>Gs)>|r1|4_-g8F8U z*w*@?bxz33Hns}-^$Bp~_*41Va9VA^;^2)alKBqfk z7H=QhzBl6oNh5>j3eWcCH>SUW>}JC#7VMAEu&vDpw+^SVc_{F4J0Zoo(N?IR$jZvR 
zIMI-<>0mbz6bK8Y1j-|AqeOO77~IgnP~Z9_e)&?{=JHq?KE4izHMUAR@}iqt92)Qz ziXXF(&}e7)xp3RHnPu90L`jAHi1SFFbD!hlX`H}~y+tRbEVty=V_5)_Gg&#zq5)K3 zxkPx+&l#k4(5G_+??iZ_ozo)vicOT(VY|BN8wN>ufuDC#Z6hjoliT#+;rY0?)qe$7 z$lgVvMoI!pE1&KwX5abD$;v(UjsHZeGB9+yUKbKLg${#nB1ZB^u5(=FqsK*-goWQu zIqjB7ksi1I!Fb3&^dC|^dUT^{M2`h*U~C7S)!y$h@3YL&tM-CUI(Xo25MB*sbv)x) z)a+d3yuah73cCHP?%hd5i}UkL@NTjHAcl$QzM~3zats!VRIeMo_a`CJ>V$otQsmnz zeBDxIVpQu3U<>k+@H7_RK>~e<5@@TVkJZ3<DfnjfpD-`Qm9Lar7uo<{$Wu=}7;qEL4>Ec0 zq#2nLt@W!n54J7jk*D~G7fLb0ua82MQ|K;*Fyyd9Lm_hVN@1N zyK+wK2_M6uX44ENmRhl7lwTQibHPfoJ+7vX{1^#K$sxPxXi#^3UQ|ebceFNN zcs#UV_`9pGo`VPV<8L1Tw&?~72hzx`DorI$c#cSU+p}gB%i4EvEs}#+8>69KYVm6w zhsTu_;9g8d<8$WRCf`GMbR=fx{CviO7$2pM7QWa5Uk>MeGYK;aOnI~X_|JdmbZ|%K z`SZR~$0>TA@XtrZvDlr}KJlXQ9^;|NtqgYYlk%6rW$r>7^&R<0pbz4veTk}eIer4; zfp^2kH^I7Xx4RGSq|kXS1_=11DaQ`JssV6Kx)UcqHul=Z!t^}rg( z{jgv83748#9>Bb}h$a5Hp!($ZJmqLl&L3$1ppDA72w)~QS0M%W9QQ|<0FS)mcs2qp z#V@|#2MU$>Z6mIVf`ZhdZ1Oo3~xq?NDWn#v~S=hKSGf?t?{qL|<)UwX; z+n;~MDB2k*zx8E!aJZmLjbiyb1WMMEAI5hHtY2Cz<>&H0E2~d-4B&A(`93z|KMxH8 zzfwQ6Hx{eE4&?ft5&WW;4{V$w6nNRPEbyBD=_AuH;DtNrY;f{KPQSHW=QKYv{uj*5 z%$Aa57u< zt2+rOkjl2+@`Z>DAByD3$1;ifS2ej_nAr+CVrF#-j4Zoro?oxFuHIf*Z4@jyvHSgc z_XMV6V9|N1>9*ywAh~m<*7&M_yP+tv|-rQ1^WJmEjhJOKxE)_j?e?TB)2t zu#av}eb!eieax(Yn`-y>I_tRb-R@-k;~sU>Ii1i1uLU*Wqo zH>9j#&RaF{x%?5}xVXSP*$HX2H$=Pqh-~ao5m2K%JSQQLHnKK4dU!zm#8Oa0?ILSp zV!vL9Y6JOj3;fS*6D@q?@vBk`frq(ZOhipqRIIFVKT8Kw7+y;{+z+WC&Ugr^98(9v zGT)8Bx^%Y^=23`1St9s=_c&;DU%&eMr{i8g_}i-TFD>nR4cl?xV-o^Nzy<*$9$YIb6X__l(NRe|yBVR7%v42DVlk(yEZ>rKN5fY%B4Eyd_A3H;dPS@91!v7%W*C{ipdeX%h^199wXez!HFC zm-&c~VB)Ua!B*1SKKG=<{P)@5l*rD+H3>M#g8h?{TF2}ok^g!)?{}tLXH%EaC=plT z12~pkaB`qz*9=_Jn`Rq=qSow;m+$2@GI`25)2#HEH$2U@p6JIqikQilz$Zcm_o$bH zpZq)@i>?7^xRW?ZtvvKY^5<2_Bal?T@iMoXV1LKeyyu$Ip2F#1#9@>@4aKWLZt^o% zsJ6B?u%T1vzsrlnj|Q>!TYrqrt)bpJ*0kLet}A(QsN&uTU=~9gHxQ!a^#+jhO6q9bADmF#%57fRWDqb zit2E!oSJEBH5qsV2aMbV|zLlw2+jn$bYt%FX*bn3NUDkrjwG2L_`-?G%KFza0cg~p-bUZ$ygs?2Q!$Yy z3Z=z<%YVo;gFe+NSo*3n0ilV|+%30p1f+Lx@h{f||1PD<@V%P*@BKtTnn-`S{zJ-e z@y-WpqPpQZg1YnMeU5E_&*?z*q; ze`J&hqle!!b?M37!#P=TcStYjNc!fRox0WRlhlUck7@Dv!8YA1I+2fGRq4<4#i8LPJuLS!WO>J~__dWss%ZCW zG@6OI=dJ|MAfzX@OE;TuS>fe+2IfM)to>YqnOL9AxpMW3?TBB-Ef8PCt2|&5!;pSY zvXc$9yU;ax<2^>~qQUuGfYMniiQUMV9WVbK4Cgdm&Np{ARG?~iFtdRvQD6S%KifO$^(GjLfE{@f)=izw%l=Hg6x@7|2@AD0vDApw^s>7zv zu)PlmSa!a?PI*^CWj6hX>Tvme{YSk415{WG05Cbce9a z;6T37TIAv4x1zER#W@t9g#SYpl>sr-Zf|K9C$(}#=Xb-yBg=PIa+o@^wnPM$0c^Fx zlef!YC5y)VH`7zze6<2R=a%0?Pw#@(z}hG92k%=&OuL~lKrAJj4t$tjD15wH^zzLz z?C*DaxqLnI_W)>k(Ylee7fKQ)Eq2yBN+Ydx)W~>zr$2IWJ0Jrgv_+7}Xxf7D4cJG0 zT$)+H*X|PdwH?afzOX=uhtt3F6S_>;c$u*@U-Tj$1@aA@Jh+TOBWcW2IFSbZ$r)Rq z3O!W(Korai8IAx(*^OE8p6F7C&MjA&cw5$Is zIQ=i;vv|(w@llkh16vyP2Qhkv93@68m{i=xHsOZmc$}=tTb=!5B@0wCkpOZajYj~Y zJ~^Ir-wW?LE*5tb4H(K#2g9Z#ObiyoL%k_S36F+ z+7ZMruvXvQZ%g?PpoP%=YyZa+j2PUZX-;;F6ODM^236MO;z&0 z#Bdtvc1UJ6H2Zk?UaYBAuD7RmzD;$CwflLkzeS5d-<2T}z(JeChX9dNuQMQ@KMEm(npAkJ~knSz=u$9f!xXm=zG8y;AHNk!C! 
zziY!g8Tgaq>0#E#3+D^E9sLLb(eN};U)n_m-{<)cjj_-CQ0`+UsP_8jy6hw&pw`S2 zIBYEV|CTo#6o(Brb{i;H$Q^~VELhj zWdYGrk+4QCUYRi!kxyfrz3+IrL#mp%A3TnNSz^{D+j}Shw@h7cM?ZuSDU;a3`!A^e z{!zsD$EUW+mdpxj^XV*lrA;#j3?|O#irL-;`2NSM!K65fTB8sDPR#y6BA^-KrllVf ztT$>8?e@Z8P4^Ap=7@`9;%};$2ivR>bIQc_fI1+_r?@Zk^PX;h1dMd(V8X>%0_T`Y$Y)#KxxP#o87}C~e%tW@ zSm6EOpuykYkF;IESF`Ki+}R2mnO3dLejx_?dX5=Q!gjIx%+)Cfpi7)zkb}Cq2xq%R z+-r2=jU5~^vc8&VcKiQ()Iei~my|KRjov$4tD~+t+1cK%wOLvbL~rUZeciNs3#~cw z@l-iy>$rSF)u8^8TJRgv{ZfedkBy%bisy@^d3}=MkF%2H0~x=-%{Tb38{GLcdslbi zzM(!vT3;Q)FtJgsFtN9IE5Bfj3V2;80tAH5MlKIWh-+G?Qy6j%23|i}3oVClbhvli zfWZcpg-%wODMOiZ=Mom!R*vM(Y}nUz>0~6V?&9KU?giwl&|9DY-JlOy7^ zDVog}5Y&()UC;4lE@?W++rfyl1gW{Z3HDD&u+yJ)m>V)0tM(`je>f^(xtj8d5a6dK z>`TkrE0PP!iVQ6=;{sx=;nUy z-Za3oFA2y!W@p)Lc1JAQO20O9a&ia7eW4XRG($E0WQw+oY2fRB!UwF*C=ZtQe8~A) z;mq>S4>uk_cq>Y;*(A#arKCx|U)!y5E@Bi8>ll=2ypCt6Z8LZYB)qQ&7Q0_qKTlb5 zHZ=XEt%*58Ez~;00!iOpPmo$ z8uH}GO6W-K{%jiCMEDyPo?XWO5zO)e-+5=`Tx4|L@*A9R%yl^h&d-WaJrigYXRX*! zj1!)JGuT;bGGKRYAAwA}$t!bu1cPh3+`c1s@y^w)$J0 z)2UOeR@`SvP;|el>+7SWEbCnzE#YY4;~&lx2>tu4f=&-+2YQ~y`6EOHnkC|rj_(#G zGs7ghU&h8&01xIIgIs5K{%QR#L)evmJ?}jRV8Or_zCSTnsLp2)8JPK(3lh%9bmiOv z4L7D&lrUNe|79oB(5oEfHQR->P--+~cgkRDs%saJ(UKDU0|W}AF1)xiL$aS)=fe8< z0W|q9)h(BRFH$*BK07}XHS~jVhDDM(CLv*W*`=3+g}ZQdap705WMO_+=zt1XLCNQ9 zj-=f5;r{~Ijd#f#SaF*9sfP!Se9E#1vQd~u;PRT6W1Ye$FfGdfrIf{5b< z&C3t>xXC>UqU?z-A}GIoFCmq#b*k7pF;jqO{0zSZRmHLffd>PT8HB%d_3NCELhey8 z7ps``+J6)R_r7>YTY>N&P{%6|qz#O=MonX^vMYS^IT7dqSCv zBUGpy=S#|ss&OwBxkXL#6$88v^v?kFel(6Z-!N`!7F@Og!n&KDrY@mU5Red018Q@oC<5l0p9BzY5oVbx1`(Q+06{c8|LiRv-HWel; zI7lA;eZk>R$4ACZXGL);|HlHT%-xY!W%C?l=dybM4DQobt5YN*YG0yMjO3Kvc?UFJ zT9fyFEdDEcV$JEA>+|ZCH~iJ_xuk2#!jFk6}>mN5PuG_G6Nz9rv$Aq&WhlcyB-7qcr|us#0x& zUG?xaGn0$Hy6m0j#mUk3o6PrhQ*|Z`VRNKMII{;trVuy8ddDZenU4KtyhVkxoW+l( zUZC?8UvCaNC4@(!bNaG4Y3JI^+8$j+`{*#}L7LrAEYsjd9%f^an=4Y;h=@kvte<@^ z`E$%ZWr;?a^WG;blnx(4;Mgbv*#HM&!~r&Do%WZpMPg=FOsoBxBQLd~b3-iABParl zjEXi(!AhbTb)O~9R#8OljDlj#Jw#Zz8HUJ0%8S2(&jv4w zHSO@aPr`#8HY}&qt$~muc~rb2LC@>%&oo(xtV)8Y&V#p@_Ira?p2KAF`(ml-{PAp- z-|e`#o_##!oDzmT*^d^7jmj5JeCLY|!oSv{U4LTzyC-y1#(#>XpS@e!*h^2%R}Q97 zp%?kj*;&dt4+Z4;sg=w^ETMBs!8B+S4S!24&j?bv{+3+2X?Ti4n%Q)j_0wNbFelk9 zocN19RC23E!-}d{p7ed9_6g+Vo2a-KR%3smPU6|dLwa_eo(Q&0@Wm&a^SB%#Z)ZrB zqZk+tMYN~X6yMG0Q!dG>t=Gj8%1ZN3- zTk#bclyr&uMtuAqIynlHks&Ovozao5iKizEmrz1+mmjsQ9i+fOEIf|Q=22s0w>xA4 zm^>ig(+-7`=RVa8{3zguee^WF%yMYF^I!=|ts09t`|?Y}D?1N^g$#|UshSg43XZV* zW|EDeKyG_?7wW?ew23mLoa;O>ZB6iNpS>`LkI9uA5o_fB)%)ouz1Q8rfS!|~Xj{RD zP6G^G<#U~@glO-hdpJ@%yE+4LR;V6eVK?y|nJAps*hLR{9K+LJk#feSzmdbF2O>9`5VEBaJe3KitN|3Epw}Jlnd_#$+nvo*7=7y*!Pzei%K|lbc z{Zy^*lB9vC06?pf*;+U7 zfuy!v12{xgRcp+vrE3G6le8(g+`Q5fxy~(5u%tRP+ z<{?30;2&x-Z>|5vpZOM;BRaY|kv!62&&1mY#4}02(UeU6Q}?qBeug4 z{SWC4uI621aANb<5QuJr#aL>-AxtM+P_fX)15s-8jDqB*)UqTp`>;z1d z&D>HO3QHIqD1dH81KqV&113~d@pkuUKlA?erUAuuY5V)Fd0+2W|1e9|CSb6|X6@P& z%4({WYraXux!(n66fYg!%(p~9;@-TtDyCVs0~{;Bs52bY_m?HfO;VktQ?fY-D6a86 zYN1aE#C$DM&z@u>5r5A#r>*z431!=IKnp1E|H1>O1Q#l8O9u~L5EhdIeGJrdas1|; z3Rx*HZ*{(9mH*(q9*`s?RRc?u!+=@^2XZLv)N;*A!(~$0tms9YIX5!m11+MWQhcyiw^)8Id*m68f(wawki}Rx zpdw^b6P4kdW8lBukN3d_Mcwce0k?)RiysV7f3L$39L2fjC<7!RB z2u7FJ@vnnBt6_ATDXQf|<_`-N|2EI0=MzAghr)cUhpsT}mn{a6A;>Qe8k%Wg`pw(N8~&yu zD=WVmGc@K13CB%GIuqiY_Xq_-ys!Sz_U0SZA1D}?G49aFlut^g1Xe2JO)`EO*^m}{Ge zu4DsSiswtViItA~lCw>@Ywgl$)m=RNLPaSjdImlocnGNU-Q23n^PD1T|3vGrB5T~q zi6l<74esq7U7VIp3#|6StJ(! 
zlasB_YgkJ>c@0iQ{?xdbM`i$3HyPTn{nAkKqI78PgFmP}a_+b}<|z&T^W{t;gkw=> z#3|a|{}L^vqj@T%j!A2KszQ5d>hI*_o!>1TVY4R^vi!K`wb^bhm7xuD>0yPVHT!h%2!db5^R8I~t0=bvYQXv6bMmDrBz%b;JH zwNN>_QNu_$kLYx?*}d-)aEpGn1=Q|YkElU+KwjtD63fth+ZFAkpG$#A7^sp)CR7Qt zr{*rv$cxnCmZ53Q4WwG9OCc)p>R3Diko@69&2IE?<|&_ zxxQ!V5K&fU)qVuhm!$Gn@fo3{?UDGjN`weD!{|%iYjPhD0=9Y|RuI_B@PmLnDQ9nQ zKRok?Kp7v0^>!fCzntRclI>v9WZCERa@iSB9D*1u<=hns5eUfRW7EYms>jUnf|O9q z0d9=zS?_Pdj+?wsjeNyxd_oNh4c_U+P+oPcD^?>{6O^-GB=*j6+W^>}p9h6k1Ac!A z6}-^6i7h_zjEtZ%-KcYxaDb+F==bmkfdo|=5$TUT zkkJ2i#1J?ti<7p`0S9MPUbDSd^+gkImqbJXm|b2!n<2y)Zw1QwYA{IRCAyzM{MSn( z^X6BaK>^xu&PO6IugL5N#w@EZN0y8nQTgjq-RX%*<`DB)gZtmcG1t09dT0^;&(m{r zuo@*uD&(9bKWOCCB-rCEHY+U&eY?Fc5O`fqp!Er8fN>(^x{Wv>!`0}Su)U0!yzV<#18d%zyW8Lmr57c6?=AtO153vqmY@dHJ^ zgcA!dg`iCX>9?8y6<0(0%CKAZ;DFzoVxS!x1b988V~FrWy*rZwd^}5IaWWI)TzvR>5BqA+%}N+jsYtk7!nBTp_N5~oP2?q% zcBT1iVP^&(sUjH{VvZgc?+Q7W%wjE1>d@`?quH$wgJh#eY%?_PRMi`+4q_8%NbgUG z`!^b~R)$srUUOEA(`LD|5D>PqBm3vo&;F3(a|IHAsCdJ_mO2BeMHh=k$^4Y9fJ$A` z>aaf|p|zipE!sFbSzyha%Lk>5j;>WMj4^U71SKpiJL(y=`UI_!kHK~pK~v20{m3g& z{}GD#J$}IVG6#MAgNxHyZR~lwNzcvWQw{AJX)zNl50!0?GPQ~%_Xi3&G*p{@<99v$ zjY6jn6PLKRrSUav7IB2}0Pk=1*b!cx7i`PyZe-MD!S|k-F$JHHVQK8_k_j1B>&w#2N%BKLM)3jD$!Jni%!r`^&!mt+QeyDXj7rJS~qg% ziL=F;V8*K-{#t(J-!QzRv_Uc&G6)?wxaV_AqpU^}2T$$oIvh#f$7pH#cm)OVZFVDJ zn;86M8XOp{wDtxOcK#y$gE|o_hY43bwvN#c-k?a!DcE*TRK(rggYG&t+dg7^q0?K+YgLUX5JInzBP5Q zi4i7SW_s!iB_=1o%U|?K-XS<${lX{t)n>4NQA2oJJ}M+3tk+o7TPv;7Ky`b@dZRJw zQ;CHO-{6E7!oF}t6hIn2y}pe6Wb65B8sMlFE~*#4v%^A1Y0EvxZg?Ti}nTiQwaka2*|+WQ$|WAW+cr4S zmKP2MRpPvT@9kw`nL zvY?TNv6BL{Joc=fIY&C2>u6j00SlHxy1rS{$?Z!lgM#Y#O8dZ^5qn41wa^JkqEk9Fe-E+n(0WNOZp)vBsr9@&L#kPjJDkbB@o4=*lDpU~7+q0fPWs~Gn zUBf`3gm=rBamNen%VGPy1~2M1;?}+*0ZGwB{izikF)@fx-?+o4-ZM7GmDZSWl&6Z9 zSN3*3%(ec#6oUi@2TTZzzAm_uA}>@Y=THhpe6)eA5}Lm{6wMfi<5d_k9SIqz60~bi zR*%z$)L^T1*veoVG5Rp+T3OXaxm?9*>@At2FaGB-Q5Y*e_qWWD`Tn zPa$a098#1G&-r4b_~87IPb1an`qtrPp-!d!C7u z-x(GT4sQB$ljo^Wxf49Zghkx(?unl@I-D&~(ExQKEhk&-=wz2U3fH{XIR$&@Ik^e} zqc9)>z05@Hx-m7Kt?qgH)bnl^$+xrP?8QfDyos}Tps8@+1N~lj$|QW+tV%3+3u|wG zQFvyKoCvlcqKA_b5K~TRXr|LS{vzXmv_A8JhDz&2(FW;;uGjLmg zKZ=;VMUUtWiC=*}<8yQiz2Ok7Z=ey;_ouYP127YId zZb0M_{9(7=3`ADwZvf|%nXwYfUrFih2oV)ZDsDWq&x=F5K`?ywZ-pyIDLzb1VFVa; zO-O}Ds}0V7NYnVQ!WnO%WQ5)i7E0UFa-g)>_+X0~T%Q|Fx1vmvq7ySjHHF5aW+Tu? zZ3gnwtOt{ZYQz>|oUPC&t47QO8av=oZ+ZN3}e-Ve8%ug>v8F73{ zUpktRzjs1PPH-UYw}E#Elt`Z7pcL}u;yc=xj|%36ojGV1D4v)4uCMOkvD(3rFaAe) zQ=$6lpcreuLTm6|zol4Nm56(+cTyP?WK-}%VOE;XjhLz=54qosQ!QoSNh-9)T)if&)?B)JCZ1<${rBZTugWo3&P#}oA6jeoV0S%K?eM-v?tXb- z`lg%{{rq@aq*1DTc5^e-8-jp^jSVeABzAdOu8~pugtGG71!tGC7kW~W z{FwF}_+Vr&KULY1a48ifH{ci@>>@w(`6hnDK|h&8dQ1yI7rplqx%;DCq4}`u!Z9k8 z2O?qPSp)NG2Af-l^=}U6YYyE=tupf7!Vb{pTevJo9lUWZy$qjO&b%yx>RqC%U$Y-vtqwo7kH zCH{P~odtgn5a7>O~m5i*!{oYO+j_T8;8mR{Q3r(FooBTI6%x=Yg`X z;Qu;xO>)R4FQX}+PSR<{fF5;KQzP;<1nK! 
zSG64FUu(IfK8UC!Jiwv;w*{%Z9Fa^UXv+xp(${W|7?;SEQcP4uO*IG;3{B^X%ZVXS z;P9R3?Z(1F%;M5@Q~{*_{)mZZrdk*oy+~NvRC+eKDhr+$+oexzHwsLb}!?DVWvIz}U(Q0Y%QBvr)1a!Nt>(Jg!`tbF@DQ zvUy%;6srzuOu(kZGkrbZxpSN+9^|@eqwozZNqL*OX5_WGz>YdNI4Di7R^&f?+1v1f z61{^(%eN9kmPhLpeQ`h1#$&vy%5E`R|AG)4c#;j7$U1d?M~Le9cN%L@YEU(aM)NZ^ z^L#!OmuzN%3e5*t;BiotS|Lpzx#rU&H#awfUw+k_qgYI21Sw_lm@HK4LINWr5sQn9 zDGe;J4j<1+pImmrWj}Ch{lViKS~@c{xxTLven6a!Kuywoe+in{_~_6r(O*V%KO>89 zf?+BQkC0l&6CGaxe}nRF?O^VTsqqgeu%VN1!u}7R;Ihfc;N}l}!onGy-~2{{e|lK_ zm~pYkIOB_2mDT6xnsK)&CUA$iiG)a0%*d)9;7%PDsYpA0E?^sed}l z-RFJ9>hDV+ElbQi$6oUCa%^lFPGsF8TyC<7vYR_Q6HR6}%U!!JuqO7O2Vw)GA}46t z`t@K^yhNQr`1E6!LDc#m1(gcg`r$WMhU+Z)Of{|KF&g!@W$r}Lep#Q z8-J@`R$PZm&1l}uHca|{>jonq?3#-oe67KQ0YhaY^2O;Pn$^3(+BFSrpN^Im2L8ta z1UnD0o@FKV%zZEg0pdLFQLz1&+u5@X?B_DQUUyoQ|LN6nV$Fsa(JCV)OOEBLI9tz$g; ze!}m02^5;`5VT!Emfeg-vXxh*Zx-<0dQbkzSN+G0B&i@}0i3L8zrQ>k(QJcq#Nlj>F$>9{t?n4-O}BSbax4e#Iru<{9o{bIQMeD zyE{Aci5Zm+9XBy6Y6$GZ<7U-WdU{l^`Q-L9%||HFOYgm$#csuHwoU_){y$Y8f|OUD zNLmfw5GV{EAXR-DPIjiPGd%E>-5kXiHmFc)uzGNN6;_Px7#b3-?0lx~@~hSpd}`D# z>%EFkPK3E27or*bGa!ia6`iJ}3s&}9TGtT?=p5+Od{aKD>WO^i_X4Hlc8~p+GR$Mc zJLm$PGze=e^+9Ifl$f z<1Yt$Mq@Lk4}TwBhuuKrDY~)&FD|%edA_vzS4R{Jr*Hx(#dM*dHY|b(eq6CA zlf-iEOrb4F^!Yr>@X=AtbSV}-HngqlO~HWE4T^FH@Em1qZXXU+!g*II&$?=*Swt*d z0FiR5R{gic+^|Wv)8M6stMN4@w)|W*smPv{YQK%<056sJsnWT_Bn~LR~ z?u6r;-|*hCzdvwX&KLH^D?k5mzKYoi1sN0rRLk5^dsMH#tGik}DtVQa7B^EHg? zIn&3zzoKOR3G43H6P&8h7mB*L0zE+xUoRz;xp0zX5*Q#{7AD0FkzWEY5%o3sY@Z~i z{B>x|byw>Lx80tqRyLVNvVyX6o>JHyY22rdfS~BE#GKS`#&RJMpLWBH zKS6A`h;MBc8$Xa!7V*q*a<+6j|8;`>7N1PdpISf{OY*^^f_jLY6-86AxM=miiN(|Q z_ecaeH53aM27hT4raF;s?8@B|rTmp~X|`y~ENU(~f3%pMd)N~fji=54by{nq70o8R zuc|TwY`*&Uv2z^19GGYm8T%=d`RJoPa%otxwEo}f3vS=iQfM8+q1hyJQZHA3jLCx< zM5c-?_iZjTAsNN1U$m;4ojk)xVX*rT`<4Gbkhfp|?4O?Arpqpw%>DS58)7z?nVi3E zuSGS~vrew3)4*|+9=Lp0M5n#ENuc(G>mbPfzL3p#)I-q}lPp)6mI!n6BnV3p=jrcKNI{Xqe7%)6mbMZMA%_;dGB97iqCws3}7r;CNL$nAJOfT zyH~${irPPd@m|+%@%jSdK_;vB^3Q{4uk1vsr6rYPd+)L&Ud0vjJukcivkFwMkluzt8@(sDk5~Qi-r7V~z+4NNvg2d()28sf_n*m@=H_N` z(gFylZ&~xOVB8Mg%z8gvjCq@OG6Ugi3Gcw&k*&H{H1kNPe|kZ~HW;EzY!2aEu=z*_ zt*TG*>HHp+P^Z>UGi~`S9EM2#>|OW#u&gK4i}a4>cimyQj*s8X^MlQ0BkHWh74 zLp;Pdi-%;k9?qs4(Y_Kb>eO=1teM-#J~JR_@t5U2U6~eOzuL?0v{MU7MXXDp7dc|w zT#1L4DYu6X|1n_-&@3_BFv8PVIyAS$E#Vc^|$D}N;IV~CA(BMbSI=o(4SLhzq z!R(BKTG)?af}VpZyoVt~+U*}qiqj-<H;VY0-M zDJ%uKbSkenbZBU5kWi`p9!O*@|MYN>TdKYs^RYo$93fpC9)h{j9KX#yE;ewosM+F$ zF|pQ)l&u)|HHO6Ks4t>arNJtF6}M1XEj?Z`s`WS1xmR8sMi%KQksS`XI0(0FVcxRstNS%|r-&zN?eH*200@eZGZ@aXU<1y=(6?5e5%}Cr zQcfpy+CtIb?o+NFxkH(BgI{xmV9<)|E+3aUrz<}*2E)RO7pgn|fMp_{yB<6nqU!;o z9dj3(1>CSn15DEUyGv0tH@DsW{kIjCs!uN4)QblAl_v?KV$*xU5^GV7* zQ+Q%HSi^OMi_B#7n?}PtI`4+puuliWr!fXQmo3hu8fX4BY&K~^QGfMIW_x;jdygmU z3*zsN@lq}x$qCNapdc<5E4UwNqr<}Fk4;YC(JoWzJpTh0o%(_J2Y!w3yT~0q{u;~6 z_FuAq$6i9GlGr#g(dd)9Me}DTvBz@xv?SW^p=e0;Srdrp z{>h?zD;OX64gtZN%LUO4oy6?Uho?*rWda>NFi95To{|>4U${Q_(CNb_r`F#nkw^>e ziRv$#Zmn6KvZ>c9DA>E&3lm{!8R*=b!5st?c!5vPmmBq(J#P6iBvib-8Y%UfND46W z?FCFRh?Kd)8~p!yL^u^m%+dyiGtojPbnhk`7!PDrPc?jIw9#UtXe^M)`Wi$0hE`0o zuWx*rD1h5~`>{|uaL3i#hy8k1$x)C4`!hpJfqfWy+I`Pokd#Nd3RJuvnUjJ6O5-Cq zd9Fe5fiDP;4~Xj&i@BjiClvw0!Vb<~rOXs#H{IP)_RN!Xl>Pmx40!PZH|JbON=Ml# zk+45!Wq(<1xx0MRxx$*YL4ij0(=;u71FCx{@>_Q|ZzDd#QIv7^jY5D3i+<-spxbAR zLsWR`l9IxdCSF>M!?+$Z^{cb>f{@dt>#OU&aLFhDCHYuzS-GHFR?*$Nk@5p&V^-e{ z0W0R|MI4gP-_6@DUeiVIui3(1w*o2g=7t0V%aR%1@bVH!3Xr8-tc7g6{6T1eg9D!~ zW4=6Wezqiebn7kex>+1yZ0wtHht1sCK<8{SQnr_yy>JCZDZNU3&M-8|IG+8h0G?`% zxF`mk(WBV$j1=p&;3kV$AwNeJFsQ{450@ZP#k^Ij(u6eCHQgaR z3~fTE29?C^Yro<%;@|L{>x?I^Vw!}8I4}oK3-3Nj*}Qgtm{!tjKo(kUOzbcVquukm 
zV|s~-=^y#!^$$5Cq$B21$Ojw`i1EzHg&dO;pr2EZuQoXFPPV_?ZxO2|CP1P>h967T zTgjMmIAy#R6Z!Ml=dS};QP9J4%3^M4Y&gDxd%r;IS8Ma#W&Y8E1@YHL9>l{{48S-{ zo&26EoyY&J5-o>*mw;&C|E(BbfuXqN!rv6H2ki0GH38G4Gj&?yOVq-16j zp`I-l-Pd`ZDkQG)Lp^G-kz?hGSa_Mm041pyruj*A6(BQ+-CLUD6!hsVEk)F6vV()T z9L>P>^z^ttT)sKq=!!Gy4HdIpBm29bY0u`oFAwQ1l#aXH?niOqVVQA!ub)Fut#8y_ zpmfC`m?=w8Xi@buS)1l|K5}*zG5~C1v%FJ7rz1Y?mYgrGjxaXIL=>@Kzw$LW2HCJf zgd53)qiw_D9NMYLGI@H6wdzN*ej-7l+)n$j;$i5)=suflMumA+l&bgc{<7Ac(A`nO z`IgbA3usZo?p^Y>e=&D@5=#hc zZHVr*G@HXjW#cr*YG)Yu2n{APklyJBU#GFEZ>xX#Z_|y(iren&g`8S;yQJ%N;L*T$ z&z(mH4#8v^;lku)!Peb|v=GD)O&;I(#|yXpW7S;)iG9=%|MEl}bss*s{1{>=$oU7! ztsp!D#41T_YDGIub3F6=h2F3$ve9Lk-YbrYJ95>ajo*df|2w z+Cn9r)MgKxI(+mj{wT0V5o13)yA2Mb zowT?FP^GeL&DyTgI9cuO4j+=B4{PG7h^L`Z@#282osNa~r_1@E=Jf9`$nI?NdLLhl!v%S91jEiJT(}r*G#JVB z89Vp)67y@nYV8JGxDx1vKUd5e9k>2&w%h$#KW)M0JY*PsI0MK0PCf~@;JREL&9CCQ zO!vO#t|YhiVpmrGxK<5|6rVh{-$W|)4{^T$nefY<3(xstC!9Et|LFI%{q3MA;6A2BCEL0&{6)j?nxgcatm@EQ;tN9r3Sb>X+Phw)P`` z$Qyllr!dP`=ZOdHJyt^baC6MX;icM z^h!mLZ=I>&)px$Y(@VYc*Vr5RhYWrA#zG>39`y4ka4-3p%)d}TE(;5~g>vmhUI-gL zz@abQ4+=<~ST-y@4wKf;w7#}pv8MREc5F6fisV!d{rWQ=5VYiRnl^1@D}Pd`Wv81D zt)ZlYgV2R}Eb}KMx-thx zTl3|ZhGw^mp$5MF=OVAW!mAc@h_{syVM-zWuc0pI!89g=$gz4JmypCRZAQN0w@8F^ z`Le@Re_jRM&_BBQOvae|Zjt&EIU{sw1_=4RM7VZ6y=DJtJzpA7Xsw>9};Pa2PP9vC-UV)X>H`Sue=N5BTS(;tw^JCHOIpFZb8nnQ_WGJs>aUjTD_s?+G zHa8P`y*|JQDrMm=ebo_8hXHtvcc&LG`nCH6jF0tO+AIDhvETZc3!2=faxJG@H*V*~ zSFvP)+*`BLg|%RO)w$l%@``9Ruc$qnf3an=0RNV&T@HIWH1@ib|8%)H3HMJZa)YBy zMI%jts?#|)Cb~%R*N~{oK0-^z&b!+Hn18!nF?QkV3JQ&KpTAyZA9x1)O$IhQcq&^O zb7*OhR?;}V2?vN#)FyvDkts$7!mR_w!Ma$Hxi%Nj1VX|&K}pf5lKiz_PBl@F)`m~L z;=*J07TDM)E1GR-gu1x6$b|4EJ+uGoXhb_U`Rh$z?IF3PFOM4P)7YnpiUTp)_CQEm zf&3NLiDtb&8{BztALgvuJq%y;@Te1TsU+rhaGy_I^1Y?wdA)EWM}zI9K@K$gcey9Q|Sq_c@TNu zIy*bV!^1^M*3UE4%kiEzq$+ue7;zwCQ+cMxlyZrSge;-Z8B&mH1^hq#lI;PsPqsib z=;Y$b75|5f!rE+dAc0;L6ORIef|a!#GuB}5tgfc{bvI(OB?&>t{lj1Hs=9SD!K6RL z^$ptcWWuCzCMGKL<7nLP-?O>&h!1w8yXIJngtcmdZI zGC;O8!jkN8Gx%I*sr4Ema|M;BSuG8vK&4sUUy?|>wKH9aMMM&I{JGr0b`5wm0gOE~ zHX$V>A`%<7w_e@#mWQ9n;}2ZvNeSMi!IO)Wc6(F-eP3}&VP(1+9L?5?s{Xtd3vtlE z_Y0mvg&=sdtFJG2^BV5S*)dE1QO|wXQcY2+tpI#5jWC7vRuwA7R{5#TE-@-%-ON)h zj~z)iocDiUzI5lQ7dF;1obNH~nDU3Q2ziI64~SDqN=ouMY(fNFISc|k2pn!4|Jz+k z4#O}tjaM(1{E)feo{<)A+(8aJqh`)C!qG9DKHruVd~kNC6=;Q0DCNJrR;(3R5}y(C z4itMhV{PuJ7N|IS{^e;0P@2=@$GP9LKi3!VF^r7~i&R_+Yw77Pude6`;x5k8EbPW{ z)!g=SamYTO|2}>wR*;z-`FS&^2D?y!Bu#Yi6U?fiL05r<%Y+Z^=1YvY`Zzm2Z&4A_ z!4OmqX?UNVlI6`?)mrv=S3Ft37wqiha}r{vCI#1g!*p+LbDn!!eCuAkX;Q0*KxiOn zAPx4Say%tc@g}xqG%n?5Y8kwb6n(K&$aw)$0j{Fh|Mk(!VZyQqdk3@NXPdI`sH|W#EHm|L3qVQ zk90RNVe0wvPi;eHf9m;j(Fh|~FbuRBL7JF7`Ykgy_Q9I7Fjq5MaOIHg`_`69v6?Eo zTmowfB{MTxXKbv@dk=BL)!L6Ee{$^JoS5(hb-4lItlJWrNCcJ|b*{smLq6!G_)LW#Tqxf5BYF3vklOxQsT7c;ew^HRzQyb-{L(A^15%hxKHP;)8F>Rufx-0T;)S(JWW z=(R3U44{@RRU0dQFO3C(rv7gHD4xb)Z=rnYQW-s~(}d{qN}6P}zcR1NjnR|7@QnirLsgjmB^p1s0gPx4~La71)HR z@cPK{?Z$dvqTf%Epw`I=ph$cD<+#!Ws;eEvFVq$pQiqFxtEq_!iW|N0S|N4D<3!1gX<-D(CE2+oUKiJpSt5vhgq(q!!8AQ)*FtRnt+Ou}y zF2>o2ie+)_9iRR6%uKk2$2wTn+N#}9SIZ3~i*L4%RM$LhKIy9FgW7l_`m?XXSTj4HU?;hMi1TFCD&5Bzv$Sm4jM{csDQ$bQo!#^?A4 zSy;CrvK&zxh$Dln7t-{El#rr8nyt|F!cgif&}m>`lL01vW%8H~9YL|cWl{Pcu<8%# zHNO@D1D8+SJ%wQ;!h}sNJmqC^|t+9=xAuc_`Yb3v24duinF=(aWFBJ8Gk= zBqBanaQ_hUt&p3mTeXT!MybEW3UIm0Tuo>q&#+VfQTynh>wQIy->7|lKIJcFgX;M) z%u8urWe_I9W2Y8vrXsHE!~s8<8#g$RZFeu1T;sYW4$yr3EoPm~(O+#n(He;oB7&?2 zEn+BB?Eew5Y_z@O+t=t2@Ep&eIh>rB#Mh}Dn#dlKiGFfn*RY%#oW%jkg;@Na9<#|z znAfa<)88joD`~{Dd~BFuo!Y8qM(Y4wk}cX{sH#Hr4O1OS-bcz^YWFK)%W8L$^^ef%D^P-Sx{vx!vE3^cgzJX`upaP+y2xx%BP=WlhEN zN7#GtNky0p>{4{g%IBRvv;HYHfh(e_&_W~^@?en3B`7qh|K_CEtIPGBgl;rmb9zza 
za?*0_;h1svx|th|tM_%-$v?0-KADm0a?t06fAFJp0(CJ4QQ$vh=rr&fONs0+PXHQ1 z8RkI}xYP?uAljypGH|Nl5tN8iu%j0rf@q5zjXG9C#O>KO^<92!@+r&`u6K|uoNvS> zZ(KB5RD|$YLX*UVek3r%vNMa^FB9^rF}o1%;r0WQ{MdwFW5PvqwJ50GiNq!P{9Cap z4TEHBQVmDPfQ3;M$ASW8Bjm?G{=->eT%d4bkPSLn&6h}D@k&n)(K=Ze&7iE%pJo3~ z>$~;f>W4wIacb{6G>|TU&Paht>KVmbpCw!O8HY#NP!8^+qES(^cq#4+2!$WOb-2W5 zjvne#Tal-2`Hwi1*S!i6z4)^?%DVADn`5XOewd_riGqB+hi$D>doWtr!jO!doxwYJ zM8XR@*w&bGoZ@sBs$ChlPPq%Q!y-kUtkR3Oo{J#TiGv)NvE2<$ucHIN0ujYBboO=K z-~H>{ywzxh2oG2U!S%iqS+HY$1zbN}djZL7r*Q=4Zraae+WmT2hTiqhB{*=*TLa>({oH+k<5fojT`Ac7!?d>1j zJ)L;6bPYyLSg+66TU;x~Y;oj2&{{XuXHHwW_VdsAig=-Oc0S4(mNz1za~$hI&|uX5 zLxw#`V>|BuRXl0v!maW(xH1L|(ET2Cf{`vRWeh&;*3MBx&HkHa?+f8YS~OzA(or$& z;%H{#&D$P4K{un{IURiUw$?3VFI$z`M1=-W{?K_GQYABhh$pp$tcu~uZoB%DYsV6y zAP5Qq=~JPEngBGZ%~_4bja&FfBV-}Z*0!C$G`StzIVvCXv>T(P&mZ6&4^OpDg?!f@ z_>eHEp%NgdQi`&6(6c|LFYLz1XqUoKuu%qM@O~`2uDElTs1gguv%%=b`c zle#+%AnST%p-TgIE~CC1*rNXibKfr;vTdG@XA;h~5oDQ$e|hfc*xKF=?jx`SxZ#J{ zR@Yj`iNR8Xk1d}}WC`mGp^QDYp3|3?0R}seI$P2ssv;w+6tGrdmkBjD1VI$PW$n*?9Q zOE_gY@&)(!*3ix40i_2Z$D1d6rjC6>?FyM-3N7=wvzQ7N2#aBHp=d!(VF*=1-u@4` zs>vE)y4ub()fg#zyKa^sWa@{V9xmR=x9e*fP=ABNrrh6}HEmzJB|}$&tr##`3W?Fj z-~TxAbs{PibTk;~F9#94tYu|JXDc%DA5CWLe|s*it{8u~Ki6dYiVE3TYBOb+`$^%2 z7vWd6(H&^tg-6;3<_Ud`$xQoeQnR9?7pwCt_0eWSQF{W} z3$LCe8D{D^R0_)w;KRapnz~-GaN{Cm-DAO8WEAa|sxfd?5(EQu>P-_R1+3UxWgVPX zoqBk?J$XxOa?;?nCvWPal0wZAnD1|aL@kf@vx8e@uvm6)t zS$3hx9tpNuzb@o9Gb2ZKPoVwDYWH3@Bi=#9Q^TV{d>5_z6^%dg76ekn=8SW&`wxR>Fw?&!9^tpepHg7(LyjaN{5oPEGAMQz8OY+XQgobOX# zJhXW7j~xSK0DG$j>zK#uGl%rjUoS7Ju2M`4lbk%wlAubdApQJ&P0pJ_j)oSiNqtfQ z=Yhlb|Ft0QYEel!3H(zXDs$;=uUzp=GnJA%U0(xr5i-E$+(%OYjvo$m-($&dv(ay) zQ9Qy;_lg{SbqY9{H(GGObL#V9VPF|((P^qE@Vc6|-e(e1NlVqz0`ve&y-9loW;Du( zKZJs!xwSd3rW8@^<>kTnIP4JjC*R|PQ-o{i^+wE0#%iM@)qm~s(ktciYEEB&0&UFI zpCo$h7FKiniU$YeQ`<9G4ptue2>Lpy%Tqo!j<^kqZ2dSOXpTOehH3Hm6CN2Q0g7on z*@DkO@Jo#rM(L4FkTK5!F3_;1$r6WUqhk;cvrtR|=;&XsC#Tm?eVrjda%Q!*YfHF` zhYK!NF527j<_7Dl6lOk;J%;-hkZBRvZ45*k^?o@J)7K{_7x9h$hMBMWp!F%?G{dOM z*O-N=!S)Xza2a@>uNfi!+jP|OodMt(MGeF@96y&k&$p+QPEKnla01ppNk@GR;gtRTzlaGA<{Ei!KCKu zD!C`e!ln#AS9%=q&+{=*lW*!P|$7#+n>mY$b1l~Oa@TDToo*->UZ|(nJ8dq6pJP>kiQBxCwUT* z%{$e--P`;4bbm^uw8pM@#KNp&Jlo!K>Eb30qG>!}k^z*F$9X6=1(69g&t=4ymEGf! 
zRQczmqQxLpuJUS{wWzV$(BCl=c{B9s6H)dwwPgi!cj7!(+ICm5g7bjJ&i*bi%uRzx z=iaO#)yIkR+dy9o%I<|AfF1@RrF_yJdap{@Kz%Jb+|Z%{F;pgHiw0Wlzi;mbMQc@X zAR8piiuDsC!(yMbiI^X zZL4Cix2`lYn(7`O4+K|khref%3IW*uUO|3$W^V);+_T>NKE$27@#?%pZReQn|s+VD!L~6RwBHESp=7jb1ZEhk)hVv*Nkl z<)FUgmH|!rEj2u-5a@s3PhVZJ5xRN*xn>jMtY5Q@GD7p{3)6iC9+0;?+aI|`R(Py} zRWrMiNkzahyMG0=XrCgMIfs{mb4>9i{y3KNt<2Fo?eKp|+vU~Zzb|$BHYP;d8O=Kn zbzHFIG1Jgv?xqDidyDY^Yy%D<2iLCyU0AQOod{`a1KGLE_@r$>OqWWlGz)8h0cSyM zl@3b)nk{J*5?4h=HeOwwlzmPxK~Ty~QDEmXM?3x1>66YAZZG1?k!E~o??2Vm$XRuQO9X#WB0BoNZz!io?6Roy;33bzma?>AtXhzRx%b)2lL z;!57#u^QN^`|X~_h!y-f!J4?l%sl15mj9A~n+jx)o0kq1+1xnR=Hp!D82dl7u4uC| z$4=)?ep?t7OW;Bdl(%-e%xjBg{!2!15*P(wCUsGf){kk@tK07&G(`9sqmLM`*16x_ z5;YwSnCV=*%7dxXSGV-UDe+SOam`hK9<@68Xld)w+E!SjnUj0HAsr{Twq~PVog;ex zgQvnaF1ytUk5Z<`jGf}Z{#P9x_97{GhhYaa5+OubC~=zem>DsAYj1hj*_K4rX%4rJa#dH&qdZXZr0tcorU%Is6mI{2lfAocg_a}D zJ)(8IwC(2Ur6$m@?H%tOeJ=DNq8$M<6|HH(2U!zOEIXC_l0uAaB*Q6g!?T zFuw;qAOW=PVmbdw`Nu%Llc}UOywd$h>GttR(cH(J5F5W;55b3tZ!?c->fpKu#*Q6s z0&+Be+D(<^$~D{iW*nm7|3XP3R(8dom>r+goScjq8mD3Dj@a083g5rkQT=q;U!~_x zc!S$u{b&g0As|+LM#)M3g_U@z>%bzLK)RR~kf3q-aB=b28}`8a7W(tAwW&(i6DO?X z)P>yZf`r7xzVDXy)+o{l2wRp+Qh` z^Ek+zX~XO7_(Y>MFIA+zCA;mMZXaw*fQz>UqKtb^MkG8`!*s1yJ@WFO)S&GS!Bp=H zxw?n@`h&|ZPIJ&QH(U-GIbenu!gk~iU+)hZbKkHjYBEq<0YBMt#84=?BMvt$`O68;yfTfft|uV6^0`F~lUx7Fdjmw8%~x8(%d+AmyI<@L^0Pd_Zn;8Kq8VhK zZ%IC-s?}u~KIa;wEtJNu?XS!8kpZby*c zzqI(mOeSEfXq1OYRSp!sQ`*|u1JhZox2`+9XhzQ5kc4w`q+aveE3SVhOnzWqIC6L# z=vsR^S!qpPCL+DQ=}l`zzw}n)pi(F%BxPh|%8nar=QJAz^HL1g^2dDJZy)!~tW|JB zZkp_!ou{j%T%Z2ZLGuuEAOHE|cv`|Gnf_@neM))O8S~F>Qgzec58MNOBjOOsj^mrVmP^g+sgjpGBfjeWz)@oX=hJ zu)$6tTGP{uk58ttSRDpslHk`q|CSrRPE=v|5VsT`si2>|G+9C*)#chLV!zG2L7I5_ zTWy>>5(Jg~v*uvQQPmHLlInyTNW1w;6tu4X#Aa)@`HA4rP7vpE9NLNr1eVuxu5epL zN4Z@<)|#<|Pq2QqjlSGH6n!LYHN;wSygFP~B^7of7bNhD6x+(CY0r5U{2DA*^z+BO zwZrchpJ7*l8`=l(P0huF<9Qpn^FWzgzD!zSoHR#aZK&+d#}irB5qG+b13Wwynr%EF zY9jIVLO1*jM+KLWR0JHbD4X9O{-<{Jegr@K}n>bth^iyVsZMWw9|jjPP_MB}+qQxMoV8vldl8)OB8p8InhB|jksb{Dtg8iI-v zTA%&yk;$NzaI_IP-kwDqe4d}zv`~Tk#w&$6SxAI~1wFgOd&todYHW@DAVZ?fb7S7}E#LEyK40MK6F=Z9!2e8iWuk|!jM+*Jz3zIp_GQV`d_DHea z=)sf2n@q05Z8iowI2s-29nzt9;M0@C(OirqWVs^o*H|)z_Iau3 z<7F7g<>>b=HX!7e>YNqVNvA|SeYjOcy;vW&dS)jOZ}}h-i2e_YXa8@TzhLn0DCYCP z5Pi3rLb`eY`{R-fHS|9i0t5oUxN_~mGhza4?Bi{vp@?6A-7^T;Y+#&7F$+9N?T}X$ zx!oseXUUUJuyWrHSF?3~)bKHsn?4(A{b62ElB)|DuazCIwbhc>?1b)mdFH)e26DQX zW7th!muMT*1Q6=#)5IoU*{g#tr_aa&c3-Dz7AAryIF<*A_}2 z1aCaw*#W9HO3AuN$W&Lo3pySi0hP-W>=S|Yh*hOQIX=tjnh`!iRe61VpSmfBbJ>pU zPiJ5fN>C0^PVX#SwooB5r?(E%5ungnz<~e$){sp(4^`f!G?L44Bx>-%0nSS-+Shr1 zvm*Xc={+r%6}L>F;jL8F02&lA^({Ox10G2Q-SsfmOam-iWf+95*_n@A2umO&?CaUv zZ-o^Y;r*G7ZONAle3Fd({P{{@=npTKD*kz5->5BM#$gpPi^G*h4zXyKwlxPt{t?{y z1_o6f_P#H02X?=vl-^+Vi5Ezp2iHoULl9V5U{;?S&ze5YJO7-#R*v1-9~~hhfs#@*T52zXF~j9|jDWz;56Z zgBwJSirPBa`^@5-4y>)HS~cCflsxLG&UH;eg+VZ7&8_*iAaelt3UDBlYjS-=fBq=| z2D(U0smi}$EzCil>ONxL$d3^3l4f~9JxWi<7V`#~#0INYAb~36TYEd9SX{O&yE0dQBc!$br~aM#D6KTd*P<0W927X^kK6Pa^ zSZP78U>?gr+JFGW%d3I zhmAp&2pwzeAMd3KkLF4Blz)Nk84~)le;A8MtJ=ukB*;@+gilc5?TIUoMMfxy6`(A4 z{L(8R_Cve+E+5!PN`Y)`{cLr|!-^p_rQbl+A~I011V-$e?iMUdN2H;fUyNjSG|(IZ z`$}4|XcXt4QP%i}w+aqS+ee&Zq{5^yf^LD81Mm~Ev7ie9KhMM;O~(o}t!8CWI0;Nx z^1%V4`0%#u{z$zxhqE4uo>~NU_J4N?ki^noWf*}$dv#6H+fiH=a|~P<$Ul`ByF{~{ z02M1K*)u8$(Pg8c4B6@ZJon%2yt_aK73Y0YCmyex{7PB>PfPwGo{ckvctBwZ- zUUpTh@0^b7WYue8Lq5n1(iqrvMS)n-_ekMq>@6(i9Q}L8#cCQE++Xoe`$K z`cGTUMeYUNI{7Yo&R1OQDwrPKoxFY&_OXct6%K&JtgT90{mo{(3yvm1s;0Sr(QQBdU4R>#2~+f9qxr?Cl3$Iu?>Y#M(G7Gp+dwC~ zJe?q8(xjXe)K)>f{M}PVG&}m_1v(u(b(eqB;vd6h9+*h|xozyt{#zoTCy(0TeDFh| zhl6tZUlSy`gf=H-Z3@Ir$TkJ1Z^ib0adXe@TPlgoKJzP4qp>8s`@*6peIR0`ebe)n 
zvsJ4q2>$*U%4BlzkB^U-IsO0jg8Tvhs-ViZd}V zeBGEIa0I8DuuJfbxD|x4>M{KfV9jou^7r)=Y19rH8P3SHs#mLBbQNS(*U>s;Q)l^k~JQWcWGk~iO7>gBh)(36?3 z=j{cqHK=12MST=8*pnpT2lCG50(wNuZFOiN$YzkksFD^z_+7m~db_EK2{1e|_7g3( zSiAE@dNBp?{=FAPW=fQpypG_PvOCb>vsECO3I9lRg&*j<4Cv9nF4i1H&T+vUA zd-o2Ae54xc)vH&Qohee?!9PQx@KJPR-Q4ddiBJr-kWg9sA89W5$mqQ`(-I_rIv>Lb z>O+XZHke>^Hb@{Ms)e(F6b(xGO;jltM40(cjg0(o_&u-%RS6Aa)Cc?YhL1NrscJv=95CZFDJ#v=x#9v=uDTt-&QwuVH zbO6*YYhOOU$-41d5i|Otbnl)_5VpYG)qQ(L3}X7f&YnQ_Was_A=lK6;$sWLzJk#3u$d&VdjX()Wq__|xxh`#`akN+og~S}dv&0Tm~lF2<~`23x$cvNCY@<1Ywez2q7_U@Yz5A?nrQTmeD#gE_lHmSe}(#f(?r-)zy-ac@z5j~{B_u>b*Y7%2>OJz)poJ?&B^^~1lkO2J}Pp*)QY=m?@XjeXmkle*w#UHfu9u=E-y z0$U1ETW#2VlnBOgDmhSahH^)?FIkctz=5i%D&mP{krD7xsFV80X*!-?o?<}%f1tVk z55_;;&TP&6?-in(+mrFR3n;_`-*&MBUDwTpZW|Cu6~fwboi75nsv|`zcPvype}%mNPv9=Z~@2#=tXs4x>b=#yo8tv0h6*y zlOh)0fX{$^2m1^I1H=7x;nlav8-(F}sI1=^p0^mn6p=xK5_Av*ZMk%iot+)`lhIr2 z(_NC$d^SB48oE&Ii42s@a+n!(Cb#2^>X3cA2!U@TXjNQ63HG>Gkr3aG;CRRlX04z~ z7r}3%q3@E4++1L(Wrr0?2aY^ItJKug47i-2l)tavGlL8sdASa;{pCO~3rtK*onb6G z+#b76vvo@3shZrL4??=TvDDq$s(jX1;OmQw>P}>972NaOZZ-%3?jCxMT5c{3oggFY z0h4Oe6Mhmg$s#DI4aKU$Xub zfV_M6CD|qZQHMCV>t7R|U-sO!CEQEG${2n+MoI|AX=5BRz^YL%!aD2#-X;T(yyL>4 z7Ptngj~+d0b$gCbl%toAK`B60B%6w5A85awxfj`q!)dTp?|u;7nd92q897LUNyr<4 zi^~2U=!>p=0O5w`~P*e-AlRDj-9Er^w`{4|TiXS^+?(!#W5zAx3Dp z0HaW3H!dx*^5wc3MnPH`UKTnj@S5Qd>~`^BmE;#8c}zu9Ga_Xr?Y6ab4s+EJ#FGpH zA)S$tfgc#zhxJ@&;h8o!SXMwwu&lfQ93Ld1P*CSW0Zt>J)RBdD;~42HPdWAh#@pJR`1Uo&>k%(JA{ULQktMt~kS0JY z<$sY!IbPJg5GsX*i>lbgA5qlkfSuwD>1AO}!hmy5<-FE|C}@WUP@fRAmd$rw)NhU8 zxU)Lh?kwvPo&uYe*?3cEP>O*4gxQyOS4||2AXH!nk#!!leFeUSs Date: Thu, 14 Sep 2023 14:22:03 +0200 Subject: [PATCH 022/115] Update quality metrics index docs --- doc/modules/qualitymetrics.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/modules/qualitymetrics.rst b/doc/modules/qualitymetrics.rst index 8c7c0a2cc3..447d83db52 100644 --- a/doc/modules/qualitymetrics.rst +++ b/doc/modules/qualitymetrics.rst @@ -25,9 +25,11 @@ For more details about each metric and it's availability and use within SpikeInt :glob: qualitymetrics/amplitude_cutoff + qualitymetrics/amplitude_cv qualitymetrics/amplitude_median qualitymetrics/d_prime qualitymetrics/drift + qualitymetrics/firing_range qualitymetrics/firing_rate qualitymetrics/isi_violations qualitymetrics/isolation_distance From 8d9ce49d14df99c1901854a398c2862c13184ceb Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Tue, 19 Sep 2023 10:00:38 +0200 Subject: [PATCH 023/115] group in same file CollisionGTComparison and CollisionGTStudy group in same file CorrelogramGTComparison and CorrelogramGTStudy --- .../{collisioncomparison.py => collision.py} | 94 ++++++++++++++++++- .../comparison/collisionstudy.py | 88 ----------------- ...orrelogramcomparison.py => correlogram.py} | 79 +++++++++++++++- .../comparison/correlogramstudy.py | 76 --------------- 4 files changed, 170 insertions(+), 167 deletions(-) rename src/spikeinterface/comparison/{collisioncomparison.py => collision.py} (58%) delete mode 100644 src/spikeinterface/comparison/collisionstudy.py rename src/spikeinterface/comparison/{correlogramcomparison.py => correlogram.py} (58%) delete mode 100644 src/spikeinterface/comparison/correlogramstudy.py diff --git a/src/spikeinterface/comparison/collisioncomparison.py b/src/spikeinterface/comparison/collision.py similarity index 58% rename from src/spikeinterface/comparison/collisioncomparison.py rename to src/spikeinterface/comparison/collision.py index 3b279717b7..864809b04b 100644 --- a/src/spikeinterface/comparison/collisioncomparison.py +++ 
From 8d9ce49d14df99c1901854a398c2862c13184ceb Mon Sep 17 00:00:00 2001
From: Samuel Garcia
Date: Tue, 19 Sep 2023 10:00:38 +0200
Subject: [PATCH 023/115] group in same file CollisionGTComparison and
 CollisionGTStudy

group in same file CorrelogramGTComparison and CorrelogramGTStudy
---
 .../{collisioncomparison.py => collision.py}  | 94 ++++++++++++++++++-
 .../comparison/collisionstudy.py              | 88 -----------------
 ...orrelogramcomparison.py => correlogram.py} | 79 +++++++++++++++-
 .../comparison/correlogramstudy.py            | 76 ---------------
 4 files changed, 170 insertions(+), 167 deletions(-)
 rename src/spikeinterface/comparison/{collisioncomparison.py => collision.py} (58%)
 delete mode 100644 src/spikeinterface/comparison/collisionstudy.py
 rename src/spikeinterface/comparison/{correlogramcomparison.py => correlogram.py} (58%)
 delete mode 100644 src/spikeinterface/comparison/correlogramstudy.py

diff --git a/src/spikeinterface/comparison/collisioncomparison.py b/src/spikeinterface/comparison/collision.py
similarity index 58%
rename from src/spikeinterface/comparison/collisioncomparison.py
rename to src/spikeinterface/comparison/collision.py
index 3b279717b7..864809b04b 100644
--- a/src/spikeinterface/comparison/collisioncomparison.py
+++ b/src/spikeinterface/comparison/collision.py
@@ -1,8 +1,14 @@
-import numpy as np
-
 from .paircomparisons import GroundTruthComparison
+from .groundtruthstudy import GroundTruthStudy
+from .studytools import iter_computed_sorting  ## TODO remove this
 from .comparisontools import make_collision_events
 
+import numpy as np
+
+
+
+
+
 
 class CollisionGTComparison(GroundTruthComparison):
     """
@@ -156,3 +162,87 @@ def compute_collision_by_similarity(self, similarity_matrix, unit_ids=None, good
         pair_names = pair_names[order]
 
         return similarities, recall_scores, pair_names
+
+
+
+class CollisionGTStudy(GroundTruthStudy):
+    def run_comparisons(self, exhaustive_gt=True, collision_lag=2.0, nbins=11, **kwargs):
+        self.comparisons = {}
+        for rec_name, sorter_name, sorting in iter_computed_sorting(self.study_folder):
+            gt_sorting = self.get_ground_truth(rec_name)
+            comp = CollisionGTComparison(
+                gt_sorting, sorting, exhaustive_gt=exhaustive_gt, collision_lag=collision_lag, nbins=nbins
+            )
+            self.comparisons[(rec_name, sorter_name)] = comp
+        self.exhaustive_gt = exhaustive_gt
+        self.collision_lag = collision_lag
+
+    def get_lags(self):
+        fs = self.comparisons[(self.rec_names[0], self.sorter_names[0])].sorting1.get_sampling_frequency()
+        lags = self.comparisons[(self.rec_names[0], self.sorter_names[0])].bins / fs * 1000
+        return lags
+
+    def precompute_scores_by_similarities(self, good_only=True, min_accuracy=0.9):
+        if not hasattr(self, "_good_only") or self._good_only != good_only:
+            import sklearn
+
+            similarity_matrix = {}
+            for rec_name in self.rec_names:
+                templates = self.get_templates(rec_name)
+                flat_templates = templates.reshape(templates.shape[0], -1)
+                similarity_matrix[rec_name] = sklearn.metrics.pairwise.cosine_similarity(flat_templates)
+
+            self.all_similarities = {}
+            self.all_recall_scores = {}
+            self.good_only = good_only
+
+            for sorter_ind, sorter_name in enumerate(self.sorter_names):
+                # loop over recordings
+                all_similarities = []
+                all_recall_scores = []
+
+                for rec_name in self.rec_names:
+                    if (rec_name, sorter_name) in self.comparisons.keys():
+                        comp = self.comparisons[(rec_name, sorter_name)]
+                        similarities, recall_scores, pair_names = comp.compute_collision_by_similarity(
+                            similarity_matrix[rec_name], good_only=good_only, min_accuracy=min_accuracy
+                        )
+
+                        all_similarities.append(similarities)
+                        all_recall_scores.append(recall_scores)
+
+                self.all_similarities[sorter_name] = np.concatenate(all_similarities, axis=0)
+                self.all_recall_scores[sorter_name] = np.concatenate(all_recall_scores, axis=0)
+
+    def get_mean_over_similarity_range(self, similarity_range, sorter_name):
+        idx = (self.all_similarities[sorter_name] >= similarity_range[0]) & (
+            self.all_similarities[sorter_name] <= similarity_range[1]
+        )
+        all_similarities = self.all_similarities[sorter_name][idx]
+        all_recall_scores = self.all_recall_scores[sorter_name][idx]
+
+        order = np.argsort(all_similarities)
+        all_similarities = all_similarities[order]
+        all_recall_scores = all_recall_scores[order, :]
+
+        mean_recall_scores = np.nanmean(all_recall_scores, axis=0)
+
+        return mean_recall_scores
+
+    def get_lag_profile_over_similarity_bins(self, similarity_bins, sorter_name):
+        all_similarities = self.all_similarities[sorter_name]
+        all_recall_scores = self.all_recall_scores[sorter_name]
+
+        order = np.argsort(all_similarities)
+        all_similarities = all_similarities[order]
+        all_recall_scores = all_recall_scores[order, :]
+
+        result = {}
+
+        for i in range(similarity_bins.size - 1):
+            cmin, cmax = similarity_bins[i], similarity_bins[i + 1]
+            amin, amax = np.searchsorted(all_similarities, [cmin, cmax])
+            mean_recall_scores = np.nanmean(all_recall_scores[amin:amax], axis=0)
+            result[(cmin, cmax)] = mean_recall_scores
+
+        return result
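With CollisionGTComparison and CollisionGTStudy now living side by side in collision.py, a typical study workflow reads as below. This is a usage sketch, not part of the patch: it assumes CollisionGTStudy stays re-exported from the spikeinterface.comparison package and that "my_study_folder" was created beforehand with the study tools.

from spikeinterface.comparison import CollisionGTStudy  # assumed package-level re-export

study = CollisionGTStudy("my_study_folder")  # hypothetical, previously created study folder
study.run_comparisons(exhaustive_gt=True, collision_lag=2.0, nbins=11)
study.precompute_scores_by_similarities(good_only=True, min_accuracy=0.9)

lags = study.get_lags()  # collision lag axis in ms
# Mean recall profile for one sorter over a high-similarity range of unit pairs.
mean_recall = study.get_mean_over_similarity_range((0.5, 1.0), study.sorter_names[0])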
similarity_bins[i + 1] + amin, amax = np.searchsorted(all_similarities, [cmin, cmax]) + mean_recall_scores = np.nanmean(all_recall_scores[amin:amax], axis=0) + result[(cmin, cmax)] = mean_recall_scores + + return result diff --git a/src/spikeinterface/comparison/collisionstudy.py b/src/spikeinterface/comparison/collisionstudy.py deleted file mode 100644 index 34a556e8b9..0000000000 --- a/src/spikeinterface/comparison/collisionstudy.py +++ /dev/null @@ -1,88 +0,0 @@ -from .groundtruthstudy import GroundTruthStudy -from .studytools import iter_computed_sorting -from .collisioncomparison import CollisionGTComparison - -import numpy as np - - -class CollisionGTStudy(GroundTruthStudy): - def run_comparisons(self, exhaustive_gt=True, collision_lag=2.0, nbins=11, **kwargs): - self.comparisons = {} - for rec_name, sorter_name, sorting in iter_computed_sorting(self.study_folder): - gt_sorting = self.get_ground_truth(rec_name) - comp = CollisionGTComparison( - gt_sorting, sorting, exhaustive_gt=exhaustive_gt, collision_lag=collision_lag, nbins=nbins - ) - self.comparisons[(rec_name, sorter_name)] = comp - self.exhaustive_gt = exhaustive_gt - self.collision_lag = collision_lag - - def get_lags(self): - fs = self.comparisons[(self.rec_names[0], self.sorter_names[0])].sorting1.get_sampling_frequency() - lags = self.comparisons[(self.rec_names[0], self.sorter_names[0])].bins / fs * 1000 - return lags - - def precompute_scores_by_similarities(self, good_only=True, min_accuracy=0.9): - if not hasattr(self, "_good_only") or self._good_only != good_only: - import sklearn - - similarity_matrix = {} - for rec_name in self.rec_names: - templates = self.get_templates(rec_name) - flat_templates = templates.reshape(templates.shape[0], -1) - similarity_matrix[rec_name] = sklearn.metrics.pairwise.cosine_similarity(flat_templates) - - self.all_similarities = {} - self.all_recall_scores = {} - self.good_only = good_only - - for sorter_ind, sorter_name in enumerate(self.sorter_names): - # loop over recordings - all_similarities = [] - all_recall_scores = [] - - for rec_name in self.rec_names: - if (rec_name, sorter_name) in self.comparisons.keys(): - comp = self.comparisons[(rec_name, sorter_name)] - similarities, recall_scores, pair_names = comp.compute_collision_by_similarity( - similarity_matrix[rec_name], good_only=good_only, min_accuracy=min_accuracy - ) - - all_similarities.append(similarities) - all_recall_scores.append(recall_scores) - - self.all_similarities[sorter_name] = np.concatenate(all_similarities, axis=0) - self.all_recall_scores[sorter_name] = np.concatenate(all_recall_scores, axis=0) - - def get_mean_over_similarity_range(self, similarity_range, sorter_name): - idx = (self.all_similarities[sorter_name] >= similarity_range[0]) & ( - self.all_similarities[sorter_name] <= similarity_range[1] - ) - all_similarities = self.all_similarities[sorter_name][idx] - all_recall_scores = self.all_recall_scores[sorter_name][idx] - - order = np.argsort(all_similarities) - all_similarities = all_similarities[order] - all_recall_scores = all_recall_scores[order, :] - - mean_recall_scores = np.nanmean(all_recall_scores, axis=0) - - return mean_recall_scores - - def get_lag_profile_over_similarity_bins(self, similarity_bins, sorter_name): - all_similarities = self.all_similarities[sorter_name] - all_recall_scores = self.all_recall_scores[sorter_name] - - order = np.argsort(all_similarities) - all_similarities = all_similarities[order] - all_recall_scores = all_recall_scores[order, :] - - result = {} - - for i in 
range(similarity_bins.size - 1): - cmin, cmax = similarity_bins[i], similarity_bins[i + 1] - amin, amax = np.searchsorted(all_similarities, [cmin, cmax]) - mean_recall_scores = np.nanmean(all_recall_scores[amin:amax], axis=0) - result[(cmin, cmax)] = mean_recall_scores - - return result diff --git a/src/spikeinterface/comparison/correlogramcomparison.py b/src/spikeinterface/comparison/correlogram.py similarity index 58% rename from src/spikeinterface/comparison/correlogramcomparison.py rename to src/spikeinterface/comparison/correlogram.py index 80e881a152..9c5e1e91cf 100644 --- a/src/spikeinterface/comparison/correlogramcomparison.py +++ b/src/spikeinterface/comparison/correlogram.py @@ -1,8 +1,13 @@ -import numpy as np from .paircomparisons import GroundTruthComparison +from .groundtruthstudy import GroundTruthStudy +from .studytools import iter_computed_sorting ## TODO remove this from spikeinterface.postprocessing import compute_correlograms +import numpy as np + + + class CorrelogramGTComparison(GroundTruthComparison): """ This class is an extension of GroundTruthComparison by focusing @@ -108,3 +113,75 @@ def compute_correlogram_by_similarity(self, similarity_matrix, window_ms=None): errors = errors[order, :] return similarities, errors + + + +class CorrelogramGTStudy(GroundTruthStudy): + def run_comparisons(self, exhaustive_gt=True, window_ms=100.0, bin_ms=1.0, well_detected_score=0.8, **kwargs): + self.comparisons = {} + for rec_name, sorter_name, sorting in iter_computed_sorting(self.study_folder): + gt_sorting = self.get_ground_truth(rec_name) + comp = CorrelogramGTComparison( + gt_sorting, + sorting, + exhaustive_gt=exhaustive_gt, + window_ms=window_ms, + bin_ms=bin_ms, + well_detected_score=well_detected_score, + ) + self.comparisons[(rec_name, sorter_name)] = comp + + self.exhaustive_gt = exhaustive_gt + + @property + def time_bins(self): + for key, value in self.comparisons.items(): + return value.time_bins + + def precompute_scores_by_similarities(self, good_only=True): + if not hasattr(self, "_computed"): + import sklearn + + similarity_matrix = {} + for rec_name in self.rec_names: + templates = self.get_templates(rec_name) + flat_templates = templates.reshape(templates.shape[0], -1) + similarity_matrix[rec_name] = sklearn.metrics.pairwise.cosine_similarity(flat_templates) + + self.all_similarities = {} + self.all_errors = {} + self._computed = True + + for sorter_ind, sorter_name in enumerate(self.sorter_names): + # loop over recordings + all_errors = [] + all_similarities = [] + for rec_name in self.rec_names: + try: + comp = self.comparisons[(rec_name, sorter_name)] + similarities, errors = comp.compute_correlogram_by_similarity(similarity_matrix[rec_name]) + all_similarities.append(similarities) + all_errors.append(errors) + except Exception: + pass + + self.all_similarities[sorter_name] = np.concatenate(all_similarities, axis=0) + self.all_errors[sorter_name] = np.concatenate(all_errors, axis=0) + + def get_error_profile_over_similarity_bins(self, similarity_bins, sorter_name): + all_similarities = self.all_similarities[sorter_name] + all_errors = self.all_errors[sorter_name] + + order = np.argsort(all_similarities) + all_similarities = all_similarities[order] + all_errors = all_errors[order, :] + + result = {} + + for i in range(similarity_bins.size - 1): + cmin, cmax = similarity_bins[i], similarity_bins[i + 1] + amin, amax = np.searchsorted(all_similarities, [cmin, cmax]) + mean_errors = np.nanmean(all_errors[amin:amax], axis=0) + result[(cmin, cmax)] = mean_errors 
+ + return result diff --git a/src/spikeinterface/comparison/correlogramstudy.py b/src/spikeinterface/comparison/correlogramstudy.py deleted file mode 100644 index fb00c08157..0000000000 --- a/src/spikeinterface/comparison/correlogramstudy.py +++ /dev/null @@ -1,76 +0,0 @@ -from .groundtruthstudy import GroundTruthStudy -from .studytools import iter_computed_sorting -from .correlogramcomparison import CorrelogramGTComparison - -import numpy as np - - -class CorrelogramGTStudy(GroundTruthStudy): - def run_comparisons(self, exhaustive_gt=True, window_ms=100.0, bin_ms=1.0, well_detected_score=0.8, **kwargs): - self.comparisons = {} - for rec_name, sorter_name, sorting in iter_computed_sorting(self.study_folder): - gt_sorting = self.get_ground_truth(rec_name) - comp = CorrelogramGTComparison( - gt_sorting, - sorting, - exhaustive_gt=exhaustive_gt, - window_ms=window_ms, - bin_ms=bin_ms, - well_detected_score=well_detected_score, - ) - self.comparisons[(rec_name, sorter_name)] = comp - - self.exhaustive_gt = exhaustive_gt - - @property - def time_bins(self): - for key, value in self.comparisons.items(): - return value.time_bins - - def precompute_scores_by_similarities(self, good_only=True): - if not hasattr(self, "_computed"): - import sklearn - - similarity_matrix = {} - for rec_name in self.rec_names: - templates = self.get_templates(rec_name) - flat_templates = templates.reshape(templates.shape[0], -1) - similarity_matrix[rec_name] = sklearn.metrics.pairwise.cosine_similarity(flat_templates) - - self.all_similarities = {} - self.all_errors = {} - self._computed = True - - for sorter_ind, sorter_name in enumerate(self.sorter_names): - # loop over recordings - all_errors = [] - all_similarities = [] - for rec_name in self.rec_names: - try: - comp = self.comparisons[(rec_name, sorter_name)] - similarities, errors = comp.compute_correlogram_by_similarity(similarity_matrix[rec_name]) - all_similarities.append(similarities) - all_errors.append(errors) - except Exception: - pass - - self.all_similarities[sorter_name] = np.concatenate(all_similarities, axis=0) - self.all_errors[sorter_name] = np.concatenate(all_errors, axis=0) - - def get_error_profile_over_similarity_bins(self, similarity_bins, sorter_name): - all_similarities = self.all_similarities[sorter_name] - all_errors = self.all_errors[sorter_name] - - order = np.argsort(all_similarities) - all_similarities = all_similarities[order] - all_errors = all_errors[order, :] - - result = {} - - for i in range(similarity_bins.size - 1): - cmin, cmax = similarity_bins[i], similarity_bins[i + 1] - amin, amax = np.searchsorted(all_similarities, [cmin, cmax]) - mean_errors = np.nanmean(all_errors[amin:amax], axis=0) - result[(cmin, cmax)] = mean_errors - - return result From 12fd197859a3bb91099e9f5fb73fc5f74f923847 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Tue, 19 Sep 2023 12:56:55 +0200 Subject: [PATCH 024/115] Use sparsity mask and handle right border correctly --- .../postprocessing/amplitude_scalings.py | 54 +++++++++---------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/src/spikeinterface/postprocessing/amplitude_scalings.py b/src/spikeinterface/postprocessing/amplitude_scalings.py index 5a0148c5c4..4dab68fdf8 100644 --- a/src/spikeinterface/postprocessing/amplitude_scalings.py +++ b/src/spikeinterface/postprocessing/amplitude_scalings.py @@ -90,10 +90,7 @@ def _run(self, **job_kwargs): if self._params["max_dense_channels"] is not None: assert recording.get_num_channels() <= self._params["max_dense_channels"], "" 
sparsity = ChannelSparsity.create_dense(we) - sparsity_inds = sparsity.unit_id_to_channel_indices - - # easier to use in chunk function as spikes use unit_index instead o id - unit_inds_to_channel_indices = {unit_ind: sparsity_inds[unit_id] for unit_ind, unit_id in enumerate(unit_ids)} + sparsity_mask = sparsity.mask all_templates = we.get_all_templates() # precompute segment slice @@ -113,7 +110,7 @@ def _run(self, **job_kwargs): self.spikes, all_templates, segment_slices, - unit_inds_to_channel_indices, + sparsity_mask, nbefore, nafter, cut_out_before, @@ -262,7 +259,7 @@ def _init_worker_amplitude_scalings( spikes, all_templates, segment_slices, - unit_inds_to_channel_indices, + sparsity_mask, nbefore, nafter, cut_out_before, @@ -282,7 +279,7 @@ def _init_worker_amplitude_scalings( worker_ctx["cut_out_before"] = cut_out_before worker_ctx["cut_out_after"] = cut_out_after worker_ctx["return_scaled"] = return_scaled - worker_ctx["unit_inds_to_channel_indices"] = unit_inds_to_channel_indices + worker_ctx["sparsity_mask"] = sparsity_mask worker_ctx["handle_collisions"] = handle_collisions worker_ctx["delta_collision_samples"] = delta_collision_samples @@ -306,7 +303,7 @@ def _amplitude_scalings_chunk(segment_index, start_frame, end_frame, worker_ctx) recording = worker_ctx["recording"] all_templates = worker_ctx["all_templates"] segment_slices = worker_ctx["segment_slices"] - unit_inds_to_channel_indices = worker_ctx["unit_inds_to_channel_indices"] + sparsity_mask = worker_ctx["sparsity_mask"] nbefore = worker_ctx["nbefore"] cut_out_before = worker_ctx["cut_out_before"] cut_out_after = worker_ctx["cut_out_after"] @@ -339,7 +336,7 @@ def _amplitude_scalings_chunk(segment_index, start_frame, end_frame, worker_ctx) i1_margin = np.searchsorted(spikes_in_segment["sample_index"], end_frame + right) local_spikes_w_margin = spikes_in_segment[i0_margin:i1_margin] collisions_local = find_collisions( - local_spikes, local_spikes_w_margin, delta_collision_samples, unit_inds_to_channel_indices + local_spikes, local_spikes_w_margin, delta_collision_samples, sparsity_mask ) else: collisions_local = {} @@ -354,7 +351,7 @@ def _amplitude_scalings_chunk(segment_index, start_frame, end_frame, worker_ctx) continue unit_index = spike["unit_index"] sample_index = spike["sample_index"] - sparse_indices = unit_inds_to_channel_indices[unit_index] + sparse_indices = sparsity_mask[unit_index] template = all_templates[unit_index][:, sparse_indices] template = template[nbefore - cut_out_before : nbefore + cut_out_after] sample_centered = sample_index - start_frame @@ -393,7 +390,7 @@ def _amplitude_scalings_chunk(segment_index, start_frame, end_frame, worker_ctx) right, nbefore, all_templates, - unit_inds_to_channel_indices, + sparsity_mask, cut_out_before, cut_out_after, ) @@ -410,14 +407,14 @@ def _amplitude_scalings_chunk(segment_index, start_frame, end_frame, worker_ctx) ### Collision handling ### -def _are_unit_indices_overlapping(unit_inds_to_channel_indices, i, j): +def _are_unit_indices_overlapping(sparsity_mask, i, j): """ Returns True if the unit indices i and j are overlapping, False otherwise Parameters ---------- - unit_inds_to_channel_indices: dict - A dictionary mapping unit indices to channel indices + sparsity_mask: boolean mask + The sparsity mask i: int The first unit index j: int @@ -428,13 +425,13 @@ def _are_unit_indices_overlapping(unit_inds_to_channel_indices, i, j): bool True if the unit indices i and j are overlapping, False otherwise """ - if len(np.intersect1d(unit_inds_to_channel_indices[i], 
unit_inds_to_channel_indices[j])) > 0: + if np.sum(np.logical_and(sparsity_mask[i], sparsity_mask[j])) > 0: return True else: return False -def find_collisions(spikes, spikes_w_margin, delta_collision_samples, unit_inds_to_channel_indices): +def find_collisions(spikes, spikes_w_margin, delta_collision_samples, sparsity_mask): """ Finds the collisions between spikes. @@ -446,8 +443,8 @@ def find_collisions(spikes, spikes_w_margin, delta_collision_samples, unit_inds_ An array of spikes within the added margin delta_collision_samples: int The maximum number of samples between two spikes to consider them as overlapping - unit_inds_to_channel_indices: dict - A dictionary mapping unit indices to channel indices + sparsity_mask: boolean mask + The sparsity mask Returns ------- @@ -480,7 +477,7 @@ def find_collisions(spikes, spikes_w_margin, delta_collision_samples, unit_inds_ # find the overlapping spikes in space as well for possible_overlapping_spike_index in possible_overlapping_spike_indices: if _are_unit_indices_overlapping( - unit_inds_to_channel_indices, + sparsity_mask, spike["unit_index"], spikes_w_margin[possible_overlapping_spike_index]["unit_index"], ): @@ -501,7 +498,7 @@ def fit_collision( right, nbefore, all_templates, - unit_inds_to_channel_indices, + sparsity_mask, cut_out_before, cut_out_after, ): @@ -528,8 +525,8 @@ def fit_collision( The number of samples before the spike to consider for the fit. all_templates: np.ndarray A numpy array of shape (n_units, n_samples, n_channels) containing the templates. - unit_inds_to_channel_indices: dict - A dictionary mapping unit indices to channel indices. + sparsity_mask: boolean mask + The sparsity mask cut_out_before: int The number of samples to cut out before the spike. cut_out_after: int @@ -547,14 +544,15 @@ def fit_collision( sample_last_centered = np.max(collision["sample_index"]) - (start_frame - left) # construct sparsity as union between units' sparsity - sparse_indices = np.array([], dtype="int") + sparse_indices = np.zeros(sparsity_mask.shape[1], dtype="int") for spike in collision: - sparse_indices_i = unit_inds_to_channel_indices[spike["unit_index"]] - sparse_indices = np.union1d(sparse_indices, sparse_indices_i) + sparse_indices_i = sparsity_mask[spike["unit_index"]] + sparse_indices = np.logical_or(sparse_indices, sparse_indices_i) local_waveform_start = max(0, sample_first_centered - cut_out_before) local_waveform_end = min(traces_with_margin.shape[0], sample_last_centered + cut_out_after) local_waveform = traces_with_margin[local_waveform_start:local_waveform_end, sparse_indices] + num_samples_local_waveform = local_waveform.shape[0] y = local_waveform.T.flatten() X = np.zeros((len(y), len(collision))) @@ -567,8 +565,10 @@ def fit_collision( # deal with borders if sample_centered - cut_out_before < 0: full_template[: sample_centered + cut_out_after] = template_cut[cut_out_before - sample_centered :] - elif sample_centered + cut_out_after > end_frame + right: - full_template[sample_centered - cut_out_before :] = template_cut[: -cut_out_after - (end_frame + right)] + elif sample_centered + cut_out_after > num_samples_local_waveform: + full_template[sample_centered - cut_out_before :] = template_cut[ + : -(cut_out_after + sample_centered - num_samples_local_waveform) + ] else: full_template[sample_centered - cut_out_before : sample_centered + cut_out_after] = template_cut X[:, i] = full_template.T.flatten() From b1297e6aef50aa507415359b773f1c5611230b1f Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Tue, 19 Sep 2023 
13:08:36 +0200 Subject: [PATCH 025/115] Update CollisionGTStudy and CorrelogramGTStudy --- src/spikeinterface/comparison/__init__.py | 9 +- src/spikeinterface/comparison/collision.py | 96 +++++++++---------- src/spikeinterface/comparison/correlogram.py | 85 +++++++--------- .../comparison/groundtruthstudy.py | 4 +- .../_legacy_mpl_widgets/collisioncomp.py | 2 +- 5 files changed, 83 insertions(+), 113 deletions(-) diff --git a/src/spikeinterface/comparison/__init__.py b/src/spikeinterface/comparison/__init__.py index a390bb7689..7ac5b29aa2 100644 --- a/src/spikeinterface/comparison/__init__.py +++ b/src/spikeinterface/comparison/__init__.py @@ -28,12 +28,11 @@ compare_multiple_templates, MultiTemplateComparison, ) -from .collisioncomparison import CollisionGTComparison -from .correlogramcomparison import CorrelogramGTComparison + from .groundtruthstudy import GroundTruthStudy -from .collisionstudy import CollisionGTStudy -from .correlogramstudy import CorrelogramGTStudy -from .studytools import aggregate_performances_table +from .collision import CollisionGTComparison, CollisionGTStudy +from .correlogram import CorrelogramGTComparison, CorrelogramGTStudy +# from .studytools import aggregate_performances_table from .hybrid import ( HybridSpikesRecording, HybridUnitsRecording, diff --git a/src/spikeinterface/comparison/collision.py b/src/spikeinterface/comparison/collision.py index 864809b04b..c526c22ae4 100644 --- a/src/spikeinterface/comparison/collision.py +++ b/src/spikeinterface/comparison/collision.py @@ -12,8 +12,9 @@ class CollisionGTComparison(GroundTruthComparison): """ - This class is an extension of GroundTruthComparison by focusing - to benchmark spike in collision + This class is an extension of GroundTruthComparison by focusing to benchmark spike in collision. + + This class needs maintenance and need a bit of refactoring. collision_lag: float @@ -166,60 +167,49 @@ def compute_collision_by_similarity(self, similarity_matrix, unit_ids=None, good class CollisionGTStudy(GroundTruthStudy): - def run_comparisons(self, exhaustive_gt=True, collision_lag=2.0, nbins=11, **kwargs): - self.comparisons = {} - for rec_name, sorter_name, sorting in iter_computed_sorting(self.study_folder): - gt_sorting = self.get_ground_truth(rec_name) - comp = CollisionGTComparison( - gt_sorting, sorting, exhaustive_gt=exhaustive_gt, collision_lag=collision_lag, nbins=nbins - ) - self.comparisons[(rec_name, sorter_name)] = comp + def run_comparisons(self, case_keys=None, exhaustive_gt=True, collision_lag=2.0, nbins=11, **kwargs): + _kwargs = dict() + _kwargs.update(kwargs) + _kwargs["exhaustive_gt"] = exhaustive_gt + _kwargs["collision_lag"] = collision_lag + _kwargs["nbins"] = nbins + GroundTruthStudy.run_comparisons(self, case_keys=None, comparison_class=CollisionGTComparison, **_kwargs) self.exhaustive_gt = exhaustive_gt self.collision_lag = collision_lag - def get_lags(self): - fs = self.comparisons[(self.rec_names[0], self.sorter_names[0])].sorting1.get_sampling_frequency() - lags = self.comparisons[(self.rec_names[0], self.sorter_names[0])].bins / fs * 1000 + def get_lags(self, key): + comp = self.comparisons[key] + fs = comp.sorting1.get_sampling_frequency() + lags = comp.bins / fs * 1000. 
return lags - def precompute_scores_by_similarities(self, good_only=True, min_accuracy=0.9): - if not hasattr(self, "_good_only") or self._good_only != good_only: - import sklearn - - similarity_matrix = {} - for rec_name in self.rec_names: - templates = self.get_templates(rec_name) - flat_templates = templates.reshape(templates.shape[0], -1) - similarity_matrix[rec_name] = sklearn.metrics.pairwise.cosine_similarity(flat_templates) - - self.all_similarities = {} - self.all_recall_scores = {} - self.good_only = good_only - - for sorter_ind, sorter_name in enumerate(self.sorter_names): - # loop over recordings - all_similarities = [] - all_recall_scores = [] - - for rec_name in self.rec_names: - if (rec_name, sorter_name) in self.comparisons.keys(): - comp = self.comparisons[(rec_name, sorter_name)] - similarities, recall_scores, pair_names = comp.compute_collision_by_similarity( - similarity_matrix[rec_name], good_only=good_only, min_accuracy=min_accuracy - ) - - all_similarities.append(similarities) - all_recall_scores.append(recall_scores) - - self.all_similarities[sorter_name] = np.concatenate(all_similarities, axis=0) - self.all_recall_scores[sorter_name] = np.concatenate(all_recall_scores, axis=0) - - def get_mean_over_similarity_range(self, similarity_range, sorter_name): - idx = (self.all_similarities[sorter_name] >= similarity_range[0]) & ( - self.all_similarities[sorter_name] <= similarity_range[1] + def precompute_scores_by_similarities(self, case_keys=None, good_only=False, min_accuracy=0.9): + import sklearn + if case_keys is None: + case_keys = self.cases.keys() + + self.all_similarities = {} + self.all_recall_scores = {} + self.good_only = good_only + + for key in case_keys: + templates = self.get_templates(key) + flat_templates = templates.reshape(templates.shape[0], -1) + similarity = sklearn.metrics.pairwise.cosine_similarity(flat_templates) + comp = self.comparisons[key] + similarities, recall_scores, pair_names = comp.compute_collision_by_similarity( + similarity, good_only=good_only, min_accuracy=min_accuracy + ) + self.all_similarities[key] = similarities + self.all_recall_scores[key] = recall_scores + + + def get_mean_over_similarity_range(self, similarity_range, key): + idx = (self.all_similarities[key] >= similarity_range[0]) & ( + self.all_similarities[key] <= similarity_range[1] ) - all_similarities = self.all_similarities[sorter_name][idx] - all_recall_scores = self.all_recall_scores[sorter_name][idx] + all_similarities = self.all_similarities[key][idx] + all_recall_scores = self.all_recall_scores[key][idx] order = np.argsort(all_similarities) all_similarities = all_similarities[order] @@ -229,9 +219,9 @@ def get_mean_over_similarity_range(self, similarity_range, sorter_name): return mean_recall_scores - def get_lag_profile_over_similarity_bins(self, similarity_bins, sorter_name): - all_similarities = self.all_similarities[sorter_name] - all_recall_scores = self.all_recall_scores[sorter_name] + def get_lag_profile_over_similarity_bins(self, similarity_bins, key): + all_similarities = self.all_similarities[key] + all_recall_scores = self.all_recall_scores[key] order = np.argsort(all_similarities) all_similarities = all_similarities[order] diff --git a/src/spikeinterface/comparison/correlogram.py b/src/spikeinterface/comparison/correlogram.py index 9c5e1e91cf..b2376cb52d 100644 --- a/src/spikeinterface/comparison/correlogram.py +++ b/src/spikeinterface/comparison/correlogram.py @@ -11,11 +11,9 @@ class CorrelogramGTComparison(GroundTruthComparison): """ This class 
is an extension of GroundTruthComparison by focusing - to benchmark correlation reconstruction + to benchmark correlation reconstruction. - - collision_lag: float - Collision lag in ms. + This class needs maintenance and need a bit of refactoring. """ @@ -110,27 +108,21 @@ def compute_correlogram_by_similarity(self, similarity_matrix, window_ms=None): order = np.argsort(similarities) similarities = similarities[order] - errors = errors[order, :] + errors = errors[order] return similarities, errors class CorrelogramGTStudy(GroundTruthStudy): - def run_comparisons(self, exhaustive_gt=True, window_ms=100.0, bin_ms=1.0, well_detected_score=0.8, **kwargs): - self.comparisons = {} - for rec_name, sorter_name, sorting in iter_computed_sorting(self.study_folder): - gt_sorting = self.get_ground_truth(rec_name) - comp = CorrelogramGTComparison( - gt_sorting, - sorting, - exhaustive_gt=exhaustive_gt, - window_ms=window_ms, - bin_ms=bin_ms, - well_detected_score=well_detected_score, - ) - self.comparisons[(rec_name, sorter_name)] = comp - + def run_comparisons(self, case_keys=None, exhaustive_gt=True, window_ms=100.0, bin_ms=1.0, well_detected_score=0.8, **kwargs): + _kwargs = dict() + _kwargs.update(kwargs) + _kwargs["exhaustive_gt"] = exhaustive_gt + _kwargs["window_ms"] = window_ms + _kwargs["bin_ms"] = bin_ms + _kwargs["well_detected_score"] = well_detected_score + GroundTruthStudy.run_comparisons(self, case_keys=None, comparison_class=CorrelogramGTComparison, **_kwargs) self.exhaustive_gt = exhaustive_gt @property @@ -138,39 +130,28 @@ def time_bins(self): for key, value in self.comparisons.items(): return value.time_bins - def precompute_scores_by_similarities(self, good_only=True): - if not hasattr(self, "_computed"): - import sklearn - - similarity_matrix = {} - for rec_name in self.rec_names: - templates = self.get_templates(rec_name) - flat_templates = templates.reshape(templates.shape[0], -1) - similarity_matrix[rec_name] = sklearn.metrics.pairwise.cosine_similarity(flat_templates) - - self.all_similarities = {} - self.all_errors = {} - self._computed = True - - for sorter_ind, sorter_name in enumerate(self.sorter_names): - # loop over recordings - all_errors = [] - all_similarities = [] - for rec_name in self.rec_names: - try: - comp = self.comparisons[(rec_name, sorter_name)] - similarities, errors = comp.compute_correlogram_by_similarity(similarity_matrix[rec_name]) - all_similarities.append(similarities) - all_errors.append(errors) - except Exception: - pass - - self.all_similarities[sorter_name] = np.concatenate(all_similarities, axis=0) - self.all_errors[sorter_name] = np.concatenate(all_errors, axis=0) - - def get_error_profile_over_similarity_bins(self, similarity_bins, sorter_name): - all_similarities = self.all_similarities[sorter_name] - all_errors = self.all_errors[sorter_name] + def precompute_scores_by_similarities(self, case_keys=None, good_only=True): + import sklearn.metrics + + if case_keys is None: + case_keys = self.cases.keys() + + self.all_similarities = {} + self.all_errors = {} + + for key in case_keys: + templates = self.get_templates(key) + flat_templates = templates.reshape(templates.shape[0], -1) + similarity = sklearn.metrics.pairwise.cosine_similarity(flat_templates) + comp = self.comparisons[key] + similarities, errors = comp.compute_correlogram_by_similarity(similarity) + + self.all_similarities[key] = similarities + self.all_errors[key] = errors + + def get_error_profile_over_similarity_bins(self, similarity_bins, key): + all_similarities = 
self.all_similarities[key]
+        all_errors = self.all_errors[key]
 
         order = np.argsort(all_similarities)
         all_similarities = all_similarities[order]
diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py
index 9f0039b9cb..0c08318ef4 100644
--- a/src/spikeinterface/comparison/groundtruthstudy.py
+++ b/src/spikeinterface/comparison/groundtruthstudy.py
@@ -155,7 +155,7 @@ def scan_folder(self):
 
 
     def __repr__(self):
-        t = f"GroundTruthStudy {self.folder.stem} \n"
+        t = f"{self.__class__.__name__} {self.folder.stem} \n"
         t += f"  datasets: {len(self.datasets)} {list(self.datasets.keys())}\n"
         t += f"  cases: {len(self.cases)} {list(self.cases.keys())}\n"
         num_computed = sum([1 for sorting in self.sortings.values() if sorting is not None])
@@ -303,7 +303,7 @@ def get_waveform_extractor(self, key):
             we.set_recording(recording)
         return we
 
-    def get_templates(self, key, mode="mean"):
+    def get_templates(self, key, mode="average"):
         we = self.get_waveform_extractor(key)
         templates = we.get_all_templates(mode=mode)
         return templates
diff --git a/src/spikeinterface/widgets/_legacy_mpl_widgets/collisioncomp.py b/src/spikeinterface/widgets/_legacy_mpl_widgets/collisioncomp.py
index 6d981e1fd4..096a5f3933 100644
--- a/src/spikeinterface/widgets/_legacy_mpl_widgets/collisioncomp.py
+++ b/src/spikeinterface/widgets/_legacy_mpl_widgets/collisioncomp.py
@@ -1,7 +1,7 @@
 import numpy as np
 
 from .basewidget import BaseWidget
-from spikeinterface.comparison.collisioncomparison import CollisionGTComparison
+from spikeinterface.comparison import CollisionGTComparison
 
 
 class ComparisonCollisionPairByPairWidget(BaseWidget):

From 8a7a90e130e3007ad73ae840ee4e889c9a6b146f Mon Sep 17 00:00:00 2001
From: Samuel Garcia
Date: Tue, 19 Sep 2023 13:35:50 +0200
Subject: [PATCH 026/115] wip

---
 src/spikeinterface/comparison/groundtruthstudy.py | 5 +----
 .../widgets/_legacy_mpl_widgets/collisioncomp.py  | 1 -
 2 files changed, 1 insertion(+), 5 deletions(-)

diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py
index 0c08318ef4..6898f381b6 100644
--- a/src/spikeinterface/comparison/groundtruthstudy.py
+++ b/src/spikeinterface/comparison/groundtruthstudy.py
@@ -17,10 +17,7 @@
 from .paircomparisons import compare_sorter_to_ground_truth, GroundTruthComparison
 
 
-# TODO : save comparison in folders when COmparison object will be able to serialize
-# TODO ??: make an internal optional binary copy when running several external sorter
-# on the same dataset to avoid multiple save binary ? even when the recording is float32 (ks need int16)
-
+# TODO later : save comparison in folders when comparison object will be able to serialize
 
 # This is to separate names when the key are tuples when saving folders
 
diff --git a/src/spikeinterface/widgets/_legacy_mpl_widgets/collisioncomp.py b/src/spikeinterface/widgets/_legacy_mpl_widgets/collisioncomp.py
index 096a5f3933..d25f1ea97b 100644
--- a/src/spikeinterface/widgets/_legacy_mpl_widgets/collisioncomp.py
+++ b/src/spikeinterface/widgets/_legacy_mpl_widgets/collisioncomp.py
@@ -1,7 +1,6 @@
 import numpy as np
 
 from .basewidget import BaseWidget
-from spikeinterface.comparison import CollisionGTComparison
 
 
 class ComparisonCollisionPairByPairWidget(BaseWidget):

From fe6f60f45b8ee1f50e81c8d7b5b209965507c1df Mon Sep 17 00:00:00 2001
From: Samuel Garcia
Date: Tue, 19 Sep 2023 13:39:31 +0200
Subject: [PATCH 027/115] Remove studytools.py. Not needed anymore. 
--- src/spikeinterface/comparison/__init__.py | 2 +- src/spikeinterface/comparison/studytools.py | 352 -------------------- 2 files changed, 1 insertion(+), 353 deletions(-) delete mode 100644 src/spikeinterface/comparison/studytools.py diff --git a/src/spikeinterface/comparison/__init__.py b/src/spikeinterface/comparison/__init__.py index 7ac5b29aa2..bff85dde4a 100644 --- a/src/spikeinterface/comparison/__init__.py +++ b/src/spikeinterface/comparison/__init__.py @@ -32,7 +32,7 @@ from .groundtruthstudy import GroundTruthStudy from .collision import CollisionGTComparison, CollisionGTStudy from .correlogram import CorrelogramGTComparison, CorrelogramGTStudy -# from .studytools import aggregate_performances_table + from .hybrid import ( HybridSpikesRecording, HybridUnitsRecording, diff --git a/src/spikeinterface/comparison/studytools.py b/src/spikeinterface/comparison/studytools.py deleted file mode 100644 index 00119c1586..0000000000 --- a/src/spikeinterface/comparison/studytools.py +++ /dev/null @@ -1,352 +0,0 @@ -""" -High level tools to run many ground-truth comparison with -many sorter on many recordings and then collect and aggregate results -in an easy way. - -The all mechanism is based on an intrinsic organization -into a "study_folder" with several subfolder: - * raw_files : contain a copy in binary format of recordings - * sorter_folders : contains output of sorters - * ground_truth : contains a copy of sorting ground in npz format - * sortings: contains light copy of all sorting in npz format - * tables: some table in cvs format -""" - -from pathlib import Path -import shutil -import json -import os - - -from spikeinterface.core import load_extractor -from spikeinterface.core.job_tools import fix_job_kwargs -from spikeinterface.extractors import NpzSortingExtractor -from spikeinterface.sorters import sorter_dict -from spikeinterface.sorters.basesorter import is_log_ok - - -from .comparisontools import _perf_keys -from .paircomparisons import compare_sorter_to_ground_truth - - - - - -# This is deprecated and will be removed -def iter_working_folder(working_folder): - working_folder = Path(working_folder) - for rec_folder in working_folder.iterdir(): - if not rec_folder.is_dir(): - continue - for output_folder in rec_folder.iterdir(): - if (output_folder / "spikeinterface_job.json").is_file(): - with open(output_folder / "spikeinterface_job.json", "r") as f: - job_dict = json.load(f) - rec_name = job_dict["rec_name"] - sorter_name = job_dict["sorter_name"] - yield rec_name, sorter_name, output_folder - else: - rec_name = rec_folder.name - sorter_name = output_folder.name - if not output_folder.is_dir(): - continue - if not is_log_ok(output_folder): - continue - yield rec_name, sorter_name, output_folder - -# This is deprecated and will be removed -def iter_sorting_output(working_folder): - """Iterator over output_folder to retrieve all triplets of (rec_name, sorter_name, sorting).""" - for rec_name, sorter_name, output_folder in iter_working_folder(working_folder): - SorterClass = sorter_dict[sorter_name] - sorting = SorterClass.get_result_from_folder(output_folder) - yield rec_name, sorter_name, sorting - - - -def setup_comparison_study(study_folder, gt_dict, **job_kwargs): - """ - Based on a dict of (recording, sorting) create the study folder. - - Parameters - ---------- - study_folder: str - The study folder. 
- gt_dict : a dict of tuple (recording, sorting_gt) - Dict of tuple that contain recording and sorting ground truth - """ - job_kwargs = fix_job_kwargs(job_kwargs) - study_folder = Path(study_folder) - assert not study_folder.is_dir(), "'study_folder' already exists. Please remove it" - - study_folder.mkdir(parents=True, exist_ok=True) - sorting_folders = study_folder / "sortings" - log_folder = sorting_folders / "run_log" - log_folder.mkdir(parents=True, exist_ok=True) - tables_folder = study_folder / "tables" - tables_folder.mkdir(parents=True, exist_ok=True) - - for rec_name, (recording, sorting_gt) in gt_dict.items(): - # write recording using save with binary - folder = study_folder / "ground_truth" / rec_name - sorting_gt.save(folder=folder, format="numpy_folder") - folder = study_folder / "raw_files" / rec_name - recording.save(folder=folder, format="binary", **job_kwargs) - - # make an index of recording names - with open(study_folder / "names.txt", mode="w", encoding="utf8") as f: - for rec_name in gt_dict: - f.write(rec_name + "\n") - - -def get_rec_names(study_folder): - """ - Get list of keys of recordings. - Read from the 'names.txt' file in study folder. - - Parameters - ---------- - study_folder: str - The study folder. - - Returns - ------- - rec_names: list - List of names. - """ - study_folder = Path(study_folder) - with open(study_folder / "names.txt", mode="r", encoding="utf8") as f: - rec_names = f.read()[:-1].split("\n") - return rec_names - - -def get_recordings(study_folder): - """ - Get ground recording as a dict. - - They are read from the 'raw_files' folder with binary format. - - Parameters - ---------- - study_folder: str - The study folder. - - Returns - ------- - recording_dict: dict - Dict of recording. - """ - study_folder = Path(study_folder) - - rec_names = get_rec_names(study_folder) - recording_dict = {} - for rec_name in rec_names: - rec = load_extractor(study_folder / "raw_files" / rec_name) - recording_dict[rec_name] = rec - - return recording_dict - - -def get_ground_truths(study_folder): - """ - Get ground truth sorting extractor as a dict. - - They are read from the 'ground_truth' folder with npz format. - - Parameters - ---------- - study_folder: str - The study folder. - - Returns - ------- - ground_truths: dict - Dict of sorting_gt. - """ - study_folder = Path(study_folder) - rec_names = get_rec_names(study_folder) - ground_truths = {} - for rec_name in rec_names: - sorting = load_extractor(study_folder / "ground_truth" / rec_name) - ground_truths[rec_name] = sorting - return ground_truths - - -def iter_computed_names(study_folder): - sorting_folder = Path(study_folder) / "sortings" - for filename in os.listdir(sorting_folder): - if filename.endswith(".npz") and "[#]" in filename: - rec_name, sorter_name = filename.replace(".npz", "").split("[#]") - yield rec_name, sorter_name - - -def iter_computed_sorting(study_folder): - """ - Iter over sorting files. - """ - sorting_folder = Path(study_folder) / "sortings" - for filename in os.listdir(sorting_folder): - if filename.endswith(".npz") and "[#]" in filename: - rec_name, sorter_name = filename.replace(".npz", "").split("[#]") - sorting = NpzSortingExtractor(sorting_folder / filename) - yield rec_name, sorter_name, sorting - - -def collect_run_times(study_folder): - """ - Collect run times in a working folder and store it in CVS files. 
- - The output is list of (rec_name, sorter_name, run_time) - """ - import pandas as pd - - study_folder = Path(study_folder) - sorting_folders = study_folder / "sortings" - log_folder = sorting_folders / "run_log" - tables_folder = study_folder / "tables" - - tables_folder.mkdir(parents=True, exist_ok=True) - - run_times = [] - for filename in os.listdir(log_folder): - if filename.endswith(".json") and "[#]" in filename: - rec_name, sorter_name = filename.replace(".json", "").split("[#]") - with open(log_folder / filename, encoding="utf8", mode="r") as logfile: - log = json.load(logfile) - run_time = log.get("run_time", None) - run_times.append((rec_name, sorter_name, run_time)) - - run_times = pd.DataFrame(run_times, columns=["rec_name", "sorter_name", "run_time"]) - run_times = run_times.set_index(["rec_name", "sorter_name"]) - - return run_times - - -def aggregate_sorting_comparison(study_folder, exhaustive_gt=False): - """ - Loop over output folder in a tree to collect sorting output and run - ground_truth_comparison on them. - - Parameters - ---------- - study_folder: str - The study folder. - exhaustive_gt: bool (default True) - Tell if the ground true is "exhaustive" or not. In other world if the - GT have all possible units. It allows more performance measurement. - For instance, MEArec simulated dataset have exhaustive_gt=True - - Returns - ---------- - comparisons: a dict of SortingComparison - - """ - - study_folder = Path(study_folder) - - ground_truths = get_ground_truths(study_folder) - results = collect_study_sorting(study_folder) - - comparisons = {} - for (rec_name, sorter_name), sorting in results.items(): - gt_sorting = ground_truths[rec_name] - sc = compare_sorter_to_ground_truth(gt_sorting, sorting, exhaustive_gt=exhaustive_gt) - comparisons[(rec_name, sorter_name)] = sc - - return comparisons - - -def aggregate_performances_table(study_folder, exhaustive_gt=False, **karg_thresh): - """ - Aggregate some results into dataframe to have a "study" overview on all recordingXsorter. - - Tables are: - * run_times: run times per recordingXsorter - * perf_pooled_with_sum: GroundTruthComparison.see get_performance - * perf_pooled_with_average: GroundTruthComparison.see get_performance - * count_units: given some threshold count how many units : 'well_detected', 'redundant', 'false_postive_units, 'bad' - - Parameters - ---------- - study_folder: str - The study folder. - karg_thresh: dict - Threshold parameters used for the "count_units" table. - - Returns - ------- - dataframes: a dict of DataFrame - Return several useful DataFrame to compare all results. - Note that count_units depend on karg_thresh. 
- """ - import pandas as pd - - study_folder = Path(study_folder) - sorter_folders = study_folder / "sorter_folders" - tables_folder = study_folder / "tables" - - comparisons = aggregate_sorting_comparison(study_folder, exhaustive_gt=exhaustive_gt) - ground_truths = get_ground_truths(study_folder) - results = collect_study_sorting(study_folder) - - study_folder = Path(study_folder) - - dataframes = {} - - # get run times: - run_times = pd.read_csv(str(tables_folder / "run_times.csv"), sep="\t") - run_times.columns = ["rec_name", "sorter_name", "run_time"] - run_times = run_times.set_index( - [ - "rec_name", - "sorter_name", - ] - ) - dataframes["run_times"] = run_times - - perf_pooled_with_sum = pd.DataFrame(index=run_times.index, columns=_perf_keys) - dataframes["perf_pooled_with_sum"] = perf_pooled_with_sum - - perf_pooled_with_average = pd.DataFrame(index=run_times.index, columns=_perf_keys) - dataframes["perf_pooled_with_average"] = perf_pooled_with_average - - count_units = pd.DataFrame( - index=run_times.index, columns=["num_gt", "num_sorter", "num_well_detected", "num_redundant"] - ) - dataframes["count_units"] = count_units - if exhaustive_gt: - count_units["num_false_positive"] = None - count_units["num_bad"] = None - - perf_by_spiketrain = [] - - for (rec_name, sorter_name), comp in comparisons.items(): - gt_sorting = ground_truths[rec_name] - sorting = results[(rec_name, sorter_name)] - - perf = comp.get_performance(method="pooled_with_sum", output="pandas") - perf_pooled_with_sum.loc[(rec_name, sorter_name), :] = perf - - perf = comp.get_performance(method="pooled_with_average", output="pandas") - perf_pooled_with_average.loc[(rec_name, sorter_name), :] = perf - - perf = comp.get_performance(method="by_spiketrain", output="pandas") - perf["rec_name"] = rec_name - perf["sorter_name"] = sorter_name - perf = perf.reset_index() - - perf_by_spiketrain.append(perf) - - count_units.loc[(rec_name, sorter_name), "num_gt"] = len(gt_sorting.get_unit_ids()) - count_units.loc[(rec_name, sorter_name), "num_sorter"] = len(sorting.get_unit_ids()) - count_units.loc[(rec_name, sorter_name), "num_well_detected"] = comp.count_well_detected_units(**karg_thresh) - count_units.loc[(rec_name, sorter_name), "num_redundant"] = comp.count_redundant_units() - if exhaustive_gt: - count_units.loc[(rec_name, sorter_name), "num_false_positive"] = comp.count_false_positive_units() - count_units.loc[(rec_name, sorter_name), "num_bad"] = comp.count_bad_units() - - perf_by_spiketrain = pd.concat(perf_by_spiketrain) - perf_by_spiketrain = perf_by_spiketrain.set_index(["rec_name", "sorter_name", "gt_unit_id"]) - dataframes["perf_by_spiketrain"] = perf_by_spiketrain - - return dataframes From 77505adc76fce228d66347d0aeb66bacce94cc8c Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Tue, 19 Sep 2023 13:40:53 +0200 Subject: [PATCH 028/115] rm studytools part2 --- src/spikeinterface/comparison/collision.py | 1 - src/spikeinterface/comparison/correlogram.py | 1 - .../comparison/tests/test_studytools.py | 59 ------------------- 3 files changed, 61 deletions(-) delete mode 100644 src/spikeinterface/comparison/tests/test_studytools.py diff --git a/src/spikeinterface/comparison/collision.py b/src/spikeinterface/comparison/collision.py index c526c22ae4..01626b34b8 100644 --- a/src/spikeinterface/comparison/collision.py +++ b/src/spikeinterface/comparison/collision.py @@ -1,6 +1,5 @@ from .paircomparisons import GroundTruthComparison from .groundtruthstudy import GroundTruthStudy -from .studytools import 
iter_computed_sorting ## TODO remove this from .comparisontools import make_collision_events import numpy as np diff --git a/src/spikeinterface/comparison/correlogram.py b/src/spikeinterface/comparison/correlogram.py index b2376cb52d..150f5afe55 100644 --- a/src/spikeinterface/comparison/correlogram.py +++ b/src/spikeinterface/comparison/correlogram.py @@ -1,6 +1,5 @@ from .paircomparisons import GroundTruthComparison from .groundtruthstudy import GroundTruthStudy -from .studytools import iter_computed_sorting ## TODO remove this from spikeinterface.postprocessing import compute_correlograms diff --git a/src/spikeinterface/comparison/tests/test_studytools.py b/src/spikeinterface/comparison/tests/test_studytools.py deleted file mode 100644 index dbc39d5e1d..0000000000 --- a/src/spikeinterface/comparison/tests/test_studytools.py +++ /dev/null @@ -1,59 +0,0 @@ -import os -import shutil -from pathlib import Path - -import pytest - -from spikeinterface.extractors import toy_example -from spikeinterface.comparison.studytools import ( - setup_comparison_study, - iter_computed_names, - iter_computed_sorting, - get_rec_names, - get_ground_truths, - get_recordings, -) - -if hasattr(pytest, "global_test_folder"): - cache_folder = pytest.global_test_folder / "comparison" -else: - cache_folder = Path("cache_folder") / "comparison" - - -study_folder = cache_folder / "test_studytools" - - -def setup_module(): - if study_folder.is_dir(): - shutil.rmtree(study_folder) - - -def test_setup_comparison_study(): - rec0, gt_sorting0 = toy_example(num_channels=4, duration=30, seed=0, num_segments=1) - rec1, gt_sorting1 = toy_example(num_channels=32, duration=30, seed=0, num_segments=1) - - gt_dict = { - "toy_tetrode": (rec0, gt_sorting0), - "toy_probe32": (rec1, gt_sorting1), - } - setup_comparison_study(study_folder, gt_dict) - - -def test_get_ground_truths(): - names = get_rec_names(study_folder) - d = get_ground_truths(study_folder) - d = get_recordings(study_folder) - - -def test_loops(): - names = list(iter_computed_names(study_folder)) - for rec_name, sorter_name, sorting in iter_computed_sorting(study_folder): - print(rec_name, sorter_name) - print(sorting) - - -if __name__ == "__main__": - setup_module() - test_setup_comparison_study() - test_get_ground_truths() - test_loops() From b5376a9b30d84a201a6c8ad7db15c644abe993a9 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Tue, 19 Sep 2023 14:26:22 +0200 Subject: [PATCH 029/115] Modify doc for gt study --- doc/modules/comparison.rst | 101 +++++++++++------- .../comparison/groundtruthstudy.py | 6 -- 2 files changed, 62 insertions(+), 45 deletions(-) diff --git a/doc/modules/comparison.rst b/doc/modules/comparison.rst index b452307e3c..9b2e701dac 100644 --- a/doc/modules/comparison.rst +++ b/doc/modules/comparison.rst @@ -248,21 +248,19 @@ An **over-merged** unit has a relatively high agreement (>= 0.2 by default) for We also have a high level class to compare many sorters against ground truth: :py:func:`~spiekinterface.comparison.GroundTruthStudy()` -A study is a systematic performance comparison of several ground truth recordings with several sorters. +A study is a systematic performance comparison of several ground truth recordings with several sorters or several cases +like the different parameter sets. 
-The study class proposes high-level tool functions to run many ground truth comparisons with many sorters
+The study class proposes high-level tool functions to run many ground truth comparisons with many "cases"
 on many recordings and then collect and aggregate results in an easy way.
 
 The all mechanism is based on an intrinsic organization into a "study_folder" with several subfolder:
- * raw_files : contain a copy of recordings in binary format
- * sorter_folders : contains outputs of sorters
- * ground_truth : contains a copy of sorting ground truth in npz format
- * sortings: contains light copy of all sorting in npz format
- * tables: some tables in csv format
-
-In order to run and rerun the computation all gt_sorting and recordings are copied to a fast and universal format:
-binary (for recordings) and npz (for sortings).
+ * datasets: contains ground truth datasets
+ * sorters : contains outputs of sorters
+ * sortings: contains light copy of all sorting
+ * metrics: contains metrics
+ * ...
 
 .. code-block:: python
 
@@ -274,28 +272,52 @@ binary (for recordings) and npz (for sortings).
     import spikeinterface.widgets as sw
     from spikeinterface.comparison import GroundTruthStudy
 
-    # Setup study folder
-    rec0, gt_sorting0 = se.toy_example(num_channels=4, duration=10, seed=10, num_segments=1)
-    rec1, gt_sorting1 = se.toy_example(num_channels=4, duration=10, seed=0, num_segments=1)
-    gt_dict = {
-        'rec0': (rec0, gt_sorting0),
-        'rec1': (rec1, gt_sorting1),
+
+    # generate 2 simulated datasets (could also be MEArec files)
+    rec0, gt_sorting0 = generate_ground_truth_recording(num_channels=4, durations=[30.], seed=42)
+    rec1, gt_sorting1 = generate_ground_truth_recording(num_channels=4, durations=[30.], seed=91)
+
+    datasets = {
+        "toy0": (rec0, gt_sorting0),
+        "toy1": (rec1, gt_sorting1),
     }
-    study_folder = 'a_study_folder'
-    study = GroundTruthStudy.create(study_folder, gt_dict)
 
-    # all sorters for all recordings in one function.
-    sorter_list = ['herdingspikes', 'tridesclous', ]
-    study.run_sorters(sorter_list, mode_if_folder_exists="keep")
+    # define some "cases": here we want to test tridesclous2 on 2 datasets and spykingcircus on one dataset
+    # so it is a two level study (sorter_name, dataset)
+    # this could be more complicated like (sorter_name, dataset, params)
+    cases = {
+        ("tdc2", "toy0"): {
+            "label": "tridesclous2 on tetrode0",
+            "dataset": "toy0",
+            "run_sorter_params": {
+                "sorter_name": "tridesclous2",
+            },
+        },
+        #
+        ("tdc2", "toy1"): {
+            "label": "tridesclous2 on tetrode1",
+            "dataset": "toy1",
+            "run_sorter_params": {
+                "sorter_name": "tridesclous2",
+            },
+        },
+
+        ("sc", "toy0"): {
+            "label": "spykingcircus2 on tetrode0",
+            "dataset": "toy0",
+            "run_sorter_params": {
+                "sorter_name": "spykingcircus",
+                "docker_image": True
+            },
+        },
+    }
+    # this initializes a folder
+    study = GroundTruthStudy.create(study_folder, datasets=datasets, cases=cases,
+                                    levels=["sorter_name", "dataset"])
 
-    # You can re-run **run_study_sorters** as many times as you want.
-    # By default **mode='keep'** so only uncomputed sorters are re-run.
-    # For instance, just remove the "sorter_folders/rec1/herdingspikes" to re-run
-    # only one sorter on one recording.
-    #
-    # Then we copy the spike sorting outputs into a separate subfolder.
-    # This allow us to remove the "large" sorter_folders.
-    study.copy_sortings()
+
+    # all cases in one function
+    study.run_sorters()
 
     # Collect comparisons
     #
     # Note: use exhaustive_gt=True when you know exactly how many 
# Note: use exhaustive_gt=True when you know exactly how many # units in ground truth (for synthetic datasets) + # run all comparisons and loop over the results study.run_comparisons(exhaustive_gt=True) - - for (rec_name, sorter_name), comp in study.comparisons.items(): + for key, comp in study.comparisons.items(): print('*' * 10) - print(rec_name, sorter_name) + print(key) # raw counting of tp/fp/... print(comp.count_score) # summary @@ -323,26 +345,27 @@ binary (for recordings) and npz (for sortings). # Collect synthetic dataframes and display # As shown previously, the performance is returned as a pandas dataframe. - # The :py:func:`~spikeinterface.comparison.aggregate_performances_table()` function, + # The :py:func:`~spikeinterface.comparison.get_performance_by_unit()` function, # gathers all the outputs in the study folder and merges them in a single dataframe. + # Same idea for :py:func:`~spikeinterface.comparison.get_count_units()` - dataframes = study.aggregate_dataframes() + # this is a dataframe + perfs = study.get_performance_by_unit() - # Pandas dataframes can be nicely displayed as tables in the notebook. - print(dataframes.keys()) + # this is a dataframe + unit_counts = study.get_count_units() # we can also access run times - print(dataframes['run_times']) + run_times = study.get_run_times() + print(run_times) # Easy plot with seaborn - run_times = dataframes['run_times'] fig1, ax1 = plt.subplots() sns.barplot(data=run_times, x='rec_name', y='run_time', hue='sorter_name', ax=ax1) ax1.set_title('Run times') ############################################################################## - perfs = dataframes['perf_by_unit'] fig2, ax2 = plt.subplots() sns.swarmplot(data=perfs, x='sorter_name', y='recall', hue='rec_name', ax=ax2) ax2.set_title('Recall') diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py index 6898f381b6..6dc9cb30f0 100644 --- a/src/spikeinterface/comparison/groundtruthstudy.py +++ b/src/spikeinterface/comparison/groundtruthstudy.py @@ -126,9 +126,6 @@ def scan_folder(self): self.info = json.load(f) self.levels = self.info["levels"] - # if isinstance(self.levels, list): - # # because tuple caoont be stored in json - # self.levels = tuple(self.info["levels"]) for rec_file in (self.folder / "datasets/recordings").glob("*.pickle"): key = rec_file.stem @@ -169,9 +166,6 @@ def key_to_str(self, key): raise ValueError("Keys for cases must str or tuple") def run_sorters(self, case_keys=None, engine='loop', engine_kwargs={}, keep=True, verbose=False): - """ - - """ if case_keys is None: case_keys = self.cases.keys() From fac98233b84fa440b374d944d1c27b9d200cd0c1 Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Tue, 19 Sep 2023 15:31:10 +0200 Subject: [PATCH 030/115] add tutorial to load matlab data --- doc/how_to/index.rst | 1 + doc/how_to/load_matalb_data.rst | 66 +++++++++++++++++++++++++++++++++ 2 files changed, 67 insertions(+) create mode 100644 doc/how_to/load_matalb_data.rst diff --git a/doc/how_to/index.rst b/doc/how_to/index.rst index dabad818f9..fa7210d4f0 100644 --- a/doc/how_to/index.rst +++ b/doc/how_to/index.rst @@ -7,3 +7,4 @@ How to guides get_started analyse_neuropixels handle_drift + load_matalb_data diff --git a/doc/how_to/load_matalb_data.rst b/doc/how_to/load_matalb_data.rst new file mode 100644 index 0000000000..39b9a48d65 --- /dev/null +++ b/doc/how_to/load_matalb_data.rst @@ -0,0 +1,66 @@ +Exporting MATLAB Data to Binary & Loading in SpikeInterface 
+=========================================================== + +In this tutorial, we'll go through the process of exporting your data from MATLAB in a binary format and then loading it using SpikeInterface in Python. Let's break down the steps. + +Exporting Data from MATLAB +-------------------------- + +First, ensure your data is structured correctly. The data matrix should be organized such that the first dimension corresponds to samples/time and the second dimension to channels. + +.. code-block:: matlab + + % Define the size of your data + num_samples = 1000; + num_channels = 384; + + % Generate random data as an example + data = rand(num_samples, num_channels); + + % Write the data to a binary file + fileID = fopen('your_data_as_a_binary.bin', 'wb'); + fwrite(fileID, data, 'double'); + fclose(fileID); + +.. note:: + + In a real-world scenario, replace the random data generation with your actual data. + +Loading Data in SpikeInterface +----------------------------- + +This should produce a binary file called `your_data_as_a_binary.bin` in your current MATLAB directory. +You will need the complete path (i.e. its location on your computer) to load it in Python. + +Once you have your data in a binary format, you can seamlessly load it into SpikeInterface using the following script: + +.. code-block:: python + + from spikeinterface.core.binaryrecordingextractor import BinaryRecordingExtractor + from pathlib import Path + + # Define the path to your binary file + file_path = Path("/The/Path/To/Your/Data/your_data_as_a_binary.bin") + + # Ensure the file exists + assert file_path.is_file() + + # Specify the parameters of your recording + sampling_frequency = 30_000.0 # in Hz, adjust as per your matlab dataset + num_channels = 384 # adjust as per your matlab dataset + dtype = "float64" + + # Load the data using SpikeInterface + recording = BinaryRecordingExtractor(file_path, sampling_frequency=sampling_frequency, + num_channels=num_channels, dtype=dtype, gain_to_uV=1, offset_to_uV=0) + + # Verify the shape of your data + assert recording.get_traces().shape == (num_samples, num_channels) + +Common Pitfalls & Tips +---------------------- + +1. **Data Shape**: Always ensure that your MATLAB data matrix's first dimension corresponds to samples/time and the second to channels. +2. **File Path**: Double-check the file path in Python to ensure you're pointing to the right directory. +3. **Data Type**: When moving data between MATLAB and Python, it's crucial to keep the data type consistent. In our example, we used `double` in MATLAB, which corresponds to `float64` in Python. +4. **Sampling Frequency**: Ensure you set the correct sampling frequency when loading data into SpikeInterface. 
From 26cfd5db963796865b4a5ec877bfdd37e8616537 Mon Sep 17 00:00:00 2001
From: Alessio Buccino
Date: Tue, 19 Sep 2023 17:01:24 +0200
Subject: [PATCH 031/115] Percentiles need 0-100 and add unit_ids to synchrony
 metrics

---
 .../qualitymetrics/misc_metrics.py            | 25 +++++++++++--------
 1 file changed, 15 insertions(+), 10 deletions(-)

diff --git a/src/spikeinterface/qualitymetrics/misc_metrics.py b/src/spikeinterface/qualitymetrics/misc_metrics.py
index 6a42b12bb5..38add13c02 100644
--- a/src/spikeinterface/qualitymetrics/misc_metrics.py
+++ b/src/spikeinterface/qualitymetrics/misc_metrics.py
@@ -499,7 +499,7 @@ def compute_sliding_rp_violations(
     )


-def compute_synchrony_metrics(waveform_extractor, synchrony_sizes=(2, 4, 8), **kwargs):
+def compute_synchrony_metrics(waveform_extractor, synchrony_sizes=(2, 4, 8), unit_ids=None, **kwargs):
     """Compute synchrony metrics. Synchrony metrics represent the rate of occurrences of
     "synchrony_size" spikes at the exact same sample index.
@@ -509,6 +509,8 @@ def compute_synchrony_metrics(waveform_extractor, synchrony_sizes=(2, 4, 8), **k
         The waveform extractor object.
     synchrony_sizes : list or tuple, default: (2, 4, 8)
         The synchrony sizes to compute.
+    unit_ids : list or None, default: None
+        List of unit ids to compute the synchrony metrics. If None, all units are used.

     Returns
     -------
@@ -526,6 +528,9 @@ def compute_synchrony_metrics(waveform_extractor, synchrony_sizes=(2, 4, 8), **k
     sorting = waveform_extractor.sorting
     spikes = sorting.to_spike_vector(concatenated=False)

+    if unit_ids is None:
+        unit_ids = sorting.unit_ids
+
     # Pre-allocate synchrony counts
     synchrony_counts = {}
     for synchrony_size in synchrony_sizes:
@@ -538,20 +543,20 @@
         unique_spike_index, complexity = np.unique(spikes_in_segment["sample_index"], return_counts=True)

         # add counts for this segment
-        for unit_index in np.arange(len(sorting.unit_ids)):
+        for unit_id in unit_ids:
+            unit_index = sorting.unit_ids.index(unit_id)
             spikes_per_unit = spikes_in_segment[spikes_in_segment["unit_index"] == unit_index]
             # some segments/units might have no spikes
             if len(spikes_per_unit) == 0:
                 continue
             spike_complexity = complexity[np.in1d(unique_spike_index, spikes_per_unit["sample_index"])]
             for synchrony_size in synchrony_sizes:
-                synchrony_counts[synchrony_size][unit_index] += np.count_nonzero(spike_complexity >= synchrony_size)
+                synchrony_counts[synchrony_size][unit_id] += np.count_nonzero(spike_complexity >= synchrony_size)

     # add counts for this segment
     synchrony_metrics_dict = {
         f"sync_spike_{synchrony_size}": {
-            unit_id: synchrony_counts[synchrony_size][unit_index] / spike_counts[unit_id]
-            for unit_index, unit_id in enumerate(sorting.unit_ids)
+            unit_id: synchrony_counts[synchrony_size][unit_id] / spike_counts[unit_id] for unit_id in unit_ids
         }
         for synchrony_size in synchrony_sizes
     }
@@ -565,7 +570,7 @@


-def compute_firing_ranges(waveform_extractor, bin_size_s=5, percentiles=(0.05, 0.95), unit_ids=None):
+def compute_firing_ranges(waveform_extractor, bin_size_s=5, percentiles=(5, 95), unit_ids=None, **kwargs):
     """Calculate firing range, the range between the 5th and 95th percentiles of the firing rates distribution
     computed in non-overlapping time bins.
@@ -575,7 +580,7 @@ def compute_firing_ranges(waveform_extractor, bin_size_s=5, percentiles=(0.05, 0 The waveform extractor object. bin_size_s : float, default: 5 The size of the bin in seconds. - percentiles : tuple, default: (0.05, 0.95) + percentiles : tuple, default: (5, 95) The percentiles to compute. unit_ids : list or None List of unit ids to compute the firing range. If None, all units are used. @@ -617,13 +622,13 @@ def compute_firing_ranges(waveform_extractor, bin_size_s=5, percentiles=(0.05, 0 return firing_ranges -_default_params["firing_range"] = dict(bin_size_s=5, percentiles=(0.05, 0.95)) +_default_params["firing_range"] = dict(bin_size_s=5, percentiles=(5, 95)) def compute_amplitude_cv_metrics( waveform_extractor, average_num_spikes_per_bin=50, - percentiles=(0.05, 0.95), + percentiles=(5, 95), min_num_bins=10, amplitude_extension="spike_amplitudes", unit_ids=None, @@ -726,7 +731,7 @@ def compute_amplitude_cv_metrics( _default_params["amplitude_cv"] = dict( - average_num_spikes_per_bin=50, percentiles=(0.05, 0.95), min_num_bins=10, amplitude_extension="spike_amplitudes" + average_num_spikes_per_bin=50, percentiles=(5, 95), min_num_bins=10, amplitude_extension="spike_amplitudes" ) From 2bd7dd6c1c0fea0e094293f1fb17f9293ce30bb6 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Tue, 19 Sep 2023 17:13:06 +0200 Subject: [PATCH 032/115] oups --- src/spikeinterface/qualitymetrics/misc_metrics.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/spikeinterface/qualitymetrics/misc_metrics.py b/src/spikeinterface/qualitymetrics/misc_metrics.py index 38add13c02..0a37da99c3 100644 --- a/src/spikeinterface/qualitymetrics/misc_metrics.py +++ b/src/spikeinterface/qualitymetrics/misc_metrics.py @@ -536,6 +536,7 @@ def compute_synchrony_metrics(waveform_extractor, synchrony_sizes=(2, 4, 8), uni for synchrony_size in synchrony_sizes: synchrony_counts[synchrony_size] = np.zeros(len(waveform_extractor.unit_ids), dtype=np.int64) + all_unit_ids = list(sorting.unit_ids) for segment_index in range(sorting.get_num_segments()): spikes_in_segment = spikes[segment_index] @@ -544,7 +545,7 @@ def compute_synchrony_metrics(waveform_extractor, synchrony_sizes=(2, 4, 8), uni # add counts for this segment for unit_id in unit_ids: - unit_index = sorting.unit_ids.index(unit_id) + unit_index = all_unit_ids.index(unit_id) spikes_per_unit = spikes_in_segment[spikes_in_segment["unit_index"] == unit_index] # some segments/units might have no spikes if len(spikes_per_unit) == 0: From 7c958c3789f5591ad9fb8c9a4eaef1b905e5c929 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Tue, 19 Sep 2023 17:16:52 +0200 Subject: [PATCH 033/115] Unify imports and comments for quality metrics docs --- doc/modules/qualitymetrics/amplitude_cutoff.rst | 6 +++--- doc/modules/qualitymetrics/amplitude_cv.rst | 4 ++-- doc/modules/qualitymetrics/amplitude_median.rst | 6 +++--- doc/modules/qualitymetrics/d_prime.rst | 4 ++-- doc/modules/qualitymetrics/drift.rst | 4 ++-- doc/modules/qualitymetrics/firing_range.rst | 6 +++--- doc/modules/qualitymetrics/firing_rate.rst | 6 +++--- doc/modules/qualitymetrics/isi_violations.rst | 4 ++-- doc/modules/qualitymetrics/presence_ratio.rst | 6 +++--- doc/modules/qualitymetrics/sliding_rp_violations.rst | 4 ++-- doc/modules/qualitymetrics/snr.rst | 6 +++--- doc/modules/qualitymetrics/synchrony.rst | 4 ++-- 12 files changed, 30 insertions(+), 30 deletions(-) diff --git a/doc/modules/qualitymetrics/amplitude_cutoff.rst b/doc/modules/qualitymetrics/amplitude_cutoff.rst index 
9f747f8d40..a1e4d85d01 100644 --- a/doc/modules/qualitymetrics/amplitude_cutoff.rst +++ b/doc/modules/qualitymetrics/amplitude_cutoff.rst @@ -21,12 +21,12 @@ Example code .. code-block:: python - import spikeinterface.qualitymetrics as qm + import spikeinterface.qualitymetrics as sqm # It is also recommended to run `compute_spike_amplitudes(wvf_extractor)` # in order to use amplitudes from all spikes - fraction_missing = qm.compute_amplitude_cutoffs(wvf_extractor, peak_sign="neg") - # fraction_missing is a dict containing the units' IDs as keys, + fraction_missing = sqm.compute_amplitude_cutoffs(wvf_extractor, peak_sign="neg") + # fraction_missing is a dict containing the unit IDs as keys, # and their estimated fraction of missing spikes as values. Reference diff --git a/doc/modules/qualitymetrics/amplitude_cv.rst b/doc/modules/qualitymetrics/amplitude_cv.rst index 981813ef09..3edb1f9833 100644 --- a/doc/modules/qualitymetrics/amplitude_cv.rst +++ b/doc/modules/qualitymetrics/amplitude_cv.rst @@ -32,12 +32,12 @@ Example code .. code-block:: python - import spikeinterface.qualitymetrics as qm + import spikeinterface.qualitymetrics as sqm # Make recording, sorting and wvf_extractor object for your data. # It is required to run `compute_spike_amplitudes(wvf_extractor)` or # `compute_amplitude_scalings(wvf_extractor)` (if missing, values will be NaN) - amplitude_cv_median, amplitude_cv_range = qm.compute_amplitude_cv_metrics(wvf_extractor) + amplitude_cv_median, amplitude_cv_range = sqm.compute_amplitude_cv_metrics(wvf_extractor) # amplitude_cv_median and amplitude_cv_range are dicts containing the unit ids as keys, # and their amplitude_cv metrics as values. diff --git a/doc/modules/qualitymetrics/amplitude_median.rst b/doc/modules/qualitymetrics/amplitude_median.rst index ffc45d1cf6..3ac52560e8 100644 --- a/doc/modules/qualitymetrics/amplitude_median.rst +++ b/doc/modules/qualitymetrics/amplitude_median.rst @@ -20,12 +20,12 @@ Example code .. code-block:: python - import spikeinterface.qualitymetrics as qm + import spikeinterface.qualitymetrics as sqm # It is also recommended to run `compute_spike_amplitudes(wvf_extractor)` # in order to use amplitude values from all spikes. - amplitude_medians = qm.compute_amplitude_medians(wvf_extractor) - # amplitude_medians is a dict containing the units' IDs as keys, + amplitude_medians = sqm.compute_amplitude_medians(wvf_extractor) + # amplitude_medians is a dict containing the unit IDs as keys, # and their estimated amplitude medians as values. Reference diff --git a/doc/modules/qualitymetrics/d_prime.rst b/doc/modules/qualitymetrics/d_prime.rst index abb8c1dc74..e3bd61c580 100644 --- a/doc/modules/qualitymetrics/d_prime.rst +++ b/doc/modules/qualitymetrics/d_prime.rst @@ -32,9 +32,9 @@ Example code .. code-block:: python - import spikeinterface.qualitymetrics as qm + import spikeinterface.qualitymetrics as sqm - d_prime = qm.lda_metrics(all_pcs, all_labels, 0) + d_prime = sqm.lda_metrics(all_pcs, all_labels, 0) Reference diff --git a/doc/modules/qualitymetrics/drift.rst b/doc/modules/qualitymetrics/drift.rst index 4e78150ba7..ae52f7f883 100644 --- a/doc/modules/qualitymetrics/drift.rst +++ b/doc/modules/qualitymetrics/drift.rst @@ -40,12 +40,12 @@ Example code .. code-block:: python - import spikeinterface.qualitymetrics as qm + import spikeinterface.qualitymetrics as sqm # Make recording, sorting and wvf_extractor object for your data. 
# It is required to run `compute_spike_locations(wvf_extractor)` # (if missing, values will be NaN) - drift_ptps, drift_stds, drift_mads = qm.compute_drift_metrics(wvf_extractor, peak_sign="neg") + drift_ptps, drift_stds, drift_mads = sqm.compute_drift_metrics(wvf_extractor, peak_sign="neg") # drift_ptps, drift_stds, and drift_mads are dict containing the units' ID as keys, # and their metrics as values. diff --git a/doc/modules/qualitymetrics/firing_range.rst b/doc/modules/qualitymetrics/firing_range.rst index 3fd3d53573..925539e9c6 100644 --- a/doc/modules/qualitymetrics/firing_range.rst +++ b/doc/modules/qualitymetrics/firing_range.rst @@ -21,11 +21,11 @@ Example code .. code-block:: python - import spikeinterface.qualitymetrics as qm + import spikeinterface.qualitymetrics as sqm # Make recording, sorting and wvf_extractor object for your data. - firing_range = qm.compute_firing_ranges(wvf_extractor) - # firing_range is a dict containing the units' IDs as keys, + firing_range = sqm.compute_firing_ranges(wvf_extractor) + # firing_range is a dict containing the unit IDs as keys, # and their firing firing_range as values (in Hz). References diff --git a/doc/modules/qualitymetrics/firing_rate.rst b/doc/modules/qualitymetrics/firing_rate.rst index eddef3e48f..c0e15d7c2e 100644 --- a/doc/modules/qualitymetrics/firing_rate.rst +++ b/doc/modules/qualitymetrics/firing_rate.rst @@ -37,11 +37,11 @@ With SpikeInterface: .. code-block:: python - import spikeinterface.qualitymetrics as qm + import spikeinterface.qualitymetrics as sqm # Make recording, sorting and wvf_extractor object for your data. - firing_rate = qm.compute_firing_rates(wvf_extractor) - # firing_rate is a dict containing the units' IDs as keys, + firing_rate = sqm.compute_firing_rates(wvf_extractor) + # firing_rate is a dict containing the unit IDs as keys, # and their firing rates across segments as values (in Hz). References diff --git a/doc/modules/qualitymetrics/isi_violations.rst b/doc/modules/qualitymetrics/isi_violations.rst index 947e7d4938..725d9b0fd6 100644 --- a/doc/modules/qualitymetrics/isi_violations.rst +++ b/doc/modules/qualitymetrics/isi_violations.rst @@ -77,11 +77,11 @@ With SpikeInterface: .. code-block:: python - import spikeinterface.qualitymetrics as qm + import spikeinterface.qualitymetrics as sqm # Make recording, sorting and wvf_extractor object for your data. - isi_violations_ratio, isi_violations_count = qm.compute_isi_violations(wvf_extractor, isi_threshold_ms=1.0) + isi_violations_ratio, isi_violations_count = sqm.compute_isi_violations(wvf_extractor, isi_threshold_ms=1.0) References ---------- diff --git a/doc/modules/qualitymetrics/presence_ratio.rst b/doc/modules/qualitymetrics/presence_ratio.rst index e4de2248bd..5a420c8ccf 100644 --- a/doc/modules/qualitymetrics/presence_ratio.rst +++ b/doc/modules/qualitymetrics/presence_ratio.rst @@ -23,12 +23,12 @@ Example code .. code-block:: python - import spikeinterface.qualitymetrics as qm + import spikeinterface.qualitymetrics as sqm # Make recording, sorting and wvf_extractor object for your data. - presence_ratio = qm.compute_presence_ratios(wvf_extractor) - # presence_ratio is a dict containing the units' IDs as keys + presence_ratio = sqm.compute_presence_ratios(wvf_extractor) + # presence_ratio is a dict containing the unit IDs as keys # and their presence ratio (between 0 and 1) as values. 
Links to original implementations diff --git a/doc/modules/qualitymetrics/sliding_rp_violations.rst b/doc/modules/qualitymetrics/sliding_rp_violations.rst index 843242c1e8..de68c3a92f 100644 --- a/doc/modules/qualitymetrics/sliding_rp_violations.rst +++ b/doc/modules/qualitymetrics/sliding_rp_violations.rst @@ -27,11 +27,11 @@ With SpikeInterface: .. code-block:: python - import spikeinterface.qualitymetrics as qm + import spikeinterface.qualitymetrics as sqm # Make recording, sorting and wvf_extractor object for your data. - contamination = qm.compute_sliding_rp_violations(wvf_extractor, bin_size_ms=0.25) + contamination = sqm.compute_sliding_rp_violations(wvf_extractor, bin_size_ms=0.25) References ---------- diff --git a/doc/modules/qualitymetrics/snr.rst b/doc/modules/qualitymetrics/snr.rst index 288ab60515..b88d3291be 100644 --- a/doc/modules/qualitymetrics/snr.rst +++ b/doc/modules/qualitymetrics/snr.rst @@ -41,12 +41,12 @@ With SpikeInterface: .. code-block:: python - import spikeinterface.qualitymetrics as qm + import spikeinterface.qualitymetrics as sqm # Make recording, sorting and wvf_extractor object for your data. - SNRs = qm.compute_snrs(wvf_extractor) - # SNRs is a dict containing the units' IDs as keys and their SNRs as values. + SNRs = sqm.compute_snrs(wvf_extractor) + # SNRs is a dict containing the unit IDs as keys and their SNRs as values. Links to original implementations --------------------------------- diff --git a/doc/modules/qualitymetrics/synchrony.rst b/doc/modules/qualitymetrics/synchrony.rst index 2f566bf8a7..0750940199 100644 --- a/doc/modules/qualitymetrics/synchrony.rst +++ b/doc/modules/qualitymetrics/synchrony.rst @@ -27,9 +27,9 @@ Example code .. code-block:: python - import spikeinterface.qualitymetrics as qm + import spikeinterface.qualitymetrics as sqm # Make recording, sorting and wvf_extractor object for your data. 
- synchrony = qm.compute_synchrony_metrics(wvf_extractor, synchrony_sizes=(2, 4, 8)) + synchrony = sqm.compute_synchrony_metrics(wvf_extractor, synchrony_sizes=(2, 4, 8)) # synchrony is a tuple of dicts with the synchrony metrics for each unit From 16cf79e222c51ab54f82f0783a8f23734c270bdb Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Tue, 19 Sep 2023 17:56:04 +0200 Subject: [PATCH 034/115] Default synchrony sizes and assertion --- src/spikeinterface/qualitymetrics/misc_metrics.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/spikeinterface/qualitymetrics/misc_metrics.py b/src/spikeinterface/qualitymetrics/misc_metrics.py index 0a37da99c3..b02bfae9ba 100644 --- a/src/spikeinterface/qualitymetrics/misc_metrics.py +++ b/src/spikeinterface/qualitymetrics/misc_metrics.py @@ -523,7 +523,7 @@ def compute_synchrony_metrics(waveform_extractor, synchrony_sizes=(2, 4, 8), uni Based on concepts described in [Gruen]_ This code was adapted from `Elephant - Electrophysiology Analysis Toolkit `_ """ - assert np.all(s > 1 for s in synchrony_sizes), "Synchrony sizes must be greater than 1" + assert np.all([s > 1 for s in synchrony_sizes]), "Synchrony sizes must be greater than 1" spike_counts = waveform_extractor.sorting.count_num_spikes_per_unit() sorting = waveform_extractor.sorting spikes = sorting.to_spike_vector(concatenated=False) @@ -568,7 +568,7 @@ def compute_synchrony_metrics(waveform_extractor, synchrony_sizes=(2, 4, 8), uni return synchrony_metrics -_default_params["synchrony"] = dict(synchrony_sizes=(0, 2, 4)) +_default_params["synchrony"] = dict(synchrony_sizes=(2, 4, 8)) def compute_firing_ranges(waveform_extractor, bin_size_s=5, percentiles=(5, 95), unit_ids=None, **kwargs): From d3fe469bb95d4a8b3e6cff1ecde37e1bc5c4e0c6 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Tue, 19 Sep 2023 20:11:19 +0200 Subject: [PATCH 035/115] Update src/spikeinterface/qualitymetrics/misc_metrics.py --- src/spikeinterface/qualitymetrics/misc_metrics.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/spikeinterface/qualitymetrics/misc_metrics.py b/src/spikeinterface/qualitymetrics/misc_metrics.py index c742141d5d..f449b3c31b 100644 --- a/src/spikeinterface/qualitymetrics/misc_metrics.py +++ b/src/spikeinterface/qualitymetrics/misc_metrics.py @@ -523,7 +523,7 @@ def compute_synchrony_metrics(waveform_extractor, synchrony_sizes=(2, 4, 8), uni Based on concepts described in [Gruen]_ This code was adapted from `Elephant - Electrophysiology Analysis Toolkit `_ """ - assert np.all([s > 1 for s in synchrony_sizes]), "Synchrony sizes must be greater than 1" + assert min(synchrony_sizes) > 1, "Synchrony sizes must be greater than 1" spike_counts = waveform_extractor.sorting.count_num_spikes_per_unit() sorting = waveform_extractor.sorting spikes = sorting.to_spike_vector(concatenated=False) From d7aaa95e295d16fd1c9e6fe10fd82f93029a5cb1 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Tue, 19 Sep 2023 21:01:18 +0200 Subject: [PATCH 036/115] gt study widget xlim --- src/spikeinterface/widgets/gtstudy.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/spikeinterface/widgets/gtstudy.py b/src/spikeinterface/widgets/gtstudy.py index bc2c1246b7..438858beae 100644 --- a/src/spikeinterface/widgets/gtstudy.py +++ b/src/spikeinterface/widgets/gtstudy.py @@ -243,10 +243,14 @@ def plot_matplotlib(self, data_plot, **backend_kwargs): study = dp.study perfs = study.get_performance_by_unit(case_keys=dp.case_keys) + max_metric = 0 for key in 
dp.case_keys: x = study.get_metrics(key)[dp.metric_name].values y = perfs.xs(key)[dp.performance_name].values label = dp.study.cases[key]["label"] self.ax.scatter(x, y, label=label) + max_metric = max(max_metric, np.max(x)) - self.ax.legend() \ No newline at end of file + self.ax.legend() + self.ax.set_xlim(0, max_metric * 1.05) + self.ax.set_ylim(0, 1.05) \ No newline at end of file From a395c3c7253cd7dadd813b25a4862610221f9cf4 Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Wed, 20 Sep 2023 10:24:30 +0200 Subject: [PATCH 037/115] suggestions --- doc/how_to/index.rst | 2 +- ...d_matalb_data.rst => load_matlab_data.rst} | 26 +++++++++++-------- 2 files changed, 16 insertions(+), 12 deletions(-) rename doc/how_to/{load_matalb_data.rst => load_matlab_data.rst} (70%) diff --git a/doc/how_to/index.rst b/doc/how_to/index.rst index fa7210d4f0..da94cf549c 100644 --- a/doc/how_to/index.rst +++ b/doc/how_to/index.rst @@ -7,4 +7,4 @@ How to guides get_started analyse_neuropixels handle_drift - load_matalb_data + load_matlab_data diff --git a/doc/how_to/load_matalb_data.rst b/doc/how_to/load_matlab_data.rst similarity index 70% rename from doc/how_to/load_matalb_data.rst rename to doc/how_to/load_matlab_data.rst index 39b9a48d65..cca579036a 100644 --- a/doc/how_to/load_matalb_data.rst +++ b/doc/how_to/load_matlab_data.rst @@ -7,15 +7,16 @@ Exporting Data from MATLAB -------------------------- First, ensure your data is structured correctly. The data matrix should be organized such that the first dimension corresponds to samples/time and the second dimension to channels. +In the following MATLAB code, we generate random data as an example and then write it to a binary file. .. code-block:: matlab % Define the size of your data - num_samples = 1000; - num_channels = 384; + numSamples = 1000; + numChannels = 384; % Generate random data as an example - data = rand(num_samples, num_channels); + data = rand(numSamples, numChannels); % Write the data to a binary file fileID = fopen('your_data_as_a_binary.bin', 'wb'); @@ -36,22 +37,24 @@ Once you have your data in a binary format, you can seamlessly load it into Spik .. code-block:: python - from spikeinterface.core.binaryrecordingextractor import BinaryRecordingExtractor + import spikeinterface as si from pathlib import Path - # Define the path to your binary file + # In linux or mac file_path = Path("/The/Path/To/Your/Data/your_data_as_a_binary.bin") + # or for Windows + # file_path = Path(r"c:\path\to\your\data\your_data_as_a_binary.bin") # Ensure the file exists assert file_path.is_file() # Specify the parameters of your recording - sampling_frequency = 30_000.0 # in Hz, adjust as per your matlab dataset - num_channels = 384 # adjust as per your matlab dataset - dtype = "float64" + sampling_frequency = 30_000.0 # in Hz, adjust as per your MATLAB dataset + num_channels = 384 # adjust as per your MATLAB dataset + dtype = "float64" # equivalent of MATLAB double # Load the data using SpikeInterface - recording = BinaryRecordingExtractor(file_path, sampling_frequency=sampling_frequency, + recording = si.read_binary(file_path, sampling_frequency=sampling_frequency, num_channels=num_channels, dtype=dtype, gain_to_uV=1, offset_to_uV=0) # Verify the shape of your data @@ -61,6 +64,7 @@ Common Pitfalls & Tips ---------------------- 1. **Data Shape**: Always ensure that your MATLAB data matrix's first dimension corresponds to samples/time and the second to channels. -2. 
**File Path**: Double-check the file path in Python to ensure you are pointing to the right directory.
 3. **Data Type**: When moving data between MATLAB and Python, it's crucial to keep the data type consistent. In our example, we used `double` in MATLAB, which corresponds to `float64` in Python.
-4. **Sampling Frequency**: Ensure you set the correct sampling frequency when loading data into SpikeInterface.
+4. **Sampling Frequency**: Ensure you set the correct sampling frequency in Hz when loading data into SpikeInterface.
+5. **Working on Python**: Matlab to python can feel like a big jump. If you are new to Python, we recommend checking out numpy's [Python for MATLAB Users](https://numpy.org/doc/stable/user/numpy-for-matlab-users.html) guide.

From 6130e5bad0c8d825a4c44da881b5473e691a8712 Mon Sep 17 00:00:00 2001
From: Heberto Mayorquin
Date: Wed, 20 Sep 2023 10:27:17 +0200
Subject: [PATCH 038/115] add an assertion

---
 doc/how_to/load_matlab_data.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/how_to/load_matlab_data.rst b/doc/how_to/load_matlab_data.rst
index cca579036a..0a8345b792 100644
--- a/doc/how_to/load_matlab_data.rst
+++ b/doc/how_to/load_matlab_data.rst
@@ -46,7 +46,7 @@ Once you have your data in a binary format, you can seamlessly load it into Spik

     # Ensure the file exists
-    assert file_path.is_file()
+    assert file_path.is_file(), f"Your path {file_path} is not a file, you probably have a typo or got the wrong path."

     # Specify the parameters of your recording
     sampling_frequency = 30_000.0  # in Hz, adjust as per your MATLAB dataset

From 9a97e68f848d1126126bfecd819f456e12113813 Mon Sep 17 00:00:00 2001
From: Samuel Garcia
Date: Wed, 20 Sep 2023 10:52:05 +0200
Subject: [PATCH 039/115] Improve the concept of check_if_json_serializable to
 support more serialization engines, like pickle.
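
A minimal sketch of the usage this refactoring enables, assuming the names
introduced in the diff below (the method is spelled `check_serializablility`
in the code, and `generate_recording` is used here the same way the patched
tests use it):

.. code-block:: python

    from spikeinterface.core import generate_recording

    recording = generate_recording(seed=0, durations=[2])

    # Per-engine query replaces the single json-only check
    if recording.check_serializablility("json"):
        recording.dump_to_json("recording.json")
    elif recording.check_serializablility("pickle"):
        # the diff notes that pickle dumps lose the relative_to handling
        recording.dump_to_pickle("recording.pkl")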
--- src/spikeinterface/comparison/hybrid.py | 6 ++- .../comparison/multicomparisons.py | 7 ++- src/spikeinterface/core/base.py | 50 +++++++++++++------ src/spikeinterface/core/generate.py | 2 + src/spikeinterface/core/numpyextractors.py | 16 ++++-- src/spikeinterface/core/old_api_utils.py | 8 ++- src/spikeinterface/core/tests/test_base.py | 12 +++-- .../core/tests/test_jsonification.py | 10 +++- .../core/tests/test_waveform_extractor.py | 41 +++++++++++++-- src/spikeinterface/core/waveform_extractor.py | 34 ++++++++++--- src/spikeinterface/preprocessing/motion.py | 3 +- src/spikeinterface/sorters/basesorter.py | 3 +- 12 files changed, 150 insertions(+), 42 deletions(-) diff --git a/src/spikeinterface/comparison/hybrid.py b/src/spikeinterface/comparison/hybrid.py index af410255b9..c48ce70147 100644 --- a/src/spikeinterface/comparison/hybrid.py +++ b/src/spikeinterface/comparison/hybrid.py @@ -84,7 +84,8 @@ def __init__( ) # save injected sorting if necessary self.injected_sorting = injected_sorting - if not self.injected_sorting.check_if_json_serializable(): + # if not self.injected_sorting.check_if_json_serializable(): + if not self.injected_sorting.check_serializablility("json"): assert injected_sorting_folder is not None, "Provide injected_sorting_folder to injected sorting object" self.injected_sorting = self.injected_sorting.save(folder=injected_sorting_folder) @@ -180,7 +181,8 @@ def __init__( self.injected_sorting = injected_sorting # save injected sorting if necessary - if not self.injected_sorting.check_if_json_serializable(): + # if not self.injected_sorting.check_if_json_serializable(): + if not self.injected_sorting.check_serializablility("json"): assert injected_sorting_folder is not None, "Provide injected_sorting_folder to injected sorting object" self.injected_sorting = self.injected_sorting.save(folder=injected_sorting_folder) diff --git a/src/spikeinterface/comparison/multicomparisons.py b/src/spikeinterface/comparison/multicomparisons.py index 9e02fd5b2d..3a7075905e 100644 --- a/src/spikeinterface/comparison/multicomparisons.py +++ b/src/spikeinterface/comparison/multicomparisons.py @@ -182,7 +182,8 @@ def get_agreement_sorting(self, minimum_agreement_count=1, minimum_agreement_cou def save_to_folder(self, save_folder): for sorting in self.object_list: assert ( - sorting.check_if_json_serializable() + # sorting.check_if_json_serializable() + sorting.check_serializablility("json") ), "MultiSortingComparison.save_to_folder() need json serializable sortings" save_folder = Path(save_folder) @@ -244,7 +245,9 @@ def __init__( BaseSorting.__init__(self, sampling_frequency=sampling_frequency, unit_ids=unit_ids) - self._is_json_serializable = False + # self._is_json_serializable = False + self._serializablility["json"] = False + self._serializablility["pickle"] = True if len(unit_ids) > 0: for k in ("agreement_number", "avg_agreement", "unit_ids"): diff --git a/src/spikeinterface/core/base.py b/src/spikeinterface/core/base.py index 87c0805630..d87bd617c4 100644 --- a/src/spikeinterface/core/base.py +++ b/src/spikeinterface/core/base.py @@ -58,7 +58,8 @@ def __init__(self, main_ids: Sequence) -> None: self._properties = {} self._is_dumpable = True - self._is_json_serializable = True + # self._is_json_serializable = True + self._serializablility = {'json': True, 'pickle': True} # extractor specific list of pip extra requirements self.extra_requirements = [] @@ -490,6 +491,18 @@ def check_if_dumpable(self): return all([v.check_if_dumpable() for k, v in value.items()]) return 
self._is_dumpable + def check_serializablility(self, type="json"): + kwargs = self._kwargs + for value in kwargs.values(): + # here we check if the value is a BaseExtractor, a list of BaseExtractors, or a dict of BaseExtractors + if isinstance(value, BaseExtractor): + return value.check_serializablility(type=type) + elif isinstance(value, list) and (len(value) > 0) and isinstance(value[0], BaseExtractor): + return all([v.check_serializablility(type=type) for v in value]) + elif isinstance(value, dict) and isinstance(value[list(value.keys())[0]], BaseExtractor): + return all([v.check_serializablility(type=type) for k, v in value.items()]) + return self._serializablility[type] + def check_if_json_serializable(self): """ Check if the object is json serializable, including nested objects. @@ -499,16 +512,23 @@ def check_if_json_serializable(self): bool True if the object is json serializable, False otherwise. """ - kwargs = self._kwargs - for value in kwargs.values(): - # here we check if the value is a BaseExtractor, a list of BaseExtractors, or a dict of BaseExtractors - if isinstance(value, BaseExtractor): - return value.check_if_json_serializable() - elif isinstance(value, list) and (len(value) > 0) and isinstance(value[0], BaseExtractor): - return all([v.check_if_json_serializable() for v in value]) - elif isinstance(value, dict) and isinstance(value[list(value.keys())[0]], BaseExtractor): - return all([v.check_if_json_serializable() for k, v in value.items()]) - return self._is_json_serializable + # we keep this for backward compatilibity or not ???? + return self.check_serializablility("json") + + # kwargs = self._kwargs + # for value in kwargs.values(): + # # here we check if the value is a BaseExtractor, a list of BaseExtractors, or a dict of BaseExtractors + # if isinstance(value, BaseExtractor): + # return value.check_if_json_serializable() + # elif isinstance(value, list) and (len(value) > 0) and isinstance(value[0], BaseExtractor): + # return all([v.check_if_json_serializable() for v in value]) + # elif isinstance(value, dict) and isinstance(value[list(value.keys())[0]], BaseExtractor): + # return all([v.check_if_json_serializable() for k, v in value.items()]) + # return self._is_json_serializable + + def check_if_pickle_serializable(self): + # is this needed + return self.check_serializablility("pickle") @staticmethod def _get_file_path(file_path: Union[str, Path], extensions: Sequence) -> Path: @@ -557,7 +577,7 @@ def dump(self, file_path: Union[str, Path], relative_to=None, folder_metadata=No if str(file_path).endswith(".json"): self.dump_to_json(file_path, relative_to=relative_to, folder_metadata=folder_metadata) elif str(file_path).endswith(".pkl") or str(file_path).endswith(".pickle"): - self.dump_to_pickle(file_path, relative_to=relative_to, folder_metadata=folder_metadata) + self.dump_to_pickle(file_path, folder_metadata=folder_metadata) else: raise ValueError("Dump: file must .json or .pkl") @@ -576,7 +596,8 @@ def dump_to_json(self, file_path: Union[str, Path, None] = None, relative_to=Non folder_metadata: str, Path, or None Folder with files containing additional information (e.g. probe in BaseRecording) and properties. 
""" - assert self.check_if_json_serializable(), "The extractor is not json serializable" + # assert self.check_if_json_serializable(), "The extractor is not json serializable" + assert self.check_serializablility("json"), "The extractor is not json serializable" # Writing paths as relative_to requires recursively expanding the dict if relative_to: @@ -814,7 +835,8 @@ def save_to_folder(self, name=None, folder=None, verbose=True, **save_kwargs): # dump provenance provenance_file = folder / f"provenance.json" - if self.check_if_json_serializable(): + # if self.check_if_json_serializable(): + if self.check_serializablility("json"): self.dump(provenance_file) else: provenance_file.write_text(json.dumps({"warning": "the provenace is not dumpable!!!"}), encoding="utf8") diff --git a/src/spikeinterface/core/generate.py b/src/spikeinterface/core/generate.py index 07837bcef7..706054c957 100644 --- a/src/spikeinterface/core/generate.py +++ b/src/spikeinterface/core/generate.py @@ -1431,5 +1431,7 @@ def generate_ground_truth_recording( ) recording.annotate(is_filtered=True) recording.set_probe(probe, in_place=True) + recording.set_property("gain_to_uV", np.ones(num_channels)) + recording.set_property("offset_to_uV", np.zeros(num_channels)) return recording, sorting diff --git a/src/spikeinterface/core/numpyextractors.py b/src/spikeinterface/core/numpyextractors.py index d5663156c7..f55b975ddb 100644 --- a/src/spikeinterface/core/numpyextractors.py +++ b/src/spikeinterface/core/numpyextractors.py @@ -64,7 +64,9 @@ def __init__(self, traces_list, sampling_frequency, t_starts=None, channel_ids=N assert len(t_starts) == len(traces_list), "t_starts must be a list of same size than traces_list" t_starts = [float(t_start) for t_start in t_starts] - self._is_json_serializable = False + # self._is_json_serializable = False + self._serializablility["json"] = False + self._serializablility["pickle"] = False for i, traces in enumerate(traces_list): if t_starts is None: @@ -127,7 +129,9 @@ def __init__(self, spikes, sampling_frequency, unit_ids): BaseSorting.__init__(self, sampling_frequency, unit_ids) self._is_dumpable = True - self._is_json_serializable = False + # self._is_json_serializable = False + self._serializablility["json"] = False + self._serializablility["pickle"] = False if spikes.size == 0: nseg = 1 @@ -358,7 +362,9 @@ def __init__(self, shm_name, shape, sampling_frequency, unit_ids, dtype=minimum_ BaseSorting.__init__(self, sampling_frequency, unit_ids) self._is_dumpable = True - self._is_json_serializable = False + # self._is_json_serializable = False + self._serializablility["json"] = False + self._serializablility["pickle"] = False self.shm = SharedMemory(shm_name, create=False) self.shm_spikes = np.ndarray(shape=shape, dtype=dtype, buffer=self.shm.buf) @@ -517,7 +523,9 @@ def __init__(self, snippets_list, spikesframes_list, sampling_frequency, nbefore ) self._is_dumpable = False - self._is_json_serializable = False + # self._is_json_serializable = False + self._serializablility["json"] = False + self._serializablility["pickle"] = False for snippets, spikesframes in zip(snippets_list, spikesframes_list): snp_segment = NumpySnippetsSegment(snippets, spikesframes) diff --git a/src/spikeinterface/core/old_api_utils.py b/src/spikeinterface/core/old_api_utils.py index 1ff31127f4..38fbef1547 100644 --- a/src/spikeinterface/core/old_api_utils.py +++ b/src/spikeinterface/core/old_api_utils.py @@ -183,7 +183,9 @@ def __init__(self, oldapi_recording_extractor): # set _is_dumpable to False to use dumping 
mechanism of old extractor self._is_dumpable = False - self._is_json_serializable = False + # self._is_json_serializable = False + self._serializablility["json"] = False + self._serializablility["pickle"] = False self.annotate(is_filtered=oldapi_recording_extractor.is_filtered) @@ -269,7 +271,9 @@ def __init__(self, oldapi_sorting_extractor): self.add_sorting_segment(sorting_segment) self._is_dumpable = False - self._is_json_serializable = False + # self._is_json_serializable = False + self._serializablility["json"] = False + self._serializablility["pickle"] = False # add old properties copy_properties(oldapi_extractor=oldapi_sorting_extractor, new_extractor=self) diff --git a/src/spikeinterface/core/tests/test_base.py b/src/spikeinterface/core/tests/test_base.py index ea1a9cf0d2..77a5d7d9bf 100644 --- a/src/spikeinterface/core/tests/test_base.py +++ b/src/spikeinterface/core/tests/test_base.py @@ -50,18 +50,22 @@ def test_check_if_json_serializable(): test_extractor = generate_recording(seed=0, durations=[2]) # make a list of dumpable objects - test_extractor._is_json_serializable = True + # test_extractor._is_json_serializable = True + test_extractor._serializablility["json"] = True extractors_json_serializable = make_nested_extractors(test_extractor) for extractor in extractors_json_serializable: print(extractor) - assert extractor.check_if_json_serializable() + # assert extractor.check_if_json_serializable() + assert extractor.check_serializablility("json") # make not dumpable - test_extractor._is_json_serializable = False + # test_extractor._is_json_serializable = False + test_extractor._serializablility["json"] = False extractors_not_json_serializable = make_nested_extractors(test_extractor) for extractor in extractors_not_json_serializable: print(extractor) - assert not extractor.check_if_json_serializable() + # assert not extractor.check_if_json_serializable() + assert not extractor.check_serializablility("json") if __name__ == "__main__": diff --git a/src/spikeinterface/core/tests/test_jsonification.py b/src/spikeinterface/core/tests/test_jsonification.py index 473648c5ec..8572cda23e 100644 --- a/src/spikeinterface/core/tests/test_jsonification.py +++ b/src/spikeinterface/core/tests/test_jsonification.py @@ -142,9 +142,12 @@ def __init__(self, attribute, other_extractor=None, extractor_list=None, extract self.extractor_list = extractor_list self.extractor_dict = extractor_dict + BaseExtractor.__init__(self, main_ids=['1', '2']) # this already the case by default self._is_dumpable = True - self._is_json_serializable = True + # self._is_json_serializable = True + self._serializablility["json"] = True + self._serializablility["pickle"] = True self._kwargs = { "attribute": attribute, @@ -195,3 +198,8 @@ def test_encoding_numpy_scalars_within_nested_extractors_list(nested_extractor_l def test_encoding_numpy_scalars_within_nested_extractors_dict(nested_extractor_dict): json.dumps(nested_extractor_dict, cls=SIJsonEncoder) + + +if __name__ == '__main__': + nested_extractor = nested_extractor() + test_encoding_numpy_scalars_within_nested_extractors(nested_extractor_) \ No newline at end of file diff --git a/src/spikeinterface/core/tests/test_waveform_extractor.py b/src/spikeinterface/core/tests/test_waveform_extractor.py index 107ef5f180..f53b9cf18d 100644 --- a/src/spikeinterface/core/tests/test_waveform_extractor.py +++ b/src/spikeinterface/core/tests/test_waveform_extractor.py @@ -6,7 +6,7 @@ import zarr -from spikeinterface.core import generate_recording, generate_sorting, 
NumpySorting, ChannelSparsity +from spikeinterface.core import generate_recording, generate_sorting, NumpySorting, ChannelSparsity, generate_ground_truth_recording from spikeinterface import WaveformExtractor, BaseRecording, extract_waveforms, load_waveforms from spikeinterface.core.waveform_extractor import precompute_sparsity @@ -509,11 +509,46 @@ def test_compute_sparsity(): ) print(sparsity) +def test_non_json_object(): + recording, sorting = generate_ground_truth_recording( + durations=[30, 40], + sampling_frequency=30000.0, + num_channels=32, + num_units=5, + ) + + # recording is not save to keep it in memory + sorting = sorting.save() + + wf_folder = cache_folder / "test_waveform_extractor" + if wf_folder.is_dir(): + shutil.rmtree(wf_folder) + + + we = extract_waveforms( + recording, + sorting, + wf_folder, + mode="folder", + sparsity=None, + sparse=False, + ms_before=1.0, + ms_after=1.6, + max_spikes_per_unit=50, + n_jobs=4, + chunk_size=30000, + progress_bar=True, + ) + + # This used to fail because of json + we = load_waveforms(wf_folder) + if __name__ == "__main__": - test_WaveformExtractor() + # test_WaveformExtractor() # test_extract_waveforms() - # test_sparsity() # test_portability() # test_recordingless() # test_compute_sparsity() + test_non_json_object() + diff --git a/src/spikeinterface/core/waveform_extractor.py b/src/spikeinterface/core/waveform_extractor.py index 6881ab3ec5..53852bf319 100644 --- a/src/spikeinterface/core/waveform_extractor.py +++ b/src/spikeinterface/core/waveform_extractor.py @@ -159,11 +159,20 @@ def load_from_folder( else: rec_attributes["probegroup"] = None else: - try: - recording = load_extractor(folder / "recording.json", base_folder=folder) - rec_attributes = None - except: + recording = None + if (folder / "recording.json").exists(): + try: + recording = load_extractor(folder / "recording.json", base_folder=folder) + except: + pass + elif (folder / "recording.pickle").exists(): + try: + recording = load_extractor(folder / "recording.pickle") + except: + pass + if recording is None: raise Exception("The recording could not be loaded. You can use the `with_recording=False` argument") + rec_attributes = None if sorting is None: sorting = load_extractor(folder / "sorting.json", base_folder=folder) @@ -271,9 +280,16 @@ def create( else: relative_to = None - if recording.check_if_json_serializable(): + # if recording.check_if_json_serializable(): + if recording.check_serializablility("json"): recording.dump(folder / "recording.json", relative_to=relative_to) - if sorting.check_if_json_serializable(): + elif recording.check_serializablility("pickle"): + # In this case we loose the relative_to!! + # TODO make sure that we do not dump to pickle a NumpyRecording!!!!! 
+ recording.dump(folder / "recording.pickle") + + # if sorting.check_if_json_serializable(): + if sorting.check_serializablility("json"): sorting.dump(folder / "sorting.json", relative_to=relative_to) else: warn( @@ -879,9 +895,11 @@ def save( (folder / "params.json").write_text(json.dumps(check_json(self._params), indent=4), encoding="utf8") if self.has_recording(): - if self.recording.check_if_json_serializable(): + # if self.recording.check_if_json_serializable(): + if self.recording.check_serializablility("json"): self.recording.dump(folder / "recording.json", relative_to=relative_to) - if self.sorting.check_if_json_serializable(): + # if self.sorting.check_if_json_serializable(): + if self.sorting.check_serializablility("json"): self.sorting.dump(folder / "sorting.json", relative_to=relative_to) else: warn( diff --git a/src/spikeinterface/preprocessing/motion.py b/src/spikeinterface/preprocessing/motion.py index e2ef6e6794..0054fb94d4 100644 --- a/src/spikeinterface/preprocessing/motion.py +++ b/src/spikeinterface/preprocessing/motion.py @@ -333,7 +333,8 @@ def correct_motion( ) (folder / "parameters.json").write_text(json.dumps(parameters, indent=4, cls=SIJsonEncoder), encoding="utf8") (folder / "run_times.json").write_text(json.dumps(run_times, indent=4), encoding="utf8") - if recording.check_if_json_serializable(): + # if recording.check_if_json_serializable(): + if recording.check_serializablility("json"): recording.dump_to_json(folder / "recording.json") np.save(folder / "peaks.npy", peaks) diff --git a/src/spikeinterface/sorters/basesorter.py b/src/spikeinterface/sorters/basesorter.py index c7581ba1e1..da20506965 100644 --- a/src/spikeinterface/sorters/basesorter.py +++ b/src/spikeinterface/sorters/basesorter.py @@ -137,7 +137,8 @@ def initialize_folder(cls, recording, output_folder, verbose, remove_existing_fo ) rec_file = output_folder / "spikeinterface_recording.json" - if recording.check_if_json_serializable(): + # if recording.check_if_json_serializable(): + if recording.check_serializablility("json"): recording.dump_to_json(rec_file, relative_to=output_folder) else: d = {"warning": "The recording is not serializable to json"} From 0842509422d8498fab0c506d6ed2839b4f4d0a74 Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Wed, 20 Sep 2023 12:29:11 +0200 Subject: [PATCH 040/115] my final version --- doc/how_to/load_matlab_data.rst | 30 ++++++++++++++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) diff --git a/doc/how_to/load_matlab_data.rst b/doc/how_to/load_matlab_data.rst index 0a8345b792..3e602012a1 100644 --- a/doc/how_to/load_matlab_data.rst +++ b/doc/how_to/load_matlab_data.rst @@ -55,16 +55,42 @@ Once you have your data in a binary format, you can seamlessly load it into Spik # Load the data using SpikeInterface recording = si.read_binary(file_path, sampling_frequency=sampling_frequency, - num_channels=num_channels, dtype=dtype, gain_to_uV=1, offset_to_uV=0) + num_channels=num_channels, dtype=dtype) # Verify the shape of your data assert recording.get_traces().shape == (num_samples, num_channels) +This should be enough to get you started with loading your MATLAB data into SpikeInterface. You can use all the Spikeinterface machinery to process your data, including filtering, spike sorting, and more. + Common Pitfalls & Tips ---------------------- -1. **Data Shape**: Always ensure that your MATLAB data matrix's first dimension corresponds to samples/time and the second to channels. +1. 
**Data Shape**: Always ensure that your MATLAB data matrix's first dimension corresponds to samples/time and the second to channels. If the time happens to be in the second dimension, you can use `time_axis=1` as an argument in `si.read_binary()` to account for this. 2. **File Path**: Double-check the file path in Python to ensure you are pointing to the right directory. 3. **Data Type**: When moving data between MATLAB and Python, it's crucial to keep the data type consistent. In our example, we used `double` in MATLAB, which corresponds to `float64` in Python. 4. **Sampling Frequency**: Ensure you set the correct sampling frequency in Hz when loading data into SpikeInterface. 5. **Working on Python**: Matlab to python can feel like a big jump. If you are new to Python, we recommend checking out numpy's [Python for MATLAB Users](https://numpy.org/doc/stable/user/numpy-for-matlab-users.html) guide. + + +Using gains and offsets for integer data +---------------------------------------- + +A common technique used in raw formats is to store data as integer values, which provides a memory-efficient representation (i.e. lower ram) and use a gain and offset to convert it to float values that represent meaningful physical units. +In SpikeInterface this is done using the `gain_to_uV` and `offset_to_uV` parameters as the we handle traces in microvolts. Both values can be passed to `read_binary` when loading the data: + +.. code-block:: python + + sampling_frequency = 30_000.0 # in Hz, adjust as per your MATLAB dataset + num_channels = 384 # adjust as per your MATLAB dataset + dtype_int = 'int16' # adjust as per your MATLAB dataset + gain_to_uV = 0.195 # adjust as per your MATLAB dataset + offset_to_uV = 0 # adjust as per your MATLAB dataset + + recording = si.read_binary(file_path, sampling_frequency=sampling_frequency, + num_channels=num_channels, dtype=dtype_int, + gain_to_uV=gain_to_uV, offset_to_uV=offset_to_uV) + + recording.get_traces(start) + + +This will equip your recording object with capabilities to convert the data to float values in uV using the `get_traces()` method with the `return_scaled` parameter set to True. From 1ead6a33e658bf5a0365d21506a90dd9bd32e67c Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Wed, 20 Sep 2023 12:45:06 +0200 Subject: [PATCH 041/115] final review --- doc/how_to/load_matlab_data.rst | 72 +++++++++++++++++---------------- 1 file changed, 38 insertions(+), 34 deletions(-) diff --git a/doc/how_to/load_matlab_data.rst b/doc/how_to/load_matlab_data.rst index 3e602012a1..0a80f1fdf9 100644 --- a/doc/how_to/load_matlab_data.rst +++ b/doc/how_to/load_matlab_data.rst @@ -1,13 +1,13 @@ Exporting MATLAB Data to Binary & Loading in SpikeInterface =========================================================== -In this tutorial, we'll go through the process of exporting your data from MATLAB in a binary format and then loading it using SpikeInterface in Python. Let's break down the steps. +In this tutorial, we will walk through the process of exporting data from MATLAB in a binary format and subsequently loading it using SpikeInterface in Python. Exporting Data from MATLAB -------------------------- -First, ensure your data is structured correctly. The data matrix should be organized such that the first dimension corresponds to samples/time and the second dimension to channels. -In the following MATLAB code, we generate random data as an example and then write it to a binary file. +Begin by ensuring your data structure is correct. 
Organize your data matrix so that the first dimension corresponds to samples/time and the second to channels. +Here, we present a MATLAB code that creates a random dataset and writes it to a binary file as an illustration. .. code-block:: matlab @@ -25,72 +25,76 @@ In the following MATLAB code, we generate random data as an example and then wri .. note:: - In a real-world scenario, replace the random data generation with your actual data. + In your own script, replace the random data generation with your actual dataset. Loading Data in SpikeInterface ----------------------------- -This should produce a binary file called `your_data_as_a_binary.bin` in your current MATLAB directory. -You will need the complete path (i.e. its location on your computer) to load it in Python. +After executing the above MATLAB code, a binary file named `your_data_as_a_binary.bin` will be created in your MATLAB directory. To load this file in Python, you'll need its full path. -Once you have your data in a binary format, you can seamlessly load it into SpikeInterface using the following script: +Use the following Python script to load the binary data into SpikeInterface: .. code-block:: python import spikeinterface as si from pathlib import Path - # In linux or mac + # Define file path + # For Linux or macOS: file_path = Path("/The/Path/To/Your/Data/your_data_as_a_binary.bin") - # or for Windows + # For Windows: # file_path = Path(r"c:\path\to\your\data\your_data_as_a_binary.bin") - # Ensure the file exists - assert file_path.is_file(), f"Your path {file_path} is not a file, you probably have a typo or got the wrong path." + # Confirm file existence + assert file_path.is_file(), f"Error: {file_path} is not a valid file. Please check the path." - # Specify the parameters of your recording - sampling_frequency = 30_000.0 # in Hz, adjust as per your MATLAB dataset - num_channels = 384 # adjust as per your MATLAB dataset - dtype = "float64" # equivalent of MATLAB double + # Define recording parameters + sampling_frequency = 30_000.0 # Adjust according to your MATLAB dataset + num_channels = 384 # Adjust according to your MATLAB dataset + dtype = "float64" # MATLAB's double corresponds to Python's float64 - # Load the data using SpikeInterface + # Load data using SpikeInterface recording = si.read_binary(file_path, sampling_frequency=sampling_frequency, num_channels=num_channels, dtype=dtype) - # Verify the shape of your data - assert recording.get_traces().shape == (num_samples, num_channels) + # Confirm the data shape + assert recording.get_traces().shape == (numSamples, num_channels) -This should be enough to get you started with loading your MATLAB data into SpikeInterface. You can use all the Spikeinterface machinery to process your data, including filtering, spike sorting, and more. +Follow the steps above to seamlessly import your MATLAB data into SpikeInterface. Once loaded, you can harness the full power of SpikeInterface for data processing, including filtering, spike sorting, and more. Common Pitfalls & Tips ---------------------- -1. **Data Shape**: Always ensure that your MATLAB data matrix's first dimension corresponds to samples/time and the second to channels. If the time happens to be in the second dimension, you can use `time_axis=1` as an argument in `si.read_binary()` to account for this. -2. **File Path**: Double-check the file path in Python to ensure you are pointing to the right directory. -3. 
**Data Type**: When moving data between MATLAB and Python, it's crucial to keep the data type consistent. In our example, we used `double` in MATLAB, which corresponds to `float64` in Python. -4. **Sampling Frequency**: Ensure you set the correct sampling frequency in Hz when loading data into SpikeInterface. -5. **Working on Python**: Matlab to python can feel like a big jump. If you are new to Python, we recommend checking out numpy's [Python for MATLAB Users](https://numpy.org/doc/stable/user/numpy-for-matlab-users.html) guide. - +1. **Data Shape**: Make sure your MATLAB data matrix's first dimension is samples/time and the second is channels. If your time is in the second dimension, use `time_axis=1` in `si.read_binary()`. +2. **File Path**: Always double-check the Python file path. +3. **Data Type Consistency**: Ensure data types between MATLAB and Python are consistent. MATLAB's `double` is equivalent to nUMPY's `float64`. +4. **Sampling Frequency**: Set the appropriate sampling frequency in Hz for SpikeInterface. +5. **Transition to Python**: Moving from MATLAB to Python can be challenging. For newcomers to Python, consider reviewing numpy's [Numpy for MATLAB Users](https://numpy.org/doc/stable/user/numpy-for-matlab-users.html) guide. Using gains and offsets for integer data ---------------------------------------- -A common technique used in raw formats is to store data as integer values, which provides a memory-efficient representation (i.e. lower ram) and use a gain and offset to convert it to float values that represent meaningful physical units. -In SpikeInterface this is done using the `gain_to_uV` and `offset_to_uV` parameters as the we handle traces in microvolts. Both values can be passed to `read_binary` when loading the data: +Raw data formats often store data as integer values for memory efficiency. To give these integers meaningful physical units, you can apply a gain and an offset. +In SpikeInterface, you can use the `gain_to_uV` and `offset_to_uV` parameters, since traces are handled in microvolts (uV). Both parameters can be integrated into the `read_binary` function. +If your data in MATLAB is stored as `int16`, and you know the gain and offset, you can use the following code to load the data: .. code-block:: python - sampling_frequency = 30_000.0 # in Hz, adjust as per your MATLAB dataset - num_channels = 384 # adjust as per your MATLAB dataset - dtype_int = 'int16' # adjust as per your MATLAB dataset - gain_to_uV = 0.195 # adjust as per your MATLAB dataset - offset_to_uV = 0 # adjust as per your MATLAB dataset + sampling_frequency = 30_000.0 # Adjust according to your MATLAB dataset + num_channels = 384 # Adjust according to your MATLAB dataset + dtype_int = 'int16' # Adjust according to your MATLAB dataset + gain_to_uV = 0.195 # Adjust according to your MATLAB dataset + offset_to_uV = 0 # Adjust according to your MATLAB dataset recording = si.read_binary(file_path, sampling_frequency=sampling_frequency, num_channels=num_channels, dtype=dtype_int, gain_to_uV=gain_to_uV, offset_to_uV=offset_to_uV) - recording.get_traces(start) + recording.get_traces(return_scaled=True) # Return traces in micro volts (uV) + +This will equip your recording object with capabilities to convert the data to float values in uV using the `get_traces()` method with the `return_scaled` parameter set to `True`. + +.. 
note:: -This will equip your recording object with capabilities to convert the data to float values in uV using the `get_traces()` method with the `return_scaled` parameter set to True. + The gain and offset parameters are usually format depend and you will need to find out the correct values for your data format. You can load your data without gain and offset but then the traces will be in integer values and not in uV. From e31978ce8355dda2d87a713c2495ec915b805f92 Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Wed, 20 Sep 2023 12:53:47 +0200 Subject: [PATCH 042/115] typo --- doc/how_to/load_matlab_data.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/how_to/load_matlab_data.rst b/doc/how_to/load_matlab_data.rst index 0a80f1fdf9..ca543ba43a 100644 --- a/doc/how_to/load_matlab_data.rst +++ b/doc/how_to/load_matlab_data.rst @@ -67,7 +67,7 @@ Common Pitfalls & Tips 1. **Data Shape**: Make sure your MATLAB data matrix's first dimension is samples/time and the second is channels. If your time is in the second dimension, use `time_axis=1` in `si.read_binary()`. 2. **File Path**: Always double-check the Python file path. -3. **Data Type Consistency**: Ensure data types between MATLAB and Python are consistent. MATLAB's `double` is equivalent to nUMPY's `float64`. +3. **Data Type Consistency**: Ensure data types between MATLAB and Python are consistent. MATLAB's `double` is equivalent to Numpy's `float64`. 4. **Sampling Frequency**: Set the appropriate sampling frequency in Hz for SpikeInterface. 5. **Transition to Python**: Moving from MATLAB to Python can be challenging. For newcomers to Python, consider reviewing numpy's [Numpy for MATLAB Users](https://numpy.org/doc/stable/user/numpy-for-matlab-users.html) guide. From 5aba5e0f65532165488303203d7739e188fe6e0c Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Wed, 20 Sep 2023 12:57:44 +0200 Subject: [PATCH 043/115] Update doc/how_to/load_matlab_data.rst Co-authored-by: Zach McKenzie <92116279+zm711@users.noreply.github.com> --- doc/how_to/load_matlab_data.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/how_to/load_matlab_data.rst b/doc/how_to/load_matlab_data.rst index ca543ba43a..7f90684701 100644 --- a/doc/how_to/load_matlab_data.rst +++ b/doc/how_to/load_matlab_data.rst @@ -97,4 +97,4 @@ This will equip your recording object with capabilities to convert the data to f .. note:: - The gain and offset parameters are usually format depend and you will need to find out the correct values for your data format. You can load your data without gain and offset but then the traces will be in integer values and not in uV. + The gain and offset parameters are usually format dependent and you will need to find out the correct values for your data format. You can load your data without gain and offset but then the traces will be in integer values and not in uV. 
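
For reference, the scaling behind `return_scaled=True` described above is a
single affine transform per channel. A minimal sketch with the illustrative
values used in this guide (`0.195` gain, zero offset; the int16 samples are
made up):

.. code-block:: python

    import numpy as np

    gain_to_uV = 0.195
    offset_to_uV = 0.0

    # A few made-up int16 samples, as they would sit in the binary file
    raw = np.array([-100, 0, 512], dtype="int16")

    # In essence, this is the conversion applied when scaled traces are requested
    scaled_uV = raw.astype("float32") * gain_to_uV + offset_to_uV
    print(scaled_uV)  # [-19.5    0.    99.84]

Storing `int16` with a gain keeps the file four times smaller than `float64`
while still yielding traces in microvolts on load.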
From 3f4e182380995f56d458163356a70a813af6b146 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Wed, 20 Sep 2023 14:01:50 +0200 Subject: [PATCH 044/115] More check and clean for check_if_serializable() --- src/spikeinterface/comparison/hybrid.py | 4 +- .../comparison/multicomparisons.py | 2 - src/spikeinterface/core/base.py | 46 +++++++++---------- src/spikeinterface/core/generate.py | 2 + src/spikeinterface/core/numpyextractors.py | 8 ++-- src/spikeinterface/core/old_api_utils.py | 2 - src/spikeinterface/core/tests/test_base.py | 7 +-- .../core/tests/test_waveform_extractor.py | 2 + src/spikeinterface/core/waveform_extractor.py | 18 +++++--- src/spikeinterface/preprocessing/motion.py | 1 - 10 files changed, 44 insertions(+), 48 deletions(-) diff --git a/src/spikeinterface/comparison/hybrid.py b/src/spikeinterface/comparison/hybrid.py index c48ce70147..3b8e9e0a72 100644 --- a/src/spikeinterface/comparison/hybrid.py +++ b/src/spikeinterface/comparison/hybrid.py @@ -84,8 +84,8 @@ def __init__( ) # save injected sorting if necessary self.injected_sorting = injected_sorting - # if not self.injected_sorting.check_if_json_serializable(): if not self.injected_sorting.check_serializablility("json"): + # TODO later : also use pickle assert injected_sorting_folder is not None, "Provide injected_sorting_folder to injected sorting object" self.injected_sorting = self.injected_sorting.save(folder=injected_sorting_folder) @@ -181,8 +181,8 @@ def __init__( self.injected_sorting = injected_sorting # save injected sorting if necessary - # if not self.injected_sorting.check_if_json_serializable(): if not self.injected_sorting.check_serializablility("json"): + # TODO later : also use pickle assert injected_sorting_folder is not None, "Provide injected_sorting_folder to injected sorting object" self.injected_sorting = self.injected_sorting.save(folder=injected_sorting_folder) diff --git a/src/spikeinterface/comparison/multicomparisons.py b/src/spikeinterface/comparison/multicomparisons.py index 3a7075905e..09a8c8aed1 100644 --- a/src/spikeinterface/comparison/multicomparisons.py +++ b/src/spikeinterface/comparison/multicomparisons.py @@ -182,7 +182,6 @@ def get_agreement_sorting(self, minimum_agreement_count=1, minimum_agreement_cou def save_to_folder(self, save_folder): for sorting in self.object_list: assert ( - # sorting.check_if_json_serializable() sorting.check_serializablility("json") ), "MultiSortingComparison.save_to_folder() need json serializable sortings" @@ -245,7 +244,6 @@ def __init__( BaseSorting.__init__(self, sampling_frequency=sampling_frequency, unit_ids=unit_ids) - # self._is_json_serializable = False self._serializablility["json"] = False self._serializablility["pickle"] = True diff --git a/src/spikeinterface/core/base.py b/src/spikeinterface/core/base.py index d87bd617c4..63cf8e894f 100644 --- a/src/spikeinterface/core/base.py +++ b/src/spikeinterface/core/base.py @@ -484,11 +484,16 @@ def check_if_dumpable(self): for value in kwargs.values(): # here we check if the value is a BaseExtractor, a list of BaseExtractors, or a dict of BaseExtractors if isinstance(value, BaseExtractor): - return value.check_if_dumpable() - elif isinstance(value, list) and (len(value) > 0) and isinstance(value[0], BaseExtractor): - return all([v.check_if_dumpable() for v in value]) - elif isinstance(value, dict) and isinstance(value[list(value.keys())[0]], BaseExtractor): - return all([v.check_if_dumpable() for k, v in value.items()]) + if not value.check_if_dumpable(): + return False + elif isinstance(value, 
list): + for v in value: + if isinstance(v, BaseExtractor) and not v.check_if_dumpable(): + return False + elif isinstance(value, dict): + for v in value.values(): + if isinstance(v, BaseExtractor) and not v.check_if_dumpable(): + return False return self._is_dumpable def check_serializablility(self, type="json"): @@ -496,11 +501,16 @@ def check_serializablility(self, type="json"): for value in kwargs.values(): # here we check if the value is a BaseExtractor, a list of BaseExtractors, or a dict of BaseExtractors if isinstance(value, BaseExtractor): - return value.check_serializablility(type=type) - elif isinstance(value, list) and (len(value) > 0) and isinstance(value[0], BaseExtractor): - return all([v.check_serializablility(type=type) for v in value]) - elif isinstance(value, dict) and isinstance(value[list(value.keys())[0]], BaseExtractor): - return all([v.check_serializablility(type=type) for k, v in value.items()]) + if not value.check_serializablility(type=type): + return False + elif isinstance(value, list): + for v in value: + if isinstance(v, BaseExtractor) and not v.check_serializablility(type=type): + return False + elif isinstance(value, dict): + for v in value.values(): + if isinstance(v, BaseExtractor) and not v.check_serializablility(type=type): + return False return self._serializablility[type] def check_if_json_serializable(self): @@ -513,21 +523,11 @@ def check_if_json_serializable(self): True if the object is json serializable, False otherwise. """ # we keep this for backward compatilibity or not ???? + # is this needed ??? I think no. return self.check_serializablility("json") - # kwargs = self._kwargs - # for value in kwargs.values(): - # # here we check if the value is a BaseExtractor, a list of BaseExtractors, or a dict of BaseExtractors - # if isinstance(value, BaseExtractor): - # return value.check_if_json_serializable() - # elif isinstance(value, list) and (len(value) > 0) and isinstance(value[0], BaseExtractor): - # return all([v.check_if_json_serializable() for v in value]) - # elif isinstance(value, dict) and isinstance(value[list(value.keys())[0]], BaseExtractor): - # return all([v.check_if_json_serializable() for k, v in value.items()]) - # return self._is_json_serializable - def check_if_pickle_serializable(self): - # is this needed + # is this needed ??? I think no. return self.check_serializablility("pickle") @staticmethod @@ -596,7 +596,6 @@ def dump_to_json(self, file_path: Union[str, Path, None] = None, relative_to=Non folder_metadata: str, Path, or None Folder with files containing additional information (e.g. probe in BaseRecording) and properties. 
""" - # assert self.check_if_json_serializable(), "The extractor is not json serializable" assert self.check_serializablility("json"), "The extractor is not json serializable" # Writing paths as relative_to requires recursively expanding the dict @@ -835,7 +834,6 @@ def save_to_folder(self, name=None, folder=None, verbose=True, **save_kwargs): # dump provenance provenance_file = folder / f"provenance.json" - # if self.check_if_json_serializable(): if self.check_serializablility("json"): self.dump(provenance_file) else: diff --git a/src/spikeinterface/core/generate.py b/src/spikeinterface/core/generate.py index 706054c957..362b598b0b 100644 --- a/src/spikeinterface/core/generate.py +++ b/src/spikeinterface/core/generate.py @@ -1056,6 +1056,8 @@ def __init__( dtype = parent_recording.dtype if parent_recording is not None else templates.dtype BaseRecording.__init__(self, sorting.get_sampling_frequency(), channel_ids, dtype) + # Important : self._serializablility is not change here because it will depend on the sorting parents itself. + n_units = len(sorting.unit_ids) assert len(templates) == n_units self.spike_vector = sorting.to_spike_vector() diff --git a/src/spikeinterface/core/numpyextractors.py b/src/spikeinterface/core/numpyextractors.py index f55b975ddb..5ef955a6eb 100644 --- a/src/spikeinterface/core/numpyextractors.py +++ b/src/spikeinterface/core/numpyextractors.py @@ -64,7 +64,6 @@ def __init__(self, traces_list, sampling_frequency, t_starts=None, channel_ids=N assert len(t_starts) == len(traces_list), "t_starts must be a list of same size than traces_list" t_starts = [float(t_start) for t_start in t_starts] - # self._is_json_serializable = False self._serializablility["json"] = False self._serializablility["pickle"] = False @@ -129,9 +128,9 @@ def __init__(self, spikes, sampling_frequency, unit_ids): BaseSorting.__init__(self, sampling_frequency, unit_ids) self._is_dumpable = True - # self._is_json_serializable = False self._serializablility["json"] = False - self._serializablility["pickle"] = False + # theorically this should be False but for simplicity make generators simples we still need this. 
+ self._serializablility["pickle"] = True if spikes.size == 0: nseg = 1 @@ -362,7 +361,7 @@ def __init__(self, shm_name, shape, sampling_frequency, unit_ids, dtype=minimum_ BaseSorting.__init__(self, sampling_frequency, unit_ids) self._is_dumpable = True - # self._is_json_serializable = False + self._serializablility["json"] = False self._serializablility["pickle"] = False @@ -523,7 +522,6 @@ def __init__(self, snippets_list, spikesframes_list, sampling_frequency, nbefore ) self._is_dumpable = False - # self._is_json_serializable = False self._serializablility["json"] = False self._serializablility["pickle"] = False diff --git a/src/spikeinterface/core/old_api_utils.py b/src/spikeinterface/core/old_api_utils.py index 38fbef1547..a31edb0dd7 100644 --- a/src/spikeinterface/core/old_api_utils.py +++ b/src/spikeinterface/core/old_api_utils.py @@ -183,7 +183,6 @@ def __init__(self, oldapi_recording_extractor): # set _is_dumpable to False to use dumping mechanism of old extractor self._is_dumpable = False - # self._is_json_serializable = False self._serializablility["json"] = False self._serializablility["pickle"] = False @@ -271,7 +270,6 @@ def __init__(self, oldapi_sorting_extractor): self.add_sorting_segment(sorting_segment) self._is_dumpable = False - # self._is_json_serializable = False self._serializablility["json"] = False self._serializablility["pickle"] = False diff --git a/src/spikeinterface/core/tests/test_base.py b/src/spikeinterface/core/tests/test_base.py index 77a5d7d9bf..b716f6b1dd 100644 --- a/src/spikeinterface/core/tests/test_base.py +++ b/src/spikeinterface/core/tests/test_base.py @@ -46,16 +46,14 @@ def test_check_if_dumpable(): assert not extractor.check_if_dumpable() -def test_check_if_json_serializable(): +def test_check_if_serializable(): test_extractor = generate_recording(seed=0, durations=[2]) # make a list of dumpable objects - # test_extractor._is_json_serializable = True test_extractor._serializablility["json"] = True extractors_json_serializable = make_nested_extractors(test_extractor) for extractor in extractors_json_serializable: print(extractor) - # assert extractor.check_if_json_serializable() assert extractor.check_serializablility("json") # make not dumpable @@ -64,10 +62,9 @@ def test_check_if_json_serializable(): extractors_not_json_serializable = make_nested_extractors(test_extractor) for extractor in extractors_not_json_serializable: print(extractor) - # assert not extractor.check_if_json_serializable() assert not extractor.check_serializablility("json") if __name__ == "__main__": test_check_if_dumpable() - test_check_if_json_serializable() + test_check_if_serializable() diff --git a/src/spikeinterface/core/tests/test_waveform_extractor.py b/src/spikeinterface/core/tests/test_waveform_extractor.py index f53b9cf18d..3972c9186c 100644 --- a/src/spikeinterface/core/tests/test_waveform_extractor.py +++ b/src/spikeinterface/core/tests/test_waveform_extractor.py @@ -517,6 +517,8 @@ def test_non_json_object(): num_units=5, ) + + print(recording.check_serializablility("pickle")) # recording is not save to keep it in memory sorting = sorting.save() diff --git a/src/spikeinterface/core/waveform_extractor.py b/src/spikeinterface/core/waveform_extractor.py index 53852bf319..3de1429feb 100644 --- a/src/spikeinterface/core/waveform_extractor.py +++ b/src/spikeinterface/core/waveform_extractor.py @@ -280,17 +280,17 @@ def create( else: relative_to = None - # if recording.check_if_json_serializable(): if recording.check_serializablility("json"): recording.dump(folder / 
"recording.json", relative_to=relative_to) elif recording.check_serializablility("pickle"): # In this case we loose the relative_to!! - # TODO make sure that we do not dump to pickle a NumpyRecording!!!!! recording.dump(folder / "recording.pickle") - # if sorting.check_if_json_serializable(): if sorting.check_serializablility("json"): sorting.dump(folder / "sorting.json", relative_to=relative_to) + elif sorting.check_serializablility("pickle"): + # In this case we loose the relative_to!! + sorting.dump(folder / "sorting.pickle") else: warn( "Sorting object is not dumpable, which might result in downstream errors for " @@ -895,12 +895,16 @@ def save( (folder / "params.json").write_text(json.dumps(check_json(self._params), indent=4), encoding="utf8") if self.has_recording(): - # if self.recording.check_if_json_serializable(): if self.recording.check_serializablility("json"): self.recording.dump(folder / "recording.json", relative_to=relative_to) - # if self.sorting.check_if_json_serializable(): + elif self.recording.check_serializablility("pickle"): + self.recording.dump(folder / "recording.pickle") + + if self.sorting.check_serializablility("json"): self.sorting.dump(folder / "sorting.json", relative_to=relative_to) + elif self.sorting.check_serializablility("pickle"): + self.sorting.dump(folder / "sorting.pickle", relative_to=relative_to) else: warn( "Sorting object is not dumpable, which might result in downstream errors for " @@ -949,10 +953,10 @@ def save( # write metadata zarr_root.attrs["params"] = check_json(self._params) if self.has_recording(): - if self.recording.check_if_json_serializable(): + if self.recording.check_serializablility("json"): rec_dict = self.recording.to_dict(relative_to=relative_to, recursive=True) zarr_root.attrs["recording"] = check_json(rec_dict) - if self.sorting.check_if_json_serializable(): + if self.sorting.check_serializablility("json"): sort_dict = self.sorting.to_dict(relative_to=relative_to, recursive=True) zarr_root.attrs["sorting"] = check_json(sort_dict) else: diff --git a/src/spikeinterface/preprocessing/motion.py b/src/spikeinterface/preprocessing/motion.py index 0054fb94d4..6ab1a9afce 100644 --- a/src/spikeinterface/preprocessing/motion.py +++ b/src/spikeinterface/preprocessing/motion.py @@ -333,7 +333,6 @@ def correct_motion( ) (folder / "parameters.json").write_text(json.dumps(parameters, indent=4, cls=SIJsonEncoder), encoding="utf8") (folder / "run_times.json").write_text(json.dumps(run_times, indent=4), encoding="utf8") - # if recording.check_if_json_serializable(): if recording.check_serializablility("json"): recording.dump_to_json(folder / "recording.json") From 615c5d9cd219e4016e7149f1ce170f043d507333 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Wed, 20 Sep 2023 14:19:46 +0200 Subject: [PATCH 045/115] Make pickle possible to dump in run sorter when json is not possible. 
--- src/spikeinterface/sorters/basesorter.py | 61 ++++++++++++------- .../sorters/external/herdingspikes.py | 4 +- .../sorters/external/mountainsort4.py | 4 +- .../sorters/external/mountainsort5.py | 4 +- .../sorters/external/pykilosort.py | 4 +- .../sorters/internal/spyking_circus2.py | 5 +- .../sorters/internal/tridesclous2.py | 4 +- src/spikeinterface/sorters/runsorter.py | 15 ++++- 8 files changed, 59 insertions(+), 42 deletions(-) diff --git a/src/spikeinterface/sorters/basesorter.py b/src/spikeinterface/sorters/basesorter.py index da20506965..bbcde31eed 100644 --- a/src/spikeinterface/sorters/basesorter.py +++ b/src/spikeinterface/sorters/basesorter.py @@ -137,9 +137,10 @@ def initialize_folder(cls, recording, output_folder, verbose, remove_existing_fo ) rec_file = output_folder / "spikeinterface_recording.json" - # if recording.check_if_json_serializable(): if recording.check_serializablility("json"): - recording.dump_to_json(rec_file, relative_to=output_folder) + recording.dump(rec_file, relative_to=output_folder) + elif recording.check_serializablility("pickle"): + recording.dump(output_folder / "spikeinterface_recording.pickle") else: d = {"warning": "The recording is not serializable to json"} rec_file.write_text(json.dumps(d, indent=4), encoding="utf8") @@ -186,6 +187,28 @@ def set_params_to_folder(cls, recording, output_folder, new_params, verbose): return params + @classmethod + def load_recording_from_folder(cls, output_folder, with_warnings=False): + + json_file = output_folder / "spikeinterface_recording.json" + pickle_file = output_folder / "spikeinterface_recording.pickle" + + + if json_file.exists(): + with (json_file).open("r", encoding="utf8") as f: + recording_dict = json.load(f) + if "warning" in recording_dict.keys() and with_warnings: + warnings.warn( + "The recording that has been sorted is not JSON serializable: it cannot be registered to the sorting object." + ) + recording = None + else: + recording = load_extractor(json_file, base_folder=output_folder) + elif pickle_file.exits(): + recording = load_extractor(pickle_file) + + return recording + @classmethod def _dump_params(cls, recording, output_folder, sorter_params, verbose): with (output_folder / "spikeinterface_params.json").open(mode="w", encoding="utf8") as f: @@ -272,7 +295,7 @@ def run_from_folder(cls, output_folder, raise_error, verbose): return run_time @classmethod - def get_result_from_folder(cls, output_folder): + def get_result_from_folder(cls, output_folder, register_recording=True, sorting_info=True): output_folder = Path(output_folder) sorter_output_folder = output_folder / "sorter_output" # check errors in log file @@ -295,27 +318,21 @@ def get_result_from_folder(cls, output_folder): # back-compatibility sorting = cls._get_result_from_folder(output_folder) - # register recording to Sorting object - # check if not json serializable - with (output_folder / "spikeinterface_recording.json").open("r", encoding="utf8") as f: - recording_dict = json.load(f) - if "warning" in recording_dict.keys(): - warnings.warn( - "The recording that has been sorted is not JSON serializable: it cannot be registered to the sorting object." 
- ) - else: - recording = load_extractor(output_folder / "spikeinterface_recording.json", base_folder=output_folder) + if register_recording: + # register recording to Sorting object + recording = cls.load_recording_from_folder( output_folder, with_warnings=False) if recording is not None: - # can be None when not dumpable sorting.register_recording(recording) - # set sorting info to Sorting object - with open(output_folder / "spikeinterface_recording.json", "r") as f: - rec_dict = json.load(f) - with open(output_folder / "spikeinterface_params.json", "r") as f: - params_dict = json.load(f) - with open(output_folder / "spikeinterface_log.json", "r") as f: - log_dict = json.load(f) - sorting.set_sorting_info(rec_dict, params_dict, log_dict) + + if sorting_info: + # set sorting info to Sorting object + with open(output_folder / "spikeinterface_recording.json", "r") as f: + rec_dict = json.load(f) + with open(output_folder / "spikeinterface_params.json", "r") as f: + params_dict = json.load(f) + with open(output_folder / "spikeinterface_log.json", "r") as f: + log_dict = json.load(f) + sorting.set_sorting_info(rec_dict, params_dict, log_dict) return sorting diff --git a/src/spikeinterface/sorters/external/herdingspikes.py b/src/spikeinterface/sorters/external/herdingspikes.py index a8d702ebe9..5180e6f1cc 100644 --- a/src/spikeinterface/sorters/external/herdingspikes.py +++ b/src/spikeinterface/sorters/external/herdingspikes.py @@ -147,9 +147,7 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): else: new_api = False - recording = load_extractor( - sorter_output_folder.parent / "spikeinterface_recording.json", base_folder=sorter_output_folder.parent - ) + recording = cls.load_recording_from_folder(sorter_output_folder.parent, with_warnings=False) p = params diff --git a/src/spikeinterface/sorters/external/mountainsort4.py b/src/spikeinterface/sorters/external/mountainsort4.py index 69f97fd11c..f6f0b3eaeb 100644 --- a/src/spikeinterface/sorters/external/mountainsort4.py +++ b/src/spikeinterface/sorters/external/mountainsort4.py @@ -89,9 +89,7 @@ def _setup_recording(cls, recording, sorter_output_folder, params, verbose): def _run_from_folder(cls, sorter_output_folder, params, verbose): import mountainsort4 - recording = load_extractor( - sorter_output_folder.parent / "spikeinterface_recording.json", base_folder=sorter_output_folder.parent - ) + recording = cls.load_recording_from_folder(sorter_output_folder.parent, with_warnings=False) # alias to params p = params diff --git a/src/spikeinterface/sorters/external/mountainsort5.py b/src/spikeinterface/sorters/external/mountainsort5.py index df6d276bf5..a88c59d688 100644 --- a/src/spikeinterface/sorters/external/mountainsort5.py +++ b/src/spikeinterface/sorters/external/mountainsort5.py @@ -115,9 +115,7 @@ def _setup_recording(cls, recording, sorter_output_folder, params, verbose): def _run_from_folder(cls, sorter_output_folder, params, verbose): import mountainsort5 as ms5 - recording: BaseRecording = load_extractor( - sorter_output_folder.parent / "spikeinterface_recording.json", base_folder=sorter_output_folder.parent - ) + recording = cls.load_recording_from_folder(sorter_output_folder.parent, with_warnings=False) # alias to params p = params diff --git a/src/spikeinterface/sorters/external/pykilosort.py b/src/spikeinterface/sorters/external/pykilosort.py index 2a41d793d5..1962d56206 100644 --- a/src/spikeinterface/sorters/external/pykilosort.py +++ b/src/spikeinterface/sorters/external/pykilosort.py @@ -148,9 +148,7 @@ def 
_setup_recording(cls, recording, sorter_output_folder, params, verbose): @classmethod def _run_from_folder(cls, sorter_output_folder, params, verbose): - recording = load_extractor( - sorter_output_folder.parent / "spikeinterface_recording.json", base_folder=sorter_output_folder.parent - ) + recording = cls.load_recording_from_folder(sorter_output_folder.parent, with_warnings=False) if not recording.binary_compatible_with(time_axis=0, file_paths_lenght=1): # saved by setup recording diff --git a/src/spikeinterface/sorters/internal/spyking_circus2.py b/src/spikeinterface/sorters/internal/spyking_circus2.py index 9de2762562..86cce1959b 100644 --- a/src/spikeinterface/sorters/internal/spyking_circus2.py +++ b/src/spikeinterface/sorters/internal/spyking_circus2.py @@ -54,9 +54,8 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): job_kwargs["verbose"] = verbose job_kwargs["progress_bar"] = verbose - recording = load_extractor( - sorter_output_folder.parent / "spikeinterface_recording.json", base_folder=sorter_output_folder.parent - ) + recording = cls.load_recording_from_folder(sorter_output_folder.parent, with_warnings=False) + sampling_rate = recording.get_sampling_frequency() num_channels = recording.get_num_channels() diff --git a/src/spikeinterface/sorters/internal/tridesclous2.py b/src/spikeinterface/sorters/internal/tridesclous2.py index 42f51d3a77..ed327e0f3c 100644 --- a/src/spikeinterface/sorters/internal/tridesclous2.py +++ b/src/spikeinterface/sorters/internal/tridesclous2.py @@ -49,9 +49,7 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): import hdbscan - recording_raw = load_extractor( - sorter_output_folder.parent / "spikeinterface_recording.json", base_folder=sorter_output_folder.parent - ) + recording_raw = cls.load_recording_from_folder(sorter_output_folder.parent, with_warnings=False) num_chans = recording_raw.get_num_channels() sampling_frequency = recording_raw.get_sampling_frequency() diff --git a/src/spikeinterface/sorters/runsorter.py b/src/spikeinterface/sorters/runsorter.py index 6e6ccc0358..e930ec7f79 100644 --- a/src/spikeinterface/sorters/runsorter.py +++ b/src/spikeinterface/sorters/runsorter.py @@ -624,10 +624,20 @@ def run_sorter_container( ) -def read_sorter_folder(output_folder, raise_error=True): +def read_sorter_folder(output_folder, register_recording=True, sorting_info=True, raise_error=True): """ Load a sorting object from a spike sorting output folder. The 'output_folder' must contain a valid 'spikeinterface_log.json' file + + + Parameters + ---------- + output_folder: Pth or str + The sorter folder + register_recording: bool, default: True + Attach recording (when json or pickle) to the sorting + sorting_info: bool, default: True + Attach sorting info to the sorting. 
""" output_folder = Path(output_folder) log_file = output_folder / "spikeinterface_log.json" @@ -647,7 +657,8 @@ def read_sorter_folder(output_folder, raise_error=True): sorter_name = log["sorter_name"] SorterClass = sorter_dict[sorter_name] - sorting = SorterClass.get_result_from_folder(output_folder) + sorting = SorterClass.get_result_from_folder(output_folder, register_recording=register_recording, + sorting_info=sorting_info) return sorting From b231e2dade552413bdd68e18aad95881a047f4cb Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Wed, 20 Sep 2023 14:47:14 +0200 Subject: [PATCH 046/115] correction --- doc/how_to/load_matlab_data.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/how_to/load_matlab_data.rst b/doc/how_to/load_matlab_data.rst index 7f90684701..0186ecf72b 100644 --- a/doc/how_to/load_matlab_data.rst +++ b/doc/how_to/load_matlab_data.rst @@ -57,8 +57,8 @@ Use the following Python script to load the binary data into SpikeInterface: recording = si.read_binary(file_path, sampling_frequency=sampling_frequency, num_channels=num_channels, dtype=dtype) - # Confirm the data shape - assert recording.get_traces().shape == (numSamples, num_channels) + # Confirm that the data was loaded correctly by comparing the data shapes and see they match the MATLAB data + print(recording.get_num_frames(), recording.get_num_channels()) Follow the steps above to seamlessly import your MATLAB data into SpikeInterface. Once loaded, you can harness the full power of SpikeInterface for data processing, including filtering, spike sorting, and more. From fb7681520e74a01be0fd4e56740936a4f6de4e25 Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Wed, 20 Sep 2023 16:40:43 +0200 Subject: [PATCH 047/115] Update doc/how_to/load_matlab_data.rst Co-authored-by: Zach McKenzie <92116279+zm711@users.noreply.github.com> --- doc/how_to/load_matlab_data.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/how_to/load_matlab_data.rst b/doc/how_to/load_matlab_data.rst index 0186ecf72b..3943fbd30f 100644 --- a/doc/how_to/load_matlab_data.rst +++ b/doc/how_to/load_matlab_data.rst @@ -28,7 +28,7 @@ Here, we present a MATLAB code that creates a random dataset and writes it to a In your own script, replace the random data generation with your actual dataset. Loading Data in SpikeInterface ------------------------------ +------------------------------ After executing the above MATLAB code, a binary file named `your_data_as_a_binary.bin` will be created in your MATLAB directory. To load this file in Python, you'll need its full path. 
From c8579b573236a6e454e27c329e9a03482be606f7 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Wed, 20 Sep 2023 21:25:42 +0200 Subject: [PATCH 048/115] minor chnages on drift benchmark for figures --- .../benchmark/benchmark_motion_estimation.py | 33 +++++++++++-------- .../benchmark_motion_interpolation.py | 14 +++++--- 2 files changed, 29 insertions(+), 18 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_estimation.py b/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_estimation.py index dd35670abd..a47b97fb6d 100644 --- a/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_estimation.py +++ b/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_estimation.py @@ -487,7 +487,7 @@ def plot_errors_several_benchmarks(benchmarks, axes=None, show_legend=True, colo mean_error = np.sqrt(np.mean((errors) ** 2, axis=1)) depth_error = np.sqrt(np.mean((errors) ** 2, axis=0)) - axes[0].plot(benchmark.temporal_bins, mean_error, label=benchmark.title, color=c) + axes[0].plot(benchmark.temporal_bins, mean_error, lw=1, label=benchmark.title, color=c) parts = axes[1].violinplot(mean_error, [count], showmeans=True) if c is not None: for pc in parts["bodies"]: @@ -584,23 +584,30 @@ def plot_motions_several_benchmarks(benchmarks): _simpleaxis(ax) -def plot_speed_several_benchmarks(benchmarks, ax=None, colors=None): +def plot_speed_several_benchmarks(benchmarks, detailed=True, ax=None, colors=None): if ax is None: fig, ax = plt.subplots(figsize=(5, 5)) for count, benchmark in enumerate(benchmarks): color = colors[count] if colors is not None else None - bottom = 0 - i = 0 - patterns = ["/", "\\", "|", "*"] - for key, value in benchmark.run_times.items(): - if count == 0: - label = key.replace("_", " ") - else: - label = None - ax.bar([count], [value], label=label, bottom=bottom, color=color, edgecolor="black", hatch=patterns[i]) - bottom += value - i += 1 + + if detailed: + bottom = 0 + i = 0 + patterns = ["/", "\\", "|", "*"] + for key, value in benchmark.run_times.items(): + if count == 0: + label = key.replace("_", " ") + else: + label = None + ax.bar([count], [value], label=label, bottom=bottom, color=color, edgecolor="black", hatch=patterns[i]) + bottom += value + i += 1 + else: + total_run_time = np.sum([value for key, value in benchmark.run_times.items()]) + ax.bar([count], [total_run_time], color=color, edgecolor="black") + + # ax.legend() ax.set_ylabel("speed (s)") diff --git a/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_interpolation.py b/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_interpolation.py index 13a64e8168..8e5afb2e8e 100644 --- a/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_interpolation.py +++ b/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_interpolation.py @@ -9,7 +9,7 @@ from spikeinterface.extractors import read_mearec from spikeinterface.preprocessing import bandpass_filter, zscore, common_reference, scale, highpass_filter, whiten -from spikeinterface.sorters import run_sorter +from spikeinterface.sorters import run_sorter, read_sorter_folder from spikeinterface.widgets import plot_unit_waveforms, plot_gt_performances from spikeinterface.comparison import GroundTruthComparison @@ -184,7 +184,7 @@ def extract_waveforms(self): we.run_extract_waveforms(seed=22051977, **self.job_kwargs) self.waveforms[key] = we - def run_sorters(self): + def run_sorters(self, skip_already_done=True): for case in self.sorter_cases: label = 
case["label"] print("run sorter", label) @@ -192,9 +192,13 @@ def run_sorters(self): sorter_params = case["sorter_params"] recording = self.recordings[case["recording"]] output_folder = self.folder / f"tmp_sortings_{label}" - sorting = run_sorter( - sorter_name, recording, output_folder, **sorter_params, delete_output_folder=self.delete_output_folder - ) + if output_folder.exists() and skip_already_done: + print('already done') + sorting = read_sorter_folder(output_folder) + else: + sorting = run_sorter( + sorter_name, recording, output_folder, **sorter_params, delete_output_folder=self.delete_output_folder + ) self.sortings[label] = sorting def compute_distances_to_static(self, force=False): From 9ba6fc6cbf0b0fd3d7bfa0b22108c48a05770b67 Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Thu, 21 Sep 2023 14:01:25 +0200 Subject: [PATCH 049/115] Update doc/how_to/load_matlab_data.rst Co-authored-by: Alessio Buccino --- doc/how_to/load_matlab_data.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/how_to/load_matlab_data.rst b/doc/how_to/load_matlab_data.rst index 3943fbd30f..aaca718096 100644 --- a/doc/how_to/load_matlab_data.rst +++ b/doc/how_to/load_matlab_data.rst @@ -93,7 +93,7 @@ If your data in MATLAB is stored as `int16`, and you know the gain and offset, y recording.get_traces(return_scaled=True) # Return traces in micro volts (uV) -This will equip your recording object with capabilities to convert the data to float values in uV using the `get_traces()` method with the `return_scaled` parameter set to `True`. +This will equip your recording object with capabilities to convert the data to float values in uV using the :code:`get_traces()` method with the :code:`return_scaled` parameter set to :code:`True`. .. note:: From e964731b33401db1757ce813d2078c00a36dcf34 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Thu, 21 Sep 2023 16:36:31 +0200 Subject: [PATCH 050/115] Start refactor ipywidgets plot_traces --- src/spikeinterface/widgets/traces.py | 29 +- .../widgets/utils_ipywidgets.py | 251 ++++++++++++++++-- 2 files changed, 254 insertions(+), 26 deletions(-) diff --git a/src/spikeinterface/widgets/traces.py b/src/spikeinterface/widgets/traces.py index 7bb2126744..c6e36387f8 100644 --- a/src/spikeinterface/widgets/traces.py +++ b/src/spikeinterface/widgets/traces.py @@ -276,11 +276,16 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): import matplotlib.pyplot as plt import ipywidgets.widgets as widgets from IPython.display import display + import ipywidgets.widgets as W from .utils_ipywidgets import ( check_ipywidget_backend, make_timeseries_controller, make_channel_controller, make_scale_controller, + + TimeSlider, + ScaleWidget, + ) check_ipywidget_backend() @@ -308,6 +313,8 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): t_start = 0.0 t_stop = rec0.get_num_samples(segment_index=0) / rec0.get_sampling_frequency() + + ts_widget, ts_controller = make_timeseries_controller( t_start, t_stop, @@ -319,6 +326,22 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): width_cm, ) + # some widgets + self.time_slider = TimeSlider( + durations=[rec0.get_duration(s) for s in range(rec0.get_num_segments())], + sampling_frequency=rec0.sampling_frequency, + ) + self.layer_selector = W.Dropdown(description="layer", options=data_plot["layer_keys"], + layout=W.Layout(width="5cm"),) + self.mode_selector = W.Dropdown(options=["line", "map"], description="mode", value=data_plot["mode"], + layout=W.Layout(width="5cm"),) + self.scaler = ScaleWidget() + 
left_sidebar = W.VBox( + children=[self.layer_selector, self.mode_selector, self.scaler], + layout=W.Layout(width="5cm"), + ) + + ch_widget, ch_controller = make_channel_controller(rec0, width_cm=ratios[2] * width_cm, height_cm=height_cm) scale_widget, scale_controller = make_scale_controller(width_cm=ratios[0] * width_cm, height_cm=height_cm) @@ -346,8 +369,10 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): self.widget = widgets.AppLayout( center=self.figure.canvas, - footer=ts_widget, - left_sidebar=scale_widget, + # footer=ts_widget, + footer=self.time_slider, + # left_sidebar=scale_widget, + left_sidebar = left_sidebar, right_sidebar=ch_widget, pane_heights=[0, 6, 1], pane_widths=ratios, diff --git a/src/spikeinterface/widgets/utils_ipywidgets.py b/src/spikeinterface/widgets/utils_ipywidgets.py index a7c571d1f0..674a2d2cc7 100644 --- a/src/spikeinterface/widgets/utils_ipywidgets.py +++ b/src/spikeinterface/widgets/utils_ipywidgets.py @@ -1,4 +1,6 @@ -import ipywidgets.widgets as widgets +import ipywidgets.widgets as W +import traitlets + import numpy as np @@ -10,20 +12,20 @@ def check_ipywidget_backend(): def make_timeseries_controller(t_start, t_stop, layer_keys, num_segments, time_range, mode, all_layers, width_cm): - time_slider = widgets.FloatSlider( + time_slider = W.FloatSlider( orientation="horizontal", description="time:", value=time_range[0], min=t_start, max=t_stop, continuous_update=False, - layout=widgets.Layout(width=f"{width_cm}cm"), + layout=W.Layout(width=f"{width_cm}cm"), ) - layer_selector = widgets.Dropdown(description="layer", options=layer_keys) - segment_selector = widgets.Dropdown(description="segment", options=list(range(num_segments))) - window_sizer = widgets.BoundedFloatText(value=np.diff(time_range)[0], step=0.1, min=0.005, description="win (s)") - mode_selector = widgets.Dropdown(options=["line", "map"], description="mode", value=mode) - all_layers = widgets.Checkbox(description="plot all layers", value=all_layers) + layer_selector = W.Dropdown(description="layer", options=layer_keys) + segment_selector = W.Dropdown(description="segment", options=list(range(num_segments))) + window_sizer = W.BoundedFloatText(value=np.diff(time_range)[0], step=0.1, min=0.005, description="win (s)") + mode_selector = W.Dropdown(options=["line", "map"], description="mode", value=mode) + all_layers = W.Checkbox(description="plot all layers", value=all_layers) controller = { "layer_key": layer_selector, @@ -33,32 +35,32 @@ def make_timeseries_controller(t_start, t_stop, layer_keys, num_segments, time_r "mode": mode_selector, "all_layers": all_layers, } - widget = widgets.VBox( - [time_slider, widgets.HBox([all_layers, layer_selector, segment_selector, window_sizer, mode_selector])] + widget = W.VBox( + [time_slider, W.HBox([all_layers, layer_selector, segment_selector, window_sizer, mode_selector])] ) return widget, controller def make_unit_controller(unit_ids, all_unit_ids, width_cm, height_cm): - unit_label = widgets.Label(value="units:") + unit_label = W.Label(value="units:") - unit_selector = widgets.SelectMultiple( + unit_selector = W.SelectMultiple( options=all_unit_ids, value=list(unit_ids), disabled=False, - layout=widgets.Layout(width=f"{width_cm}cm", height=f"{height_cm}cm"), + layout=W.Layout(width=f"{width_cm}cm", height=f"{height_cm}cm"), ) controller = {"unit_ids": unit_selector} - widget = widgets.VBox([unit_label, unit_selector]) + widget = W.VBox([unit_label, unit_selector]) return widget, controller def make_channel_controller(recording, width_cm, 
height_cm): - channel_label = widgets.Label("channel indices:", layout=widgets.Layout(justify_content="center")) - channel_selector = widgets.IntRangeSlider( + channel_label = W.Label("channel indices:", layout=W.Layout(justify_content="center")) + channel_selector = W.IntRangeSlider( value=[0, recording.get_num_channels()], min=0, max=recording.get_num_channels(), @@ -68,37 +70,238 @@ def make_channel_controller(recording, width_cm, height_cm): orientation="vertical", readout=True, readout_format="d", - layout=widgets.Layout(width=f"{0.8 * width_cm}cm", height=f"{height_cm}cm"), + layout=W.Layout(width=f"{0.8 * width_cm}cm", height=f"{height_cm}cm"), ) controller = {"channel_inds": channel_selector} - widget = widgets.VBox([channel_label, channel_selector]) + widget = W.VBox([channel_label, channel_selector]) return widget, controller def make_scale_controller(width_cm, height_cm): - scale_label = widgets.Label("Scale", layout=widgets.Layout(justify_content="center")) + scale_label = W.Label("Scale", layout=W.Layout(justify_content="center")) - plus_selector = widgets.Button( + plus_selector = W.Button( description="", disabled=False, button_style="", # 'success', 'info', 'warning', 'danger' or '' tooltip="Increase scale", icon="arrow-up", - layout=widgets.Layout(width=f"{0.8 * width_cm}cm", height=f"{0.4 * height_cm}cm"), + layout=W.Layout(width=f"{0.8 * width_cm}cm", height=f"{0.4 * height_cm}cm"), ) - minus_selector = widgets.Button( + minus_selector = W.Button( description="", disabled=False, button_style="", # 'success', 'info', 'warning', 'danger' or '' tooltip="Decrease scale", icon="arrow-down", - layout=widgets.Layout(width=f"{0.8 * width_cm}cm", height=f"{0.4 * height_cm}cm"), + layout=W.Layout(width=f"{0.8 * width_cm}cm", height=f"{0.4 * height_cm}cm"), ) controller = {"plus": plus_selector, "minus": minus_selector} - widget = widgets.VBox([scale_label, plus_selector, minus_selector]) + widget = W.VBox([scale_label, plus_selector, minus_selector]) return widget, controller + + + +class TimeSlider(W.HBox): + + position = traitlets.Tuple(traitlets.Int(), traitlets.Int(), traitlets.Int()) + + def __init__(self, durations, sampling_frequency, time_range=(0, 1.), **kwargs): + + + self.num_segments = len(durations) + self.frame_limits = [int(sampling_frequency * d) for d in durations] + self.sampling_frequency = sampling_frequency + start_frame = int(time_range[0] * sampling_frequency) + end_frame = int(time_range[1] * sampling_frequency) + + self.frame_range = (start_frame, end_frame) + + self.segment_index = 0 + self.position = (start_frame, end_frame, self.segment_index) + + + layout = W.Layout(align_items="center", width="1.5cm", height="100%") + but_left = W.Button(description='', disabled=False, button_style='', icon='arrow-left', layout=layout) + but_right = W.Button(description='', disabled=False, button_style='', icon='arrow-right', layout=layout) + + but_left.on_click(self.move_left) + but_right.on_click(self.move_right) + + self.move_size = W.Dropdown(options=['10 ms', '100 ms', '1 s', '10 s', '1 m', '30 m', '1 h',], # '6 h', '24 h' + value='1 s', + description='', + layout = W.Layout(width="2cm") + ) + + # DatetimePicker is only for ipywidget v8 (which is not working in vscode 2023-03) + self.time_label = W.Text(value=f'{time_range[0]}',description='', + disabled=False, layout=W.Layout(width='5.5cm')) + self.time_label.observe(self.time_label_changed, names='value', type="change") + + + self.slider = W.IntSlider( + orientation='horizontal', + # description='time:', + 
value=start_frame, + min=0, + max=self.frame_limits[self.segment_index], + readout=False, + continuous_update=False, + layout=W.Layout(width=f'70%') + ) + + self.slider.observe(self.slider_moved, names='value', type="change") + + delta_s = np.diff(self.frame_range) / sampling_frequency + + self.window_sizer = W.BoundedFloatText(value=delta_s, step=1, + min=0.01, max=30., + description='win (s)', + layout=W.Layout(width='auto') + # layout=W.Layout(width=f'10%') + ) + self.window_sizer.observe(self.win_size_changed, names='value', type="change") + + self.segment_selector = W.Dropdown(description="segment", options=list(range(self.num_segments))) + self.segment_selector.observe(self.segment_changed, names='value', type="change") + + super(W.HBox, self).__init__(children=[self.segment_selector, but_left, self.move_size, but_right, + self.slider, self.time_label, self.window_sizer], + layout=W.Layout(align_items="center", width="100%", height="100%"), + **kwargs) + + self.observe(self.position_changed, names=['position'], type="change") + + def position_changed(self, change=None): + + self.unobserve(self.position_changed, names=['position'], type="change") + + start, stop, seg_index = self.position + if seg_index < 0 or seg_index >= self.num_segments: + self.position = change['old'] + return + if start < 0 or stop < 0: + self.position = change['old'] + return + if start >= self.frame_limits[seg_index] or start > self.frame_limits[seg_index]: + self.position = change['old'] + return + + self.segment_selector.value = seg_index + self.update_time(new_frame=start, update_slider=True, update_label=True) + delta_s = (stop - start) / self.sampling_frequency + self.window_sizer.value = delta_s + + self.observe(self.position_changed, names=['position'], type="change") + + def update_time(self, new_frame=None, new_time=None, update_slider=False, update_label=False): + if new_frame is None and new_time is None: + start_frame = self.slider.value + elif new_frame is None: + start_frame = int(new_time * self.sampling_frequency) + else: + start_frame = new_frame + delta_s = self.window_sizer.value + end_frame = start_frame + int(delta_s * self.sampling_frequency) + + # clip + start_frame = max(0, start_frame) + end_frame = min(self.frame_limits[self.segment_index], end_frame) + + + start_time = start_frame / self.sampling_frequency + + if update_label: + self.time_label.unobserve(self.time_label_changed, names='value', type="change") + self.time_label.value = f'{start_time}' + self.time_label.observe(self.time_label_changed, names='value', type="change") + + if update_slider: + self.slider.unobserve(self.slider_moved, names='value', type="change") + self.slider.value = start_frame + self.slider.observe(self.slider_moved, names='value', type="change") + + self.frame_range = (start_frame, end_frame) + + def time_label_changed(self, change=None): + try: + new_time = float(self.time_label.value) + except: + new_time = None + if new_time is not None: + self.update_time(new_time=new_time, update_slider=True) + + + def win_size_changed(self, change=None): + self.update_time() + + def slider_moved(self, change=None): + new_frame = self.slider.value + self.update_time(new_frame=new_frame, update_label=True) + + def move(self, sign): + value, units = self.move_size.value.split(' ') + value = int(value) + delta_s = (sign * np.timedelta64(value, units)) / np.timedelta64(1, 's') + delta_sample = int(delta_s * self.sampling_frequency) + + new_frame = self.frame_range[0] + delta_sample + self.slider.value = new_frame + + 
def move_left(self, change=None): + self.move(-1) + + def move_right(self, change=None): + self.move(+1) + + def segment_changed(self, change=None): + self.segment_index = self.segment_selector.value + + self.slider.unobserve(self.slider_moved, names='value', type="change") + # self.slider.value = 0 + self.slider.max = self.frame_limits[self.segment_index] + self.slider.observe(self.slider_moved, names='value', type="change") + + self.update_time(new_frame=0, update_slider=True, update_label=True) + + + +class ScaleWidget(W.VBox): + def __init__(self, **kwargs): + scale_label = W.Label("Scale", + layout=W.Layout(layout=W.Layout(width='95%'), + justify_content="center")) + + self.plus_selector = W.Button( + description="", + disabled=False, + button_style="", # 'success', 'info', 'warning', 'danger' or '' + tooltip="Increase scale", + icon="arrow-up", + # layout=W.Layout(width=f"{0.8 * width_cm}cm", height=f"{0.4 * height_cm}cm"), + layout=W.Layout(width='95%'), + ) + + self.minus_selector = W.Button( + description="", + disabled=False, + button_style="", # 'success', 'info', 'warning', 'danger' or '' + tooltip="Decrease scale", + icon="arrow-down", + # layout=W.Layout(width=f"{0.8 * width_cm}cm", height=f"{0.4 * height_cm}cm"), + layout=W.Layout(width='95%'), + ) + + # controller = {"plus": plus_selector, "minus": minus_selector} + # widget = W.VBox([scale_label, plus_selector, minus_selector]) + + + super(W.VBox, self).__init__(children=[scale_label, self.plus_selector, self.minus_selector], + # layout=W.Layout(align_items="center", width="100%", height="100%"), + **kwargs) From 389737efe1330f1f75afb73caedb41bb6bf84b4d Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Thu, 21 Sep 2023 20:58:38 +0200 Subject: [PATCH 051/115] wip refactor plot traces ipywidget --- src/spikeinterface/widgets/traces.py | 126 ++++++++++++++---- .../widgets/utils_ipywidgets.py | 62 ++++++--- 2 files changed, 145 insertions(+), 43 deletions(-) diff --git a/src/spikeinterface/widgets/traces.py b/src/spikeinterface/widgets/traces.py index c6e36387f8..efd32ffb24 100644 --- a/src/spikeinterface/widgets/traces.py +++ b/src/spikeinterface/widgets/traces.py @@ -279,9 +279,9 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): import ipywidgets.widgets as W from .utils_ipywidgets import ( check_ipywidget_backend, - make_timeseries_controller, + # make_timeseries_controller, make_channel_controller, - make_scale_controller, + # make_scale_controller, TimeSlider, ScaleWidget, @@ -315,21 +315,22 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): - ts_widget, ts_controller = make_timeseries_controller( - t_start, - t_stop, - data_plot["layer_keys"], - rec0.get_num_segments(), - data_plot["time_range"], - data_plot["mode"], - False, - width_cm, - ) + # ts_widget, ts_controller = make_timeseries_controller( + # t_start, + # t_stop, + # data_plot["layer_keys"], + # rec0.get_num_segments(), + # data_plot["time_range"], + # data_plot["mode"], + # False, + # width_cm, + # ) # some widgets self.time_slider = TimeSlider( durations=[rec0.get_duration(s) for s in range(rec0.get_num_segments())], sampling_frequency=rec0.sampling_frequency, + # layout=W.Layout(height="2cm"), ) self.layer_selector = W.Dropdown(description="layer", options=data_plot["layer_keys"], layout=W.Layout(width="5cm"),) @@ -338,22 +339,22 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): self.scaler = ScaleWidget() left_sidebar = W.VBox( children=[self.layer_selector, self.mode_selector, self.scaler], - layout=W.Layout(width="5cm"), + 
layout=W.Layout(width="3.5cm"), ) ch_widget, ch_controller = make_channel_controller(rec0, width_cm=ratios[2] * width_cm, height_cm=height_cm) - scale_widget, scale_controller = make_scale_controller(width_cm=ratios[0] * width_cm, height_cm=height_cm) + # scale_widget, scale_controller = make_scale_controller(width_cm=ratios[0] * width_cm, height_cm=height_cm) - self.controller = ts_controller - self.controller.update(ch_controller) - self.controller.update(scale_controller) + # self.controller = ts_controller + # self.controller.update(ch_controller) + # self.controller.update(scale_controller) self.recordings = data_plot["recordings"] self.return_scaled = data_plot["return_scaled"] self.list_traces = None - self.actual_segment_index = self.controller["segment_index"].value + # self.actual_segment_index = self.controller["segment_index"].value self.rec0 = self.recordings[self.data_plot["layer_keys"][0]] self.t_stops = [ @@ -361,11 +362,11 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): for seg_index in range(self.rec0.get_num_segments()) ] - for w in self.controller.values(): - if isinstance(w, widgets.Button): - w.on_click(self._update_ipywidget) - else: - w.observe(self._update_ipywidget) + # for w in self.controller.values(): + # if isinstance(w, widgets.Button): + # w.on_click(self._update_ipywidget) + # else: + # w.observe(self._update_ipywidget) self.widget = widgets.AppLayout( center=self.figure.canvas, @@ -379,12 +380,89 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): ) # a first update - self._update_ipywidget(None) + # self._update_ipywidget(None) + + self._retrieve_traces() + self._update_plot() + + # only layer selector and time change generate a new traces retrieve + self.time_slider.observe(self._retrieve_traces, names='value', type="change") + self.layer_selector.observe(self._retrieve_traces, names='value', type="change") + # other widgets only refresh + self.scaler.observe(self._update_plot, names='value', type="change") + self.mode_selector.observe(self._update_plot, names='value', type="change") + if backend_kwargs["display"]: # self.check_backend() display(self.widget) + + + def _retrieve_traces(self, change=None): + # done when: + # * time or window is changes + # * layer is changed + + # TODO connect with channel selector + channel_ids = self.rec0.channel_ids + + # all_channel_ids = self.recordings[list(self.recordings.keys())[0]].channel_ids + # if self.data_plot["order"] is not None: + # all_channel_ids = all_channel_ids[self.data_plot["order"]] + # channel_ids = all_channel_ids[channel_indices] + if self.data_plot["order_channel_by_depth"]: + order, _ = order_channels_by_depth(self.rec0, channel_ids) + else: + order = None + + start_frame, end_frame, segment_index = self.time_slider.value + time_range = np.array([start_frame, end_frame]) / self.rec0.sampling_frequency + + times, list_traces, frame_range, channel_ids = _get_trace_list( + self.recordings, channel_ids, time_range, segment_index, order, self.return_scaled + ) + self.list_traces = list_traces + + self._update_plot() + + def _update_plot(self, change=None): + # done when: + # * time or window is changed (after _retrive_traces) + # * layer is changed (after _retrive_traces) + #  * scale is change + # * mode is change + + data_plot = self.next_data_plot + + # matplotlib next_data_plot dict update at each call + data_plot["mode"] = self.mode_selector.value + # data_plot["frame_range"] = frame_range + # data_plot["time_range"] = time_range + data_plot["with_colorbar"] = False + # 
data_plot["recordings"] = recordings + # data_plot["layer_keys"] = layer_keys + # data_plot["list_traces"] = list_traces_plot + # data_plot["times"] = times + # data_plot["clims"] = clims + # data_plot["channel_ids"] = channel_ids + + list_traces = [traces * self.scaler.value for traces in self.list_traces] + data_plot["list_traces"] = list_traces + + backend_kwargs = {} + backend_kwargs["ax"] = self.ax + + self.ax.clear() + self.plot_matplotlib(data_plot, **backend_kwargs) + + fig = self.ax.figure + fig.canvas.draw() + fig.canvas.flush_events() + + + + def _update_ipywidget(self, change): import ipywidgets.widgets as widgets diff --git a/src/spikeinterface/widgets/utils_ipywidgets.py b/src/spikeinterface/widgets/utils_ipywidgets.py index 674a2d2cc7..ad0ead7bc0 100644 --- a/src/spikeinterface/widgets/utils_ipywidgets.py +++ b/src/spikeinterface/widgets/utils_ipywidgets.py @@ -109,7 +109,7 @@ def make_scale_controller(width_cm, height_cm): class TimeSlider(W.HBox): - position = traitlets.Tuple(traitlets.Int(), traitlets.Int(), traitlets.Int()) + value = traitlets.Tuple(traitlets.Int(), traitlets.Int(), traitlets.Int()) def __init__(self, durations, sampling_frequency, time_range=(0, 1.), **kwargs): @@ -123,10 +123,10 @@ def __init__(self, durations, sampling_frequency, time_range=(0, 1.), **kwargs): self.frame_range = (start_frame, end_frame) self.segment_index = 0 - self.position = (start_frame, end_frame, self.segment_index) + self.value = (start_frame, end_frame, self.segment_index) - layout = W.Layout(align_items="center", width="1.5cm", height="100%") + layout = W.Layout(align_items="center", width="2cm", hight="1.5cm") but_left = W.Button(description='', disabled=False, button_style='', icon='arrow-left', layout=layout) but_right = W.Button(description='', disabled=False, button_style='', icon='arrow-right', layout=layout) @@ -176,21 +176,21 @@ def __init__(self, durations, sampling_frequency, time_range=(0, 1.), **kwargs): layout=W.Layout(align_items="center", width="100%", height="100%"), **kwargs) - self.observe(self.position_changed, names=['position'], type="change") + self.observe(self.value_changed, names=['value'], type="change") - def position_changed(self, change=None): + def value_changed(self, change=None): - self.unobserve(self.position_changed, names=['position'], type="change") + self.unobserve(self.value_changed, names=['value'], type="change") - start, stop, seg_index = self.position + start, stop, seg_index = self.value if seg_index < 0 or seg_index >= self.num_segments: - self.position = change['old'] + self.value = change['old'] return if start < 0 or stop < 0: - self.position = change['old'] + self.value = change['old'] return if start >= self.frame_limits[seg_index] or start > self.frame_limits[seg_index]: - self.position = change['old'] + self.value = change['old'] return self.segment_selector.value = seg_index @@ -198,7 +198,7 @@ def position_changed(self, change=None): delta_s = (stop - start) / self.sampling_frequency self.window_sizer.value = delta_s - self.observe(self.position_changed, names=['position'], type="change") + self.observe(self.value_changed, names=['value'], type="change") def update_time(self, new_frame=None, new_time=None, update_slider=False, update_label=False): if new_frame is None and new_time is None: @@ -228,6 +228,7 @@ def update_time(self, new_frame=None, new_time=None, update_slider=False, update self.slider.observe(self.slider_moved, names='value', type="change") self.frame_range = (start_frame, end_frame) + self.value = 
(start_frame, end_frame, self.segment_index) def time_label_changed(self, change=None): try: @@ -273,8 +274,14 @@ def segment_changed(self, change=None): class ScaleWidget(W.VBox): - def __init__(self, **kwargs): - scale_label = W.Label("Scale", + value = traitlets.Float() + + def __init__(self, value=1., factor=1.2, **kwargs): + + assert factor > 1. + self.factor = factor + + self.scale_label = W.Label("Scale", layout=W.Layout(layout=W.Layout(width='95%'), justify_content="center")) @@ -285,7 +292,7 @@ def __init__(self, **kwargs): tooltip="Increase scale", icon="arrow-up", # layout=W.Layout(width=f"{0.8 * width_cm}cm", height=f"{0.4 * height_cm}cm"), - layout=W.Layout(width='95%'), + layout=W.Layout(width='60%', align_self='center'), ) self.minus_selector = W.Button( @@ -295,13 +302,30 @@ def __init__(self, **kwargs): tooltip="Decrease scale", icon="arrow-down", # layout=W.Layout(width=f"{0.8 * width_cm}cm", height=f"{0.4 * height_cm}cm"), - layout=W.Layout(width='95%'), + layout=W.Layout(width='60%', align_self='center'), ) - # controller = {"plus": plus_selector, "minus": minus_selector} - # widget = W.VBox([scale_label, plus_selector, minus_selector]) + self.plus_selector.on_click(self.plus_clicked) + self.minus_selector.on_click(self.minus_clicked) - - super(W.VBox, self).__init__(children=[scale_label, self.plus_selector, self.minus_selector], + self.value = 1. + super(W.VBox, self).__init__(children=[self.plus_selector, self.scale_label, self.minus_selector], # layout=W.Layout(align_items="center", width="100%", height="100%"), **kwargs) + + self.update_label() + self.observe(self.value_changed, names=['value'], type="change") + + def update_label(self): + self.scale_label.value = f"Scale: {self.value:0.2f}" + + + def plus_clicked(self, change=None): + self.value = self.value * self.factor + + def minus_clicked(self, change=None): + self.value = self.value / self.factor + + + def value_changed(self, change=None): + self.update_label() From e5995f2aa6445fd878e1c0881f11299f8ae22a2d Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Thu, 21 Sep 2023 22:59:53 +0200 Subject: [PATCH 052/115] ipywidget backend refactor wip --- src/spikeinterface/widgets/traces.py | 298 +++++------------- .../widgets/utils_ipywidgets.py | 175 ++++++---- 2 files changed, 190 insertions(+), 283 deletions(-) diff --git a/src/spikeinterface/widgets/traces.py b/src/spikeinterface/widgets/traces.py index efd32ffb24..d107c5cb23 100644 --- a/src/spikeinterface/widgets/traces.py +++ b/src/spikeinterface/widgets/traces.py @@ -280,23 +280,23 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): from .utils_ipywidgets import ( check_ipywidget_backend, # make_timeseries_controller, - make_channel_controller, + # make_channel_controller, # make_scale_controller, - TimeSlider, + ChannelSelector, ScaleWidget, - ) check_ipywidget_backend() self.next_data_plot = data_plot.copy() - self.next_data_plot["add_legend"] = False + - recordings = data_plot["recordings"] + self.recordings = data_plot["recordings"] # first layer - rec0 = recordings[data_plot["layer_keys"][0]] + # rec0 = recordings[data_plot["layer_keys"][0]] + rec0 = self.rec0 = self.recordings[self.data_plot["layer_keys"][0]] cm = 1 / 2.54 @@ -310,107 +310,92 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): self.figure, self.ax = plt.subplots(figsize=(0.9 * ratios[1] * width_cm * cm, height_cm * cm)) plt.show() - t_start = 0.0 - t_stop = rec0.get_num_samples(segment_index=0) / rec0.get_sampling_frequency() - - - - # ts_widget, ts_controller = 
From e5995f2aa6445fd878e1c0881f11299f8ae22a2d Mon Sep 17 00:00:00 2001
From: Samuel Garcia
Date: Thu, 21 Sep 2023 22:59:53 +0200
Subject: [PATCH 052/115] ipywidget backend refactor wip

---
 src/spikeinterface/widgets/traces.py          | 298 +++++-------------
 .../widgets/utils_ipywidgets.py               | 175 ++++++----
 2 files changed, 190 insertions(+), 283 deletions(-)

diff --git a/src/spikeinterface/widgets/traces.py b/src/spikeinterface/widgets/traces.py
index efd32ffb24..d107c5cb23 100644
--- a/src/spikeinterface/widgets/traces.py
+++ b/src/spikeinterface/widgets/traces.py
@@ -280,23 +280,23 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs):
         from .utils_ipywidgets import (
             check_ipywidget_backend,
             # make_timeseries_controller,
-            make_channel_controller,
+            # make_channel_controller,
             # make_scale_controller,
             TimeSlider,
+            ChannelSelector,
             ScaleWidget,
-
         )

         check_ipywidget_backend()

         self.next_data_plot = data_plot.copy()
-        self.next_data_plot["add_legend"] = False
+

-        recordings = data_plot["recordings"]
+        self.recordings = data_plot["recordings"]

         # first layer
-        rec0 = recordings[data_plot["layer_keys"][0]]
+        # rec0 = recordings[data_plot["layer_keys"][0]]
+        rec0 = self.rec0 = self.recordings[self.data_plot["layer_keys"][0]]

         cm = 1 / 2.54

@@ -310,107 +310,92 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs):
             self.figure, self.ax = plt.subplots(figsize=(0.9 * ratios[1] * width_cm * cm, height_cm * cm))
             plt.show()

-        t_start = 0.0
-        t_stop = rec0.get_num_samples(segment_index=0) / rec0.get_sampling_frequency()
-
-
-
-        # ts_widget, ts_controller = make_timeseries_controller(
-        #     t_start,
-        #     t_stop,
-        #     data_plot["layer_keys"],
-        #     rec0.get_num_segments(),
-        #     data_plot["time_range"],
-        #     data_plot["mode"],
-        #     False,
-        #     width_cm,
-        # )
-
         # some widgets
         self.time_slider = TimeSlider(
             durations=[rec0.get_duration(s) for s in range(rec0.get_num_segments())],
             sampling_frequency=rec0.sampling_frequency,
             # layout=W.Layout(height="2cm"),
         )
-        self.layer_selector = W.Dropdown(description="layer", options=data_plot["layer_keys"],
-                                         layout=W.Layout(width="5cm"),)
-        self.mode_selector = W.Dropdown(options=["line", "map"], description="mode", value=data_plot["mode"],
-                                        layout=W.Layout(width="5cm"),)
+
+        start_frame = int(data_plot["time_range"][0] * rec0.sampling_frequency)
+        end_frame = int(data_plot["time_range"][1] * rec0.sampling_frequency)
+
+        self.time_slider.value = start_frame, end_frame, data_plot["segment_index"]
+
+        _layer_keys = data_plot["layer_keys"]
+        if len(_layer_keys) > 1:
+            _layer_keys = ['ALL'] + _layer_keys
+        self.layer_selector = W.Dropdown(options=_layer_keys,
+                                         layout=W.Layout(width="95%"),
+                                         )
+        self.mode_selector = W.Dropdown(options=["line", "map"], value=data_plot["mode"],
+                                        # layout=W.Layout(width="5cm"),
+                                        layout=W.Layout(width="95%"),
+                                        )
         self.scaler = ScaleWidget()
+        self.channel_selector = ChannelSelector(self.rec0.channel_ids)
+
         left_sidebar = W.VBox(
-            children=[self.layer_selector, self.mode_selector, self.scaler],
+            children=[
+                W.Label(value="layer"),
+                self.layer_selector,
+                W.Label(value="mode"),
+                self.mode_selector,
+                self.scaler,
+                # self.channel_selector,
+            ],
             layout=W.Layout(width="3.5cm"),
+            align_items='center',
         )

-        ch_widget, ch_controller = make_channel_controller(rec0, width_cm=ratios[2] * width_cm, height_cm=height_cm)
-
-        # scale_widget, scale_controller = make_scale_controller(width_cm=ratios[0] * width_cm, height_cm=height_cm)
-
-        # self.controller = ts_controller
-        # self.controller.update(ch_controller)
-        # self.controller.update(scale_controller)
-
-        self.recordings = data_plot["recordings"]
         self.return_scaled = data_plot["return_scaled"]
-        self.list_traces = None
-        # self.actual_segment_index = self.controller["segment_index"].value
-
-        self.rec0 = self.recordings[self.data_plot["layer_keys"][0]]
-        self.t_stops = [
-            self.rec0.get_num_samples(segment_index=seg_index) / self.rec0.get_sampling_frequency()
-            for seg_index in range(self.rec0.get_num_segments())
-        ]
-
-        # for w in self.controller.values():
-        #     if isinstance(w, widgets.Button):
-        #         w.on_click(self._update_ipywidget)
-        #     else:
-        #         w.observe(self._update_ipywidget)

         self.widget = widgets.AppLayout(
             center=self.figure.canvas,
-            # footer=ts_widget,
             footer=self.time_slider,
-            # left_sidebar=scale_widget,
             left_sidebar = left_sidebar,
-            right_sidebar=ch_widget,
+            right_sidebar=self.channel_selector,
             pane_heights=[0, 6, 1],
             pane_widths=ratios,
         )

         # a first update
-        # self._update_ipywidget(None)
-
         self._retrieve_traces()
         self._update_plot()

-        # only layer selector and time change generate a new traces retrieve
+        # callbacks:
+        # some widgets generate a full retrieve + refresh
         self.time_slider.observe(self._retrieve_traces, names='value', type="change")
         self.layer_selector.observe(self._retrieve_traces, names='value', type="change")
+        self.channel_selector.observe(self._retrieve_traces, names='value', type="change")
         # other widgets only refresh
         self.scaler.observe(self._update_plot, names='value', type="change")
-        self.mode_selector.observe(self._update_plot, names='value', type="change")
+        # map is a special case because needs to check layer also
+        self.mode_selector.observe(self._mode_changed, names='value', type="change")

         if backend_kwargs["display"]:
             # self.check_backend()
             display(self.widget)
-
+    def _get_layers(self):
+        layer = self.layer_selector.value
+        if layer == 'ALL':
+            layer_keys = self.data_plot["layer_keys"]
+        else:
+            layer_keys = [layer]
+        if self.mode_selector.value == "map":
+            layer_keys = layer_keys[:1]
+        return layer_keys
+
+    def _mode_changed(self, change=None):
+        if self.mode_selector.value == "map" and self.layer_selector.value == "ALL":
+            self.layer_selector.value = self.data_plot["layer_keys"][0]
+        else:
+            self._update_plot()

     def _retrieve_traces(self, change=None):
-        # done when:
-        #  * time or window is changes
-        #  * layer is changed
+        channel_ids = np.array(self.channel_selector.value)

-        # TODO connect with channel selector
-        channel_ids = self.rec0.channel_ids
-
-        # all_channel_ids = self.recordings[list(self.recordings.keys())[0]].channel_ids
-        # if self.data_plot["order"] is not None:
-        #     all_channel_ids = all_channel_ids[self.data_plot["order"]]
-        # channel_ids = all_channel_ids[channel_indices]
         if self.data_plot["order_channel_by_depth"]:
             order, _ = order_channels_by_depth(self.rec0, channel_ids)
         else:
@@ -419,176 +404,61 @@ def _retrieve_traces(self, change=None):
         start_frame, end_frame, segment_index = self.time_slider.value
         time_range = np.array([start_frame, end_frame]) / self.rec0.sampling_frequency

+        self._selected_recordings = {k: self.recordings[k] for k in self._get_layers()}
         times, list_traces, frame_range, channel_ids = _get_trace_list(
-            self.recordings, channel_ids, time_range, segment_index, order, self.return_scaled
+            self._selected_recordings, channel_ids, time_range, segment_index, order, self.return_scaled
         )
-        self.list_traces = list_traces
+
+        self._channel_ids = channel_ids
+        self._list_traces = list_traces
+        self._times = times
+        self._time_range = time_range
+        self._frame_range = (start_frame, end_frame)
+        self._segment_index = segment_index

         self._update_plot()

     def _update_plot(self, change=None):
-        # done when:
-        #  * time or window is changed (after _retrive_traces)
-        #  * layer is changed (after _retrive_traces)
-        #  * scale is change
-        #  * mode is change
-
-        data_plot = self.next_data_plot

         # matplotlib next_data_plot dict update at each call
-        data_plot["mode"] = self.mode_selector.value
-        # data_plot["frame_range"] = frame_range
-        # data_plot["time_range"] = time_range
-        data_plot["with_colorbar"] = False
-        # data_plot["recordings"] = recordings
-        # data_plot["layer_keys"] = layer_keys
-        # data_plot["list_traces"] = list_traces_plot
-        # data_plot["times"] = times
-        # data_plot["clims"] = clims
-        # data_plot["channel_ids"] = channel_ids
-
-        list_traces = [traces * self.scaler.value for traces in self.list_traces]
-        data_plot["list_traces"] = list_traces
-
-        backend_kwargs = {}
-        backend_kwargs["ax"] = self.ax
-
-        self.ax.clear()
-        self.plot_matplotlib(data_plot, **backend_kwargs)
-
-        fig = self.ax.figure
-        fig.canvas.draw()
-        fig.canvas.flush_events()
-
-
-
-
-    def _update_ipywidget(self, change):
-        import ipywidgets.widgets as widgets
-
-        # if changing the layer_key, no need to retrieve and process traces
-        retrieve_traces = True
-        scale_up = False
-        scale_down = False
-        if change is not None:
-            for cname, c in self.controller.items():
-                if isinstance(change, dict):
-                    if change["owner"] is c and cname == "layer_key":
-                        retrieve_traces = False
-                elif isinstance(change, widgets.Button):
-                    if change is c and cname == "plus":
-                        scale_up = True
-                    if change is c and cname == "minus":
-                        scale_down = True
-
-        t_start = self.controller["t_start"].value
-        window = self.controller["window"].value
-        layer_key = self.controller["layer_key"].value
-        segment_index = self.controller["segment_index"].value
-        mode = self.controller["mode"].value
-        chan_start, chan_stop = self.controller["channel_inds"].value
-
-        if mode == "line":
-            self.controller["all_layers"].layout.visibility = "visible"
-            all_layers = self.controller["all_layers"].value
-        elif mode == "map":
-            self.controller["all_layers"].layout.visibility = "hidden"
-            all_layers = False
-
-        if all_layers:
-            self.controller["layer_key"].layout.visibility = "hidden"
-        else:
-            self.controller["layer_key"].layout.visibility = "visible"
-
-        if chan_start == chan_stop:
-            chan_stop += 1
-        channel_indices = np.arange(chan_start, chan_stop)
-
-        t_stop = self.t_stops[segment_index]
-        if self.actual_segment_index != segment_index:
-            # change time_slider limits
-            self.controller["t_start"].max = t_stop
-            self.actual_segment_index = segment_index
-
-        # protect limits
-        if t_start >= t_stop - window:
-            t_start = t_stop - window
-
-        time_range = np.array([t_start, t_start + window])
-
         data_plot = self.next_data_plot
+        mode = self.mode_selector.value
+        layer_keys = self._get_layers()

-        if retrieve_traces:
-            all_channel_ids = self.recordings[list(self.recordings.keys())[0]].channel_ids
-            if self.data_plot["order"] is not None:
-                all_channel_ids = all_channel_ids[self.data_plot["order"]]
-            channel_ids = all_channel_ids[channel_indices]
-            if self.data_plot["order_channel_by_depth"]:
-                order, _ = order_channels_by_depth(self.rec0, channel_ids)
-            else:
-                order = None
-            times, list_traces, frame_range, channel_ids = _get_trace_list(
-                self.recordings, channel_ids, time_range, segment_index, order, self.return_scaled
-            )
-            self.list_traces = list_traces
-        else:
-            times = data_plot["times"]
-            list_traces = data_plot["list_traces"]
-            frame_range = data_plot["frame_range"]
-            channel_ids = data_plot["channel_ids"]
-
-        if all_layers:
-            layer_keys = self.data_plot["layer_keys"]
-            recordings = self.recordings
-            list_traces_plot = self.list_traces
-        else:
-            layer_keys = [layer_key]
-            recordings = {layer_key: self.recordings[layer_key]}
-            list_traces_plot = [self.list_traces[list(self.recordings.keys()).index(layer_key)]]
-
-        if scale_up:
-            if mode == "line":
-                data_plot["vspacing"] *= 0.8
-            elif mode == "map":
-                data_plot["clims"] = {
-                    layer: (1.2 * val[0], 1.2 * val[1]) for layer, val in self.data_plot["clims"].items()
-                }
-        if scale_down:
-            if mode == "line":
-                data_plot["vspacing"] *= 1.2
-            elif mode == "map":
-                data_plot["clims"] = {
-                    layer: (0.8 * val[0], 0.8 * val[1]) for layer, val in self.data_plot["clims"].items()
-                }
-
-        self.next_data_plot["vspacing"] = data_plot["vspacing"]
-        self.next_data_plot["clims"] = data_plot["clims"]
+        data_plot["mode"] = mode
+        data_plot["frame_range"] = self._frame_range
+        data_plot["time_range"] = self._time_range
+        data_plot["with_colorbar"] = False
+        data_plot["recordings"] = self._selected_recordings
+        data_plot["add_legend"] = False

         if mode == "line":
             clims = None
         elif mode == "map":
-            clims = {layer_key: self.data_plot["clims"][layer_key]}
+            clims = {k: self.data_plot["clims"][k] for k in layer_keys}

-        # matplotlib next_data_plot dict update at each call
-        data_plot["mode"] = mode
-        data_plot["frame_range"] = frame_range
-        data_plot["time_range"] = time_range
-        data_plot["with_colorbar"] = False
-        data_plot["recordings"] = recordings
-        data_plot["layer_keys"] = layer_keys
-        data_plot["list_traces"] = list_traces_plot
-        data_plot["times"] = times
         data_plot["clims"] = clims
-        data_plot["channel_ids"] = channel_ids
+        data_plot["channel_ids"] = self._channel_ids
+
+        data_plot["layer_keys"] = layer_keys
+        data_plot["colors"] = {k:self.data_plot["colors"][k] for k in layer_keys}
+
+        list_traces = [traces * self.scaler.value for traces in self._list_traces]
+        data_plot["list_traces"] = list_traces
+        data_plot["times"] = self._times

         backend_kwargs = {}
         backend_kwargs["ax"] = self.ax
+
         self.ax.clear()
         self.plot_matplotlib(data_plot, **backend_kwargs)
+        self.ax.set_title("")

         fig = self.ax.figure
         fig.canvas.draw()
         fig.canvas.flush_events()

+
     def plot_sortingview(self, data_plot, **backend_kwargs):
         import sortingview.views as vv
         from .utils_sortingview import handle_display_and_url

diff --git a/src/spikeinterface/widgets/utils_ipywidgets.py b/src/spikeinterface/widgets/utils_ipywidgets.py
index ad0ead7bc0..ab2b51a7bb 100644
--- a/src/spikeinterface/widgets/utils_ipywidgets.py
+++ b/src/spikeinterface/widgets/utils_ipywidgets.py
@@ -11,35 +11,35 @@ def check_ipywidget_backend():
     assert "ipympl" in mpl_backend, "To use the 'ipywidgets' backend, you have to set %matplotlib widget"

-def make_timeseries_controller(t_start, t_stop, layer_keys, num_segments, time_range, mode, all_layers, width_cm):
-    time_slider = W.FloatSlider(
-        orientation="horizontal",
-        description="time:",
-        value=time_range[0],
-        min=t_start,
-        max=t_stop,
-        continuous_update=False,
-        layout=W.Layout(width=f"{width_cm}cm"),
-    )
-    layer_selector = W.Dropdown(description="layer", options=layer_keys)
-    segment_selector = W.Dropdown(description="segment", options=list(range(num_segments)))
-    window_sizer = W.BoundedFloatText(value=np.diff(time_range)[0], step=0.1, min=0.005, description="win (s)")
-    mode_selector = W.Dropdown(options=["line", "map"], description="mode", value=mode)
-    all_layers = W.Checkbox(description="plot all layers", value=all_layers)
-
-    controller = {
-        "layer_key": layer_selector,
-        "segment_index": segment_selector,
-        "window": window_sizer,
-        "t_start": time_slider,
-        "mode": mode_selector,
-        "all_layers": all_layers,
-    }
-    widget = W.VBox(
-        [time_slider, W.HBox([all_layers, layer_selector, segment_selector, window_sizer, mode_selector])]
-    )
-
-    return widget, controller
+# def make_timeseries_controller(t_start, t_stop, layer_keys, num_segments, time_range, mode, all_layers, width_cm):
+#     time_slider = W.FloatSlider(
+#         orientation="horizontal",
+#         description="time:",
+#         value=time_range[0],
+#         min=t_start,
+#         max=t_stop,
+#         continuous_update=False,
+#         layout=W.Layout(width=f"{width_cm}cm"),
+#     )
+#     layer_selector = W.Dropdown(description="layer", options=layer_keys)
+#     segment_selector = W.Dropdown(description="segment", options=list(range(num_segments)))
+#     window_sizer = W.BoundedFloatText(value=np.diff(time_range)[0], step=0.1, min=0.005, description="win (s)")
+#     mode_selector = W.Dropdown(options=["line", "map"], description="mode", value=mode)
+#     all_layers = W.Checkbox(description="plot all layers", value=all_layers)
+
+#     controller = {
+#         "layer_key": layer_selector,
+#         "segment_index": segment_selector,
+#         "window": window_sizer,
+#         "t_start": time_slider,
+#         "mode": mode_selector,
+#         "all_layers": all_layers,
+#     }
+#     widget = W.VBox(
+#         [time_slider, W.HBox([all_layers, layer_selector, segment_selector, window_sizer, mode_selector])]
+#     )

+#     return widget, controller

 def make_unit_controller(unit_ids, all_unit_ids, width_cm, height_cm):
@@ -58,52 +58,52 @@ def make_unit_controller(unit_ids, all_unit_ids, width_cm, height_cm):

     return widget, controller

-def make_channel_controller(recording, width_cm, height_cm):
-    channel_label = W.Label("channel indices:", layout=W.Layout(justify_content="center"))
-    channel_selector = W.IntRangeSlider(
-        value=[0, recording.get_num_channels()],
-        min=0,
-        max=recording.get_num_channels(),
-        step=1,
-        disabled=False,
-        continuous_update=False,
-        orientation="vertical",
-        readout=True,
-        readout_format="d",
-        layout=W.Layout(width=f"{0.8 * width_cm}cm", height=f"{height_cm}cm"),
-    )
+# def make_channel_controller(recording, width_cm, height_cm):
+#     channel_label = W.Label("channel indices:", layout=W.Layout(justify_content="center"))
+#     channel_selector = W.IntRangeSlider(
+#         value=[0, recording.get_num_channels()],
+#         min=0,
+#         max=recording.get_num_channels(),
+#         step=1,
+#         disabled=False,
+#         continuous_update=False,
+#         orientation="vertical",
+#         readout=True,
+#         readout_format="d",
+#         layout=W.Layout(width=f"{0.8 * width_cm}cm", height=f"{height_cm}cm"),
+#     )

-    controller = {"channel_inds": channel_selector}
-    widget = W.VBox([channel_label, channel_selector])
+#     controller = {"channel_inds": channel_selector}
+#     widget = W.VBox([channel_label, channel_selector])

-    return widget, controller
+#     return widget, controller

-def make_scale_controller(width_cm, height_cm):
-    scale_label = W.Label("Scale", layout=W.Layout(justify_content="center"))
+# def make_scale_controller(width_cm, height_cm):
+#     scale_label = W.Label("Scale", layout=W.Layout(justify_content="center"))

-    plus_selector = W.Button(
-        description="",
-        disabled=False,
-        button_style="",  # 'success', 'info', 'warning', 'danger' or ''
-        tooltip="Increase scale",
-        icon="arrow-up",
-        layout=W.Layout(width=f"{0.8 * width_cm}cm", height=f"{0.4 * height_cm}cm"),
-    )
+#     plus_selector = W.Button(
+#         description="",
+#         disabled=False,
+#         button_style="",  # 'success', 'info', 'warning', 'danger' or ''
+#         tooltip="Increase scale",
+#         icon="arrow-up",
+#         layout=W.Layout(width=f"{0.8 * width_cm}cm", height=f"{0.4 * height_cm}cm"),
+#     )

-    minus_selector = W.Button(
-        description="",
-        disabled=False,
-        button_style="",  # 'success', 'info', 'warning', 'danger' or ''
-        tooltip="Decrease scale",
-        icon="arrow-down",
-        layout=W.Layout(width=f"{0.8 * width_cm}cm", height=f"{0.4 * height_cm}cm"),
-    )
+#     minus_selector = W.Button(
+#         description="",
+#         disabled=False,
+#         button_style="",  # 'success', 'info', 'warning', 'danger' or ''
+#         tooltip="Decrease scale",
+#         icon="arrow-down",
+#         layout=W.Layout(width=f"{0.8 * width_cm}cm", height=f"{0.4 * height_cm}cm"),
+#     )

-    controller = {"plus": plus_selector, "minus": minus_selector}
-    widget = W.VBox([scale_label, plus_selector, minus_selector])
+#     controller = {"plus": plus_selector, "minus": minus_selector}
+#     widget = W.VBox([scale_label, plus_selector, minus_selector])

-    return widget, controller
+#     return widget, controller

@@ -126,7 +126,7 @@ def __init__(self, durations, sampling_frequency, time_range=(0, 1.), **kwargs):
         self.value = (start_frame, end_frame, self.segment_index)

-        layout = W.Layout(align_items="center", width="2cm", hight="1.5cm")
+        layout = W.Layout(align_items="center", width="2.5cm", height="1.cm")
         but_left = W.Button(description='', disabled=False, button_style='', icon='arrow-left', layout=layout)
         but_right = W.Button(description='', disabled=False, button_style='', icon='arrow-right', layout=layout)

@@ -141,7 +141,7 @@ def __init__(self, durations, sampling_frequency, time_range=(0, 1.), **kwargs):
         # DatetimePicker is only for ipywidget v8 (which is not working in vscode 2023-03)
         self.time_label = W.Text(value=f'{time_range[0]}',description='',
-                                 disabled=False, layout=W.Layout(width='5.5cm'))
+                                 disabled=False, layout=W.Layout(width='2.5cm'))
         self.time_label.observe(self.time_label_changed, names='value', type="change")

@@ -271,6 +271,43 @@ def segment_changed(self, change=None):
         self.update_time(new_frame=0, update_slider=True, update_label=True)

+class ChannelSelector(W.VBox):
+    value = traitlets.List()
+
+    def __init__(self, channel_ids, **kwargs):
+        self.channel_ids = list(channel_ids)
+        self.value = self.channel_ids
+
+        channel_label = W.Label("Channels", layout=W.Layout(justify_content="center"))
+        n = len(channel_ids)
+        self.slider = W.IntRangeSlider(
+            value=[0, n],
+            min=0,
+            max=n,
+            step=1,
+            disabled=False,
+            continuous_update=False,
+            orientation="vertical",
+            readout=True,
+            readout_format="d",
+            # layout=W.Layout(width=f"{0.8 * width_cm}cm", height=f"{height_cm}cm"),
+            layout=W.Layout(height="100%"),
+        )
+
+
+
+        super(W.VBox, self).__init__(children=[channel_label, self.slider],
+                                     layout=W.Layout(align_items="center"),
+                                     # layout=W.Layout(align_items="center", width="100%", height="100%"),
+                                     **kwargs)
+        self.slider.observe(self.on_slider_changed, names=['value'], type="change")
+        # self.update_label()
+        # self.observe(self.value_changed, names=['value'], type="change")
+
+    def on_slider_changed(self, change=None):
+        i0, i1 = self.slider.value
+        self.value = self.channel_ids[i0:i1]
+

 class ScaleWidget(W.VBox):
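
The refactor above splits the callbacks into two tiers: widgets whose changes require re-reading traces trigger `_retrieve_traces` (which caches the data and then redraws), while purely cosmetic widgets trigger `_update_plot` only. A rough sketch of that wiring, with hypothetical stand-in callbacks:

    import ipywidgets as W

    time_slider = W.IntSlider(description="frame")        # moving in time: data must be re-fetched
    scaler = W.FloatText(value=1.0, description="scale")  # rescaling: a redraw is enough

    cache = {}

    def update_plot(change=None):
        # cheap path: reuse cached traces, apply the scale, redraw
        traces = cache.get("traces")

    def retrieve_and_plot(change=None):
        # expensive path: fetch data for the new position, then redraw
        cache["traces"] = ...   # placeholder for the actual fetch
        update_plot()

    time_slider.observe(retrieve_and_plot, names="value", type="change")
    scaler.observe(update_plot, names="value", type="change")
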
From 7b92c2153d4fad412823100fd77079e3cf286138 Mon Sep 17 00:00:00 2001
From: Samuel Garcia
Date: Fri, 22 Sep 2023 08:06:37 +0200
Subject: [PATCH 053/115] improve channel selector

---
 .../widgets/utils_ipywidgets.py               | 38 +++++++++++++++++--
 1 file changed, 35 insertions(+), 3 deletions(-)

diff --git a/src/spikeinterface/widgets/utils_ipywidgets.py b/src/spikeinterface/widgets/utils_ipywidgets.py
index ab2b51a7bb..705dd09f23 100644
--- a/src/spikeinterface/widgets/utils_ipywidgets.py
+++ b/src/spikeinterface/widgets/utils_ipywidgets.py
@@ -294,20 +294,52 @@ def __init__(self, channel_ids, **kwargs):
             layout=W.Layout(height="100%"),
         )

+        # first channel are bottom: need reverse
+        self.selector = W.SelectMultiple(
+            options=self.channel_ids[::-1],
+            value=self.channel_ids[::-1],
+            disabled=False,
+            # layout=W.Layout(width=f"{width_cm}cm", height=f"{height_cm}cm"),
+            layout=W.Layout(height="100%", width="2cm"),
+        )
+        hbox = W.HBox(children=[self.slider, self.selector])

-
-        super(W.VBox, self).__init__(children=[channel_label, self.slider],
+        super(W.VBox, self).__init__(children=[channel_label, hbox],
                                      layout=W.Layout(align_items="center"),
                                      # layout=W.Layout(align_items="center", width="100%", height="100%"),
                                      **kwargs)
         self.slider.observe(self.on_slider_changed, names=['value'], type="change")
+        self.selector.observe(self.on_selector_changed, names=['value'], type="change")
+
+        # TODO external value change
         # self.observe(self.value_changed, names=['value'], type="change")

     def on_slider_changed(self, change=None):
         i0, i1 = self.slider.value
+
+        self.selector.unobserve(self.on_selector_changed, names=['value'], type="change")
+        self.selector.value = self.channel_ids[i0:i1][::-1]
+        self.selector.observe(self.on_selector_changed, names=['value'], type="change")
+
         self.value = self.channel_ids[i0:i1]

+    def on_selector_changed(self, change=None):
+        channel_ids = self.selector.value
+        channel_ids = channel_ids[::-1]
+
+        if len(channel_ids) > 0:
+            self.slider.unobserve(self.on_slider_changed, names=['value'], type="change")
+            i0 = self.channel_ids.index(channel_ids[0])
+            i1 = self.channel_ids.index(channel_ids[-1]) + 1
+            self.slider.value = (i0, i1)
+            self.slider.observe(self.on_slider_changed, names=['value'], type="change")
+
+        self.value = channel_ids
+
+
+
+

 class ScaleWidget(W.VBox):
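
Usage sketch for the improved ChannelSelector (the class patched above; `recording` stands in for any spikeinterface recording). The range slider and the multi-select stay consistent because each callback mutes the other before mirroring a change into it:

    cs = ChannelSelector(recording.channel_ids)

    def on_channels_changed(change):
        print("selected channels:", change["new"])

    cs.observe(on_channels_changed, names="value", type="change")
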
From c46a7cba4b1e937d40050d0061017256ab5dade3 Mon Sep 17 00:00:00 2001
From: Alessio Buccino
Date: Fri, 22 Sep 2023 10:31:05 +0200
Subject: [PATCH 054/115] Allow to restrict sparsity

---
 .../postprocessing/amplitude_scalings.py      | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/src/spikeinterface/postprocessing/amplitude_scalings.py b/src/spikeinterface/postprocessing/amplitude_scalings.py
index 4dab68fdf8..3eac333781 100644
--- a/src/spikeinterface/postprocessing/amplitude_scalings.py
+++ b/src/spikeinterface/postprocessing/amplitude_scalings.py
@@ -68,7 +68,6 @@ def _run(self, **job_kwargs):
         delta_collision_samples = int(delta_collision_ms / 1000 * we.sampling_frequency)

         return_scaled = we._params["return_scaled"]
-        unit_ids = we.unit_ids

         if ms_before is not None:
             assert (
@@ -82,9 +81,16 @@ def _run(self, **job_kwargs):
         cut_out_before = int(ms_before / 1000 * we.sampling_frequency) if ms_before is not None else nbefore
         cut_out_after = int(ms_after / 1000 * we.sampling_frequency) if ms_after is not None else nafter

-        if we.is_sparse():
+        if we.is_sparse() and self._params["sparsity"] is None:
             sparsity = we.sparsity
-        elif self._params["sparsity"] is not None:
+        elif we.is_sparse() and self._params["sparsity"] is not None:
+            sparsity = self._params["sparsity"]
+            # assert provided sparsity is sparser than the one in the waveform extractor
+            waveform_sparsity = we.sparsity
+            assert np.all(
+                np.sum(waveform_sparsity.mask, 1) - np.sum(sparsity.mask, 1) > 0
+            ), "The provided sparsity needs to be sparser than the one in the waveform extractor!"
+        elif not we.is_sparse() and self._params["sparsity"] is not None:
             sparsity = self._params["sparsity"]
         else:
             if self._params["max_dense_channels"] is not None:
@@ -362,7 +368,7 @@ def _amplitude_scalings_chunk(segment_index, start_frame, end_frame, worker_ctx):
             template = template[cut_out_before - sample_index :]
         elif sample_index + cut_out_after > end_frame + right:
             local_waveform = traces_with_margin[cut_out_start:, sparse_indices]
-            template = template[: -(sample_index + cut_out_after - end_frame)]
+            template = template[: -(sample_index + cut_out_after - end_frame - right)]
         else:
             local_waveform = traces_with_margin[cut_out_start:cut_out_end, sparse_indices]
         assert template.shape == local_waveform.shape
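
A hypothetical use of the restricted sparsity (the public entry point is assumed to forward `sparsity` into the `_params` read above; the exact signature may differ). Note that the new assert only compares per-unit channel counts and requires strictly fewer channels per unit than the extractor's own sparsity:

    from spikeinterface import compute_sparsity
    from spikeinterface.postprocessing import compute_amplitude_scalings

    # a sparsity strictly tighter than the one stored in the waveform extractor
    tight_sparsity = compute_sparsity(we, method="radius", radius_um=30.0)
    scalings = compute_amplitude_scalings(we, sparsity=tight_sparsity)
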
From 2e305586d5b39bb8bfa89280057579a97726e93a Mon Sep 17 00:00:00 2001
From: Samuel Garcia
Date: Fri, 22 Sep 2023 11:09:05 +0200
Subject: [PATCH 055/115] ipywidgets backend start UnitController

---
 src/spikeinterface/widgets/amplitudes.py      | 69 ++++++++++---------
 .../widgets/utils_ipywidgets.py               | 39 +++++++++--
 2 files changed, 71 insertions(+), 37 deletions(-)

diff --git a/src/spikeinterface/widgets/amplitudes.py b/src/spikeinterface/widgets/amplitudes.py
index 7ef6e0ff61..b60de98cb0 100644
--- a/src/spikeinterface/widgets/amplitudes.py
+++ b/src/spikeinterface/widgets/amplitudes.py
@@ -171,9 +171,10 @@ def plot_matplotlib(self, data_plot, **backend_kwargs):

     def plot_ipywidgets(self, data_plot, **backend_kwargs):
         import matplotlib.pyplot as plt
-        import ipywidgets.widgets as widgets
+        # import ipywidgets.widgets as widgets
+        import ipywidgets.widgets as W
         from IPython.display import display
-        from .utils_ipywidgets import check_ipywidget_backend, make_unit_controller
+        from .utils_ipywidgets import check_ipywidget_backend, make_unit_controller, UnitSelector

         check_ipywidget_backend()

@@ -188,60 +189,62 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs):
         ratios = [0.15, 0.85]

         with plt.ioff():
-            output = widgets.Output()
+            output = W.Output()
             with output:
                 self.figure = plt.figure(figsize=((ratios[1] * width_cm) * cm, height_cm * cm))
                 plt.show()

-        data_plot["unit_ids"] = data_plot["unit_ids"][:1]
-        unit_widget, unit_controller = make_unit_controller(
-            data_plot["unit_ids"], we.unit_ids, ratios[0] * width_cm, height_cm
-        )
+        self.unit_selector = UnitSelector(we.unit_ids)
+        self.unit_selector.value = list(we.unit_ids)[:1]

-        plot_histograms = widgets.Checkbox(
+        self.checkbox_histograms = W.Checkbox(
             value=data_plot["plot_histograms"],
-            description="plot histograms",
-            disabled=False,
+            description="hist",
+            # disabled=False,
         )
-        footer = plot_histograms
-
-        self.controller = {"plot_histograms": plot_histograms}
-        self.controller.update(unit_controller)
-
-        for w in self.controller.values():
-            w.observe(self._update_ipywidget)
+        left_sidebar = W.VBox(
+            children=[
+                self.unit_selector,
+                self.checkbox_histograms,
+            ],
+            layout = W.Layout(align_items="center", width="4cm", height="100%"),
+        )

-        self.widget = widgets.AppLayout(
+        self.widget = W.AppLayout(
             center=self.figure.canvas,
-            left_sidebar=unit_widget,
+            left_sidebar=left_sidebar,
             pane_widths=ratios + [0],
-            footer=footer,
         )

         # a first update
-        self._update_ipywidget(None)
+        self._full_update_plot()
+
+        self.unit_selector.observe(self._update_plot, names='value', type="change")
+        self.checkbox_histograms.observe(self._full_update_plot, names='value', type="change")

         if backend_kwargs["display"]:
             display(self.widget)

-    def _update_ipywidget(self, change):
+    def _full_update_plot(self, change=None):
         self.figure.clear()
+        data_plot = self.next_data_plot
+        data_plot["unit_ids"] = self.unit_selector.value
+        data_plot["plot_histograms"] = self.checkbox_histograms.value
+
+        backend_kwargs = dict(figure=self.figure, axes=None, ax=None)
+        self.plot_matplotlib(data_plot, **backend_kwargs)
+        self._update_plot()

-        unit_ids = self.controller["unit_ids"].value
-        plot_histograms = self.controller["plot_histograms"].value
+    def _update_plot(self, change=None):
+        for ax in self.axes.flatten():
+            ax.clear()

-        # matplotlib next_data_plot dict update at each call
         data_plot = self.next_data_plot
-        data_plot["unit_ids"] = unit_ids
-        data_plot["plot_histograms"] = plot_histograms
-
-        backend_kwargs = {}
-        # backend_kwargs["figure"] = self.fig
-        backend_kwargs["figure"] = self.figure
-        backend_kwargs["axes"] = None
-        backend_kwargs["ax"] = None
+        data_plot["unit_ids"] = self.unit_selector.value
+        data_plot["plot_histograms"] = self.checkbox_histograms.value

+        backend_kwargs = dict(figure=None, axes=self.axes, ax=None)
         self.plot_matplotlib(data_plot, **backend_kwargs)

         self.figure.canvas.draw()

diff --git a/src/spikeinterface/widgets/utils_ipywidgets.py b/src/spikeinterface/widgets/utils_ipywidgets.py
index 705dd09f23..d2c41f234a 100644
--- a/src/spikeinterface/widgets/utils_ipywidgets.py
+++ b/src/spikeinterface/widgets/utils_ipywidgets.py
@@ -338,10 +338,6 @@ def on_selector_changed(self, change=None):

-
-
-
-
 class ScaleWidget(W.VBox):
     value = traitlets.Float()

@@ -398,3 +394,38 @@ def minus_clicked(self, change=None):

     def value_changed(self, change=None):
         self.update_label()
+
+
+class UnitSelector(W.VBox):
+    value = traitlets.List()
+
+    def __init__(self, unit_ids, **kwargs):
+        self.unit_ids = list(unit_ids)
+        self.value = self.unit_ids
+
+        label = W.Label("Units", layout=W.Layout(justify_content="center"))
+
+        self.selector = W.SelectMultiple(
+            options=self.unit_ids,
+            value=self.unit_ids,
+            disabled=False,
+            layout=W.Layout(height="100%", width="2cm"),
+        )
+
+        super(W.VBox, self).__init__(children=[label, self.selector],
+                                     layout=W.Layout(align_items="center"),
+                                     **kwargs)
+
+        self.selector.observe(self.on_selector_changed, names=['value'], type="change")
+
+        self.observe(self.value_changed, names=['value'], type="change")
+
+    def on_selector_changed(self, change=None):
+        unit_ids = self.selector.value
+        self.value = unit_ids
+
+    def value_changed(self, change=None):
+        self.selector.unobserve(self.on_selector_changed, names=['value'], type="change")
+        self.selector.value = change['new']
+        self.selector.observe(self.on_selector_changed, names=['value'], type="change")
+
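
The new UnitSelector mirrors external writes to `value` back into its SelectMultiple, so the amplitude widget can both read and drive the selection. Usage sketch (`we` stands in for a WaveformExtractor):

    unit_selector = UnitSelector(we.unit_ids)
    unit_selector.value = list(we.unit_ids)[:1]   # programmatic change, the selector follows

    unit_selector.observe(lambda change: print("units:", change["new"]),
                          names="value", type="change")
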
From 4e31329d9aed376ecc41c4238a2f4836f94054ea Mon Sep 17 00:00:00 2001
From: Alessio Buccino
Date: Fri, 22 Sep 2023 11:37:18 +0200
Subject: [PATCH 056/115] Add spikes on borders when generating sorting, PCA
 sparse return fixes

---
 src/spikeinterface/core/generate.py           | 28 +++++++++++++++++
 .../core/tests/test_generate.py               | 30 +++++++++++++++++--
 .../postprocessing/amplitude_scalings.py      | 12 ++++----
 .../postprocessing/principal_component.py     | 15 ++++++++--
 .../tests/common_extension_tests.py           | 26 ++++++++++++++--
 .../tests/test_principal_component.py         | 12 ++++----
 6 files changed, 104 insertions(+), 19 deletions(-)

diff --git a/src/spikeinterface/core/generate.py b/src/spikeinterface/core/generate.py
index 401c498f03..741dd20000 100644
--- a/src/spikeinterface/core/generate.py
+++ b/src/spikeinterface/core/generate.py
@@ -123,6 +123,9 @@ def generate_sorting(
     firing_rates=3.0,
     empty_units=None,
     refractory_period_ms=3.0,  # in ms
+    add_spikes_on_borders=False,
+    num_spikes_per_border=3,
+    border_size_samples=20,
     seed=None,
 ):
     """
@@ -142,6 +145,12 @@ def generate_sorting(
         List of units that will have no spikes. (used for testing mainly).
     refractory_period_ms : float, default: 3.0
         The refractory period in ms
+    add_spikes_on_borders : bool, default: False
+        If True, spikes will be added close to the borders of the segments.
+    num_spikes_per_border : int, default: 3
+        The number of spikes to add close to the borders of the segments.
+    border_size_samples : int, default: 20
+        The size of the border in samples to add border spikes.
     seed : int, default: None
         The random seed

@@ -151,11 +160,13 @@ def generate_sorting(
         The sorting object
     """
     seed = _ensure_seed(seed)
+    rng = np.random.default_rng(seed)
     num_segments = len(durations)
     unit_ids = np.arange(num_units)

     spikes = []
     for segment_index in range(num_segments):
+        num_samples = int(sampling_frequency * durations[segment_index])
         times, labels = synthesize_random_firings(
             num_units=num_units,
             sampling_frequency=sampling_frequency,
@@ -175,7 +186,23 @@ def generate_sorting(
         spikes_in_seg["unit_index"] = labels
         spikes_in_seg["segment_index"] = segment_index
         spikes.append(spikes_in_seg)
+
+        if add_spikes_on_borders:
+            spikes_on_borders = np.zeros(2 * num_spikes_per_border, dtype=minimum_spike_dtype)
+            spikes_on_borders["segment_index"] = segment_index
+            spikes_on_borders["unit_index"] = rng.choice(num_units, size=2 * num_spikes_per_border, replace=True)
+            # at start
+            spikes_on_borders["sample_index"][:num_spikes_per_border] = rng.integers(
+                0, border_size_samples, num_spikes_per_border
+            )
+            # at end
+            spikes_on_borders["sample_index"][num_spikes_per_border:] = rng.integers(
+                num_samples - border_size_samples, num_samples, num_spikes_per_border
+            )
+            spikes.append(spikes_on_borders)
+
     spikes = np.concatenate(spikes)
+    spikes = spikes[np.lexsort((spikes["sample_index"], spikes["segment_index"]))]

     sorting = NumpySorting(spikes, sampling_frequency, unit_ids)

@@ -596,6 +623,7 @@ def __init__(
         dtype = np.dtype(dtype).name  # Cast to string for serialization
         if dtype not in ("float32", "float64"):
             raise ValueError(f"'dtype' must be 'float32' or 'float64' but is {dtype}")
+        assert strategy in ("tile_pregenerated", "on_the_fly"), "'strategy' must be 'tile_pregenerated' or 'on_the_fly'"

         BaseRecording.__init__(self, sampling_frequency=sampling_frequency, channel_ids=channel_ids, dtype=dtype)

diff --git a/src/spikeinterface/core/tests/test_generate.py b/src/spikeinterface/core/tests/test_generate.py
index 9ba5de42d6..3844e421ac 100644
--- a/src/spikeinterface/core/tests/test_generate.py
+++ b/src/spikeinterface/core/tests/test_generate.py
@@ -26,15 +26,38 @@

 def test_generate_recording():
-    # TODO even this is extenssivly tested in all other function
+    # TODO even this is extensively tested in all other functions
     pass

 def test_generate_sorting():
-    # TODO even this is extenssivly tested in all other function
+    # TODO even this is extensively tested in all other functions
     pass

+def test_generate_sorting_with_spikes_on_borders():
+    num_spikes_on_borders = 10
+    border_size_samples = 10
+    segment_duration = 10
+    for nseg in [1, 2, 3]:
+        sorting = generate_sorting(
+            durations=[segment_duration] * nseg,
+            sampling_frequency=30000,
+            num_units=10,
+            add_spikes_on_borders=True,
+            num_spikes_per_border=num_spikes_on_borders,
+            border_size_samples=border_size_samples,
+        )
+        spikes = sorting.to_spike_vector(concatenated=False)
+        # at least num_border spikes at borders for all segments
+        for i, spikes_in_segment in enumerate(spikes):
+            num_samples = int(segment_duration * 30000)
+            assert np.sum(spikes_in_segment["sample_index"] < border_size_samples) >= num_spikes_on_borders
+            assert (
+                np.sum(spikes_in_segment["sample_index"] >= num_samples - border_size_samples) >= num_spikes_on_borders
+            )
+
+
 def measure_memory_allocation(measure_in_process: bool = True) -> float:
     """
     A local utility to measure memory allocation at a specific point in time.
@@ -399,7 +422,7 @@ def test_generate_ground_truth_recording():
 if __name__ == "__main__":
     strategy = "tile_pregenerated"
     # strategy = "on_the_fly"
-    test_noise_generator_memory()
+    # test_noise_generator_memory()
     # test_noise_generator_under_giga()
     # test_noise_generator_correct_shape(strategy)
     # test_noise_generator_consistency_across_calls(strategy, 0, 5)
@@ -410,3 +433,4 @@ def test_generate_ground_truth_recording():
     # test_generate_templates()
     # test_inject_templates()
     # test_generate_ground_truth_recording()
+    test_generate_sorting_with_spikes_on_borders()

diff --git a/src/spikeinterface/postprocessing/amplitude_scalings.py b/src/spikeinterface/postprocessing/amplitude_scalings.py
index 3eac333781..c86337a30d 100644
--- a/src/spikeinterface/postprocessing/amplitude_scalings.py
+++ b/src/spikeinterface/postprocessing/amplitude_scalings.py
@@ -16,6 +16,7 @@ class AmplitudeScalingsCalculator(BaseWaveformExtractorExtension):
     """

     extension_name = "amplitude_scalings"
+    handle_sparsity = True

     def __init__(self, waveform_extractor):
         BaseWaveformExtractorExtension.__init__(self, waveform_extractor)
@@ -357,7 +358,7 @@ def _amplitude_scalings_chunk(segment_index, start_frame, end_frame, worker_ctx):
                 continue
             unit_index = spike["unit_index"]
             sample_index = spike["sample_index"]
-            sparse_indices = sparsity_mask[unit_index]
+            (sparse_indices,) = np.nonzero(sparsity_mask[unit_index])
             template = all_templates[unit_index][:, sparse_indices]
             template = template[nbefore - cut_out_before : nbefore + cut_out_after]
             sample_centered = sample_index - start_frame
@@ -368,7 +369,7 @@ def _amplitude_scalings_chunk(segment_index, start_frame, end_frame, worker_ctx):
                 template = template[cut_out_before - sample_index :]
             elif sample_index + cut_out_after > end_frame + right:
                 local_waveform = traces_with_margin[cut_out_start:, sparse_indices]
-                template = template[: -(sample_index + cut_out_after - end_frame - right)]
+                template = template[: -(sample_index + cut_out_after - (end_frame + right))]
             else:
                 local_waveform = traces_with_margin[cut_out_start:cut_out_end, sparse_indices]
             assert template.shape == local_waveform.shape
@@ -550,10 +551,11 @@ def fit_collision(
     sample_last_centered = np.max(collision["sample_index"]) - (start_frame - left)

     # construct sparsity as union between units' sparsity
-    sparse_indices = np.zeros(sparsity_mask.shape[1], dtype="int")
+    common_sparse_mask = np.zeros(sparsity_mask.shape[1], dtype="int")
     for spike in collision:
-        sparse_indices_i = sparsity_mask[spike["unit_index"]]
-        sparse_indices = np.logical_or(sparse_indices, sparse_indices_i)
+        mask_i = sparsity_mask[spike["unit_index"]]
+        common_sparse_mask = np.logical_or(common_sparse_mask, mask_i)
+    (sparse_indices,) = np.nonzero(common_sparse_mask)

     local_waveform_start = max(0, sample_first_centered - cut_out_before)
     local_waveform_end = min(traces_with_margin.shape[0], sample_last_centered + cut_out_after)

diff --git a/src/spikeinterface/postprocessing/principal_component.py b/src/spikeinterface/postprocessing/principal_component.py
index 233625e09e..1214b84ac4 100644
--- a/src/spikeinterface/postprocessing/principal_component.py
+++ b/src/spikeinterface/postprocessing/principal_component.py
@@ -84,9 +84,16 @@ def get_projections(self, unit_id):
         Returns
         -------
         proj: np.array
-            The PCA projections (num_waveforms, num_components, num_channels)
+            The PCA projections (num_waveforms, num_components, num_channels).
+            In case sparsity is used, only the projections on sparse channels are returned.
         """
-        return self._extension_data[f"pca_{unit_id}"]
+        projections = self._extension_data[f"pca_{unit_id}"]
+        mode = self._params["mode"]
+        if mode in ("by_channel_local", "by_channel_global"):
+            sparsity = self.get_sparsity()
+            if sparsity is not None:
+                projections = projections[:, :, sparsity.unit_id_to_channel_indices[unit_id]]
+        return projections

     def get_pca_model(self):
         """
@@ -211,6 +218,10 @@ def project_new(self, new_waveforms, unit_id=None):
         wfs_flat = new_waveforms.reshape(new_waveforms.shape[0], -1)
         projections = pca_model.transform(wfs_flat)

+        # take care of sparsity (not in case of concatenated)
+        if mode in ("by_channel_local", "by_channel_global"):
+            if sparsity is not None:
+                projections = projections[:, :, sparsity.unit_id_to_channel_indices[unit_id]]
         return projections

     def get_sparsity(self):

diff --git a/src/spikeinterface/postprocessing/tests/common_extension_tests.py b/src/spikeinterface/postprocessing/tests/common_extension_tests.py
index b9c72f9b99..8657d1dced 100644
--- a/src/spikeinterface/postprocessing/tests/common_extension_tests.py
+++ b/src/spikeinterface/postprocessing/tests/common_extension_tests.py
@@ -5,7 +5,7 @@
 from pathlib import Path

 from spikeinterface import extract_waveforms, load_extractor, compute_sparsity
-from spikeinterface.extractors import toy_example
+from spikeinterface.core.generate import generate_ground_truth_recording

 if hasattr(pytest, "global_test_folder"):
     cache_folder = pytest.global_test_folder / "postprocessing"
@@ -26,7 +26,18 @@ def setUp(self):
         self.cache_folder = cache_folder

         # 1-segment
-        recording, sorting = toy_example(num_segments=1, num_units=10, num_channels=12)
+        recording, sorting = generate_ground_truth_recording(
+            durations=[10],
+            sampling_frequency=30000,
+            num_channels=12,
+            num_units=10,
+            dtype="float32",
+            seed=91,
+            generate_sorting_kwargs=dict(add_spikes_on_borders=True),
+            noise_kwargs=dict(noise_level=10.0, strategy="tile_pregenerated"),
+        )
+
+        # add gains and offsets and save
         gain = 0.1
         recording.set_channel_gains(gain)
         recording.set_channel_offsets(0)
@@ -53,7 +64,16 @@ def setUp(self):
         self.sparsity1 = compute_sparsity(we1, method="radius", radius_um=50)

         # 2-segments
-        recording, sorting = toy_example(num_segments=2, num_units=10)
+        recording, sorting = generate_ground_truth_recording(
+            durations=[10, 5],
+            sampling_frequency=30000,
+            num_channels=12,
+            num_units=10,
+            dtype="float32",
+            seed=91,
+            generate_sorting_kwargs=dict(add_spikes_on_borders=True),
+            noise_kwargs=dict(noise_level=10.0, strategy="tile_pregenerated"),
+        )
         recording.set_channel_gains(gain)
         recording.set_channel_offsets(0)
         if (cache_folder / "toy_rec_2seg").is_dir():

diff --git a/src/spikeinterface/postprocessing/tests/test_principal_component.py b/src/spikeinterface/postprocessing/tests/test_principal_component.py
index 5d64525b52..04ce42b70e 100644
--- a/src/spikeinterface/postprocessing/tests/test_principal_component.py
+++ b/src/spikeinterface/postprocessing/tests/test_principal_component.py
@@ -87,13 +87,13 @@ def test_sparse(self):
             pc.run()
             for i, unit_id in enumerate(unit_ids):
                 proj = pc.get_projections(unit_id)
-                assert proj.shape[1:] == (5, 4)
+                assert proj.shape[1:] == (5, len(sparsity.unit_id_to_channel_ids[unit_id]))

             # test project_new
             unit_id = 3
             new_wfs = we.get_waveforms(unit_id)
             new_proj = pc.project_new(new_wfs, unit_id=unit_id)
-            assert new_proj.shape == (new_wfs.shape[0], 5, 4)
+            assert new_proj.shape == (new_wfs.shape[0], 5, len(sparsity.unit_id_to_channel_ids[unit_id]))

             if DEBUG:
                 import matplotlib.pyplot as plt
@@ -197,8 +197,8 @@ def test_project_new(self):
 if __name__ == "__main__":
     test = PrincipalComponentsExtensionTest()
     test.setUp()
-    test.test_extension()
-    test.test_shapes()
-    test.test_compute_for_all_spikes()
+    # test.test_extension()
+    # test.test_shapes()
+    # test.test_compute_for_all_spikes()
     test.test_sparse()
-    test.test_project_new()
+    # test.test_project_new()
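
The border-spike machinery added in generate.py can be exercised directly; the values below are the defaults from the new signature:

    from spikeinterface.core import generate_sorting

    sorting = generate_sorting(
        durations=[10.0, 10.0],
        sampling_frequency=30000.0,
        num_units=10,
        add_spikes_on_borders=True,
        num_spikes_per_border=3,
        border_size_samples=20,
    )
    # the spike vector comes back lexsorted by (segment_index, sample_index)
    spikes = sorting.to_spike_vector()
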
From 73ceaacefecc4426d994ebca4ca006d667dada42 Mon Sep 17 00:00:00 2001
From: Alessio Buccino
Date: Fri, 22 Sep 2023 12:06:15 +0200
Subject: [PATCH 057/115] Extend PCA to be able to return sparse projections
 and fix tests

---
 .../postprocessing/principal_component.py     | 16 ++++++++++------
 .../tests/test_principal_component.py         | 12 ++++++++----
 .../tests/test_quality_metric_calculator.py   |  7 ++++---
 3 files changed, 22 insertions(+), 13 deletions(-)

diff --git a/src/spikeinterface/postprocessing/principal_component.py b/src/spikeinterface/postprocessing/principal_component.py
index 5d62216c20..8383dcbb43 100644
--- a/src/spikeinterface/postprocessing/principal_component.py
+++ b/src/spikeinterface/postprocessing/principal_component.py
@@ -72,7 +72,7 @@ def _select_extension_data(self, unit_ids):
                 new_extension_data[k] = v
         return new_extension_data

-    def get_projections(self, unit_id):
+    def get_projections(self, unit_id, sparse=False):
         """
         Returns the computed projections for the sampled waveforms of a unit id.

@@ -80,16 +80,18 @@ def get_projections(self, unit_id, sparse=False):
         ----------
         unit_id : int or str
             The unit id to return PCA projections for
+        sparse: bool, default False
+            If True, and sparsity is not None, only projections on sparse channels are returned.

         Returns
         -------
-        proj: np.array
+        projections: np.array
             The PCA projections (num_waveforms, num_components, num_channels).
             In case sparsity is used, only the projections on sparse channels are returned.
         """
         projections = self._extension_data[f"pca_{unit_id}"]
         mode = self._params["mode"]
-        if mode in ("by_channel_local", "by_channel_global"):
+        if mode in ("by_channel_local", "by_channel_global") and sparse:
             sparsity = self.get_sparsity()
             if sparsity is not None:
                 projections = projections[:, :, sparsity.unit_id_to_channel_indices[unit_id]]
         return projections
@@ -141,7 +143,7 @@ def get_all_projections(self, channel_ids=None, unit_ids=None, outputs="id"):
         all_labels = []  #  can be unit_id or unit_index
         all_projections = []
         for unit_index, unit_id in enumerate(unit_ids):
-            proj = self.get_projections(unit_id)
+            proj = self.get_projections(unit_id, sparse=False)
             if channel_ids is not None:
                 chan_inds = self.waveform_extractor.channel_ids_to_indices(channel_ids)
                 proj = proj[:, :, chan_inds]
@@ -158,7 +160,7 @@ def get_all_projections(self, channel_ids=None, unit_ids=None, outputs="id"):

         return all_labels, all_projections

-    def project_new(self, new_waveforms, unit_id=None):
+    def project_new(self, new_waveforms, unit_id=None, sparse=False):
         """
         Projects new waveforms or traces snippets on the PC components.

@@ -168,6 +170,8 @@ def project_new(self, new_waveforms, unit_id=None):
             Array with new waveforms to project with shape (num_waveforms, num_samples, num_channels)
         unit_id: int or str
             In case PCA is sparse and mode is by_channel_local, the unit_id of 'new_waveforms'
+        sparse: bool, default: False
+            If True, and sparsity is not None, only projections on sparse channels are returned.

         Returns
         -------
@@ -219,7 +223,7 @@ def project_new(self, new_waveforms, unit_id=None):
         projections = pca_model.transform(wfs_flat)

         # take care of sparsity (not in case of concatenated)
-        if mode in ("by_channel_local", "by_channel_global"):
+        if mode in ("by_channel_local", "by_channel_global") and sparse:
             if sparsity is not None:
                 projections = projections[:, :, sparsity.unit_id_to_channel_indices[unit_id]]
         return projections

diff --git a/src/spikeinterface/postprocessing/tests/test_principal_component.py b/src/spikeinterface/postprocessing/tests/test_principal_component.py
index 04ce42b70e..49591d9b89 100644
--- a/src/spikeinterface/postprocessing/tests/test_principal_component.py
+++ b/src/spikeinterface/postprocessing/tests/test_principal_component.py
@@ -86,14 +86,18 @@ def test_sparse(self):
             pc.set_params(n_components=5, mode=mode, sparsity=sparsity)
             pc.run()
             for i, unit_id in enumerate(unit_ids):
-                proj = pc.get_projections(unit_id)
-                assert proj.shape[1:] == (5, len(sparsity.unit_id_to_channel_ids[unit_id]))
+                proj_sparse = pc.get_projections(unit_id, sparse=True)
+                assert proj_sparse.shape[1:] == (5, len(sparsity.unit_id_to_channel_ids[unit_id]))
+                proj_dense = pc.get_projections(unit_id, sparse=False)
+                assert proj_dense.shape[1:] == (5, num_channels)

             # test project_new
             unit_id = 3
             new_wfs = we.get_waveforms(unit_id)
-            new_proj = pc.project_new(new_wfs, unit_id=unit_id)
-            assert new_proj.shape == (new_wfs.shape[0], 5, len(sparsity.unit_id_to_channel_ids[unit_id]))
+            new_proj_sparse = pc.project_new(new_wfs, unit_id=unit_id, sparse=True)
+            assert new_proj_sparse.shape == (new_wfs.shape[0], 5, len(sparsity.unit_id_to_channel_ids[unit_id]))
+            new_proj_dense = pc.project_new(new_wfs, unit_id=unit_id, sparse=False)
+            assert new_proj_dense.shape == (new_wfs.shape[0], 5, num_channels)

             if DEBUG:
                 import matplotlib.pyplot as plt

diff --git a/src/spikeinterface/qualitymetrics/tests/test_quality_metric_calculator.py b/src/spikeinterface/qualitymetrics/tests/test_quality_metric_calculator.py
index 4fa65993d1..977beca210 100644
--- a/src/spikeinterface/qualitymetrics/tests/test_quality_metric_calculator.py
+++ b/src/spikeinterface/qualitymetrics/tests/test_quality_metric_calculator.py
@@ -261,7 +261,8 @@ def test_nn_metrics(self):
             we_sparse, metric_names=metric_names, sparsity=None, seed=0, n_jobs=2
         )
         for metric_name in metrics.columns:
-            assert np.allclose(metrics[metric_name], metrics_par[metric_name])
+            # NaNs are skipped
+            assert np.allclose(metrics[metric_name].dropna(), metrics_par[metric_name].dropna())

     def test_recordingless(self):
         we = self.we_long
@@ -305,7 +306,7 @@ def test_empty_units(self):
     test.setUp()
     # test.test_drift_metrics()
     # test.test_extension()
-    # test.test_nn_metrics()
+    test.test_nn_metrics()
     # test.test_peak_sign()
     # test.test_empty_units()
-    test.test_recordingless()
+    # test.test_recordingless()
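
After this patch the projections are dense by default and sparse only on request. Assuming `pc`, `sparsity`, `unit_id` and `new_wfs` as in test_sparse above:

    proj_dense = pc.get_projections(unit_id, sparse=False)
    # -> (num_waveforms, n_components, num_channels)
    proj_sparse = pc.get_projections(unit_id, sparse=True)
    # -> (num_waveforms, n_components, num_sparse_channels_for_unit)
    new_proj = pc.project_new(new_wfs, unit_id=unit_id, sparse=True)
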
From b9b6c15b42a64d877ea9fad9fca84424e2c97edf Mon Sep 17 00:00:00 2001
From: Alessio Buccino
Date: Fri, 22 Sep 2023 12:12:21 +0200
Subject: [PATCH 058/115] Add test to check correct order of spikes with
 borders

---
 src/spikeinterface/core/tests/test_generate.py | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/src/spikeinterface/core/tests/test_generate.py b/src/spikeinterface/core/tests/test_generate.py
index 3844e421ac..9a9c61766f 100644
--- a/src/spikeinterface/core/tests/test_generate.py
+++ b/src/spikeinterface/core/tests/test_generate.py
@@ -48,9 +48,15 @@ def test_generate_sorting_with_spikes_on_borders():
             num_spikes_per_border=num_spikes_on_borders,
             border_size_samples=border_size_samples,
         )
+        # check that segments are correctly sorted
+        all_spikes = sorting.to_spike_vector()
+        np.testing.assert_array_equal(all_spikes["segment_index"], np.sort(all_spikes["segment_index"]))
+
         spikes = sorting.to_spike_vector(concatenated=False)
         # at least num_border spikes at borders for all segments
-        for i, spikes_in_segment in enumerate(spikes):
+        for spikes_in_segment in spikes:
+            # check that sample indices are correctly sorted within segments
+            np.testing.assert_array_equal(spikes_in_segment["sample_index"], np.sort(spikes_in_segment["sample_index"]))
             num_samples = int(segment_duration * 30000)
             assert np.sum(spikes_in_segment["sample_index"] < border_size_samples) >= num_spikes_on_borders
             assert (
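
The ordering invariant this test pins down mirrors the np.lexsort call in generate.py: segment index is the primary key, sample index the secondary one. Stated directly, for any spike vector `spikes` (assumed structured array):

    import numpy as np

    # np.lexsort sorts by the LAST key first, so segment_index is primary
    order = np.lexsort((spikes["sample_index"], spikes["segment_index"]))
    assert np.array_equal(order, np.arange(spikes.size))
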
From 4e79b5811d41e6343391a3a6b26fab97f657368b Mon Sep 17 00:00:00 2001
From: Samuel Garcia
Date: Fri, 22 Sep 2023 13:32:51 +0200
Subject: [PATCH 059/115] propagate UnitSelector to other ipywidgets

---
 src/spikeinterface/widgets/amplitudes.py      | 12 ++-
 src/spikeinterface/widgets/base.py            |  3 +-
 src/spikeinterface/widgets/metrics.py         | 21 ++--
 src/spikeinterface/widgets/spike_locations.py | 34 +++----
 .../widgets/spikes_on_traces.py               | 87 ++++++++++------
 src/spikeinterface/widgets/unit_locations.py  | 29 +++---
 src/spikeinterface/widgets/unit_waveforms.py  | 50 +++++-----
 .../widgets/utils_ipywidgets.py               | 96 -------------------
 8 files changed, 121 insertions(+), 211 deletions(-)

diff --git a/src/spikeinterface/widgets/amplitudes.py b/src/spikeinterface/widgets/amplitudes.py
index b60de98cb0..5aa090b1b4 100644
--- a/src/spikeinterface/widgets/amplitudes.py
+++ b/src/spikeinterface/widgets/amplitudes.py
@@ -147,13 +147,16 @@ def plot_matplotlib(self, data_plot, **backend_kwargs):
                 else:
                     bins = dp.bins
                 ax_hist = self.axes.flatten()[1]
-                ax_hist.hist(amps, bins=bins, orientation="horizontal", color=dp.unit_colors[unit_id], alpha=0.8)
+                # this is super slow, using plot and np.histogram is really much faster (and nicer!)
+                # ax_hist.hist(amps, bins=bins, orientation="horizontal", color=dp.unit_colors[unit_id], alpha=0.8)
+                count, bins = np.histogram(amps, bins=bins)
+                ax_hist.plot(count, bins[:-1], color=dp.unit_colors[unit_id], alpha=0.8)

         if dp.plot_histograms:
             ax_hist = self.axes.flatten()[1]
             ax_hist.set_ylim(scatter_ax.get_ylim())
             ax_hist.axis("off")

-        self.figure.tight_layout()
+        # self.figure.tight_layout()

         if dp.plot_legend:
             if hasattr(self, "legend") and self.legend is not None:
@@ -174,7 +177,7 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs):
         # import ipywidgets.widgets as widgets
         import ipywidgets.widgets as W
         from IPython.display import display
-        from .utils_ipywidgets import check_ipywidget_backend, make_unit_controller, UnitSelector
+        from .utils_ipywidgets import check_ipywidget_backend, UnitSelector

         check_ipywidget_backend()

@@ -200,7 +203,6 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs):
         self.checkbox_histograms = W.Checkbox(
             value=data_plot["plot_histograms"],
             description="hist",
-            # disabled=False,
         )

         left_sidebar = W.VBox(
@@ -231,6 +233,7 @@ def _full_update_plot(self, change=None):
         data_plot = self.next_data_plot
         data_plot["unit_ids"] = self.unit_selector.value
         data_plot["plot_histograms"] = self.checkbox_histograms.value
+        data_plot["plot_legend"] = False

         backend_kwargs = dict(figure=self.figure, axes=None, ax=None)
         self.plot_matplotlib(data_plot, **backend_kwargs)
@@ -243,6 +246,7 @@ def _update_plot(self, change=None):
         data_plot = self.next_data_plot
         data_plot["unit_ids"] = self.unit_selector.value
         data_plot["plot_histograms"] = self.checkbox_histograms.value
+        data_plot["plot_legend"] = False

         backend_kwargs = dict(figure=None, axes=self.axes, ax=None)
         self.plot_matplotlib(data_plot, **backend_kwargs)

diff --git a/src/spikeinterface/widgets/base.py b/src/spikeinterface/widgets/base.py
index 4ed83fcca9..1ff691320a 100644
--- a/src/spikeinterface/widgets/base.py
+++ b/src/spikeinterface/widgets/base.py
@@ -38,6 +38,7 @@ def set_default_plotter_backend(backend):
         "width_cm": "Width of the figure in cm (default 10)",
         "height_cm": "Height of the figure in cm (default 6)",
         "display": "If True, widgets are immediately displayed",
+        # "controllers": ""
     },
     "ephyviewer": {},
 }
@@ -45,7 +46,7 @@
 default_backend_kwargs = {
     "matplotlib": {"figure": None, "ax": None, "axes": None, "ncols": 5, "figsize": None, "figtitle": None},
     "sortingview": {"generate_url": True, "display": True, "figlabel": None, "height": None},
-    "ipywidgets": {"width_cm": 25, "height_cm": 10, "display": True},
+    "ipywidgets": {"width_cm": 25, "height_cm": 10, "display": True, "controllers": None},
     "ephyviewer": {},
 }

diff --git a/src/spikeinterface/widgets/metrics.py b/src/spikeinterface/widgets/metrics.py
index 9dc51f522e..604da35e65 100644
--- a/src/spikeinterface/widgets/metrics.py
+++ b/src/spikeinterface/widgets/metrics.py
@@ -128,7 +128,7 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs):
         import matplotlib.pyplot as plt
         import ipywidgets.widgets as widgets
         from IPython.display import display
-        from .utils_ipywidgets import check_ipywidget_backend, make_unit_controller
+        from .utils_ipywidgets import check_ipywidget_backend, UnitSelector

         check_ipywidget_backend()

@@ -147,34 +147,29 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs):
             with output:
                 self.figure = plt.figure(figsize=((ratios[1] * width_cm) * cm, height_cm * cm))
                 plt.show()

-        if data_plot["unit_ids"] is None:
-            data_plot["unit_ids"] = []
-
-        unit_widget, unit_controller = make_unit_controller(
-            data_plot["unit_ids"], list(data_plot["unit_colors"].keys()), ratios[0] * width_cm, height_cm
-        )
-
-        self.controller = unit_controller
+        self.unit_selector = UnitSelector(data_plot["sorting"].unit_ids)
+        self.unit_selector.value = [ ]

-        for w in self.controller.values():
-            w.observe(self._update_ipywidget)

         self.widget = widgets.AppLayout(
             center=self.figure.canvas,
-            left_sidebar=unit_widget,
+            left_sidebar=self.unit_selector,
             pane_widths=ratios + [0],
         )

         # a first update
         self._update_ipywidget(None)

+        self.unit_selector.observe(self._update_ipywidget, names='value', type="change")
+
         if backend_kwargs["display"]:
             display(self.widget)

     def _update_ipywidget(self, change):
         from matplotlib.lines import Line2D

-        unit_ids = self.controller["unit_ids"].value
+        unit_ids = self.unit_selector.value
         unit_colors = self.data_plot["unit_colors"]

         # matplotlib next_data_plot dict update at each call
@@ -198,6 +193,7 @@ def _update_ipywidget(self, change):
         self.plot_matplotlib(self.data_plot, **backend_kwargs)

         if len(unit_ids) > 0:
+            # TODO later make option to control legend or not
             for l in self.figure.legends:
                 l.remove()
             handles = [
@@ -212,6 +208,7 @@ def _update_ipywidget(self, change):
         self.figure.canvas.draw()
         self.figure.canvas.flush_events()

+
     def plot_sortingview(self, data_plot, **backend_kwargs):
         import sortingview.views as vv
         from .utils_sortingview import generate_unit_table_view, make_serializable, handle_display_and_url

diff --git a/src/spikeinterface/widgets/spike_locations.py b/src/spikeinterface/widgets/spike_locations.py
index 9771b2c0e9..926051b8f9 100644
--- a/src/spikeinterface/widgets/spike_locations.py
+++ b/src/spikeinterface/widgets/spike_locations.py
@@ -191,7 +191,7 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs):
         import matplotlib.pyplot as plt
         import ipywidgets.widgets as widgets
         from IPython.display import display
-        from .utils_ipywidgets import check_ipywidget_backend, make_unit_controller
+        from .utils_ipywidgets import check_ipywidget_backend, UnitSelector

         check_ipywidget_backend()

@@ -210,48 +210,36 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs):
             fig, self.ax = plt.subplots(figsize=((ratios[1] * width_cm) * cm, height_cm * cm))
             plt.show()

-        data_plot["unit_ids"] = data_plot["unit_ids"][:1]
-
-        unit_widget, unit_controller = make_unit_controller(
-            data_plot["unit_ids"],
-            list(data_plot["unit_colors"].keys()),
-            ratios[0] * width_cm,
-            height_cm,
-        )
-
-        self.controller = unit_controller
-
-        for w in self.controller.values():
-            w.observe(self._update_ipywidget)
+        self.unit_selector = UnitSelector(data_plot["unit_ids"])
+        self.unit_selector.value = list(data_plot["unit_ids"])[:1]

         self.widget = widgets.AppLayout(
             center=fig.canvas,
-            left_sidebar=unit_widget,
+            left_sidebar=self.unit_selector,
             pane_widths=ratios + [0],
         )

         # a first update
-        self._update_ipywidget(None)
+        self._update_ipywidget()
+
+        self.unit_selector.observe(self._update_ipywidget, names='value', type="change")

         if backend_kwargs["display"]:
             display(self.widget)

-    def _update_ipywidget(self, change):
+    def _update_ipywidget(self, change=None):
         self.ax.clear()

-        unit_ids = self.controller["unit_ids"].value
-
         # matplotlib next_data_plot dict update at each call
         data_plot = self.next_data_plot
-        data_plot["unit_ids"] = unit_ids
+        data_plot["unit_ids"] = self.unit_selector.value
         data_plot["plot_all_units"] = True
+        # TODO add an option checkbox for legend
         data_plot["plot_legend"] = True
         data_plot["hide_axis"] = True

-        backend_kwargs = {}
-        backend_kwargs["ax"] = self.ax
+        backend_kwargs = dict(ax=self.ax)

-        # self.mpl_plotter.do_plot(data_plot, **backend_kwargs)
         self.plot_matplotlib(data_plot, **backend_kwargs)

         fig = self.ax.get_figure()
         fig.canvas.draw()

diff --git a/src/spikeinterface/widgets/spikes_on_traces.py b/src/spikeinterface/widgets/spikes_on_traces.py
index ae036d1ba1..2f748cc0fc 100644
--- a/src/spikeinterface/widgets/spikes_on_traces.py
+++ b/src/spikeinterface/widgets/spikes_on_traces.py
@@ -149,20 +149,20 @@ def plot_matplotlib(self, data_plot, **backend_kwargs):
         sorting = we.sorting

         # first plot time series
-        ts_widget = TracesWidget(recording, **dp.options, backend="matplotlib", **backend_kwargs)
-        self.ax = ts_widget.ax
-        self.axes = ts_widget.axes
-        self.figure = ts_widget.figure
+        traces_widget = TracesWidget(recording, **dp.options, backend="matplotlib", **backend_kwargs)
+        self.ax = traces_widget.ax
+        self.axes = traces_widget.axes
+        self.figure = traces_widget.figure

         ax = self.ax

-        frame_range = ts_widget.data_plot["frame_range"]
-        segment_index = ts_widget.data_plot["segment_index"]
-        min_y = np.min(ts_widget.data_plot["channel_locations"][:, 1])
-        max_y = np.max(ts_widget.data_plot["channel_locations"][:, 1])
+        frame_range = traces_widget.data_plot["frame_range"]
+        segment_index = traces_widget.data_plot["segment_index"]
+        min_y = np.min(traces_widget.data_plot["channel_locations"][:, 1])
+        max_y = np.max(traces_widget.data_plot["channel_locations"][:, 1])

-        n = len(ts_widget.data_plot["channel_ids"])
-        order = ts_widget.data_plot["order"]
+        n = len(traces_widget.data_plot["channel_ids"])
+        order = traces_widget.data_plot["order"]
         if order is None:
             order = np.arange(n)

@@ -210,13 +210,13 @@ def plot_matplotlib(self, data_plot, **backend_kwargs):
         # construct waveforms
         label_set = False
         if len(spike_frames_to_plot) > 0:
-            vspacing = ts_widget.data_plot["vspacing"]
-            traces = ts_widget.data_plot["list_traces"][0]
+            vspacing = traces_widget.data_plot["vspacing"]
+            traces = traces_widget.data_plot["list_traces"][0]

             waveform_idxs = spike_frames_to_plot[:, None] + np.arange(-we.nbefore, we.nafter) - frame_range[0]
-            waveform_idxs = np.clip(waveform_idxs, 0, len(ts_widget.data_plot["times"]) - 1)
+            waveform_idxs = np.clip(waveform_idxs, 0, len(traces_widget.data_plot["times"]) - 1)

-            times = ts_widget.data_plot["times"][waveform_idxs]
+            times = traces_widget.data_plot["times"][waveform_idxs]

             # discontinuity
             times[:, -1] = np.nan

             waveforms = traces[waveform_idxs]  # [:, :, order]
             waveforms_r = waveforms.reshape((waveforms.shape[0] * waveforms.shape[1], waveforms.shape[2]))

-            for i, chan_id in enumerate(ts_widget.data_plot["channel_ids"]):
+            for i, chan_id in enumerate(traces_widget.data_plot["channel_ids"]):
                 offset = vspacing * i
                 if chan_id in chan_ids:
                     l = ax.plot(times_r, offset + waveforms_r[:, i], color=dp.unit_colors[unit])
                     if not label_set:
                         handles.append(l[0])
                         labels.append(unit)
                         label_set = True
-        ax.legend(handles, labels)
+        # ax.legend(handles, labels)

     def plot_ipywidgets(self, data_plot, **backend_kwargs):
         import matplotlib.pyplot as plt
         import ipywidgets.widgets as widgets
         from IPython.display import display
-        from .utils_ipywidgets import check_ipywidget_backend, make_unit_controller
+        from .utils_ipywidgets import check_ipywidget_backend, UnitSelector

         check_ipywidget_backend()

@@ -256,37 +256,58 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs):
         width_cm = backend_kwargs["width_cm"]

         # plot timeseries
-        ts_widget = TracesWidget(we.recording, **dp.options, backend="ipywidgets", **backend_kwargs_ts)
-        self.ax = ts_widget.ax
-        self.axes = ts_widget.axes
-        self.figure = ts_widget.figure
+        self._traces_widget = TracesWidget(we.recording, **dp.options, backend="ipywidgets", **backend_kwargs_ts)
+        self.ax = self._traces_widget.ax
+        self.axes = self._traces_widget.axes
+        self.figure = self._traces_widget.figure

-        unit_widget, unit_controller = make_unit_controller(
-            data_plot["unit_ids"], we.unit_ids, ratios[0] * width_cm, height_cm
-        )
+        self.sampling_frequency = self._traces_widget.rec0.sampling_frequency

-        self.controller = dict()
-        self.controller.update(ts_widget.controller)
-        self.controller.update(unit_controller)
+        self.time_slider = self._traces_widget.time_slider

-        for w in self.controller.values():
-            w.observe(self._update_ipywidget)
+        self.unit_selector = UnitSelector(data_plot["unit_ids"])
+        self.unit_selector.value = list(data_plot["unit_ids"])[:1]

-        self.widget = widgets.AppLayout(center=ts_widget.widget, left_sidebar=unit_widget, pane_widths=ratios + [0])
+        self.widget = widgets.AppLayout(center=self._traces_widget.widget,
+                                        left_sidebar=self.unit_selector,
+                                        pane_widths=ratios + [0])

         # a first update
-        self._update_ipywidget(None)
+        self._update_ipywidget()
+
+        # remove callback from traces_widget
+        self.unit_selector.observe(self._update_ipywidget, names='value', type="change")
+        self._traces_widget.time_slider.observe(self._update_ipywidget, names='value', type="change")
+        self._traces_widget.channel_selector.observe(self._update_ipywidget, names='value', type="change")
+        self._traces_widget.scaler.observe(self._update_ipywidget, names='value', type="change")
+

         if backend_kwargs["display"]:
             display(self.widget)

-    def _update_ipywidget(self, change):
+    def _update_ipywidget(self, change=None):
         self.ax.clear()

-        unit_ids = self.controller["unit_ids"].value
+        # TODO later: this is still a bit buggy because it make double refresh one from _traces_widget and one internal
+
+        unit_ids = self.unit_selector.value
+        start_frame, end_frame, segment_index = self._traces_widget.time_slider.value
+        channel_ids = self._traces_widget.channel_selector.value
+        mode = self._traces_widget.mode_selector.value

         data_plot = self.next_data_plot
         data_plot["unit_ids"] = unit_ids
+        data_plot["options"].update(
+            dict(
+                channel_ids=channel_ids,
+                segment_index=segment_index,
+                # frame_range=(start_frame, end_frame),
+                time_range=np.array([start_frame, end_frame]) / self.sampling_frequency,
+                mode=mode,
+                with_colorbar=False,
+            )
+        )
+
         backend_kwargs = {}
         backend_kwargs["ax"] = self.ax

diff --git a/src/spikeinterface/widgets/unit_locations.py b/src/spikeinterface/widgets/unit_locations.py
index 42267e711f..8526a95d60 100644
--- a/src/spikeinterface/widgets/unit_locations.py
+++ b/src/spikeinterface/widgets/unit_locations.py
@@ -167,7 +167,7 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs):
         import matplotlib.pyplot as plt
         import ipywidgets.widgets as widgets
         from IPython.display import display
-        from .utils_ipywidgets import check_ipywidget_backend, make_unit_controller
+        from .utils_ipywidgets import check_ipywidget_backend, UnitSelector

         check_ipywidget_backend()

@@ -186,42 +186,35 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs):
             fig, self.ax = plt.subplots(figsize=((ratios[1] * width_cm) * cm, height_cm * cm))
             plt.show()

-        data_plot["unit_ids"] = data_plot["unit_ids"][:1]
-        unit_widget, unit_controller = make_unit_controller(
data_plot["unit_ids"], list(data_plot["unit_colors"].keys()), ratios[0] * width_cm, height_cm - ) - - self.controller = unit_controller - - for w in self.controller.values(): - w.observe(self._update_ipywidget) + self.unit_selector = UnitSelector(data_plot["unit_ids"]) + self.unit_selector.value = list(data_plot["unit_ids"])[:1] self.widget = widgets.AppLayout( center=fig.canvas, - left_sidebar=unit_widget, + left_sidebar=self.unit_selector, pane_widths=ratios + [0], ) # a first update - self._update_ipywidget(None) + self._update_ipywidget() + + self.unit_selector.observe(self._update_ipywidget, names='value', type="change") if backend_kwargs["display"]: display(self.widget) - def _update_ipywidget(self, change): + def _update_ipywidget(self, change=None): self.ax.clear() - unit_ids = self.controller["unit_ids"].value - # matplotlib next_data_plot dict update at each call data_plot = self.next_data_plot - data_plot["unit_ids"] = unit_ids + data_plot["unit_ids"] = self.unit_selector.value data_plot["plot_all_units"] = True + # TODO later add an option checkbox for legend data_plot["plot_legend"] = True data_plot["hide_axis"] = True - backend_kwargs = {} - backend_kwargs["ax"] = self.ax + backend_kwargs = dict(ax=self.ax) self.plot_matplotlib(data_plot, **backend_kwargs) fig = self.ax.get_figure() diff --git a/src/spikeinterface/widgets/unit_waveforms.py b/src/spikeinterface/widgets/unit_waveforms.py index e64765b44b..f01c842b66 100644 --- a/src/spikeinterface/widgets/unit_waveforms.py +++ b/src/spikeinterface/widgets/unit_waveforms.py @@ -250,7 +250,7 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): import matplotlib.pyplot as plt import ipywidgets.widgets as widgets from IPython.display import display - from .utils_ipywidgets import check_ipywidget_backend, make_unit_controller + from .utils_ipywidgets import check_ipywidget_backend, UnitSelector check_ipywidget_backend() @@ -274,44 +274,33 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): self.fig_probe, self.ax_probe = plt.subplots(figsize=((ratios[2] * width_cm) * cm, height_cm * cm)) plt.show() - data_plot["unit_ids"] = data_plot["unit_ids"][:1] - unit_widget, unit_controller = make_unit_controller( - data_plot["unit_ids"], we.unit_ids, ratios[0] * width_cm, height_cm - ) + self.unit_selector = UnitSelector(data_plot["unit_ids"]) + self.unit_selector.value = list(data_plot["unit_ids"])[:1] + - same_axis_button = widgets.Checkbox( + self.same_axis_button = widgets.Checkbox( value=False, description="same axis", disabled=False, ) - plot_templates_button = widgets.Checkbox( + self.plot_templates_button = widgets.Checkbox( value=True, description="plot templates", disabled=False, ) - hide_axis_button = widgets.Checkbox( + self.hide_axis_button = widgets.Checkbox( value=True, description="hide axis", disabled=False, ) - footer = widgets.HBox([same_axis_button, plot_templates_button, hide_axis_button]) - - self.controller = { - "same_axis": same_axis_button, - "plot_templates": plot_templates_button, - "hide_axis": hide_axis_button, - } - self.controller.update(unit_controller) - - for w in self.controller.values(): - w.observe(self._update_ipywidget) + footer = widgets.HBox([self.same_axis_button, self.plot_templates_button, self.hide_axis_button]) self.widget = widgets.AppLayout( center=self.fig_wf.canvas, - left_sidebar=unit_widget, + left_sidebar=self.unit_selector, right_sidebar=self.fig_probe.canvas, pane_widths=ratios, footer=footer, @@ -320,6 +309,11 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): # a 
first update self._update_ipywidget(None) + self.unit_selector.observe(self._update_ipywidget, names='value', type="change") + for w in self.same_axis_button, self.plot_templates_button, self.hide_axis_button: + w.observe(self._update_ipywidget, names='value', type="change") + + if backend_kwargs["display"]: display(self.widget) @@ -327,10 +321,15 @@ def _update_ipywidget(self, change): self.fig_wf.clear() self.ax_probe.clear() - unit_ids = self.controller["unit_ids"].value - same_axis = self.controller["same_axis"].value - plot_templates = self.controller["plot_templates"].value - hide_axis = self.controller["hide_axis"].value + # unit_ids = self.controller["unit_ids"].value + unit_ids = self.unit_selector.value + # same_axis = self.controller["same_axis"].value + # plot_templates = self.controller["plot_templates"].value + # hide_axis = self.controller["hide_axis"].value + + same_axis = self.same_axis_button.value + plot_templates = self.plot_templates_button.value + hide_axis = self.hide_axis_button.value # matplotlib next_data_plot dict update at each call data_plot = self.next_data_plot @@ -341,6 +340,8 @@ def _update_ipywidget(self, change): data_plot["plot_templates"] = plot_templates if data_plot["plot_waveforms"]: data_plot["wfs_by_ids"] = {unit_id: self.we.get_waveforms(unit_id) for unit_id in unit_ids} + + # TODO option for plot_legend backend_kwargs = {} @@ -369,6 +370,7 @@ def _update_ipywidget(self, change): self.ax_probe.axis("off") self.ax_probe.axis("equal") + # TODO this could be done with probeinterface plotting plotting tools!! for unit in unit_ids: channel_inds = data_plot["sparsity"].unit_id_to_channel_indices[unit] self.ax_probe.plot( diff --git a/src/spikeinterface/widgets/utils_ipywidgets.py b/src/spikeinterface/widgets/utils_ipywidgets.py index d2c41f234a..57550c0910 100644 --- a/src/spikeinterface/widgets/utils_ipywidgets.py +++ b/src/spikeinterface/widgets/utils_ipywidgets.py @@ -11,102 +11,6 @@ def check_ipywidget_backend(): assert "ipympl" in mpl_backend, "To use the 'ipywidgets' backend, you have to set %matplotlib widget" -# def make_timeseries_controller(t_start, t_stop, layer_keys, num_segments, time_range, mode, all_layers, width_cm): -# time_slider = W.FloatSlider( -# orientation="horizontal", -# description="time:", -# value=time_range[0], -# min=t_start, -# max=t_stop, -# continuous_update=False, -# layout=W.Layout(width=f"{width_cm}cm"), -# ) -# layer_selector = W.Dropdown(description="layer", options=layer_keys) -# segment_selector = W.Dropdown(description="segment", options=list(range(num_segments))) -# window_sizer = W.BoundedFloatText(value=np.diff(time_range)[0], step=0.1, min=0.005, description="win (s)") -# mode_selector = W.Dropdown(options=["line", "map"], description="mode", value=mode) -# all_layers = W.Checkbox(description="plot all layers", value=all_layers) - -# controller = { -# "layer_key": layer_selector, -# "segment_index": segment_selector, -# "window": window_sizer, -# "t_start": time_slider, -# "mode": mode_selector, -# "all_layers": all_layers, -# } -# widget = W.VBox( -# [time_slider, W.HBox([all_layers, layer_selector, segment_selector, window_sizer, mode_selector])] -# ) - -# return widget, controller - - -def make_unit_controller(unit_ids, all_unit_ids, width_cm, height_cm): - unit_label = W.Label(value="units:") - - unit_selector = W.SelectMultiple( - options=all_unit_ids, - value=list(unit_ids), - disabled=False, - layout=W.Layout(width=f"{width_cm}cm", height=f"{height_cm}cm"), - ) - - controller = {"unit_ids": 
unit_selector} - widget = W.VBox([unit_label, unit_selector]) - - return widget, controller - - -# def make_channel_controller(recording, width_cm, height_cm): -# channel_label = W.Label("channel indices:", layout=W.Layout(justify_content="center")) -# channel_selector = W.IntRangeSlider( -# value=[0, recording.get_num_channels()], -# min=0, -# max=recording.get_num_channels(), -# step=1, -# disabled=False, -# continuous_update=False, -# orientation="vertical", -# readout=True, -# readout_format="d", -# layout=W.Layout(width=f"{0.8 * width_cm}cm", height=f"{height_cm}cm"), -# ) - -# controller = {"channel_inds": channel_selector} -# widget = W.VBox([channel_label, channel_selector]) - -# return widget, controller - - -# def make_scale_controller(width_cm, height_cm): -# scale_label = W.Label("Scale", layout=W.Layout(justify_content="center")) - -# plus_selector = W.Button( -# description="", -# disabled=False, -# button_style="", # 'success', 'info', 'warning', 'danger' or '' -# tooltip="Increase scale", -# icon="arrow-up", -# layout=W.Layout(width=f"{0.8 * width_cm}cm", height=f"{0.4 * height_cm}cm"), -# ) - -# minus_selector = W.Button( -# description="", -# disabled=False, -# button_style="", # 'success', 'info', 'warning', 'danger' or '' -# tooltip="Decrease scale", -# icon="arrow-down", -# layout=W.Layout(width=f"{0.8 * width_cm}cm", height=f"{0.4 * height_cm}cm"), -# ) - -# controller = {"plus": plus_selector, "minus": minus_selector} -# widget = W.VBox([scale_label, plus_selector, minus_selector]) - -# return widget, controller - - - class TimeSlider(W.HBox): value = traitlets.Tuple(traitlets.Int(), traitlets.Int(), traitlets.Int()) From f315594b0b88bed01f01232688d62c4c2e4bc0fe Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Fri, 22 Sep 2023 15:49:47 +0200 Subject: [PATCH 060/115] protect TimeSlider on the upper limit to avoid border effect on window size --- src/spikeinterface/widgets/utils_ipywidgets.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/spikeinterface/widgets/utils_ipywidgets.py b/src/spikeinterface/widgets/utils_ipywidgets.py index 57550c0910..ee6133a990 100644 --- a/src/spikeinterface/widgets/utils_ipywidgets.py +++ b/src/spikeinterface/widgets/utils_ipywidgets.py @@ -54,7 +54,7 @@ def __init__(self, durations, sampling_frequency, time_range=(0, 1.), **kwargs): # description='time:', value=start_frame, min=0, - max=self.frame_limits[self.segment_index], + max=self.frame_limits[self.segment_index] - 1, readout=False, continuous_update=False, layout=W.Layout(width=f'70%') @@ -112,10 +112,13 @@ def update_time(self, new_frame=None, new_time=None, update_slider=False, update else: start_frame = new_frame delta_s = self.window_sizer.value - end_frame = start_frame + int(delta_s * self.sampling_frequency) - + delta = int(delta_s * self.sampling_frequency) + # clip + start_frame = min(self.frame_limits[self.segment_index] - delta, start_frame) start_frame = max(0, start_frame) + end_frame = start_frame + delta + end_frame = min(self.frame_limits[self.segment_index], end_frame) @@ -170,7 +173,7 @@ def segment_changed(self, change=None): self.slider.unobserve(self.slider_moved, names='value', type="change") # self.slider.value = 0 - self.slider.max = self.frame_limits[self.segment_index] + self.slider.max = self.frame_limits[self.segment_index] - 1 self.slider.observe(self.slider_moved, names='value', type="change") self.update_time(new_frame=0, update_slider=True, update_label=True) From c33f7233b54ccce797a903f8f495d8dbb30f0b2a Mon Sep 
17 00:00:00 2001 From: zm711 <92116279+zm711@users.noreply.github.com> Date: Fri, 22 Sep 2023 10:21:53 -0400 Subject: [PATCH 061/115] test reorganize folders --- doc/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/conf.py b/doc/conf.py index 15cb65d46a..b120393911 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -118,11 +118,11 @@ 'examples_dirs': ['../examples/modules_gallery'], 'gallery_dirs': ['modules_gallery', ], # path where to save gallery generated examples 'subsection_order': ExplicitOrder([ - '../examples/modules_gallery/core/', '../examples/modules_gallery/extractors/', '../examples/modules_gallery/qualitymetrics', '../examples/modules_gallery/comparison', '../examples/modules_gallery/widgets', + '../examples/modules_gallery/core/', ]), 'within_subsection_order': FileNameSortKey, 'ignore_pattern': '/generate_', From f2188266647d7faf721d89089b6f9c0bd1d9e637 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Fri, 22 Sep 2023 16:22:01 +0200 Subject: [PATCH 062/115] feedback from Ramon --- src/spikeinterface/core/generate.py | 4 ++-- src/spikeinterface/core/tests/test_waveform_extractor.py | 2 -- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/src/spikeinterface/core/generate.py b/src/spikeinterface/core/generate.py index 362b598b0b..05d63f3c8d 100644 --- a/src/spikeinterface/core/generate.py +++ b/src/spikeinterface/core/generate.py @@ -1433,7 +1433,7 @@ def generate_ground_truth_recording( ) recording.annotate(is_filtered=True) recording.set_probe(probe, in_place=True) - recording.set_property("gain_to_uV", np.ones(num_channels)) - recording.set_property("offset_to_uV", np.zeros(num_channels)) + recording.set_channel_gains(1.) + recording.set_channel_offsets(0.) return recording, sorting diff --git a/src/spikeinterface/core/tests/test_waveform_extractor.py b/src/spikeinterface/core/tests/test_waveform_extractor.py index 3972c9186c..f53b9cf18d 100644 --- a/src/spikeinterface/core/tests/test_waveform_extractor.py +++ b/src/spikeinterface/core/tests/test_waveform_extractor.py @@ -517,8 +517,6 @@ def test_non_json_object(): num_units=5, ) - - print(recording.check_serializablility("pickle")) # recording is not save to keep it in memory sorting = sorting.save() From 96be72e5ac05ec7f3bd63f866783b733fca22ab8 Mon Sep 17 00:00:00 2001 From: zm711 <92116279+zm711@users.noreply.github.com> Date: Fri, 22 Sep 2023 10:43:48 -0400 Subject: [PATCH 063/115] try removing extra slash from some sections --- doc/conf.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/conf.py b/doc/conf.py index b120393911..eb8bee5f9a 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -118,11 +118,11 @@ 'examples_dirs': ['../examples/modules_gallery'], 'gallery_dirs': ['modules_gallery', ], # path where to save gallery generated examples 'subsection_order': ExplicitOrder([ - '../examples/modules_gallery/extractors/', + '../examples/modules_gallery/core', + '../examples/modules_gallery/extractors', '../examples/modules_gallery/qualitymetrics', '../examples/modules_gallery/comparison', '../examples/modules_gallery/widgets', - '../examples/modules_gallery/core/', ]), 'within_subsection_order': FileNameSortKey, 'ignore_pattern': '/generate_', From c4fec2f135f5166bc3dfe4ebbd1a3ccdff8ddd63 Mon Sep 17 00:00:00 2001 From: zm711 <92116279+zm711@users.noreply.github.com> Date: Fri, 22 Sep 2023 11:00:17 -0400 Subject: [PATCH 064/115] try setting nested_sections false --- doc/conf.py | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/conf.py b/doc/conf.py index 
eb8bee5f9a..13d1ef4e65 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -126,6 +126,7 @@
     ]),
     'within_subsection_order': FileNameSortKey,
     'ignore_pattern': '/generate_',
+    'nested_sections': False,
 }

 intersphinx_mapping = {

From b23e7e444065ee9b7a72c549a9c0aee22ce39c25 Mon Sep 17 00:00:00 2001
From: zm711 <92116279+zm711@users.noreply.github.com>
Date: Mon, 25 Sep 2023 16:11:30 -0400
Subject: [PATCH 065/115] allow relative path when exporting to phy

---
 src/spikeinterface/exporters/to_phy.py | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/src/spikeinterface/exporters/to_phy.py b/src/spikeinterface/exporters/to_phy.py
index c92861a8bf..7de1a128e5 100644
--- a/src/spikeinterface/exporters/to_phy.py
+++ b/src/spikeinterface/exporters/to_phy.py
@@ -35,6 +35,7 @@ def export_to_phy(
     template_mode: str = "median",
     dtype: Optional[npt.DTypeLike] = None,
     verbose: bool = True,
+    use_relative_path: bool = False,
     **job_kwargs,
 ):
     """
@@ -64,6 +65,8 @@ def export_to_phy(
         Dtype to save binary data
     verbose: bool
         If True, output is verbose
+    use_relative_path : bool, default: False
+        If True saves the `dat_path` as a relative path, else an absolute
     {}
     """
@@ -94,7 +97,7 @@ def export_to_phy(
         used_sparsity = sparsity
     else:
         used_sparsity = ChannelSparsity.create_dense(waveform_extractor)
-    # convinient sparsity dict for the 3 cases to retrieve channl_inds
+    # convenient sparsity dict for the 3 cases to retrieve channel_inds
     sparse_dict = used_sparsity.unit_id_to_channel_indices

     empty_flag = False
@@ -106,7 +109,7 @@ def export_to_phy(
             empty_flag = True
     unit_ids = non_empty_units
     if empty_flag:
-        warnings.warn("Empty units have been removed when being exported to Phy")
+        warnings.warn("Empty units have been removed while exporting to Phy")

     if len(unit_ids) == 0:
         raise Exception("No non-empty units in the sorting result, can't save to Phy.")
@@ -149,7 +152,10 @@ def export_to_phy(

     # write params.py
     with (output_folder / "params.py").open("w") as f:
-        f.write(f"dat_path = r'{str(rec_path)}'\n")
+        if use_relative_path:
+            f.write(f"dat_path = r'recording.dat'\n")
+        else:
+            f.write(f"dat_path = r'{str(rec_path)}'\n")
         f.write(f"n_channels_dat = {num_chans}\n")
         f.write(f"dtype = '{dtype_str}'\n")
         f.write(f"offset = 0\n")

From 68fe2ba08f9c41b3feaf7866fee934291d78f7ea Mon Sep 17 00:00:00 2001
From: Pierre Yger
Date: Tue, 26 Sep 2023 09:26:40 +0200
Subject: [PATCH 066/115] OMP with SVD decomposition

---
 .../sortingcomponents/matching/circus.py      | 307 ++++++++++++++++++
 .../sortingcomponents/matching/method_list.py |   3 +-
 2 files changed, 309 insertions(+), 1 deletion(-)

diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py
index a19e7b71b5..e86c913976 100644
--- a/src/spikeinterface/sortingcomponents/matching/circus.py
+++ b/src/spikeinterface/sortingcomponents/matching/circus.py
@@ -478,6 +478,313 @@ def main_function(cls, traces, d):
         return spikes


+class CircusOMPSVDPeeler(BaseTemplateMatchingEngine):
+    """
+    Orthogonal Matching Pursuit inspired by the Spyking Circus sorter
+
+    https://elifesciences.org/articles/34518
+
+    This is an Orthogonal Template Matching algorithm. For speed and
+    memory optimization, templates are automatically sparsified. The signal
+    is convolved with the templates, and as long as some scalar products
+    are higher than a given threshold, we use a Cholesky decomposition
+    to compute the optimal amplitudes needed to reconstruct the signal.
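+
+    A rough sketch of the core loop, for illustration only (a toy, dense,
+    single-window version with made-up names; the real peeler works on
+    convolutions and updates a Cholesky factorization incrementally
+    instead of re-solving the normal equations each time)::
+
+        import numpy as np
+
+        def omp_sketch(signal, templates, threshold, max_iter=10):
+            # templates: (num_templates, num_samples), assumed unit norm
+            residual = signal.copy()
+            selected = []
+            amplitudes = np.zeros(0)
+            for _ in range(max_iter):
+                # scalar products between residual and every template
+                products = templates @ residual
+                best = int(np.argmax(products))
+                if products[best] < threshold:
+                    break
+                if best not in selected:
+                    selected.append(best)
+                # jointly re-fit the amplitudes of all selected templates
+                sub = templates[selected]
+                amplitudes = np.linalg.solve(sub @ sub.T, sub @ signal)
+                residual = signal - amplitudes @ sub
+            return selected, amplitudes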
+
+    IMPORTANT NOTE: small chunks are more efficient for such a peeler;
+    consider using 100 ms chunks
+
+    Parameters
+    ----------
+    amplitudes: tuple
+        (Minimal, Maximal) amplitudes allowed for every template
+    omp_min_sps: float
+        Stopping criteria of the OMP algorithm, as a percentage of the norm
+    noise_levels: array
+        The noise levels, for every channel. If None, they will be automatically
+        computed
+    random_chunk_kwargs: dict
+        Parameters for computing noise levels, if not provided (suboptimal)
+    sparse_kwargs: dict
+        Parameters to extract a sparsity mask from the waveform_extractor, if not
+        already sparse.
+    -----
+    """
+
+    _default_params = {
+        "amplitudes": [0.6, 2],
+        "omp_min_sps": 0.1,
+        "waveform_extractor": None,
+        "templates": None,
+        "overlaps": None,
+        "norms": None,
+        "random_chunk_kwargs": {},
+        "noise_levels": None,
+        "rank" : 3,
+        "sparse_kwargs": {"method": "ptp", "threshold": 1},
+        "ignored_ids": [],
+        "vicinity": 0,
+    }
+
+    @classmethod
+    def _prepare_templates(cls, d):
+        waveform_extractor = d["waveform_extractor"]
+        num_templates = len(d["waveform_extractor"].sorting.unit_ids)
+
+        if not waveform_extractor.is_sparse():
+            sparsity = compute_sparsity(waveform_extractor, **d["sparse_kwargs"]).mask
+        else:
+            sparsity = waveform_extractor.sparsity.mask
+
+        templates = waveform_extractor.get_all_templates(mode="median").copy()
+
+        temporal, singular, spatial = np.linalg.svd(templates, full_matrices=False)
+
+        # Keep only the strongest components
+        rank = d['rank']
+        d['templates'] = {}
+        d["norms"] = np.zeros(num_templates, dtype=np.float32)
+        d['sparsities'] = {}
+        d["norms"] = np.linalg.norm(templates, axis=(1, 2))
+        for i in range(num_templates):
+            d['sparsities'][i] = np.arange(templates.shape[2])
+            d['templates'][i] = templates[i] / d["norms"][i]
+
+        temporal = temporal[:, :, :rank]
+        d["temporal"] = np.flip(temporal, axis=1)
+        d["singular"] = singular[:, :rank]
+        d["spatial"] = spatial[:, :rank, :]
+
+        d['temporal'] /= d['norms'][:, np.newaxis, np.newaxis]
+
+        d["spatial"] = np.moveaxis(d['spatial'][:, :rank, :], [0, 1, 2], [1, 0, 2])
+        d['temporal'] = np.moveaxis(d['temporal'][:, :, :rank], [0, 1, 2], [1, 2, 0])
+        d['singular'] = d['singular'].T[:, :, np.newaxis]
+        return d
+
+    @classmethod
+    def initialize_and_check_kwargs(cls, recording, kwargs):
+        d = cls._default_params.copy()
+        d.update(kwargs)
+
+        # assert isinstance(d['waveform_extractor'], WaveformExtractor)
+
+        for v in ["omp_min_sps"]:
+            assert (d[v] >= 0) and (d[v] <= 1), f"{v} should be in [0, 1]"
+
+        d["num_channels"] = d["waveform_extractor"].recording.get_num_channels()
+        d["num_samples"] = d["waveform_extractor"].nsamples
+        d["nbefore"] = d["waveform_extractor"].nbefore
+        d["nafter"] = d["waveform_extractor"].nafter
+        d["sampling_frequency"] = d["waveform_extractor"].recording.get_sampling_frequency()
+        d["vicinity"] *= d["num_samples"]
+
+        if d["noise_levels"] is None:
+            print("CircusOMPPeeler : noise should be computed outside")
+            d["noise_levels"] = get_noise_levels(recording, **d["random_chunk_kwargs"], return_scaled=False)
+
+        if d["templates"] is None:
+            d = cls._prepare_templates(d)
+        else:
+            for key in ["norms", "sparsities"]:
+                assert d[key] is not None, "If templates are provided, %d should also be there" % key
+
+        d["num_templates"] = len(d["templates"])
+
+        if d["overlaps"] is None:
+            d["overlaps"] = compute_overlaps(d["templates"], d["num_samples"], d["num_channels"], d["sparsities"])
+
+        d["ignored_ids"] = np.array(d["ignored_ids"])
+
+        omp_min_sps = d["omp_min_sps"]
+        # 
nb_active_channels = np.array([len(d['sparsities'][count]) for count in range(d['num_templates'])]) + d["stop_criteria"] = omp_min_sps * np.sqrt(d["noise_levels"].sum() * d["num_samples"]) + + return d + + @classmethod + def serialize_method_kwargs(cls, kwargs): + kwargs = dict(kwargs) + # remove waveform_extractor + kwargs.pop("waveform_extractor") + return kwargs + + @classmethod + def unserialize_in_worker(cls, kwargs): + return kwargs + + @classmethod + def get_margin(cls, recording, kwargs): + margin = 2 * max(kwargs["nbefore"], kwargs["nafter"]) + return margin + + @classmethod + def main_function(cls, traces, d): + templates = d["templates"] + num_templates = d["num_templates"] + num_channels = d["num_channels"] + num_samples = d["num_samples"] + overlaps = d["overlaps"] + norms = d["norms"] + nbefore = d["nbefore"] + nafter = d["nafter"] + omp_tol = np.finfo(np.float32).eps + num_samples = d["nafter"] + d["nbefore"] + neighbor_window = num_samples - 1 + min_amplitude, max_amplitude = d["amplitudes"] + sparsities = d["sparsities"] + ignored_ids = d["ignored_ids"] + stop_criteria = d["stop_criteria"] + vicinity = d["vicinity"] + rank = d['rank'] + + num_timesteps = len(traces) + + num_peaks = num_timesteps - num_samples + 1 + conv_shape = (num_templates, num_peaks) + scalar_products = np.zeros(conv_shape, dtype=np.float32) + + # Filter using overlap-and-add convolution + spatially_filtered_data = np.matmul(d['spatial'], traces.T[np.newaxis, :, :]) + scaled_filtered_data = spatially_filtered_data * d['singular'] + objective_by_rank = scipy.signal.oaconvolve(scaled_filtered_data, d['temporal'], axes=2, mode="valid") + scalar_products += np.sum(objective_by_rank, axis=0) + + if len(ignored_ids) > 0: + scalar_products[ignored_ids] = -np.inf + + num_spikes = 0 + + spikes = np.empty(scalar_products.size, dtype=spike_dtype) + idx_lookup = np.arange(scalar_products.size).reshape(num_templates, -1) + + M = np.zeros((100, 100), dtype=np.float32) + + all_selections = np.empty((2, scalar_products.size), dtype=np.int32) + final_amplitudes = np.zeros(scalar_products.shape, dtype=np.float32) + num_selection = 0 + + full_sps = scalar_products.copy() + + neighbors = {} + cached_overlaps = {} + + is_valid = scalar_products > stop_criteria + all_amplitudes = np.zeros(0, dtype=np.float32) + is_in_vicinity = np.zeros(0, dtype=np.int32) + + while np.any(is_valid): + best_amplitude_ind = scalar_products[is_valid].argmax() + best_cluster_ind, peak_index = np.unravel_index(idx_lookup[is_valid][best_amplitude_ind], idx_lookup.shape) + + if num_selection > 0: + delta_t = selection[1] - peak_index + idx = np.where((delta_t < neighbor_window) & (delta_t > -num_samples))[0] + myline = num_samples + delta_t[idx] + + if not best_cluster_ind in cached_overlaps: + cached_overlaps[best_cluster_ind] = overlaps[best_cluster_ind].toarray() + + if num_selection == M.shape[0]: + Z = np.zeros((2 * num_selection, 2 * num_selection), dtype=np.float32) + Z[:num_selection, :num_selection] = M + M = Z + + M[num_selection, idx] = cached_overlaps[best_cluster_ind][selection[0, idx], myline] + + if vicinity == 0: + scipy.linalg.solve_triangular( + M[:num_selection, :num_selection], + M[num_selection, :num_selection], + trans=0, + lower=1, + overwrite_b=True, + check_finite=False, + ) + + v = nrm2(M[num_selection, :num_selection]) ** 2 + Lkk = 1 - v + if Lkk <= omp_tol: # selected atoms are dependent + break + M[num_selection, num_selection] = np.sqrt(Lkk) + else: + is_in_vicinity = np.where(np.abs(delta_t) < vicinity)[0] + + if 
len(is_in_vicinity) > 0: + L = M[is_in_vicinity, :][:, is_in_vicinity] + + M[num_selection, is_in_vicinity] = scipy.linalg.solve_triangular( + L, M[num_selection, is_in_vicinity], trans=0, lower=1, overwrite_b=True, check_finite=False + ) + + v = nrm2(M[num_selection, is_in_vicinity]) ** 2 + Lkk = 1 - v + if Lkk <= omp_tol: # selected atoms are dependent + break + M[num_selection, num_selection] = np.sqrt(Lkk) + else: + M[num_selection, num_selection] = 1.0 + else: + M[0, 0] = 1 + + all_selections[:, num_selection] = [best_cluster_ind, peak_index] + num_selection += 1 + + selection = all_selections[:, :num_selection] + res_sps = full_sps[selection[0], selection[1]] + + if True: # vicinity == 0: + all_amplitudes, _ = potrs(M[:num_selection, :num_selection], res_sps, lower=True, overwrite_b=False) + all_amplitudes /= norms[selection[0]] + else: + # This is not working, need to figure out why + is_in_vicinity = np.append(is_in_vicinity, num_selection - 1) + all_amplitudes = np.append(all_amplitudes, np.float32(1)) + L = M[is_in_vicinity, :][:, is_in_vicinity] + all_amplitudes[is_in_vicinity], _ = potrs(L, res_sps[is_in_vicinity], lower=True, overwrite_b=False) + all_amplitudes[is_in_vicinity] /= norms[selection[0][is_in_vicinity]] + + diff_amplitudes = all_amplitudes - final_amplitudes[selection[0], selection[1]] + modified = np.where(np.abs(diff_amplitudes) > omp_tol)[0] + final_amplitudes[selection[0], selection[1]] = all_amplitudes + + for i in modified: + tmp_best, tmp_peak = selection[:, i] + diff_amp = diff_amplitudes[i] * norms[tmp_best] + + if not tmp_best in cached_overlaps: + cached_overlaps[tmp_best] = overlaps[tmp_best].toarray() + + if not tmp_peak in neighbors.keys(): + idx = [max(0, tmp_peak - num_samples), min(num_peaks, tmp_peak + neighbor_window)] + tdx = [num_samples + idx[0] - tmp_peak, num_samples + idx[1] - tmp_peak] + neighbors[tmp_peak] = {"idx": idx, "tdx": tdx} + + idx = neighbors[tmp_peak]["idx"] + tdx = neighbors[tmp_peak]["tdx"] + + to_add = diff_amp * cached_overlaps[tmp_best][:, tdx[0] : tdx[1]] + scalar_products[:, idx[0] : idx[1]] -= to_add + + is_valid = scalar_products > stop_criteria + + is_valid = (final_amplitudes > min_amplitude) * (final_amplitudes < max_amplitude) + valid_indices = np.where(is_valid) + + num_spikes = len(valid_indices[0]) + spikes["sample_index"][:num_spikes] = valid_indices[1] + d["nbefore"] + spikes["channel_index"][:num_spikes] = 0 + spikes["cluster_index"][:num_spikes] = valid_indices[0] + spikes["amplitude"][:num_spikes] = final_amplitudes[valid_indices[0], valid_indices[1]] + + spikes = spikes[:num_spikes] + order = np.argsort(spikes["sample_index"]) + spikes = spikes[order] + + return spikes + + + + class CircusPeeler(BaseTemplateMatchingEngine): """ diff --git a/src/spikeinterface/sortingcomponents/matching/method_list.py b/src/spikeinterface/sortingcomponents/matching/method_list.py index bedc04a9d5..46c4a53872 100644 --- a/src/spikeinterface/sortingcomponents/matching/method_list.py +++ b/src/spikeinterface/sortingcomponents/matching/method_list.py @@ -1,6 +1,6 @@ from .naive import NaiveMatching from .tdc import TridesclousPeeler -from .circus import CircusPeeler, CircusOMPPeeler +from .circus import CircusPeeler, CircusOMPPeeler, CircusOMPSVDPeeler from .wobble import WobbleMatch matching_methods = { @@ -8,5 +8,6 @@ "tridesclous": TridesclousPeeler, "circus": CircusPeeler, "circus-omp": CircusOMPPeeler, + 'circus-omp-svd' : CircusOMPSVDPeeler, "wobble": WobbleMatch, } From 2602ebc5d830ba5945d1d4245c9ffd6020e0c88f Mon Sep 17 
00:00:00 2001 From: weiglszonja Date: Tue, 26 Sep 2023 10:11:26 +0200 Subject: [PATCH 067/115] Add ignore_timestamps_errors to extractor --- .../extractors/neoextractors/openephys.py | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/src/spikeinterface/extractors/neoextractors/openephys.py b/src/spikeinterface/extractors/neoextractors/openephys.py index a771dc47b1..0d9a3887f8 100644 --- a/src/spikeinterface/extractors/neoextractors/openephys.py +++ b/src/spikeinterface/extractors/neoextractors/openephys.py @@ -45,14 +45,24 @@ class OpenEphysLegacyRecordingExtractor(NeoBaseRecordingExtractor): If there are several blocks (experiments), specify the block index you want to load. all_annotations: bool (default False) Load exhaustively all annotation from neo. + ignore_timestamps_errors: bool (default False) + Ignore the discontinuous timestamps errors in neo. """ mode = "folder" NeoRawIOClass = "OpenEphysRawIO" name = "openephyslegacy" - def __init__(self, folder_path, stream_id=None, stream_name=None, block_index=None, all_annotations=False): - neo_kwargs = self.map_to_neo_kwargs(folder_path) + def __init__( + self, + folder_path, + stream_id=None, + stream_name=None, + block_index=None, + all_annotations=False, + ignore_timestamps_errors=False, + ): + neo_kwargs = self.map_to_neo_kwargs(folder_path, ignore_timestamps_errors) NeoBaseRecordingExtractor.__init__( self, stream_id=stream_id, @@ -64,8 +74,8 @@ def __init__(self, folder_path, stream_id=None, stream_name=None, block_index=No self._kwargs.update(dict(folder_path=str(Path(folder_path).absolute()))) @classmethod - def map_to_neo_kwargs(cls, folder_path): - neo_kwargs = {"dirname": str(folder_path)} + def map_to_neo_kwargs(cls, folder_path, ignore_timestamps_errors=False): + neo_kwargs = {"dirname": str(folder_path), "ignore_timestamps_errors": ignore_timestamps_errors} return neo_kwargs From cc4720460127960d5d8cf16248690b3323c6c4a9 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Tue, 26 Sep 2023 10:49:57 +0200 Subject: [PATCH 068/115] Increase default rank --- .../sortingcomponents/matching/circus.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index e86c913976..bc378fb9a2 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -519,7 +519,7 @@ class CircusOMPSVDPeeler(BaseTemplateMatchingEngine): "norms": None, "random_chunk_kwargs": {}, "noise_levels": None, - "rank" : 3, + "rank" : 10, "sparse_kwargs": {"method": "ptp", "threshold": 1}, "ignored_ids": [], "vicinity": 0, @@ -537,17 +537,20 @@ def _prepare_templates(cls, d): templates = waveform_extractor.get_all_templates(mode="median").copy() - temporal, singular, spatial = np.linalg.svd(templates, full_matrices=False) - # Keep only the strongest components rank = d['rank'] d['templates'] = {} d["norms"] = np.zeros(num_templates, dtype=np.float32) d['sparsities'] = {} - d["norms"] = np.linalg.norm(templates, axis=(1, 2)) - for i in range(num_templates): - d['sparsities'][i] = np.arange(templates.shape[2]) - d['templates'][i] = templates[i] / d["norms"][i] + + for count in range(num_templates): + template = templates[count][:, sparsity[count]] + (d["sparsities"][count],) = np.nonzero(sparsity[count]) + d["norms"][count] = np.linalg.norm(template) + templates[count][:, ~sparsity[count]] = 0 + d["templates"][count] = 
template / d["norms"][count] + + temporal, singular, spatial = np.linalg.svd(templates, full_matrices=False) temporal = temporal[:, :, :rank] d["temporal"] = np.flip(temporal, axis=1) @@ -631,7 +634,6 @@ def main_function(cls, traces, d): num_samples = d["nafter"] + d["nbefore"] neighbor_window = num_samples - 1 min_amplitude, max_amplitude = d["amplitudes"] - sparsities = d["sparsities"] ignored_ids = d["ignored_ids"] stop_criteria = d["stop_criteria"] vicinity = d["vicinity"] From 10c33c1c8645aa7e144bdb8efbc06b993c79c4b0 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Tue, 26 Sep 2023 12:01:10 +0200 Subject: [PATCH 069/115] To be tried --- src/spikeinterface/sortingcomponents/matching/circus.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index bc378fb9a2..8c002a5cc7 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -601,6 +601,7 @@ def initialize_and_check_kwargs(cls, recording, kwargs): omp_min_sps = d["omp_min_sps"] # nb_active_channels = np.array([len(d['sparsities'][count]) for count in range(d['num_templates'])]) d["stop_criteria"] = omp_min_sps * np.sqrt(d["noise_levels"].sum() * d["num_samples"]) + #d['stop_criteria'] = omp_min_sps * np.maximum(d['norms'], np.sqrt(d["noise_levels"].sum() * d["num_samples"])) return d @@ -635,7 +636,7 @@ def main_function(cls, traces, d): neighbor_window = num_samples - 1 min_amplitude, max_amplitude = d["amplitudes"] ignored_ids = d["ignored_ids"] - stop_criteria = d["stop_criteria"] + stop_criteria = d["stop_criteria"]#[:, np.newaxis] vicinity = d["vicinity"] rank = d['rank'] From 8ea82ee0a43f04c8a51017651710e19eb9a156db Mon Sep 17 00:00:00 2001 From: weiglszonja Date: Tue, 26 Sep 2023 13:17:39 +0200 Subject: [PATCH 070/115] check neo version and pop ignore_timestamps_errors for version 0.12.0 and older --- .../extractors/neoextractors/openephys.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/src/spikeinterface/extractors/neoextractors/openephys.py b/src/spikeinterface/extractors/neoextractors/openephys.py index 0d9a3887f8..cd2b6fb941 100644 --- a/src/spikeinterface/extractors/neoextractors/openephys.py +++ b/src/spikeinterface/extractors/neoextractors/openephys.py @@ -22,6 +22,19 @@ from spikeinterface.extractors.neuropixels_utils import get_neuropixels_sample_shifts +def drop_invalid_neo_arguments_for_version_0_12_0(neo_kwargs): + # Temporary function until neo version 0.13.0 is released + from packaging.version import Version + from importlib.metadata import version as lib_version + + neo_version = lib_version("neo") + # The possibility of ignoring timestamps errors is not present in neo <= 0.12.0 + if Version(neo_version) <= Version("0.12.0"): + neo_kwargs.pop("ignore_timestamps_errors") + + return neo_kwargs + + class OpenEphysLegacyRecordingExtractor(NeoBaseRecordingExtractor): """ Class for reading data saved by the Open Ephys GUI. 
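The `drop_invalid_neo_arguments_for_version_0_12_0` helper added above follows a
common guard pattern: build the full kwargs dict, then strip any argument that the
installed dependency is too old to accept. A minimal self-contained sketch of that
pattern (the helper name and defaults below are made up for illustration;
`importlib.metadata.version` and `packaging.version.Version` are the only real APIs
used):

    from importlib.metadata import version as lib_version
    from packaging.version import Version

    def drop_kwarg_for_old_lib(kwargs, lib="neo", arg="ignore_timestamps_errors", min_version="0.13.0"):
        # Older releases of `lib` raise a TypeError on the unknown keyword,
        # so remove it when the installed version predates `min_version`.
        if Version(lib_version(lib)) < Version(min_version):
            kwargs.pop(arg, None)
        return kwargs

Keeping the guard inside `map_to_neo_kwargs` (rather than in `__init__`) means every
caller that builds neo kwargs through that classmethod gets the same protection.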
@@ -76,6 +89,7 @@ def __init__(

     @classmethod
     def map_to_neo_kwargs(cls, folder_path, ignore_timestamps_errors=False):
         neo_kwargs = {"dirname": str(folder_path), "ignore_timestamps_errors": ignore_timestamps_errors}
+        neo_kwargs = drop_invalid_neo_arguments_for_version_0_12_0(neo_kwargs)
         return neo_kwargs


From 5029445580bc6274ee8845636dd8d09b07e85826 Mon Sep 17 00:00:00 2001
From: Garcia Samuel
Date: Tue, 26 Sep 2023 13:25:48 +0200
Subject: [PATCH 071/115] Apply suggestions from code review

Thanks Alessio

Co-authored-by: Alessio Buccino
---
 doc/modules/comparison.rst                    |  1 -
 .../comparison/groundtruthstudy.py            | 45 +++++++++----------
 .../comparison/tests/test_groundtruthstudy.py |  1 -
 3 files changed, 20 insertions(+), 27 deletions(-)

diff --git a/doc/modules/comparison.rst b/doc/modules/comparison.rst
index 9b2e701dac..57e9a0b5ba 100644
--- a/doc/modules/comparison.rst
+++ b/doc/modules/comparison.rst
@@ -293,7 +293,6 @@ The all mechanism is based on an intrinsic organization into a "study_folder" wi
             "sorter_name": "tridesclous2",
         },
     },
-    #
     ("tdc2", "toy1"): {
         "label": "tridesclous2 on tetrode1",
         "dataset": "toy1",
diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py
index 6dc9cb30f0..2d4486bbe4 100644
--- a/src/spikeinterface/comparison/groundtruthstudy.py
+++ b/src/spikeinterface/comparison/groundtruthstudy.py
@@ -28,24 +28,23 @@

 class GroundTruthStudy:
     """
-    This class is an helper function to run any comparison on several "cases" for several ground truth dataset.
+    This class is a helper to run any comparison on several "cases" for many ground-truth datasets.

-    "cases" can be:
-    * several sorter for comparisons
+    "cases" refer to:
+    * several sorters for comparisons
     * same sorter with differents parameters
     * parameters of comparisons
-    * any combination of theses
+    * any combination of these (and more)

-    For enough flexibility cases key can be a tuple so that we can varify complexity along several
-    "levels" or "axis" (paremeters or sorter).
-
-    Generated dataframes will have index with several levels optionaly.
+    For increased flexibility, case keys can be tuples so that we can vary complexity along several
+    "levels" or "axes" (parameters or sorters).
+    In this case, the result dataframes will have a `MultiIndex` to handle the different levels.

-    Ground truth dataset need recording+sorting. This can be from mearec file or from the internal generator
-    :py:fun:`generate_ground_truth_recording()`
+    A ground-truth dataset is made of a `Recording` and a `Sorting` object. For example, it can be a simulated dataset with MEArec or internally generated (see
+    :py:func:`~spikeinterface.core.generate.generate_ground_truth_recording()`).

     This GroundTruthStudy have been refactor in version 0.100 to be more flexible than previous versions.
-    Folders structures are not backward compatible at all.
+    Note that the underlying folder structure is not backward compatible!
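+
+    A minimal sketch of creating a study (illustrative only: `recording` and
+    `gt_sorting` are assumed to exist, and the exact case fields depend on
+    what is being compared)::
+
+        datasets = {"toy1": (recording, gt_sorting)}
+        cases = {
+            ("tdc2", "toy1"): {"label": "tridesclous2 on toy1", "dataset": "toy1"},
+        }
+        study = GroundTruthStudy.create(study_folder, datasets=datasets,
+                                        cases=cases, levels=["sorter", "dataset"])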
""" def __init__(self, study_folder): self.folder = Path(study_folder) @@ -85,21 +84,21 @@ def create(cls, study_folder, datasets={}, cases={}, levels=None): study_folder.mkdir(exist_ok=False, parents=True) (study_folder / "datasets").mkdir() - (study_folder / "datasets/recordings").mkdir() - (study_folder / "datasets/gt_sortings").mkdir() + (study_folder / "datasets" / "recordings").mkdir() + (study_folder / "datasets" / "gt_sortings").mkdir() (study_folder / "sorters").mkdir() (study_folder / "sortings").mkdir() (study_folder / "sortings" / "run_logs").mkdir() (study_folder / "metrics").mkdir() for key, (rec, gt_sorting) in datasets.items(): - assert "/" not in key - assert "\\" not in key + assert "/" not in key, "'/' cannot be in the key name!" + assert "\\" not in key, "'\\' cannot be in the key name!" - # rec are pickle + # recordings are pickled rec.dump_to_pickle(study_folder / f"datasets/recordings/{key}.pickle") - # sorting are pickle + saved as NumpyFolderSorting + # sortings are pickled + saved as NumpyFolderSorting gt_sorting.dump_to_pickle(study_folder / f"datasets/gt_sortings/{key}.pickle") gt_sorting.save(format="numpy_folder", folder=study_folder / f"datasets/gt_sortings/{key}") @@ -108,11 +107,7 @@ def create(cls, study_folder, datasets={}, cases={}, levels=None): info["levels"] = levels (study_folder / "info.json").write_text(json.dumps(info, indent=4), encoding="utf8") - # (study_folder / "cases.jon").write_text( - # json.dumps(cases, indent=4, cls=SIJsonEncoder), - # encoding="utf8", - # ) - # cases is dump to a pickle file, json is not possible because of tuple key + # cases is dumped to a pickle file, json is not possible because of the tuple key (study_folder / "cases.pickle").write_bytes(pickle.dumps(cases)) return cls(study_folder) @@ -127,10 +122,10 @@ def scan_folder(self): self.levels = self.info["levels"] - for rec_file in (self.folder / "datasets/recordings").glob("*.pickle"): + for rec_file in (self.folder / "datasets" / "recordings").glob("*.pickle"): key = rec_file.stem rec = load_extractor(rec_file) - gt_sorting = load_extractor(self.folder / f"datasets/gt_sortings/{key}") + gt_sorting = load_extractor(self.folder / f"datasets" / "gt_sortings" / key) self.datasets[key] = (rec, gt_sorting) with open(self.folder / "cases.pickle", "rb") as f: @@ -304,7 +299,7 @@ def compute_metrics(self, case_keys=None, metric_names=["snr", "firing_rate"], f case_keys = self.cases.keys() for key in case_keys: - filename = self.folder / "metrics" / f"{self.key_to_str(key)}.txt" + filename = self.folder / "metrics" / f"{self.key_to_str(key)}.csv" if filename.exists(): if force: os.remove(filename) diff --git a/src/spikeinterface/comparison/tests/test_groundtruthstudy.py b/src/spikeinterface/comparison/tests/test_groundtruthstudy.py index a75ac272be..12d764950e 100644 --- a/src/spikeinterface/comparison/tests/test_groundtruthstudy.py +++ b/src/spikeinterface/comparison/tests/test_groundtruthstudy.py @@ -16,7 +16,6 @@ study_folder = cache_folder / "test_groundtruthstudy/" -print(study_folder.absolute()) def setup_module(): if study_folder.is_dir(): From a970899c2e5162e842be6b0237a4338063508513 Mon Sep 17 00:00:00 2001 From: Zach McKenzie <92116279+zm711@users.noreply.github.com> Date: Tue, 26 Sep 2023 08:34:03 -0400 Subject: [PATCH 072/115] handle case of if-else copy_binary Co-authored-by: Alessio Buccino --- src/spikeinterface/exporters/to_phy.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/spikeinterface/exporters/to_phy.py 
b/src/spikeinterface/exporters/to_phy.py index 7de1a128e5..4af6f73b25 100644 --- a/src/spikeinterface/exporters/to_phy.py +++ b/src/spikeinterface/exporters/to_phy.py @@ -153,7 +153,10 @@ def export_to_phy( # write params.py with (output_folder / "params.py").open("w") as f: if use_relative_path: - f.write(f"dat_path = r'recording.dat'\n") + if copy_binary: + f.write(f"dat_path = r'recording.dat'\n") + else: + f.write(f"dat_path = r'{str(Path(rec_path).relative_to(output_folder))}'\n") else: f.write(f"dat_path = r'{str(rec_path)}'\n") f.write(f"n_channels_dat = {num_chans}\n") From b2a9b70abeb1fccbfa73e51f604253c0f02c81c0 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Tue, 26 Sep 2023 16:33:17 +0200 Subject: [PATCH 073/115] WIP --- src/spikeinterface/sortingcomponents/matching/circus.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index 8c002a5cc7..482d36956f 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -519,7 +519,7 @@ class CircusOMPSVDPeeler(BaseTemplateMatchingEngine): "norms": None, "random_chunk_kwargs": {}, "noise_levels": None, - "rank" : 10, + "rank" : 5, "sparse_kwargs": {"method": "ptp", "threshold": 1}, "ignored_ids": [], "vicinity": 0, @@ -599,9 +599,8 @@ def initialize_and_check_kwargs(cls, recording, kwargs): d["ignored_ids"] = np.array(d["ignored_ids"]) omp_min_sps = d["omp_min_sps"] - # nb_active_channels = np.array([len(d['sparsities'][count]) for count in range(d['num_templates'])]) - d["stop_criteria"] = omp_min_sps * np.sqrt(d["noise_levels"].sum() * d["num_samples"]) - #d['stop_criteria'] = omp_min_sps * np.maximum(d['norms'], np.sqrt(d["noise_levels"].sum() * d["num_samples"])) + #d["stop_criteria"] = omp_min_sps * np.sqrt(d["noise_levels"].sum() * d["num_samples"]) + d['stop_criteria'] = omp_min_sps * np.maximum(d['norms'], np.sqrt(d["noise_levels"].sum() * d["num_samples"])) return d @@ -636,7 +635,7 @@ def main_function(cls, traces, d): neighbor_window = num_samples - 1 min_amplitude, max_amplitude = d["amplitudes"] ignored_ids = d["ignored_ids"] - stop_criteria = d["stop_criteria"]#[:, np.newaxis] + stop_criteria = d["stop_criteria"][:, np.newaxis] vicinity = d["vicinity"] rank = d['rank'] From 3c94594fdd5ee6a58c2635a2f9a8dba9c8ce500d Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Tue, 26 Sep 2023 17:01:51 +0200 Subject: [PATCH 074/115] Working with circus2 --- .../sorters/internal/spyking_circus2.py | 2 +- .../clustering/clustering_tools.py | 7 ++-- .../sortingcomponents/matching/circus.py | 37 ++++++++++--------- 3 files changed, 25 insertions(+), 21 deletions(-) diff --git a/src/spikeinterface/sorters/internal/spyking_circus2.py b/src/spikeinterface/sorters/internal/spyking_circus2.py index db3d88f116..7097b9e56b 100644 --- a/src/spikeinterface/sorters/internal/spyking_circus2.py +++ b/src/spikeinterface/sorters/internal/spyking_circus2.py @@ -152,7 +152,7 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): matching_job_params["chunk_duration"] = "100ms" spikes = find_spikes_from_templates( - recording_f, method="circus-omp", method_kwargs=matching_params, **matching_job_params + recording_f, method="circus-omp-svd", method_kwargs=matching_params, **matching_job_params ) if verbose: diff --git a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py 
b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py index b87bbc7cee..99836fa293 100644 --- a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py +++ b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py @@ -602,8 +602,6 @@ def remove_duplicates_via_matching( "noise_levels": noise_levels, "amplitudes": [0.95, 1.05], "omp_min_sps": 0.1, - "templates": None, - "overlaps": None, } ) @@ -618,7 +616,7 @@ def remove_duplicates_via_matching( method_kwargs.update({"ignored_ids": ignore_ids + [i]}) spikes, computed = find_spikes_from_templates( - sub_recording, method="circus-omp", method_kwargs=method_kwargs, extra_outputs=True, **job_kwargs + sub_recording, method="circus-omp-svd", method_kwargs=method_kwargs, extra_outputs=True, **job_kwargs ) method_kwargs.update( { @@ -626,6 +624,9 @@ def remove_duplicates_via_matching( "templates": computed["templates"], "norms": computed["norms"], "sparsities": computed["sparsities"], + "temporal" : computed["temporal"], + "spatial" : computed["spatial"], + "singular" : computed["singular"], } ) valid = (spikes["sample_index"] >= half_marging) * (spikes["sample_index"] < duration + half_marging) diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index 482d36956f..e955687ed7 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -514,9 +514,6 @@ class CircusOMPSVDPeeler(BaseTemplateMatchingEngine): "amplitudes": [0.6, 2], "omp_min_sps": 0.1, "waveform_extractor": None, - "templates": None, - "overlaps": None, - "norms": None, "random_chunk_kwargs": {}, "noise_levels": None, "rank" : 5, @@ -537,28 +534,34 @@ def _prepare_templates(cls, d): templates = waveform_extractor.get_all_templates(mode="median").copy() - # Keep only the strongest components - rank = d['rank'] - d['templates'] = {} - d["norms"] = np.zeros(num_templates, dtype=np.float32) + #First, we set masked channels to 0 d['sparsities'] = {} - for count in range(num_templates): template = templates[count][:, sparsity[count]] (d["sparsities"][count],) = np.nonzero(sparsity[count]) - d["norms"][count] = np.linalg.norm(template) templates[count][:, ~sparsity[count]] = 0 - d["templates"][count] = template / d["norms"][count] + # Then we keep only the strongest components + rank = d['rank'] temporal, singular, spatial = np.linalg.svd(templates, full_matrices=False) - - temporal = temporal[:, :, :rank] - d["temporal"] = np.flip(temporal, axis=1) + d["temporal"] = temporal[:, :, :rank] d["singular"] = singular[:, :rank] d["spatial"] = spatial[:, :rank, :] - d['temporal'] /= d['norms'][:, np.newaxis, np.newaxis] + # We reconstruct the approximated templates + templates = np.matmul(d["temporal"] * d["singular"][:, np.newaxis, :], d["spatial"]) + + d["temporal"] = np.flip(temporal, axis=1) + d['templates'] = {} + d["norms"] = np.zeros(num_templates, dtype=np.float32) + + # And get the norms, saving compressed templates for CC matrix + for count in range(num_templates): + template = templates[count][:, sparsity[count]] + d["norms"][count] = np.linalg.norm(template) + d["templates"][count] = template / d["norms"][count] + d['temporal'] /= d['norms'][:, np.newaxis, np.newaxis] d["spatial"] = np.moveaxis(d['spatial'][:, :rank, :], [0, 1, 2], [1, 0, 2]) d['temporal'] = np.moveaxis(d['temporal'][:, :, :rank], [0, 1, 2], [1, 2, 0]) d['singular'] = d['singular'].T[:, :, np.newaxis] @@ -585,15 +588,15 @@ def 
initialize_and_check_kwargs(cls, recording, kwargs): print("CircusOMPPeeler : noise should be computed outside") d["noise_levels"] = get_noise_levels(recording, **d["random_chunk_kwargs"], return_scaled=False) - if d["templates"] is None: + if "templates" not in d: d = cls._prepare_templates(d) else: - for key in ["norms", "sparsities"]: + for key in ["norms", "sparsities", 'temporal', 'spatial', 'singular']: assert d[key] is not None, "If templates are provided, %d should also be there" % key d["num_templates"] = len(d["templates"]) - if d["overlaps"] is None: + if "overlaps" not in d: d["overlaps"] = compute_overlaps(d["templates"], d["num_samples"], d["num_channels"], d["sparsities"]) d["ignored_ids"] = np.array(d["ignored_ids"]) From ad78ef269136a0d4bec37236a79c30f15862581f Mon Sep 17 00:00:00 2001 From: Zach McKenzie <92116279+zm711@users.noreply.github.com> Date: Tue, 26 Sep 2023 12:10:06 -0400 Subject: [PATCH 075/115] improve docstring-feedback --- src/spikeinterface/exporters/to_phy.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/spikeinterface/exporters/to_phy.py b/src/spikeinterface/exporters/to_phy.py index 4af6f73b25..edfca0fa52 100644 --- a/src/spikeinterface/exporters/to_phy.py +++ b/src/spikeinterface/exporters/to_phy.py @@ -66,7 +66,8 @@ def export_to_phy( verbose: bool If True, output is verbose use_relative_path : bool, default: False - If True saves the `dat_path` as a relative path, else an absolute + If True and `copy_binary=True` saves the binary file `dat_path` in the `params.py` relative to `output_folder` (ie `dat_path=r'recording.dat'`). If `copy_binary=False`, then uses a path relative to the `output_folder` + If False, uses an absolute path in the `params.py` (ie `dat_path=r'path/to/the/recording.dat'`) {} """ From 32d3d7a6aebdaed8757fe6ca994c537e6034927c Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Tue, 26 Sep 2023 20:52:40 +0200 Subject: [PATCH 076/115] extract_waveforms_gt must be done on dataset key instead of case key. --- .../comparison/groundtruthstudy.py | 29 ++++++++++++++----- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py index 2d4486bbe4..8a294a88af 100644 --- a/src/spikeinterface/comparison/groundtruthstudy.py +++ b/src/spikeinterface/comparison/groundtruthstudy.py @@ -267,24 +267,29 @@ def get_run_times(self, case_keys=None): return pd.Series(run_times, name="run_time") def extract_waveforms_gt(self, case_keys=None, **extract_kwargs): - + if case_keys is None: case_keys = self.cases.keys() base_folder = self.folder / "waveforms" base_folder.mkdir(exist_ok=True) - for key in case_keys: - dataset_key = self.cases[key]["dataset"] + dataset_keys = [self.cases[key]["dataset"] for key in case_keys] + dataset_keys = set(dataset_keys) + for dataset_key in dataset_keys: + # the waveforms depend on the dataset key + wf_folder = base_folder / self.key_to_str(dataset_key) recording, gt_sorting = self.datasets[dataset_key] - wf_folder = base_folder / self.key_to_str(key) we = extract_waveforms(recording, gt_sorting, folder=wf_folder) def get_waveform_extractor(self, key): # some recording are not dumpable to json and the waveforms extactor need it! 
# so we load it with and put after - we = load_waveforms(self.folder / "waveforms" / self.key_to_str(key), with_recording=False) + # this should be fixed in PR 2027 so remove this after + dataset_key = self.cases[key]["dataset"] + wf_folder = self.folder / "waveforms" / self.key_to_str(dataset_key) + we = load_waveforms(wf_folder, with_recording=False) recording, _ = self.datasets[dataset_key] we.set_recording(recording) return we @@ -298,21 +303,29 @@ def compute_metrics(self, case_keys=None, metric_names=["snr", "firing_rate"], f if case_keys is None: case_keys = self.cases.keys() + done = [] for key in case_keys: - filename = self.folder / "metrics" / f"{self.key_to_str(key)}.csv" + dataset_key = self.cases[key]["dataset"] + if dataset_key in done: + # some case can share the same waveform extractor + continue + done.append(dataset_key) + filename = self.folder / "metrics" / f"{self.key_to_str(dataset_key)}.csv" if filename.exists(): if force: os.remove(filename) else: continue - we = self.get_waveform_extractor(key) metrics = compute_quality_metrics(we, metric_names=metric_names) metrics.to_csv(filename, sep="\t", index=True) def get_metrics(self, key): import pandas as pd - filename = self.folder / "metrics" / f"{self.key_to_str(key)}.txt" + + dataset_key = self.cases[key]["dataset"] + + filename = self.folder / "metrics" / f"{self.key_to_str(dataset_key)}.csv" if not filename.exists(): return metrics = pd.read_csv(filename, sep="\t", index_col=0) From 46149ef0730a8965f2ae612e9672419a18dc674c Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Tue, 26 Sep 2023 22:29:35 +0200 Subject: [PATCH 077/115] Put OMP with SVD as default --- .../sorters/internal/spyking_circus2.py | 2 +- .../clustering/clustering_tools.py | 2 +- .../sortingcomponents/matching/circus.py | 315 ------------------ .../sortingcomponents/matching/method_list.py | 1 - 4 files changed, 2 insertions(+), 318 deletions(-) diff --git a/src/spikeinterface/sorters/internal/spyking_circus2.py b/src/spikeinterface/sorters/internal/spyking_circus2.py index 7097b9e56b..db3d88f116 100644 --- a/src/spikeinterface/sorters/internal/spyking_circus2.py +++ b/src/spikeinterface/sorters/internal/spyking_circus2.py @@ -152,7 +152,7 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): matching_job_params["chunk_duration"] = "100ms" spikes = find_spikes_from_templates( - recording_f, method="circus-omp-svd", method_kwargs=matching_params, **matching_job_params + recording_f, method="circus-omp", method_kwargs=matching_params, **matching_job_params ) if verbose: diff --git a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py index 99836fa293..7a2af09942 100644 --- a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py +++ b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py @@ -616,7 +616,7 @@ def remove_duplicates_via_matching( method_kwargs.update({"ignored_ids": ignore_ids + [i]}) spikes, computed = find_spikes_from_templates( - sub_recording, method="circus-omp-svd", method_kwargs=method_kwargs, extra_outputs=True, **job_kwargs + sub_recording, method="circus-omp", method_kwargs=method_kwargs, extra_outputs=True, **job_kwargs ) method_kwargs.update( { diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index e955687ed7..aeac69fc86 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ 
b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -195,321 +195,6 @@ class CircusOMPPeeler(BaseTemplateMatchingEngine): ----- """ - _default_params = { - "amplitudes": [0.6, 2], - "omp_min_sps": 0.1, - "waveform_extractor": None, - "templates": None, - "overlaps": None, - "norms": None, - "random_chunk_kwargs": {}, - "noise_levels": None, - "sparse_kwargs": {"method": "ptp", "threshold": 1}, - "ignored_ids": [], - "vicinity": 0, - } - - @classmethod - def _prepare_templates(cls, d): - waveform_extractor = d["waveform_extractor"] - num_templates = len(d["waveform_extractor"].sorting.unit_ids) - - if not waveform_extractor.is_sparse(): - sparsity = compute_sparsity(waveform_extractor, **d["sparse_kwargs"]).mask - else: - sparsity = waveform_extractor.sparsity.mask - - templates = waveform_extractor.get_all_templates(mode="median").copy() - - d["sparsities"] = {} - d["templates"] = {} - d["norms"] = np.zeros(num_templates, dtype=np.float32) - - for count, unit_id in enumerate(waveform_extractor.sorting.unit_ids): - template = templates[count][:, sparsity[count]] - (d["sparsities"][count],) = np.nonzero(sparsity[count]) - d["norms"][count] = np.linalg.norm(template) - d["templates"][count] = template / d["norms"][count] - - return d - - @classmethod - def initialize_and_check_kwargs(cls, recording, kwargs): - d = cls._default_params.copy() - d.update(kwargs) - - # assert isinstance(d['waveform_extractor'], WaveformExtractor) - - for v in ["omp_min_sps"]: - assert (d[v] >= 0) and (d[v] <= 1), f"{v} should be in [0, 1]" - - d["num_channels"] = d["waveform_extractor"].recording.get_num_channels() - d["num_samples"] = d["waveform_extractor"].nsamples - d["nbefore"] = d["waveform_extractor"].nbefore - d["nafter"] = d["waveform_extractor"].nafter - d["sampling_frequency"] = d["waveform_extractor"].recording.get_sampling_frequency() - d["vicinity"] *= d["num_samples"] - - if d["noise_levels"] is None: - print("CircusOMPPeeler : noise should be computed outside") - d["noise_levels"] = get_noise_levels(recording, **d["random_chunk_kwargs"], return_scaled=False) - - if d["templates"] is None: - d = cls._prepare_templates(d) - else: - for key in ["norms", "sparsities"]: - assert d[key] is not None, "If templates are provided, %d should also be there" % key - - d["num_templates"] = len(d["templates"]) - - if d["overlaps"] is None: - d["overlaps"] = compute_overlaps(d["templates"], d["num_samples"], d["num_channels"], d["sparsities"]) - - d["ignored_ids"] = np.array(d["ignored_ids"]) - - omp_min_sps = d["omp_min_sps"] - # nb_active_channels = np.array([len(d['sparsities'][count]) for count in range(d['num_templates'])]) - d["stop_criteria"] = omp_min_sps * np.sqrt(d["noise_levels"].sum() * d["num_samples"]) - - return d - - @classmethod - def serialize_method_kwargs(cls, kwargs): - kwargs = dict(kwargs) - # remove waveform_extractor - kwargs.pop("waveform_extractor") - return kwargs - - @classmethod - def unserialize_in_worker(cls, kwargs): - return kwargs - - @classmethod - def get_margin(cls, recording, kwargs): - margin = 2 * max(kwargs["nbefore"], kwargs["nafter"]) - return margin - - @classmethod - def main_function(cls, traces, d): - templates = d["templates"] - num_templates = d["num_templates"] - num_channels = d["num_channels"] - num_samples = d["num_samples"] - overlaps = d["overlaps"] - norms = d["norms"] - nbefore = d["nbefore"] - nafter = d["nafter"] - omp_tol = np.finfo(np.float32).eps - num_samples = d["nafter"] + d["nbefore"] - neighbor_window = num_samples - 1 - min_amplitude, 
max_amplitude = d["amplitudes"] - sparsities = d["sparsities"] - ignored_ids = d["ignored_ids"] - stop_criteria = d["stop_criteria"] - vicinity = d["vicinity"] - - if "cached_fft_kernels" not in d: - d["cached_fft_kernels"] = {"fshape": 0} - - cached_fft_kernels = d["cached_fft_kernels"] - - num_timesteps = len(traces) - - num_peaks = num_timesteps - num_samples + 1 - - traces = traces.T - - dummy_filter = np.empty((num_channels, num_samples), dtype=np.float32) - dummy_traces = np.empty((num_channels, num_timesteps), dtype=np.float32) - - fshape, axes = get_scipy_shape(dummy_filter, traces, axes=1) - fft_cache = {"full": sp_fft.rfftn(traces, fshape, axes=axes)} - - scalar_products = np.empty((num_templates, num_peaks), dtype=np.float32) - - flagged_chunk = cached_fft_kernels["fshape"] != fshape[0] - - for i in range(num_templates): - if i not in ignored_ids: - if i not in cached_fft_kernels or flagged_chunk: - kernel_filter = np.ascontiguousarray(templates[i][::-1].T) - cached_fft_kernels.update({i: sp_fft.rfftn(kernel_filter, fshape, axes=axes)}) - cached_fft_kernels["fshape"] = fshape[0] - - fft_cache.update({"mask": sparsities[i], "template": cached_fft_kernels[i]}) - - convolution = fftconvolve_with_cache(dummy_filter, dummy_traces, fft_cache, axes=1, mode="valid") - if len(convolution) > 0: - scalar_products[i] = convolution.sum(0) - else: - scalar_products[i] = 0 - - if len(ignored_ids) > 0: - scalar_products[ignored_ids] = -np.inf - - num_spikes = 0 - - spikes = np.empty(scalar_products.size, dtype=spike_dtype) - idx_lookup = np.arange(scalar_products.size).reshape(num_templates, -1) - - M = np.zeros((100, 100), dtype=np.float32) - - all_selections = np.empty((2, scalar_products.size), dtype=np.int32) - final_amplitudes = np.zeros(scalar_products.shape, dtype=np.float32) - num_selection = 0 - - full_sps = scalar_products.copy() - - neighbors = {} - cached_overlaps = {} - - is_valid = scalar_products > stop_criteria - all_amplitudes = np.zeros(0, dtype=np.float32) - is_in_vicinity = np.zeros(0, dtype=np.int32) - - while np.any(is_valid): - best_amplitude_ind = scalar_products[is_valid].argmax() - best_cluster_ind, peak_index = np.unravel_index(idx_lookup[is_valid][best_amplitude_ind], idx_lookup.shape) - - if num_selection > 0: - delta_t = selection[1] - peak_index - idx = np.where((delta_t < neighbor_window) & (delta_t > -num_samples))[0] - myline = num_samples + delta_t[idx] - - if not best_cluster_ind in cached_overlaps: - cached_overlaps[best_cluster_ind] = overlaps[best_cluster_ind].toarray() - - if num_selection == M.shape[0]: - Z = np.zeros((2 * num_selection, 2 * num_selection), dtype=np.float32) - Z[:num_selection, :num_selection] = M - M = Z - - M[num_selection, idx] = cached_overlaps[best_cluster_ind][selection[0, idx], myline] - - if vicinity == 0: - scipy.linalg.solve_triangular( - M[:num_selection, :num_selection], - M[num_selection, :num_selection], - trans=0, - lower=1, - overwrite_b=True, - check_finite=False, - ) - - v = nrm2(M[num_selection, :num_selection]) ** 2 - Lkk = 1 - v - if Lkk <= omp_tol: # selected atoms are dependent - break - M[num_selection, num_selection] = np.sqrt(Lkk) - else: - is_in_vicinity = np.where(np.abs(delta_t) < vicinity)[0] - - if len(is_in_vicinity) > 0: - L = M[is_in_vicinity, :][:, is_in_vicinity] - - M[num_selection, is_in_vicinity] = scipy.linalg.solve_triangular( - L, M[num_selection, is_in_vicinity], trans=0, lower=1, overwrite_b=True, check_finite=False - ) - - v = nrm2(M[num_selection, is_in_vicinity]) ** 2 - Lkk = 1 - v - if Lkk 
<= omp_tol: # selected atoms are dependent - break - M[num_selection, num_selection] = np.sqrt(Lkk) - else: - M[num_selection, num_selection] = 1.0 - else: - M[0, 0] = 1 - - all_selections[:, num_selection] = [best_cluster_ind, peak_index] - num_selection += 1 - - selection = all_selections[:, :num_selection] - res_sps = full_sps[selection[0], selection[1]] - - if True: # vicinity == 0: - all_amplitudes, _ = potrs(M[:num_selection, :num_selection], res_sps, lower=True, overwrite_b=False) - all_amplitudes /= norms[selection[0]] - else: - # This is not working, need to figure out why - is_in_vicinity = np.append(is_in_vicinity, num_selection - 1) - all_amplitudes = np.append(all_amplitudes, np.float32(1)) - L = M[is_in_vicinity, :][:, is_in_vicinity] - all_amplitudes[is_in_vicinity], _ = potrs(L, res_sps[is_in_vicinity], lower=True, overwrite_b=False) - all_amplitudes[is_in_vicinity] /= norms[selection[0][is_in_vicinity]] - - diff_amplitudes = all_amplitudes - final_amplitudes[selection[0], selection[1]] - modified = np.where(np.abs(diff_amplitudes) > omp_tol)[0] - final_amplitudes[selection[0], selection[1]] = all_amplitudes - - for i in modified: - tmp_best, tmp_peak = selection[:, i] - diff_amp = diff_amplitudes[i] * norms[tmp_best] - - if not tmp_best in cached_overlaps: - cached_overlaps[tmp_best] = overlaps[tmp_best].toarray() - - if not tmp_peak in neighbors.keys(): - idx = [max(0, tmp_peak - num_samples), min(num_peaks, tmp_peak + neighbor_window)] - tdx = [num_samples + idx[0] - tmp_peak, num_samples + idx[1] - tmp_peak] - neighbors[tmp_peak] = {"idx": idx, "tdx": tdx} - - idx = neighbors[tmp_peak]["idx"] - tdx = neighbors[tmp_peak]["tdx"] - - to_add = diff_amp * cached_overlaps[tmp_best][:, tdx[0] : tdx[1]] - scalar_products[:, idx[0] : idx[1]] -= to_add - - is_valid = scalar_products > stop_criteria - - is_valid = (final_amplitudes > min_amplitude) * (final_amplitudes < max_amplitude) - valid_indices = np.where(is_valid) - - num_spikes = len(valid_indices[0]) - spikes["sample_index"][:num_spikes] = valid_indices[1] + d["nbefore"] - spikes["channel_index"][:num_spikes] = 0 - spikes["cluster_index"][:num_spikes] = valid_indices[0] - spikes["amplitude"][:num_spikes] = final_amplitudes[valid_indices[0], valid_indices[1]] - - spikes = spikes[:num_spikes] - order = np.argsort(spikes["sample_index"]) - spikes = spikes[order] - - return spikes - - -class CircusOMPSVDPeeler(BaseTemplateMatchingEngine): - """ - Orthogonal Matching Pursuit inspired from Spyking Circus sorter - - https://elifesciences.org/articles/34518 - - This is an Orthogonal Template Matching algorithm. For speed and - memory optimization, templates are automatically sparsified. Signal - is convolved with the templates, and as long as some scalar products - are higher than a given threshold, we use a Cholesky decomposition - to compute the optimal amplitudes needed to reconstruct the signal. - - IMPORTANT NOTE: small chunks are more efficient for such Peeler, - consider using 100ms chunk - - Parameters - ---------- - amplitude: tuple - (Minimal, Maximal) amplitudes allowed for every template - omp_min_sps: float - Stopping criteria of the OMP algorithm, in percentage of the norm - noise_levels: array - The noise levels, for every channels. If None, they will be automatically - computed - random_chunk_kwargs: dict - Parameters for computing noise levels, if not provided (sub optimal) - sparse_kwargs: dict - Parameters to extract a sparsity mask from the waveform_extractor, if not - already sparse. 
- ----- - """ - _default_params = { "amplitudes": [0.6, 2], "omp_min_sps": 0.1, diff --git a/src/spikeinterface/sortingcomponents/matching/method_list.py b/src/spikeinterface/sortingcomponents/matching/method_list.py index 46c4a53872..c00c0a1fd3 100644 --- a/src/spikeinterface/sortingcomponents/matching/method_list.py +++ b/src/spikeinterface/sortingcomponents/matching/method_list.py @@ -8,6 +8,5 @@ "tridesclous": TridesclousPeeler, "circus": CircusPeeler, "circus-omp": CircusOMPPeeler, - 'circus-omp-svd' : CircusOMPSVDPeeler, "wobble": WobbleMatch, } From f21d80bf3cb34e5f39d59a7692a0c594025ea7b8 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 26 Sep 2023 20:32:10 +0000 Subject: [PATCH 078/115] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../clustering/clustering_tools.py | 6 +-- .../sortingcomponents/matching/circus.py | 44 +++++++++---------- 2 files changed, 24 insertions(+), 26 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py index 7a2af09942..c1b635fdaf 100644 --- a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py +++ b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py @@ -624,9 +624,9 @@ def remove_duplicates_via_matching( "templates": computed["templates"], "norms": computed["norms"], "sparsities": computed["sparsities"], - "temporal" : computed["temporal"], - "spatial" : computed["spatial"], - "singular" : computed["singular"], + "temporal": computed["temporal"], + "spatial": computed["spatial"], + "singular": computed["singular"], } ) valid = (spikes["sample_index"] >= half_marging) * (spikes["sample_index"] < duration + half_marging) diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index aeac69fc86..d2b02ea15d 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -201,7 +201,7 @@ class CircusOMPPeeler(BaseTemplateMatchingEngine): "waveform_extractor": None, "random_chunk_kwargs": {}, "noise_levels": None, - "rank" : 5, + "rank": 5, "sparse_kwargs": {"method": "ptp", "threshold": 1}, "ignored_ids": [], "vicinity": 0, @@ -219,37 +219,37 @@ def _prepare_templates(cls, d): templates = waveform_extractor.get_all_templates(mode="median").copy() - #First, we set masked channels to 0 - d['sparsities'] = {} + # First, we set masked channels to 0 + d["sparsities"] = {} for count in range(num_templates): template = templates[count][:, sparsity[count]] (d["sparsities"][count],) = np.nonzero(sparsity[count]) templates[count][:, ~sparsity[count]] = 0 # Then we keep only the strongest components - rank = d['rank'] + rank = d["rank"] temporal, singular, spatial = np.linalg.svd(templates, full_matrices=False) d["temporal"] = temporal[:, :, :rank] d["singular"] = singular[:, :rank] d["spatial"] = spatial[:, :rank, :] - + # We reconstruct the approximated templates templates = np.matmul(d["temporal"] * d["singular"][:, np.newaxis, :], d["spatial"]) d["temporal"] = np.flip(temporal, axis=1) - d['templates'] = {} + d["templates"] = {} d["norms"] = np.zeros(num_templates, dtype=np.float32) - + # And get the norms, saving compressed templates for CC matrix for count in range(num_templates): template = templates[count][:, sparsity[count]] d["norms"][count] = 
np.linalg.norm(template) - d["templates"][count] = template / d["norms"][count] - - d['temporal'] /= d['norms'][:, np.newaxis, np.newaxis] - d["spatial"] = np.moveaxis(d['spatial'][:, :rank, :], [0, 1, 2], [1, 0, 2]) - d['temporal'] = np.moveaxis(d['temporal'][:, :, :rank], [0, 1, 2], [1, 2, 0]) - d['singular'] = d['singular'].T[:, :, np.newaxis] + d["templates"][count] = template / d["norms"][count] + + d["temporal"] /= d["norms"][:, np.newaxis, np.newaxis] + d["spatial"] = np.moveaxis(d["spatial"][:, :rank, :], [0, 1, 2], [1, 0, 2]) + d["temporal"] = np.moveaxis(d["temporal"][:, :, :rank], [0, 1, 2], [1, 2, 0]) + d["singular"] = d["singular"].T[:, :, np.newaxis] return d @classmethod @@ -276,7 +276,7 @@ def initialize_and_check_kwargs(cls, recording, kwargs): if "templates" not in d: d = cls._prepare_templates(d) else: - for key in ["norms", "sparsities", 'temporal', 'spatial', 'singular']: + for key in ["norms", "sparsities", "temporal", "spatial", "singular"]: assert d[key] is not None, "If templates are provided, %d should also be there" % key d["num_templates"] = len(d["templates"]) @@ -287,8 +287,8 @@ def initialize_and_check_kwargs(cls, recording, kwargs): d["ignored_ids"] = np.array(d["ignored_ids"]) omp_min_sps = d["omp_min_sps"] - #d["stop_criteria"] = omp_min_sps * np.sqrt(d["noise_levels"].sum() * d["num_samples"]) - d['stop_criteria'] = omp_min_sps * np.maximum(d['norms'], np.sqrt(d["noise_levels"].sum() * d["num_samples"])) + # d["stop_criteria"] = omp_min_sps * np.sqrt(d["noise_levels"].sum() * d["num_samples"]) + d["stop_criteria"] = omp_min_sps * np.maximum(d["norms"], np.sqrt(d["noise_levels"].sum() * d["num_samples"])) return d @@ -325,18 +325,18 @@ def main_function(cls, traces, d): ignored_ids = d["ignored_ids"] stop_criteria = d["stop_criteria"][:, np.newaxis] vicinity = d["vicinity"] - rank = d['rank'] + rank = d["rank"] num_timesteps = len(traces) num_peaks = num_timesteps - num_samples + 1 conv_shape = (num_templates, num_peaks) scalar_products = np.zeros(conv_shape, dtype=np.float32) - + # Filter using overlap-and-add convolution - spatially_filtered_data = np.matmul(d['spatial'], traces.T[np.newaxis, :, :]) - scaled_filtered_data = spatially_filtered_data * d['singular'] - objective_by_rank = scipy.signal.oaconvolve(scaled_filtered_data, d['temporal'], axes=2, mode="valid") + spatially_filtered_data = np.matmul(d["spatial"], traces.T[np.newaxis, :, :]) + scaled_filtered_data = spatially_filtered_data * d["singular"] + objective_by_rank = scipy.signal.oaconvolve(scaled_filtered_data, d["temporal"], axes=2, mode="valid") scalar_products += np.sum(objective_by_rank, axis=0) if len(ignored_ids) > 0: @@ -473,8 +473,6 @@ def main_function(cls, traces, d): return spikes - - class CircusPeeler(BaseTemplateMatchingEngine): """ From a275bcaaf14819e64aa24a78a504b134f1d9288e Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Tue, 26 Sep 2023 22:32:57 +0200 Subject: [PATCH 079/115] Patch --- src/spikeinterface/sortingcomponents/matching/method_list.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/spikeinterface/sortingcomponents/matching/method_list.py b/src/spikeinterface/sortingcomponents/matching/method_list.py index c00c0a1fd3..bedc04a9d5 100644 --- a/src/spikeinterface/sortingcomponents/matching/method_list.py +++ b/src/spikeinterface/sortingcomponents/matching/method_list.py @@ -1,6 +1,6 @@ from .naive import NaiveMatching from .tdc import TridesclousPeeler -from .circus import CircusPeeler, CircusOMPPeeler, CircusOMPSVDPeeler +from .circus import 
CircusPeeler, CircusOMPPeeler from .wobble import WobbleMatch matching_methods = { From 85eb432c16a0719520a8dcbb24d2c8bb2c804d60 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Tue, 26 Sep 2023 22:44:15 +0200 Subject: [PATCH 080/115] Cleaning useless functions --- .../clustering/clustering_tools.py | 6 -- .../sortingcomponents/matching/circus.py | 95 ------------------- 2 files changed, 101 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py index c1b635fdaf..5ff74db3e7 100644 --- a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py +++ b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py @@ -546,7 +546,6 @@ def remove_duplicates_via_matching( from spikeinterface.core import NumpySorting from spikeinterface.core import extract_waveforms from spikeinterface.core import get_global_tmp_folder - from spikeinterface.sortingcomponents.matching.circus import get_scipy_shape import string, random, shutil, os from pathlib import Path @@ -591,11 +590,6 @@ def remove_duplicates_via_matching( chunk_size = duration + 3 * margin - dummy_filter = np.empty((num_chans, duration), dtype=np.float32) - dummy_traces = np.empty((num_chans, chunk_size), dtype=np.float32) - - fshape, axes = get_scipy_shape(dummy_filter, dummy_traces, axes=1) - method_kwargs.update( { "waveform_extractor": waveform_extractor, diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index d2b02ea15d..ec6ef3a292 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -35,101 +35,6 @@ ################# # Circus peeler # -################# - -from scipy.fft._helper import _init_nd_shape_and_axes - -try: - from scipy.signal.signaltools import _init_freq_conv_axes, _apply_conv_mode -except Exception: - from scipy.signal._signaltools import _init_freq_conv_axes, _apply_conv_mode -from scipy import linalg, fft as sp_fft - - -def get_scipy_shape(in1, in2, mode="full", axes=None, calc_fast_len=True): - in1 = np.asarray(in1) - in2 = np.asarray(in2) - - if in1.ndim == in2.ndim == 0: # scalar inputs - return in1 * in2 - elif in1.ndim != in2.ndim: - raise ValueError("in1 and in2 should have the same dimensionality") - elif in1.size == 0 or in2.size == 0: # empty arrays - return np.array([]) - - in1, in2, axes = _init_freq_conv_axes(in1, in2, mode, axes, sorted_axes=False) - - s1 = in1.shape - s2 = in2.shape - - shape = [max((s1[i], s2[i])) if i not in axes else s1[i] + s2[i] - 1 for i in range(in1.ndim)] - - if not len(axes): - return in1 * in2 - - complex_result = in1.dtype.kind == "c" or in2.dtype.kind == "c" - - if calc_fast_len: - # Speed up FFT by padding to optimal size. 
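The helpers removed in this hunk re-implemented scipy's frequency-domain convolution so the FFT of a traces chunk could be cached and reused across template kernels. After the SVD refactoring earlier in the series, matching only needs a batch of short 1-D convolutions over the rank components, which `scipy.signal.oaconvolve` handles directly. A self-contained sketch of that low-rank formulation; the shapes and variable names are assumed from the surrounding diffs rather than copied from the library:

import numpy as np
from scipy.signal import oaconvolve

rng = np.random.default_rng(0)
num_templates, num_samples, num_channels, rank = 4, 60, 8, 3
num_timesteps = 1000

templates = rng.standard_normal((num_templates, num_samples, num_channels)).astype(np.float32)
traces = rng.standard_normal((num_timesteps, num_channels)).astype(np.float32)

# Low-rank factorization of every template: T ~= temporal @ diag(singular) @ spatial
temporal, singular, spatial = np.linalg.svd(templates, full_matrices=False)
temporal = temporal[:, :, :rank]   # (templates, samples, rank)
singular = singular[:, :rank]      # (templates, rank)
spatial = spatial[:, :rank, :]     # (templates, rank, channels)

# Project the traces on each spatial component, then convolve in time only:
# many short 1-D convolutions instead of one large cached 2-D FFT product.
projected = np.einsum("trc,nc->trn", spatial, traces)  # (templates, rank, times)
projected *= singular[:, :, np.newaxis]

# time-reverse the temporal components so the convolution acts as a correlation
kernels = np.flip(temporal, axis=1).transpose(0, 2, 1)  # (templates, rank, samples)
scalar_products = oaconvolve(projected, kernels, axes=2, mode="valid").sum(axis=1)
print(scalar_products.shape)  # (num_templates, num_timesteps - num_samples + 1)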
- fshape = [sp_fft.next_fast_len(shape[a], not complex_result) for a in axes] - else: - fshape = shape - - return fshape, axes - - -def fftconvolve_with_cache(in1, in2, cache, mode="full", axes=None): - in1 = np.asarray(in1) - in2 = np.asarray(in2) - - if in1.ndim == in2.ndim == 0: # scalar inputs - return in1 * in2 - elif in1.ndim != in2.ndim: - raise ValueError("in1 and in2 should have the same dimensionality") - elif in1.size == 0 or in2.size == 0: # empty arrays - return np.array([]) - - in1, in2, axes = _init_freq_conv_axes(in1, in2, mode, axes, sorted_axes=False) - - s1 = in1.shape - s2 = in2.shape - - shape = [max((s1[i], s2[i])) if i not in axes else s1[i] + s2[i] - 1 for i in range(in1.ndim)] - - ret = _freq_domain_conv(in1, in2, axes, shape, cache, calc_fast_len=True) - - return _apply_conv_mode(ret, s1, s2, mode, axes) - - -def _freq_domain_conv(in1, in2, axes, shape, cache, calc_fast_len=True): - if not len(axes): - return in1 * in2 - - complex_result = in1.dtype.kind == "c" or in2.dtype.kind == "c" - - if calc_fast_len: - # Speed up FFT by padding to optimal size. - fshape = [sp_fft.next_fast_len(shape[a], not complex_result) for a in axes] - else: - fshape = shape - - if not complex_result: - fft, ifft = sp_fft.rfftn, sp_fft.irfftn - else: - fft, ifft = sp_fft.fftn, sp_fft.ifftn - - sp1 = cache["full"][cache["mask"]] - sp2 = cache["template"] - - # sp2 = fft(in2[cache['mask']], fshape, axes=axes) - ret = ifft(sp1 * sp2, fshape, axes=axes) - - if calc_fast_len: - fslice = tuple([slice(sz) for sz in shape]) - ret = ret[fslice] - - return ret - def compute_overlaps(templates, num_samples, num_channels, sparsities): num_templates = len(templates) From 15ae43215bf5a3b49a52081e18ad8ba3810bce15 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 26 Sep 2023 20:44:37 +0000 Subject: [PATCH 081/115] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/sortingcomponents/matching/circus.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index ec6ef3a292..7bef8358de 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -36,6 +36,7 @@ ################# # Circus peeler # + def compute_overlaps(templates, num_samples, num_channels, sparsities): num_templates = len(templates) From d48cd681f97fcee2374b65a97f0ecbc9d10b4588 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Wed, 27 Sep 2023 09:02:05 +0200 Subject: [PATCH 082/115] implement some TODOs --- .../comparison/groundtruthstudy.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py index 8a294a88af..34777c6f20 100644 --- a/src/spikeinterface/comparison/groundtruthstudy.py +++ b/src/spikeinterface/comparison/groundtruthstudy.py @@ -22,8 +22,6 @@ # This is to separate names when the key are tuples when saving folders _key_separator = " ## " -# This would be more funny -# _key_separator = " (°_°) " class GroundTruthStudy: @@ -184,8 +182,12 @@ def run_sorters(self, case_keys=None, engine='loop', engine_kwargs={}, keep=True continue if sorting_exists: - # TODO : delete sorting + log - pass + # delete older sorting + log before running sorters + shutil.rmtree(sorting_exists) + log_file 
= self.folder / "sortings" / "run_logs" / f"{self.key_to_str(key)}.json"
+                if log_file.exists():
+                    log_file.unlink()
+
             params = self.cases[key]["run_sorter_params"].copy()
             # this ensure that sorter_name is given
@@ -201,7 +203,7 @@ def run_sorters(self, case_keys=None, engine='loop', engine_kwargs={}, keep=True
         run_sorter_jobs(job_list, engine=engine, engine_kwargs=engine_kwargs, return_output=False)
 
-        # TODO create a list in laucher for engine blocking and non-blocking
+        # TODO later create a list in launcher for engine blocking and non-blocking
         if engine not in ("slurm", ):
             self.copy_sortings(case_keys)
 
@@ -223,8 +225,10 @@ def copy_sortings(self, case_keys=None, force=True):
             if sorting is not None:
                 if sorting_folder.exists():
                     if force:
-                        # TODO delete folder + log
+                        # delete folder + log
                         shutil.rmtree(sorting_folder)
+                        if log_file.exists():
+                            log_file.unlink()
                     else:
                         continue

From 3c3451ecf6452419ebf83dd6dd2d9454ba7e6419 Mon Sep 17 00:00:00 2001
From: Samuel Garcia
Date: Wed, 27 Sep 2023 10:00:35 +0200
Subject: [PATCH 083/115] replace is_dumpable() with a more explicit name:
 is_memory_serializable()

---
 src/spikeinterface/core/base.py            | 53 +++++++++----------
 src/spikeinterface/core/job_tools.py       |  2 +-
 src/spikeinterface/core/numpyextractors.py |  6 +--
 src/spikeinterface/core/old_api_utils.py   |  6 +--
 src/spikeinterface/core/tests/test_base.py | 10 ++--
 .../core/tests/test_jsonification.py       |  3 +-
 .../postprocessing/spike_amplitudes.py     |  2 +-
 7 files changed, 38 insertions(+), 44 deletions(-)

diff --git a/src/spikeinterface/core/base.py b/src/spikeinterface/core/base.py
index 63cf8e894f..3b8765a398 100644
--- a/src/spikeinterface/core/base.py
+++ b/src/spikeinterface/core/base.py
@@ -57,9 +57,7 @@ def __init__(self, main_ids: Sequence) -> None:
         # * number of units for sorting
         self._properties = {}
 
-        self._is_dumpable = True
-        # self._is_json_serializable = True
-        self._serializablility = {'json': True, 'pickle': True}
+        self._serializablility = {'memory': True, 'json': True, 'pickle': True}
 
         # extractor specific list of pip extra requirements
         self.extra_requirements = []
@@ -472,31 +470,8 @@ def clone(self) -> "BaseExtractor":
         clone = BaseExtractor.from_dict(d)
         return clone
 
-    def check_if_dumpable(self):
-        """Check if the object is dumpable, including nested objects.
-
-        Returns
-        -------
-        bool
-            True if the object is dumpable, False otherwise.
- """ - kwargs = self._kwargs - for value in kwargs.values(): - # here we check if the value is a BaseExtractor, a list of BaseExtractors, or a dict of BaseExtractors - if isinstance(value, BaseExtractor): - if not value.check_if_dumpable(): - return False - elif isinstance(value, list): - for v in value: - if isinstance(v, BaseExtractor) and not v.check_if_dumpable(): - return False - elif isinstance(value, dict): - for v in value.values(): - if isinstance(v, BaseExtractor) and not v.check_if_dumpable(): - return False - return self._is_dumpable - def check_serializablility(self, type="json"): + def check_serializablility(self, type): kwargs = self._kwargs for value in kwargs.values(): # here we check if the value is a BaseExtractor, a list of BaseExtractors, or a dict of BaseExtractors @@ -512,6 +487,26 @@ def check_serializablility(self, type="json"): if isinstance(v, BaseExtractor) and not v.check_serializablility(type=type): return False return self._serializablility[type] + + + def check_if_dumpable(self): + warnings.warn( + "check_if_dumpable() is replace by is_memory_serializable()", DeprecationWarning, stacklevel=2 + ) + return self.check_serializablility("memory") + + def is_memory_serializable(self): + """ + Check if the object is serializable to memory with pickle, including nested objects. + + Returns + ------- + bool + True if the object is json serializable, False otherwise. + """ + return self.check_serializablility("memory") + + def check_if_json_serializable(self): """ @@ -636,7 +631,7 @@ def dump_to_pickle( folder_metadata: str, Path, or None Folder with files containing additional information (e.g. probe in BaseRecording) and properties. """ - assert self.check_if_dumpable(), "The extractor is not dumpable" + assert self.check_if_pickle_serializable(), "The extractor is not dumpable" dump_dict = self.to_dict( include_annotations=True, @@ -931,7 +926,7 @@ def save_to_zarr( zarr_root = zarr.open(zarr_path_init, mode="w", storage_options=storage_options) - if self.check_if_dumpable(): + if self.check_if_json_serializable(): zarr_root.attrs["provenance"] = check_json(self.to_dict()) else: zarr_root.attrs["provenance"] = None diff --git a/src/spikeinterface/core/job_tools.py b/src/spikeinterface/core/job_tools.py index c0ee77d2fd..0535872ca6 100644 --- a/src/spikeinterface/core/job_tools.py +++ b/src/spikeinterface/core/job_tools.py @@ -167,7 +167,7 @@ def ensure_n_jobs(recording, n_jobs=1): print(f"Python {sys.version} does not support parallel processing") n_jobs = 1 - if not recording.check_if_dumpable(): + if not recording.is_memory_serializable(): if n_jobs != 1: raise RuntimeError( "Recording is not dumpable and can't be processed in parallel. " diff --git a/src/spikeinterface/core/numpyextractors.py b/src/spikeinterface/core/numpyextractors.py index 5ef955a6eb..d09016c8f1 100644 --- a/src/spikeinterface/core/numpyextractors.py +++ b/src/spikeinterface/core/numpyextractors.py @@ -127,7 +127,7 @@ def __init__(self, spikes, sampling_frequency, unit_ids): """ """ BaseSorting.__init__(self, sampling_frequency, unit_ids) - self._is_dumpable = True + self._serializablility["memory"] = True self._serializablility["json"] = False # theorically this should be False but for simplicity make generators simples we still need this. 
self._serializablility["pickle"] = True @@ -360,8 +360,8 @@ def __init__(self, shm_name, shape, sampling_frequency, unit_ids, dtype=minimum_ assert shape[0] > 0, "SharedMemorySorting only supported with no empty sorting" BaseSorting.__init__(self, sampling_frequency, unit_ids) - self._is_dumpable = True + self._serializablility["memory"] = True self._serializablility["json"] = False self._serializablility["pickle"] = False @@ -521,7 +521,7 @@ def __init__(self, snippets_list, spikesframes_list, sampling_frequency, nbefore dtype=dtype, ) - self._is_dumpable = False + self._serializablility["memory"] = False self._serializablility["json"] = False self._serializablility["pickle"] = False diff --git a/src/spikeinterface/core/old_api_utils.py b/src/spikeinterface/core/old_api_utils.py index a31edb0dd7..879700cc15 100644 --- a/src/spikeinterface/core/old_api_utils.py +++ b/src/spikeinterface/core/old_api_utils.py @@ -181,8 +181,8 @@ def __init__(self, oldapi_recording_extractor): dtype=oldapi_recording_extractor.get_dtype(return_scaled=False), ) - # set _is_dumpable to False to use dumping mechanism of old extractor - self._is_dumpable = False + # set to False to use dumping mechanism of old extractor + self._serializablility["memory"] = False self._serializablility["json"] = False self._serializablility["pickle"] = False @@ -269,7 +269,7 @@ def __init__(self, oldapi_sorting_extractor): sorting_segment = OldToNewSortingSegment(oldapi_sorting_extractor) self.add_sorting_segment(sorting_segment) - self._is_dumpable = False + self._serializablility["memory"] = False self._serializablility["json"] = False self._serializablility["pickle"] = False diff --git a/src/spikeinterface/core/tests/test_base.py b/src/spikeinterface/core/tests/test_base.py index b716f6b1dd..28dbd166ec 100644 --- a/src/spikeinterface/core/tests/test_base.py +++ b/src/spikeinterface/core/tests/test_base.py @@ -31,19 +31,19 @@ def make_nested_extractors(extractor): ) -def test_check_if_dumpable(): +def test_is_memory_serializable(): test_extractor = generate_recording(seed=0, durations=[2]) # make a list of dumpable objects extractors_dumpable = make_nested_extractors(test_extractor) for extractor in extractors_dumpable: - assert extractor.check_if_dumpable() + assert extractor.is_memory_serializable() # make not dumpable - test_extractor._is_dumpable = False + test_extractor._serializablility["memory"] = False extractors_not_dumpable = make_nested_extractors(test_extractor) for extractor in extractors_not_dumpable: - assert not extractor.check_if_dumpable() + assert not extractor.is_memory_serializable() def test_check_if_serializable(): @@ -66,5 +66,5 @@ def test_check_if_serializable(): if __name__ == "__main__": - test_check_if_dumpable() + test_is_memory_serializable() test_check_if_serializable() diff --git a/src/spikeinterface/core/tests/test_jsonification.py b/src/spikeinterface/core/tests/test_jsonification.py index 8572cda23e..026e676966 100644 --- a/src/spikeinterface/core/tests/test_jsonification.py +++ b/src/spikeinterface/core/tests/test_jsonification.py @@ -144,8 +144,7 @@ def __init__(self, attribute, other_extractor=None, extractor_list=None, extract BaseExtractor.__init__(self, main_ids=['1', '2']) # this already the case by default - self._is_dumpable = True - # self._is_json_serializable = True + self._serializablility["memory"] = True self._serializablility["json"] = True self._serializablility["pickle"] = True diff --git a/src/spikeinterface/postprocessing/spike_amplitudes.py 
b/src/spikeinterface/postprocessing/spike_amplitudes.py index 38cb714d59..aa99f7fc5e 100644 --- a/src/spikeinterface/postprocessing/spike_amplitudes.py +++ b/src/spikeinterface/postprocessing/spike_amplitudes.py @@ -75,7 +75,7 @@ def _run(self, **job_kwargs): n_jobs = ensure_n_jobs(recording, job_kwargs.get("n_jobs", None)) if n_jobs != 1: # TODO: avoid dumping sorting and use spike vector and peak pipeline instead - assert sorting.check_if_dumpable(), ( + assert sorting.is_memory_serializable(), ( "The sorting object is not dumpable and cannot be processed in parallel. You can use the " "`sorting.save()` function to make it dumpable" ) From 9d3dceaacc77158487c47972a2d949a71bb3c65a Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 27 Sep 2023 08:52:23 +0000 Subject: [PATCH 084/115] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/comparison/multicomparisons.py | 4 ++-- src/spikeinterface/core/base.py | 10 ++-------- src/spikeinterface/core/generate.py | 4 ++-- src/spikeinterface/core/numpyextractors.py | 2 +- .../core/tests/test_jsonification.py | 8 ++++---- .../core/tests/test_waveform_extractor.py | 15 ++++++++++----- src/spikeinterface/core/waveform_extractor.py | 1 - src/spikeinterface/sorters/basesorter.py | 6 ++---- src/spikeinterface/sorters/runsorter.py | 7 ++++--- 9 files changed, 27 insertions(+), 30 deletions(-) diff --git a/src/spikeinterface/comparison/multicomparisons.py b/src/spikeinterface/comparison/multicomparisons.py index 6fe474822b..f44e14c4c4 100644 --- a/src/spikeinterface/comparison/multicomparisons.py +++ b/src/spikeinterface/comparison/multicomparisons.py @@ -189,8 +189,8 @@ def save_to_folder(self, save_folder): stacklevel=2, ) for sorting in self.object_list: - assert ( - sorting.check_serializablility("json") + assert sorting.check_serializablility( + "json" ), "MultiSortingComparison.save_to_folder() need json serializable sortings" save_folder = Path(save_folder) diff --git a/src/spikeinterface/core/base.py b/src/spikeinterface/core/base.py index 3b8765a398..6e91cedcb5 100644 --- a/src/spikeinterface/core/base.py +++ b/src/spikeinterface/core/base.py @@ -57,7 +57,7 @@ def __init__(self, main_ids: Sequence) -> None: # * number of units for sorting self._properties = {} - self._serializablility = {'memory': True, 'json': True, 'pickle': True} + self._serializablility = {"memory": True, "json": True, "pickle": True} # extractor specific list of pip extra requirements self.extra_requirements = [] @@ -470,7 +470,6 @@ def clone(self) -> "BaseExtractor": clone = BaseExtractor.from_dict(d) return clone - def check_serializablility(self, type): kwargs = self._kwargs for value in kwargs.values(): @@ -488,11 +487,8 @@ def check_serializablility(self, type): return False return self._serializablility[type] - def check_if_dumpable(self): - warnings.warn( - "check_if_dumpable() is replace by is_memory_serializable()", DeprecationWarning, stacklevel=2 - ) + warnings.warn("check_if_dumpable() is replace by is_memory_serializable()", DeprecationWarning, stacklevel=2) return self.check_serializablility("memory") def is_memory_serializable(self): @@ -506,8 +502,6 @@ def is_memory_serializable(self): """ return self.check_serializablility("memory") - - def check_if_json_serializable(self): """ Check if the object is json serializable, including nested objects. 
diff --git a/src/spikeinterface/core/generate.py b/src/spikeinterface/core/generate.py index 05d63f3c8d..eeb1e8af60 100644 --- a/src/spikeinterface/core/generate.py +++ b/src/spikeinterface/core/generate.py @@ -1433,7 +1433,7 @@ def generate_ground_truth_recording( ) recording.annotate(is_filtered=True) recording.set_probe(probe, in_place=True) - recording.set_channel_gains(1.) - recording.set_channel_offsets(0.) + recording.set_channel_gains(1.0) + recording.set_channel_offsets(0.0) return recording, sorting diff --git a/src/spikeinterface/core/numpyextractors.py b/src/spikeinterface/core/numpyextractors.py index d09016c8f1..3d7ec6cd1a 100644 --- a/src/spikeinterface/core/numpyextractors.py +++ b/src/spikeinterface/core/numpyextractors.py @@ -523,7 +523,7 @@ def __init__(self, snippets_list, spikesframes_list, sampling_frequency, nbefore self._serializablility["memory"] = False self._serializablility["json"] = False - self._serializablility["pickle"] = False + self._serializablility["pickle"] = False for snippets, spikesframes in zip(snippets_list, spikesframes_list): snp_segment = NumpySnippetsSegment(snippets, spikesframes) diff --git a/src/spikeinterface/core/tests/test_jsonification.py b/src/spikeinterface/core/tests/test_jsonification.py index 026e676966..1c491bd7a6 100644 --- a/src/spikeinterface/core/tests/test_jsonification.py +++ b/src/spikeinterface/core/tests/test_jsonification.py @@ -142,11 +142,11 @@ def __init__(self, attribute, other_extractor=None, extractor_list=None, extract self.extractor_list = extractor_list self.extractor_dict = extractor_dict - BaseExtractor.__init__(self, main_ids=['1', '2']) + BaseExtractor.__init__(self, main_ids=["1", "2"]) # this already the case by default self._serializablility["memory"] = True self._serializablility["json"] = True - self._serializablility["pickle"] = True + self._serializablility["pickle"] = True self._kwargs = { "attribute": attribute, @@ -199,6 +199,6 @@ def test_encoding_numpy_scalars_within_nested_extractors_dict(nested_extractor_d json.dumps(nested_extractor_dict, cls=SIJsonEncoder) -if __name__ == '__main__': +if __name__ == "__main__": nested_extractor = nested_extractor() - test_encoding_numpy_scalars_within_nested_extractors(nested_extractor_) \ No newline at end of file + test_encoding_numpy_scalars_within_nested_extractors(nested_extractor_) diff --git a/src/spikeinterface/core/tests/test_waveform_extractor.py b/src/spikeinterface/core/tests/test_waveform_extractor.py index f53b9cf18d..12dac52d43 100644 --- a/src/spikeinterface/core/tests/test_waveform_extractor.py +++ b/src/spikeinterface/core/tests/test_waveform_extractor.py @@ -6,7 +6,13 @@ import zarr -from spikeinterface.core import generate_recording, generate_sorting, NumpySorting, ChannelSparsity, generate_ground_truth_recording +from spikeinterface.core import ( + generate_recording, + generate_sorting, + NumpySorting, + ChannelSparsity, + generate_ground_truth_recording, +) from spikeinterface import WaveformExtractor, BaseRecording, extract_waveforms, load_waveforms from spikeinterface.core.waveform_extractor import precompute_sparsity @@ -509,14 +515,15 @@ def test_compute_sparsity(): ) print(sparsity) + def test_non_json_object(): recording, sorting = generate_ground_truth_recording( durations=[30, 40], sampling_frequency=30000.0, num_channels=32, num_units=5, - ) - + ) + # recording is not save to keep it in memory sorting = sorting.save() @@ -524,7 +531,6 @@ def test_non_json_object(): if wf_folder.is_dir(): shutil.rmtree(wf_folder) - we = 
extract_waveforms( recording, sorting, @@ -551,4 +557,3 @@ def test_non_json_object(): # test_recordingless() # test_compute_sparsity() test_non_json_object() - diff --git a/src/spikeinterface/core/waveform_extractor.py b/src/spikeinterface/core/waveform_extractor.py index 3de1429feb..cd8a62f5bc 100644 --- a/src/spikeinterface/core/waveform_extractor.py +++ b/src/spikeinterface/core/waveform_extractor.py @@ -900,7 +900,6 @@ def save( elif self.recording.check_serializablility("pickle"): self.recording.dump(folder / "recording.pickle") - if self.sorting.check_serializablility("json"): self.sorting.dump(folder / "sorting.json", relative_to=relative_to) elif self.sorting.check_serializablility("pickle"): diff --git a/src/spikeinterface/sorters/basesorter.py b/src/spikeinterface/sorters/basesorter.py index bbcde31eed..8d87558191 100644 --- a/src/spikeinterface/sorters/basesorter.py +++ b/src/spikeinterface/sorters/basesorter.py @@ -189,11 +189,9 @@ def set_params_to_folder(cls, recording, output_folder, new_params, verbose): @classmethod def load_recording_from_folder(cls, output_folder, with_warnings=False): - json_file = output_folder / "spikeinterface_recording.json" pickle_file = output_folder / "spikeinterface_recording.pickle" - if json_file.exists(): with (json_file).open("r", encoding="utf8") as f: recording_dict = json.load(f) @@ -206,7 +204,7 @@ def load_recording_from_folder(cls, output_folder, with_warnings=False): recording = load_extractor(json_file, base_folder=output_folder) elif pickle_file.exits(): recording = load_extractor(pickle_file) - + return recording @classmethod @@ -320,7 +318,7 @@ def get_result_from_folder(cls, output_folder, register_recording=True, sorting_ if register_recording: # register recording to Sorting object - recording = cls.load_recording_from_folder( output_folder, with_warnings=False) + recording = cls.load_recording_from_folder(output_folder, with_warnings=False) if recording is not None: sorting.register_recording(recording) diff --git a/src/spikeinterface/sorters/runsorter.py b/src/spikeinterface/sorters/runsorter.py index e930ec7f79..bd5667b15f 100644 --- a/src/spikeinterface/sorters/runsorter.py +++ b/src/spikeinterface/sorters/runsorter.py @@ -629,7 +629,7 @@ def read_sorter_folder(output_folder, register_recording=True, sorting_info=True Load a sorting object from a spike sorting output folder. 
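For reference, the function whose docstring is being reformatted here is the standard way to reload a finished run from disk. A minimal usage sketch; the folder name below is a placeholder, and the guard just keeps the snippet runnable when the folder does not exist:

from pathlib import Path
from spikeinterface.sorters import read_sorter_folder

output_folder = Path("my_sorter_output")  # placeholder path, assumed to exist
if output_folder.exists():
    # the folder must contain the spikeinterface_log.json written by run_sorter()
    sorting = read_sorter_folder(output_folder, register_recording=True)
    print(sorting)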
The 'output_folder' must contain a valid 'spikeinterface_log.json' file - + Parameters ---------- output_folder: Pth or str @@ -657,8 +657,9 @@ def read_sorter_folder(output_folder, register_recording=True, sorting_info=True sorter_name = log["sorter_name"] SorterClass = sorter_dict[sorter_name] - sorting = SorterClass.get_result_from_folder(output_folder, register_recording=register_recording, - sorting_info=sorting_info) + sorting = SorterClass.get_result_from_folder( + output_folder, register_recording=register_recording, sorting_info=sorting_info + ) return sorting From 7329927cfb3035d764648a2175d617aa8999c67b Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Wed, 27 Sep 2023 10:54:57 +0200 Subject: [PATCH 085/115] rename to check_if_memory_serializable --- src/spikeinterface/core/base.py | 6 +----- src/spikeinterface/core/job_tools.py | 2 +- src/spikeinterface/core/tests/test_base.py | 8 ++++---- src/spikeinterface/postprocessing/spike_amplitudes.py | 2 +- 4 files changed, 7 insertions(+), 11 deletions(-) diff --git a/src/spikeinterface/core/base.py b/src/spikeinterface/core/base.py index 6e91cedcb5..b1b5065339 100644 --- a/src/spikeinterface/core/base.py +++ b/src/spikeinterface/core/base.py @@ -487,11 +487,7 @@ def check_serializablility(self, type): return False return self._serializablility[type] - def check_if_dumpable(self): - warnings.warn("check_if_dumpable() is replace by is_memory_serializable()", DeprecationWarning, stacklevel=2) - return self.check_serializablility("memory") - - def is_memory_serializable(self): + def check_if_memory_serializable(self): """ Check if the object is serializable to memory with pickle, including nested objects. diff --git a/src/spikeinterface/core/job_tools.py b/src/spikeinterface/core/job_tools.py index 0535872ca6..9369ad0b61 100644 --- a/src/spikeinterface/core/job_tools.py +++ b/src/spikeinterface/core/job_tools.py @@ -167,7 +167,7 @@ def ensure_n_jobs(recording, n_jobs=1): print(f"Python {sys.version} does not support parallel processing") n_jobs = 1 - if not recording.is_memory_serializable(): + if not recording.check_if_memory_serializable(): if n_jobs != 1: raise RuntimeError( "Recording is not dumpable and can't be processed in parallel. 
" diff --git a/src/spikeinterface/core/tests/test_base.py b/src/spikeinterface/core/tests/test_base.py index 28dbd166ec..8d0907c700 100644 --- a/src/spikeinterface/core/tests/test_base.py +++ b/src/spikeinterface/core/tests/test_base.py @@ -31,19 +31,19 @@ def make_nested_extractors(extractor): ) -def test_is_memory_serializable(): +def test_check_if_memory_serializable(): test_extractor = generate_recording(seed=0, durations=[2]) # make a list of dumpable objects extractors_dumpable = make_nested_extractors(test_extractor) for extractor in extractors_dumpable: - assert extractor.is_memory_serializable() + assert extractor.check_if_memory_serializable() # make not dumpable test_extractor._serializablility["memory"] = False extractors_not_dumpable = make_nested_extractors(test_extractor) for extractor in extractors_not_dumpable: - assert not extractor.is_memory_serializable() + assert not extractor.check_if_memory_serializable() def test_check_if_serializable(): @@ -66,5 +66,5 @@ def test_check_if_serializable(): if __name__ == "__main__": - test_is_memory_serializable() + test_check_if_memory_serializable() test_check_if_serializable() diff --git a/src/spikeinterface/postprocessing/spike_amplitudes.py b/src/spikeinterface/postprocessing/spike_amplitudes.py index aa99f7fc5e..9eb5a815d4 100644 --- a/src/spikeinterface/postprocessing/spike_amplitudes.py +++ b/src/spikeinterface/postprocessing/spike_amplitudes.py @@ -75,7 +75,7 @@ def _run(self, **job_kwargs): n_jobs = ensure_n_jobs(recording, job_kwargs.get("n_jobs", None)) if n_jobs != 1: # TODO: avoid dumping sorting and use spike vector and peak pipeline instead - assert sorting.is_memory_serializable(), ( + assert sorting.check_if_memory_serializable(), ( "The sorting object is not dumpable and cannot be processed in parallel. You can use the " "`sorting.save()` function to make it dumpable" ) From b9c6a38e99430fc7b734e0751871e6d08eb5aea1 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Wed, 27 Sep 2023 10:56:28 +0200 Subject: [PATCH 086/115] oups --- src/spikeinterface/core/base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/spikeinterface/core/base.py b/src/spikeinterface/core/base.py index b1b5065339..e3b88588e2 100644 --- a/src/spikeinterface/core/base.py +++ b/src/spikeinterface/core/base.py @@ -494,7 +494,7 @@ def check_if_memory_serializable(self): Returns ------- bool - True if the object is json serializable, False otherwise. + True if the object is memory serializable, False otherwise. """ return self.check_serializablility("memory") From 331379a3f441e2691eb15985b60254fcc9e3f887 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Wed, 27 Sep 2023 11:13:29 +0200 Subject: [PATCH 087/115] Remove "dumpable" naming also in doc and warnings. --- doc/modules/core.rst | 3 +-- src/spikeinterface/comparison/hybrid.py | 4 ++-- src/spikeinterface/core/base.py | 8 ++++---- src/spikeinterface/core/job_tools.py | 4 ++-- src/spikeinterface/core/tests/test_base.py | 17 ++++++++--------- .../core/tests/test_core_tools.py | 1 - src/spikeinterface/core/tests/test_job_tools.py | 6 +++--- .../core/tests/test_waveform_extractor.py | 2 +- src/spikeinterface/core/waveform_extractor.py | 15 ++++++++------- .../postprocessing/spike_amplitudes.py | 6 ------ .../sorters/tests/test_launcher.py | 2 +- 11 files changed, 30 insertions(+), 38 deletions(-) diff --git a/doc/modules/core.rst b/doc/modules/core.rst index fdc4d71fe7..976a82a4a3 100644 --- a/doc/modules/core.rst +++ b/doc/modules/core.rst @@ -547,8 +547,7 @@ workflow. 
In order to do this, one can use the :code:`Numpy*` classes, :py:class:`~spikeinterface.core.NumpyRecording`, :py:class:`~spikeinterface.core.NumpySorting`, :py:class:`~spikeinterface.core.NumpyEvent`, and :py:class:`~spikeinterface.core.NumpySnippets`. These object behave exactly like normal SpikeInterface objects, -but they are not bound to a file. This makes these objects *not dumpable*, so parallel processing is not supported. -In order to make them *dumpable*, one can simply :code:`save()` them (see :ref:`save_load`). +but they are not bound to a file. Also note the class :py:class:`~spikeinterface.core.SharedMemorySorting` which is very similar to Similar to :py:class:`~spikeinterface.core.NumpySorting` but with an unerlying SharedMemory which is usefull for diff --git a/src/spikeinterface/comparison/hybrid.py b/src/spikeinterface/comparison/hybrid.py index 3b8e9e0a72..e0c98cd772 100644 --- a/src/spikeinterface/comparison/hybrid.py +++ b/src/spikeinterface/comparison/hybrid.py @@ -39,7 +39,7 @@ class HybridUnitsRecording(InjectTemplatesRecording): The refractory period of the injected spike train (in ms). injected_sorting_folder: str | Path | None If given, the injected sorting is saved to this folder. - It must be specified if injected_sorting is None or not dumpable. + It must be specified if injected_sorting is None or not serialisable to file. Returns ------- @@ -138,7 +138,7 @@ class HybridSpikesRecording(InjectTemplatesRecording): this refractory period. injected_sorting_folder: str | Path | None If given, the injected sorting is saved to this folder. - It must be specified if injected_sorting is None or not dumpable. + It must be specified if injected_sorting is None or not serializable to file. Returns ------- diff --git a/src/spikeinterface/core/base.py b/src/spikeinterface/core/base.py index e3b88588e2..73f8619348 100644 --- a/src/spikeinterface/core/base.py +++ b/src/spikeinterface/core/base.py @@ -621,7 +621,7 @@ def dump_to_pickle( folder_metadata: str, Path, or None Folder with files containing additional information (e.g. probe in BaseRecording) and properties. 
""" - assert self.check_if_pickle_serializable(), "The extractor is not dumpable" + assert self.check_if_pickle_serializable(), "The extractor is not serializable to file with pickle" dump_dict = self.to_dict( include_annotations=True, @@ -658,8 +658,8 @@ def load(file_path: Union[str, Path], base_folder: Optional[Union[Path, str, boo d = pickle.load(f) else: raise ValueError(f"Impossible to load {file_path}") - if "warning" in d and "not dumpable" in d["warning"]: - print("The extractor was not dumpable") + if "warning" in d: + print("The extractor was not serializable to file") return None extractor = BaseExtractor.from_dict(d, base_folder=base_folder) return extractor @@ -822,7 +822,7 @@ def save_to_folder(self, name=None, folder=None, verbose=True, **save_kwargs): if self.check_serializablility("json"): self.dump(provenance_file) else: - provenance_file.write_text(json.dumps({"warning": "the provenace is not dumpable!!!"}), encoding="utf8") + provenance_file.write_text(json.dumps({"warning": "the provenace is not json serializable!!!"}), encoding="utf8") self.save_metadata_to_folder(folder) diff --git a/src/spikeinterface/core/job_tools.py b/src/spikeinterface/core/job_tools.py index 9369ad0b61..84ee502c14 100644 --- a/src/spikeinterface/core/job_tools.py +++ b/src/spikeinterface/core/job_tools.py @@ -170,8 +170,8 @@ def ensure_n_jobs(recording, n_jobs=1): if not recording.check_if_memory_serializable(): if n_jobs != 1: raise RuntimeError( - "Recording is not dumpable and can't be processed in parallel. " - "You can use the `recording.save()` function to make it dumpable or set 'n_jobs' to 1." + "Recording is not serializable to memory and can't be processed in parallel. " + "You can use the `rec = recording.save(folder=...)` function or set 'n_jobs' to 1." 
diff --git a/src/spikeinterface/core/tests/test_base.py b/src/spikeinterface/core/tests/test_base.py
index 8d0907c700..a944be3da0 100644
--- a/src/spikeinterface/core/tests/test_base.py
+++ b/src/spikeinterface/core/tests/test_base.py
@@ -34,30 +34,29 @@ def make_nested_extractors(extractor):
 def test_check_if_memory_serializable():
     test_extractor = generate_recording(seed=0, durations=[2])

-    # make a list of dumpable objects
-    extractors_dumpable = make_nested_extractors(test_extractor)
-    for extractor in extractors_dumpable:
+    # make a list of memory serializable objects
+    extractors_mem_serializable = make_nested_extractors(test_extractor)
+    for extractor in extractors_mem_serializable:
         assert extractor.check_if_memory_serializable()

-    # make not dumpable
+    # make not memory serializable
     test_extractor._serializablility["memory"] = False
-    extractors_not_dumpable = make_nested_extractors(test_extractor)
-    for extractor in extractors_not_dumpable:
+    extractors_not_mem_serializable = make_nested_extractors(test_extractor)
+    for extractor in extractors_not_mem_serializable:
         assert not extractor.check_if_memory_serializable()


 def test_check_if_serializable():
     test_extractor = generate_recording(seed=0, durations=[2])

-    # make a list of dumpable objects
+    # make a list of json serializable objects
     test_extractor._serializablility["json"] = True
     extractors_json_serializable = make_nested_extractors(test_extractor)
     for extractor in extractors_json_serializable:
         print(extractor)
         assert extractor.check_serializablility("json")

-    # make not dumpable
-    # test_extractor._is_json_serializable = False
+    # make a list of not json serializable objects
     test_extractor._serializablility["json"] = False
     extractors_not_json_serializable = make_nested_extractors(test_extractor)
     for extractor in extractors_not_json_serializable:
diff --git a/src/spikeinterface/core/tests/test_core_tools.py b/src/spikeinterface/core/tests/test_core_tools.py
index a3cd0caa92..223b2a8a3a 100644
--- a/src/spikeinterface/core/tests/test_core_tools.py
+++ b/src/spikeinterface/core/tests/test_core_tools.py
@@ -142,7 +142,6 @@ def test_write_memory_recording():
     recording = NoiseGeneratorRecording(
         num_channels=2, durations=[10.325, 3.5], sampling_frequency=30_000, strategy="tile_pregenerated"
     )
-    # make dumpable
     recording = recording.save()

     # write with loop
diff --git a/src/spikeinterface/core/tests/test_job_tools.py b/src/spikeinterface/core/tests/test_job_tools.py
index 7d7af6025b..a904e4dd32 100644
--- a/src/spikeinterface/core/tests/test_job_tools.py
+++ b/src/spikeinterface/core/tests/test_job_tools.py
@@ -36,7 +36,7 @@ def test_ensure_n_jobs():
     n_jobs = ensure_n_jobs(recording, n_jobs=1)
     assert n_jobs == 1

-    # dumpable
+    # check serializable
     n_jobs = ensure_n_jobs(recording.save(), n_jobs=-1)
     assert n_jobs > 1

@@ -45,7 +45,7 @@ def test_ensure_chunk_size():
     recording = generate_recording(num_channels=2)
     dtype = recording.get_dtype()
     assert dtype == "float32"
-    # make dumpable
+    # make serializable
     recording = recording.save()

     chunk_size = ensure_chunk_size(recording, total_memory="512M", chunk_size=None, chunk_memory=None, n_jobs=2)
@@ -90,7 +90,7 @@ def init_func(arg1, arg2, arg3):

 def test_ChunkRecordingExecutor():
     recording = generate_recording(num_channels=2)
-    # make dumpable
+    # make serializable
     recording = recording.save()

     init_args = "a", 120, "yep"
diff --git a/src/spikeinterface/core/tests/test_waveform_extractor.py b/src/spikeinterface/core/tests/test_waveform_extractor.py
index 12dac52d43..2bbf5e9b0f 100644
--- a/src/spikeinterface/core/tests/test_waveform_extractor.py
+++ b/src/spikeinterface/core/tests/test_waveform_extractor.py
@@ -315,7 +315,7 @@ def test_recordingless():
     recording = recording.save(folder=cache_folder / "recording1")
     sorting = sorting.save(folder=cache_folder / "sorting1")

-    # recording and sorting are not dumpable
+    # recording and sorting are not serializable
     wf_folder = cache_folder / "wf_recordingless"

     # save with relative paths
diff --git a/src/spikeinterface/core/waveform_extractor.py b/src/spikeinterface/core/waveform_extractor.py
index cd8a62f5bc..2710ff1338 100644
--- a/src/spikeinterface/core/waveform_extractor.py
+++ b/src/spikeinterface/core/waveform_extractor.py
@@ -290,11 +290,12 @@ def create(
             sorting.dump(folder / "sorting.json", relative_to=relative_to)
         elif sorting.check_serializablility("pickle"):
             # In this case we lose the relative_to!!
+            # TODO later the dump to pickle should dump the dictionary and so relative_to could be put back
             sorting.dump(folder / "sorting.pickle")
         else:
             warn(
-                "Sorting object is not dumpable, which might result in downstream errors for "
-                "parallel processing. To make the sorting dumpable, use the `sorting.save()` function."
+                "Sorting object is not serializable to file, which might result in downstream errors for "
+                "parallel processing. To make the sorting serializable, use the `sorting = sorting.save()` function."
             )

         # dump some attributes of the recording for the mode with_recording=False at next load
@@ -903,11 +904,11 @@ def save(
         if self.sorting.check_serializablility("json"):
             self.sorting.dump(folder / "sorting.json", relative_to=relative_to)
         elif self.sorting.check_serializablility("pickle"):
-            self.sorting.dump(folder / "sorting.pickle", relative_to=relative_to)
+            self.sorting.dump(folder / "sorting.pickle")
         else:
             warn(
-                "Sorting object is not dumpable, which might result in downstream errors for "
-                "parallel processing. To make the sorting dumpable, use the `sorting.save()` function."
+                "Sorting object is not serializable to file, which might result in downstream errors for "
+                "parallel processing. To make the sorting serializable, use the `sorting = sorting.save()` function."
             )

         # dump some attributes of the recording for the mode with_recording=False at next load
@@ -960,8 +961,8 @@ def save(
             zarr_root.attrs["sorting"] = check_json(sort_dict)
         else:
             warn(
-                "Sorting object is not dumpable, which might result in downstream errors for "
-                "parallel processing. To make the sorting dumpable, use the `sorting.save()` function."
+                "Sorting object is not json serializable, which might result in downstream errors for "
+                "parallel processing. To make the sorting serializable, use the `sorting = sorting.save()` function."
            )

        recording_info = zarr_root.create_group("recording_info")
        recording_info.attrs["recording_attributes"] = check_json(rec_attributes)
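The json-then-pickle fallback appears identically in `create()` and `save()` above. A condensed sketch of the pattern (`dump_sorting_with_fallback` is a hypothetical helper, and `check_serializablility` keeps the spelling used in the codebase):

    from pathlib import Path
    from warnings import warn

    def dump_sorting_with_fallback(sorting, folder: Path, relative_to=None):
        # Fallback order used above: json first (keeps relative_to),
        # then pickle (loses relative_to), otherwise only a warning.
        if sorting.check_serializablility("json"):
            sorting.dump(folder / "sorting.json", relative_to=relative_to)
        elif sorting.check_serializablility("pickle"):
            sorting.dump(folder / "sorting.pickle")
        else:
            warn("Sorting is not serializable to file; use `sorting = sorting.save()` first.")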
diff --git a/src/spikeinterface/postprocessing/spike_amplitudes.py b/src/spikeinterface/postprocessing/spike_amplitudes.py
index 9eb5a815d4..ccd2121174 100644
--- a/src/spikeinterface/postprocessing/spike_amplitudes.py
+++ b/src/spikeinterface/postprocessing/spike_amplitudes.py
@@ -73,12 +73,6 @@ def _run(self, **job_kwargs):
         func = _spike_amplitudes_chunk
         init_func = _init_worker_spike_amplitudes
         n_jobs = ensure_n_jobs(recording, job_kwargs.get("n_jobs", None))
-        if n_jobs != 1:
-            # TODO: avoid dumping sorting and use spike vector and peak pipeline instead
-            assert sorting.check_if_memory_serializable(), (
-                "The sorting object is not dumpable and cannot be processed in parallel. You can use the "
-                "`sorting.save()` function to make it dumpable"
-            )
         init_args = (recording, sorting.to_multiprocessing(n_jobs), extremum_channels_index, peak_shifts, return_scaled)
         processor = ChunkRecordingExecutor(
             recording, func, init_func, init_args, handle_returns=True, job_name="extract amplitudes", **job_kwargs
diff --git a/src/spikeinterface/sorters/tests/test_launcher.py b/src/spikeinterface/sorters/tests/test_launcher.py
index 14c938f8ba..a5e29c8fd9 100644
--- a/src/spikeinterface/sorters/tests/test_launcher.py
+++ b/src/spikeinterface/sorters/tests/test_launcher.py
@@ -178,7 +178,7 @@ def test_run_sorters_with_list():
     if working_folder.is_dir():
         shutil.rmtree(working_folder)

-    # make dumpable
+    # make serializable
     rec0 = load_extractor(cache_folder / "toy_rec_0")
     rec1 = load_extractor(cache_folder / "toy_rec_1")

From 0ea10e3baf97fbcedc8c25c2745754cacabb7b5c Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Wed, 27 Sep 2023 09:13:52 +0000
Subject: [PATCH 088/115] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 src/spikeinterface/core/base.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/src/spikeinterface/core/base.py b/src/spikeinterface/core/base.py
index 73f8619348..e8b3232e13 100644
--- a/src/spikeinterface/core/base.py
+++ b/src/spikeinterface/core/base.py
@@ -822,7 +822,9 @@ def save_to_folder(self, name=None, folder=None, verbose=True, **save_kwargs):
         if self.check_serializablility("json"):
             self.dump(provenance_file)
         else:
-            provenance_file.write_text(json.dumps({"warning": "the provenance is not json serializable!!!"}), encoding="utf8")
+            provenance_file.write_text(
+                json.dumps({"warning": "the provenance is not json serializable!!!"}), encoding="utf8"
+            )

         self.save_metadata_to_folder(folder)

From af72fbcaa040c4216e2f2b60465197b484e2d2c9 Mon Sep 17 00:00:00 2001
From: Samuel Garcia
Date: Wed, 27 Sep 2023 11:25:20 +0200
Subject: [PATCH 089/115] oups

---
 src/spikeinterface/comparison/groundtruthstudy.py | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py
index 34777c6f20..fcebb356a0 100644
--- a/src/spikeinterface/comparison/groundtruthstudy.py
+++ b/src/spikeinterface/comparison/groundtruthstudy.py
@@ -31,7 +31,6 @@ class GroundTruthStudy:
     "cases" refer to:
       * several sorters for comparisons
       * same sorter with different parameters
-      * parameters of comparisons
       * any combination of these (and more)

     For increased flexibility, cases keys can be a tuple so that we can vary complexity along several
@@ -403,11 +402,11 @@ def get_count_units(
                 count_units.loc[key, "num_well_detected"] = comp.count_well_detected_units(
                     well_detected_score
                 )
+                count_units.loc[key, "num_overmerged"] = comp.count_overmerged_units(
+                    overmerged_score
+                )
+                count_units.loc[key, "num_redundant"] = comp.count_redundant_units(redundant_score)
                 if comp.exhaustive_gt:
-                    count_units.loc[key, "num_overmerged"] = comp.count_overmerged_units(
-                        overmerged_score
-                    )
-                    count_units.loc[key, "num_redundant"] = comp.count_redundant_units(redundant_score)
                     count_units.loc[key, "num_false_positive"] = comp.count_false_positive_units(
                         redundant_score
                     )

From 2c015f78e9311e106e9d2fda4e4026a61ca68c5b Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Wed, 27 Sep 2023 09:28:28 +0000
Subject: [PATCH 090/115] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 src/spikeinterface/widgets/amplitudes.py      |   7 +-
 src/spikeinterface/widgets/base.py            |   2 +-
 src/spikeinterface/widgets/metrics.py         |   6 +-
 src/spikeinterface/widgets/spike_locations.py |   2 +-
 .../widgets/spikes_on_traces.py               |  20 +-
 src/spikeinterface/widgets/traces.py          |  51 ++--
 src/spikeinterface/widgets/unit_locations.py  |   2 +-
 src/spikeinterface/widgets/unit_waveforms.py  |   8 +-
 .../widgets/utils_ipywidgets.py               | 222 +++++++++---------
 9 files changed, 163 insertions(+), 157 deletions(-)

diff --git a/src/spikeinterface/widgets/amplitudes.py b/src/spikeinterface/widgets/amplitudes.py
index 5aa090b1b4..6b6496a577 100644
--- a/src/spikeinterface/widgets/amplitudes.py
+++ b/src/spikeinterface/widgets/amplitudes.py
@@ -174,6 +174,7 @@ def plot_matplotlib(self, data_plot, **backend_kwargs):

     def plot_ipywidgets(self, data_plot, **backend_kwargs):
         import matplotlib.pyplot as plt
+
         # import ipywidgets.widgets as widgets
         import ipywidgets.widgets as W
         from IPython.display import display
@@ -210,7 +211,7 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs):
                 self.unit_selector,
                 self.checkbox_histograms,
             ],
-            layout = W.Layout(align_items="center", width="4cm", height="100%"),
+            layout=W.Layout(align_items="center", width="4cm", height="100%"),
         )

         self.widget = W.AppLayout(
@@ -222,8 +223,8 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs):
         # a first update
         self._full_update_plot()

-        self.unit_selector.observe(self._update_plot, names='value', type="change")
-        self.checkbox_histograms.observe(self._full_update_plot, names='value', type="change")
+        self.unit_selector.observe(self._update_plot, names="value", type="change")
+        self.checkbox_histograms.observe(self._full_update_plot, names="value", type="change")

         if backend_kwargs["display"]:
             display(self.widget)
diff --git a/src/spikeinterface/widgets/base.py b/src/spikeinterface/widgets/base.py
index 1ff691320a..9fc7b73707 100644
--- a/src/spikeinterface/widgets/base.py
+++ b/src/spikeinterface/widgets/base.py
@@ -38,7 +38,7 @@ def set_default_plotter_backend(backend):
         "width_cm": "Width of the figure in cm (default 10)",
         "height_cm": "Height of the figure in cm (default 6)",
         "display": "If True, widgets are immediately displayed",
-        # "controllers": ""
+        # "controllers": ""
     },
     "ephyviewer": {},
 }
diff --git a/src/spikeinterface/widgets/metrics.py b/src/spikeinterface/widgets/metrics.py
index 604da35e65..c7b701c8b0 100644
--- a/src/spikeinterface/widgets/metrics.py
+++ b/src/spikeinterface/widgets/metrics.py
@@ -149,8 +149,7 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs):
             plt.show()

         self.unit_selector = UnitSelector(data_plot["sorting"].unit_ids)
-        self.unit_selector.value = [ ]
-
+        self.unit_selector.value = []

         self.widget = widgets.AppLayout(
             center=self.figure.canvas,
@@ -161,7 +160,7 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs):
         # a first update
         self._update_ipywidget(None)

-        self.unit_selector.observe(self._update_ipywidget, names='value', type="change")
+        self.unit_selector.observe(self._update_ipywidget, names="value", type="change")

         if backend_kwargs["display"]:
             display(self.widget)
@@ -208,7 +207,6 @@ def _update_ipywidget(self, change):
         self.figure.canvas.draw()
         self.figure.canvas.flush_events()

-
     def plot_sortingview(self, data_plot, **backend_kwargs):
         import sortingview.views as vv
         from .utils_sortingview import generate_unit_table_view, make_serializable, handle_display_and_url
diff --git a/src/spikeinterface/widgets/spike_locations.py b/src/spikeinterface/widgets/spike_locations.py
index 926051b8f9..fda2356105 100644
--- a/src/spikeinterface/widgets/spike_locations.py
+++ b/src/spikeinterface/widgets/spike_locations.py
@@ -222,7 +222,7 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs):
         # a first update
         self._update_ipywidget()

-        self.unit_selector.observe(self._update_ipywidget, names='value', type="change")
+        self.unit_selector.observe(self._update_ipywidget, names="value", type="change")

         if backend_kwargs["display"]:
             display(self.widget)
diff --git a/src/spikeinterface/widgets/spikes_on_traces.py b/src/spikeinterface/widgets/spikes_on_traces.py
index 2f748cc0fc..c2bed8fe41 100644
--- a/src/spikeinterface/widgets/spikes_on_traces.py
+++ b/src/spikeinterface/widgets/spikes_on_traces.py
@@ -232,7 +232,7 @@ def plot_matplotlib(self, data_plot, **backend_kwargs):
                     handles.append(l[0])
                     labels.append(unit)
                     label_set = True
-                    # ax.legend(handles, labels)
+                    # ax.legend(handles, labels)

     def plot_ipywidgets(self, data_plot, **backend_kwargs):
         import matplotlib.pyplot as plt
@@ -268,19 +268,18 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs):
         self.unit_selector = UnitSelector(data_plot["unit_ids"])
         self.unit_selector.value = list(data_plot["unit_ids"])[:1]

-        self.widget = widgets.AppLayout(center=self._traces_widget.widget,
-                                        left_sidebar=self.unit_selector,
-                                        pane_widths=ratios + [0])
+        self.widget = widgets.AppLayout(
+            center=self._traces_widget.widget, left_sidebar=self.unit_selector, pane_widths=ratios + [0]
+        )

         # a first update
         self._update_ipywidget()

         # remove callback from traces_widget
-        self.unit_selector.observe(self._update_ipywidget, names='value', type="change")
-        self._traces_widget.time_slider.observe(self._update_ipywidget, names='value', type="change")
-        self._traces_widget.channel_selector.observe(self._update_ipywidget, names='value', type="change")
-        self._traces_widget.scaler.observe(self._update_ipywidget, names='value', type="change")
-
+        self.unit_selector.observe(self._update_ipywidget, names="value", type="change")
+        self._traces_widget.time_slider.observe(self._update_ipywidget, names="value", type="change")
+        self._traces_widget.channel_selector.observe(self._update_ipywidget, names="value", type="change")
+        self._traces_widget.scaler.observe(self._update_ipywidget, names="value", type="change")

         if backend_kwargs["display"]:
             display(self.widget)
@@ -305,10 +304,9 @@ def _update_ipywidget(self, change=None):
                     time_range=np.array([start_frame, end_frame]) / self.sampling_frequency,
                     mode=mode,
                     with_colorbar=False,
-                    )
                 )
+            )

-
         backend_kwargs = {}
         backend_kwargs["ax"] = self.ax
diff --git a/src/spikeinterface/widgets/traces.py b/src/spikeinterface/widgets/traces.py
index d107c5cb23..9b6716e8f3 100644
--- a/src/spikeinterface/widgets/traces.py
+++ b/src/spikeinterface/widgets/traces.py
@@ -290,7 +290,6 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs):
         check_ipywidget_backend()

         self.next_data_plot = data_plot.copy()
-
         self.recordings = data_plot["recordings"]

@@ -314,7 +313,7 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs):
         self.time_slider = TimeSlider(
             durations=[rec0.get_duration(s) for s in range(rec0.get_num_segments())],
             sampling_frequency=rec0.sampling_frequency,
-            # layout=W.Layout(height="2cm"),
+            # layout=W.Layout(height="2cm"),
         )

         start_frame = int(data_plot["time_range"][0] * rec0.sampling_frequency)
@@ -324,14 +323,17 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs):

         _layer_keys = data_plot["layer_keys"]
         if len(_layer_keys) > 1:
-            _layer_keys = ['ALL'] + _layer_keys
-        self.layer_selector = W.Dropdown(options=_layer_keys,
-                                         layout=W.Layout(width="95%"),
-                                         )
-        self.mode_selector = W.Dropdown(options=["line", "map"], value=data_plot["mode"],
-                                        # layout=W.Layout(width="5cm"),
-                                        layout=W.Layout(width="95%"),
-                                        )
+            _layer_keys = ["ALL"] + _layer_keys
+        self.layer_selector = W.Dropdown(
+            options=_layer_keys,
+            layout=W.Layout(width="95%"),
+        )
+        self.mode_selector = W.Dropdown(
+            options=["line", "map"],
+            value=data_plot["mode"],
+            # layout=W.Layout(width="5cm"),
+            layout=W.Layout(width="95%"),
+        )
         self.scaler = ScaleWidget()
         self.channel_selector = ChannelSelector(self.rec0.channel_ids)

@@ -343,9 +345,9 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs):
                 self.mode_selector,
                 self.scaler,
                 # self.channel_selector,
-                ],
+            ],
             layout=W.Layout(width="3.5cm"),
-            align_items='center',
+            align_items="center",
         )

         self.return_scaled = data_plot["return_scaled"]
@@ -353,7 +355,7 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs):
         self.widget = widgets.AppLayout(
             center=self.figure.canvas,
             footer=self.time_slider,
-            left_sidebar = left_sidebar,
+            left_sidebar=left_sidebar,
             right_sidebar=self.channel_selector,
             pane_heights=[0, 6, 1],
             pane_widths=ratios,
@@ -365,28 +367,28 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs):

         # callbacks:
         # some widgets generate a full retrieve + refresh
-        self.time_slider.observe(self._retrieve_traces, names='value', type="change")
-        self.layer_selector.observe(self._retrieve_traces, names='value', type="change")
-        self.channel_selector.observe(self._retrieve_traces, names='value', type="change")
+        self.time_slider.observe(self._retrieve_traces, names="value", type="change")
+        self.layer_selector.observe(self._retrieve_traces, names="value", type="change")
+        self.channel_selector.observe(self._retrieve_traces, names="value", type="change")
         # other widgets only refresh
-        self.scaler.observe(self._update_plot, names='value', type="change")
+        self.scaler.observe(self._update_plot, names="value", type="change")
         # map is a special case because needs to check layer also
-        self.mode_selector.observe(self._mode_changed, names='value', type="change")
-
+        self.mode_selector.observe(self._mode_changed, names="value", type="change")
+
         if backend_kwargs["display"]:
             # self.check_backend()
             display(self.widget)

     def _get_layers(self):
         layer = self.layer_selector.value
-        if layer == 'ALL':
+        if layer == "ALL":
             layer_keys = self.data_plot["layer_keys"]
         else:
             layer_keys = [layer]
         if self.mode_selector.value == "map":
             layer_keys = layer_keys[:1]
         return layer_keys
-
+
     def _mode_changed(self, change=None):
         if self.mode_selector.value == "map" and self.layer_selector.value == "ALL":
             self.layer_selector.value = self.data_plot["layer_keys"][0]
@@ -400,7 +402,7 @@ def _retrieve_traces(self, change=None):
             order, _ = order_channels_by_depth(self.rec0, channel_ids)
         else:
             order = None
-
+
         start_frame, end_frame, segment_index = self.time_slider.value
         time_range = np.array([start_frame, end_frame]) / self.rec0.sampling_frequency
@@ -439,9 +441,9 @@ def _update_plot(self, change=None):
         data_plot["clims"] = clims
         data_plot["channel_ids"] = self._channel_ids
-
+
         data_plot["layer_keys"] = layer_keys
-        data_plot["colors"] = {k:self.data_plot["colors"][k] for k in layer_keys}
+        data_plot["colors"] = {k: self.data_plot["colors"][k] for k in layer_keys}

         list_traces = [traces * self.scaler.value for traces in self._list_traces]
         data_plot["list_traces"] = list_traces
@@ -458,7 +460,6 @@ def _update_plot(self, change=None):
         fig.canvas.draw()
         fig.canvas.flush_events()

-
     def plot_sortingview(self, data_plot, **backend_kwargs):
         import sortingview.views as vv
         from .utils_sortingview import handle_display_and_url
diff --git a/src/spikeinterface/widgets/unit_locations.py b/src/spikeinterface/widgets/unit_locations.py
index 8526a95d60..b41ee3508b 100644
--- a/src/spikeinterface/widgets/unit_locations.py
+++ b/src/spikeinterface/widgets/unit_locations.py
@@ -198,7 +198,7 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs):
         # a first update
         self._update_ipywidget()

-        self.unit_selector.observe(self._update_ipywidget, names='value', type="change")
+        self.unit_selector.observe(self._update_ipywidget, names="value", type="change")

         if backend_kwargs["display"]:
             display(self.widget)
diff --git a/src/spikeinterface/widgets/unit_waveforms.py b/src/spikeinterface/widgets/unit_waveforms.py
index f01c842b66..8ffc931bf2 100644
--- a/src/spikeinterface/widgets/unit_waveforms.py
+++ b/src/spikeinterface/widgets/unit_waveforms.py
@@ -277,7 +277,6 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs):

         self.unit_selector = UnitSelector(data_plot["unit_ids"])
         self.unit_selector.value = list(data_plot["unit_ids"])[:1]
-
         self.same_axis_button = widgets.Checkbox(
             value=False,
             description="same axis",
@@ -309,10 +308,9 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs):
         # a first update
         self._update_ipywidget(None)

-        self.unit_selector.observe(self._update_ipywidget, names='value', type="change")
+        self.unit_selector.observe(self._update_ipywidget, names="value", type="change")
         for w in self.same_axis_button, self.plot_templates_button, self.hide_axis_button:
-            w.observe(self._update_ipywidget, names='value', type="change")
-
+            w.observe(self._update_ipywidget, names="value", type="change")

         if backend_kwargs["display"]:
             display(self.widget)
@@ -340,7 +338,7 @@ def _update_ipywidget(self, change):
         data_plot["plot_templates"] = plot_templates
         if data_plot["plot_waveforms"]:
             data_plot["wfs_by_ids"] = {unit_id: self.we.get_waveforms(unit_id) for unit_id in unit_ids}
-
+
         # TODO option for plot_legend

         backend_kwargs = {}
diff --git a/src/spikeinterface/widgets/utils_ipywidgets.py b/src/spikeinterface/widgets/utils_ipywidgets.py
index ee6133a990..6e872eca55 100644
--- a/src/spikeinterface/widgets/utils_ipywidgets.py
+++ b/src/spikeinterface/widgets/utils_ipywidgets.py
@@ -12,12 +12,9 @@ def check_ipywidget_backend():


 class TimeSlider(W.HBox):
-
     value = traitlets.Tuple(traitlets.Int(), traitlets.Int(), traitlets.Int())
-
-    def __init__(self, durations, sampling_frequency, time_range=(0, 1.), **kwargs):
-
-
+
+    def __init__(self, durations, sampling_frequency, time_range=(0, 1.0), **kwargs):
         self.num_segments = len(durations)
         self.frame_limits = [int(sampling_frequency * d) for d in durations]
         self.sampling_frequency = sampling_frequency
@@ -28,81 +25,100 @@ def __init__(self, durations, sampling_frequency, time_range=(0, 1.), **kwargs):
         self.segment_index = 0
         self.value = (start_frame, end_frame, self.segment_index)
-
-
+
         layout = W.Layout(align_items="center", width="2.5cm", height="1.cm")
-        but_left = W.Button(description='', disabled=False, button_style='', icon='arrow-left', layout=layout)
-        but_right = W.Button(description='', disabled=False, button_style='', icon='arrow-right', layout=layout)
-
+        but_left = W.Button(description="", disabled=False, button_style="", icon="arrow-left", layout=layout)
+        but_right = W.Button(description="", disabled=False, button_style="", icon="arrow-right", layout=layout)
+
         but_left.on_click(self.move_left)
         but_right.on_click(self.move_right)

-        self.move_size = W.Dropdown(options=['10 ms', '100 ms', '1 s', '10 s', '1 m', '30 m', '1 h',],  # '6 h', '24 h'
-                                    value='1 s',
-                                    description='',
-                                    layout = W.Layout(width="2cm")
-                                    )
+        self.move_size = W.Dropdown(
+            options=[
+                "10 ms",
+                "100 ms",
+                "1 s",
+                "10 s",
+                "1 m",
+                "30 m",
+                "1 h",
+            ],  # '6 h', '24 h'
+            value="1 s",
+            description="",
+            layout=W.Layout(width="2cm"),
+        )

         # DatetimePicker is only for ipywidget v8 (which is not working in vscode 2023-03)
-        self.time_label = W.Text(value=f'{time_range[0]}',description='',
-                                 disabled=False, layout=W.Layout(width='2.5cm'))
-        self.time_label.observe(self.time_label_changed, names='value', type="change")
-
+        self.time_label = W.Text(
+            value=f"{time_range[0]}", description="", disabled=False, layout=W.Layout(width="2.5cm")
+        )
+        self.time_label.observe(self.time_label_changed, names="value", type="change")

         self.slider = W.IntSlider(
-            orientation='horizontal',
-            # description='time:',
+            orientation="horizontal",
+            # description='time:',
             value=start_frame,
             min=0,
             max=self.frame_limits[self.segment_index] - 1,
             readout=False,
             continuous_update=False,
-            layout=W.Layout(width=f'70%')
+            layout=W.Layout(width=f"70%"),
         )
-
-        self.slider.observe(self.slider_moved, names='value', type="change")
-
+
+        self.slider.observe(self.slider_moved, names="value", type="change")
+
         delta_s = np.diff(self.frame_range) / sampling_frequency
-
-        self.window_sizer = W.BoundedFloatText(value=delta_s, step=1,
-                                               min=0.01, max=30.,
-                                               description='win (s)',
-                                               layout=W.Layout(width='auto')
-                                               # layout=W.Layout(width=f'10%')
-                                               )
-        self.window_sizer.observe(self.win_size_changed, names='value', type="change")
+
+        self.window_sizer = W.BoundedFloatText(
+            value=delta_s,
+            step=1,
+            min=0.01,
+            max=30.0,
+            description="win (s)",
+            layout=W.Layout(width="auto")
+            # layout=W.Layout(width=f'10%')
+        )
+        self.window_sizer.observe(self.win_size_changed, names="value", type="change")

         self.segment_selector = W.Dropdown(description="segment", options=list(range(self.num_segments)))
-        self.segment_selector.observe(self.segment_changed, names='value', type="change")
+        self.segment_selector.observe(self.segment_changed, names="value", type="change")
+
+        super(W.HBox, self).__init__(
+            children=[
+                self.segment_selector,
+                but_left,
+                self.move_size,
+                but_right,
+                self.slider,
+                self.time_label,
+                self.window_sizer,
+            ],
+            layout=W.Layout(align_items="center", width="100%", height="100%"),
+            **kwargs,
+        )

-        super(W.HBox, self).__init__(children=[self.segment_selector, but_left, self.move_size, but_right,
-                                               self.slider, self.time_label, self.window_sizer],
-                                     layout=W.Layout(align_items="center", width="100%", height="100%"),
-                                     **kwargs)
-
-        self.observe(self.value_changed, names=['value'], type="change")
+        self.observe(self.value_changed, names=["value"], type="change")

     def value_changed(self, change=None):
-
-        self.unobserve(self.value_changed, names=['value'], type="change")
+        self.unobserve(self.value_changed, names=["value"], type="change")

         start, stop, seg_index = self.value
         if seg_index < 0 or seg_index >= self.num_segments:
-            self.value = change['old']
+            self.value = change["old"]
             return
         if start < 0 or stop < 0:
-            self.value = change['old']
+            self.value = change["old"]
             return
         if start >= self.frame_limits[seg_index] or start > self.frame_limits[seg_index]:
-            self.value = change['old']
+            self.value = change["old"]
             return
-
+
         self.segment_selector.value = seg_index
         self.update_time(new_frame=start, update_slider=True, update_label=True)
         delta_s = (stop - start) / self.sampling_frequency
         self.window_sizer.value = delta_s

-        self.observe(self.value_changed, names=['value'], type="change")
+        self.observe(self.value_changed, names=["value"], type="change")

     def update_time(self, new_frame=None, new_time=None, update_slider=False, update_label=False):
         if new_frame is None and new_time is None:
@@ -118,25 +134,24 @@ def update_time(self, new_frame=None, new_time=None, update_slider=False, update
         start_frame = min(self.frame_limits[self.segment_index] - delta, start_frame)
         start_frame = max(0, start_frame)
         end_frame = start_frame + delta
-
+
         end_frame = min(self.frame_limits[self.segment_index], end_frame)
-
         start_time = start_frame / self.sampling_frequency

         if update_label:
-            self.time_label.unobserve(self.time_label_changed, names='value', type="change")
-            self.time_label.value = f'{start_time}'
-            self.time_label.observe(self.time_label_changed, names='value', type="change")
+            self.time_label.unobserve(self.time_label_changed, names="value", type="change")
+            self.time_label.value = f"{start_time}"
+            self.time_label.observe(self.time_label_changed, names="value", type="change")

         if update_slider:
-            self.slider.unobserve(self.slider_moved, names='value', type="change")
+            self.slider.unobserve(self.slider_moved, names="value", type="change")
             self.slider.value = start_frame
-            self.slider.observe(self.slider_moved, names='value', type="change")
-
+            self.slider.observe(self.slider_moved, names="value", type="change")
+
         self.frame_range = (start_frame, end_frame)
         self.value = (start_frame, end_frame, self.segment_index)
-
+
     def time_label_changed(self, change=None):
         try:
             new_time = float(self.time_label.value)
@@ -145,39 +160,39 @@ def time_label_changed(self, change=None):
         if new_time is not None:
             self.update_time(new_time=new_time, update_slider=True)

-
     def win_size_changed(self, change=None):
         self.update_time()
-
+
     def slider_moved(self, change=None):
         new_frame = self.slider.value
         self.update_time(new_frame=new_frame, update_label=True)
-
+
     def move(self, sign):
-        value, units = self.move_size.value.split(' ')
+        value, units = self.move_size.value.split(" ")
         value = int(value)
-        delta_s = (sign * np.timedelta64(value, units)) / np.timedelta64(1, 's')
+        delta_s = (sign * np.timedelta64(value, units)) / np.timedelta64(1, "s")
         delta_sample = int(delta_s * self.sampling_frequency)

         new_frame = self.frame_range[0] + delta_sample
         self.slider.value = new_frame
-
+
     def move_left(self, change=None):
         self.move(-1)

     def move_right(self, change=None):
         self.move(+1)
-
+
     def segment_changed(self, change=None):
         self.segment_index = self.segment_selector.value

-        self.slider.unobserve(self.slider_moved, names='value', type="change")
+        self.slider.unobserve(self.slider_moved, names="value", type="change")
         # self.slider.value = 0
         self.slider.max = self.frame_limits[self.segment_index] - 1
-        self.slider.observe(self.slider_moved, names='value', type="change")
+        self.slider.observe(self.slider_moved, names="value", type="change")

         self.update_time(new_frame=0, update_slider=True, update_label=True)

+
 class ChannelSelector(W.VBox):
     value = traitlets.List()

@@ -211,22 +226,24 @@ def __init__(self, channel_ids, **kwargs):
         )

         hbox = W.HBox(children=[self.slider, self.selector])
-        super(W.VBox, self).__init__(children=[channel_label, hbox],
-                                     layout=W.Layout(align_items="center"),
-                                     # layout=W.Layout(align_items="center", width="100%", height="100%"),
-                                     **kwargs)
-        self.slider.observe(self.on_slider_changed, names=['value'], type="change")
-        self.selector.observe(self.on_selector_changed, names=['value'], type="change")
+        super(W.VBox, self).__init__(
+            children=[channel_label, hbox],
+            layout=W.Layout(align_items="center"),
+            # layout=W.Layout(align_items="center", width="100%", height="100%"),
+            **kwargs,
+        )
+        self.slider.observe(self.on_slider_changed, names=["value"], type="change")
+        self.selector.observe(self.on_selector_changed, names=["value"], type="change")

         # TODO external value change
         # self.observe(self.value_changed, names=['value'], type="change")
-
+
     def on_slider_changed(self, change=None):
         i0, i1 = self.slider.value
-
-        self.selector.unobserve(self.on_selector_changed, names=['value'], type="change")
+
+        self.selector.unobserve(self.on_selector_changed, names=["value"], type="change")
         self.selector.value = self.channel_ids[i0:i1][::-1]
-        self.selector.observe(self.on_selector_changed, names=['value'], type="change")
+        self.selector.observe(self.on_selector_changed, names=["value"], type="change")

         self.value = self.channel_ids[i0:i1]

@@ -235,27 +252,23 @@ def on_selector_changed(self, change=None):
         channel_ids = channel_ids[::-1]

         if len(channel_ids) > 0:
-            self.slider.unobserve(self.on_slider_changed, names=['value'], type="change")
+            self.slider.unobserve(self.on_slider_changed, names=["value"], type="change")
             i0 = self.channel_ids.index(channel_ids[0])
             i1 = self.channel_ids.index(channel_ids[-1]) + 1
             self.slider.value = (i0, i1)
-            self.slider.observe(self.on_slider_changed, names=['value'], type="change")
+            self.slider.observe(self.on_slider_changed, names=["value"], type="change")

         self.value = channel_ids

-
 class ScaleWidget(W.VBox):
     value = traitlets.Float()

-    def __init__(self, value=1., factor=1.2, **kwargs):
-
-        assert factor > 1.
+    def __init__(self, value=1.0, factor=1.2, **kwargs):
+        assert factor > 1.0
         self.factor = factor

-        self.scale_label = W.Label("Scale",
-                                   layout=W.Layout(layout=W.Layout(width='95%'),
-                                                   justify_content="center"))
+        self.scale_label = W.Label("Scale", layout=W.Layout(layout=W.Layout(width="95%"), justify_content="center"))

         self.plus_selector = W.Button(
             description="",
@@ -264,7 +277,7 @@ def __init__(self, value=1., factor=1.2, **kwargs):
             tooltip="Increase scale",
             icon="arrow-up",
             # layout=W.Layout(width=f"{0.8 * width_cm}cm", height=f"{0.4 * height_cm}cm"),
-            layout=W.Layout(width='60%', align_self='center'),
+            layout=W.Layout(width="60%", align_self="center"),
         )

         self.minus_selector = W.Button(
@@ -274,31 +287,31 @@ def __init__(self, value=1., factor=1.2, **kwargs):
             tooltip="Decrease scale",
             icon="arrow-down",
             # layout=W.Layout(width=f"{0.8 * width_cm}cm", height=f"{0.4 * height_cm}cm"),
-            layout=W.Layout(width='60%', align_self='center'),
+            layout=W.Layout(width="60%", align_self="center"),
         )

         self.plus_selector.on_click(self.plus_clicked)
         self.minus_selector.on_click(self.minus_clicked)

-        self.value = 1.
-        super(W.VBox, self).__init__(children=[self.plus_selector, self.scale_label, self.minus_selector],
-                                     # layout=W.Layout(align_items="center", width="100%", height="100%"),
-                                     **kwargs)
+        self.value = 1.0
+        super(W.VBox, self).__init__(
+            children=[self.plus_selector, self.scale_label, self.minus_selector],
+            # layout=W.Layout(align_items="center", width="100%", height="100%"),
+            **kwargs,
+        )

         self.update_label()
-        self.observe(self.value_changed, names=['value'], type="change")
-
+        self.observe(self.value_changed, names=["value"], type="change")
+
     def update_label(self):
         self.scale_label.value = f"Scale: {self.value:0.2f}"

-
     def plus_clicked(self, change=None):
         self.value = self.value * self.factor

     def minus_clicked(self, change=None):
         self.value = self.value / self.factor

-
     def value_changed(self, change=None):
         self.update_label()

@@ -319,20 +332,17 @@ def __init__(self, unit_ids, **kwargs):
             layout=W.Layout(height="100%", width="2cm"),
         )

-        super(W.VBox, self).__init__(children=[label, self.selector],
-                                     layout=W.Layout(align_items="center"),
-                                     **kwargs)
-
-        self.selector.observe(self.on_selector_changed, names=['value'], type="change")
+        super(W.VBox, self).__init__(children=[label, self.selector], layout=W.Layout(align_items="center"), **kwargs)
+
+        self.selector.observe(self.on_selector_changed, names=["value"], type="change")
+
+        self.observe(self.value_changed, names=["value"], type="change")

-        self.observe(self.value_changed, names=['value'], type="change")
-
    def on_selector_changed(self, change=None):
        unit_ids = self.selector.value
        self.value = unit_ids

-    def value_changed(self, change=None):
-        self.selector.unobserve(self.on_selector_changed, names=['value'], type="change")
-        self.selector.value = change['new']
-        self.selector.observe(self.on_selector_changed, names=['value'], type="change")
+    def value_changed(self, change=None):
+        self.selector.unobserve(self.on_selector_changed, names=["value"], type="change")
+        self.selector.value = change["new"]
+        self.selector.observe(self.on_selector_changed, names=["value"], type="change")
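Throughout the widget changes above, programmatic value updates are wrapped in `unobserve(...)` / `observe(...)` pairs so that a callback does not re-trigger itself. A small helper that factors this pattern out (a sketch, not part of the patch; it relies only on the public traitlets observe API):

    import contextlib

    @contextlib.contextmanager
    def silenced(widget, handler, names="value"):
        # Temporarily detach the observer so setting widget.value
        # programmatically does not re-enter the handler.
        widget.unobserve(handler, names=names, type="change")
        try:
            yield widget
        finally:
            widget.observe(handler, names=names, type="change")

Used as `with silenced(self.slider, self.slider_moved): self.slider.value = start_frame`, it is equivalent to the explicit unobserve/observe pairs in `update_time` above.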
From eb80725559f6d5b3d1c882e9254e39e39331952d Mon Sep 17 00:00:00 2001
From: Garcia Samuel
Date: Wed, 27 Sep 2023 11:30:41 +0200
Subject: [PATCH 091/115] Update doc/modules/qualitymetrics/amplitude_cv.rst

---
 doc/modules/qualitymetrics/amplitude_cv.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/modules/qualitymetrics/amplitude_cv.rst b/doc/modules/qualitymetrics/amplitude_cv.rst
index 3edb1f9833..13117b607c 100644
--- a/doc/modules/qualitymetrics/amplitude_cv.rst
+++ b/doc/modules/qualitymetrics/amplitude_cv.rst
@@ -46,7 +46,7 @@ Example code
 References
 ----------

-.. autofunction:: spikeinterface.qualitymetrics.misc_metrics.compute_amplitude_spreads
+.. autofunction:: spikeinterface.qualitymetrics.misc_metrics.compute_amplitude_cv_metrics

 Literature

From 8e4b43a4f67a92a1497eda5d53f2be2e04f7779f Mon Sep 17 00:00:00 2001
From: Garcia Samuel
Date: Wed, 27 Sep 2023 11:37:12 +0200
Subject: [PATCH 092/115] Update src/spikeinterface/postprocessing/amplitude_scalings.py

---
 src/spikeinterface/postprocessing/amplitude_scalings.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/spikeinterface/postprocessing/amplitude_scalings.py b/src/spikeinterface/postprocessing/amplitude_scalings.py
index 8823fd6257..7e6c95a875 100644
--- a/src/spikeinterface/postprocessing/amplitude_scalings.py
+++ b/src/spikeinterface/postprocessing/amplitude_scalings.py
@@ -431,7 +431,7 @@ def _are_unit_indices_overlapping(sparsity_mask, i, j):
     bool
         True if the unit indices i and j are overlapping, False otherwise
     """
-    if np.sum(np.logical_and(sparsity_mask[i], sparsity_mask[j])) > 0:
+    if np.any(sparsity_mask[i] & sparsity_mask[j]):
         return True
     else:
         return False

From 7605222e5707f6451a2ecc8b4fdbde747883c7bc Mon Sep 17 00:00:00 2001
From: Zach McKenzie <92116279+zm711@users.noreply.github.com>
Date: Wed, 27 Sep 2023 06:49:32 -0400
Subject: [PATCH 093/115] rec_path = None, from Sam

Co-authored-by: Garcia Samuel
---
 src/spikeinterface/exporters/to_phy.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/src/spikeinterface/exporters/to_phy.py b/src/spikeinterface/exporters/to_phy.py
index edfca0fa52..54ad0ea366 100644
--- a/src/spikeinterface/exporters/to_phy.py
+++ b/src/spikeinterface/exporters/to_phy.py
@@ -156,6 +156,8 @@ def export_to_phy(
     if use_relative_path:
         if copy_binary:
             f.write(f"dat_path = r'recording.dat'\n")
+        elif rec_path == "None":
+            f.write(f"dat_path = {rec_path}\n")
         else:
             f.write(f"dat_path = r'{str(Path(rec_path).relative_to(output_folder))}'\n")
     else:

From f16b12c040ab512ce30e17219ca61e84168cc586 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Wed, 27 Sep 2023 10:49:49 +0000
Subject: [PATCH 094/115] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 src/spikeinterface/exporters/to_phy.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/spikeinterface/exporters/to_phy.py b/src/spikeinterface/exporters/to_phy.py
index 54ad0ea366..ebc810b953 100644
--- a/src/spikeinterface/exporters/to_phy.py
+++ b/src/spikeinterface/exporters/to_phy.py
@@ -157,7 +157,7 @@ def export_to_phy(
         if copy_binary:
             f.write(f"dat_path = r'recording.dat'\n")
         elif rec_path == "None":
-            f.write(f"dat_path = {rec_path}\n")
+            f.write(f"dat_path = {rec_path}\n")
         else:
             f.write(f"dat_path = r'{str(Path(rec_path).relative_to(output_folder))}'\n")
     else:
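The `_are_unit_indices_overlapping` change above replaces a sum over a logical AND with `np.any`, which expresses the intent directly and avoids the intermediate count. A toy illustration (names are illustrative only):

    import numpy as np

    # 3 units x 5 channels; True marks the channels a unit is visible on.
    sparsity_mask = np.array(
        [
            [True, True, False, False, False],
            [False, True, True, False, False],
            [False, False, False, True, True],
        ]
    )

    def units_overlap(mask, i, j):
        # Equivalent to np.sum(np.logical_and(mask[i], mask[j])) > 0
        return np.any(mask[i] & mask[j])

    assert units_overlap(sparsity_mask, 0, 1)      # share channel 1
    assert not units_overlap(sparsity_mask, 0, 2)  # no common channel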
From 957a169e9cb663446398ed7e44abe47209e85619 Mon Sep 17 00:00:00 2001
From: Alessio Buccino
Date: Wed, 27 Sep 2023 13:18:45 +0200
Subject: [PATCH 095/115] hotfix: synchrony metrics indexing

---
 src/spikeinterface/qualitymetrics/misc_metrics.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/src/spikeinterface/qualitymetrics/misc_metrics.py b/src/spikeinterface/qualitymetrics/misc_metrics.py
index f449b3c31b..e9726a16da 100644
--- a/src/spikeinterface/qualitymetrics/misc_metrics.py
+++ b/src/spikeinterface/qualitymetrics/misc_metrics.py
@@ -552,12 +552,13 @@ def compute_synchrony_metrics(waveform_extractor, synchrony_sizes=(2, 4, 8), uni
             continue
         spike_complexity = complexity[np.isin(unique_spike_index, spikes_per_unit["sample_index"])]
         for synchrony_size in synchrony_sizes:
-            synchrony_counts[synchrony_size][unit_id] += np.count_nonzero(spike_complexity >= synchrony_size)
+            synchrony_counts[synchrony_size][unit_index] += np.count_nonzero(spike_complexity >= synchrony_size)

     # add counts for this segment
     synchrony_metrics_dict = {
         f"sync_spike_{synchrony_size}": {
-            unit_id: synchrony_counts[synchrony_size][unit_id] / spike_counts[unit_id] for unit_id in unit_ids
+            unit_id: synchrony_counts[synchrony_size][all_unit_ids.index(unit_id)] / spike_counts[unit_id]
+            for unit_id in unit_ids
         }
         for synchrony_size in synchrony_sizes
     }
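The hotfix above is the classic unit_id versus unit_index confusion: `synchrony_counts` is indexed by position while `unit_id` is a label. A toy illustration of why the two must not be mixed (illustrative names only):

    import numpy as np

    all_unit_ids = [101, 102, 205]         # labels, not positions
    counts = np.zeros(len(all_unit_ids))   # positional storage

    unit_id = 205
    unit_index = all_unit_ids.index(unit_id)
    counts[unit_index] += 1                # correct: positional index
    # counts[unit_id] += 1                 # wrong: IndexError here, or a
    #                                      # silent write to another unit

    per_unit = {uid: counts[all_unit_ids.index(uid)] for uid in all_unit_ids}
    assert per_unit[205] == 1.0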
d["temporal"] = np.flip(temporal, axis=1) d["templates"] = {} d["norms"] = np.zeros(num_templates, dtype=np.float32) # And get the norms, saving compressed templates for CC matrix for count in range(num_templates): - template = templates[count][:, sparsity[count]] + template = templates[count][:, d['sparsity_mask'][count]] d["norms"][count] = np.linalg.norm(template) d["templates"][count] = template / d["norms"][count] d["temporal"] /= d["norms"][:, np.newaxis, np.newaxis] - d["spatial"] = np.moveaxis(d["spatial"][:, :rank, :], [0, 1, 2], [1, 0, 2]) - d["temporal"] = np.moveaxis(d["temporal"][:, :, :rank], [0, 1, 2], [1, 2, 0]) + d["temporal"] = np.flip(d["temporal"], axis=1) + + d['overlaps'] = [] + for i in range(num_templates): + num_overlaps = np.sum(d['units_overlaps'][i]) + overlapping_units = np.where(d['units_overlaps'][i])[0] + + # Reconstruct unit template from SVD Matrices + data = d['temporal'][i] * d['singular'][i][np.newaxis, :] + template_i = np.matmul(data, d['spatial'][i, :, :]) + template_i = np.flipud(template_i) + + unit_overlaps = np.zeros([num_overlaps, 2*d['num_samples'] - 1], dtype=np.float32) + + for count, j in enumerate(overlapping_units): + overlapped_channels = d['sparsity_mask'][j] + visible_i = template_i[:, overlapped_channels] + + spatial_filters = d['spatial'][j, :, overlapped_channels] + spatially_filtered_template = np.matmul(visible_i, spatial_filters) + visible_i = spatially_filtered_template * d['singular'][j] + + for rank in range(visible_i.shape[1]): + unit_overlaps[count, :] += np.convolve(visible_i[:, rank], d['temporal'][j][:, rank], mode='full') + + d['overlaps'].append(unit_overlaps) + + d["spatial"] = np.moveaxis(d["spatial"], [0, 1, 2], [1, 0, 2]) + d["temporal"] = np.moveaxis(d["temporal"], [0, 1, 2], [1, 2, 0]) d["singular"] = d["singular"].T[:, :, np.newaxis] return d @@ -181,14 +213,10 @@ def initialize_and_check_kwargs(cls, recording, kwargs): if "templates" not in d: d = cls._prepare_templates(d) else: - for key in ["norms", "sparsities", "temporal", "spatial", "singular"]: + for key in ["norms", "temporal", "spatial", "singular", "units_overlaps", "sparsity_mask", "unit_overlaps_indices"]: assert d[key] is not None, "If templates are provided, %d should also be there" % key d["num_templates"] = len(d["templates"]) - - if "overlaps" not in d: - d["overlaps"] = compute_overlaps(d["templates"], d["num_samples"], d["num_channels"], d["sparsities"]) - d["ignored_ids"] = np.array(d["ignored_ids"]) omp_min_sps = d["omp_min_sps"] @@ -252,7 +280,7 @@ def main_function(cls, traces, d): spikes = np.empty(scalar_products.size, dtype=spike_dtype) idx_lookup = np.arange(scalar_products.size).reshape(num_templates, -1) - M = np.zeros((100, 100), dtype=np.float32) + M = np.zeros((num_templates, num_templates), dtype=np.float32) all_selections = np.empty((2, scalar_products.size), dtype=np.int32) final_amplitudes = np.zeros(scalar_products.shape, dtype=np.float32) @@ -273,18 +301,24 @@ def main_function(cls, traces, d): if num_selection > 0: delta_t = selection[1] - peak_index - idx = np.where((delta_t < neighbor_window) & (delta_t > -num_samples))[0] + idx = np.where((delta_t < neighbor_window) & (delta_t >= -num_samples))[0] myline = num_samples + delta_t[idx] + myindices = selection[0, idx] - if not best_cluster_ind in cached_overlaps: - cached_overlaps[best_cluster_ind] = overlaps[best_cluster_ind].toarray() + local_overlaps = overlaps[best_cluster_ind] + overlapping_templates = d['unit_overlaps_indices'][best_cluster_ind] if num_selection == 
M.shape[0]: Z = np.zeros((2 * num_selection, 2 * num_selection), dtype=np.float32) Z[:num_selection, :num_selection] = M M = Z - M[num_selection, idx] = cached_overlaps[best_cluster_ind][selection[0, idx], myline] + mask = np.isin(myindices, overlapping_templates) + a, b = myindices[mask], myline[mask] + + table = np.zeros(num_templates, dtype=int) + table[overlapping_templates] = np.arange(len(overlapping_templates)) + M[num_selection, myindices[mask]] = local_overlaps[table[a], b] if vicinity == 0: scipy.linalg.solve_triangular( @@ -346,8 +380,8 @@ def main_function(cls, traces, d): tmp_best, tmp_peak = selection[:, i] diff_amp = diff_amplitudes[i] * norms[tmp_best] - if not tmp_best in cached_overlaps: - cached_overlaps[tmp_best] = overlaps[tmp_best].toarray() + local_overlaps = overlaps[tmp_best] + overlapping_templates = d['units_overlaps'][tmp_best] if not tmp_peak in neighbors.keys(): idx = [max(0, tmp_peak - num_samples), min(num_peaks, tmp_peak + neighbor_window)] @@ -357,8 +391,8 @@ def main_function(cls, traces, d): idx = neighbors[tmp_peak]["idx"] tdx = neighbors[tmp_peak]["tdx"] - to_add = diff_amp * cached_overlaps[tmp_best][:, tdx[0] : tdx[1]] - scalar_products[:, idx[0] : idx[1]] -= to_add + to_add = diff_amp * local_overlaps[:, tdx[0] : tdx[1]] + scalar_products[overlapping_templates, idx[0] : idx[1]] -= to_add is_valid = scalar_products > stop_criteria From 97aff7f6754e7c4d333b95629552fe37151bf24f Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 27 Sep 2023 11:36:51 +0000 Subject: [PATCH 097/115] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../sortingcomponents/matching/circus.py | 54 ++++++++++--------- 1 file changed, 30 insertions(+), 24 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index e047cbdd31..5924d3bc18 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -123,20 +123,18 @@ def _prepare_templates(cls, d): else: sparsity = waveform_extractor.sparsity.mask - d['sparsity_mask'] = sparsity - units_overlaps = np.sum( - np.logical_and(sparsity[:, np.newaxis, :], sparsity[np.newaxis, :, :]), axis=2 - ) - d['units_overlaps'] = units_overlaps > 0 - d['unit_overlaps_indices'] = {} + d["sparsity_mask"] = sparsity + units_overlaps = np.sum(np.logical_and(sparsity[:, np.newaxis, :], sparsity[np.newaxis, :, :]), axis=2) + d["units_overlaps"] = units_overlaps > 0 + d["unit_overlaps_indices"] = {} for i in range(num_templates): - d['unit_overlaps_indices'][i], = np.nonzero(d['units_overlaps'][i]) + (d["unit_overlaps_indices"][i],) = np.nonzero(d["units_overlaps"][i]) templates = waveform_extractor.get_all_templates(mode="median").copy() # First, we set masked channels to 0 for count in range(num_templates): - templates[count][:, ~d['sparsity_mask'][count]] = 0 + templates[count][:, ~d["sparsity_mask"][count]] = 0 # Then we keep only the strongest components rank = d["rank"] @@ -153,37 +151,37 @@ def _prepare_templates(cls, d): # And get the norms, saving compressed templates for CC matrix for count in range(num_templates): - template = templates[count][:, d['sparsity_mask'][count]] + template = templates[count][:, d["sparsity_mask"][count]] d["norms"][count] = np.linalg.norm(template) d["templates"][count] = template / d["norms"][count] d["temporal"] /= d["norms"][:, 
np.newaxis, np.newaxis] d["temporal"] = np.flip(d["temporal"], axis=1) - d['overlaps'] = [] + d["overlaps"] = [] for i in range(num_templates): - num_overlaps = np.sum(d['units_overlaps'][i]) - overlapping_units = np.where(d['units_overlaps'][i])[0] + num_overlaps = np.sum(d["units_overlaps"][i]) + overlapping_units = np.where(d["units_overlaps"][i])[0] # Reconstruct unit template from SVD Matrices - data = d['temporal'][i] * d['singular'][i][np.newaxis, :] - template_i = np.matmul(data, d['spatial'][i, :, :]) + data = d["temporal"][i] * d["singular"][i][np.newaxis, :] + template_i = np.matmul(data, d["spatial"][i, :, :]) template_i = np.flipud(template_i) - unit_overlaps = np.zeros([num_overlaps, 2*d['num_samples'] - 1], dtype=np.float32) + unit_overlaps = np.zeros([num_overlaps, 2 * d["num_samples"] - 1], dtype=np.float32) for count, j in enumerate(overlapping_units): - overlapped_channels = d['sparsity_mask'][j] + overlapped_channels = d["sparsity_mask"][j] visible_i = template_i[:, overlapped_channels] - spatial_filters = d['spatial'][j, :, overlapped_channels] + spatial_filters = d["spatial"][j, :, overlapped_channels] spatially_filtered_template = np.matmul(visible_i, spatial_filters) - visible_i = spatially_filtered_template * d['singular'][j] - + visible_i = spatially_filtered_template * d["singular"][j] + for rank in range(visible_i.shape[1]): - unit_overlaps[count, :] += np.convolve(visible_i[:, rank], d['temporal'][j][:, rank], mode='full') + unit_overlaps[count, :] += np.convolve(visible_i[:, rank], d["temporal"][j][:, rank], mode="full") - d['overlaps'].append(unit_overlaps) + d["overlaps"].append(unit_overlaps) d["spatial"] = np.moveaxis(d["spatial"], [0, 1, 2], [1, 0, 2]) d["temporal"] = np.moveaxis(d["temporal"], [0, 1, 2], [1, 2, 0]) @@ -214,7 +212,15 @@ def initialize_and_check_kwargs(cls, recording, kwargs): if "templates" not in d: d = cls._prepare_templates(d) else: - for key in ["norms", "temporal", "spatial", "singular", "units_overlaps", "sparsity_mask", "unit_overlaps_indices"]: + for key in [ + "norms", + "temporal", + "spatial", + "singular", + "units_overlaps", + "sparsity_mask", + "unit_overlaps_indices", + ]: assert d[key] is not None, "If templates are provided, %d should also be there" % key d["num_templates"] = len(d["templates"]) @@ -307,7 +313,7 @@ def main_function(cls, traces, d): myindices = selection[0, idx] local_overlaps = overlaps[best_cluster_ind] - overlapping_templates = d['unit_overlaps_indices'][best_cluster_ind] + overlapping_templates = d["unit_overlaps_indices"][best_cluster_ind] if num_selection == M.shape[0]: Z = np.zeros((2 * num_selection, 2 * num_selection), dtype=np.float32) @@ -382,7 +388,7 @@ def main_function(cls, traces, d): diff_amp = diff_amplitudes[i] * norms[tmp_best] local_overlaps = overlaps[tmp_best] - overlapping_templates = d['units_overlaps'][tmp_best] + overlapping_templates = d["units_overlaps"][tmp_best] if not tmp_peak in neighbors.keys(): idx = [max(0, tmp_peak - num_samples), min(num_peaks, tmp_peak + neighbor_window)] From 8da6b79daa95bc4148123e76742607fb82b23fb3 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Wed, 27 Sep 2023 13:59:41 +0200 Subject: [PATCH 098/115] Keeping the two matching engines for more tests before merging and final decision --- .../clustering/clustering_tools.py | 39 +- .../sortingcomponents/matching/circus.py | 410 +++++++++++++++++- .../sortingcomponents/matching/method_list.py | 5 +- 3 files changed, 436 insertions(+), 18 deletions(-) diff --git 
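The `_prepare_templates` rework above computes pairwise template overlaps directly from the truncated-SVD factors (`temporal`, `singular`, `spatial`), accumulating one 1-D convolution per retained rank instead of dense per-channel cross-correlations. A self-contained numpy sketch of the idea (toy shapes, not the actual SpikeInterface internals):

    import numpy as np

    rng = np.random.default_rng(0)
    num_samples, num_channels, rank = 30, 8, 3
    template_a = rng.normal(size=(num_samples, num_channels))
    template_b = rng.normal(size=(num_samples, num_channels))

    # Compress template_b with a truncated SVD, like d["temporal"],
    # d["singular"] and d["spatial"] above.
    U, S, Vh = np.linalg.svd(template_b, full_matrices=False)
    temporal, singular, spatial = U[:, :rank], S[:rank], Vh[:rank, :]

    # Project template_a onto b's spatial components, then accumulate one
    # full convolution per rank; time-reversing the temporal component
    # turns the convolution into a cross-correlation, which is what the
    # flipped temporal factors achieve in the patch.
    projected = (template_a @ spatial.T) * singular
    overlap = np.zeros(2 * num_samples - 1)
    for r in range(rank):
        overlap += np.convolve(projected[:, r], temporal[::-1, r], mode="full")

    # Cross-check against the dense rank-reduced computation.
    dense = (temporal * singular) @ spatial
    expected = sum(
        np.correlate(template_a[:, c], dense[:, c], mode="full")
        for c in range(num_channels)
    )
    assert np.allclose(overlap, expected)

The per-rank form replaces a sum over channels by a sum over ranks, which pays off when the retained rank is much smaller than the channel count.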
From 8da6b79daa95bc4148123e76742607fb82b23fb3 Mon Sep 17 00:00:00 2001
From: Pierre Yger
Date: Wed, 27 Sep 2023 13:59:41 +0200
Subject: [PATCH 098/115] Keeping the two matching engines for more tests
 before merging and final decision

---
 .../clustering/clustering_tools.py            |  39 +-
 .../sortingcomponents/matching/circus.py      | 410 +++++++++++++++++-
 .../sortingcomponents/matching/method_list.py |   5 +-
 3 files changed, 436 insertions(+), 18 deletions(-)

diff --git a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py
index 032694a47e..455af3ddfd 100644
--- a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py
+++ b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py
@@ -539,6 +539,7 @@ def remove_duplicates_via_matching(
     method_kwargs={},
     job_kwargs={},
     tmp_folder=None,
+    method='circus-omp'
 ):
     from spikeinterface.sortingcomponents.matching import find_spikes_from_templates
     from spikeinterface import get_noise_levels
@@ -610,21 +611,31 @@ def remove_duplicates_via_matching(
         method_kwargs.update({"ignored_ids": ignore_ids + [i]})
         spikes, computed = find_spikes_from_templates(
-            sub_recording, method="circus-omp", method_kwargs=method_kwargs, extra_outputs=True, **job_kwargs
-        )
-        method_kwargs.update(
-            {
-                "overlaps": computed["overlaps"],
-                "templates": computed["templates"],
-                "norms": computed["norms"],
-                "temporal": computed["temporal"],
-                "spatial": computed["spatial"],
-                "singular": computed["singular"],
-                "units_overlaps": computed["units_overlaps"],
-                "unit_overlaps_indices": computed["unit_overlaps_indices"],
-                "sparsity_mask": computed["sparsity_mask"],
-            }
+            sub_recording, method=method, method_kwargs=method_kwargs, extra_outputs=True, **job_kwargs
         )
+        if method == 'circus-omp-vsd':
+            method_kwargs.update(
+                {
+                    "overlaps": computed["overlaps"],
+                    "templates": computed["templates"],
+                    "norms": computed["norms"],
+                    "temporal": computed["temporal"],
+                    "spatial": computed["spatial"],
+                    "singular": computed["singular"],
+                    "units_overlaps": computed["units_overlaps"],
+                    "unit_overlaps_indices": computed["unit_overlaps_indices"],
+                    "sparsity_mask": computed["sparsity_mask"],
+                }
+            )
+        elif method == 'circus-omp':
+            method_kwargs.update(
+                {
+                    "overlaps": computed["overlaps"],
+                    "templates": computed["templates"],
+                    "norms": computed["norms"],
+                    "sparsities": computed["sparsities"]
+                }
+            )
         valid = (spikes["sample_index"] >= half_marging) * (spikes["sample_index"] < duration + half_marging)
         if np.sum(valid) > 0:
             if np.sum(valid) == 1:
diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py
index e047cbdd31..08be0985f1 100644
--- a/src/spikeinterface/sortingcomponents/matching/circus.py
+++ b/src/spikeinterface/sortingcomponents/matching/circus.py
@@ -33,8 +33,100 @@
 from .main import BaseTemplateMatchingEngine

-#################
-# Circus peeler #

+from scipy.fft._helper import _init_nd_shape_and_axes
+
+try:
+    from scipy.signal.signaltools import _init_freq_conv_axes, _apply_conv_mode
+except Exception:
+    from scipy.signal._signaltools import _init_freq_conv_axes, _apply_conv_mode
+from scipy import linalg, fft as sp_fft
+
+
+def get_scipy_shape(in1, in2, mode="full", axes=None, calc_fast_len=True):
+    in1 = np.asarray(in1)
+    in2 = np.asarray(in2)
+
+    if in1.ndim == in2.ndim == 0:  # scalar inputs
+        return in1 * in2
+    elif in1.ndim != in2.ndim:
+        raise ValueError("in1 and in2 should have the same dimensionality")
+    elif in1.size == 0 or in2.size == 0:  # empty arrays
+        return np.array([])
+
+    in1, in2, axes = _init_freq_conv_axes(in1, in2, mode, axes, sorted_axes=False)
+
+    s1 = in1.shape
+    s2 = in2.shape
+
+    shape = [max((s1[i], s2[i])) if i not in axes else s1[i] + s2[i] - 1 for i in range(in1.ndim)]
+
+    if not len(axes):
+        return in1 * in2
+
+    complex_result = in1.dtype.kind == "c" or in2.dtype.kind == "c"
+
+    if calc_fast_len:
+        # Speed up FFT by padding to optimal size.
+        fshape = [sp_fft.next_fast_len(shape[a], not complex_result) for a in axes]
+    else:
+        fshape = shape
+
+    return fshape, axes
+
+
+def fftconvolve_with_cache(in1, in2, cache, mode="full", axes=None):
+    in1 = np.asarray(in1)
+    in2 = np.asarray(in2)
+
+    if in1.ndim == in2.ndim == 0:  # scalar inputs
+        return in1 * in2
+    elif in1.ndim != in2.ndim:
+        raise ValueError("in1 and in2 should have the same dimensionality")
+    elif in1.size == 0 or in2.size == 0:  # empty arrays
+        return np.array([])
+
+    in1, in2, axes = _init_freq_conv_axes(in1, in2, mode, axes, sorted_axes=False)
+
+    s1 = in1.shape
+    s2 = in2.shape
+
+    shape = [max((s1[i], s2[i])) if i not in axes else s1[i] + s2[i] - 1 for i in range(in1.ndim)]
+
+    ret = _freq_domain_conv(in1, in2, axes, shape, cache, calc_fast_len=True)
+
+    return _apply_conv_mode(ret, s1, s2, mode, axes)
+
+
+def _freq_domain_conv(in1, in2, axes, shape, cache, calc_fast_len=True):
+    if not len(axes):
+        return in1 * in2
+
+    complex_result = in1.dtype.kind == "c" or in2.dtype.kind == "c"
+
+    if calc_fast_len:
+        # Speed up FFT by padding to optimal size.
+        fshape = [sp_fft.next_fast_len(shape[a], not complex_result) for a in axes]
+    else:
+        fshape = shape
+
+    if not complex_result:
+        fft, ifft = sp_fft.rfftn, sp_fft.irfftn
+    else:
+        fft, ifft = sp_fft.fftn, sp_fft.ifftn
+
+    sp1 = cache["full"][cache["mask"]]
+    sp2 = cache["template"]
+
+    # sp2 = fft(in2[cache['mask']], fshape, axes=axes)
+    ret = ifft(sp1 * sp2, fshape, axes=axes)
+
+    if calc_fast_len:
+        fslice = tuple([slice(sz) for sz in shape])
+        ret = ret[fslice]
+
+    return ret
+

 def compute_overlaps(templates, num_samples, num_channels, sparsities):
@@ -101,6 +193,320 @@ class CircusOMPPeeler(BaseTemplateMatchingEngine):
     -----
     """

+    _default_params = {
+        "amplitudes": [0.6, 2],
+        "omp_min_sps": 0.1,
+        "waveform_extractor": None,
+        "templates": None,
+        "overlaps": None,
+        "norms": None,
+        "random_chunk_kwargs": {},
+        "noise_levels": None,
+        "sparse_kwargs": {"method": "ptp", "threshold": 1},
+        "ignored_ids": [],
+        "vicinity": 0,
+    }
+
+    @classmethod
+    def _prepare_templates(cls, d):
+        waveform_extractor = d["waveform_extractor"]
+        num_templates = len(d["waveform_extractor"].sorting.unit_ids)
+
+        if not waveform_extractor.is_sparse():
+            sparsity = compute_sparsity(waveform_extractor, **d["sparse_kwargs"]).mask
+        else:
+            sparsity = waveform_extractor.sparsity.mask
+
+        templates = waveform_extractor.get_all_templates(mode="median").copy()
+
+        d["sparsities"] = {}
+        d["templates"] = {}
+        d["norms"] = np.zeros(num_templates, dtype=np.float32)
+
+        for count, unit_id in enumerate(waveform_extractor.sorting.unit_ids):
+            template = templates[count][:, sparsity[count]]
+            (d["sparsities"][count],) = np.nonzero(sparsity[count])
+            d["norms"][count] = np.linalg.norm(template)
+            d["templates"][count] = template / d["norms"][count]
+
+        return d
+
+    @classmethod
+    def initialize_and_check_kwargs(cls, recording, kwargs):
+        d = cls._default_params.copy()
+        d.update(kwargs)
+
+        # assert isinstance(d['waveform_extractor'], WaveformExtractor)
+
+        for v in ["omp_min_sps"]:
+            assert (d[v] >= 0) and (d[v] <= 1), f"{v} should be in [0, 1]"
+
+        d["num_channels"] = d["waveform_extractor"].recording.get_num_channels()
+        d["num_samples"] = d["waveform_extractor"].nsamples
+        d["nbefore"] = d["waveform_extractor"].nbefore
+        d["nafter"] = d["waveform_extractor"].nafter
+        d["sampling_frequency"] = d["waveform_extractor"].recording.get_sampling_frequency()
+        d["vicinity"] *= d["num_samples"]
+
+        if d["noise_levels"] is None:
+            print("CircusOMPPeeler : noise should be computed outside")
+            d["noise_levels"] = get_noise_levels(recording, **d["random_chunk_kwargs"], return_scaled=False)
+
+        if d["templates"] is None:
+            d = cls._prepare_templates(d)
+        else:
+            for key in ["norms", "sparsities"]:
+                assert d[key] is not None, "If templates are provided, %d should also be there" % key
+
+        d["num_templates"] = len(d["templates"])
+
+        if d["overlaps"] is None:
+            d["overlaps"] = compute_overlaps(d["templates"], d["num_samples"], d["num_channels"], d["sparsities"])
+
+        d["ignored_ids"] = np.array(d["ignored_ids"])
+
+        omp_min_sps = d["omp_min_sps"]
+        # nb_active_channels = np.array([len(d['sparsities'][count]) for count in range(d['num_templates'])])
+        d["stop_criteria"] = omp_min_sps * np.sqrt(d["noise_levels"].sum() * d["num_samples"])
+
+        return d
+
+    @classmethod
+    def serialize_method_kwargs(cls, kwargs):
+        kwargs = dict(kwargs)
+        # remove waveform_extractor
+        kwargs.pop("waveform_extractor")
+        return kwargs
+
+    @classmethod
+    def unserialize_in_worker(cls, kwargs):
+        return kwargs
+
+    @classmethod
+    def get_margin(cls, recording, kwargs):
+        margin = 2 * max(kwargs["nbefore"], kwargs["nafter"])
+        return margin
+
+    @classmethod
+    def main_function(cls, traces, d):
+        templates = d["templates"]
+        num_templates = d["num_templates"]
+        num_channels = d["num_channels"]
+        num_samples = d["num_samples"]
+        overlaps = d["overlaps"]
+        norms = d["norms"]
+        nbefore = d["nbefore"]
+        nafter = d["nafter"]
+        omp_tol = np.finfo(np.float32).eps
+        num_samples = d["nafter"] + d["nbefore"]
+        neighbor_window = num_samples - 1
+        min_amplitude, max_amplitude = d["amplitudes"]
+        sparsities = d["sparsities"]
+        ignored_ids = d["ignored_ids"]
+        stop_criteria = d["stop_criteria"]
+        vicinity = d["vicinity"]
+
+        if "cached_fft_kernels" not in d:
+            d["cached_fft_kernels"] = {"fshape": 0}
+
+        cached_fft_kernels = d["cached_fft_kernels"]
+
+        num_timesteps = len(traces)
+
+        num_peaks = num_timesteps - num_samples + 1
+
+        traces = traces.T
+
+        dummy_filter = np.empty((num_channels, num_samples), dtype=np.float32)
+        dummy_traces = np.empty((num_channels, num_timesteps), dtype=np.float32)
+
+        fshape, axes = get_scipy_shape(dummy_filter, traces, axes=1)
+        fft_cache = {"full": sp_fft.rfftn(traces, fshape, axes=axes)}
+
+        scalar_products = np.empty((num_templates, num_peaks), dtype=np.float32)
+
+        flagged_chunk = cached_fft_kernels["fshape"] != fshape[0]
+
+        for i in range(num_templates):
+            if i not in ignored_ids:
+                if i not in cached_fft_kernels or flagged_chunk:
+                    kernel_filter = np.ascontiguousarray(templates[i][::-1].T)
+                    cached_fft_kernels.update({i: sp_fft.rfftn(kernel_filter, fshape, axes=axes)})
+                    cached_fft_kernels["fshape"] = fshape[0]
+
+                fft_cache.update({"mask": sparsities[i], "template": cached_fft_kernels[i]})
+
+                convolution = fftconvolve_with_cache(dummy_filter, dummy_traces, fft_cache, axes=1, mode="valid")
+                if len(convolution) > 0:
+                    scalar_products[i] = convolution.sum(0)
+                else:
+                    scalar_products[i] = 0
+
+        if len(ignored_ids) > 0:
+            scalar_products[ignored_ids] = -np.inf
+
+        num_spikes = 0
+
+        spikes = np.empty(scalar_products.size, dtype=spike_dtype)
+        idx_lookup = np.arange(scalar_products.size).reshape(num_templates, -1)
+
+        M = np.zeros((100, 100), dtype=np.float32)
+
+        all_selections = np.empty((2, scalar_products.size), dtype=np.int32)
+        final_amplitudes = np.zeros(scalar_products.shape, dtype=np.float32)
+        num_selection = 0
+
+        full_sps = scalar_products.copy()
+
+        neighbors = {}
+        cached_overlaps = {}
+
+        is_valid = scalar_products > stop_criteria
+        all_amplitudes = np.zeros(0, dtype=np.float32)
+        is_in_vicinity = np.zeros(0, dtype=np.int32)
+
+        while np.any(is_valid):
+            best_amplitude_ind = scalar_products[is_valid].argmax()
+            best_cluster_ind, peak_index = np.unravel_index(idx_lookup[is_valid][best_amplitude_ind], idx_lookup.shape)
+
+            if num_selection > 0:
+                delta_t = selection[1] - peak_index
+                idx = np.where((delta_t < neighbor_window) & (delta_t > -num_samples))[0]
+                myline = num_samples + delta_t[idx]
+
+                if not best_cluster_ind in cached_overlaps:
+                    cached_overlaps[best_cluster_ind] = overlaps[best_cluster_ind].toarray()
+
+                if num_selection == M.shape[0]:
+                    Z = np.zeros((2 * num_selection, 2 * num_selection), dtype=np.float32)
+                    Z[:num_selection, :num_selection] = M
+                    M = Z
+
+                M[num_selection, idx] = cached_overlaps[best_cluster_ind][selection[0, idx], myline]
+
+                if vicinity == 0:
+                    scipy.linalg.solve_triangular(
+                        M[:num_selection, :num_selection],
+                        M[num_selection, :num_selection],
+                        trans=0,
+                        lower=1,
+                        overwrite_b=True,
+                        check_finite=False,
+                    )
+
+                    v = nrm2(M[num_selection, :num_selection]) ** 2
+                    Lkk = 1 - v
+                    if Lkk <= omp_tol:  # selected atoms are dependent
+                        break
+                    M[num_selection, num_selection] = np.sqrt(Lkk)
+                else:
+                    is_in_vicinity = np.where(np.abs(delta_t) < vicinity)[0]
+
+                    if len(is_in_vicinity) > 0:
+                        L = M[is_in_vicinity, :][:, is_in_vicinity]
+
+                        M[num_selection, is_in_vicinity] = scipy.linalg.solve_triangular(
+                            L, M[num_selection, is_in_vicinity], trans=0, lower=1, overwrite_b=True, check_finite=False
+                        )
+
+                        v = nrm2(M[num_selection, is_in_vicinity]) ** 2
+                        Lkk = 1 - v
+                        if Lkk <= omp_tol:  # selected atoms are dependent
+                            break
+                        M[num_selection, num_selection] = np.sqrt(Lkk)
+                    else:
+                        M[num_selection, num_selection] = 1.0
+            else:
+                M[0, 0] = 1
+
+            all_selections[:, num_selection] = [best_cluster_ind, peak_index]
+            num_selection += 1
+
+            selection = all_selections[:, :num_selection]
+            res_sps = full_sps[selection[0], selection[1]]
+
+            if True:  # vicinity == 0:
+                all_amplitudes, _ = potrs(M[:num_selection, :num_selection], res_sps, lower=True, overwrite_b=False)
+                all_amplitudes /= norms[selection[0]]
+            else:
+                # This is not working, need to figure out why
+                is_in_vicinity = np.append(is_in_vicinity, num_selection - 1)
+                all_amplitudes = np.append(all_amplitudes, np.float32(1))
+                L = M[is_in_vicinity, :][:, is_in_vicinity]
+                all_amplitudes[is_in_vicinity], _ = potrs(L, res_sps[is_in_vicinity], lower=True, overwrite_b=False)
+                all_amplitudes[is_in_vicinity] /= norms[selection[0][is_in_vicinity]]
+
+            diff_amplitudes = all_amplitudes - final_amplitudes[selection[0], selection[1]]
+            modified = np.where(np.abs(diff_amplitudes) > omp_tol)[0]
+            final_amplitudes[selection[0], selection[1]] = all_amplitudes
+
+            for i in modified:
+                tmp_best, tmp_peak = selection[:, i]
+                diff_amp = diff_amplitudes[i] * norms[tmp_best]
+
+                if not tmp_best in cached_overlaps:
+                    cached_overlaps[tmp_best] = overlaps[tmp_best].toarray()
+
+                if not tmp_peak in neighbors.keys():
+                    idx = [max(0, tmp_peak - num_samples), min(num_peaks, tmp_peak + neighbor_window)]
+                    tdx = [num_samples + idx[0] - tmp_peak, num_samples + idx[1] - tmp_peak]
+                    neighbors[tmp_peak] = {"idx": idx, "tdx": tdx}
+
+                idx = neighbors[tmp_peak]["idx"]
+                tdx = neighbors[tmp_peak]["tdx"]
+
+                to_add = diff_amp * cached_overlaps[tmp_best][:, tdx[0] : tdx[1]]
+                scalar_products[:, idx[0] : idx[1]] -= to_add
+
+            is_valid = scalar_products > stop_criteria
+
+        is_valid = (final_amplitudes > min_amplitude) * (final_amplitudes <
max_amplitude) + valid_indices = np.where(is_valid) + + num_spikes = len(valid_indices[0]) + spikes["sample_index"][:num_spikes] = valid_indices[1] + d["nbefore"] + spikes["channel_index"][:num_spikes] = 0 + spikes["cluster_index"][:num_spikes] = valid_indices[0] + spikes["amplitude"][:num_spikes] = final_amplitudes[valid_indices[0], valid_indices[1]] + + spikes = spikes[:num_spikes] + order = np.argsort(spikes["sample_index"]) + spikes = spikes[order] + + return spikes + +class CircusOMPSVDPeeler(BaseTemplateMatchingEngine): + """ + Orthogonal Matching Pursuit inspired from Spyking Circus sorter + + https://elifesciences.org/articles/34518 + + This is an Orthogonal Template Matching algorithm. For speed and + memory optimization, templates are automatically sparsified. Signal + is convolved with the templates, and as long as some scalar products + are higher than a given threshold, we use a Cholesky decomposition + to compute the optimal amplitudes needed to reconstruct the signal. + + IMPORTANT NOTE: small chunks are more efficient for such Peeler, + consider using 100ms chunk + + Parameters + ---------- + amplitude: tuple + (Minimal, Maximal) amplitudes allowed for every template + omp_min_sps: float + Stopping criteria of the OMP algorithm, in percentage of the norm + noise_levels: array + The noise levels, for every channels. If None, they will be automatically + computed + random_chunk_kwargs: dict + Parameters for computing noise levels, if not provided (sub optimal) + sparse_kwargs: dict + Parameters to extract a sparsity mask from the waveform_extractor, if not + already sparse. + ----- + """ + _default_params = { "amplitudes": [0.6, 2], "omp_min_sps": 0.1, diff --git a/src/spikeinterface/sortingcomponents/matching/method_list.py b/src/spikeinterface/sortingcomponents/matching/method_list.py index bedc04a9d5..99c2817338 100644 --- a/src/spikeinterface/sortingcomponents/matching/method_list.py +++ b/src/spikeinterface/sortingcomponents/matching/method_list.py @@ -1,6 +1,6 @@ from .naive import NaiveMatching from .tdc import TridesclousPeeler -from .circus import CircusPeeler, CircusOMPPeeler +from .circus import CircusPeeler, CircusOMPPeeler, CircusOMPSVDPeeler from .wobble import WobbleMatch matching_methods = { @@ -8,5 +8,6 @@ "tridesclous": TridesclousPeeler, "circus": CircusPeeler, "circus-omp": CircusOMPPeeler, + 'circus-omp-svd' : CircusOMPSVDPeeler, "wobble": WobbleMatch, -} +} \ No newline at end of file From a6b4774000159f8db5439072acc8bdec4757d26b Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 27 Sep 2023 12:00:19 +0000 Subject: [PATCH 099/115] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../clustering/clustering_tools.py | 14 ++++---------- .../sortingcomponents/matching/circus.py | 2 +- .../sortingcomponents/matching/method_list.py | 4 ++-- 3 files changed, 7 insertions(+), 13 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py index 455af3ddfd..17c38e2f8a 100644 --- a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py +++ b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py @@ -533,13 +533,7 @@ def remove_duplicates( def remove_duplicates_via_matching( - waveform_extractor, - noise_levels, - peak_labels, - method_kwargs={}, - job_kwargs={}, - tmp_folder=None, - method='circus-omp' + 
waveform_extractor, noise_levels, peak_labels, method_kwargs={}, job_kwargs={}, tmp_folder=None, method="circus-omp"
 ):
     from spikeinterface.sortingcomponents.matching import find_spikes_from_templates
     from spikeinterface import get_noise_levels
@@ -613,7 +607,7 @@ def remove_duplicates_via_matching(
             spikes, computed = find_spikes_from_templates(
                 sub_recording, method=method, method_kwargs=method_kwargs, extra_outputs=True, **job_kwargs
             )
-            if method == 'circus-omp-svd':
+            if method == "circus-omp-svd":
                 method_kwargs.update(
                     {
                         "overlaps": computed["overlaps"],
@@ -627,13 +621,13 @@ def remove_duplicates_via_matching(
                         "sparsity_mask": computed["sparsity_mask"],
                     }
                 )
-            elif method == 'circus-omp':
+            elif method == "circus-omp":
                 method_kwargs.update(
                     {
                         "overlaps": computed["overlaps"],
                         "templates": computed["templates"],
                         "norms": computed["norms"],
-                        "sparsities": computed["sparsities"]
+                        "sparsities": computed["sparsities"],
                     }
                 )
             valid = (spikes["sample_index"] >= half_marging) * (spikes["sample_index"] < duration + half_marging)
diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py
index e7bdcd161c..502c887ac4 100644
--- a/src/spikeinterface/sortingcomponents/matching/circus.py
+++ b/src/spikeinterface/sortingcomponents/matching/circus.py
@@ -128,7 +128,6 @@ def _freq_domain_conv(in1, in2, axes, shape, cache, calc_fast_len=True):

     return ret

-
 def compute_overlaps(templates, num_samples, num_channels, sparsities):
     num_templates = len(templates)

@@ -475,6 +474,7 @@ def main_function(cls, traces, d):

         return spikes

+
 class CircusOMPSVDPeeler(BaseTemplateMatchingEngine):
     """
     Orthogonal Matching Pursuit inspired from Spyking Circus sorter
diff --git a/src/spikeinterface/sortingcomponents/matching/method_list.py b/src/spikeinterface/sortingcomponents/matching/method_list.py
index 99c2817338..d982943126 100644
--- a/src/spikeinterface/sortingcomponents/matching/method_list.py
+++ b/src/spikeinterface/sortingcomponents/matching/method_list.py
@@ -8,6 +8,6 @@
     "tridesclous": TridesclousPeeler,
     "circus": CircusPeeler,
     "circus-omp": CircusOMPPeeler,
-    'circus-omp-svd' : CircusOMPSVDPeeler,
+    "circus-omp-svd": CircusOMPSVDPeeler,
     "wobble": WobbleMatch,
-}
\ No newline at end of file
+}

From 257c74c856254f8ed31365f0629b53baf844fb74 Mon Sep 17 00:00:00 2001
From: Pierre Yger
Date: Wed, 27 Sep 2023 15:52:38 +0200
Subject: [PATCH 100/115] Slight misalignment

---
 .../sortingcomponents/matching/circus.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py
index e7bdcd161c..04d780bb6b 100644
--- a/src/spikeinterface/sortingcomponents/matching/circus.py
+++ b/src/spikeinterface/sortingcomponents/matching/circus.py
@@ -714,8 +714,8 @@ def main_function(cls, traces, d):

             if num_selection > 0:
                 delta_t = selection[1] - peak_index
-                idx = np.where((delta_t < neighbor_window) & (delta_t >= -num_samples))[0]
-                myline = num_samples + delta_t[idx]
+                idx = np.where((delta_t < num_samples) & (delta_t > -num_samples))[0]
+                myline = neighbor_window + delta_t[idx]
                 myindices = selection[0, idx]

                 local_overlaps = overlaps[best_cluster_ind]
@@ -731,7 +731,7 @@ def main_function(cls, traces, d):
                 table = np.zeros(num_templates, dtype=int)
                 table[overlapping_templates] = np.arange(len(overlapping_templates))

-                M[num_selection, myindices[mask]] = local_overlaps[table[a], b]
+                M[num_selection, idx[mask]] = local_overlaps[table[a], 
b] if vicinity == 0: scipy.linalg.solve_triangular( @@ -797,8 +797,8 @@ def main_function(cls, traces, d): overlapping_templates = d["units_overlaps"][tmp_best] if not tmp_peak in neighbors.keys(): - idx = [max(0, tmp_peak - num_samples), min(num_peaks, tmp_peak + neighbor_window)] - tdx = [num_samples + idx[0] - tmp_peak, num_samples + idx[1] - tmp_peak] + idx = [max(0, tmp_peak - neighbor_window), min(num_peaks, tmp_peak + num_samples)] + tdx = [neighbor_window + idx[0] - tmp_peak, num_samples + idx[1] - tmp_peak - 1] neighbors[tmp_peak] = {"idx": idx, "tdx": tdx} idx = neighbors[tmp_peak]["idx"] From 6c561f214b02716e8da41a7ac198a94081f056a4 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Wed, 27 Sep 2023 15:54:14 +0200 Subject: [PATCH 101/115] more fix after merge with main and the new pickle to file mechanism --- .../comparison/groundtruthstudy.py | 21 +++++++++++-------- src/spikeinterface/sorters/basesorter.py | 10 ++++++--- src/spikeinterface/sorters/launcher.py | 8 ++++++- 3 files changed, 26 insertions(+), 13 deletions(-) diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py index fcebb356a0..eb430f69bd 100644 --- a/src/spikeinterface/comparison/groundtruthstudy.py +++ b/src/spikeinterface/comparison/groundtruthstudy.py @@ -194,10 +194,12 @@ def run_sorters(self, case_keys=None, engine='loop', engine_kwargs={}, keep=True sorter_name = params.pop("sorter_name") job = dict(sorter_name=sorter_name, recording=recording, - output_folder=sorter_folder) + output_folder=sorter_folder, + ) job.update(params) # the verbose is overwritten and global to all run_sorters job["verbose"] = verbose + job["with_output"] = False job_list.append(job) run_sorter_jobs(job_list, engine=engine, engine_kwargs=engine_kwargs, return_output=False) @@ -217,7 +219,8 @@ def copy_sortings(self, case_keys=None, force=True): if (sorter_folder / "spikeinterface_log.json").exists(): - sorting = read_sorter_folder(sorter_folder, raise_error=False) + sorting = read_sorter_folder(sorter_folder, raise_error=False, + register_recording=False, sorting_info=False) else: sorting = None @@ -383,13 +386,12 @@ def get_count_units( index = pd.MultiIndex.from_tuples(case_keys, names=self.levels) - columns = ["num_gt", "num_sorter", "num_well_detected", "num_redundant", "num_overmerged"] + columns = ["num_gt", "num_sorter", "num_well_detected"] comp = self.comparisons[case_keys[0]] if comp.exhaustive_gt: - columns.extend(["num_false_positive", "num_bad"]) + columns.extend(["num_false_positive", "num_redundant", "num_overmerged", "num_bad"]) count_units = pd.DataFrame(index=index, columns=columns, dtype=int) - for key in case_keys: comp = self.comparisons.get(key, None) assert comp is not None, "You need to do study.run_comparisons() first" @@ -402,11 +404,12 @@ def get_count_units( count_units.loc[key, "num_well_detected"] = comp.count_well_detected_units( well_detected_score ) - count_units.loc[key, "num_overmerged"] = comp.count_overmerged_units( - overmerged_score - ) - count_units.loc[key, "num_redundant"] = comp.count_redundant_units(redundant_score) + if comp.exhaustive_gt: + count_units.loc[key, "num_redundant"] = comp.count_redundant_units(redundant_score) + count_units.loc[key, "num_overmerged"] = comp.count_overmerged_units( + overmerged_score + ) count_units.loc[key, "num_false_positive"] = comp.count_false_positive_units( redundant_score ) diff --git a/src/spikeinterface/sorters/basesorter.py b/src/spikeinterface/sorters/basesorter.py index 
8d87558191..a956f8c811 100644
--- a/src/spikeinterface/sorters/basesorter.py
+++ b/src/spikeinterface/sorters/basesorter.py
@@ -202,7 +202,7 @@ def load_recording_from_folder(cls, output_folder, with_warnings=False):
                 recording = None
             else:
                 recording = load_extractor(json_file, base_folder=output_folder)
-        elif pickle_file.exits():
+        elif pickle_file.exists():
             recording = load_extractor(pickle_file)
         return recording
@@ -324,8 +324,12 @@ def get_result_from_folder(cls, output_folder, register_recording=True, sorting_
         if sorting_info:
             # set sorting info to Sorting object
-            with open(output_folder / "spikeinterface_recording.json", "r") as f:
-                rec_dict = json.load(f)
+            if (output_folder / "spikeinterface_recording.json").exists():
+                with open(output_folder / "spikeinterface_recording.json", "r") as f:
+                    rec_dict = json.load(f)
+            else:
+                rec_dict = None
+
             with open(output_folder / "spikeinterface_params.json", "r") as f:
                 params_dict = json.load(f)
             with open(output_folder / "spikeinterface_log.json", "r") as f:
diff --git a/src/spikeinterface/sorters/launcher.py b/src/spikeinterface/sorters/launcher.py
index f32a468a22..12c59cbe45 100644
--- a/src/spikeinterface/sorters/launcher.py
+++ b/src/spikeinterface/sorters/launcher.py
@@ -66,7 +66,8 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=Fal
     engine_kwargs: dict

     return_output: bool, dfault False
-        Return a sorting or None.
+        Return a list of sortings or None.
+        This also overwrites the with_output kwarg passed to run_sorter(with_output=True/False)

     Returns
     -------
@@ -88,8 +89,13 @@
             "processpoolexecutor",
         ), "Only 'loop', 'joblib', and 'processpoolexecutor' support return_output=True."
         out = []
+        for kwargs in job_list:
+            kwargs['with_output'] = True
     else:
         out = None
+        for kwargs in job_list:
+            kwargs['with_output'] = False
+

     if engine == "loop":
         # simple loop in main process
From cb9a2289cf1aab818307265aefa1abfcf2a0329c Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Wed, 27 Sep 2023 13:55:09 +0000
Subject: [PATCH 102/115] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 doc/modules/comparison.rst                    |  2 +-
 src/spikeinterface/comparison/collision.py    | 17 +---
 src/spikeinterface/comparison/correlogram.py  |  6 +-
 .../comparison/groundtruthstudy.py            | 92 ++++++++-----------
 .../comparison/tests/test_groundtruthstudy.py | 28 ++----
 src/spikeinterface/sorters/launcher.py        |  5 +-
 src/spikeinterface/widgets/gtstudy.py         | 31 +++----
 src/spikeinterface/widgets/widget_list.py     |  2 +-
 8 files changed, 74 insertions(+), 109 deletions(-)

diff --git a/doc/modules/comparison.rst b/doc/modules/comparison.rst
index 57e9a0b5ba..76ab7855c6 100644
--- a/doc/modules/comparison.rst
+++ b/doc/modules/comparison.rst
@@ -314,7 +314,7 @@ The all mechanism is based on an intrinsic organization into a "study_folder" wi
     study = GroundTruthStudy.create(study_folder, datasets=datasets, cases=cases, levels=["sorter_name", "dataset"])

-    
+
     # all cases in one function
     study.run_sorters()
diff --git a/src/spikeinterface/comparison/collision.py b/src/spikeinterface/comparison/collision.py
index 01626b34b8..dd04b2c72d 100644
--- a/src/spikeinterface/comparison/collision.py
+++ b/src/spikeinterface/comparison/collision.py
@@ -5,10 +5,6 @@
 import numpy as np

-
-
-
-
 class CollisionGTComparison(GroundTruthComparison):
     """
     This class is an extension of GroundTruthComparison by focusing to 
benchmark spike in collision. @@ -164,7 +160,6 @@ def compute_collision_by_similarity(self, similarity_matrix, unit_ids=None, good return similarities, recall_scores, pair_names - class CollisionGTStudy(GroundTruthStudy): def run_comparisons(self, case_keys=None, exhaustive_gt=True, collision_lag=2.0, nbins=11, **kwargs): _kwargs = dict() @@ -179,11 +174,12 @@ def run_comparisons(self, case_keys=None, exhaustive_gt=True, collision_lag=2.0, def get_lags(self, key): comp = self.comparisons[key] fs = comp.sorting1.get_sampling_frequency() - lags = comp.bins / fs * 1000. + lags = comp.bins / fs * 1000.0 return lags def precompute_scores_by_similarities(self, case_keys=None, good_only=False, min_accuracy=0.9): import sklearn + if case_keys is None: case_keys = self.cases.keys() @@ -197,16 +193,13 @@ def precompute_scores_by_similarities(self, case_keys=None, good_only=False, min similarity = sklearn.metrics.pairwise.cosine_similarity(flat_templates) comp = self.comparisons[key] similarities, recall_scores, pair_names = comp.compute_collision_by_similarity( - similarity, good_only=good_only, min_accuracy=min_accuracy - ) + similarity, good_only=good_only, min_accuracy=min_accuracy + ) self.all_similarities[key] = similarities self.all_recall_scores[key] = recall_scores - def get_mean_over_similarity_range(self, similarity_range, key): - idx = (self.all_similarities[key] >= similarity_range[0]) & ( - self.all_similarities[key] <= similarity_range[1] - ) + idx = (self.all_similarities[key] >= similarity_range[0]) & (self.all_similarities[key] <= similarity_range[1]) all_similarities = self.all_similarities[key][idx] all_recall_scores = self.all_recall_scores[key][idx] diff --git a/src/spikeinterface/comparison/correlogram.py b/src/spikeinterface/comparison/correlogram.py index 150f5afe55..aaffef1887 100644 --- a/src/spikeinterface/comparison/correlogram.py +++ b/src/spikeinterface/comparison/correlogram.py @@ -6,7 +6,6 @@ import numpy as np - class CorrelogramGTComparison(GroundTruthComparison): """ This class is an extension of GroundTruthComparison by focusing @@ -112,9 +111,10 @@ def compute_correlogram_by_similarity(self, similarity_matrix, window_ms=None): return similarities, errors - class CorrelogramGTStudy(GroundTruthStudy): - def run_comparisons(self, case_keys=None, exhaustive_gt=True, window_ms=100.0, bin_ms=1.0, well_detected_score=0.8, **kwargs): + def run_comparisons( + self, case_keys=None, exhaustive_gt=True, window_ms=100.0, bin_ms=1.0, well_detected_score=0.8, **kwargs + ): _kwargs = dict() _kwargs.update(kwargs) _kwargs["exhaustive_gt"] = exhaustive_gt diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py index eb430f69bd..d43727cb44 100644 --- a/src/spikeinterface/comparison/groundtruthstudy.py +++ b/src/spikeinterface/comparison/groundtruthstudy.py @@ -32,17 +32,18 @@ class GroundTruthStudy: * several sorters for comparisons * same sorter with differents parameters * any combination of these (and more) - + For increased flexibility, cases keys can be a tuple so that we can vary complexity along several "levels" or "axis" (paremeters or sorters). In this case, the result dataframes will have `MultiIndex` to handle the different levels. - - A ground-truth dataset is made of a `Recording` and a `Sorting` object. For example, it can be a simulated dataset with MEArec or internally generated (see + + A ground-truth dataset is made of a `Recording` and a `Sorting` object. 
For example, it can be a simulated dataset with MEArec or internally generated (see :py:fun:`~spikeinterface.core.generate.generate_ground_truth_recording()`). - + This GroundTruthStudy have been refactor in version 0.100 to be more flexible than previous versions. Note that the underlying folder structure is not backward compatible! """ + def __init__(self, study_folder): self.folder = Path(study_folder) @@ -55,7 +56,6 @@ def __init__(self, study_folder): @classmethod def create(cls, study_folder, datasets={}, cases={}, levels=None): - # check that cases keys are homogeneous key0 = list(cases.keys())[0] if isinstance(key0, str): @@ -67,7 +67,9 @@ def create(cls, study_folder, datasets={}, cases={}, levels=None): elif isinstance(key0, tuple): assert all(isinstance(key, tuple) for key in cases.keys()), "Keys for cases are not homogeneous" num_levels = len(key0) - assert all(len(key) == num_levels for key in cases.keys()), "Keys for cases are not homogeneous, tuple negth differ" + assert all( + len(key) == num_levels for key in cases.keys() + ), "Keys for cases are not homogeneous, tuple negth differ" if levels is None: levels = [f"level{i}" for i in range(num_levels)] else: @@ -76,7 +78,6 @@ def create(cls, study_folder, datasets={}, cases={}, levels=None): else: raise ValueError("Keys for cases must str or tuple") - study_folder = Path(study_folder) study_folder.mkdir(exist_ok=False, parents=True) @@ -98,8 +99,7 @@ def create(cls, study_folder, datasets={}, cases={}, levels=None): # sortings are pickled + saved as NumpyFolderSorting gt_sorting.dump_to_pickle(study_folder / f"datasets/gt_sortings/{key}.pickle") gt_sorting.save(format="numpy_folder", folder=study_folder / f"datasets/gt_sortings/{key}") - - + info = {} info["levels"] = levels (study_folder / "info.json").write_text(json.dumps(info, indent=4), encoding="utf8") @@ -109,14 +109,13 @@ def create(cls, study_folder, datasets={}, cases={}, levels=None): return cls(study_folder) - def scan_folder(self): if not (self.folder / "datasets").exists(): raise ValueError(f"This is folder is not a GroundTruthStudy : {self.folder.absolute()}") with open(self.folder / "info.json", "r") as f: self.info = json.load(f) - + self.levels = self.info["levels"] for rec_file in (self.folder / "datasets" / "recordings").glob("*.pickle"): @@ -124,7 +123,7 @@ def scan_folder(self): rec = load_extractor(rec_file) gt_sorting = load_extractor(self.folder / f"datasets" / "gt_sortings" / key) self.datasets[key] = (rec, gt_sorting) - + with open(self.folder / "cases.pickle", "rb") as f: self.cases = pickle.load(f) @@ -139,7 +138,6 @@ def scan_folder(self): sorting = None self.sortings[key] = sorting - def __repr__(self): t = f"{self.__class__.__name__} {self.folder.stem} \n" t += f" datasets: {len(self.datasets)} {list(self.datasets.keys())}\n" @@ -157,7 +155,7 @@ def key_to_str(self, key): else: raise ValueError("Keys for cases must str or tuple") - def run_sorters(self, case_keys=None, engine='loop', engine_kwargs={}, keep=True, verbose=False): + def run_sorters(self, case_keys=None, engine="loop", engine_kwargs={}, keep=True, verbose=False): if case_keys is None: case_keys = self.cases.keys() @@ -187,15 +185,15 @@ def run_sorters(self, case_keys=None, engine='loop', engine_kwargs={}, keep=True if log_file.exists(): log_file.unlink() - params = self.cases[key]["run_sorter_params"].copy() # this ensure that sorter_name is given recording, _ = self.datasets[self.cases[key]["dataset"]] sorter_name = params.pop("sorter_name") - job = dict(sorter_name=sorter_name, 
- recording=recording, - output_folder=sorter_folder, - ) + job = dict( + sorter_name=sorter_name, + recording=recording, + output_folder=sorter_folder, + ) job.update(params) # the verbose is overwritten and global to all run_sorters job["verbose"] = verbose @@ -205,25 +203,25 @@ def run_sorters(self, case_keys=None, engine='loop', engine_kwargs={}, keep=True run_sorter_jobs(job_list, engine=engine, engine_kwargs=engine_kwargs, return_output=False) # TODO later create a list in laucher for engine blocking and non-blocking - if engine not in ("slurm", ): + if engine not in ("slurm",): self.copy_sortings(case_keys) def copy_sortings(self, case_keys=None, force=True): if case_keys is None: case_keys = self.cases.keys() - + for key in case_keys: sorting_folder = self.folder / "sortings" / self.key_to_str(key) sorter_folder = self.folder / "sorters" / self.key_to_str(key) log_file = self.folder / "sortings" / "run_logs" / f"{self.key_to_str(key)}.json" - if (sorter_folder / "spikeinterface_log.json").exists(): - sorting = read_sorter_folder(sorter_folder, raise_error=False, - register_recording=False, sorting_info=False) + sorting = read_sorter_folder( + sorter_folder, raise_error=False, register_recording=False, sorting_info=False + ) else: sorting = None - + if sorting is not None: if sorting_folder.exists(): if force: @@ -241,7 +239,6 @@ def copy_sortings(self, case_keys=None, force=True): shutil.copyfile(sorter_folder / "spikeinterface_log.json", log_file) def run_comparisons(self, case_keys=None, comparison_class=GroundTruthComparison, **kwargs): - if case_keys is None: case_keys = self.cases.keys() @@ -250,18 +247,19 @@ def run_comparisons(self, case_keys=None, comparison_class=GroundTruthComparison _, gt_sorting = self.datasets[dataset_key] sorting = self.sortings[key] if sorting is None: - self.comparisons[key] = None + self.comparisons[key] = None continue comp = comparison_class(gt_sorting, sorting, **kwargs) self.comparisons[key] = comp def get_run_times(self, case_keys=None): import pandas as pd + if case_keys is None: case_keys = self.cases.keys() log_folder = self.folder / "sortings" / "run_logs" - + run_times = {} for key in case_keys: log_file = log_folder / f"{self.key_to_str(key)}.json" @@ -273,7 +271,6 @@ def get_run_times(self, case_keys=None): return pd.Series(run_times, name="run_time") def extract_waveforms_gt(self, case_keys=None, **extract_kwargs): - if case_keys is None: case_keys = self.cases.keys() @@ -292,11 +289,11 @@ def get_waveform_extractor(self, key): # some recording are not dumpable to json and the waveforms extactor need it! 
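        # (i.e. the waveforms are saved without the recording: they are loaded below
        # with with_recording=False and the recording is re-attached via set_recording())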
# so we load it with and put after # this should be fixed in PR 2027 so remove this after - + dataset_key = self.cases[key]["dataset"] wf_folder = self.folder / "waveforms" / self.key_to_str(dataset_key) we = load_waveforms(wf_folder, with_recording=False) - recording, _ = self.datasets[dataset_key] + recording, _ = self.datasets[dataset_key] we.set_recording(recording) return we @@ -308,7 +305,7 @@ def get_templates(self, key, mode="average"): def compute_metrics(self, case_keys=None, metric_names=["snr", "firing_rate"], force=False): if case_keys is None: case_keys = self.cases.keys() - + done = [] for key in case_keys: dataset_key = self.cases[key]["dataset"] @@ -327,7 +324,7 @@ def compute_metrics(self, case_keys=None, metric_names=["snr", "firing_rate"], f metrics.to_csv(filename, sep="\t", index=True) def get_metrics(self, key): - import pandas as pd + import pandas as pd dataset_key = self.cases[key]["dataset"] @@ -336,17 +333,15 @@ def get_metrics(self, key): return metrics = pd.read_csv(filename, sep="\t", index_col=0) dataset_key = self.cases[key]["dataset"] - recording, gt_sorting = self.datasets[dataset_key] + recording, gt_sorting = self.datasets[dataset_key] metrics.index = gt_sorting.unit_ids return metrics def get_units_snr(self, key): - """ - """ + """ """ return self.get_metrics(key)["snr"] def get_performance_by_unit(self, case_keys=None): - import pandas as pd if case_keys is None: @@ -363,7 +358,7 @@ def get_performance_by_unit(self, case_keys=None): elif isinstance(key, tuple): for col, k in zip(self.levels, key): perf[col] = k - + perf = perf.reset_index() perf_by_unit.append(perf) @@ -371,10 +366,7 @@ def get_performance_by_unit(self, case_keys=None): perf_by_unit = perf_by_unit.set_index(self.levels) return perf_by_unit - def get_count_units( - self, case_keys=None, well_detected_score=None, redundant_score=None, overmerged_score=None - ): - + def get_count_units(self, case_keys=None, well_detected_score=None, redundant_score=None, overmerged_score=None): import pandas as pd if case_keys is None: @@ -385,7 +377,6 @@ def get_count_units( else: index = pd.MultiIndex.from_tuples(case_keys, names=self.levels) - columns = ["num_gt", "num_sorter", "num_well_detected"] comp = self.comparisons[case_keys[0]] if comp.exhaustive_gt: @@ -401,19 +392,12 @@ def get_count_units( count_units.loc[key, "num_gt"] = len(gt_sorting.get_unit_ids()) count_units.loc[key, "num_sorter"] = len(sorting.get_unit_ids()) - count_units.loc[key, "num_well_detected"] = comp.count_well_detected_units( - well_detected_score - ) - + count_units.loc[key, "num_well_detected"] = comp.count_well_detected_units(well_detected_score) + if comp.exhaustive_gt: count_units.loc[key, "num_redundant"] = comp.count_redundant_units(redundant_score) - count_units.loc[key, "num_overmerged"] = comp.count_overmerged_units( - overmerged_score - ) - count_units.loc[key, "num_false_positive"] = comp.count_false_positive_units( - redundant_score - ) + count_units.loc[key, "num_overmerged"] = comp.count_overmerged_units(overmerged_score) + count_units.loc[key, "num_false_positive"] = comp.count_false_positive_units(redundant_score) count_units.loc[key, "num_bad"] = comp.count_bad_units() return count_units - diff --git a/src/spikeinterface/comparison/tests/test_groundtruthstudy.py b/src/spikeinterface/comparison/tests/test_groundtruthstudy.py index 12d764950e..91c8c640e0 100644 --- a/src/spikeinterface/comparison/tests/test_groundtruthstudy.py +++ b/src/spikeinterface/comparison/tests/test_groundtruthstudy.py @@ -7,7 +7,6 @@ 
from spikeinterface.comparison import GroundTruthStudy - if hasattr(pytest, "global_test_folder"): cache_folder = pytest.global_test_folder / "comparison" else: @@ -28,8 +27,8 @@ def simple_preprocess(rec): def create_a_study(study_folder): - rec0, gt_sorting0 = generate_ground_truth_recording(num_channels=4, durations=[30.], seed=42) - rec1, gt_sorting1 = generate_ground_truth_recording(num_channels=4, durations=[30.], seed=91) + rec0, gt_sorting0 = generate_ground_truth_recording(num_channels=4, durations=[30.0], seed=42) + rec1, gt_sorting1 = generate_ground_truth_recording(num_channels=4, durations=[30.0], seed=91) datasets = { "toy_tetrode": (rec0, gt_sorting0), @@ -46,9 +45,7 @@ def create_a_study(study_folder): "run_sorter_params": { "sorter_name": "tridesclous2", }, - "comparison_params": { - - }, + "comparison_params": {}, }, # ("tdc2", "with-preprocess", "probe32"): { @@ -57,11 +54,9 @@ def create_a_study(study_folder): "run_sorter_params": { "sorter_name": "tridesclous2", }, - "comparison_params": { - - }, + "comparison_params": {}, }, - # we comment this at the moement because SC2 is quite slow for testing + # we comment this at the moement because SC2 is quite slow for testing # ("sc2", "no-preprocess", "tetrode"): { # "label": "spykingcircus2 without preprocessing standar params", # "dataset": "toy_tetrode", @@ -69,16 +64,16 @@ def create_a_study(study_folder): # "sorter_name": "spykingcircus2", # }, # "comparison_params": { - # }, # }, } - study = GroundTruthStudy.create(study_folder, datasets=datasets, cases=cases, levels=["sorter_name", "processing", "probe_type"]) + study = GroundTruthStudy.create( + study_folder, datasets=datasets, cases=cases, levels=["sorter_name", "processing", "probe_type"] + ) # print(study) - def test_GroundTruthStudy(): study = GroundTruthStudy(study_folder) print(study) @@ -98,14 +93,11 @@ def test_GroundTruthStudy(): for key in study.cases: metrics = study.get_metrics(key) print(metrics) - + study.get_performance_by_unit() study.get_count_units() - if __name__ == "__main__": setup_module() - test_GroundTruthStudy() - - \ No newline at end of file + test_GroundTruthStudy() diff --git a/src/spikeinterface/sorters/launcher.py b/src/spikeinterface/sorters/launcher.py index 12c59cbe45..704f6843f2 100644 --- a/src/spikeinterface/sorters/launcher.py +++ b/src/spikeinterface/sorters/launcher.py @@ -90,12 +90,11 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=Fal ), "Only 'loop', 'joblib', and 'processpoolexecutor' support return_output=True." 
out = [] for kwargs in job_list: - kwargs['with_output'] = True + kwargs["with_output"] = True else: out = None for kwargs in job_list: - kwargs['with_output'] = False - + kwargs["with_output"] = False if engine == "loop": # simple loop in main process diff --git a/src/spikeinterface/widgets/gtstudy.py b/src/spikeinterface/widgets/gtstudy.py index 438858beae..6a27b78dec 100644 --- a/src/spikeinterface/widgets/gtstudy.py +++ b/src/spikeinterface/widgets/gtstudy.py @@ -29,7 +29,6 @@ def __init__( backend=None, **backend_kwargs, ): - if case_keys is None: case_keys = list(study.cases.keys()) @@ -53,9 +52,8 @@ def plot_matplotlib(self, data_plot, **backend_kwargs): label = dp.study.cases[key]["label"] rt = dp.run_times.loc[key] self.ax.bar(i, rt, width=0.8, label=label) - - self.ax.legend() + self.ax.legend() # TODO : plot optionally average on some levels using group by @@ -80,13 +78,12 @@ def __init__( backend=None, **backend_kwargs, ): - if case_keys is None: case_keys = list(study.cases.keys()) plot_data = dict( study=study, - count_units = study.get_count_units(case_keys=case_keys), + count_units=study.get_count_units(case_keys=case_keys), case_keys=case_keys, ) @@ -107,8 +104,7 @@ def plot_matplotlib(self, data_plot, **backend_kwargs): ncol = len(columns) - colors = get_some_colors(columns, color_engine="auto", - map_name="hot") + colors = get_some_colors(columns, color_engine="auto", map_name="hot") colors["num_well_detected"] = "green" xticklabels = [] @@ -118,7 +114,7 @@ def plot_matplotlib(self, data_plot, **backend_kwargs): y = dp.count_units.loc[key, col] if not "well_detected" in col: y = -y - + if i == 0: label = col.replace("num_", "").replace("_", " ").title() else: @@ -158,7 +154,6 @@ def __init__( backend=None, **backend_kwargs, ): - if case_keys is None: case_keys = list(study.cases.keys()) @@ -186,11 +181,15 @@ def plot_matplotlib(self, data_plot, **backend_kwargs): if dp.mode == "swarm": levels = perfs.index.names - df = pd.melt(perfs.reset_index(), id_vars=levels, var_name='Metric', value_name='Score', - value_vars=('accuracy','precision', 'recall')) - df['x'] = df.apply(lambda r: ' '.join([r[col] for col in levels]), axis=1) - sns.swarmplot(data=df, x='x', y='Score', hue='Metric', dodge=True) - + df = pd.melt( + perfs.reset_index(), + id_vars=levels, + var_name="Metric", + value_name="Score", + value_vars=("accuracy", "precision", "recall"), + ) + df["x"] = df.apply(lambda r: " ".join([r[col] for col in levels]), axis=1) + sns.swarmplot(data=df, x="x", y="Score", hue="Metric", dodge=True) class StudyPerformancesVsMetrics(BaseWidget): @@ -218,7 +217,6 @@ def __init__( backend=None, **backend_kwargs, ): - if case_keys is None: case_keys = list(study.cases.keys()) @@ -239,7 +237,6 @@ def plot_matplotlib(self, data_plot, **backend_kwargs): dp = to_attr(data_plot) self.figure, self.axes, self.ax = make_mpl_figure(**backend_kwargs) - study = dp.study perfs = study.get_performance_by_unit(case_keys=dp.case_keys) @@ -253,4 +250,4 @@ def plot_matplotlib(self, data_plot, **backend_kwargs): self.ax.legend() self.ax.set_xlim(0, max_metric * 1.05) - self.ax.set_ylim(0, 1.05) \ No newline at end of file + self.ax.set_ylim(0, 1.05) diff --git a/src/spikeinterface/widgets/widget_list.py b/src/spikeinterface/widgets/widget_list.py index ce853f16bf..ed77de6128 100644 --- a/src/spikeinterface/widgets/widget_list.py +++ b/src/spikeinterface/widgets/widget_list.py @@ -53,7 +53,7 @@ StudyRunTimesWidget, StudyUnitCountsWidget, StudyPerformances, - StudyPerformancesVsMetrics + 
StudyPerformancesVsMetrics, ] From 0a2c0f618b11374558f536147845a1cbc6710661 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Wed, 27 Sep 2023 16:01:21 +0200 Subject: [PATCH 103/115] Default SVD Peeler is now good to go --- src/spikeinterface/sorters/internal/spyking_circus2.py | 2 +- .../sortingcomponents/clustering/clustering_tools.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/spikeinterface/sorters/internal/spyking_circus2.py b/src/spikeinterface/sorters/internal/spyking_circus2.py index db3d88f116..7097b9e56b 100644 --- a/src/spikeinterface/sorters/internal/spyking_circus2.py +++ b/src/spikeinterface/sorters/internal/spyking_circus2.py @@ -152,7 +152,7 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): matching_job_params["chunk_duration"] = "100ms" spikes = find_spikes_from_templates( - recording_f, method="circus-omp", method_kwargs=matching_params, **matching_job_params + recording_f, method="circus-omp-svd", method_kwargs=matching_params, **matching_job_params ) if verbose: diff --git a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py index 17c38e2f8a..273b1402fe 100644 --- a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py +++ b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py @@ -533,7 +533,7 @@ def remove_duplicates( def remove_duplicates_via_matching( - waveform_extractor, noise_levels, peak_labels, method_kwargs={}, job_kwargs={}, tmp_folder=None, method="circus-omp" + waveform_extractor, noise_levels, peak_labels, method_kwargs={}, job_kwargs={}, tmp_folder=None, method="circus-omp-svd" ): from spikeinterface.sortingcomponents.matching import find_spikes_from_templates from spikeinterface import get_noise_levels From 5fbc88d416f863784ee7ed890c45f04726d4dc5a Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 27 Sep 2023 14:01:43 +0000 Subject: [PATCH 104/115] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../sortingcomponents/clustering/clustering_tools.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py index 273b1402fe..af3a9cb86a 100644 --- a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py +++ b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py @@ -533,7 +533,13 @@ def remove_duplicates( def remove_duplicates_via_matching( - waveform_extractor, noise_levels, peak_labels, method_kwargs={}, job_kwargs={}, tmp_folder=None, method="circus-omp-svd" + waveform_extractor, + noise_levels, + peak_labels, + method_kwargs={}, + job_kwargs={}, + tmp_folder=None, + method="circus-omp-svd", ): from spikeinterface.sortingcomponents.matching import find_spikes_from_templates from spikeinterface import get_noise_levels From 9f45f2e5757e9f3dcb890a65d69bdecbca8c7eb6 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Wed, 27 Sep 2023 17:35:31 +0200 Subject: [PATCH 105/115] Enhance the clustering --- .../sorters/internal/spyking_circus2.py | 2 +- .../clustering/random_projections.py | 106 +++++++++--------- .../sortingcomponents/features_from_peaks.py | 27 +++-- 3 files changed, 71 insertions(+), 64 deletions(-) diff --git a/src/spikeinterface/sorters/internal/spyking_circus2.py 
b/src/spikeinterface/sorters/internal/spyking_circus2.py index 7097b9e56b..55a36d26d5 100644 --- a/src/spikeinterface/sorters/internal/spyking_circus2.py +++ b/src/spikeinterface/sorters/internal/spyking_circus2.py @@ -20,7 +20,7 @@ class Spykingcircus2Sorter(ComponentsBasedSorter): sorter_name = "spykingcircus2" _default_params = { - "general": {"ms_before": 2, "ms_after": 2, "radius_um": 75}, + "general": {"ms_before": 2, "ms_after": 2, "radius_um": 100}, "waveforms": {"max_spikes_per_unit": 200, "overwrite": True, "sparse": True, "method": "ptp", "threshold": 1}, "filtering": {"dtype": "float32"}, "detection": {"peak_sign": "neg", "detect_threshold": 5}, diff --git a/src/spikeinterface/sortingcomponents/clustering/random_projections.py b/src/spikeinterface/sortingcomponents/clustering/random_projections.py index be8ecd6702..8c0cab07c6 100644 --- a/src/spikeinterface/sortingcomponents/clustering/random_projections.py +++ b/src/spikeinterface/sortingcomponents/clustering/random_projections.py @@ -18,7 +18,9 @@ from .clustering_tools import remove_duplicates, remove_duplicates_via_matching, remove_duplicates_via_dip from spikeinterface.core import NumpySorting from spikeinterface.core import extract_waveforms -from spikeinterface.sortingcomponents.features_from_peaks import compute_features_from_peaks, EnergyFeature +from spikeinterface.sortingcomponents.waveforms.savgol_denoiser import SavGolDenoiser +from spikeinterface.sortingcomponents.features_from_peaks import RandomProjectionsFeature +from spikeinterface.core.node_pipeline import run_node_pipeline, ExtractDenseWaveforms, PeakRetriever class RandomProjectionClustering: @@ -34,17 +36,17 @@ class RandomProjectionClustering: "cluster_selection_method": "leaf", }, "cleaning_kwargs": {}, + "waveforms" : {"ms_before" : 2, "ms_after" : 2, "max_spikes_per_unit": 100}, "radius_um": 100, - "max_spikes_per_unit": 200, "selection_method": "closest_to_centroid", - "nb_projections": {"ptp": 8, "energy": 2}, - "ms_before": 1.5, - "ms_after": 1.5, + "nb_projections": 10, + "ms_before": 1, + "ms_after": 1, "random_seed": 42, - "shared_memory": False, - "min_values": {"ptp": 0, "energy": 0}, + "smoothing_kwargs" : {"window_length_ms" : 1}, + "shared_memory": True, "tmp_folder": None, - "job_kwargs": {"n_jobs": os.cpu_count(), "chunk_memory": "10M", "verbose": True, "progress_bar": True}, + "job_kwargs": {"n_jobs": os.cpu_count(), "chunk_memory": "100M", "verbose": True, "progress_bar": True}, } @classmethod @@ -74,50 +76,52 @@ def main_function(cls, recording, peaks, params): np.random.seed(d["random_seed"]) - features_params = {} - features_list = [] - - noise_snippets = None - - for proj_type in ["ptp", "energy"]: - if d["nb_projections"][proj_type] > 0: - features_list += [f"random_projections_{proj_type}"] - - if d["min_values"][proj_type] == "auto": - if noise_snippets is None: - num_segments = recording.get_num_segments() - num_chunks = 3 * d["max_spikes_per_unit"] // num_segments - noise_snippets = get_random_data_chunks( - recording, num_chunks_per_segment=num_chunks, chunk_size=num_samples, seed=42 - ) - noise_snippets = noise_snippets.reshape(num_chunks, num_samples, num_chans) - - if proj_type == "energy": - data = np.linalg.norm(noise_snippets, axis=1) - min_values = np.median(data, axis=0) - elif proj_type == "ptp": - data = np.ptp(noise_snippets, axis=1) - min_values = np.median(data, axis=0) - elif d["min_values"][proj_type] > 0: - min_values = d["min_values"][proj_type] - else: - min_values = None - - projections = 
np.random.randn(num_chans, d["nb_projections"][proj_type]) - features_params[f"random_projections_{proj_type}"] = { - "radius_um": params["radius_um"], - "projections": projections, - "min_values": min_values, - } - - features_data = compute_features_from_peaks( - recording, peaks, features_list, features_params, ms_before=1, ms_after=1, **params["job_kwargs"] + if params["tmp_folder"] is None: + name = "".join(random.choices(string.ascii_uppercase + string.digits, k=8)) + tmp_folder = get_global_tmp_folder() / name + else: + tmp_folder = Path(params["tmp_folder"]).absolute() + + ### Then we extract the SVD features + node0 = PeakRetriever(recording, peaks) + node1 = ExtractDenseWaveforms(recording, parents=[node0], return_output=False, + ms_before=params['ms_before'], + ms_after=params['ms_after'] ) - if len(features_data) > 1: - hdbscan_data = np.hstack((features_data[0], features_data[1])) - else: - hdbscan_data = features_data[0] + node2 = SavGolDenoiser(recording, parents=[node0, node1], return_output=False, **params['smoothing_kwargs']) + + projections = np.random.randn(num_chans, d["nb_projections"]) + projections -= projections.mean(0) + projections /= projections.std(0) + + nbefore = int(params['ms_before'] * fs / 1000) + nafter = int(params['ms_after'] * fs / 1000) + nsamples = nbefore + nafter + + import scipy + x = np.random.randn(100, nsamples, num_chans).astype(np.float32) + x = scipy.signal.savgol_filter(x, node2.window_length, node2.order, axis=1) + + ptps = np.ptp(x, axis=1) + a, b = np.histogram(ptps.flatten(), np.linspace(0, 100, 1000)) + ydata = np.cumsum(a)/a.sum() + xdata = b[1:] + + from scipy.optimize import curve_fit + def sigmoid(x, L ,x0, k, b): + y = L / (1 + np.exp(-k*(x-x0))) + b + return (y) + + p0 = [max(ydata), np.median(xdata), 1, min(ydata)] # this is an mandatory initial guess + popt, pcov = curve_fit(sigmoid, xdata, ydata, p0) + + node3 = RandomProjectionsFeature(recording, parents=[node0, node2], return_output=True, + projections=projections, radius_um=params['radius_um']) + + pipeline_nodes = [node0, node1, node2, node3] + + hdbscan_data = run_node_pipeline(recording, pipeline_nodes, params["job_kwargs"]) import sklearn @@ -132,7 +136,7 @@ def main_function(cls, recording, peaks, params): all_indices = np.arange(0, peak_labels.size) - max_spikes = params["max_spikes_per_unit"] + max_spikes = params['waveforms']["max_spikes_per_unit"] selection_method = params["selection_method"] for unit_ind in labels: diff --git a/src/spikeinterface/sortingcomponents/features_from_peaks.py b/src/spikeinterface/sortingcomponents/features_from_peaks.py index bd82ffa0a6..2f1acb6a19 100644 --- a/src/spikeinterface/sortingcomponents/features_from_peaks.py +++ b/src/spikeinterface/sortingcomponents/features_from_peaks.py @@ -184,41 +184,44 @@ def __init__( return_output=True, parents=None, projections=None, - radius_um=150.0, - min_values=None, + sigmoid=None, + radius_um=None ): PipelineNode.__init__(self, recording, return_output=return_output, parents=parents) self.projections = projections - self.radius_um = radius_um - self.min_values = min_values - + self.sigmoid = sigmoid self.contact_locations = recording.get_channel_locations() self.channel_distance = get_channel_distances(recording) self.neighbours_mask = self.channel_distance < radius_um - - self._kwargs.update(dict(projections=projections, radius_um=radius_um, min_values=min_values)) - + self.radius_um = radius_um + self._kwargs.update(dict(projections=projections, sigmoid=sigmoid, radius_um=radius_um)) 
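        # `sigmoid`, when not None, is a (L, x0, k, b) parameter tuple (cf. the
        # curve_fit on the noise ptp distribution in random_projections.py above);
        # compute() multiplies each ptp by this sigmoid so that amplitudes close
        # to the noise level are down-weighted before projection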
self._dtype = recording.get_dtype() def get_dtype(self): return self._dtype + def _sigmoid(self, x): + L, x0, k, b = self.sigmoid + y = L / (1 + np.exp(-k*(x-x0))) + b + return y + def compute(self, traces, peaks, waveforms): all_projections = np.zeros((peaks.size, self.projections.shape[1]), dtype=self._dtype) + for main_chan in np.unique(peaks["channel_index"]): (idx,) = np.nonzero(peaks["channel_index"] == main_chan) (chan_inds,) = np.nonzero(self.neighbours_mask[main_chan]) local_projections = self.projections[chan_inds, :] - wf_ptp = (waveforms[idx][:, :, chan_inds]).ptp(axis=1) + wf_ptp = np.ptp(waveforms[idx][:, :, chan_inds], axis=1) - if self.min_values is not None: - wf_ptp = (wf_ptp / self.min_values[chan_inds]) ** 4 + if self.sigmoid is not None: + wf_ptp *= self._sigmoid(wf_ptp) denom = np.sum(wf_ptp, axis=1) mask = denom != 0 - all_projections[idx[mask]] = np.dot(wf_ptp[mask], local_projections) / (denom[mask][:, np.newaxis]) + return all_projections From 3cbf8f8fc8267ff0bffd8c340514db983e059a0c Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 27 Sep 2023 15:36:51 +0000 Subject: [PATCH 106/115] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../clustering/random_projections.py | 38 +++++++++++-------- .../sortingcomponents/features_from_peaks.py | 8 ++-- 2 files changed, 26 insertions(+), 20 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/clustering/random_projections.py b/src/spikeinterface/sortingcomponents/clustering/random_projections.py index 8c0cab07c6..f8cad2cf3f 100644 --- a/src/spikeinterface/sortingcomponents/clustering/random_projections.py +++ b/src/spikeinterface/sortingcomponents/clustering/random_projections.py @@ -36,14 +36,14 @@ class RandomProjectionClustering: "cluster_selection_method": "leaf", }, "cleaning_kwargs": {}, - "waveforms" : {"ms_before" : 2, "ms_after" : 2, "max_spikes_per_unit": 100}, + "waveforms": {"ms_before": 2, "ms_after": 2, "max_spikes_per_unit": 100}, "radius_um": 100, "selection_method": "closest_to_centroid", "nb_projections": 10, "ms_before": 1, "ms_after": 1, "random_seed": 42, - "smoothing_kwargs" : {"window_length_ms" : 1}, + "smoothing_kwargs": {"window_length_ms": 1}, "shared_memory": True, "tmp_folder": None, "job_kwargs": {"n_jobs": os.cpu_count(), "chunk_memory": "100M", "verbose": True, "progress_bar": True}, @@ -84,40 +84,46 @@ def main_function(cls, recording, peaks, params): ### Then we extract the SVD features node0 = PeakRetriever(recording, peaks) - node1 = ExtractDenseWaveforms(recording, parents=[node0], return_output=False, - ms_before=params['ms_before'], - ms_after=params['ms_after'] + node1 = ExtractDenseWaveforms( + recording, parents=[node0], return_output=False, ms_before=params["ms_before"], ms_after=params["ms_after"] ) - node2 = SavGolDenoiser(recording, parents=[node0, node1], return_output=False, **params['smoothing_kwargs']) + node2 = SavGolDenoiser(recording, parents=[node0, node1], return_output=False, **params["smoothing_kwargs"]) projections = np.random.randn(num_chans, d["nb_projections"]) projections -= projections.mean(0) projections /= projections.std(0) - nbefore = int(params['ms_before'] * fs / 1000) - nafter = int(params['ms_after'] * fs / 1000) + nbefore = int(params["ms_before"] * fs / 1000) + nafter = int(params["ms_after"] * fs / 1000) nsamples = nbefore + nafter import scipy + x = np.random.randn(100, nsamples, num_chans).astype(np.float32) x = 
scipy.signal.savgol_filter(x, node2.window_length, node2.order, axis=1) ptps = np.ptp(x, axis=1) a, b = np.histogram(ptps.flatten(), np.linspace(0, 100, 1000)) - ydata = np.cumsum(a)/a.sum() + ydata = np.cumsum(a) / a.sum() xdata = b[1:] from scipy.optimize import curve_fit - def sigmoid(x, L ,x0, k, b): - y = L / (1 + np.exp(-k*(x-x0))) + b - return (y) - p0 = [max(ydata), np.median(xdata), 1, min(ydata)] # this is an mandatory initial guess + def sigmoid(x, L, x0, k, b): + y = L / (1 + np.exp(-k * (x - x0))) + b + return y + + p0 = [max(ydata), np.median(xdata), 1, min(ydata)] # this is an mandatory initial guess popt, pcov = curve_fit(sigmoid, xdata, ydata, p0) - node3 = RandomProjectionsFeature(recording, parents=[node0, node2], return_output=True, - projections=projections, radius_um=params['radius_um']) + node3 = RandomProjectionsFeature( + recording, + parents=[node0, node2], + return_output=True, + projections=projections, + radius_um=params["radius_um"], + ) pipeline_nodes = [node0, node1, node2, node3] @@ -136,7 +142,7 @@ def sigmoid(x, L ,x0, k, b): all_indices = np.arange(0, peak_labels.size) - max_spikes = params['waveforms']["max_spikes_per_unit"] + max_spikes = params["waveforms"]["max_spikes_per_unit"] selection_method = params["selection_method"] for unit_ind in labels: diff --git a/src/spikeinterface/sortingcomponents/features_from_peaks.py b/src/spikeinterface/sortingcomponents/features_from_peaks.py index 2f1acb6a19..b534c2356d 100644 --- a/src/spikeinterface/sortingcomponents/features_from_peaks.py +++ b/src/spikeinterface/sortingcomponents/features_from_peaks.py @@ -185,7 +185,7 @@ def __init__( parents=None, projections=None, sigmoid=None, - radius_um=None + radius_um=None, ): PipelineNode.__init__(self, recording, return_output=return_output, parents=parents) @@ -203,12 +203,12 @@ def get_dtype(self): def _sigmoid(self, x): L, x0, k, b = self.sigmoid - y = L / (1 + np.exp(-k*(x-x0))) + b + y = L / (1 + np.exp(-k * (x - x0))) + b return y def compute(self, traces, peaks, waveforms): all_projections = np.zeros((peaks.size, self.projections.shape[1]), dtype=self._dtype) - + for main_chan in np.unique(peaks["channel_index"]): (idx,) = np.nonzero(peaks["channel_index"] == main_chan) (chan_inds,) = np.nonzero(self.neighbours_mask[main_chan]) @@ -221,7 +221,7 @@ def compute(self, traces, peaks, waveforms): denom = np.sum(wf_ptp, axis=1) mask = denom != 0 all_projections[idx[mask]] = np.dot(wf_ptp[mask], local_projections) / (denom[mask][:, np.newaxis]) - + return all_projections From 9dde3760dd62803ea54d5c1f42d560fd907380a0 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Wed, 27 Sep 2023 21:31:11 +0200 Subject: [PATCH 107/115] title --- .../benchmark/benchmark_motion_estimation.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_estimation.py b/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_estimation.py index a47b97fb6d..c505676c05 100644 --- a/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_estimation.py +++ b/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_estimation.py @@ -500,8 +500,8 @@ def plot_errors_several_benchmarks(benchmarks, axes=None, show_legend=True, colo axes[2].plot(benchmark.spatial_bins, depth_error, label=benchmark.title, color=c) ax0 = ax = axes[0] - ax.set_xlabel("time [s]") - ax.set_ylabel("error [um]") + ax.set_xlabel("Time [s]") + ax.set_ylabel("Error [μm]") if show_legend: ax.legend() _simpleaxis(ax) @@ 
-514,7 +514,7 @@ def plot_errors_several_benchmarks(benchmarks, axes=None, show_legend=True, colo ax2 = axes[2] ax2.set_yticks([]) - ax2.set_xlabel("depth [um]") + ax2.set_xlabel("Depth [μm]") # ax.set_ylabel('error') channel_positions = benchmark.recording.get_channel_locations() probe_y_min, probe_y_max = channel_positions[:, 1].min(), channel_positions[:, 1].max() From daddd8cef722a35233dbed530e14775de87b8caa Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Thu, 28 Sep 2023 09:16:51 +0200 Subject: [PATCH 108/115] Adding a lookup table --- .../sortingcomponents/matching/circus.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index 5775589321..1d13eca1df 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -128,6 +128,7 @@ def _freq_domain_conv(in1, in2, axes, shape, cache, calc_fast_len=True): return ret + def compute_overlaps(templates, num_samples, num_channels, sparsities): num_templates = len(templates) @@ -474,7 +475,6 @@ def main_function(cls, traces, d): return spikes - class CircusOMPSVDPeeler(BaseTemplateMatchingEngine): """ Orthogonal Matching Pursuit inspired from Spyking Circus sorter @@ -632,6 +632,12 @@ def initialize_and_check_kwargs(cls, recording, kwargs): d["num_templates"] = len(d["templates"]) d["ignored_ids"] = np.array(d["ignored_ids"]) + d["unit_overlaps_tables"] = {} + for i in range(d["num_templates"]): + d["unit_overlaps_tables"][i] = np.zeros(d["num_templates"], dtype=int) + d["unit_overlaps_tables"][i][d["unit_overlaps_indices"][i]] = np.arange(len(d["unit_overlaps_indices"][i])) + + omp_min_sps = d["omp_min_sps"] # d["stop_criteria"] = omp_min_sps * np.sqrt(d["noise_levels"].sum() * d["num_samples"]) d["stop_criteria"] = omp_min_sps * np.maximum(d["norms"], np.sqrt(d["noise_levels"].sum() * d["num_samples"])) @@ -720,6 +726,7 @@ def main_function(cls, traces, d): local_overlaps = overlaps[best_cluster_ind] overlapping_templates = d["unit_overlaps_indices"][best_cluster_ind] + table = d["unit_overlaps_tables"][best_cluster_ind] if num_selection == M.shape[0]: Z = np.zeros((2 * num_selection, 2 * num_selection), dtype=np.float32) @@ -728,9 +735,6 @@ def main_function(cls, traces, d): mask = np.isin(myindices, overlapping_templates) a, b = myindices[mask], myline[mask] - - table = np.zeros(num_templates, dtype=int) - table[overlapping_templates] = np.arange(len(overlapping_templates)) M[num_selection, idx[mask]] = local_overlaps[table[a], b] if vicinity == 0: From d7dcbe05f082f5ecd93d9233b9f5ca30ae51a8f4 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 28 Sep 2023 07:17:14 +0000 Subject: [PATCH 109/115] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/sortingcomponents/matching/circus.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index 1d13eca1df..44c394aec9 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -128,7 +128,6 @@ def _freq_domain_conv(in1, in2, axes, shape, cache, calc_fast_len=True): return ret - def compute_overlaps(templates, num_samples, num_channels, sparsities): 
num_templates = len(templates) @@ -475,6 +474,7 @@ def main_function(cls, traces, d): return spikes + class CircusOMPSVDPeeler(BaseTemplateMatchingEngine): """ Orthogonal Matching Pursuit inspired from Spyking Circus sorter @@ -637,7 +637,6 @@ def initialize_and_check_kwargs(cls, recording, kwargs): d["unit_overlaps_tables"][i] = np.zeros(d["num_templates"], dtype=int) d["unit_overlaps_tables"][i][d["unit_overlaps_indices"][i]] = np.arange(len(d["unit_overlaps_indices"][i])) - omp_min_sps = d["omp_min_sps"] # d["stop_criteria"] = omp_min_sps * np.sqrt(d["noise_levels"].sum() * d["num_samples"]) d["stop_criteria"] = omp_min_sps * np.maximum(d["norms"], np.sqrt(d["noise_levels"].sum() * d["num_samples"])) From d623da38f38924b9c5857abdeccf16891c729bc7 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Thu, 28 Sep 2023 10:21:08 +0200 Subject: [PATCH 110/115] typos for cleaning via matching --- .../clustering/clustering_tools.py | 2 +- .../clustering/random_projections.py | 2 +- .../sortingcomponents/matching/circus.py | 15 ++++++++++----- 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py index af3a9cb86a..28a1a63065 100644 --- a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py +++ b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py @@ -613,7 +613,7 @@ def remove_duplicates_via_matching( spikes, computed = find_spikes_from_templates( sub_recording, method=method, method_kwargs=method_kwargs, extra_outputs=True, **job_kwargs ) - if method == "circus-omp-vsd": + if method == "circus-omp-svd": method_kwargs.update( { "overlaps": computed["overlaps"], diff --git a/src/spikeinterface/sortingcomponents/clustering/random_projections.py b/src/spikeinterface/sortingcomponents/clustering/random_projections.py index f8cad2cf3f..df9290a1f5 100644 --- a/src/spikeinterface/sortingcomponents/clustering/random_projections.py +++ b/src/spikeinterface/sortingcomponents/clustering/random_projections.py @@ -127,7 +127,7 @@ def sigmoid(x, L, x0, k, b): pipeline_nodes = [node0, node1, node2, node3] - hdbscan_data = run_node_pipeline(recording, pipeline_nodes, params["job_kwargs"]) + hdbscan_data = run_node_pipeline(recording, pipeline_nodes, params["job_kwargs"], job_name="extracting features") import sklearn diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index 1d13eca1df..9e02aa4ff6 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -686,13 +686,18 @@ def main_function(cls, traces, d): scalar_products = np.zeros(conv_shape, dtype=np.float32) # Filter using overlap-and-add convolution - spatially_filtered_data = np.matmul(d["spatial"], traces.T[np.newaxis, :, :]) - scaled_filtered_data = spatially_filtered_data * d["singular"] - objective_by_rank = scipy.signal.oaconvolve(scaled_filtered_data, d["temporal"], axes=2, mode="valid") - scalar_products += np.sum(objective_by_rank, axis=0) - if len(ignored_ids) > 0: + mask = ~np.isin(np.arange(num_templates), ignored_ids) + spatially_filtered_data = np.matmul(d["spatial"][:, mask, :], traces.T[np.newaxis, :, :]) + scaled_filtered_data = spatially_filtered_data * d["singular"][:, mask, :] + objective_by_rank = scipy.signal.oaconvolve(scaled_filtered_data, d["temporal"][:, mask, :], axes=2, mode="valid") + scalar_products[mask] += 
np.sum(objective_by_rank, axis=0) scalar_products[ignored_ids] = -np.inf + else: + spatially_filtered_data = np.matmul(d["spatial"], traces.T[np.newaxis, :, :]) + scaled_filtered_data = spatially_filtered_data * d["singular"] + objective_by_rank = scipy.signal.oaconvolve(scaled_filtered_data, d["temporal"], axes=2, mode="valid") + scalar_products += np.sum(objective_by_rank, axis=0) num_spikes = 0 From fdb84668137ba71b1ca36787032551da52764842 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 28 Sep 2023 08:21:36 +0000 Subject: [PATCH 111/115] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../sortingcomponents/clustering/random_projections.py | 4 +++- src/spikeinterface/sortingcomponents/matching/circus.py | 8 +++++--- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/clustering/random_projections.py b/src/spikeinterface/sortingcomponents/clustering/random_projections.py index df9290a1f5..864548e7d4 100644 --- a/src/spikeinterface/sortingcomponents/clustering/random_projections.py +++ b/src/spikeinterface/sortingcomponents/clustering/random_projections.py @@ -127,7 +127,9 @@ def sigmoid(x, L, x0, k, b): pipeline_nodes = [node0, node1, node2, node3] - hdbscan_data = run_node_pipeline(recording, pipeline_nodes, params["job_kwargs"], job_name="extracting features") + hdbscan_data = run_node_pipeline( + recording, pipeline_nodes, params["job_kwargs"], job_name="extracting features" + ) import sklearn diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index b963447ba2..358691cd25 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -687,16 +687,18 @@ def main_function(cls, traces, d): # Filter using overlap-and-add convolution if len(ignored_ids) > 0: mask = ~np.isin(np.arange(num_templates), ignored_ids) - spatially_filtered_data = np.matmul(d["spatial"][:, mask, :], traces.T[np.newaxis, :, :]) + spatially_filtered_data = np.matmul(d["spatial"][:, mask, :], traces.T[np.newaxis, :, :]) scaled_filtered_data = spatially_filtered_data * d["singular"][:, mask, :] - objective_by_rank = scipy.signal.oaconvolve(scaled_filtered_data, d["temporal"][:, mask, :], axes=2, mode="valid") + objective_by_rank = scipy.signal.oaconvolve( + scaled_filtered_data, d["temporal"][:, mask, :], axes=2, mode="valid" + ) scalar_products[mask] += np.sum(objective_by_rank, axis=0) scalar_products[ignored_ids] = -np.inf else: spatially_filtered_data = np.matmul(d["spatial"], traces.T[np.newaxis, :, :]) scaled_filtered_data = spatially_filtered_data * d["singular"] objective_by_rank = scipy.signal.oaconvolve(scaled_filtered_data, d["temporal"], axes=2, mode="valid") - scalar_products += np.sum(objective_by_rank, axis=0) + scalar_products += np.sum(objective_by_rank, axis=0) num_spikes = 0 From 986fe6f50fd33a81fd3bc8ff26e05db22964bf5d Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Thu, 28 Sep 2023 13:15:14 +0200 Subject: [PATCH 112/115] CellExplorer: fix tests and deprecation (#2048) * CellExplorer: fix tests and deprecation * Drop session_info_matfile_path from __init__ --- .../cellexplorersortingextractor.py | 21 ------------------- .../tests/test_cellexplorerextractor.py | 2 +- 2 files changed, 1 insertion(+), 22 deletions(-) diff --git 
a/src/spikeinterface/extractors/cellexplorersortingextractor.py b/src/spikeinterface/extractors/cellexplorersortingextractor.py index b40b998103..31241a4147 100644 --- a/src/spikeinterface/extractors/cellexplorersortingextractor.py +++ b/src/spikeinterface/extractors/cellexplorersortingextractor.py @@ -40,7 +40,6 @@ def __init__( sampling_frequency: float | None = None, session_info_file_path: str | Path | None = None, spikes_matfile_path: str | Path | None = None, - session_info_matfile_path: str | Path | None = None, ): try: from pymatreader import read_mat @@ -67,26 +66,6 @@ def __init__( ) file_path = spikes_matfile_path if file_path is None else file_path - if session_info_matfile_path is not None: - # Raise an error if the warning period has expired - deprecation_issued = datetime.datetime(2023, 4, 1) - deprecation_deadline = deprecation_issued + datetime.timedelta(days=180) - if datetime.datetime.now() > deprecation_deadline: - raise ValueError( - "The session_info_matfile_path argument is no longer supported in. Use session_info_file_path instead." - ) - - # Otherwise, issue a DeprecationWarning - else: - warnings.warn( - "The session_info_matfile_path argument is deprecated and will be removed in six months. " - "Use session_info_file_path instead.", - DeprecationWarning, - ) - session_info_file_path = ( - session_info_matfile_path if session_info_file_path is None else session_info_file_path - ) - self.spikes_cellinfo_path = Path(file_path) self.session_path = self.spikes_cellinfo_path.parent self.session_id = self.spikes_cellinfo_path.stem.split(".")[0] diff --git a/src/spikeinterface/extractors/tests/test_cellexplorerextractor.py b/src/spikeinterface/extractors/tests/test_cellexplorerextractor.py index 35de8a23e2..c4c8d0c993 100644 --- a/src/spikeinterface/extractors/tests/test_cellexplorerextractor.py +++ b/src/spikeinterface/extractors/tests/test_cellexplorerextractor.py @@ -26,7 +26,7 @@ class CellExplorerSortingTest(SortingCommonTestSuite, unittest.TestCase): ( "cellexplorer/dataset_2/20170504_396um_0um_merge.spikes.cellinfo.mat", { - "session_info_matfile_path": local_folder + "session_info_file_path": local_folder / "cellexplorer/dataset_2/20170504_396um_0um_merge.sessionInfo.mat" }, ), From 719ffc9466f2f5f91ed14129fd514379a4c5962f Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Thu, 28 Sep 2023 13:15:39 +0200 Subject: [PATCH 113/115] minor corrections to matlab documentation (#2047) --- doc/how_to/load_matlab_data.rst | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/doc/how_to/load_matlab_data.rst b/doc/how_to/load_matlab_data.rst index aaca718096..e12d83810a 100644 --- a/doc/how_to/load_matlab_data.rst +++ b/doc/how_to/load_matlab_data.rst @@ -30,7 +30,7 @@ Here, we present a MATLAB code that creates a random dataset and writes it to a Loading Data in SpikeInterface ------------------------------ -After executing the above MATLAB code, a binary file named `your_data_as_a_binary.bin` will be created in your MATLAB directory. To load this file in Python, you'll need its full path. +After executing the above MATLAB code, a binary file named :code:`your_data_as_a_binary.bin` will be created in your MATLAB directory. To load this file in Python, you'll need its full path. 
Use the following Python script to load the binary data into SpikeInterface: @@ -55,7 +55,7 @@ Use the following Python script to load the binary data into SpikeInterface: # Load data using SpikeInterface recording = si.read_binary(file_path, sampling_frequency=sampling_frequency, - num_channels=num_channels, dtype=dtype) + num_channels=num_channels, dtype=dtype) # Confirm that the data was loaded correctly by comparing the data shapes and see they match the MATLAB data print(recording.get_num_frames(), recording.get_num_channels()) @@ -65,18 +65,18 @@ Follow the steps above to seamlessly import your MATLAB data into SpikeInterface Common Pitfalls & Tips ---------------------- -1. **Data Shape**: Make sure your MATLAB data matrix's first dimension is samples/time and the second is channels. If your time is in the second dimension, use `time_axis=1` in `si.read_binary()`. +1. **Data Shape**: Make sure your MATLAB data matrix's first dimension is samples/time and the second is channels. If your time is in the second dimension, use :code:`time_axis=1` in :code:`si.read_binary()`. 2. **File Path**: Always double-check the Python file path. 3. **Data Type Consistency**: Ensure data types between MATLAB and Python are consistent. MATLAB's `double` is equivalent to Numpy's `float64`. 4. **Sampling Frequency**: Set the appropriate sampling frequency in Hz for SpikeInterface. -5. **Transition to Python**: Moving from MATLAB to Python can be challenging. For newcomers to Python, consider reviewing numpy's [Numpy for MATLAB Users](https://numpy.org/doc/stable/user/numpy-for-matlab-users.html) guide. +5. **Transition to Python**: Moving from MATLAB to Python can be challenging. For newcomers to Python, consider reviewing numpy's `Numpy for MATLAB Users <https://numpy.org/doc/stable/user/numpy-for-matlab-users.html>`_ guide. Using gains and offsets for integer data ---------------------------------------- Raw data formats often store data as integer values for memory efficiency. To give these integers meaningful physical units, you can apply a gain and an offset. -In SpikeInterface, you can use the `gain_to_uV` and `offset_to_uV` parameters, since traces are handled in microvolts (uV). Both parameters can be integrated into the `read_binary` function. -If your data in MATLAB is stored as `int16`, and you know the gain and offset, you can use the following code to load the data: +In SpikeInterface, you can use the :code:`gain_to_uV` and :code:`offset_to_uV` parameters, since traces are handled in microvolts (uV). Both parameters can be integrated into the :code:`read_binary` function. +If your data in MATLAB is stored as :code:`int16`, and you know the gain and offset, you can use the following code to load the data: .. code-block:: python @@ -90,7 +90,8 @@ If your data in MATLAB is stored as `int16`, and you know the gain and offset, y num_channels=num_channels, dtype=dtype_int, gain_to_uV=gain_to_uV, offset_to_uV=offset_to_uV) - recording.get_traces(return_scaled=True) # Return traces in micro volts (uV) + recording.get_traces() # Return traces in original units [type: int] + recording.get_traces(return_scaled=True) # Return traces in microvolts (uV) [type: float] This will equip your recording object with capabilities to convert the data to float values in uV using the :code:`get_traces()` method with the :code:`return_scaled` parameter set to :code:`True`. 
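For readers who want to see what :code:`return_scaled=True` does under the hood, the following is a minimal sketch of the standard linear gain/offset transform; the array values and the :code:`0.195` gain are hypothetical placeholders, not values taken from this patch:

.. code-block:: python

    import numpy as np

    # Hypothetical raw int16 samples and scaling parameters (illustration only)
    traces_int = np.array([[-1000, 500], [200, -300]], dtype="int16")
    gain_to_uV = 0.195
    offset_to_uV = 0.0

    # The scaled traces are, in effect, obtained by casting to float and
    # applying the linear map: scaled = raw * gain + offset
    traces_uV = traces_int.astype("float32") * gain_to_uV + offset_to_uV
    print(traces_uV)  # [[-195.    97.5] [  39.   -58.5]]

Keeping the raw data as :code:`int16` and scaling on demand halves the memory footprint compared to storing :code:`float32` traces directly.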
From e0bcb28fc019e7ecde6df3ecdeb504e3c719fccc Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Fri, 29 Sep 2023 07:22:13 +0200 Subject: [PATCH 114/115] move import in --- src/spikeinterface/extractors/cbin_ibl.py | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/src/spikeinterface/extractors/cbin_ibl.py b/src/spikeinterface/extractors/cbin_ibl.py index 3dde998ca1..bd56208ebe 100644 --- a/src/spikeinterface/extractors/cbin_ibl.py +++ b/src/spikeinterface/extractors/cbin_ibl.py @@ -6,13 +6,6 @@ from spikeinterface.extractors.neuropixels_utils import get_neuropixels_sample_shifts from spikeinterface.core.core_tools import define_function_from_class -try: - import mtscomp - - HAVE_MTSCOMP = True -except: - HAVE_MTSCOMP = False - class CompressedBinaryIblExtractor(BaseRecording): """Load IBL data as an extractor object. @@ -42,7 +35,6 @@ class CompressedBinaryIblExtractor(BaseRecording): """ extractor_name = "CompressedBinaryIbl" - installed = HAVE_MTSCOMP mode = "folder" installation_mesg = "To use the CompressedBinaryIblExtractor, install mtscomp: \n\n pip install mtscomp\n\n" name = "cbin_ibl" @@ -51,7 +43,10 @@ def __init__(self, folder_path, load_sync_channel=False, stream_name="ap"): # this work only for future neo from neo.rawio.spikeglxrawio import read_meta_file, extract_stream_info - assert HAVE_MTSCOMP + try: + import mtscomp + except: + raise ImportError(self.installation_mesg) folder_path = Path(folder_path) # check bands From c8be1a0def93d4a639370a146c5d3244234049c0 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Fri, 29 Sep 2023 14:17:37 +0200 Subject: [PATCH 115/115] Fix firing range when bin size is too small (#2054) * Fix firing range when bin size is too small * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- src/spikeinterface/qualitymetrics/misc_metrics.py | 9 +++++++++ .../qualitymetrics/tests/test_metrics_functions.py | 8 ++++++-- .../benchmark/benchmark_motion_estimation.py | 6 ++---- .../benchmark/benchmark_motion_interpolation.py | 8 ++++++-- 4 files changed, 23 insertions(+), 8 deletions(-) diff --git a/src/spikeinterface/qualitymetrics/misc_metrics.py b/src/spikeinterface/qualitymetrics/misc_metrics.py index e9726a16da..d3f875959e 100644 --- a/src/spikeinterface/qualitymetrics/misc_metrics.py +++ b/src/spikeinterface/qualitymetrics/misc_metrics.py @@ -602,6 +602,15 @@ def compute_firing_ranges(waveform_extractor, bin_size_s=5, percentiles=(5, 95), if unit_ids is None: unit_ids = sorting.unit_ids + if all( + [ + waveform_extractor.get_num_samples(segment_index) < bin_size_samples + for segment_index in range(waveform_extractor.get_num_segments()) + ] + ): + warnings.warn(f"Bin size of {bin_size_s}s is larger than each segment duration. 
Firing ranges are set to NaN.") + return {unit_id: np.nan for unit_id in unit_ids} + # for each segment, we compute the firing rate histogram and we concatenate them firing_rate_histograms = {unit_id: np.array([], dtype=float) for unit_id in sorting.unit_ids} for segment_index in range(waveform_extractor.get_num_segments()): diff --git a/src/spikeinterface/qualitymetrics/tests/test_metrics_functions.py b/src/spikeinterface/qualitymetrics/tests/test_metrics_functions.py index 2d63a06b17..8a32c4cee8 100644 --- a/src/spikeinterface/qualitymetrics/tests/test_metrics_functions.py +++ b/src/spikeinterface/qualitymetrics/tests/test_metrics_functions.py @@ -220,6 +220,10 @@ def test_calculate_firing_range(waveform_extractor_simple): firing_ranges = compute_firing_ranges(we) print(firing_ranges) + with pytest.warns(UserWarning) as w: + firing_ranges_nan = compute_firing_ranges(we, bin_size_s=we.get_total_duration() + 1) + assert np.all([np.isnan(f) for f in firing_ranges_nan.values()]) + def test_calculate_amplitude_cutoff(waveform_extractor_simple): we = waveform_extractor_simple @@ -378,7 +382,7 @@ def test_calculate_drift_metrics(waveform_extractor_simple): if __name__ == "__main__": sim_data = _simulated_data() we = _waveform_extractor_simple() - we_violations = _waveform_extractor_violations(sim_data) + # we_violations = _waveform_extractor_violations(sim_data) # test_calculate_amplitude_cutoff(we) # test_calculate_presence_ratio(we) # test_calculate_amplitude_median(we) @@ -387,4 +391,4 @@ def test_calculate_drift_metrics(waveform_extractor_simple): # test_calculate_drift_metrics(we) # test_synchrony_metrics(we) test_calculate_firing_range(we) - test_calculate_amplitude_cv_metrics(we) + # test_calculate_amplitude_cv_metrics(we) diff --git a/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_estimation.py b/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_estimation.py index c505676c05..abf40b2da6 100644 --- a/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_estimation.py +++ b/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_estimation.py @@ -584,13 +584,13 @@ def plot_motions_several_benchmarks(benchmarks): _simpleaxis(ax) -def plot_speed_several_benchmarks(benchmarks, detailed=True, ax=None, colors=None): +def plot_speed_several_benchmarks(benchmarks, detailed=True, ax=None, colors=None): if ax is None: fig, ax = plt.subplots(figsize=(5, 5)) for count, benchmark in enumerate(benchmarks): color = colors[count] if colors is not None else None - + if detailed: bottom = 0 i = 0 @@ -606,8 +606,6 @@ def plot_speed_several_benchmarks(benchmarks, detailed=True, ax=None, colors=No else: total_run_time = np.sum([value for key, value in benchmark.run_times.items()]) ax.bar([count], [total_run_time], color=color, edgecolor="black") - - # ax.legend() ax.set_ylabel("speed (s)") diff --git a/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_interpolation.py b/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_interpolation.py index 8e5afb2e8e..b28b29f17c 100644 --- a/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_interpolation.py +++ b/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_interpolation.py @@ -193,11 +193,15 @@ def run_sorters(self, skip_already_done=True): recording = self.recordings[case["recording"]] output_folder = self.folder / f"tmp_sortings_{label}" if output_folder.exists() and skip_already_done: - print('already done') + print("already done") sorting = 
read_sorter_folder(output_folder) else: sorting = run_sorter( - sorter_name, recording, output_folder, **sorter_params, delete_output_folder=self.delete_output_folder + sorter_name, + recording, + output_folder, + **sorter_params, + delete_output_folder=self.delete_output_folder, ) self.sortings[label] = sorting
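To make the firing-range fix above concrete, here is a self-contained sketch of the metric with the new guard; the function name, inputs, and defaults are illustrative stand-ins for the library internals, not the actual API:

.. code-block:: python

    import numpy as np

    def firing_range_sketch(spike_times_s, duration_s, bin_size_s=5.0, percentiles=(5, 95)):
        # The patched guard: a bin larger than the whole segment cannot
        # produce a meaningful firing-rate histogram, so return NaN.
        if duration_s < bin_size_s:
            return np.nan
        edges = np.arange(0.0, duration_s + 1e-9, bin_size_s)
        counts, _ = np.histogram(spike_times_s, bins=edges)
        rates = counts / bin_size_s
        # Firing range = spread between the low and high rate percentiles
        low, high = np.percentile(rates, percentiles)
        return high - low

    rng = np.random.default_rng(42)
    spikes = np.sort(rng.uniform(0.0, 100.0, size=500))  # ~5 Hz over 100 s
    print(firing_range_sketch(spikes, duration_s=100.0))  # finite rate spread in Hz
    print(firing_range_sketch(spikes, duration_s=3.0))    # nan: segment shorter than one bin

Returning NaN (with a warning, as the actual patch does) rather than raising lets downstream quality-metric tables still be assembled, with the metric simply marked as undefined for too-short segments.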