From 9dc04f1fa68cf7202eed224394bb60b95a7b4e6d Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Fri, 14 Jul 2023 13:44:25 +0200 Subject: [PATCH 01/90] WIP --- .../sortingcomponents/matching/circus.py | 517 ++++++++---------- 1 file changed, 218 insertions(+), 299 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index 2196320378..8f08aac9c5 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -16,7 +16,8 @@ except ImportError: HAVE_SKLEARN = False -from spikeinterface.core import get_noise_levels, get_random_data_chunks + +from spikeinterface.core import get_noise_levels, get_random_data_chunks, compute_sparsity from spikeinterface.sortingcomponents.peak_detection import DetectPeakByChannel (potrs,) = scipy.linalg.get_lapack_funcs(("potrs",), dtype=np.float32) @@ -130,6 +131,38 @@ def _freq_domain_conv(in1, in2, axes, shape, cache, calc_fast_len=True): return ret +def compute_overlaps(templates, num_samples, num_channels, sparsities): + + num_templates = len(templates) + + dense_templates = np.zeros((num_templates, num_samples, num_channels), dtype=np.float32) + for i in range(num_templates): + dense_templates[i, :, sparsities[i]] = templates[i].T + + size = 2 * num_samples - 1 + + all_delays = list(range(0, num_samples+1)) + + overlaps = {} + + for delay in all_delays: + source = dense_templates[:, :delay, :].reshape(num_templates, -1) + target = dense_templates[:, num_samples-delay:, :].reshape(num_templates, -1) + + overlaps[delay] = scipy.sparse.csr_matrix(source.dot(target.T)) + + if delay < num_samples: + overlaps[size - delay + 1] = overlaps[delay].T.tocsr() + + new_overlaps = [] + + for i in range(num_templates): + data = [overlaps[j][i, :].T for j in range(size)] + data = scipy.sparse.hstack(data) + new_overlaps += [data] + + return new_overlaps + class CircusOMPPeeler(BaseTemplateMatchingEngine): """ @@ -152,11 +185,6 @@ class CircusOMPPeeler(BaseTemplateMatchingEngine): (Minimal, Maximal) amplitudes allowed for every template omp_min_sps: float Stopping criteria of the OMP algorithm, in percentage of the norm - sparsify_threshold: float - Templates are sparsified in order to keep only the channels necessary - to explain. ptp limit for considering a channel as silent - smoothing_factor: float - Templates are smoothed via Spline Interpolation noise_levels: array The noise levels, for every channels. 
If None, they will be automatically computed @@ -175,133 +203,77 @@ class CircusOMPPeeler(BaseTemplateMatchingEngine): "norms": None, "random_chunk_kwargs": {}, "noise_levels": None, - "smoothing_factor": 0.25, + 'sparse_kwargs' : {'method' : 'ptp', 'threshold' : 1}, "ignored_ids": [], + "vicinity" : 0 } - @classmethod - def _sparsify_template(cls, template, sparsify_threshold): - is_silent = template.ptp(0) < sparsify_threshold - template[:, is_silent] = 0 - (active_channels,) = np.where(np.logical_not(is_silent)) - - return template, active_channels - - @classmethod - def _regularize_template(cls, template, smoothing_factor=0.25): - nb_channels = template.shape[1] - nb_timesteps = template.shape[0] - xaxis = np.arange(nb_timesteps) - for i in range(nb_channels): - z = scipy.interpolate.UnivariateSpline(xaxis, template[:, i]) - z.set_smoothing_factor(smoothing_factor) - template[:, i] = z(xaxis) - return template - @classmethod def _prepare_templates(cls, d): - waveform_extractor = d["waveform_extractor"] - num_samples = d["num_samples"] - num_channels = d["num_channels"] - num_templates = len(d["waveform_extractor"].sorting.unit_ids) + + waveform_extractor = d['waveform_extractor'] + num_templates = len(d['waveform_extractor'].sorting.unit_ids) - templates = waveform_extractor.get_all_templates(mode="median").copy() + if not waveform_extractor.is_sparse(): + sparsity = compute_sparsity(waveform_extractor, **d['sparse_kwargs']).mask + else: + sparsity = waveform_extractor.sparsity.mask + + templates = waveform_extractor.get_all_templates(mode='median').copy() - d["sparsities"] = {} - d["templates"] = {} - d["norms"] = np.zeros(num_templates, dtype=np.float32) + d['sparsities'] = {} + d['templates'] = {} + d['norms'] = np.zeros(num_templates, dtype=np.float32) for count, unit_id in enumerate(waveform_extractor.sorting.unit_ids): - if d["smoothing_factor"] > 0: - template = cls._regularize_template(templates[count], d["smoothing_factor"]) - else: - template = templates[count] - template, active_channels = cls._sparsify_template(template, d["sparsify_threshold"]) - d["sparsities"][count] = active_channels - d["norms"][count] = np.linalg.norm(template) - d["templates"][count] = template[:, active_channels] / d["norms"][count] - - return d - - @classmethod - def _prepare_overlaps(cls, d): - templates = d["templates"] - num_samples = d["num_samples"] - num_channels = d["num_channels"] - num_templates = d["num_templates"] - sparsities = d["sparsities"] - - dense_templates = np.zeros((num_templates, num_samples, num_channels), dtype=np.float32) - for i in range(num_templates): - dense_templates[i, :, sparsities[i]] = templates[i].T - - size = 2 * num_samples - 1 - - all_delays = list(range(0, num_samples + 1)) - - overlaps = {} - - for delay in all_delays: - source = dense_templates[:, :delay, :].reshape(num_templates, -1) - target = dense_templates[:, num_samples - delay :, :].reshape(num_templates, -1) - - overlaps[delay] = scipy.sparse.csr_matrix(source.dot(target.T)) - - if delay < num_samples: - overlaps[size - delay + 1] = overlaps[delay].T.tocsr() - - new_overlaps = [] - - for i in range(num_templates): - data = [overlaps[j][i, :].T for j in range(size)] - data = scipy.sparse.hstack(data) - new_overlaps += [data] - - d["overlaps"] = new_overlaps + template = templates[count] + d['sparsities'][count], = np.nonzero(sparsity[count]) + d['norms'][count] = np.linalg.norm(template) + d['templates'][count] = template[:, d['sparsities'][count]]/d['norms'][count] return d @classmethod def 
initialize_and_check_kwargs(cls, recording, kwargs): + d = cls._default_params.copy() d.update(kwargs) - # assert isinstance(d['waveform_extractor'], WaveformExtractor) - - for v in ["omp_min_sps"]: - assert (d[v] >= 0) and (d[v] <= 1), f"{v} should be in [0, 1]" + #assert isinstance(d['waveform_extractor'], WaveformExtractor) - d["num_channels"] = d["waveform_extractor"].recording.get_num_channels() - d["num_samples"] = d["waveform_extractor"].nsamples - d["nbefore"] = d["waveform_extractor"].nbefore - d["nafter"] = d["waveform_extractor"].nafter - d["sampling_frequency"] = d["waveform_extractor"].recording.get_sampling_frequency() + for v in ['omp_min_sps']: + assert (d[v] >= 0) and (d[v] <= 1), f'{v} should be in [0, 1]' + + d['num_channels'] = d['waveform_extractor'].recording.get_num_channels() + d['num_samples'] = d['waveform_extractor'].nsamples + d['nbefore'] = d['waveform_extractor'].nbefore + d['nafter'] = d['waveform_extractor'].nafter + d['sampling_frequency'] = d['waveform_extractor'].recording.get_sampling_frequency() + d['vicinity'] *= d['num_samples'] - if d["noise_levels"] is None: - print("CircusOMPPeeler : noise should be computed outside") - d["noise_levels"] = get_noise_levels(recording, **d["random_chunk_kwargs"], return_scaled=False) + if d['noise_levels'] is None: + print('CircusOMPPeeler : noise should be computed outside') + d['noise_levels'] = get_noise_levels(recording, **d['random_chunk_kwargs'], return_scaled=False) - if d["templates"] is None: + if d['templates'] is None: d = cls._prepare_templates(d) else: - for key in ["norms", "sparsities"]: - assert d[key] is not None, "If templates are provided, %d should also be there" % key + for key in ['norms', 'sparsities']: + assert d[key] is not None, "If templates are provided, %d should also be there" %key - d["num_templates"] = len(d["templates"]) + d['num_templates'] = len(d['templates']) - if d["overlaps"] is None: - d = cls._prepare_overlaps(d) + if d['overlaps'] is None: + d['overlaps'] = compute_overlaps(d['templates'], d['num_samples'], d['num_channels'], d['sparsities']) - d["ignored_ids"] = np.array(d["ignored_ids"]) + d['ignored_ids'] = np.array(d['ignored_ids']) - omp_min_sps = d["omp_min_sps"] - norms = d["norms"] - sparsities = d["sparsities"] + omp_min_sps = d['omp_min_sps'] + nb_active_channels = np.array([len(d['sparsities'][count]) for count in range(d['num_templates'])]) + d['stop_criteria'] = omp_min_sps * np.sqrt(nb_active_channels * d['num_samples']) - nb_active_channels = np.array([len(sparsities[i]) for i in range(d["num_templates"])]) - d["stop_criteria"] = omp_min_sps * np.sqrt(d["noise_levels"].sum() * d["num_samples"]) + return d - return d @classmethod def serialize_method_kwargs(cls, kwargs): @@ -321,26 +293,27 @@ def get_margin(cls, recording, kwargs): @classmethod def main_function(cls, traces, d): - templates = d["templates"] - num_templates = d["num_templates"] - num_channels = d["num_channels"] - num_samples = d["num_samples"] - overlaps = d["overlaps"] - norms = d["norms"] - nbefore = d["nbefore"] - nafter = d["nafter"] + templates = d['templates'] + num_templates = d['num_templates'] + num_channels = d['num_channels'] + num_samples = d['num_samples'] + overlaps = d['overlaps'] + norms = d['norms'] + nbefore = d['nbefore'] + nafter = d['nafter'] omp_tol = np.finfo(np.float32).eps - num_samples = d["nafter"] + d["nbefore"] + num_samples = d['nafter'] + d['nbefore'] neighbor_window = num_samples - 1 - min_amplitude, max_amplitude = d["amplitudes"] - sparsities = d["sparsities"] - 
ignored_ids = d["ignored_ids"] - stop_criteria = d["stop_criteria"] + min_amplitude, max_amplitude = d['amplitudes'] + sparsities = d['sparsities'] + ignored_ids = d['ignored_ids'] + stop_criteria = d['stop_criteria'][:, np.newaxis] + vicinity = d['vicinity'] - if "cached_fft_kernels" not in d: - d["cached_fft_kernels"] = {"fshape": 0} + if 'cached_fft_kernels' not in d: + d['cached_fft_kernels'] = {'fshape' : 0} - cached_fft_kernels = d["cached_fft_kernels"] + cached_fft_kernels = d['cached_fft_kernels'] num_timesteps = len(traces) @@ -352,22 +325,24 @@ def main_function(cls, traces, d): dummy_traces = np.empty((num_channels, num_timesteps), dtype=np.float32) fshape, axes = get_scipy_shape(dummy_filter, traces, axes=1) - fft_cache = {"full": sp_fft.rfftn(traces, fshape, axes=axes)} + fft_cache = {'full' : sp_fft.rfftn(traces, fshape, axes=axes)} scalar_products = np.empty((num_templates, num_peaks), dtype=np.float32) - flagged_chunk = cached_fft_kernels["fshape"] != fshape[0] + flagged_chunk = cached_fft_kernels['fshape'] != fshape[0] for i in range(num_templates): + if i not in ignored_ids: + if i not in cached_fft_kernels or flagged_chunk: kernel_filter = np.ascontiguousarray(templates[i][::-1].T) - cached_fft_kernels.update({i: sp_fft.rfftn(kernel_filter, fshape, axes=axes)}) - cached_fft_kernels["fshape"] = fshape[0] + cached_fft_kernels.update({i : sp_fft.rfftn(kernel_filter, fshape, axes=axes)}) + cached_fft_kernels['fshape'] = fshape[0] - fft_cache.update({"mask": sparsities[i], "template": cached_fft_kernels[i]}) + fft_cache.update({'mask' : sparsities[i], 'template' : cached_fft_kernels[i]}) - convolution = fftconvolve_with_cache(dummy_filter, dummy_traces, fft_cache, axes=1, mode="valid") + convolution = fftconvolve_with_cache(dummy_filter, dummy_traces, fft_cache, axes=1, mode='valid') if len(convolution) > 0: scalar_products[i] = convolution.sum(0) else: @@ -381,7 +356,7 @@ def main_function(cls, traces, d): spikes = np.empty(scalar_products.size, dtype=spike_dtype) idx_lookup = np.arange(scalar_products.size).reshape(num_templates, -1) - M = np.zeros((num_peaks, num_peaks), dtype=np.float32) + M = np.zeros((100, 100), dtype=np.float32) all_selections = np.empty((2, scalar_products.size), dtype=np.int32) final_amplitudes = np.zeros(scalar_products.shape, dtype=np.float32) @@ -392,13 +367,17 @@ def main_function(cls, traces, d): neighbors = {} cached_overlaps = {} - is_valid = scalar_products > stop_criteria + is_valid = (scalar_products > stop_criteria) + all_amplitudes = np.zeros(0, dtype=np.float32) + is_in_vicinity = np.zeros(0, dtype=np.int32) while np.any(is_valid): + best_amplitude_ind = scalar_products[is_valid].argmax() best_cluster_ind, peak_index = np.unravel_index(idx_lookup[is_valid][best_amplitude_ind], idx_lookup.shape) - + if num_selection > 0: + delta_t = selection[1] - peak_index idx = np.where((delta_t < neighbor_window) & (delta_t > -num_samples))[0] myline = num_samples + delta_t[idx] @@ -407,25 +386,42 @@ def main_function(cls, traces, d): cached_overlaps[best_cluster_ind] = overlaps[best_cluster_ind].toarray() if num_selection == M.shape[0]: - Z = np.zeros((2 * num_selection, 2 * num_selection), dtype=np.float32) + Z = np.zeros((2*num_selection, 2*num_selection), dtype=np.float32) Z[:num_selection, :num_selection] = M M = Z M[num_selection, idx] = cached_overlaps[best_cluster_ind][selection[0, idx], myline] - scipy.linalg.solve_triangular( - M[:num_selection, :num_selection], - M[num_selection, :num_selection], - trans=0, - lower=1, - overwrite_b=True, - 
check_finite=False, - ) - - v = nrm2(M[num_selection, :num_selection]) ** 2 - Lkk = 1 - v - if Lkk <= omp_tol: # selected atoms are dependent - break - M[num_selection, num_selection] = np.sqrt(Lkk) + + if vicinity == 0: + scipy.linalg.solve_triangular(M[:num_selection, :num_selection], M[num_selection, :num_selection], trans=0, + lower=1, + overwrite_b=True, + check_finite=False) + + v = nrm2(M[num_selection, :num_selection]) ** 2 + Lkk = 1 - v + if Lkk <= omp_tol: # selected atoms are dependent + break + M[num_selection, num_selection] = np.sqrt(Lkk) + else: + is_in_vicinity = np.where(np.abs(delta_t) < vicinity)[0] + + if len(is_in_vicinity) > 0: + + L = M[is_in_vicinity, :][:, is_in_vicinity] + + M[num_selection, is_in_vicinity] = scipy.linalg.solve_triangular(L, M[num_selection, is_in_vicinity], trans=0, + lower=1, + overwrite_b=True, + check_finite=False) + + v = nrm2(M[num_selection, is_in_vicinity]) ** 2 + Lkk = 1 - v + if Lkk <= omp_tol: # selected atoms are dependent + break + M[num_selection, num_selection] = np.sqrt(Lkk) + else: + M[num_selection, num_selection] = 1.0 else: M[0, 0] = 1 @@ -435,45 +431,54 @@ def main_function(cls, traces, d): selection = all_selections[:, :num_selection] res_sps = full_sps[selection[0], selection[1]] - all_amplitudes, _ = potrs(M[:num_selection, :num_selection], res_sps, lower=True, overwrite_b=False) - - all_amplitudes /= norms[selection[0]] - - diff_amplitudes = all_amplitudes - final_amplitudes[selection[0], selection[1]] + if vicinity == 0: + all_amplitudes, _ = potrs(M[:num_selection, :num_selection], res_sps, + lower=True, overwrite_b=False) + all_amplitudes /= norms[selection[0]] + else: + is_in_vicinity = np.append(is_in_vicinity, num_selection - 1) + all_amplitudes = np.append(all_amplitudes, np.float32(0)) + L = M[is_in_vicinity, :][:, is_in_vicinity] + all_amplitudes[is_in_vicinity], _ = potrs(L, res_sps[is_in_vicinity], + lower=True, overwrite_b=False) + all_amplitudes[is_in_vicinity] /= norms[selection[0][is_in_vicinity]] + + diff_amplitudes = (all_amplitudes - final_amplitudes[selection[0], selection[1]]) modified = np.where(np.abs(diff_amplitudes) > omp_tol)[0] final_amplitudes[selection[0], selection[1]] = all_amplitudes for i in modified: - tmp_best, tmp_peak = selection[:, i] - diff_amp = diff_amplitudes[i] * norms[tmp_best] + tmp_best, tmp_peak = selection[:, i] + diff_amp = diff_amplitudes[i]*norms[tmp_best] + if not tmp_best in cached_overlaps: cached_overlaps[tmp_best] = overlaps[tmp_best].toarray() if not tmp_peak in neighbors.keys(): idx = [max(0, tmp_peak - num_samples), min(num_peaks, tmp_peak + neighbor_window)] tdx = [num_samples + idx[0] - tmp_peak, num_samples + idx[1] - tmp_peak] - neighbors[tmp_peak] = {"idx": idx, "tdx": tdx} + neighbors[tmp_peak] = {'idx' : idx, 'tdx' : tdx} - idx = neighbors[tmp_peak]["idx"] - tdx = neighbors[tmp_peak]["tdx"] + idx = neighbors[tmp_peak]['idx'] + tdx = neighbors[tmp_peak]['tdx'] - to_add = diff_amp * cached_overlaps[tmp_best][:, tdx[0] : tdx[1]] - scalar_products[:, idx[0] : idx[1]] -= to_add + to_add = diff_amp * cached_overlaps[tmp_best][:, tdx[0]:tdx[1]] + scalar_products[:, idx[0]:idx[1]] -= to_add - is_valid = scalar_products > stop_criteria + is_valid = (scalar_products > stop_criteria) - is_valid = (final_amplitudes > min_amplitude) * (final_amplitudes < max_amplitude) + is_valid = (final_amplitudes > min_amplitude)*(final_amplitudes < max_amplitude) valid_indices = np.where(is_valid) num_spikes = len(valid_indices[0]) - spikes["sample_index"][:num_spikes] = 
valid_indices[1] + d["nbefore"] - spikes["channel_index"][:num_spikes] = 0 - spikes["cluster_index"][:num_spikes] = valid_indices[0] - spikes["amplitude"][:num_spikes] = final_amplitudes[valid_indices[0], valid_indices[1]] - + spikes['sample_index'][:num_spikes] = valid_indices[1] + d['nbefore'] + spikes['channel_index'][:num_spikes] = 0 + spikes['cluster_index'][:num_spikes] = valid_indices[0] + spikes['amplitude'][:num_spikes] = final_amplitudes[valid_indices[0], valid_indices[1]] + spikes = spikes[:num_spikes] - order = np.argsort(spikes["sample_index"]) + order = np.argsort(spikes['sample_index']) spikes = spikes[order] return spikes @@ -515,9 +520,6 @@ class CircusPeeler(BaseTemplateMatchingEngine): Maximal amplitude allowed for every template min_amplitude: float Minimal amplitude allowed for every template - sparsify_threshold: float - Templates are sparsified in order to keep only the channels necessary - to explain a given fraction of the total norm use_sparse_matrix_threshold: float If density of the templates is below a given threshold, sparse matrix are used (memory efficient) @@ -529,129 +531,57 @@ class CircusPeeler(BaseTemplateMatchingEngine): """ _default_params = { - "peak_sign": "neg", - "exclude_sweep_ms": 0.1, - "jitter_ms": 0.1, - "detect_threshold": 5, - "noise_levels": None, - "random_chunk_kwargs": {}, - "sparsify_threshold": 0.99, - "max_amplitude": 1.5, - "min_amplitude": 0.5, - "use_sparse_matrix_threshold": 0.25, - "progess_bar_steps": False, - "waveform_extractor": None, - "smoothing_factor": 0.25, + 'peak_sign': 'neg', + 'exclude_sweep_ms': 0.1, + 'jitter_ms' : 0.1, + 'detect_threshold': 5, + 'noise_levels': None, + 'random_chunk_kwargs': {}, + 'max_amplitude' : 1.5, + 'min_amplitude' : 0.5, + 'use_sparse_matrix_threshold' : 0.25, + 'progess_bar_steps' : False, + 'waveform_extractor': None, + 'sparse_kwargs' : {'method' : 'threshold', 'threshold' : 0.5, 'peak_sign' : 'both'} } - @classmethod - def _sparsify_template(cls, template, sparsify_threshold, noise_levels): - is_silent = template.std(0) < 0.1 * noise_levels - - template[:, is_silent] = 0 - - channel_norms = np.linalg.norm(template, axis=0) ** 2 - total_norm = np.linalg.norm(template) ** 2 - - idx = np.argsort(channel_norms)[::-1] - explained_norms = np.cumsum(channel_norms[idx] / total_norm) - channel = np.searchsorted(explained_norms, sparsify_threshold) - active_channels = np.sort(idx[:channel]) - template[:, idx[channel:]] = 0 - return template, active_channels - - @classmethod - def _regularize_template(cls, template, smoothing_factor=0.25): - nb_channels = template.shape[1] - nb_timesteps = template.shape[0] - xaxis = np.arange(nb_timesteps) - for i in range(nb_channels): - z = scipy.interpolate.UnivariateSpline(xaxis, template[:, i]) - z.set_smoothing_factor(smoothing_factor) - template[:, i] = z(xaxis) - return template - @classmethod def _prepare_templates(cls, d): - parameters = d - waveform_extractor = parameters["waveform_extractor"] - num_samples = parameters["num_samples"] - num_channels = parameters["num_channels"] - num_templates = parameters["num_templates"] - max_amplitude = parameters["max_amplitude"] - min_amplitude = parameters["min_amplitude"] - use_sparse_matrix_threshold = parameters["use_sparse_matrix_threshold"] + + waveform_extractor = d['waveform_extractor'] + num_samples = d['num_samples'] + num_channels = d['num_channels'] + num_templates = d['num_templates'] + use_sparse_matrix_threshold = d['use_sparse_matrix_threshold'] - parameters["norms"] = np.zeros(num_templates, 
dtype=np.float32) + d['norms'] = np.zeros(num_templates, dtype=np.float32) - all_units = list(parameters["waveform_extractor"].sorting.unit_ids) + all_units = list(d['waveform_extractor'].sorting.unit_ids) - templates = waveform_extractor.get_all_templates(mode="median").copy() + if not waveform_extractor.is_sparse(): + sparsity = compute_sparsity(waveform_extractor, **d['sparse_kwargs']).mask + templates = waveform_extractor.get_all_templates(mode='median').copy() + d['sparsities'] = {} + for count, unit_id in enumerate(all_units): - if parameters["smoothing_factor"] > 0: - templates[count] = cls._regularize_template(templates[count], parameters["smoothing_factor"]) - templates[count], _ = cls._sparsify_template( - templates[count], parameters["sparsify_threshold"], parameters["noise_levels"] - ) - parameters["norms"][count] = np.linalg.norm(templates[count]) - templates[count] /= parameters["norms"][count] + d['sparsities'][count], = np.nonzero(sparsity[count]) + templates[count][sparsity[count] == False] = 0 + d['norms'][count] = np.linalg.norm(templates[count]) + templates[count] /= d['norms'][count] templates = templates.reshape(num_templates, -1) - nnz = np.sum(templates != 0) / (num_templates * num_samples * num_channels) + nnz = np.sum(templates != 0)/(num_templates * num_samples * num_channels) if nnz <= use_sparse_matrix_threshold: templates = scipy.sparse.csr_matrix(templates) - print(f"Templates are automatically sparsified (sparsity level is {nnz})") - parameters["is_dense"] = False - else: - parameters["is_dense"] = True - - parameters["templates"] = templates - - return parameters - - @classmethod - def _prepare_overlaps(cls, d): - templates = d["templates"] - num_samples = d["num_samples"] - num_channels = d["num_channels"] - num_templates = d["num_templates"] - is_dense = d["is_dense"] - - if not is_dense: - dense_templates = templates.toarray() + print(f'Templates are automatically sparsified (sparsity level is {nnz})') + d['is_dense'] = False else: - dense_templates = templates - - dense_templates = dense_templates.reshape(num_templates, num_samples, num_channels) - - size = 2 * num_samples - 1 - - all_delays = list(range(0, num_samples + 1)) - if d["progess_bar_steps"]: - all_delays = tqdm(all_delays, desc="[1] compute overlaps") - - overlaps = {} - - for delay in all_delays: - source = dense_templates[:, :delay, :].reshape(num_templates, -1) - target = dense_templates[:, num_samples - delay :, :].reshape(num_templates, -1) - - overlaps[delay] = scipy.sparse.csr_matrix(source.dot(target.T)) + d['is_dense'] = True - if delay < num_samples: - overlaps[size - delay] = overlaps[delay].T.tocsr() - - new_overlaps = [] - - for i in range(num_templates): - data = [overlaps[j][i, :].T for j in range(size)] - data = scipy.sparse.hstack(data) - new_overlaps += [data] - - d["overlaps"] = new_overlaps + d['templates'] = templates return d @@ -661,9 +591,9 @@ def _mcc_error(cls, bounds, good, bad): fp = np.sum((bounds[0] <= bad) & (bad <= bounds[1])) tp = np.sum((bounds[0] <= good) & (good <= bounds[1])) tn = np.sum((bad < bounds[0]) | (bad > bounds[1])) - denom = (tp + fp) * (tp + fn) * (tn + fp) * (tn + fn) + denom = (tp+fp)*(tp+fn)*(tn+fp)*(tn+fn) if denom > 0: - mcc = 1 - (tp * tn - fp * fn) / np.sqrt(denom) + mcc = 1 - (tp*tn - fp*fn)/np.sqrt(denom) else: mcc = 1 return mcc @@ -708,16 +638,6 @@ def _optimize_amplitudes(cls, noise_snippets, d): res = scipy.optimize.differential_evolution(cls._cost_function_mcc, bounds=cost_bounds, args=cost_kwargs) parameters["amplitudes"][count] 
= res.x - # import pylab as plt - # plt.hist(good, 100, alpha=0.5) - # plt.hist(bad, 100, alpha=0.5) - # plt.hist(noise[count], 100, alpha=0.5) - # ymin, ymax = plt.ylim() - # plt.plot([res.x[0], res.x[0]], [ymin, ymax], 'k--') - # plt.plot([res.x[1], res.x[1]], [ymin, ymax], 'k--') - # plt.savefig('test_%d.png' %count) - # plt.close() - return d @classmethod @@ -727,7 +647,6 @@ def initialize_and_check_kwargs(cls, recording, kwargs): default_parameters.update(kwargs) # assert isinstance(d['waveform_extractor'], WaveformExtractor) - for v in ["sparsify_threshold", "use_sparse_matrix_threshold"]: assert (default_parameters[v] >= 0) and (default_parameters[v] <= 1), f"{v} should be in [0, 1]" @@ -817,31 +736,31 @@ def main_function(cls, traces, d): sym_patch = d["sym_patch"] peak_traces = traces[margin // 2 : -margin // 2, :] - peak_sample_ind, peak_chan_ind = DetectPeakByChannel.detect_peaks( + peak_sample_index, peak_chan_ind = DetectPeakByChannel.detect_peaks( peak_traces, peak_sign, abs_threholds, exclude_sweep_size ) if jitter > 0: - jittered_peaks = peak_sample_ind[:, np.newaxis] + np.arange(-jitter, jitter) + jittered_peaks = peak_sample_index[:, np.newaxis] + np.arange(-jitter, jitter) jittered_channels = peak_chan_ind[:, np.newaxis] + np.zeros(2 * jitter) mask = (jittered_peaks > 0) & (jittered_peaks < len(peak_traces)) jittered_peaks = jittered_peaks[mask] jittered_channels = jittered_channels[mask] - peak_sample_ind, unique_idx = np.unique(jittered_peaks, return_index=True) + peak_sample_index, unique_idx = np.unique(jittered_peaks, return_index=True) peak_chan_ind = jittered_channels[unique_idx] else: - peak_sample_ind, unique_idx = np.unique(peak_sample_ind, return_index=True) + peak_sample_index, unique_idx = np.unique(peak_sample_index, return_index=True) peak_chan_ind = peak_chan_ind[unique_idx] - num_peaks = len(peak_sample_ind) + num_peaks = len(peak_sample_index) if sym_patch: - snippets = extract_patches_2d(traces, patch_sizes)[peak_sample_ind] - peak_sample_ind += margin // 2 + snippets = extract_patches_2d(traces, patch_sizes)[peak_sample_index] + peak_sample_index += margin // 2 else: - peak_sample_ind += margin // 2 + peak_sample_index += margin // 2 snippet_window = np.arange(-d["nbefore"], d["nafter"]) - snippets = traces[peak_sample_ind[:, np.newaxis] + snippet_window] + snippets = traces[peak_sample_index[:, np.newaxis] + snippet_window] if num_peaks > 0: snippets = snippets.reshape(num_peaks, -1) @@ -865,10 +784,10 @@ def main_function(cls, traces, d): best_cluster_ind, peak_index = np.unravel_index(idx_lookup[is_valid][best_amplitude_ind], idx_lookup.shape) best_amplitude = scalar_products[best_cluster_ind, peak_index] - best_peak_sample_ind = peak_sample_ind[peak_index] + best_peak_sample_index = peak_sample_index[peak_index] best_peak_chan_ind = peak_chan_ind[peak_index] - peak_data = peak_sample_ind - peak_sample_ind[peak_index] + peak_data = peak_sample_index - peak_sample_index[peak_index] is_valid_nn = np.searchsorted(peak_data, [-neighbor_window, neighbor_window + 1]) idx_neighbor = peak_data[is_valid_nn[0] : is_valid_nn[1]] + neighbor_window @@ -880,7 +799,7 @@ def main_function(cls, traces, d): scalar_products[:, is_valid_nn[0] : is_valid_nn[1]] += to_add scalar_products[best_cluster_ind, is_valid_nn[0] : is_valid_nn[1]] = -np.inf - spikes["sample_index"][num_spikes] = best_peak_sample_ind + spikes["sample_index"][num_spikes] = best_peak_sample_index spikes["channel_index"][num_spikes] = best_peak_chan_ind spikes["cluster_index"][num_spikes] = 
best_cluster_ind spikes["amplitude"][num_spikes] = best_amplitude From 0f9fee6fe788a0cdc44c18d19fd8b0f11f10ff4f Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Mon, 17 Jul 2023 10:30:33 +0200 Subject: [PATCH 02/90] WIP --- .../sorters/internal/spyking_circus2.py | 59 ++++++++++--------- .../clustering/clustering_tools.py | 2 +- .../clustering/random_projections.py | 3 +- 3 files changed, 35 insertions(+), 29 deletions(-) diff --git a/src/spikeinterface/sorters/internal/spyking_circus2.py b/src/spikeinterface/sorters/internal/spyking_circus2.py index 24c4a7ccfc..18db5f37c8 100644 --- a/src/spikeinterface/sorters/internal/spyking_circus2.py +++ b/src/spikeinterface/sorters/internal/spyking_circus2.py @@ -3,7 +3,7 @@ import os import shutil import numpy as np -import os +import psutil from spikeinterface.core import NumpySorting, load_extractor, BaseRecording, get_noise_levels, extract_waveforms from spikeinterface.core.job_tools import fix_job_kwargs @@ -18,23 +18,24 @@ class Spykingcircus2Sorter(ComponentsBasedSorter): - sorter_name = "spykingcircus2" + sorter_name = 'spykingcircus2' _default_params = { - "general": {"ms_before": 2, "ms_after": 2, "local_radius_um": 100}, - "waveforms": {"max_spikes_per_unit": 200, "overwrite": True}, - "filtering": {"dtype": "float32"}, - "detection": {"peak_sign": "neg", "detect_threshold": 5}, - "selection": {"n_peaks_per_channel": 5000, "min_n_peaks": 20000}, - "localization": {}, - "clustering": {}, - "matching": {}, - "registration": {}, - "apply_preprocessing": True, - "shared_memory": False, - "job_kwargs": {}, + 'general' : {'ms_before' : 2, 'ms_after' : 2, 'local_radius_um' : 75}, + 'waveforms' : {'max_spikes_per_unit' : 200, 'overwrite' : True, 'sparse' : True, + 'method' : 'ptp', 'threshold' : 1}, + 'filtering' : {'dtype' : 'float32'}, + 'detection' : {'peak_sign': 'neg', 'detect_threshold': 5}, + 'selection' : {'n_peaks_per_channel' : 5000, 'min_n_peaks' : 20000}, + 'localization' : {}, + 'clustering': {}, + 'matching': {}, + 'apply_preprocessing': True, + 'shared_memory' : True, + 'job_kwargs' : {'n_jobs' : -1, 'chunk_memory' : "10M"} } + @classmethod def get_sorter_version(cls): return "2.0" @@ -63,8 +64,6 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): ## First, we are filtering the data filtering_params = params["filtering"].copy() if params["apply_preprocessing"]: - # if recording.is_filtered == True: - # print('Looks like the recording is already filtered, check preprocessing!') recording_f = bandpass_filter(recording, **filtering_params) recording_f = common_reference(recording_f) else: @@ -102,12 +101,15 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): ## We launch a clustering (using hdbscan) relying on positions and features extracted on ## the fly from the snippets - clustering_params = params["clustering"].copy() - clustering_params.update(params["waveforms"]) - clustering_params.update(params["general"]) - clustering_params.update(dict(shared_memory=params["shared_memory"])) - clustering_params["job_kwargs"] = job_kwargs - clustering_params["tmp_folder"] = sorter_output_folder / "clustering" + clustering_params = params['clustering'].copy() + clustering_params['waveforms_kwargs'] = params['waveforms'] + + for k in ['ms_before', 'ms_after']: + clustering_params['waveforms_kwargs'][k] = params['general'][k] + + clustering_params.update(dict(shared_memory=params['shared_memory'])) + clustering_params['job_kwargs'] = job_kwargs + clustering_params['tmp_folder'] = sorter_output_folder / 
"clustering" labels, peak_labels = find_cluster_from_peaks( recording_f, selected_peaks, method="random_projections", method_kwargs=clustering_params @@ -122,15 +124,18 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): sorting = sorting.save(folder=clustering_folder) - ## We get the templates our of such a clustering - waveforms_params = params["waveforms"].copy() + ## We get the templates our of such a clustering + waveforms_params = params['waveforms'].copy() waveforms_params.update(job_kwargs) - if params["shared_memory"]: - mode = "memory" + for k in ['ms_before', 'ms_after']: + waveforms_params[k] = params['general'][k] + + if params['shared_memory']: + mode = 'memory' waveforms_folder = None else: - mode = "folder" + mode = 'folder' waveforms_folder = sorter_output_folder / "waveforms" we = extract_waveforms( diff --git a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py index 53833b01a2..6edf5af16b 100644 --- a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py +++ b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py @@ -579,7 +579,7 @@ def remove_duplicates_via_matching( f.write(blanck) f.close() - recording = BinaryRecordingExtractor(tmp_filename, num_chan=num_chans, sampling_frequency=fs, dtype="float32") + recording = BinaryRecordingExtractor(tmp_filename, num_channels=num_chans, sampling_frequency=fs, dtype="float32") recording.annotate(is_filtered=True) margin = 2 * max(waveform_extractor.nbefore, waveform_extractor.nafter) diff --git a/src/spikeinterface/sortingcomponents/clustering/random_projections.py b/src/spikeinterface/sortingcomponents/clustering/random_projections.py index 02247dd288..1450ba91db 100644 --- a/src/spikeinterface/sortingcomponents/clustering/random_projections.py +++ b/src/spikeinterface/sortingcomponents/clustering/random_projections.py @@ -238,7 +238,8 @@ def main_function(cls, recording, peaks, params): if params["tmp_folder"] is None: shutil.rmtree(tmp_folder) else: - shutil.rmtree(tmp_folder / "waveforms") + if not params["shared_memory"]: + shutil.rmtree(tmp_folder / "waveforms") shutil.rmtree(tmp_folder / "sorting") if verbose: From 7a3d4c2181da06c4106d6c17a015839a0cc55f4f Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Mon, 17 Jul 2023 14:06:10 +0200 Subject: [PATCH 03/90] WIP --- .../sortingcomponents/matching/circus.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index 8f08aac9c5..d86dac97e2 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -194,7 +194,6 @@ class CircusOMPPeeler(BaseTemplateMatchingEngine): """ _default_params = { - "sparsify_threshold": 1, "amplitudes": [0.6, 2], "omp_min_sps": 0.1, "waveform_extractor": None, @@ -219,6 +218,7 @@ def _prepare_templates(cls, d): else: sparsity = waveform_extractor.sparsity.mask + print(sparsity.mean()) templates = waveform_extractor.get_all_templates(mode='median').copy() d['sparsities'] = {} @@ -226,10 +226,10 @@ def _prepare_templates(cls, d): d['norms'] = np.zeros(num_templates, dtype=np.float32) for count, unit_id in enumerate(waveform_extractor.sorting.unit_ids): - template = templates[count] + template = templates[count][:, sparsity[count]] d['sparsities'][count], = np.nonzero(sparsity[count]) d['norms'][count] = 
np.linalg.norm(template) - d['templates'][count] = template[:, d['sparsities'][count]]/d['norms'][count] + d['templates'][count] = template/d['norms'][count] return d @@ -269,8 +269,8 @@ def initialize_and_check_kwargs(cls, recording, kwargs): d['ignored_ids'] = np.array(d['ignored_ids']) omp_min_sps = d['omp_min_sps'] - nb_active_channels = np.array([len(d['sparsities'][count]) for count in range(d['num_templates'])]) - d['stop_criteria'] = omp_min_sps * np.sqrt(nb_active_channels * d['num_samples']) + #nb_active_channels = np.array([len(d['sparsities'][count]) for count in range(d['num_templates'])]) + d['stop_criteria'] = omp_min_sps * np.sqrt(d['noise_levels'].sum() * d['num_samples']) return d @@ -307,7 +307,7 @@ def main_function(cls, traces, d): min_amplitude, max_amplitude = d['amplitudes'] sparsities = d['sparsities'] ignored_ids = d['ignored_ids'] - stop_criteria = d['stop_criteria'][:, np.newaxis] + stop_criteria = d['stop_criteria'] vicinity = d['vicinity'] if 'cached_fft_kernels' not in d: @@ -356,7 +356,7 @@ def main_function(cls, traces, d): spikes = np.empty(scalar_products.size, dtype=spike_dtype) idx_lookup = np.arange(scalar_products.size).reshape(num_templates, -1) - M = np.zeros((100, 100), dtype=np.float32) + M = np.zeros((num_peaks, num_peaks), dtype=np.float32) all_selections = np.empty((2, scalar_products.size), dtype=np.int32) final_amplitudes = np.zeros(scalar_products.shape, dtype=np.float32) @@ -647,7 +647,7 @@ def initialize_and_check_kwargs(cls, recording, kwargs): default_parameters.update(kwargs) # assert isinstance(d['waveform_extractor'], WaveformExtractor) - for v in ["sparsify_threshold", "use_sparse_matrix_threshold"]: + for v in ["use_sparse_matrix_threshold"]: assert (default_parameters[v] >= 0) and (default_parameters[v] <= 1), f"{v} should be in [0, 1]" default_parameters["num_channels"] = default_parameters["waveform_extractor"].recording.get_num_channels() From 892305bef89b97454fcda956f39b81e3b7673d55 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Mon, 24 Jul 2023 12:01:54 +0200 Subject: [PATCH 04/90] WIP --- src/spikeinterface/sortingcomponents/matching/circus.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index d86dac97e2..d3d2c39836 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -218,7 +218,6 @@ def _prepare_templates(cls, d): else: sparsity = waveform_extractor.sparsity.mask - print(sparsity.mean()) templates = waveform_extractor.get_all_templates(mode='median').copy() d['sparsities'] = {} @@ -542,7 +541,7 @@ class CircusPeeler(BaseTemplateMatchingEngine): 'use_sparse_matrix_threshold' : 0.25, 'progess_bar_steps' : False, 'waveform_extractor': None, - 'sparse_kwargs' : {'method' : 'threshold', 'threshold' : 0.5, 'peak_sign' : 'both'} + 'sparse_kwargs' : {'method' : 'ptp', 'threshold' : 1} } @classmethod From 1cb122c040b256bd0073e798e96880e19bff6d59 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Mon, 28 Aug 2023 13:35:59 +0200 Subject: [PATCH 05/90] WIP for circus2 --- .../sortingcomponents/clustering/clustering_tools.py | 1 + src/spikeinterface/sortingcomponents/matching/circus.py | 2 ++ 2 files changed, 3 insertions(+) diff --git a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py index 6edf5af16b..06e0b8ea96 100644 --- 
a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py +++ b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py @@ -581,6 +581,7 @@ def remove_duplicates_via_matching( recording = BinaryRecordingExtractor(tmp_filename, num_channels=num_chans, sampling_frequency=fs, dtype="float32") recording.annotate(is_filtered=True) + recording = recording.set_probe(waveform_extractor.recording.get_probe()) margin = 2 * max(waveform_extractor.nbefore, waveform_extractor.nafter) half_marging = margin // 2 diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index d3d2c39836..ef823316a2 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -559,6 +559,8 @@ def _prepare_templates(cls, d): if not waveform_extractor.is_sparse(): sparsity = compute_sparsity(waveform_extractor, **d['sparse_kwargs']).mask + else: + sparsity = waveform_extractor.sparsity.mask templates = waveform_extractor.get_all_templates(mode='median').copy() d['sparsities'] = {} From ef204dd83e9f6fe627b849619932c44c331e2306 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Mon, 28 Aug 2023 13:58:00 +0200 Subject: [PATCH 06/90] WIP --- .../clustering/clustering_tools.py | 13 +- .../clustering/random_projections.py | 131 +++++++----------- 2 files changed, 58 insertions(+), 86 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py index 06e0b8ea96..f93142152f 100644 --- a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py +++ b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py @@ -536,7 +536,6 @@ def remove_duplicates_via_matching( waveform_extractor, noise_levels, peak_labels, - sparsify_threshold=1, method_kwargs={}, job_kwargs={}, tmp_folder=None, @@ -552,6 +551,10 @@ def remove_duplicates_via_matching( from pathlib import Path job_kwargs = fix_job_kwargs(job_kwargs) + + if waveform_extractor.is_sparse(): + sparsity = waveform_extractor.sparsity.mask + templates = waveform_extractor.get_all_templates(mode="median").copy() nb_templates = len(templates) duration = waveform_extractor.nbefore + waveform_extractor.nafter @@ -559,9 +562,10 @@ def remove_duplicates_via_matching( fs = waveform_extractor.recording.get_sampling_frequency() num_chans = waveform_extractor.recording.get_num_channels() - for t in range(nb_templates): - is_silent = templates[t].ptp(0) < sparsify_threshold - templates[t, :, is_silent] = 0 + if waveform_extractor.is_sparse(): + for count, unit_id in enumerate(waveform_extractor.sorting.unit_ids): + templates[count][:, ~sparsity[count]] = 0 + zdata = templates.reshape(nb_templates, -1) @@ -598,7 +602,6 @@ def remove_duplicates_via_matching( "waveform_extractor": waveform_extractor, "noise_levels": noise_levels, "amplitudes": [0.95, 1.05], - "sparsify_threshold": sparsify_threshold, "omp_min_sps": 0.1, "templates": None, "overlaps": None, diff --git a/src/spikeinterface/sortingcomponents/clustering/random_projections.py b/src/spikeinterface/sortingcomponents/clustering/random_projections.py index 0803763573..5e14fa4736 100644 --- a/src/spikeinterface/sortingcomponents/clustering/random_projections.py +++ b/src/spikeinterface/sortingcomponents/clustering/random_projections.py @@ -41,7 +41,6 @@ class RandomProjectionClustering: "ms_before": 1.5, "ms_after": 1.5, "random_seed": 42, - "cleaning_method": 
"matching", "shared_memory": False, "min_values": {"ptp": 0, "energy": 0}, "tmp_folder": None, @@ -160,87 +159,57 @@ def main_function(cls, recording, peaks, params): spikes["segment_index"] = peaks[mask]["segment_index"] spikes["unit_index"] = peak_labels[mask] - cleaning_method = params["cleaning_method"] - if verbose: - print("We found %d raw clusters, starting to clean with %s..." % (len(labels), cleaning_method)) - - if cleaning_method == "cosine": - wfs_arrays = extract_waveforms_to_buffers( - recording, - spikes, - labels, - nbefore, - nafter, - mode="shared_memory", - return_scaled=False, - folder=None, - dtype=recording.get_dtype(), - sparsity_mask=None, - copy=True, - **params["job_kwargs"], - ) - - labels, peak_labels = remove_duplicates( - wfs_arrays, noise_levels, peak_labels, num_samples, num_chans, **params["cleaning_kwargs"] - ) - - elif cleaning_method == "dip": - wfs_arrays = {} - for label in labels: - mask = label == peak_labels - wfs_arrays[label] = hdbscan_data[mask] - - labels, peak_labels = remove_duplicates_via_dip(wfs_arrays, peak_labels, **params["cleaning_kwargs"]) - - elif cleaning_method == "matching": - # create a tmp folder - if params["tmp_folder"] is None: - name = "".join(random.choices(string.ascii_uppercase + string.digits, k=8)) - tmp_folder = get_global_tmp_folder() / name - else: - tmp_folder = Path(params["tmp_folder"]) - - if params["shared_memory"]: - waveform_folder = None - mode = "memory" - else: - waveform_folder = tmp_folder / "waveforms" - mode = "folder" - - sorting_folder = tmp_folder / "sorting" - sorting = NumpySorting.from_times_labels(spikes["sample_index"], spikes["unit_index"], fs) - sorting = sorting.save(folder=sorting_folder) - we = extract_waveforms( - recording, - sorting, - waveform_folder, - ms_before=params["ms_before"], - ms_after=params["ms_after"], - **params["job_kwargs"], - return_scaled=False, - mode=mode, - ) - - cleaning_matching_params = params["job_kwargs"].copy() - cleaning_matching_params["chunk_duration"] = "100ms" - cleaning_matching_params["n_jobs"] = 1 - cleaning_matching_params["verbose"] = False - cleaning_matching_params["progress_bar"] = False - - cleaning_params = params["cleaning_kwargs"].copy() - cleaning_params["tmp_folder"] = tmp_folder - - labels, peak_labels = remove_duplicates_via_matching( - we, noise_levels, peak_labels, job_kwargs=cleaning_matching_params, **cleaning_params - ) - - if params["tmp_folder"] is None: - shutil.rmtree(tmp_folder) - else: - if not params["shared_memory"]: - shutil.rmtree(tmp_folder / "waveforms") - shutil.rmtree(tmp_folder / "sorting") + print("We found %d raw clusters, starting to clean with matching..." 
% (len(labels))) + + + # create a tmp folder + if params["tmp_folder"] is None: + name = "".join(random.choices(string.ascii_uppercase + string.digits, k=8)) + tmp_folder = get_global_tmp_folder() / name + else: + tmp_folder = Path(params["tmp_folder"]) + + if params["shared_memory"]: + waveform_folder = None + mode = "memory" + else: + waveform_folder = tmp_folder / "waveforms" + mode = "folder" + + sorting_folder = tmp_folder / "sorting" + sorting = NumpySorting.from_times_labels(spikes["sample_index"], spikes["unit_index"], fs) + sorting = sorting.save(folder=sorting_folder) + we = extract_waveforms( + recording, + sorting, + waveform_folder, + ms_before=params["ms_before"], + ms_after=params["ms_after"], + **params["job_kwargs"], + return_scaled=False, + mode=mode, + ) + + cleaning_matching_params = params["job_kwargs"].copy() + cleaning_matching_params["chunk_duration"] = "100ms" + cleaning_matching_params["n_jobs"] = 1 + cleaning_matching_params["verbose"] = False + cleaning_matching_params["progress_bar"] = False + + cleaning_params = params["cleaning_kwargs"].copy() + cleaning_params["tmp_folder"] = tmp_folder + + labels, peak_labels = remove_duplicates_via_matching( + we, noise_levels, peak_labels, job_kwargs=cleaning_matching_params, **cleaning_params + ) + + if params["tmp_folder"] is None: + shutil.rmtree(tmp_folder) + else: + if not params["shared_memory"]: + shutil.rmtree(tmp_folder / "waveforms") + shutil.rmtree(tmp_folder / "sorting") if verbose: print("We kept %d non-duplicated clusters..." % len(labels)) From 242799ff582d886ad8438b9344eea594e07324af Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Mon, 28 Aug 2023 14:02:05 +0200 Subject: [PATCH 07/90] Docs --- .../sortingcomponents/matching/circus.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index ef823316a2..50058ab39e 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -5,7 +5,6 @@ import scipy.spatial -from tqdm import tqdm import scipy try: @@ -190,6 +189,9 @@ class CircusOMPPeeler(BaseTemplateMatchingEngine): computed random_chunk_kwargs: dict Parameters for computing noise levels, if not provided (sub optimal) + sparse_kwargs: dict + Parameters to extract a sparsity mask from the waveform_extractor, if not + already sparse. ----- """ @@ -522,8 +524,9 @@ class CircusPeeler(BaseTemplateMatchingEngine): use_sparse_matrix_threshold: float If density of the templates is below a given threshold, sparse matrix are used (memory efficient) - progress_bar_steps: bool - In order to display or not steps from the algorithm + sparse_kwargs: dict + Parameters to extract a sparsity mask from the waveform_extractor, if not + already sparse. 
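For reference, the sparsity mask that `_prepare_templates` now derives from `sparse_kwargs` is a boolean (num_units, num_channels) array. A minimal sketch of the lookup these patches add, assuming an existing WaveformExtractor `we` (the keyword values mirror the defaults introduced in this series):

    import numpy as np
    from spikeinterface.core import compute_sparsity

    # `we` is a hypothetical WaveformExtractor; 'ptp'/threshold match the
    # sparse_kwargs defaults added in these patches.
    if we.is_sparse():
        mask = we.sparsity.mask  # bool array, shape (num_units, num_channels)
    else:
        mask = compute_sparsity(we, method="ptp", threshold=1).mask
    # per-unit active channel indices, as stored in d['sparsities']
    active_channels = {count: np.nonzero(mask[count])[0] for count in range(mask.shape[0])}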
----- @@ -539,7 +542,6 @@ class CircusPeeler(BaseTemplateMatchingEngine): 'max_amplitude' : 1.5, 'min_amplitude' : 0.5, 'use_sparse_matrix_threshold' : 0.25, - 'progess_bar_steps' : False, 'waveform_extractor': None, 'sparse_kwargs' : {'method' : 'ptp', 'threshold' : 1} } @@ -618,8 +620,6 @@ def _optimize_amplitudes(cls, noise_snippets, d): alpha = 0.5 norms = parameters["norms"] all_units = list(waveform_extractor.sorting.unit_ids) - if parameters["progess_bar_steps"]: - all_units = tqdm(all_units, desc="[2] compute amplitudes") parameters["amplitudes"] = np.zeros((num_templates, 2), dtype=np.float32) noise = templates.dot(noise_snippets) / norms[:, np.newaxis] From 5566c917ddbd32feda022e4293ba0bc93bdd3139 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Tue, 29 Aug 2023 08:46:28 +0200 Subject: [PATCH 08/90] Fix for circus --- .../sortingcomponents/matching/circus.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index 50058ab39e..f79cf60a31 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -357,7 +357,7 @@ def main_function(cls, traces, d): spikes = np.empty(scalar_products.size, dtype=spike_dtype) idx_lookup = np.arange(scalar_products.size).reshape(num_templates, -1) - M = np.zeros((num_peaks, num_peaks), dtype=np.float32) + M = np.zeros((100, 100), dtype=np.float32) all_selections = np.empty((2, scalar_products.size), dtype=np.int32) final_amplitudes = np.zeros(scalar_products.shape, dtype=np.float32) @@ -570,7 +570,7 @@ def _prepare_templates(cls, d): for count, unit_id in enumerate(all_units): d['sparsities'][count], = np.nonzero(sparsity[count]) - templates[count][sparsity[count] == False] = 0 + templates[count][:, ~sparsity[count]] = 0 d['norms'][count] = np.linalg.norm(templates[count]) templates[count] /= d['norms'][count] @@ -666,7 +666,15 @@ def initialize_and_check_kwargs(cls, recording, kwargs): ) default_parameters = cls._prepare_templates(default_parameters) - default_parameters = cls._prepare_overlaps(default_parameters) + + templates = default_parameters['templates'].reshape(len(default_parameters['templates']), + default_parameters['num_samples'], + default_parameters['num_channels']) + + default_parameters['overlaps'] = compute_overlaps(templates, + default_parameters['num_samples'], + default_parameters['num_channels'], + default_parameters['sparsities']) default_parameters["exclude_sweep_size"] = int( default_parameters["exclude_sweep_ms"] * recording.get_sampling_frequency() / 1000.0 From 75c97937c1f5f66714076dba237574eddbb9782c Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Tue, 29 Aug 2023 09:12:16 +0200 Subject: [PATCH 09/90] WIP --- src/spikeinterface/sortingcomponents/matching/circus.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index f79cf60a31..baf7494002 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -432,13 +432,14 @@ def main_function(cls, traces, d): selection = all_selections[:, :num_selection] res_sps = full_sps[selection[0], selection[1]] - if vicinity == 0: + if True: #vicinity == 0: all_amplitudes, _ = potrs(M[:num_selection, :num_selection], res_sps, lower=True, overwrite_b=False) 
all_amplitudes /= norms[selection[0]] else: + # This is not working, need to figure out why is_in_vicinity = np.append(is_in_vicinity, num_selection - 1) - all_amplitudes = np.append(all_amplitudes, np.float32(0)) + all_amplitudes = np.append(all_amplitudes, np.float32(1)) L = M[is_in_vicinity, :][:, is_in_vicinity] all_amplitudes[is_in_vicinity], _ = potrs(L, res_sps[is_in_vicinity], lower=True, overwrite_b=False) From d7e9ac1c803121b7e0fb0d8c4af539340fb82bbe Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 29 Aug 2023 07:14:41 +0000 Subject: [PATCH 10/90] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../sorters/internal/spyking_circus2.py | 56 ++-- .../clustering/clustering_tools.py | 1 - .../clustering/random_projections.py | 1 - .../sortingcomponents/matching/circus.py | 286 +++++++++--------- 4 files changed, 166 insertions(+), 178 deletions(-) diff --git a/src/spikeinterface/sorters/internal/spyking_circus2.py b/src/spikeinterface/sorters/internal/spyking_circus2.py index 6635bbfca1..4ccaef8e29 100644 --- a/src/spikeinterface/sorters/internal/spyking_circus2.py +++ b/src/spikeinterface/sorters/internal/spyking_circus2.py @@ -18,24 +18,22 @@ class Spykingcircus2Sorter(ComponentsBasedSorter): - sorter_name = 'spykingcircus2' + sorter_name = "spykingcircus2" _default_params = { - 'general' : {'ms_before' : 2, 'ms_after' : 2, 'radius_um' : 75}, - 'waveforms' : {'max_spikes_per_unit' : 200, 'overwrite' : True, 'sparse' : True, - 'method' : 'ptp', 'threshold' : 1}, - 'filtering' : {'dtype' : 'float32'}, - 'detection' : {'peak_sign': 'neg', 'detect_threshold': 5}, - 'selection' : {'n_peaks_per_channel' : 5000, 'min_n_peaks' : 20000}, - 'localization' : {}, - 'clustering': {}, - 'matching': {}, - 'apply_preprocessing': True, - 'shared_memory' : True, - 'job_kwargs' : {'n_jobs' : -1, 'chunk_memory' : "10M"} + "general": {"ms_before": 2, "ms_after": 2, "radius_um": 75}, + "waveforms": {"max_spikes_per_unit": 200, "overwrite": True, "sparse": True, "method": "ptp", "threshold": 1}, + "filtering": {"dtype": "float32"}, + "detection": {"peak_sign": "neg", "detect_threshold": 5}, + "selection": {"n_peaks_per_channel": 5000, "min_n_peaks": 20000}, + "localization": {}, + "clustering": {}, + "matching": {}, + "apply_preprocessing": True, + "shared_memory": True, + "job_kwargs": {"n_jobs": -1, "chunk_memory": "10M"}, } - @classmethod def get_sorter_version(cls): return "2.0" @@ -101,15 +99,15 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): ## We launch a clustering (using hdbscan) relying on positions and features extracted on ## the fly from the snippets - clustering_params = params['clustering'].copy() - clustering_params['waveforms_kwargs'] = params['waveforms'] - - for k in ['ms_before', 'ms_after']: - clustering_params['waveforms_kwargs'][k] = params['general'][k] + clustering_params = params["clustering"].copy() + clustering_params["waveforms_kwargs"] = params["waveforms"] + + for k in ["ms_before", "ms_after"]: + clustering_params["waveforms_kwargs"][k] = params["general"][k] - clustering_params.update(dict(shared_memory=params['shared_memory'])) - clustering_params['job_kwargs'] = job_kwargs - clustering_params['tmp_folder'] = sorter_output_folder / "clustering" + clustering_params.update(dict(shared_memory=params["shared_memory"])) + clustering_params["job_kwargs"] = job_kwargs + clustering_params["tmp_folder"] = sorter_output_folder / 
"clustering" labels, peak_labels = find_cluster_from_peaks( recording_f, selected_peaks, method="random_projections", method_kwargs=clustering_params @@ -124,18 +122,18 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): sorting = sorting.save(folder=clustering_folder) - ## We get the templates our of such a clustering - waveforms_params = params['waveforms'].copy() + ## We get the templates our of such a clustering + waveforms_params = params["waveforms"].copy() waveforms_params.update(job_kwargs) - for k in ['ms_before', 'ms_after']: - waveforms_params[k] = params['general'][k] + for k in ["ms_before", "ms_after"]: + waveforms_params[k] = params["general"][k] - if params['shared_memory']: - mode = 'memory' + if params["shared_memory"]: + mode = "memory" waveforms_folder = None else: - mode = 'folder' + mode = "folder" waveforms_folder = sorter_output_folder / "waveforms" we = extract_waveforms( diff --git a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py index f93142152f..b11af55d35 100644 --- a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py +++ b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py @@ -565,7 +565,6 @@ def remove_duplicates_via_matching( if waveform_extractor.is_sparse(): for count, unit_id in enumerate(waveform_extractor.sorting.unit_ids): templates[count][:, ~sparsity[count]] = 0 - zdata = templates.reshape(nb_templates, -1) diff --git a/src/spikeinterface/sortingcomponents/clustering/random_projections.py b/src/spikeinterface/sortingcomponents/clustering/random_projections.py index 5e14fa4736..ac564bda9a 100644 --- a/src/spikeinterface/sortingcomponents/clustering/random_projections.py +++ b/src/spikeinterface/sortingcomponents/clustering/random_projections.py @@ -162,7 +162,6 @@ def main_function(cls, recording, peaks, params): if verbose: print("We found %d raw clusters, starting to clean with matching..." 
% (len(labels))) - # create a tmp folder if params["tmp_folder"] is None: name = "".join(random.choices(string.ascii_uppercase + string.digits, k=8)) diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index baf7494002..b0f132e94d 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -130,8 +130,8 @@ def _freq_domain_conv(in1, in2, axes, shape, cache, calc_fast_len=True): return ret -def compute_overlaps(templates, num_samples, num_channels, sparsities): +def compute_overlaps(templates, num_samples, num_channels, sparsities): num_templates = len(templates) dense_templates = np.zeros((num_templates, num_samples, num_channels), dtype=np.float32) @@ -140,13 +140,13 @@ def compute_overlaps(templates, num_samples, num_channels, sparsities): size = 2 * num_samples - 1 - all_delays = list(range(0, num_samples+1)) + all_delays = list(range(0, num_samples + 1)) overlaps = {} - + for delay in all_delays: source = dense_templates[:, :delay, :].reshape(num_templates, -1) - target = dense_templates[:, num_samples-delay:, :].reshape(num_templates, -1) + target = dense_templates[:, num_samples - delay :, :].reshape(num_templates, -1) overlaps[delay] = scipy.sparse.csr_matrix(source.dot(target.T)) @@ -161,7 +161,7 @@ def compute_overlaps(templates, num_samples, num_channels, sparsities): new_overlaps += [data] return new_overlaps - + class CircusOMPPeeler(BaseTemplateMatchingEngine): """ @@ -204,77 +204,74 @@ class CircusOMPPeeler(BaseTemplateMatchingEngine): "norms": None, "random_chunk_kwargs": {}, "noise_levels": None, - 'sparse_kwargs' : {'method' : 'ptp', 'threshold' : 1}, + "sparse_kwargs": {"method": "ptp", "threshold": 1}, "ignored_ids": [], - "vicinity" : 0 + "vicinity": 0, } @classmethod def _prepare_templates(cls, d): - - waveform_extractor = d['waveform_extractor'] - num_templates = len(d['waveform_extractor'].sorting.unit_ids) + waveform_extractor = d["waveform_extractor"] + num_templates = len(d["waveform_extractor"].sorting.unit_ids) if not waveform_extractor.is_sparse(): - sparsity = compute_sparsity(waveform_extractor, **d['sparse_kwargs']).mask + sparsity = compute_sparsity(waveform_extractor, **d["sparse_kwargs"]).mask else: sparsity = waveform_extractor.sparsity.mask - - templates = waveform_extractor.get_all_templates(mode='median').copy() - d['sparsities'] = {} - d['templates'] = {} - d['norms'] = np.zeros(num_templates, dtype=np.float32) + templates = waveform_extractor.get_all_templates(mode="median").copy() + + d["sparsities"] = {} + d["templates"] = {} + d["norms"] = np.zeros(num_templates, dtype=np.float32) for count, unit_id in enumerate(waveform_extractor.sorting.unit_ids): template = templates[count][:, sparsity[count]] - d['sparsities'][count], = np.nonzero(sparsity[count]) - d['norms'][count] = np.linalg.norm(template) - d['templates'][count] = template/d['norms'][count] + (d["sparsities"][count],) = np.nonzero(sparsity[count]) + d["norms"][count] = np.linalg.norm(template) + d["templates"][count] = template / d["norms"][count] return d @classmethod def initialize_and_check_kwargs(cls, recording, kwargs): - d = cls._default_params.copy() d.update(kwargs) - #assert isinstance(d['waveform_extractor'], WaveformExtractor) + # assert isinstance(d['waveform_extractor'], WaveformExtractor) + + for v in ["omp_min_sps"]: + assert (d[v] >= 0) and (d[v] <= 1), f"{v} should be in [0, 1]" - for v in ['omp_min_sps']: - assert (d[v] >= 
0) and (d[v] <= 1), f'{v} should be in [0, 1]' - - d['num_channels'] = d['waveform_extractor'].recording.get_num_channels() - d['num_samples'] = d['waveform_extractor'].nsamples - d['nbefore'] = d['waveform_extractor'].nbefore - d['nafter'] = d['waveform_extractor'].nafter - d['sampling_frequency'] = d['waveform_extractor'].recording.get_sampling_frequency() - d['vicinity'] *= d['num_samples'] + d["num_channels"] = d["waveform_extractor"].recording.get_num_channels() + d["num_samples"] = d["waveform_extractor"].nsamples + d["nbefore"] = d["waveform_extractor"].nbefore + d["nafter"] = d["waveform_extractor"].nafter + d["sampling_frequency"] = d["waveform_extractor"].recording.get_sampling_frequency() + d["vicinity"] *= d["num_samples"] - if d['noise_levels'] is None: - print('CircusOMPPeeler : noise should be computed outside') - d['noise_levels'] = get_noise_levels(recording, **d['random_chunk_kwargs'], return_scaled=False) + if d["noise_levels"] is None: + print("CircusOMPPeeler : noise should be computed outside") + d["noise_levels"] = get_noise_levels(recording, **d["random_chunk_kwargs"], return_scaled=False) - if d['templates'] is None: + if d["templates"] is None: d = cls._prepare_templates(d) else: - for key in ['norms', 'sparsities']: - assert d[key] is not None, "If templates are provided, %d should also be there" %key - - d['num_templates'] = len(d['templates']) + for key in ["norms", "sparsities"]: + assert d[key] is not None, "If templates are provided, %d should also be there" % key - if d['overlaps'] is None: - d['overlaps'] = compute_overlaps(d['templates'], d['num_samples'], d['num_channels'], d['sparsities']) + d["num_templates"] = len(d["templates"]) - d['ignored_ids'] = np.array(d['ignored_ids']) + if d["overlaps"] is None: + d["overlaps"] = compute_overlaps(d["templates"], d["num_samples"], d["num_channels"], d["sparsities"]) - omp_min_sps = d['omp_min_sps'] - #nb_active_channels = np.array([len(d['sparsities'][count]) for count in range(d['num_templates'])]) - d['stop_criteria'] = omp_min_sps * np.sqrt(d['noise_levels'].sum() * d['num_samples']) + d["ignored_ids"] = np.array(d["ignored_ids"]) - return d + omp_min_sps = d["omp_min_sps"] + # nb_active_channels = np.array([len(d['sparsities'][count]) for count in range(d['num_templates'])]) + d["stop_criteria"] = omp_min_sps * np.sqrt(d["noise_levels"].sum() * d["num_samples"]) + return d @classmethod def serialize_method_kwargs(cls, kwargs): @@ -294,27 +291,27 @@ def get_margin(cls, recording, kwargs): @classmethod def main_function(cls, traces, d): - templates = d['templates'] - num_templates = d['num_templates'] - num_channels = d['num_channels'] - num_samples = d['num_samples'] - overlaps = d['overlaps'] - norms = d['norms'] - nbefore = d['nbefore'] - nafter = d['nafter'] + templates = d["templates"] + num_templates = d["num_templates"] + num_channels = d["num_channels"] + num_samples = d["num_samples"] + overlaps = d["overlaps"] + norms = d["norms"] + nbefore = d["nbefore"] + nafter = d["nafter"] omp_tol = np.finfo(np.float32).eps - num_samples = d['nafter'] + d['nbefore'] + num_samples = d["nafter"] + d["nbefore"] neighbor_window = num_samples - 1 - min_amplitude, max_amplitude = d['amplitudes'] - sparsities = d['sparsities'] - ignored_ids = d['ignored_ids'] - stop_criteria = d['stop_criteria'] - vicinity = d['vicinity'] + min_amplitude, max_amplitude = d["amplitudes"] + sparsities = d["sparsities"] + ignored_ids = d["ignored_ids"] + stop_criteria = d["stop_criteria"] + vicinity = d["vicinity"] - if 
'cached_fft_kernels' not in d: - d['cached_fft_kernels'] = {'fshape' : 0} + if "cached_fft_kernels" not in d: + d["cached_fft_kernels"] = {"fshape": 0} - cached_fft_kernels = d['cached_fft_kernels'] + cached_fft_kernels = d["cached_fft_kernels"] num_timesteps = len(traces) @@ -326,24 +323,22 @@ def main_function(cls, traces, d): dummy_traces = np.empty((num_channels, num_timesteps), dtype=np.float32) fshape, axes = get_scipy_shape(dummy_filter, traces, axes=1) - fft_cache = {'full' : sp_fft.rfftn(traces, fshape, axes=axes)} + fft_cache = {"full": sp_fft.rfftn(traces, fshape, axes=axes)} scalar_products = np.empty((num_templates, num_peaks), dtype=np.float32) - flagged_chunk = cached_fft_kernels['fshape'] != fshape[0] + flagged_chunk = cached_fft_kernels["fshape"] != fshape[0] for i in range(num_templates): - if i not in ignored_ids: - if i not in cached_fft_kernels or flagged_chunk: kernel_filter = np.ascontiguousarray(templates[i][::-1].T) - cached_fft_kernels.update({i : sp_fft.rfftn(kernel_filter, fshape, axes=axes)}) - cached_fft_kernels['fshape'] = fshape[0] + cached_fft_kernels.update({i: sp_fft.rfftn(kernel_filter, fshape, axes=axes)}) + cached_fft_kernels["fshape"] = fshape[0] - fft_cache.update({'mask' : sparsities[i], 'template' : cached_fft_kernels[i]}) + fft_cache.update({"mask": sparsities[i], "template": cached_fft_kernels[i]}) - convolution = fftconvolve_with_cache(dummy_filter, dummy_traces, fft_cache, axes=1, mode='valid') + convolution = fftconvolve_with_cache(dummy_filter, dummy_traces, fft_cache, axes=1, mode="valid") if len(convolution) > 0: scalar_products[i] = convolution.sum(0) else: @@ -368,17 +363,15 @@ def main_function(cls, traces, d): neighbors = {} cached_overlaps = {} - is_valid = (scalar_products > stop_criteria) + is_valid = scalar_products > stop_criteria all_amplitudes = np.zeros(0, dtype=np.float32) is_in_vicinity = np.zeros(0, dtype=np.int32) while np.any(is_valid): - best_amplitude_ind = scalar_products[is_valid].argmax() best_cluster_ind, peak_index = np.unravel_index(idx_lookup[is_valid][best_amplitude_ind], idx_lookup.shape) - - if num_selection > 0: + if num_selection > 0: delta_t = selection[1] - peak_index idx = np.where((delta_t < neighbor_window) & (delta_t > -num_samples))[0] myline = num_samples + delta_t[idx] @@ -387,17 +380,21 @@ def main_function(cls, traces, d): cached_overlaps[best_cluster_ind] = overlaps[best_cluster_ind].toarray() if num_selection == M.shape[0]: - Z = np.zeros((2*num_selection, 2*num_selection), dtype=np.float32) + Z = np.zeros((2 * num_selection, 2 * num_selection), dtype=np.float32) Z[:num_selection, :num_selection] = M M = Z M[num_selection, idx] = cached_overlaps[best_cluster_ind][selection[0, idx], myline] if vicinity == 0: - scipy.linalg.solve_triangular(M[:num_selection, :num_selection], M[num_selection, :num_selection], trans=0, - lower=1, - overwrite_b=True, - check_finite=False) + scipy.linalg.solve_triangular( + M[:num_selection, :num_selection], + M[num_selection, :num_selection], + trans=0, + lower=1, + overwrite_b=True, + check_finite=False, + ) v = nrm2(M[num_selection, :num_selection]) ** 2 Lkk = 1 - v @@ -408,13 +405,11 @@ def main_function(cls, traces, d): is_in_vicinity = np.where(np.abs(delta_t) < vicinity)[0] if len(is_in_vicinity) > 0: - L = M[is_in_vicinity, :][:, is_in_vicinity] - M[num_selection, is_in_vicinity] = scipy.linalg.solve_triangular(L, M[num_selection, is_in_vicinity], trans=0, - lower=1, - overwrite_b=True, - check_finite=False) + M[num_selection, is_in_vicinity] = 
scipy.linalg.solve_triangular( + L, M[num_selection, is_in_vicinity], trans=0, lower=1, overwrite_b=True, check_finite=False + ) v = nrm2(M[num_selection, is_in_vicinity]) ** 2 Lkk = 1 - v @@ -432,55 +427,52 @@ def main_function(cls, traces, d): selection = all_selections[:, :num_selection] res_sps = full_sps[selection[0], selection[1]] - if True: #vicinity == 0: - all_amplitudes, _ = potrs(M[:num_selection, :num_selection], res_sps, - lower=True, overwrite_b=False) + if True: # vicinity == 0: + all_amplitudes, _ = potrs(M[:num_selection, :num_selection], res_sps, lower=True, overwrite_b=False) all_amplitudes /= norms[selection[0]] else: # This is not working, need to figure out why is_in_vicinity = np.append(is_in_vicinity, num_selection - 1) all_amplitudes = np.append(all_amplitudes, np.float32(1)) L = M[is_in_vicinity, :][:, is_in_vicinity] - all_amplitudes[is_in_vicinity], _ = potrs(L, res_sps[is_in_vicinity], - lower=True, overwrite_b=False) + all_amplitudes[is_in_vicinity], _ = potrs(L, res_sps[is_in_vicinity], lower=True, overwrite_b=False) all_amplitudes[is_in_vicinity] /= norms[selection[0][is_in_vicinity]] - diff_amplitudes = (all_amplitudes - final_amplitudes[selection[0], selection[1]]) + diff_amplitudes = all_amplitudes - final_amplitudes[selection[0], selection[1]] modified = np.where(np.abs(diff_amplitudes) > omp_tol)[0] final_amplitudes[selection[0], selection[1]] = all_amplitudes for i in modified: - tmp_best, tmp_peak = selection[:, i] - diff_amp = diff_amplitudes[i]*norms[tmp_best] - + diff_amp = diff_amplitudes[i] * norms[tmp_best] + if not tmp_best in cached_overlaps: cached_overlaps[tmp_best] = overlaps[tmp_best].toarray() if not tmp_peak in neighbors.keys(): idx = [max(0, tmp_peak - num_samples), min(num_peaks, tmp_peak + neighbor_window)] tdx = [num_samples + idx[0] - tmp_peak, num_samples + idx[1] - tmp_peak] - neighbors[tmp_peak] = {'idx' : idx, 'tdx' : tdx} + neighbors[tmp_peak] = {"idx": idx, "tdx": tdx} - idx = neighbors[tmp_peak]['idx'] - tdx = neighbors[tmp_peak]['tdx'] + idx = neighbors[tmp_peak]["idx"] + tdx = neighbors[tmp_peak]["tdx"] - to_add = diff_amp * cached_overlaps[tmp_best][:, tdx[0]:tdx[1]] - scalar_products[:, idx[0]:idx[1]] -= to_add + to_add = diff_amp * cached_overlaps[tmp_best][:, tdx[0] : tdx[1]] + scalar_products[:, idx[0] : idx[1]] -= to_add - is_valid = (scalar_products > stop_criteria) + is_valid = scalar_products > stop_criteria - is_valid = (final_amplitudes > min_amplitude)*(final_amplitudes < max_amplitude) + is_valid = (final_amplitudes > min_amplitude) * (final_amplitudes < max_amplitude) valid_indices = np.where(is_valid) num_spikes = len(valid_indices[0]) - spikes['sample_index'][:num_spikes] = valid_indices[1] + d['nbefore'] - spikes['channel_index'][:num_spikes] = 0 - spikes['cluster_index'][:num_spikes] = valid_indices[0] - spikes['amplitude'][:num_spikes] = final_amplitudes[valid_indices[0], valid_indices[1]] - + spikes["sample_index"][:num_spikes] = valid_indices[1] + d["nbefore"] + spikes["channel_index"][:num_spikes] = 0 + spikes["cluster_index"][:num_spikes] = valid_indices[0] + spikes["amplitude"][:num_spikes] = final_amplitudes[valid_indices[0], valid_indices[1]] + spikes = spikes[:num_spikes] - order = np.argsort(spikes['sample_index']) + order = np.argsort(spikes["sample_index"]) spikes = spikes[order] return spikes @@ -534,58 +526,56 @@ class CircusPeeler(BaseTemplateMatchingEngine): """ _default_params = { - 'peak_sign': 'neg', - 'exclude_sweep_ms': 0.1, - 'jitter_ms' : 0.1, - 'detect_threshold': 5, - 
'noise_levels': None, - 'random_chunk_kwargs': {}, - 'max_amplitude' : 1.5, - 'min_amplitude' : 0.5, - 'use_sparse_matrix_threshold' : 0.25, - 'waveform_extractor': None, - 'sparse_kwargs' : {'method' : 'ptp', 'threshold' : 1} + "peak_sign": "neg", + "exclude_sweep_ms": 0.1, + "jitter_ms": 0.1, + "detect_threshold": 5, + "noise_levels": None, + "random_chunk_kwargs": {}, + "max_amplitude": 1.5, + "min_amplitude": 0.5, + "use_sparse_matrix_threshold": 0.25, + "waveform_extractor": None, + "sparse_kwargs": {"method": "ptp", "threshold": 1}, } @classmethod def _prepare_templates(cls, d): - - waveform_extractor = d['waveform_extractor'] - num_samples = d['num_samples'] - num_channels = d['num_channels'] - num_templates = d['num_templates'] - use_sparse_matrix_threshold = d['use_sparse_matrix_threshold'] + waveform_extractor = d["waveform_extractor"] + num_samples = d["num_samples"] + num_channels = d["num_channels"] + num_templates = d["num_templates"] + use_sparse_matrix_threshold = d["use_sparse_matrix_threshold"] - d['norms'] = np.zeros(num_templates, dtype=np.float32) + d["norms"] = np.zeros(num_templates, dtype=np.float32) - all_units = list(d['waveform_extractor'].sorting.unit_ids) + all_units = list(d["waveform_extractor"].sorting.unit_ids) if not waveform_extractor.is_sparse(): - sparsity = compute_sparsity(waveform_extractor, **d['sparse_kwargs']).mask + sparsity = compute_sparsity(waveform_extractor, **d["sparse_kwargs"]).mask else: sparsity = waveform_extractor.sparsity.mask - templates = waveform_extractor.get_all_templates(mode='median').copy() - d['sparsities'] = {} - - for count, unit_id in enumerate(all_units): + templates = waveform_extractor.get_all_templates(mode="median").copy() + d["sparsities"] = {} - d['sparsities'][count], = np.nonzero(sparsity[count]) + for count, unit_id in enumerate(all_units): + (d["sparsities"][count],) = np.nonzero(sparsity[count]) templates[count][:, ~sparsity[count]] = 0 - d['norms'][count] = np.linalg.norm(templates[count]) - templates[count] /= d['norms'][count] + d["norms"][count] = np.linalg.norm(templates[count]) + templates[count] /= d["norms"][count] templates = templates.reshape(num_templates, -1) - nnz = np.sum(templates != 0)/(num_templates * num_samples * num_channels) + nnz = np.sum(templates != 0) / (num_templates * num_samples * num_channels) if nnz <= use_sparse_matrix_threshold: templates = scipy.sparse.csr_matrix(templates) - print(f'Templates are automatically sparsified (sparsity level is {nnz})') - d['is_dense'] = False + print(f"Templates are automatically sparsified (sparsity level is {nnz})") + d["is_dense"] = False else: - d['is_dense'] = True + d["is_dense"] = True - d['templates'] = templates + d["templates"] = templates return d @@ -595,9 +585,9 @@ def _mcc_error(cls, bounds, good, bad): fp = np.sum((bounds[0] <= bad) & (bad <= bounds[1])) tp = np.sum((bounds[0] <= good) & (good <= bounds[1])) tn = np.sum((bad < bounds[0]) | (bad > bounds[1])) - denom = (tp+fp)*(tp+fn)*(tn+fp)*(tn+fn) + denom = (tp + fp) * (tp + fn) * (tn + fp) * (tn + fn) if denom > 0: - mcc = 1 - (tp*tn - fp*fn)/np.sqrt(denom) + mcc = 1 - (tp * tn - fp * fn) / np.sqrt(denom) else: mcc = 1 return mcc @@ -668,14 +658,16 @@ def initialize_and_check_kwargs(cls, recording, kwargs): default_parameters = cls._prepare_templates(default_parameters) - templates = default_parameters['templates'].reshape(len(default_parameters['templates']), - default_parameters['num_samples'], - default_parameters['num_channels']) + templates = 
default_parameters["templates"].reshape( + len(default_parameters["templates"]), default_parameters["num_samples"], default_parameters["num_channels"] + ) - default_parameters['overlaps'] = compute_overlaps(templates, - default_parameters['num_samples'], - default_parameters['num_channels'], - default_parameters['sparsities']) + default_parameters["overlaps"] = compute_overlaps( + templates, + default_parameters["num_samples"], + default_parameters["num_channels"], + default_parameters["sparsities"], + ) default_parameters["exclude_sweep_size"] = int( default_parameters["exclude_sweep_ms"] * recording.get_sampling_frequency() / 1000.0 From 14c8f58571fefc60eaa544da476c0210d45d2b92 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Tue, 29 Aug 2023 11:09:02 +0200 Subject: [PATCH 11/90] useless dependency --- src/spikeinterface/sorters/internal/spyking_circus2.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/spikeinterface/sorters/internal/spyking_circus2.py b/src/spikeinterface/sorters/internal/spyking_circus2.py index 4ccaef8e29..ec2a74b6bb 100644 --- a/src/spikeinterface/sorters/internal/spyking_circus2.py +++ b/src/spikeinterface/sorters/internal/spyking_circus2.py @@ -3,7 +3,6 @@ import os import shutil import numpy as np -import psutil from spikeinterface.core import NumpySorting, load_extractor, BaseRecording, get_noise_levels, extract_waveforms from spikeinterface.core.job_tools import fix_job_kwargs From e455da3f46cc5529986f60c56cb7868391f12af5 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Tue, 29 Aug 2023 13:51:38 +0200 Subject: [PATCH 12/90] Fix for classical circus with sparsity --- .../sortingcomponents/matching/circus.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index b0f132e94d..cdacfe1304 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -136,6 +136,7 @@ def compute_overlaps(templates, num_samples, num_channels, sparsities): dense_templates = np.zeros((num_templates, num_samples, num_channels), dtype=np.float32) for i in range(num_templates): + print(templates[i].shape, len(sparsities[i])) dense_templates[i, :, sparsities[i]] = templates[i].T size = 2 * num_samples - 1 @@ -558,12 +559,14 @@ def _prepare_templates(cls, d): templates = waveform_extractor.get_all_templates(mode="median").copy() d["sparsities"] = {} + d["circus_templates"] = {} for count, unit_id in enumerate(all_units): (d["sparsities"][count],) = np.nonzero(sparsity[count]) templates[count][:, ~sparsity[count]] = 0 d["norms"][count] = np.linalg.norm(templates[count]) templates[count] /= d["norms"][count] + d['circus_templates'][count] = templates[count][:, sparsity[count]] templates = templates.reshape(num_templates, -1) @@ -617,7 +620,7 @@ def _optimize_amplitudes(cls, noise_snippets, d): all_amps = {} for count, unit_id in enumerate(all_units): - waveform = waveform_extractor.get_waveforms(unit_id) + waveform = waveform_extractor.get_waveforms(unit_id, force_dense=True) snippets = waveform.reshape(waveform.shape[0], -1).T amps = templates.dot(snippets) / norms[:, np.newaxis] good = amps[count, :].flatten() @@ -658,12 +661,8 @@ def initialize_and_check_kwargs(cls, recording, kwargs): default_parameters = cls._prepare_templates(default_parameters) - templates = default_parameters["templates"].reshape( - len(default_parameters["templates"]), default_parameters["num_samples"], 
default_parameters["num_channels"] - ) - default_parameters["overlaps"] = compute_overlaps( - templates, + default_parameters['circus_templates'], default_parameters["num_samples"], default_parameters["num_channels"], default_parameters["sparsities"], From 2f84c6b632cd17391ba1eff0b89578b87f2fb892 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 29 Aug 2023 11:51:59 +0000 Subject: [PATCH 13/90] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/sortingcomponents/matching/circus.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index cdacfe1304..e92e7929f6 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -566,7 +566,7 @@ def _prepare_templates(cls, d): templates[count][:, ~sparsity[count]] = 0 d["norms"][count] = np.linalg.norm(templates[count]) templates[count] /= d["norms"][count] - d['circus_templates'][count] = templates[count][:, sparsity[count]] + d["circus_templates"][count] = templates[count][:, sparsity[count]] templates = templates.reshape(num_templates, -1) @@ -662,7 +662,7 @@ def initialize_and_check_kwargs(cls, recording, kwargs): default_parameters = cls._prepare_templates(default_parameters) default_parameters["overlaps"] = compute_overlaps( - default_parameters['circus_templates'], + default_parameters["circus_templates"], default_parameters["num_samples"], default_parameters["num_channels"], default_parameters["sparsities"], From 3d849fb91680f05c27c52dc240f61e65490c4a16 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Tue, 29 Aug 2023 13:52:34 +0200 Subject: [PATCH 14/90] Fix for classical circus with sparsity --- src/spikeinterface/sortingcomponents/matching/circus.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index cdacfe1304..06cd99d92a 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -136,7 +136,6 @@ def compute_overlaps(templates, num_samples, num_channels, sparsities): dense_templates = np.zeros((num_templates, num_samples, num_channels), dtype=np.float32) for i in range(num_templates): - print(templates[i].shape, len(sparsities[i])) dense_templates[i, :, sparsities[i]] = templates[i].T size = 2 * num_samples - 1 From 7dcfdb0b325ffefb980c54ac5070339a490f8b49 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Tue, 29 Aug 2023 14:56:23 +0200 Subject: [PATCH 15/90] Fixing slow tests with SC2 --- src/spikeinterface/sorters/internal/spyking_circus2.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/spikeinterface/sorters/internal/spyking_circus2.py b/src/spikeinterface/sorters/internal/spyking_circus2.py index ec2a74b6bb..628ea991c1 100644 --- a/src/spikeinterface/sorters/internal/spyking_circus2.py +++ b/src/spikeinterface/sorters/internal/spyking_circus2.py @@ -30,7 +30,7 @@ class Spykingcircus2Sorter(ComponentsBasedSorter): "matching": {}, "apply_preprocessing": True, "shared_memory": True, - "job_kwargs": {"n_jobs": -1, "chunk_memory": "10M"}, + "job_kwargs": {"n_jobs": -1}, } @classmethod @@ -145,6 +145,9 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): 
matching_params.update({"noise_levels": noise_levels}) matching_job_params = job_kwargs.copy() + if 'chunk_memory' in matching_job_params: + matching_job_params.pop('chunk_memory') + matching_job_params["chunk_duration"] = "100ms" spikes = find_spikes_from_templates( From 9f196b58acf4a5d2cc1ebc45a0ee969c03451d83 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 29 Aug 2023 12:58:57 +0000 Subject: [PATCH 16/90] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/sorters/internal/spyking_circus2.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/spikeinterface/sorters/internal/spyking_circus2.py b/src/spikeinterface/sorters/internal/spyking_circus2.py index 628ea991c1..8a7b353bd1 100644 --- a/src/spikeinterface/sorters/internal/spyking_circus2.py +++ b/src/spikeinterface/sorters/internal/spyking_circus2.py @@ -145,8 +145,8 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): matching_params.update({"noise_levels": noise_levels}) matching_job_params = job_kwargs.copy() - if 'chunk_memory' in matching_job_params: - matching_job_params.pop('chunk_memory') + if "chunk_memory" in matching_job_params: + matching_job_params.pop("chunk_memory") matching_job_params["chunk_duration"] = "100ms" From 1c7c8020147e24997e3c34e374c76df8a72bc684 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Tue, 29 Aug 2023 15:25:58 +0200 Subject: [PATCH 17/90] WIP for cleaning --- .../sortingcomponents/clustering/random_projections.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/spikeinterface/sortingcomponents/clustering/random_projections.py b/src/spikeinterface/sortingcomponents/clustering/random_projections.py index ac564bda9a..d9a317ca06 100644 --- a/src/spikeinterface/sortingcomponents/clustering/random_projections.py +++ b/src/spikeinterface/sortingcomponents/clustering/random_projections.py @@ -191,6 +191,8 @@ def main_function(cls, recording, peaks, params): ) cleaning_matching_params = params["job_kwargs"].copy() + if 'chunk_memory' in cleaning_matching_params: + cleaning_matching_params.pop('chunk_memory') cleaning_matching_params["chunk_duration"] = "100ms" cleaning_matching_params["n_jobs"] = 1 cleaning_matching_params["verbose"] = False From af4f1877aa800ff0277bd40a2aa83fc408b1ef08 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 29 Aug 2023 13:31:36 +0000 Subject: [PATCH 18/90] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../sortingcomponents/clustering/random_projections.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/clustering/random_projections.py b/src/spikeinterface/sortingcomponents/clustering/random_projections.py index d9a317ca06..d82f9a7808 100644 --- a/src/spikeinterface/sortingcomponents/clustering/random_projections.py +++ b/src/spikeinterface/sortingcomponents/clustering/random_projections.py @@ -191,8 +191,8 @@ def main_function(cls, recording, peaks, params): ) cleaning_matching_params = params["job_kwargs"].copy() - if 'chunk_memory' in cleaning_matching_params: - cleaning_matching_params.pop('chunk_memory') + if "chunk_memory" in cleaning_matching_params: + cleaning_matching_params.pop("chunk_memory") cleaning_matching_params["chunk_duration"] = "100ms" cleaning_matching_params["n_jobs"] = 1 
cleaning_matching_params["verbose"] = False From 8c2af8fcfa4c0ab4aa058e4778545b4cee64fa08 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Tue, 29 Aug 2023 18:09:23 +0200 Subject: [PATCH 19/90] WIP --- .../benchmark/benchmark_matching.py | 51 +++++++++++-------- 1 file changed, 30 insertions(+), 21 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/benchmark/benchmark_matching.py b/src/spikeinterface/sortingcomponents/benchmark/benchmark_matching.py index 07c7db155c..8ce8efe25f 100644 --- a/src/spikeinterface/sortingcomponents/benchmark/benchmark_matching.py +++ b/src/spikeinterface/sortingcomponents/benchmark/benchmark_matching.py @@ -600,29 +600,38 @@ def plot_comparison_matching( else: ax = axs[j] comp1, comp2 = comp_per_method[method1], comp_per_method[method2] - for performance, color in zip(performance_names, colors): - perf1 = comp1.get_performance()[performance] - perf2 = comp2.get_performance()[performance] - ax.plot(perf2, perf1, ".", label=performance, color=color) - ax.plot([0, 1], [0, 1], "k--", alpha=0.5) - ax.set_ylim(ylim) - ax.set_xlim(ylim) - ax.spines[["right", "top"]].set_visible(False) - ax.set_aspect("equal") - - if j == 0: - ax.set_ylabel(f"{method1}") - else: - ax.set_yticks([]) - if i == num_methods - 1: - ax.set_xlabel(f"{method2}") + if i <= j: + for performance, color in zip(performance_names, colors): + perf1 = comp1.get_performance()[performance] + perf2 = comp2.get_performance()[performance] + ax.plot(perf2, perf1, ".", label=performance, color=color) + + ax.plot([0, 1], [0, 1], "k--", alpha=0.5) + ax.set_ylim(ylim) + ax.set_xlim(ylim) + ax.spines[["right", "top"]].set_visible(False) + ax.set_aspect("equal") + + if j == i: + ax.set_ylabel(f"{method1}") + else: + ax.set_yticks([]) + if i == j: + ax.set_xlabel(f"{method2}") + else: + ax.set_xticks([]) + if i == num_methods - 1 and j == num_methods - 1: + patches = [] + for color, name in zip(colors, performance_names): + patches.append(mpatches.Patch(color=color, label=name)) + ax.legend(handles=patches, bbox_to_anchor=(1.05, 1), loc="upper left", borderaxespad=0.0) else: + ax.spines['bottom'].set_visible(False) + ax.spines['left'].set_visible(False) + ax.spines['top'].set_visible(False) + ax.spines['right'].set_visible(False) ax.set_xticks([]) - if i == num_methods - 1 and j == num_methods - 1: - patches = [] - for color, name in zip(colors, performance_names): - patches.append(mpatches.Patch(color=color, label=name)) - ax.legend(handles=patches, bbox_to_anchor=(1.05, 1), loc="upper left", borderaxespad=0.0) + ax.set_yticks([]) plt.tight_layout(h_pad=0, w_pad=0) return fig, axs From 30d1ecce4249a3e645ca09be39799277186e11c6 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Mon, 4 Sep 2023 11:47:37 +0200 Subject: [PATCH 20/90] Allow to postprocess on read-only waveform folders --- src/spikeinterface/core/waveform_extractor.py | 55 ++++++++++--------- .../tests/common_extension_tests.py | 23 +++++++- .../postprocessing/unit_localization.py | 4 +- 3 files changed, 54 insertions(+), 28 deletions(-) diff --git a/src/spikeinterface/core/waveform_extractor.py b/src/spikeinterface/core/waveform_extractor.py index 877c9fb00c..e404e74be4 100644 --- a/src/spikeinterface/core/waveform_extractor.py +++ b/src/spikeinterface/core/waveform_extractor.py @@ -4,6 +4,7 @@ import shutil from typing import Iterable, Literal, Optional import json +import os import numpy as np from copy import deepcopy @@ -87,6 +88,7 @@ def __init__( self._template_cache = {} self._params = {} self._loaded_extensions = dict() + 
self._is_read_only = False self.sparsity = sparsity self.folder = folder @@ -103,6 +105,8 @@ def __init__( if (self.folder / "params.json").is_file(): with open(str(self.folder / "params.json"), "r") as f: self._params = json.load(f) + if not os.access(self.folder, os.W_OK): + self._is_read_only = True else: # this is in case of in-memory self.format = "memory" @@ -399,6 +403,9 @@ def return_scaled(self) -> bool: def dtype(self): return self._params["dtype"] + def is_read_only(self) -> bool: + return self._is_read_only + def has_recording(self) -> bool: return self._recording is not None @@ -514,18 +521,8 @@ def is_extension(self, extension_name) -> bool: exists: bool Whether the extension exists or not """ - if self.folder is None: - return extension_name in self._loaded_extensions - else: - if self.format == "binary": - return (self.folder / extension_name).is_dir() and ( - self.folder / extension_name / "params.json" - ).is_file() - elif self.format == "zarr": - return ( - extension_name in self._waveforms_root.keys() - and "params" in self._waveforms_root[extension_name].attrs.keys() - ) + # Extensions are always loaded in memory + return extension_name in self._loaded_extensions def load_extension(self, extension_name): """ @@ -1735,20 +1732,28 @@ def __init__(self, waveform_extractor): self.waveform_extractor = waveform_extractor if self.waveform_extractor.folder is not None: - self.folder = self.waveform_extractor.folder - self.format = self.waveform_extractor.format - if self.format == "binary": - self.extension_folder = self.folder / self.extension_name - if not self.extension_folder.is_dir(): - self.extension_folder.mkdir() - else: - import zarr - - zarr_root = zarr.open(self.folder, mode="r+") - if self.extension_name not in zarr_root.keys(): - self.extension_group = zarr_root.create_group(self.extension_name) + if not self.waveform_extractor.is_read_only(): + self.folder = self.waveform_extractor.folder + self.format = self.waveform_extractor.format + if self.format == "binary": + self.extension_folder = self.folder / self.extension_name + if not self.extension_folder.is_dir(): + self.extension_folder.mkdir() else: - self.extension_group = zarr_root[self.extension_name] + import zarr + + zarr_root = zarr.open(self.folder, mode="r+") + if self.extension_name not in zarr_root.keys(): + self.extension_group = zarr_root.create_group(self.extension_name) + else: + self.extension_group = zarr_root[self.extension_name] + else: + warn( + "WaveformExtractor: cannot save extension in read-only mode. " "Extension will be saved in memory." 
+ ) + self.format = "memory" + self.extension_folder = None + self.folder = None else: self.format = "memory" self.extension_folder = None diff --git a/src/spikeinterface/postprocessing/tests/common_extension_tests.py b/src/spikeinterface/postprocessing/tests/common_extension_tests.py index b9c72f9b99..f44d58470c 100644 --- a/src/spikeinterface/postprocessing/tests/common_extension_tests.py +++ b/src/spikeinterface/postprocessing/tests/common_extension_tests.py @@ -4,7 +4,7 @@ import shutil from pathlib import Path -from spikeinterface import extract_waveforms, load_extractor, compute_sparsity +from spikeinterface import extract_waveforms, load_extractor, load_waveforms, compute_sparsity from spikeinterface.extractors import toy_example if hasattr(pytest, "global_test_folder"): @@ -76,6 +76,15 @@ def setUp(self): overwrite=True, ) self.we2 = we2 + + # make we read-only + we_ro_folder = cache_folder / "toy_waveforms_2seg_readonly" + if not we_ro_folder.is_dir(): + shutil.copytree(we2.folder, we_ro_folder) + # change permissions (R+X) + we_ro_folder.chmod(0o555) + self.we_ro = load_waveforms(we_ro_folder) + self.sparsity2 = compute_sparsity(we2, method="radius", radius_um=30) we_memory = extract_waveforms( recording, @@ -97,6 +106,11 @@ def setUp(self): folder=cache_folder / "toy_sorting_2seg_sparse", format="binary", sparsity=sparsity, overwrite=True ) + def tearDown(self): + # allow pytest to delete RO folder + we_ro_folder = cache_folder / "toy_waveforms_2seg_readonly" + we_ro_folder.chmod(0o777) + def _test_extension_folder(self, we, in_memory=False): if self.extension_function_kwargs_list is None: extension_function_kwargs_list = [dict()] @@ -177,3 +191,10 @@ def test_extension(self): assert ext_data_mem.equals(ext_data_zarr) else: print(f"{ext_data_name} of type {type(ext_data_mem)} not tested.") + + # read-only - Extension is memory only + _ = self.extension_class.get_extension_function()(self.we_ro, load_if_exists=False) + assert self.extension_class.extension_name in self.we_ro.get_available_extension_names() + ext_ro = self.we_ro.load_extension(self.extension_class.extension_name) + assert ext_ro.format == "memory" + assert ext_ro.extension_folder is None diff --git a/src/spikeinterface/postprocessing/unit_localization.py b/src/spikeinterface/postprocessing/unit_localization.py index 740fdd234b..d2739f69dd 100644 --- a/src/spikeinterface/postprocessing/unit_localization.py +++ b/src/spikeinterface/postprocessing/unit_localization.py @@ -570,6 +570,8 @@ def enforce_decrease_shells_data(wf_data, maxchan, radial_parents, in_place=Fals def get_grid_convolution_templates_and_weights( contact_locations, radius_um=50, upsampling_um=5, sigma_um=np.linspace(10, 50.0, 5), margin_um=50 ): + import sklearn.metrics + x_min, x_max = contact_locations[:, 0].min(), contact_locations[:, 0].max() y_min, y_max = contact_locations[:, 1].min(), contact_locations[:, 1].max() @@ -593,8 +595,6 @@ def get_grid_convolution_templates_and_weights( template_positions[:, 0] = all_x.flatten() template_positions[:, 1] = all_y.flatten() - import sklearn - # mask to get nearest template given a channel dist = sklearn.metrics.pairwise_distances(contact_locations, template_positions) nearest_template_mask = dist < radius_um From b8ee13c208cf928573595d941803b11e38278eb0 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Mon, 4 Sep 2023 15:02:13 +0200 Subject: [PATCH 21/90] Restore extension loading --- src/spikeinterface/core/waveform_extractor.py | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 
deletions(-) diff --git a/src/spikeinterface/core/waveform_extractor.py b/src/spikeinterface/core/waveform_extractor.py index e404e74be4..6083732c11 100644 --- a/src/spikeinterface/core/waveform_extractor.py +++ b/src/spikeinterface/core/waveform_extractor.py @@ -521,8 +521,22 @@ def is_extension(self, extension_name) -> bool: exists: bool Whether the extension exists or not """ - # Extensions are always loaded in memory - return extension_name in self._loaded_extensions + if self.folder is None: + return extension_name in self._loaded_extensions + else: + # Extensions already loaded in memory + if extension_name in self._loaded_extensions: + return True + else: + if self.format == "binary": + return (self.folder / extension_name).is_dir() and ( + self.folder / extension_name / "params.json" + ).is_file() + elif self.format == "zarr": + return ( + extension_name in self._waveforms_root.keys() + and "params" in self._waveforms_root[extension_name].attrs.keys() + ) def load_extension(self, extension_name): """ From def525c20a463b625c2f014fd5a84be4f79a00ef Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Tue, 5 Sep 2023 15:38:06 +0200 Subject: [PATCH 22/90] handle re-loading correctly --- src/spikeinterface/core/waveform_extractor.py | 140 ++++++++++-------- 1 file changed, 77 insertions(+), 63 deletions(-) diff --git a/src/spikeinterface/core/waveform_extractor.py b/src/spikeinterface/core/waveform_extractor.py index 6083732c11..39d115e22c 100644 --- a/src/spikeinterface/core/waveform_extractor.py +++ b/src/spikeinterface/core/waveform_extractor.py @@ -1746,28 +1746,39 @@ def __init__(self, waveform_extractor): self.waveform_extractor = waveform_extractor if self.waveform_extractor.folder is not None: - if not self.waveform_extractor.is_read_only(): - self.folder = self.waveform_extractor.folder - self.format = self.waveform_extractor.format - if self.format == "binary": - self.extension_folder = self.folder / self.extension_name - if not self.extension_folder.is_dir(): + self.folder = self.waveform_extractor.folder + self.format = self.waveform_extractor.format + if self.format == "binary": + self.extension_folder = self.folder / self.extension_name + if not self.extension_folder.is_dir(): + if not self.waveform_extractor.is_read_only(): self.extension_folder.mkdir() - else: - import zarr + else: + raise Exception( + "WaveformExtractor: cannot save extension in read-only mode. " + "Extension will be saved in memory." + ) + self.format = "memory" + self.extension_folder = None + self.folder = None + else: + import zarr - zarr_root = zarr.open(self.folder, mode="r+") - if self.extension_name not in zarr_root.keys(): + mode = "r+" if not self.waveform_extractor.is_read_only() else "r" + zarr_root = zarr.open(self.folder, mode=mode) + if self.extension_name not in zarr_root.keys(): + if not self.waveform_extractor.is_read_only(): self.extension_group = zarr_root.create_group(self.extension_name) else: - self.extension_group = zarr_root[self.extension_name] - else: - warn( - "WaveformExtractor: cannot save extension in read-only mode. " "Extension will be saved in memory." - ) - self.format = "memory" - self.extension_folder = None - self.folder = None + raise Exception( + "WaveformExtractor: cannot save extension in read-only mode. " + "Extension will be saved in memory." 
+ ) + self.format = "memory" + self.extension_folder = None + self.folder = None + else: + self.extension_group = zarr_root[self.extension_name] else: self.format = "memory" self.extension_folder = None @@ -1882,53 +1893,56 @@ def save(self, **kwargs): self._save(**kwargs) def _save(self, **kwargs): - if self.format == "binary": - import pandas as pd - - for ext_data_name, ext_data in self._extension_data.items(): - if isinstance(ext_data, dict): - with (self.extension_folder / f"{ext_data_name}.json").open("w") as f: - json.dump(ext_data, f) - elif isinstance(ext_data, np.ndarray): - np.save(self.extension_folder / f"{ext_data_name}.npy", ext_data) - elif isinstance(ext_data, pd.DataFrame): - ext_data.to_csv(self.extension_folder / f"{ext_data_name}.csv", index=True) - else: - try: - with (self.extension_folder / f"{ext_data_name}.pkl").open("wb") as f: - pickle.dump(ext_data, f) - except: - raise Exception(f"Could not save {ext_data_name} as extension data") - elif self.format == "zarr": - from .zarrrecordingextractor import get_default_zarr_compressor - import pandas as pd - import numcodecs - - compressor = kwargs.get("compressor", None) - if compressor is None: - compressor = get_default_zarr_compressor() - for ext_data_name, ext_data in self._extension_data.items(): - if ext_data_name in self.extension_group: - del self.extension_group[ext_data_name] - if isinstance(ext_data, dict): - self.extension_group.create_dataset( - name=ext_data_name, data=[ext_data], object_codec=numcodecs.JSON() - ) - self.extension_group[ext_data_name].attrs["dict"] = True - elif isinstance(ext_data, np.ndarray): - self.extension_group.create_dataset(name=ext_data_name, data=ext_data, compressor=compressor) - elif isinstance(ext_data, pd.DataFrame): - ext_data.to_xarray().to_zarr( - store=self.extension_group.store, group=f"{self.extension_group.name}/{ext_data_name}", mode="a" - ) - self.extension_group[ext_data_name].attrs["dataframe"] = True - else: - try: + if not self.waveform_extractor.is_read_only(): + if self.format == "binary": + import pandas as pd + + for ext_data_name, ext_data in self._extension_data.items(): + if isinstance(ext_data, dict): + with (self.extension_folder / f"{ext_data_name}.json").open("w") as f: + json.dump(ext_data, f) + elif isinstance(ext_data, np.ndarray): + np.save(self.extension_folder / f"{ext_data_name}.npy", ext_data) + elif isinstance(ext_data, pd.DataFrame): + ext_data.to_csv(self.extension_folder / f"{ext_data_name}.csv", index=True) + else: + try: + with (self.extension_folder / f"{ext_data_name}.pkl").open("wb") as f: + pickle.dump(ext_data, f) + except: + raise Exception(f"Could not save {ext_data_name} as extension data") + elif self.format == "zarr": + from .zarrrecordingextractor import get_default_zarr_compressor + import pandas as pd + import numcodecs + + compressor = kwargs.get("compressor", None) + if compressor is None: + compressor = get_default_zarr_compressor() + for ext_data_name, ext_data in self._extension_data.items(): + if ext_data_name in self.extension_group: + del self.extension_group[ext_data_name] + if isinstance(ext_data, dict): self.extension_group.create_dataset( - name=ext_data_name, data=ext_data, object_codec=numcodecs.Pickle() + name=ext_data_name, data=[ext_data], object_codec=numcodecs.JSON() + ) + self.extension_group[ext_data_name].attrs["dict"] = True + elif isinstance(ext_data, np.ndarray): + self.extension_group.create_dataset(name=ext_data_name, data=ext_data, compressor=compressor) + elif isinstance(ext_data, pd.DataFrame): 
+ ext_data.to_xarray().to_zarr( + store=self.extension_group.store, + group=f"{self.extension_group.name}/{ext_data_name}", + mode="a", ) - except: - raise Exception(f"Could not save {ext_data_name} as extension data") + self.extension_group[ext_data_name].attrs["dataframe"] = True + else: + try: + self.extension_group.create_dataset( + name=ext_data_name, data=ext_data, object_codec=numcodecs.Pickle() + ) + except: + raise Exception(f"Could not save {ext_data_name} as extension data") def reset(self): """ From dfa67e681afec0ef741b16e61417c70123c97ef5 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Wed, 6 Sep 2023 12:08:01 +0200 Subject: [PATCH 23/90] warn instead of raise --- src/spikeinterface/core/waveform_extractor.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/spikeinterface/core/waveform_extractor.py b/src/spikeinterface/core/waveform_extractor.py index 39d115e22c..431440c846 100644 --- a/src/spikeinterface/core/waveform_extractor.py +++ b/src/spikeinterface/core/waveform_extractor.py @@ -1754,7 +1754,7 @@ def __init__(self, waveform_extractor): if not self.waveform_extractor.is_read_only(): self.extension_folder.mkdir() else: - raise Exception( + warn( "WaveformExtractor: cannot save extension in read-only mode. " "Extension will be saved in memory." ) @@ -1770,7 +1770,7 @@ def __init__(self, waveform_extractor): if not self.waveform_extractor.is_read_only(): self.extension_group = zarr_root.create_group(self.extension_name) else: - raise Exception( + warn( "WaveformExtractor: cannot save extension in read-only mode. " "Extension will be saved in memory." ) From f60024b0c52e17edfebe02b8170f9ac3d78b053f Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Wed, 6 Sep 2023 12:24:41 +0200 Subject: [PATCH 24/90] Do not overwrite similarity in Phy if available --- src/spikeinterface/exporters/to_phy.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/spikeinterface/exporters/to_phy.py b/src/spikeinterface/exporters/to_phy.py index 5615402fdb..c92861a8bf 100644 --- a/src/spikeinterface/exporters/to_phy.py +++ b/src/spikeinterface/exporters/to_phy.py @@ -178,7 +178,11 @@ def export_to_phy( templates[unit_ind, :, :][:, : len(chan_inds)] = template templates_ind[unit_ind, : len(chan_inds)] = chan_inds - template_similarity = compute_template_similarity(waveform_extractor, method="cosine_similarity") + if waveform_extractor.is_extension("similarity"): + tmc = waveform_extractor.load_extension("similarity") + template_similarity = tmc.get_data() + else: + template_similarity = compute_template_similarity(waveform_extractor, method="cosine_similarity") np.save(str(output_folder / "templates.npy"), templates) np.save(str(output_folder / "template_ind.npy"), templates_ind) From e64b8b4e99aabae273738e5f2985a651f321aa08 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Wed, 6 Sep 2023 15:00:52 +0200 Subject: [PATCH 25/90] Recafor sorter launcher. Deorecated run_sorters() and add run_sorter_jobs() --- doc/api.rst | 1 + doc/modules/sorters.rst | 37 +- src/spikeinterface/comparison/studytools.py | 38 +- src/spikeinterface/sorters/__init__.py | 9 +- src/spikeinterface/sorters/basesorter.py | 11 + src/spikeinterface/sorters/launcher.py | 450 ++++++++---------- .../sorters/tests/test_launcher.py | 287 ++++++----- 7 files changed, 406 insertions(+), 427 deletions(-) diff --git a/doc/api.rst b/doc/api.rst index 2e9fc1567a..1e8d6d62b1 100644 --- a/doc/api.rst +++ b/doc/api.rst @@ -212,6 +212,7 @@ spikeinterface.sorters .. 
autofunction:: print_sorter_versions .. autofunction:: get_sorter_description .. autofunction:: run_sorter + .. autofunction:: run_sorter_jobs .. autofunction:: run_sorters .. autofunction:: run_sorter_by_property diff --git a/doc/modules/sorters.rst b/doc/modules/sorters.rst index 26f2365202..ad50f9e411 100644 --- a/doc/modules/sorters.rst +++ b/doc/modules/sorters.rst @@ -285,27 +285,26 @@ Running several sorters in parallel The :py:mod:`~spikeinterface.sorters` module also includes tools to run several spike sorting jobs sequentially or in parallel. This can be done with the -:py:func:`~spikeinterface.sorters.run_sorters()` function by specifying +:py:func:`~spikeinterface.sorters.run_sorter_jobs()` function by specifying an :code:`engine` that supports parallel processing (such as :code:`joblib` or :code:`slurm`). .. code-block:: python - recordings = {'rec1' : recording, 'rec2': another_recording} - sorter_list = ['herdingspikes', 'tridesclous'] - sorter_params = { - 'herdingspikes': {'clustering_bandwidth' : 8}, - 'tridesclous': {'detect_threshold' : 5.}, - } - sorting_output = run_sorters(sorter_list, recordings, working_folder='tmp_some_sorters', - mode_if_folder_exists='overwrite', sorter_params=sorter_params) + # here we run 2 sorters on 2 diffrents recording = 4 jobs + recording = ... + another_recording = ... + + job_list = [ + {'sorter_name': 'tridesclous', 'recording': recording, 'output_folder': '/folder1','detect_threshold': 5.}, + {'sorter_name': 'tridesclous', 'recording': another_recording, 'output_folder': '/folder2', 'detect_threshold': 5.}, + {'sorter_name': 'herdingspikes', 'recording': recording, 'output_folder': '/folder3', 'clustering_bandwidth': 8., 'docker_image': True}, + {'sorter_name': 'herdingspikes', 'recording': another_recording, 'output_folder': '/folder4', 'clustering_bandwidth': 8., 'docker_image': True}, + ] + + # run in loop + sortings = run_sorter_jobs(job_list, engine='loop') - # the output is a dict with (rec_name, sorter_name) as keys - for (rec_name, sorter_name), sorting in sorting_output.items(): - print(rec_name, sorter_name, ':', sorting.get_unit_ids()) -After the jobs are run, the :code:`sorting_outputs` is a dictionary with :code:`(rec_name, sorter_name)` as a key (e.g. -:code:`('rec1', 'tridesclous')` in this example), and the corresponding :py:class:`~spikeinterface.core.BaseSorting` -as a value. :py:func:`~spikeinterface.sorters.run_sorters` has several "engines" available to launch the computation: @@ -315,13 +314,11 @@ as a value. .. 
code-block:: python - run_sorters(sorter_list, recordings, engine='loop') + run_sorter_jobs(job_list, engine='loop') - run_sorters(sorter_list, recordings, engine='joblib', - engine_kwargs={'n_jobs': 2}) + run_sorter_jobs(job_list, engine='joblib', engine_kwargs={'n_jobs': 2}) - run_sorters(sorter_list, recordings, engine='slurm', - engine_kwargs={'cpus_per_task': 10, 'mem', '5G'}) + run_sorter_jobs(job_list, engine='slurm', engine_kwargs={'cpus_per_task': 10, 'mem', '5G'}) Spike sorting by group diff --git a/src/spikeinterface/comparison/studytools.py b/src/spikeinterface/comparison/studytools.py index 79227c865f..00119c1586 100644 --- a/src/spikeinterface/comparison/studytools.py +++ b/src/spikeinterface/comparison/studytools.py @@ -22,12 +22,48 @@ from spikeinterface.core.job_tools import fix_job_kwargs from spikeinterface.extractors import NpzSortingExtractor from spikeinterface.sorters import sorter_dict -from spikeinterface.sorters.launcher import iter_working_folder, iter_sorting_output +from spikeinterface.sorters.basesorter import is_log_ok + from .comparisontools import _perf_keys from .paircomparisons import compare_sorter_to_ground_truth + + + +# This is deprecated and will be removed +def iter_working_folder(working_folder): + working_folder = Path(working_folder) + for rec_folder in working_folder.iterdir(): + if not rec_folder.is_dir(): + continue + for output_folder in rec_folder.iterdir(): + if (output_folder / "spikeinterface_job.json").is_file(): + with open(output_folder / "spikeinterface_job.json", "r") as f: + job_dict = json.load(f) + rec_name = job_dict["rec_name"] + sorter_name = job_dict["sorter_name"] + yield rec_name, sorter_name, output_folder + else: + rec_name = rec_folder.name + sorter_name = output_folder.name + if not output_folder.is_dir(): + continue + if not is_log_ok(output_folder): + continue + yield rec_name, sorter_name, output_folder + +# This is deprecated and will be removed +def iter_sorting_output(working_folder): + """Iterator over output_folder to retrieve all triplets of (rec_name, sorter_name, sorting).""" + for rec_name, sorter_name, output_folder in iter_working_folder(working_folder): + SorterClass = sorter_dict[sorter_name] + sorting = SorterClass.get_result_from_folder(output_folder) + yield rec_name, sorter_name, sorting + + + def setup_comparison_study(study_folder, gt_dict, **job_kwargs): """ Based on a dict of (recording, sorting) create the study folder. 
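(Illustrative sketch, not part of the patch: the run_sorter_jobs() workflow described in the sorters.rst changes above, assuming `recording` and `another_recording` already exist. It uses only the job-dict keys (sorter_name, recording, output_folder) and the engine/return_output arguments introduced by this patch; the (rec_name, sorter_name) keying mirrors the dict previously returned by the deprecated run_sorters().)

.. code-block:: python

    from spikeinterface.sorters import run_sorter_jobs

    # assumed to exist already; any loaded BaseRecording objects work here
    recordings = {"rec1": recording, "rec2": another_recording}
    sorter_names = ["tridesclous", "herdingspikes"]

    # one dict per run_sorter() call, as expected by run_sorter_jobs()
    job_list = []
    for rec_name, rec in recordings.items():
        for sorter_name in sorter_names:
            job_list.append(
                dict(
                    sorter_name=sorter_name,
                    recording=rec,
                    output_folder=f"/tmp/{rec_name}_{sorter_name}",
                )
            )

    # run sequentially and rebuild a {(rec_name, sorter_name): sorting} dict,
    # equivalent to what the deprecated run_sorters() used to return
    sortings = run_sorter_jobs(job_list, engine="loop", return_output=True)
    keys = [(rec_name, s) for rec_name in recordings for s in sorter_names]
    results = dict(zip(keys, sortings))

With the "slurm" engine the same call is non-blocking and the sortings have to be collected afterwards from the output folders, as the launcher docstring below notes.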
diff --git a/src/spikeinterface/sorters/__init__.py b/src/spikeinterface/sorters/__init__.py index a0d437559d..ba663327e8 100644 --- a/src/spikeinterface/sorters/__init__.py +++ b/src/spikeinterface/sorters/__init__.py @@ -1,11 +1,4 @@ from .basesorter import BaseSorter from .sorterlist import * from .runsorter import * - -from .launcher import ( - run_sorters, - run_sorter_by_property, - collect_sorting_outputs, - iter_working_folder, - iter_sorting_output, -) +from .launcher import run_sorter_jobs, run_sorters, run_sorter_by_property diff --git a/src/spikeinterface/sorters/basesorter.py b/src/spikeinterface/sorters/basesorter.py index ff559cc78d..aa76809b58 100644 --- a/src/spikeinterface/sorters/basesorter.py +++ b/src/spikeinterface/sorters/basesorter.py @@ -411,3 +411,14 @@ def get_job_kwargs(params, verbose): if not verbose: job_kwargs["progress_bar"] = False return job_kwargs + + +def is_log_ok(output_folder): + # log is OK when run_time is not None + if (output_folder / "spikeinterface_log.json").is_file(): + with open(output_folder / "spikeinterface_log.json", mode="r", encoding="utf8") as logfile: + log = json.load(logfile) + run_time = log.get("run_time", None) + ok = run_time is not None + return ok + return False \ No newline at end of file diff --git a/src/spikeinterface/sorters/launcher.py b/src/spikeinterface/sorters/launcher.py index 52098f45cd..138b4c5848 100644 --- a/src/spikeinterface/sorters/launcher.py +++ b/src/spikeinterface/sorters/launcher.py @@ -10,55 +10,148 @@ import stat import subprocess import sys +import warnings from spikeinterface.core import load_extractor, aggregate_units from spikeinterface.core.core_tools import check_json from .sorterlist import sorter_dict -from .runsorter import run_sorter, run_sorter - - -def _run_one(arg_list): - # the multiprocessing python module force to have one unique tuple argument - ( - sorter_name, - recording, - output_folder, - verbose, - sorter_params, - docker_image, - singularity_image, - with_output, - ) = arg_list - - if isinstance(recording, dict): - recording = load_extractor(recording) +from .runsorter import run_sorter +from .basesorter import is_log_ok + +_implemented_engine = ("loop", "joblib", "dask", "slurm") + +def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=False): + """ + Run several :py:func:`run_sorter()` sequencially or in parralel given a list of job. + + For **engine="loop"** this is equivalent to: + + ..code:: + + for job in job_list: + run_sorter(**job) + + For some engines, this function is blocking until the results ("loop", "joblib", "multiprocessing", "dask"). + For some other engine ("slurm") the function return almost immediatly (akak non blocking) and the results + must be retrieve by hand when finished with :py:func:`read_sorter_folder()`. + + Parameters + ---------- + job_list: list of dict + A list a dict that are propagated to run_sorter(...) + engine: str "loop", "joblib", "dask", "slurm" + The engine to run the list. + * "loop": a simple loop. This engine is + engine_kwargs: dict + + return_output: bool, dfault False + Return a sorting or None. + + Returns + ------- + sortings: None or list of sorting + With engine="loop" or "joblib" you can optional get directly the list of sorting result if return_output=True. 
+ """ + + assert engine in _implemented_engine, f"engine must be in {_implemented_engine}" + + if return_output: + assert engine in ("loop", "joblib", "multiprocessing") + out = [] else: - recording = recording - - # because this is checks in run_sorters before this call - remove_existing_folder = False - # result is retrieve later - delete_output_folder = False - # because we won't want the loop/worker to break - raise_error = False - - run_sorter( - sorter_name, - recording, - output_folder=output_folder, - remove_existing_folder=remove_existing_folder, - delete_output_folder=delete_output_folder, - verbose=verbose, - raise_error=raise_error, - docker_image=docker_image, - singularity_image=singularity_image, - with_output=with_output, - **sorter_params, - ) + out = None + + if engine == "loop": + # simple loop in main process + for kwargs in job_list: + sorting = run_sorter(**kwargs) + if return_output: + out.append(sorting) + + elif engine == "joblib": + from joblib import Parallel, delayed + + n_jobs = engine_kwargs.get("n_jobs", -1) + backend = engine_kwargs.get("backend", "loky") + sortings = Parallel(n_jobs=n_jobs, backend=backend)(delayed(run_sorter)(**kwargs) for kwargs in job_list) + if return_output: + out.extend(sortings) + + elif engine == "multiprocessing": + raise NotImplementedError() + + elif engine == "dask": + client = engine_kwargs.get("client", None) + assert client is not None, "For dask engine you have to provide : client = dask.distributed.Client(...)" + + tasks = [] + for kwargs in job_list: + task = client.submit(run_sorter, **kwargs) + tasks.append(task) + + for task in tasks: + task.result() + + elif engine == "slurm": + # generate python script for slurm + tmp_script_folder = engine_kwargs.get("tmp_script_folder", None) + if tmp_script_folder is None: + tmp_script_folder = tempfile.mkdtemp(prefix="spikeinterface_slurm_") + tmp_script_folder = Path(tmp_script_folder) + cpus_per_task = engine_kwargs.get("cpus_per_task", 1) + mem = engine_kwargs.get("mem", "1G") + + tmp_script_folder.mkdir(exist_ok=True, parents=True) + + # for i, task_args in enumerate(task_args_list): + for i, kwargs in enumerate(job_list): + script_name = tmp_script_folder / f"si_script_{i}.py" + with open(script_name, "w") as f: + kwargs_txt = "" + for k, v in kwargs.items(): + print(k, v) + kwargs_txt += " " + if k == "recording": + # put None temporally + kwargs_txt += "recording=None" + else: + if isinstance(v, str): + kwargs_txt += f'{k}="{v}"' + elif isinstance(v, Path): + kwargs_txt += f'{k}="{str(v.absolute())}"' + else: + kwargs_txt += f"{k}={v}" + kwargs_txt += ",\n" + + # recording_dict = task_args[1] + recording_dict = kwargs["recording"].to_dict() + slurm_script = _slurm_script.format( + python=sys.executable, recording_dict=recording_dict, kwargs_txt=kwargs_txt + ) + print(slurm_script) + f.write(slurm_script) + os.fchmod(f.fileno(), mode=stat.S_IRWXU) + + # subprocess.Popen(["sbatch", str(script_name.absolute()), f"-cpus-per-task={cpus_per_task}", f"-mem={mem}"]) + + return out + +_slurm_script = """#! 
{python} +from numpy import array +from spikeinterface.sorters import run_sorter + +rec_dict = {recording_dict} + +kwargs = dict( +{kwargs_txt} +) +kwargs['recording'] = load_extactor(rec_dict) + +run_sorter(**kwargs) +""" -_implemented_engine = ("loop", "joblib", "dask", "slurm") def run_sorter_by_property( @@ -66,7 +159,7 @@ def run_sorter_by_property( recording, grouping_property, working_folder, - mode_if_folder_exists="raise", + mode_if_folder_exists=None, engine="loop", engine_kwargs={}, verbose=False, @@ -93,11 +186,10 @@ def run_sorter_by_property( Property to split by before sorting working_folder: str The working directory. - mode_if_folder_exists: {'raise', 'overwrite', 'keep'} - The mode when the subfolder of recording/sorter already exists. - * 'raise' : raise error if subfolder exists - * 'overwrite' : delete and force recompute - * 'keep' : do not compute again if f=subfolder exists and log is OK + mode_if_folder_exists: None + Must be None. This is deprecated. + If not None then a warning is raise. + Will be removed in next release. engine: {'loop', 'joblib', 'dask'} Which engine to use to run sorter. engine_kwargs: dict @@ -127,46 +219,50 @@ def run_sorter_by_property( engine_kwargs={"n_jobs": 4}) """ + if mode_if_folder_exists is not None: + warnings.warn( + "run_sorter_by_property(): mode_if_folder_exists is not used anymore", + DeprecationWarning, + stacklevel=2, + ) + + working_folder = Path(working_folder).absolute() assert grouping_property in recording.get_property_keys(), ( f"The 'grouping_property' {grouping_property} is not " f"a recording property!" ) recording_dict = recording.split_by(grouping_property) - sorting_output = run_sorters( - [sorter_name], - recording_dict, - working_folder, - mode_if_folder_exists=mode_if_folder_exists, - engine=engine, - engine_kwargs=engine_kwargs, - verbose=verbose, - with_output=True, - docker_images={sorter_name: docker_image}, - singularity_images={sorter_name: singularity_image}, - sorter_params={sorter_name: sorter_params}, - ) - - grouping_property_values = None - sorting_list = [] - for output_name, sorting in sorting_output.items(): - prop_name, sorter_name = output_name - sorting_list.append(sorting) - if grouping_property_values is None: - grouping_property_values = np.array( - [prop_name] * len(sorting.get_unit_ids()), dtype=np.dtype(type(prop_name)) - ) - else: - grouping_property_values = np.concatenate( - (grouping_property_values, [prop_name] * len(sorting.get_unit_ids())) - ) + + job_list = [] + for k, rec in recording_dict.items(): + job = dict( + sorter_name=sorter_name, + recording=rec, + output_folder=working_folder / str(k), + verbose=verbose, + docker_image=docker_image, + singularity_image=singularity_image, + **sorter_params + ) + job_list.append(job) + + sorting_list = run_sorter_jobs(job_list, engine=engine, engine_kwargs=engine_kwargs, return_output=True) + + unit_groups = [] + for sorting, group in zip(sorting_list, recording_dict.keys()): + num_units = sorting.get_unit_ids().size + unit_groups.extend([group] * num_units) + unit_groups = np.array(unit_groups) aggregate_sorting = aggregate_units(sorting_list) - aggregate_sorting.set_property(key=grouping_property, values=grouping_property_values) + aggregate_sorting.set_property(key=grouping_property, values=unit_groups) aggregate_sorting.register_recording(recording) return aggregate_sorting + +# This is deprecated and will be removed def run_sorters( sorter_list, recording_dict_or_list, @@ -180,8 +276,10 @@ def run_sorters( docker_images={}, 
singularity_images={}, ): - """Run several sorter on several recordings. - + """ + This function is deprecated and will be removed. + Please use run_sorter_jobs() instead. + Parameters ---------- sorter_list: list of str @@ -221,6 +319,13 @@ def run_sorters( results : dict The output is nested dict[(rec_name, sorter_name)] of SortingExtractor. """ + + warnings.warn( + "run_sorters()is deprecated please use run_sorter_jobs() instead", + DeprecationWarning, + stacklevel=2, + ) + working_folder = Path(working_folder) mode_if_folder_exists in ("raise", "keep", "overwrite") @@ -247,8 +352,7 @@ def run_sorters( dtype_rec_name = np.dtype(type(list(recording_dict.keys())[0])) assert dtype_rec_name.kind in ("i", "u", "S", "U"), "Dict keys can only be integers or strings!" - need_dump = engine != "loop" - task_args_list = [] + job_list = [] for rec_name, recording in recording_dict.items(): for sorter_name in sorter_list: output_folder = working_folder / str(rec_name) / sorter_name @@ -260,6 +364,7 @@ def run_sorters( elif mode_if_folder_exists == "overwrite": shutil.rmtree(str(output_folder)) elif mode_if_folder_exists == "keep": + if is_log_ok(output_folder): continue else: @@ -268,181 +373,22 @@ def run_sorters( params = sorter_params.get(sorter_name, {}) docker_image = docker_images.get(sorter_name, None) singularity_image = singularity_images.get(sorter_name, None) - _check_container_images(docker_image, singularity_image, sorter_name) - - if need_dump: - if not recording.check_if_dumpable(): - raise Exception("recording not dumpable call recording.save() before") - recording_arg = recording.to_dict(recursive=True) - else: - recording_arg = recording - - task_args = ( - sorter_name, - recording_arg, - output_folder, - verbose, - params, - docker_image, - singularity_image, - with_output, - ) - task_args_list.append(task_args) - if engine == "loop": - # simple loop in main process - for task_args in task_args_list: - _run_one(task_args) - - elif engine == "joblib": - from joblib import Parallel, delayed - - n_jobs = engine_kwargs.get("n_jobs", -1) - backend = engine_kwargs.get("backend", "loky") - Parallel(n_jobs=n_jobs, backend=backend)(delayed(_run_one)(task_args) for task_args in task_args_list) - - elif engine == "dask": - client = engine_kwargs.get("client", None) - assert client is not None, "For dask engine you have to provide : client = dask.distributed.Client(...)" - - tasks = [] - for task_args in task_args_list: - task = client.submit(_run_one, task_args) - tasks.append(task) - - for task in tasks: - task.result() - - elif engine == "slurm": - # generate python script for slurm - tmp_script_folder = engine_kwargs.get("tmp_script_folder", None) - if tmp_script_folder is None: - tmp_script_folder = tempfile.mkdtemp(prefix="spikeinterface_slurm_") - tmp_script_folder = Path(tmp_script_folder) - cpus_per_task = engine_kwargs.get("cpus_per_task", 1) - mem = engine_kwargs.get("mem", "1G") - - for i, task_args in enumerate(task_args_list): - script_name = tmp_script_folder / f"si_script_{i}.py" - with open(script_name, "w") as f: - arg_list_txt = "(\n" - for j, arg in enumerate(task_args): - arg_list_txt += "\t" - if j != 1: - if isinstance(arg, str): - arg_list_txt += f'"{arg}"' - elif isinstance(arg, Path): - arg_list_txt += f'"{str(arg.absolute())}"' - else: - arg_list_txt += f"{arg}" - else: - arg_list_txt += "recording" - arg_list_txt += ",\r" - arg_list_txt += ")" - - recording_dict = task_args[1] - slurm_script = _slurm_script.format( - python=sys.executable, 
recording_dict=recording_dict, arg_list_txt=arg_list_txt - ) - f.write(slurm_script) - os.fchmod(f.fileno(), mode=stat.S_IRWXU) - - print(slurm_script) - - subprocess.Popen(["sbatch", str(script_name.absolute()), f"-cpus-per-task={cpus_per_task}", f"-mem={mem}"]) - - non_blocking_engine = ("loop", "joblib") - if engine in non_blocking_engine: - # dump spikeinterface_job.json - # only for non blocking engine - for rec_name, recording in recording_dict.items(): - for sorter_name in sorter_list: - output_folder = working_folder / str(rec_name) / sorter_name - with open(output_folder / "spikeinterface_job.json", "w") as f: - dump_dict = {"rec_name": rec_name, "sorter_name": sorter_name, "engine": engine} - if engine != "dask": - dump_dict.update({"engine_kwargs": engine_kwargs}) - json.dump(check_json(dump_dict), f) - - if with_output: - if engine not in non_blocking_engine: - print( - f'Warning!! With engine="{engine}" you cannot have directly output results\n' - "Use : run_sorters(..., with_output=False)\n" - "And then: results = collect_sorting_outputs(output_folders)" + job = dict( + sorter_name=sorter_name, + recording=recording, + output_folder=output_folder, + verbose=verbose, + docker_image=docker_image, + singularity_image=singularity_image, + **params ) - return + job_list.append(job) + + sorting_list = run_sorter_jobs(job_list, engine=engine, engine_kwargs=engine_kwargs, return_output=with_output) - results = collect_sorting_outputs(working_folder) + if with_output: + keys = [(rec_name, sorter_name) for rec_name in recording_dict for sorter_name in sorter_list ] + results = dict(zip(keys, sorting_list)) return results - -_slurm_script = """#! {python} -from numpy import array -from spikeinterface.sorters.launcher import _run_one - -recording = {recording_dict} - -arg_list = {arg_list_txt} - -_run_one(arg_list) -""" - - -def is_log_ok(output_folder): - # log is OK when run_time is not None - if (output_folder / "spikeinterface_log.json").is_file(): - with open(output_folder / "spikeinterface_log.json", mode="r", encoding="utf8") as logfile: - log = json.load(logfile) - run_time = log.get("run_time", None) - ok = run_time is not None - return ok - return False - - -def iter_working_folder(working_folder): - working_folder = Path(working_folder) - for rec_folder in working_folder.iterdir(): - if not rec_folder.is_dir(): - continue - for output_folder in rec_folder.iterdir(): - if (output_folder / "spikeinterface_job.json").is_file(): - with open(output_folder / "spikeinterface_job.json", "r") as f: - job_dict = json.load(f) - rec_name = job_dict["rec_name"] - sorter_name = job_dict["sorter_name"] - yield rec_name, sorter_name, output_folder - else: - rec_name = rec_folder.name - sorter_name = output_folder.name - if not output_folder.is_dir(): - continue - if not is_log_ok(output_folder): - continue - yield rec_name, sorter_name, output_folder - - -def iter_sorting_output(working_folder): - """Iterator over output_folder to retrieve all triplets of (rec_name, sorter_name, sorting).""" - for rec_name, sorter_name, output_folder in iter_working_folder(working_folder): - SorterClass = sorter_dict[sorter_name] - sorting = SorterClass.get_result_from_folder(output_folder) - yield rec_name, sorter_name, sorting - - -def collect_sorting_outputs(working_folder): - """Collect results in a working_folder. - - The output is a dict with double key access results[(rec_name, sorter_name)] of SortingExtractor. 
- """ - results = {} - for rec_name, sorter_name, sorting in iter_sorting_output(working_folder): - results[(rec_name, sorter_name)] = sorting - return results - - -def _check_container_images(docker_image, singularity_image, sorter_name): - if docker_image is not None: - assert singularity_image is None, f"Provide either a docker or a singularity image " f"for sorter {sorter_name}" - if singularity_image is not None: - assert docker_image is None, f"Provide either a docker or a singularity image " f"for sorter {sorter_name}" diff --git a/src/spikeinterface/sorters/tests/test_launcher.py b/src/spikeinterface/sorters/tests/test_launcher.py index cd8bc0fa5d..0d84dc0bdb 100644 --- a/src/spikeinterface/sorters/tests/test_launcher.py +++ b/src/spikeinterface/sorters/tests/test_launcher.py @@ -1,4 +1,5 @@ import os +import sys import shutil import time @@ -6,8 +7,9 @@ from pathlib import Path from spikeinterface.core import load_extractor -from spikeinterface.extractors import toy_example -from spikeinterface.sorters import run_sorters, run_sorter_by_property, collect_sorting_outputs +# from spikeinterface.extractors import toy_example +from spikeinterface import generate_ground_truth_recording +from spikeinterface.sorters import run_sorter_jobs, run_sorters, run_sorter_by_property if hasattr(pytest, "global_test_folder"): @@ -15,10 +17,16 @@ else: cache_folder = Path("cache_folder") / "sorters" +base_output = cache_folder / 'sorter_output' + +# no need to have many +num_recordings = 2 +sorters = ["tridesclous2"] def setup_module(): - rec, _ = toy_example(num_channels=8, duration=30, seed=0, num_segments=1) - for i in range(4): + base_seed = 42 + for i in range(num_recordings): + rec, _ = generate_ground_truth_recording(num_channels=8, durations=[10.0], seed=base_seed + i) rec_folder = cache_folder / f"toy_rec_{i}" if rec_folder.is_dir(): shutil.rmtree(rec_folder) @@ -31,19 +39,101 @@ def setup_module(): rec.save(folder=rec_folder) -def test_run_sorters_with_list(): - working_folder = cache_folder / "test_run_sorters_list" +def get_job_list(): + jobs = [] + for i in range(num_recordings): + for sorter_name in sorters: + recording = load_extractor(cache_folder / f"toy_rec_{i}") + kwargs = dict(sorter_name=sorter_name, + recording=recording, + output_folder=base_output / f"{sorter_name}_rec{i}", + verbose=True, + raise_error=False, + ) + jobs.append(kwargs) + + return jobs + +@pytest.fixture(scope="module") +def job_list(): + return get_job_list() + + + + + + + +################################ + + +def test_run_sorter_jobs_loop(job_list): + if base_output.is_dir(): + shutil.rmtree(base_output) + sortings = run_sorter_jobs(job_list, engine="loop", return_output=True) + print(sortings) + + +def test_run_sorter_jobs_joblib(job_list): + if base_output.is_dir(): + shutil.rmtree(base_output) + sortings = run_sorter_jobs(job_list, engine="joblib", engine_kwargs=dict(n_jobs=2, backend="loky"), return_output=True) + print(sortings) + +def test_run_sorter_jobs_multiprocessing(job_list): + pass + +@pytest.mark.skipif(True, reason="This is tested locally") +def test_run_sorter_jobs_dask(job_list): + if base_output.is_dir(): + shutil.rmtree(base_output) + + # create a dask Client for a slurm queue + from dask.distributed import Client + + test_mode = "local" + # test_mode = "client_slurm" + + if test_mode == "local": + client = Client() + elif test_mode == "client_slurm": + from dask_jobqueue import SLURMCluster + cluster = SLURMCluster( + processes=1, + cores=1, + memory="12GB", + python=sys.executable, + 
walltime="12:00:00", + ) + cluster.scale(2) + client = Client(cluster) + + # dask + t0 = time.perf_counter() + run_sorter_jobs(job_list, engine="dask", engine_kwargs=dict(client=client)) + t1 = time.perf_counter() + print(t1 - t0) + + +def test_run_sorter_jobs_slurm(job_list): + if base_output.is_dir(): + shutil.rmtree(base_output) + + working_folder = cache_folder / "test_run_sorters_slurm" if working_folder.is_dir(): shutil.rmtree(working_folder) - # make dumpable - rec0 = load_extractor(cache_folder / "toy_rec_0") - rec1 = load_extractor(cache_folder / "toy_rec_1") - - recording_list = [rec0, rec1] - sorter_list = ["tridesclous"] + tmp_script_folder = working_folder / "slurm_scripts" - run_sorters(sorter_list, recording_list, working_folder, engine="loop", verbose=False, with_output=False) + run_sorter_jobs( + job_list, + engine="slurm", + engine_kwargs=dict( + tmp_script_folder=tmp_script_folder, + cpus_per_task=32, + mem="32G", + ) + ) def test_run_sorter_by_property(): @@ -59,7 +149,7 @@ def test_run_sorter_by_property(): rec0_by = rec0.split_by("group") group_names0 = list(rec0_by.keys()) - sorter_name = "tridesclous" + sorter_name = "tridesclous2" sorting0 = run_sorter_by_property(sorter_name, rec0, "group", working_folder1, engine="loop", verbose=False) assert "group" in sorting0.get_property_keys() assert all([g in group_names0 for g in sorting0.get_property("group")]) @@ -68,13 +158,38 @@ def test_run_sorter_by_property(): rec1_by = rec1.split_by("group") group_names1 = list(rec1_by.keys()) - sorter_name = "tridesclous" + sorter_name = "tridesclous2" sorting1 = run_sorter_by_property(sorter_name, rec1, "group", working_folder2, engine="loop", verbose=False) assert "group" in sorting1.get_property_keys() assert all([g in group_names1 for g in sorting1.get_property("group")]) + +# run_sorters is deprecated +# This will test will be removed in next release +def test_run_sorters_with_list(): + + + working_folder = cache_folder / "test_run_sorters_list" + if working_folder.is_dir(): + shutil.rmtree(working_folder) + + # make dumpable + rec0 = load_extractor(cache_folder / "toy_rec_0") + rec1 = load_extractor(cache_folder / "toy_rec_1") + + recording_list = [rec0, rec1] + sorter_list = ["tridesclous2"] + + run_sorters(sorter_list, recording_list, working_folder, engine="loop", verbose=False, with_output=False) + + + + +# run_sorters is deprecated +# This will test will be removed in next release def test_run_sorters_with_dict(): + working_folder = cache_folder / "test_run_sorters_dict" if working_folder.is_dir(): shutil.rmtree(working_folder) @@ -84,9 +199,9 @@ def test_run_sorters_with_dict(): recording_dict = {"toy_tetrode": rec0, "toy_octotrode": rec1} - sorter_list = ["tridesclous", "tridesclous2"] + sorter_list = ["tridesclous2"] - sorter_params = {"tridesclous": dict(detect_threshold=5.6), "tridesclous2": dict()} + sorter_params = {"tridesclous2": dict()} # simple loop t0 = time.perf_counter() @@ -116,143 +231,23 @@ def test_run_sorters_with_dict(): ) -@pytest.mark.skipif(True, reason="This is tested locally") -def test_run_sorters_joblib(): - working_folder = cache_folder / "test_run_sorters_joblib" - if working_folder.is_dir(): - shutil.rmtree(working_folder) - - recording_dict = {} - for i in range(4): - rec = load_extractor(cache_folder / f"toy_rec_{i}") - recording_dict[f"rec_{i}"] = rec - - sorter_list = [ - "tridesclous", - ] - - # joblib - t0 = time.perf_counter() - run_sorters( - sorter_list, - recording_dict, - working_folder / "with_joblib", - engine="joblib", - 
engine_kwargs={"n_jobs": 4}, - with_output=False, - mode_if_folder_exists="keep", - ) - t1 = time.perf_counter() - print(t1 - t0) - - -@pytest.mark.skipif(True, reason="This is tested locally") -def test_run_sorters_dask(): - working_folder = cache_folder / "test_run_sorters_dask" - if working_folder.is_dir(): - shutil.rmtree(working_folder) - - recording_dict = {} - for i in range(4): - rec = load_extractor(cache_folder / f"toy_rec_{i}") - recording_dict[f"rec_{i}"] = rec - sorter_list = [ - "tridesclous", - ] - - # create a dask Client for a slurm queue - from dask.distributed import Client - from dask_jobqueue import SLURMCluster - - python = "/home/samuel.garcia/.virtualenvs/py36/bin/python3.6" - cluster = SLURMCluster( - processes=1, - cores=1, - memory="12GB", - python=python, - walltime="12:00:00", - ) - cluster.scale(5) - client = Client(cluster) - - # dask - t0 = time.perf_counter() - run_sorters( - sorter_list, - recording_dict, - working_folder, - engine="dask", - engine_kwargs={"client": client}, - with_output=False, - mode_if_folder_exists="keep", - ) - t1 = time.perf_counter() - print(t1 - t0) - - -@pytest.mark.skipif(True, reason="This is tested locally") -def test_run_sorters_slurm(): - working_folder = cache_folder / "test_run_sorters_slurm" - if working_folder.is_dir(): - shutil.rmtree(working_folder) - - # create recording - recording_dict = {} - for i in range(4): - rec = load_extractor(cache_folder / f"toy_rec_{i}") - recording_dict[f"rec_{i}"] = rec - - sorter_list = [ - "spykingcircus2", - "tridesclous2", - ] - - tmp_script_folder = working_folder / "slurm_scripts" - tmp_script_folder.mkdir(parents=True) - - run_sorters( - sorter_list, - recording_dict, - working_folder, - engine="slurm", - engine_kwargs={ - "tmp_script_folder": tmp_script_folder, - "cpus_per_task": 32, - "mem": "32G", - }, - with_output=False, - mode_if_folder_exists="keep", - verbose=True, - ) - - -def test_collect_sorting_outputs(): - working_folder = cache_folder / "test_run_sorters_dict" - results = collect_sorting_outputs(working_folder) - print(results) - - -def test_sorter_installation(): - # This import is to get error on github when import fails - import tridesclous - - # import circus if __name__ == "__main__": setup_module() - # pass - # test_run_sorters_with_list() + job_list = get_job_list() + + # test_run_sorter_jobs_loop(job_list) + # test_run_sorter_jobs_joblib(job_list) + # test_run_sorter_jobs_multiprocessing(job_list) + # test_run_sorter_jobs_dask(job_list) + # test_run_sorter_jobs_slurm(job_list) # test_run_sorter_by_property() + # this deprecated + test_run_sorters_with_list() test_run_sorters_with_dict() - # test_run_sorters_joblib() - - # test_run_sorters_dask() - - # test_run_sorters_slurm() - # test_collect_sorting_outputs() From 67dc176ec3305154adc7e0ce21b38b466c0fcd0b Mon Sep 17 00:00:00 2001 From: Garcia Samuel Date: Wed, 6 Sep 2023 15:12:06 +0200 Subject: [PATCH 26/90] Update doc/modules/sorters.rst Co-authored-by: Zach McKenzie <92116279+zm711@users.noreply.github.com> --- doc/modules/sorters.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/modules/sorters.rst b/doc/modules/sorters.rst index ad50f9e411..1843e80b8c 100644 --- a/doc/modules/sorters.rst +++ b/doc/modules/sorters.rst @@ -290,7 +290,7 @@ an :code:`engine` that supports parallel processing (such as :code:`joblib` or : .. code-block:: python - # here we run 2 sorters on 2 diffrents recording = 4 jobs + # here we run 2 sorters on 2 different recordings = 4 jobs recording = ... 
another_recording = ... From fe5052818fa4ddaed3f0e21fd657c9fe4151f988 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Wed, 6 Sep 2023 15:32:30 +0200 Subject: [PATCH 27/90] add engine="processpoolexecutor" --- src/spikeinterface/sorters/launcher.py | 56 ++++++++++++++----- .../sorters/tests/test_launcher.py | 27 ++++----- 2 files changed, 55 insertions(+), 28 deletions(-) diff --git a/src/spikeinterface/sorters/launcher.py b/src/spikeinterface/sorters/launcher.py index 138b4c5848..60be6e1286 100644 --- a/src/spikeinterface/sorters/launcher.py +++ b/src/spikeinterface/sorters/launcher.py @@ -4,7 +4,6 @@ from pathlib import Path import shutil import numpy as np -import json import tempfile import os import stat @@ -12,14 +11,22 @@ import sys import warnings -from spikeinterface.core import load_extractor, aggregate_units -from spikeinterface.core.core_tools import check_json +from spikeinterface.core import aggregate_units from .sorterlist import sorter_dict from .runsorter import run_sorter from .basesorter import is_log_ok -_implemented_engine = ("loop", "joblib", "dask", "slurm") +_default_engine_kwargs = dict( + loop=dict(), + joblib=dict(n_jobs=-1, backend="loky"), + processpoolexecutor=dict(max_workers=2, mp_context=None), + dask=dict(client=None), + slurm=dict(tmp_script_folder=None, cpus_per_task=1, mem="1G"), +) + + +_implemented_engine = list(_default_engine_kwargs.keys()) def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=False): """ @@ -56,8 +63,15 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=Fal assert engine in _implemented_engine, f"engine must be in {_implemented_engine}" + engine_kwargs_ = dict() + engine_kwargs_.update(_default_engine_kwargs[engine]) + engine_kwargs_.update(engine_kwargs) + engine_kwargs = engine_kwargs_ + + + if return_output: - assert engine in ("loop", "joblib", "multiprocessing") + assert engine in ("loop", "joblib", "processpoolexecutor") out = [] else: out = None @@ -72,17 +86,30 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=Fal elif engine == "joblib": from joblib import Parallel, delayed - n_jobs = engine_kwargs.get("n_jobs", -1) - backend = engine_kwargs.get("backend", "loky") + n_jobs = engine_kwargs["n_jobs"] + backend = engine_kwargs["backend"] sortings = Parallel(n_jobs=n_jobs, backend=backend)(delayed(run_sorter)(**kwargs) for kwargs in job_list) if return_output: out.extend(sortings) - elif engine == "multiprocessing": - raise NotImplementedError() + elif engine == "processpoolexecutor": + from concurrent.futures import ProcessPoolExecutor + + max_workers = engine_kwargs["max_workers"] + mp_context = engine_kwargs["mp_context"] + + with ProcessPoolExecutor(max_workers=max_workers, mp_context=mp_context) as executor: + futures = [] + for kwargs in job_list: + res = executor.submit(run_sorter, **kwargs) + futures.append(res) + for futur in futures: + sorting = futur.result() + if return_output: + out.append(sorting) elif engine == "dask": - client = engine_kwargs.get("client", None) + client = engine_kwargs["client"] assert client is not None, "For dask engine you have to provide : client = dask.distributed.Client(...)" tasks = [] @@ -95,16 +122,15 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=Fal elif engine == "slurm": # generate python script for slurm - tmp_script_folder = engine_kwargs.get("tmp_script_folder", None) + tmp_script_folder = engine_kwargs["tmp_script_folder"] if tmp_script_folder is None: 
tmp_script_folder = tempfile.mkdtemp(prefix="spikeinterface_slurm_") tmp_script_folder = Path(tmp_script_folder) - cpus_per_task = engine_kwargs.get("cpus_per_task", 1) - mem = engine_kwargs.get("mem", "1G") + cpus_per_task = engine_kwargs["cpus_per_task"] + mem = engine_kwargs["mem"] tmp_script_folder.mkdir(exist_ok=True, parents=True) - # for i, task_args in enumerate(task_args_list): for i, kwargs in enumerate(job_list): script_name = tmp_script_folder / f"si_script_{i}.py" with open(script_name, "w") as f: @@ -133,7 +159,7 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=Fal f.write(slurm_script) os.fchmod(f.fileno(), mode=stat.S_IRWXU) - # subprocess.Popen(["sbatch", str(script_name.absolute()), f"-cpus-per-task={cpus_per_task}", f"-mem={mem}"]) + subprocess.Popen(["sbatch", str(script_name.absolute()), f"-cpus-per-task={cpus_per_task}", f"-mem={mem}"]) return out diff --git a/src/spikeinterface/sorters/tests/test_launcher.py b/src/spikeinterface/sorters/tests/test_launcher.py index 0d84dc0bdb..c1f8b6e0bb 100644 --- a/src/spikeinterface/sorters/tests/test_launcher.py +++ b/src/spikeinterface/sorters/tests/test_launcher.py @@ -59,14 +59,6 @@ def job_list(): return get_job_list() - - - - - -################################ - - def test_run_sorter_jobs_loop(job_list): if base_output.is_dir(): shutil.rmtree(base_output) @@ -74,14 +66,22 @@ def test_run_sorter_jobs_loop(job_list): print(sortings) + + def test_run_sorter_jobs_joblib(job_list): if base_output.is_dir(): shutil.rmtree(base_output) sortings = run_sorter_jobs(job_list, engine="joblib", engine_kwargs=dict(n_jobs=2, backend="loky"), return_output=True) print(sortings) -def test_run_sorter_jobs_multiprocessing(job_list): - pass +def test_run_sorter_jobs_processpoolexecutor(job_list): + if base_output.is_dir(): + shutil.rmtree(base_output) + sortings = run_sorter_jobs(job_list, engine="processpoolexecutor", engine_kwargs=dict(max_workers=2), return_output=True) + print(sortings) + + + @pytest.mark.skipif(True, reason="This is tested locally") def test_run_sorter_jobs_dask(job_list): @@ -235,11 +235,12 @@ def test_run_sorters_with_dict(): if __name__ == "__main__": - setup_module() + # setup_module() job_list = get_job_list() # test_run_sorter_jobs_loop(job_list) # test_run_sorter_jobs_joblib(job_list) + test_run_sorter_jobs_processpoolexecutor(job_list) # test_run_sorter_jobs_multiprocessing(job_list) # test_run_sorter_jobs_dask(job_list) # test_run_sorter_jobs_slurm(job_list) @@ -247,7 +248,7 @@ def test_run_sorters_with_dict(): # test_run_sorter_by_property() # this deprecated - test_run_sorters_with_list() - test_run_sorters_with_dict() + # test_run_sorters_with_list() + # test_run_sorters_with_dict() From f4b7c3caad2011606bf19a70c69d098a3922f277 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Wed, 6 Sep 2023 16:29:17 +0200 Subject: [PATCH 28/90] debug slurm launcher --- src/spikeinterface/sorters/launcher.py | 5 ++--- src/spikeinterface/sorters/tests/test_launcher.py | 4 ++-- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/src/spikeinterface/sorters/launcher.py b/src/spikeinterface/sorters/launcher.py index 60be6e1286..6f3b972fdd 100644 --- a/src/spikeinterface/sorters/launcher.py +++ b/src/spikeinterface/sorters/launcher.py @@ -136,7 +136,6 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=Fal with open(script_name, "w") as f: kwargs_txt = "" for k, v in kwargs.items(): - print(k, v) kwargs_txt += " " if k == "recording": # put None temporally @@ 
-155,7 +154,6 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=Fal slurm_script = _slurm_script.format( python=sys.executable, recording_dict=recording_dict, kwargs_txt=kwargs_txt ) - print(slurm_script) f.write(slurm_script) os.fchmod(f.fileno(), mode=stat.S_IRWXU) @@ -165,6 +163,7 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=Fal _slurm_script = """#! {python} from numpy import array +from spikeinterface import load_extractor from spikeinterface.sorters import run_sorter rec_dict = {recording_dict} @@ -172,7 +171,7 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=Fal kwargs = dict( {kwargs_txt} ) -kwargs['recording'] = load_extactor(rec_dict) +kwargs['recording'] = load_extractor(rec_dict) run_sorter(**kwargs) """ diff --git a/src/spikeinterface/sorters/tests/test_launcher.py b/src/spikeinterface/sorters/tests/test_launcher.py index c1f8b6e0bb..2d8e6f3d3c 100644 --- a/src/spikeinterface/sorters/tests/test_launcher.py +++ b/src/spikeinterface/sorters/tests/test_launcher.py @@ -240,10 +240,10 @@ def test_run_sorters_with_dict(): # test_run_sorter_jobs_loop(job_list) # test_run_sorter_jobs_joblib(job_list) - test_run_sorter_jobs_processpoolexecutor(job_list) + # test_run_sorter_jobs_processpoolexecutor(job_list) # test_run_sorter_jobs_multiprocessing(job_list) # test_run_sorter_jobs_dask(job_list) - # test_run_sorter_jobs_slurm(job_list) + test_run_sorter_jobs_slurm(job_list) # test_run_sorter_by_property() From 93de4db5596a7c4ff3cc2925f6c702a9cabf7703 Mon Sep 17 00:00:00 2001 From: Garcia Samuel Date: Wed, 6 Sep 2023 16:25:42 +0200 Subject: [PATCH 29/90] Update doc/modules/sorters.rst Co-authored-by: Alessio Buccino --- doc/modules/sorters.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/modules/sorters.rst b/doc/modules/sorters.rst index 1843e80b8c..d17927cc42 100644 --- a/doc/modules/sorters.rst +++ b/doc/modules/sorters.rst @@ -295,10 +295,10 @@ an :code:`engine` that supports parallel processing (such as :code:`joblib` or : another_recording = ... 
job_list = [ - {'sorter_name': 'tridesclous', 'recording': recording, 'output_folder': '/folder1','detect_threshold': 5.}, - {'sorter_name': 'tridesclous', 'recording': another_recording, 'output_folder': '/folder2', 'detect_threshold': 5.}, - {'sorter_name': 'herdingspikes', 'recording': recording, 'output_folder': '/folder3', 'clustering_bandwidth': 8., 'docker_image': True}, - {'sorter_name': 'herdingspikes', 'recording': another_recording, 'output_folder': '/folder4', 'clustering_bandwidth': 8., 'docker_image': True}, + {'sorter_name': 'tridesclous', 'recording': recording, 'output_folder': 'folder1','detect_threshold': 5.}, + {'sorter_name': 'tridesclous', 'recording': another_recording, 'output_folder': 'folder2', 'detect_threshold': 5.}, + {'sorter_name': 'herdingspikes', 'recording': recording, 'output_folder': 'folder3', 'clustering_bandwidth': 8., 'docker_image': True}, + {'sorter_name': 'herdingspikes', 'recording': another_recording, 'output_folder': 'folder4', 'clustering_bandwidth': 8., 'docker_image': True}, ] # run in loop From fe2d7c532611add92ea46d877f051a396ead6ced Mon Sep 17 00:00:00 2001 From: Garcia Samuel Date: Thu, 7 Sep 2023 19:08:32 +0200 Subject: [PATCH 30/90] Suggestions from Zach Co-authored-by: Zach McKenzie <92116279+zm711@users.noreply.github.com> --- src/spikeinterface/sorters/launcher.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/spikeinterface/sorters/launcher.py b/src/spikeinterface/sorters/launcher.py index 6f3b972fdd..103f30dac5 100644 --- a/src/spikeinterface/sorters/launcher.py +++ b/src/spikeinterface/sorters/launcher.py @@ -30,7 +30,7 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=False): """ - Run several :py:func:`run_sorter()` sequencially or in parralel given a list of job. + Run several :py:func:`run_sorter()` sequentially or in parallel given a list of jobs. For **engine="loop"** this is equivalent to: @@ -39,9 +39,9 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=Fal for job in job_list: run_sorter(**job) - For some engines, this function is blocking until the results ("loop", "joblib", "multiprocessing", "dask"). - For some other engine ("slurm") the function return almost immediatly (akak non blocking) and the results - must be retrieve by hand when finished with :py:func:`read_sorter_folder()`. + For some engines ("loop", "joblib", "multiprocessing", "dask"), this function is blocking until the results . + For other engines ("slurm") the function returns almost immediately (aka non-blocking) and the results + must be retrieved by hand when finished with :py:func:`read_sorter_folder()`. 
Parameters ---------- From fe178c67ac9428477ca146dd6ac453bf1cccfc78 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Mon, 11 Sep 2023 10:37:00 +0200 Subject: [PATCH 31/90] Apply suggestions and avoid using chmod on windows --- src/spikeinterface/core/waveform_extractor.py | 111 +++++++++--------- .../tests/common_extension_tests.py | 28 +++-- 2 files changed, 73 insertions(+), 66 deletions(-) diff --git a/src/spikeinterface/core/waveform_extractor.py b/src/spikeinterface/core/waveform_extractor.py index 431440c846..3647e915bf 100644 --- a/src/spikeinterface/core/waveform_extractor.py +++ b/src/spikeinterface/core/waveform_extractor.py @@ -1751,9 +1751,7 @@ def __init__(self, waveform_extractor): if self.format == "binary": self.extension_folder = self.folder / self.extension_name if not self.extension_folder.is_dir(): - if not self.waveform_extractor.is_read_only(): - self.extension_folder.mkdir() - else: + if self.waveform_extractor.is_read_only(): warn( "WaveformExtractor: cannot save extension in read-only mode. " "Extension will be saved in memory." @@ -1761,15 +1759,16 @@ def __init__(self, waveform_extractor): self.format = "memory" self.extension_folder = None self.folder = None + else: + self.extension_folder.mkdir() + else: import zarr mode = "r+" if not self.waveform_extractor.is_read_only() else "r" zarr_root = zarr.open(self.folder, mode=mode) if self.extension_name not in zarr_root.keys(): - if not self.waveform_extractor.is_read_only(): - self.extension_group = zarr_root.create_group(self.extension_name) - else: + if self.waveform_extractor.is_read_only(): warn( "WaveformExtractor: cannot save extension in read-only mode. " "Extension will be saved in memory." @@ -1777,6 +1776,8 @@ def __init__(self, waveform_extractor): self.format = "memory" self.extension_folder = None self.folder = None + else: + self.extension_group = zarr_root.create_group(self.extension_name) else: self.extension_group = zarr_root[self.extension_name] else: @@ -1893,56 +1894,58 @@ def save(self, **kwargs): self._save(**kwargs) def _save(self, **kwargs): - if not self.waveform_extractor.is_read_only(): - if self.format == "binary": - import pandas as pd - - for ext_data_name, ext_data in self._extension_data.items(): - if isinstance(ext_data, dict): - with (self.extension_folder / f"{ext_data_name}.json").open("w") as f: - json.dump(ext_data, f) - elif isinstance(ext_data, np.ndarray): - np.save(self.extension_folder / f"{ext_data_name}.npy", ext_data) - elif isinstance(ext_data, pd.DataFrame): - ext_data.to_csv(self.extension_folder / f"{ext_data_name}.csv", index=True) - else: - try: - with (self.extension_folder / f"{ext_data_name}.pkl").open("wb") as f: - pickle.dump(ext_data, f) - except: - raise Exception(f"Could not save {ext_data_name} as extension data") - elif self.format == "zarr": - from .zarrrecordingextractor import get_default_zarr_compressor - import pandas as pd - import numcodecs - - compressor = kwargs.get("compressor", None) - if compressor is None: - compressor = get_default_zarr_compressor() - for ext_data_name, ext_data in self._extension_data.items(): - if ext_data_name in self.extension_group: - del self.extension_group[ext_data_name] - if isinstance(ext_data, dict): + # Only save if not read only + if self.waveform_extractor.is_read_only(): + return + if self.format == "binary": + import pandas as pd + + for ext_data_name, ext_data in self._extension_data.items(): + if isinstance(ext_data, dict): + with (self.extension_folder / f"{ext_data_name}.json").open("w") as f: + 
json.dump(ext_data, f) + elif isinstance(ext_data, np.ndarray): + np.save(self.extension_folder / f"{ext_data_name}.npy", ext_data) + elif isinstance(ext_data, pd.DataFrame): + ext_data.to_csv(self.extension_folder / f"{ext_data_name}.csv", index=True) + else: + try: + with (self.extension_folder / f"{ext_data_name}.pkl").open("wb") as f: + pickle.dump(ext_data, f) + except: + raise Exception(f"Could not save {ext_data_name} as extension data") + elif self.format == "zarr": + from .zarrrecordingextractor import get_default_zarr_compressor + import pandas as pd + import numcodecs + + compressor = kwargs.get("compressor", None) + if compressor is None: + compressor = get_default_zarr_compressor() + for ext_data_name, ext_data in self._extension_data.items(): + if ext_data_name in self.extension_group: + del self.extension_group[ext_data_name] + if isinstance(ext_data, dict): + self.extension_group.create_dataset( + name=ext_data_name, data=[ext_data], object_codec=numcodecs.JSON() + ) + self.extension_group[ext_data_name].attrs["dict"] = True + elif isinstance(ext_data, np.ndarray): + self.extension_group.create_dataset(name=ext_data_name, data=ext_data, compressor=compressor) + elif isinstance(ext_data, pd.DataFrame): + ext_data.to_xarray().to_zarr( + store=self.extension_group.store, + group=f"{self.extension_group.name}/{ext_data_name}", + mode="a", + ) + self.extension_group[ext_data_name].attrs["dataframe"] = True + else: + try: self.extension_group.create_dataset( - name=ext_data_name, data=[ext_data], object_codec=numcodecs.JSON() - ) - self.extension_group[ext_data_name].attrs["dict"] = True - elif isinstance(ext_data, np.ndarray): - self.extension_group.create_dataset(name=ext_data_name, data=ext_data, compressor=compressor) - elif isinstance(ext_data, pd.DataFrame): - ext_data.to_xarray().to_zarr( - store=self.extension_group.store, - group=f"{self.extension_group.name}/{ext_data_name}", - mode="a", + name=ext_data_name, data=ext_data, object_codec=numcodecs.Pickle() ) - self.extension_group[ext_data_name].attrs["dataframe"] = True - else: - try: - self.extension_group.create_dataset( - name=ext_data_name, data=ext_data, object_codec=numcodecs.Pickle() - ) - except: - raise Exception(f"Could not save {ext_data_name} as extension data") + except: + raise Exception(f"Could not save {ext_data_name} as extension data") def reset(self): """ diff --git a/src/spikeinterface/postprocessing/tests/common_extension_tests.py b/src/spikeinterface/postprocessing/tests/common_extension_tests.py index f44d58470c..f7272ddefe 100644 --- a/src/spikeinterface/postprocessing/tests/common_extension_tests.py +++ b/src/spikeinterface/postprocessing/tests/common_extension_tests.py @@ -2,6 +2,7 @@ import numpy as np import pandas as pd import shutil +import platform from pathlib import Path from spikeinterface import extract_waveforms, load_extractor, load_waveforms, compute_sparsity @@ -78,12 +79,13 @@ def setUp(self): self.we2 = we2 # make we read-only - we_ro_folder = cache_folder / "toy_waveforms_2seg_readonly" - if not we_ro_folder.is_dir(): - shutil.copytree(we2.folder, we_ro_folder) + if platform.system() != "Windows": + we_ro_folder = cache_folder / "toy_waveforms_2seg_readonly" + if not we_ro_folder.is_dir(): + shutil.copytree(we2.folder, we_ro_folder) # change permissions (R+X) - we_ro_folder.chmod(0o555) - self.we_ro = load_waveforms(we_ro_folder) + we_ro_folder.chmod(0o555) + self.we_ro = load_waveforms(we_ro_folder) self.sparsity2 = compute_sparsity(we2, method="radius", radius_um=30) we_memory 
= extract_waveforms(
@@ -108,8 +110,9 @@ def setUp(self):
 
     def tearDown(self):
         # allow pytest to delete RO folder
-        we_ro_folder = cache_folder / "toy_waveforms_2seg_readonly"
-        we_ro_folder.chmod(0o777)
+        if platform.system() != "Windows":
+            we_ro_folder = cache_folder / "toy_waveforms_2seg_readonly"
+            we_ro_folder.chmod(0o777)
 
     def _test_extension_folder(self, we, in_memory=False):
         if self.extension_function_kwargs_list is None:
@@ -193,8 +196,9 @@ def test_extension(self):
                    print(f"{ext_data_name} of type {type(ext_data_mem)} not tested.")
 
         # read-only - Extension is memory only
-        _ = self.extension_class.get_extension_function()(self.we_ro, load_if_exists=False)
-        assert self.extension_class.extension_name in self.we_ro.get_available_extension_names()
-        ext_ro = self.we_ro.load_extension(self.extension_class.extension_name)
-        assert ext_ro.format == "memory"
-        assert ext_ro.extension_folder is None
+        if platform.system() != "Windows":
+            _ = self.extension_class.get_extension_function()(self.we_ro, load_if_exists=False)
+            assert self.extension_class.extension_name in self.we_ro.get_available_extension_names()
+            ext_ro = self.we_ro.load_extension(self.extension_class.extension_name)
+            assert ext_ro.format == "memory"
+            assert ext_ro.extension_folder is None

From 6a364b03e3c6504969a9bdcf7eecf6885384ddb3 Mon Sep 17 00:00:00 2001
From: Samuel Garcia
Date: Tue, 12 Sep 2023 10:28:55 +0200
Subject: [PATCH 32/90] feedback from Zach and Ramon

---
 src/spikeinterface/sorters/launcher.py | 22 +++++++++++++++++-----
 1 file changed, 17 insertions(+), 5 deletions(-)

diff --git a/src/spikeinterface/sorters/launcher.py b/src/spikeinterface/sorters/launcher.py
index 103f30dac5..b158eba22d 100644
--- a/src/spikeinterface/sorters/launcher.py
+++ b/src/spikeinterface/sorters/launcher.py
@@ -39,9 +39,21 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=Fal
         for job in job_list:
             run_sorter(**job)
 
-    For some engines ("loop", "joblib", "multiprocessing", "dask"), this function is blocking until the results .
-    For other engines ("slurm") the function returns almost immediately (aka non-blocking) and the results
-    must be retrieved by hand when finished with :py:func:`read_sorter_folder()`.
+    The following engines block the I/O:
+      * "loop"
+      * "joblib"
+      * "multiprocessing"
+      * "dask"
+
+    The following engines are *asynchronous*:
+      * "slurm"
+
+    Where *blocking* means that this function is blocking until the results are returned.
+    This is in opposition to *asynchronous*, where the function returns `None` almost immediately (aka non-blocking),
+    but the results must be retrieved by hand when jobs are finished. No mechanism is provided here to be aware of
+    when jobs are finished.
+    In this *asynchronous* case, the :py:func:`read_sorter_folder()` function helps to retrieve individual results.
+
 
     Parameters
     ----------
@@ -302,7 +314,7 @@ def run_sorters(
     singularity_images={},
 ):
     """
-    This function is deprecated and will be removed.
+    This function is deprecated and will be removed in version 0.100
     Please use run_sorter_jobs() instead.
 
     Parameters
@@ -346,7 +358,7 @@ def run_sorters(
     """
 
     warnings.warn(
-        "run_sorters()is deprecated please use run_sorter_jobs() instead",
+        "run_sorters() is deprecated, please use run_sorter_jobs() instead. 
This will be removed in 0.100", DeprecationWarning, stacklevel=2, ) From 99e7acc8044d91773b2c77c67d51669dfe6b2fd2 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Wed, 13 Sep 2023 11:32:37 +0200 Subject: [PATCH 33/90] WIP --- src/spikeinterface/sorters/internal/spyking_circus2.py | 5 +++-- .../sortingcomponents/clustering/random_projections.py | 5 +++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/src/spikeinterface/sorters/internal/spyking_circus2.py b/src/spikeinterface/sorters/internal/spyking_circus2.py index 8a7b353bd1..571096caf9 100644 --- a/src/spikeinterface/sorters/internal/spyking_circus2.py +++ b/src/spikeinterface/sorters/internal/spyking_circus2.py @@ -145,8 +145,9 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): matching_params.update({"noise_levels": noise_levels}) matching_job_params = job_kwargs.copy() - if "chunk_memory" in matching_job_params: - matching_job_params.pop("chunk_memory") + for value in ['chunk_size', 'chunk_memory', 'total_memory', 'chunk_duration']: + if value in matching_job_params: + matching_job_params.pop(value) matching_job_params["chunk_duration"] = "100ms" diff --git a/src/spikeinterface/sortingcomponents/clustering/random_projections.py b/src/spikeinterface/sortingcomponents/clustering/random_projections.py index d82f9a7808..025555440a 100644 --- a/src/spikeinterface/sortingcomponents/clustering/random_projections.py +++ b/src/spikeinterface/sortingcomponents/clustering/random_projections.py @@ -191,8 +191,9 @@ def main_function(cls, recording, peaks, params): ) cleaning_matching_params = params["job_kwargs"].copy() - if "chunk_memory" in cleaning_matching_params: - cleaning_matching_params.pop("chunk_memory") + for value in ['chunk_size', 'chunk_memory', 'total_memory', 'chunk_duration']: + if value in cleaning_matching_params: + cleaning_matching_params.pop(value) cleaning_matching_params["chunk_duration"] = "100ms" cleaning_matching_params["n_jobs"] = 1 cleaning_matching_params["verbose"] = False From cc792136cf213c4701a962206295dc7efaa718ad Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 13 Sep 2023 09:32:58 +0000 Subject: [PATCH 34/90] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/sorters/internal/spyking_circus2.py | 2 +- .../sortingcomponents/benchmark/benchmark_matching.py | 8 ++++---- .../sortingcomponents/clustering/random_projections.py | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/spikeinterface/sorters/internal/spyking_circus2.py b/src/spikeinterface/sorters/internal/spyking_circus2.py index 571096caf9..db3d88f116 100644 --- a/src/spikeinterface/sorters/internal/spyking_circus2.py +++ b/src/spikeinterface/sorters/internal/spyking_circus2.py @@ -145,7 +145,7 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): matching_params.update({"noise_levels": noise_levels}) matching_job_params = job_kwargs.copy() - for value in ['chunk_size', 'chunk_memory', 'total_memory', 'chunk_duration']: + for value in ["chunk_size", "chunk_memory", "total_memory", "chunk_duration"]: if value in matching_job_params: matching_job_params.pop(value) diff --git a/src/spikeinterface/sortingcomponents/benchmark/benchmark_matching.py b/src/spikeinterface/sortingcomponents/benchmark/benchmark_matching.py index 8ce8efe25f..50d64e1349 100644 --- a/src/spikeinterface/sortingcomponents/benchmark/benchmark_matching.py +++ 
b/src/spikeinterface/sortingcomponents/benchmark/benchmark_matching.py @@ -626,10 +626,10 @@ def plot_comparison_matching( patches.append(mpatches.Patch(color=color, label=name)) ax.legend(handles=patches, bbox_to_anchor=(1.05, 1), loc="upper left", borderaxespad=0.0) else: - ax.spines['bottom'].set_visible(False) - ax.spines['left'].set_visible(False) - ax.spines['top'].set_visible(False) - ax.spines['right'].set_visible(False) + ax.spines["bottom"].set_visible(False) + ax.spines["left"].set_visible(False) + ax.spines["top"].set_visible(False) + ax.spines["right"].set_visible(False) ax.set_xticks([]) ax.set_yticks([]) plt.tight_layout(h_pad=0, w_pad=0) diff --git a/src/spikeinterface/sortingcomponents/clustering/random_projections.py b/src/spikeinterface/sortingcomponents/clustering/random_projections.py index 025555440a..5592b23c8d 100644 --- a/src/spikeinterface/sortingcomponents/clustering/random_projections.py +++ b/src/spikeinterface/sortingcomponents/clustering/random_projections.py @@ -191,7 +191,7 @@ def main_function(cls, recording, peaks, params): ) cleaning_matching_params = params["job_kwargs"].copy() - for value in ['chunk_size', 'chunk_memory', 'total_memory', 'chunk_duration']: + for value in ["chunk_size", "chunk_memory", "total_memory", "chunk_duration"]: if value in cleaning_matching_params: cleaning_matching_params.pop(value) cleaning_matching_params["chunk_duration"] = "100ms" From dda78037d9570a529392af35055d343fc6c56022 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Wed, 13 Sep 2023 13:26:01 +0200 Subject: [PATCH 35/90] Adding unit_ids --- .../sortingcomponents/clustering/random_projections.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/spikeinterface/sortingcomponents/clustering/random_projections.py b/src/spikeinterface/sortingcomponents/clustering/random_projections.py index 5592b23c8d..be8ecd6702 100644 --- a/src/spikeinterface/sortingcomponents/clustering/random_projections.py +++ b/src/spikeinterface/sortingcomponents/clustering/random_projections.py @@ -177,7 +177,8 @@ def main_function(cls, recording, peaks, params): mode = "folder" sorting_folder = tmp_folder / "sorting" - sorting = NumpySorting.from_times_labels(spikes["sample_index"], spikes["unit_index"], fs) + unit_ids = np.arange(len(np.unique(spikes["unit_index"]))) + sorting = NumpySorting(spikes, fs, unit_ids=unit_ids) sorting = sorting.save(folder=sorting_folder) we = extract_waveforms( recording, From 1b28837a452da62e6890019bcb311cb5ced4009e Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Wed, 13 Sep 2023 13:45:38 +0200 Subject: [PATCH 36/90] skip slurm tests --- src/spikeinterface/sorters/tests/test_launcher.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/spikeinterface/sorters/tests/test_launcher.py b/src/spikeinterface/sorters/tests/test_launcher.py index 2d8e6f3d3c..ecab64ede6 100644 --- a/src/spikeinterface/sorters/tests/test_launcher.py +++ b/src/spikeinterface/sorters/tests/test_launcher.py @@ -115,6 +115,7 @@ def test_run_sorter_jobs_dask(job_list): print(t1 - t0) +@pytest.mark.skip("Slurm launcher need a machine with slurm") def test_run_sorter_jobs_slurm(job_list): if base_output.is_dir(): shutil.rmtree(base_output) From f013828bf4cc1363518fdc0e7940cfac07555149 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Thu, 14 Sep 2023 17:20:49 +0200 Subject: [PATCH 37/90] Allow MergeUnitsSorting to handle tuples --- src/spikeinterface/curation/mergeunitssorting.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git 
a/src/spikeinterface/curation/mergeunitssorting.py b/src/spikeinterface/curation/mergeunitssorting.py index 264ac3a56d..6baa68b0da 100644 --- a/src/spikeinterface/curation/mergeunitssorting.py +++ b/src/spikeinterface/curation/mergeunitssorting.py @@ -12,7 +12,7 @@ class MergeUnitsSorting(BaseSorting): ---------- parent_sorting: Recording The sorting object - units_to_merge: list of lists + units_to_merge: list/tuple of lists/tuples A list of lists for every merge group. Each element needs to have at least two elements (two units to merge), but it can also have more (merge multiple units at once). new_unit_ids: None or list @@ -24,6 +24,7 @@ class MergeUnitsSorting(BaseSorting): Default: 'keep' delta_time_ms: float or None Number of ms to consider for duplicated spikes. None won't check for duplications + Returns ------- sorting: Sorting @@ -33,7 +34,7 @@ class MergeUnitsSorting(BaseSorting): def __init__(self, parent_sorting, units_to_merge, new_unit_ids=None, properties_policy="keep", delta_time_ms=0.4): self._parent_sorting = parent_sorting - if not isinstance(units_to_merge[0], list): + if not isinstance(units_to_merge[0], (list, tuple)): # keep backward compatibility : the previous behavior was only one merge units_to_merge = [units_to_merge] From b78257cf7217764de00be0eac72b56deb499e1bd Mon Sep 17 00:00:00 2001 From: Sebastien Date: Fri, 15 Sep 2023 11:51:55 +0200 Subject: [PATCH 38/90] Speed up searchsorted calls --- src/spikeinterface/core/basesorting.py | 3 +-- src/spikeinterface/core/generate.py | 7 +++---- src/spikeinterface/core/node_pipeline.py | 12 ++++-------- src/spikeinterface/core/numpyextractors.py | 3 +-- src/spikeinterface/core/segmentutils.py | 6 ++---- src/spikeinterface/core/waveform_tools.py | 15 +++++---------- .../curation/remove_duplicated_spikes.py | 3 +-- .../postprocessing/amplitude_scalings.py | 3 +-- .../postprocessing/principal_component.py | 3 +-- .../postprocessing/spike_amplitudes.py | 4 +--- .../postprocessing/spike_locations.py | 3 +-- src/spikeinterface/qualitymetrics/misc_metrics.py | 6 ++---- .../sortingcomponents/motion_interpolation.py | 3 +-- 13 files changed, 24 insertions(+), 47 deletions(-) diff --git a/src/spikeinterface/core/basesorting.py b/src/spikeinterface/core/basesorting.py index 52f71c2399..eb141abde4 100644 --- a/src/spikeinterface/core/basesorting.py +++ b/src/spikeinterface/core/basesorting.py @@ -473,8 +473,7 @@ def to_spike_vector(self, concatenated=True, extremum_channel_inds=None, use_cac if not concatenated: spikes_ = [] for segment_index in range(self.get_num_segments()): - s0 = np.searchsorted(spikes["segment_index"], segment_index, side="left") - s1 = np.searchsorted(spikes["segment_index"], segment_index + 1, side="left") + s0, s1 = np.searchsorted(spikes["segment_index"], [segment_index, segment_index + 1], side="left") spikes_.append(spikes[s0:s1]) spikes = spikes_ diff --git a/src/spikeinterface/core/generate.py b/src/spikeinterface/core/generate.py index 401c498f03..56a2bb4f48 100644 --- a/src/spikeinterface/core/generate.py +++ b/src/spikeinterface/core/generate.py @@ -1109,8 +1109,7 @@ def __init__( num_samples = [num_samples] for segment_index in range(sorting.get_num_segments()): - start = np.searchsorted(self.spike_vector["segment_index"], segment_index, side="left") - end = np.searchsorted(self.spike_vector["segment_index"], segment_index, side="right") + start, end = np.searchsorted(self.spike_vector["segment_index"], [segment_index, segment_index+1], side="left") spikes = self.spike_vector[start:end] 
amplitude_vec = amplitude_vector[start:end] if amplitude_vector is not None else None upsample_vec = upsample_vector[start:end] if upsample_vector is not None else None @@ -1208,8 +1207,8 @@ def get_traces( else: traces = np.zeros([end_frame - start_frame, n_channels], dtype=self.dtype) - start = np.searchsorted(self.spike_vector["sample_index"], start_frame - self.templates.shape[1], side="left") - end = np.searchsorted(self.spike_vector["sample_index"], end_frame + self.templates.shape[1], side="right") + start, end = np.searchsorted(self.spike_vector["sample_index"], [start_frame - self.templates.shape[1], + end_frame + self.templates.shape[1] + 1], side="left") for i in range(start, end): spike = self.spike_vector[i] diff --git a/src/spikeinterface/core/node_pipeline.py b/src/spikeinterface/core/node_pipeline.py index b11f40a441..5627eba518 100644 --- a/src/spikeinterface/core/node_pipeline.py +++ b/src/spikeinterface/core/node_pipeline.py @@ -111,8 +111,7 @@ def __init__(self, recording, peaks): # precompute segment slice self.segment_slices = [] for segment_index in range(recording.get_num_segments()): - i0 = np.searchsorted(peaks["segment_index"], segment_index) - i1 = np.searchsorted(peaks["segment_index"], segment_index + 1) + i0, i1 = np.searchsorted(peaks["segment_index"], [segment_index, segment_index + 1]) self.segment_slices.append(slice(i0, i1)) def get_trace_margin(self): @@ -125,8 +124,7 @@ def compute(self, traces, start_frame, end_frame, segment_index, max_margin): # get local peaks sl = self.segment_slices[segment_index] peaks_in_segment = self.peaks[sl] - i0 = np.searchsorted(peaks_in_segment["sample_index"], start_frame) - i1 = np.searchsorted(peaks_in_segment["sample_index"], end_frame) + i0, i1 = np.searchsorted(peaks_in_segment["segment_index"], [start_frame, end_frame]) local_peaks = peaks_in_segment[i0:i1] # make sample index local to traces @@ -183,8 +181,7 @@ def __init__( # precompute segment slice self.segment_slices = [] for segment_index in range(recording.get_num_segments()): - i0 = np.searchsorted(self.peaks["segment_index"], segment_index) - i1 = np.searchsorted(self.peaks["segment_index"], segment_index + 1) + i0, i1 = np.searchsorted(self.peaks["segment_index"], [segment_index, segment_index + 1]) self.segment_slices.append(slice(i0, i1)) def get_trace_margin(self): @@ -197,8 +194,7 @@ def compute(self, traces, start_frame, end_frame, segment_index, max_margin): # get local peaks sl = self.segment_slices[segment_index] peaks_in_segment = self.peaks[sl] - i0 = np.searchsorted(peaks_in_segment["sample_index"], start_frame) - i1 = np.searchsorted(peaks_in_segment["sample_index"], end_frame) + i0, i1 = np.searchsorted(peaks_in_segment["segment_index"], [start_frame, end_frame]) local_peaks = peaks_in_segment[i0:i1] # make sample index local to traces diff --git a/src/spikeinterface/core/numpyextractors.py b/src/spikeinterface/core/numpyextractors.py index 97f22615df..d5663156c7 100644 --- a/src/spikeinterface/core/numpyextractors.py +++ b/src/spikeinterface/core/numpyextractors.py @@ -338,8 +338,7 @@ def get_unit_spike_train(self, unit_id, start_frame, end_frame): if self.spikes_in_seg is None: # the slicing of segment is done only once the first time # this fasten the constructor a lot - s0 = np.searchsorted(self.spikes["segment_index"], self.segment_index, side="left") - s1 = np.searchsorted(self.spikes["segment_index"], self.segment_index + 1, side="left") + s0, s1 = np.searchsorted(self.spikes["segment_index"], [self.segment_index, self.segment_index + 
1]) self.spikes_in_seg = self.spikes[s0:s1] unit_index = self.unit_ids.index(unit_id) diff --git a/src/spikeinterface/core/segmentutils.py b/src/spikeinterface/core/segmentutils.py index f70c45bfe5..85e36cf7a5 100644 --- a/src/spikeinterface/core/segmentutils.py +++ b/src/spikeinterface/core/segmentutils.py @@ -174,8 +174,7 @@ def get_traces(self, start_frame, end_frame, channel_indices): # Return (0 * num_channels) array of correct dtype return self.parent_segments[0].get_traces(0, 0, channel_indices) - i0 = np.searchsorted(self.cumsum_length, start_frame, side="right") - 1 - i1 = np.searchsorted(self.cumsum_length, end_frame, side="right") - 1 + i0, i1 = np.searchsorted(self.cumsum_length, [start_frame, end_frame], side="right") - 1 # several case: # * come from one segment (i0 == i1) @@ -469,8 +468,7 @@ def get_unit_spike_train( if end_frame is None: end_frame = self.get_num_samples() - i0 = np.searchsorted(self.cumsum_length, start_frame, side="right") - 1 - i1 = np.searchsorted(self.cumsum_length, end_frame, side="right") - 1 + i0, i1 = np.searchsorted(self.cumsum_length, [start_frame, end_frame], side="right") - 1 # several case: # * come from one segment (i0 == i1) diff --git a/src/spikeinterface/core/waveform_tools.py b/src/spikeinterface/core/waveform_tools.py index da8e3d64b6..0ac20b9fec 100644 --- a/src/spikeinterface/core/waveform_tools.py +++ b/src/spikeinterface/core/waveform_tools.py @@ -344,15 +344,13 @@ def _worker_distribute_buffers(segment_index, start_frame, end_frame, worker_ctx # take only spikes with the correct segment_index # this is a slice so no copy!! - s0 = np.searchsorted(spikes["segment_index"], segment_index) - s1 = np.searchsorted(spikes["segment_index"], segment_index + 1) + s0, s1 = np.searchsorted(spikes["segment_index"], [segment_index, segment_index + 1]) in_seg_spikes = spikes[s0:s1] # take only spikes in range [start_frame, end_frame] # this is a slice so no copy!! # the border of segment are protected by nbefore on left an nafter on the right - i0 = np.searchsorted(in_seg_spikes["sample_index"], max(start_frame, nbefore)) - i1 = np.searchsorted(in_seg_spikes["sample_index"], min(end_frame, seg_size - nafter)) + i0, i1 = np.searchsorted(in_seg_spikes["sample_index"], [max(start_frame, nbefore), min(end_frame, seg_size - nafter)]) # slice in absolut in spikes vector l0 = i0 + s0 @@ -562,8 +560,7 @@ def _init_worker_distribute_single_buffer( # prepare segment slices segment_slices = [] for segment_index in range(recording.get_num_segments()): - s0 = np.searchsorted(spikes["segment_index"], segment_index) - s1 = np.searchsorted(spikes["segment_index"], segment_index + 1) + s0, s1 = np.searchsorted(spikes["segment_index"], [segment_index, segment_index + 1]) segment_slices.append((s0, s1)) worker_ctx["segment_slices"] = segment_slices @@ -590,8 +587,7 @@ def _worker_distribute_single_buffer(segment_index, start_frame, end_frame, work # take only spikes in range [start_frame, end_frame] # this is a slice so no copy!! 
# the border of segment are protected by nbefore on left an nafter on the right - i0 = np.searchsorted(in_seg_spikes["sample_index"], max(start_frame, nbefore)) - i1 = np.searchsorted(in_seg_spikes["sample_index"], min(end_frame, seg_size - nafter)) + i0, i1 = np.searchsorted(in_seg_spikes["sample_index"], [max(start_frame, nbefore), min(end_frame, seg_size - nafter)]) # slice in absolut in spikes vector l0 = i0 + s0 @@ -685,8 +681,7 @@ def has_exceeding_spikes(recording, sorting): """ spike_vector = sorting.to_spike_vector() for segment_index in range(recording.get_num_segments()): - start_seg_ind = np.searchsorted(spike_vector["segment_index"], segment_index) - end_seg_ind = np.searchsorted(spike_vector["segment_index"], segment_index + 1) + start_seg_ind, end_seg_ind = np.searchsorted(spike_vector["segment_index"], [segment_index, segment_index + 1]) spike_vector_seg = spike_vector[start_seg_ind:end_seg_ind] if len(spike_vector_seg) > 0: if spike_vector_seg["sample_index"][-1] > recording.get_num_samples(segment_index=segment_index) - 1: diff --git a/src/spikeinterface/curation/remove_duplicated_spikes.py b/src/spikeinterface/curation/remove_duplicated_spikes.py index 04af69b37a..3badaa9402 100644 --- a/src/spikeinterface/curation/remove_duplicated_spikes.py +++ b/src/spikeinterface/curation/remove_duplicated_spikes.py @@ -82,8 +82,7 @@ def get_unit_spike_train( if end_frame == None: end_frame = spike_train[-1] if len(spike_train) > 0 else 0 - start = np.searchsorted(spike_train, start_frame, side="left") - end = np.searchsorted(spike_train, end_frame, side="right") + start, end = np.searchsorted(spike_train, [start_frame, end + 1], side="left") return spike_train[start:end] diff --git a/src/spikeinterface/postprocessing/amplitude_scalings.py b/src/spikeinterface/postprocessing/amplitude_scalings.py index 5a0148c5c4..bb97f246d9 100644 --- a/src/spikeinterface/postprocessing/amplitude_scalings.py +++ b/src/spikeinterface/postprocessing/amplitude_scalings.py @@ -99,8 +99,7 @@ def _run(self, **job_kwargs): # precompute segment slice segment_slices = [] for segment_index in range(we.get_num_segments()): - i0 = np.searchsorted(self.spikes["segment_index"], segment_index) - i1 = np.searchsorted(self.spikes["segment_index"], segment_index + 1) + i0, i1 = np.searchsorted(self.spikes["segment_index"], [segment_index, segment_index + 1]) segment_slices.append(slice(i0, i1)) # and run diff --git a/src/spikeinterface/postprocessing/principal_component.py b/src/spikeinterface/postprocessing/principal_component.py index 233625e09e..ce1c3bd5a0 100644 --- a/src/spikeinterface/postprocessing/principal_component.py +++ b/src/spikeinterface/postprocessing/principal_component.py @@ -600,8 +600,7 @@ def _all_pc_extractor_chunk(segment_index, start_frame, end_frame, worker_ctx): seg_size = recording.get_num_samples(segment_index=segment_index) - i0 = np.searchsorted(spike_times, start_frame) - i1 = np.searchsorted(spike_times, end_frame) + i0, i1 = np.searchsorted(spike_times, [start_frame, end_frame]) if i0 != i1: # protect from spikes on border : spike_time<0 or spike_time>seg_size diff --git a/src/spikeinterface/postprocessing/spike_amplitudes.py b/src/spikeinterface/postprocessing/spike_amplitudes.py index 62a4e2c320..fd6078b9b0 100644 --- a/src/spikeinterface/postprocessing/spike_amplitudes.py +++ b/src/spikeinterface/postprocessing/spike_amplitudes.py @@ -218,9 +218,7 @@ def _spike_amplitudes_chunk(segment_index, start_frame, end_frame, worker_ctx): d = np.diff(spike_times) assert np.all(d >= 0) - i0 = 
np.searchsorted(spike_times, start_frame) - i1 = np.searchsorted(spike_times, end_frame) - + i0, i1 = np.searchsorted(spike_times, [start_frame, end_frame]) n_spikes = i1 - i0 amplitudes = np.zeros(n_spikes, dtype=recording.get_dtype()) diff --git a/src/spikeinterface/postprocessing/spike_locations.py b/src/spikeinterface/postprocessing/spike_locations.py index c6f498f7e8..5f23e25b32 100644 --- a/src/spikeinterface/postprocessing/spike_locations.py +++ b/src/spikeinterface/postprocessing/spike_locations.py @@ -77,8 +77,7 @@ def get_data(self, outputs="concatenated"): elif outputs == "by_unit": locations_by_unit = [] for segment_index in range(self.waveform_extractor.get_num_segments()): - i0 = np.searchsorted(self.spikes["segment_index"], segment_index, side="left") - i1 = np.searchsorted(self.spikes["segment_index"], segment_index, side="right") + i0, i1 = np.searchsorted(self.spikes["segment_index"], [segment_index, segment_index + 1], side="left") spikes = self.spikes[i0:i1] locations = self._extension_data["spike_locations"][i0:i1] diff --git a/src/spikeinterface/qualitymetrics/misc_metrics.py b/src/spikeinterface/qualitymetrics/misc_metrics.py index ee28485983..01701e4f65 100644 --- a/src/spikeinterface/qualitymetrics/misc_metrics.py +++ b/src/spikeinterface/qualitymetrics/misc_metrics.py @@ -848,16 +848,14 @@ def compute_drift_metrics( spike_vector = sorting.to_spike_vector() # retrieve spikes in segment - i0 = np.searchsorted(spike_vector["segment_index"], segment_index) - i1 = np.searchsorted(spike_vector["segment_index"], segment_index + 1) + i0, i1 = np.searchsorted(spike_vector["segment_index"], [segment_index, segment_index + 1]) spikes_in_segment = spike_vector[i0:i1] spike_locations_in_segment = spike_locations[i0:i1] # compute median positions (if less than min_spikes_per_interval, median position is 0) median_positions = np.nan * np.zeros((len(unit_ids), num_bin_edges - 1)) for bin_index, (start_frame, end_frame) in enumerate(zip(bins[:-1], bins[1:])): - i0 = np.searchsorted(spikes_in_segment["sample_index"], start_frame) - i1 = np.searchsorted(spikes_in_segment["sample_index"], end_frame) + i0, i1 = np.searchsorted(spikes_in_segment["sample_index"], [start_frame, end_frame]) spikes_in_bin = spikes_in_segment[i0:i1] spike_locations_in_bin = spike_locations_in_segment[i0:i1][direction] diff --git a/src/spikeinterface/sortingcomponents/motion_interpolation.py b/src/spikeinterface/sortingcomponents/motion_interpolation.py index b4a44105e4..1f6c348574 100644 --- a/src/spikeinterface/sortingcomponents/motion_interpolation.py +++ b/src/spikeinterface/sortingcomponents/motion_interpolation.py @@ -155,8 +155,7 @@ def interpolate_motion_on_traces( **spatial_interpolation_kwargs, ) - i0 = np.searchsorted(bin_inds, bin_ind, side="left") - i1 = np.searchsorted(bin_inds, bin_ind, side="right") + i0, i1 = np.searchsorted(bin_inds, [bin_ind, bin_ind + 1] side="left") # here we use a simple np.matmul even if dirft_kernel can be super sparse. 
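# Illustration (not part of the patch) of why the hunk just above can merge
# side="left" and side="right" into one call: for sorted integer keys,
# searchsorted(a, v, side="right") == searchsorted(a, v + 1, side="left"),
# so both bounds can share a single side. (The missing comma before side="left"
# in the new line above is corrected by a later commit in this series.)
import numpy as np

a = np.array([0, 0, 1, 1, 1, 2, 4])                  # e.g. sorted bin or segment indices
v = 1
left_right = (np.searchsorted(a, v, side="left"), np.searchsorted(a, v, side="right"))
one_call = tuple(np.searchsorted(a, [v, v + 1], side="left"))
assert left_right == one_call == (2, 5)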
# because the speed for a sparse matmul is not so good when we disable multi threaad (due multi processing From 164430c83cf66221bed677198fa8d468a8781c1d Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 15 Sep 2023 09:54:35 +0000 Subject: [PATCH 39/90] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/core/generate.py | 11 ++++++++--- src/spikeinterface/core/waveform_tools.py | 8 ++++++-- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/src/spikeinterface/core/generate.py b/src/spikeinterface/core/generate.py index 56a2bb4f48..6f85e76f1f 100644 --- a/src/spikeinterface/core/generate.py +++ b/src/spikeinterface/core/generate.py @@ -1109,7 +1109,9 @@ def __init__( num_samples = [num_samples] for segment_index in range(sorting.get_num_segments()): - start, end = np.searchsorted(self.spike_vector["segment_index"], [segment_index, segment_index+1], side="left") + start, end = np.searchsorted( + self.spike_vector["segment_index"], [segment_index, segment_index + 1], side="left" + ) spikes = self.spike_vector[start:end] amplitude_vec = amplitude_vector[start:end] if amplitude_vector is not None else None upsample_vec = upsample_vector[start:end] if upsample_vector is not None else None @@ -1207,8 +1209,11 @@ def get_traces( else: traces = np.zeros([end_frame - start_frame, n_channels], dtype=self.dtype) - start, end = np.searchsorted(self.spike_vector["sample_index"], [start_frame - self.templates.shape[1], - end_frame + self.templates.shape[1] + 1], side="left") + start, end = np.searchsorted( + self.spike_vector["sample_index"], + [start_frame - self.templates.shape[1], end_frame + self.templates.shape[1] + 1], + side="left", + ) for i in range(start, end): spike = self.spike_vector[i] diff --git a/src/spikeinterface/core/waveform_tools.py b/src/spikeinterface/core/waveform_tools.py index 0ac20b9fec..a2f1296e31 100644 --- a/src/spikeinterface/core/waveform_tools.py +++ b/src/spikeinterface/core/waveform_tools.py @@ -350,7 +350,9 @@ def _worker_distribute_buffers(segment_index, start_frame, end_frame, worker_ctx # take only spikes in range [start_frame, end_frame] # this is a slice so no copy!! # the border of segment are protected by nbefore on left an nafter on the right - i0, i1 = np.searchsorted(in_seg_spikes["sample_index"], [max(start_frame, nbefore), min(end_frame, seg_size - nafter)]) + i0, i1 = np.searchsorted( + in_seg_spikes["sample_index"], [max(start_frame, nbefore), min(end_frame, seg_size - nafter)] + ) # slice in absolut in spikes vector l0 = i0 + s0 @@ -587,7 +589,9 @@ def _worker_distribute_single_buffer(segment_index, start_frame, end_frame, work # take only spikes in range [start_frame, end_frame] # this is a slice so no copy!! 
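# Illustration (not part of the patch) of the bound clamping in the hunk just
# below: spikes closer than nbefore to the segment start or nafter to its end
# are skipped so that a full waveform can be cut around each spike. Toy values only.
import numpy as np

nbefore, nafter = 20, 40
seg_size = 1_000
start_frame, end_frame = 0, 1_000                    # chunk covering the whole segment
sample_index = np.array([5, 50, 150, 290, 990])      # sorted spikes in this segment

lo, hi = max(start_frame, nbefore), min(end_frame, seg_size - nafter)
i0, i1 = np.searchsorted(sample_index, [lo, hi])
assert sample_index[i0:i1].tolist() == [50, 150, 290]  # 5 and 990 are too close to a border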
# the border of segment are protected by nbefore on left an nafter on the right - i0, i1 = np.searchsorted(in_seg_spikes["sample_index"], [max(start_frame, nbefore), min(end_frame, seg_size - nafter)]) + i0, i1 = np.searchsorted( + in_seg_spikes["sample_index"], [max(start_frame, nbefore), min(end_frame, seg_size - nafter)] + ) # slice in absolut in spikes vector l0 = i0 + s0 From 426f395c6cb210b016b119225af540fd968fb30f Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Fri, 15 Sep 2023 12:38:50 +0200 Subject: [PATCH 40/90] Removed unnecessary else --- src/spikeinterface/core/waveform_extractor.py | 26 +++++++++---------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/src/spikeinterface/core/waveform_extractor.py b/src/spikeinterface/core/waveform_extractor.py index 3647e915bf..6881ab3ec5 100644 --- a/src/spikeinterface/core/waveform_extractor.py +++ b/src/spikeinterface/core/waveform_extractor.py @@ -523,20 +523,20 @@ def is_extension(self, extension_name) -> bool: """ if self.folder is None: return extension_name in self._loaded_extensions + + if extension_name in self._loaded_extensions: + # extension already loaded in memory + return True else: - # Extensions already loaded in memory - if extension_name in self._loaded_extensions: - return True - else: - if self.format == "binary": - return (self.folder / extension_name).is_dir() and ( - self.folder / extension_name / "params.json" - ).is_file() - elif self.format == "zarr": - return ( - extension_name in self._waveforms_root.keys() - and "params" in self._waveforms_root[extension_name].attrs.keys() - ) + if self.format == "binary": + return (self.folder / extension_name).is_dir() and ( + self.folder / extension_name / "params.json" + ).is_file() + elif self.format == "zarr": + return ( + extension_name in self._waveforms_root.keys() + and "params" in self._waveforms_root[extension_name].attrs.keys() + ) def load_extension(self, extension_name): """ From 9ad5f56907a848b757977e8dc2316445f867e269 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Fri, 15 Sep 2023 13:01:43 +0200 Subject: [PATCH 41/90] Update src/spikeinterface/sortingcomponents/motion_interpolation.py Co-authored-by: Zach McKenzie <92116279+zm711@users.noreply.github.com> --- src/spikeinterface/sortingcomponents/motion_interpolation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/spikeinterface/sortingcomponents/motion_interpolation.py b/src/spikeinterface/sortingcomponents/motion_interpolation.py index 1f6c348574..18bb4f5a99 100644 --- a/src/spikeinterface/sortingcomponents/motion_interpolation.py +++ b/src/spikeinterface/sortingcomponents/motion_interpolation.py @@ -155,7 +155,7 @@ def interpolate_motion_on_traces( **spatial_interpolation_kwargs, ) - i0, i1 = np.searchsorted(bin_inds, [bin_ind, bin_ind + 1] side="left") + i0, i1 = np.searchsorted(bin_inds, [bin_ind, bin_ind + 1], side="left") # here we use a simple np.matmul even if dirft_kernel can be super sparse. 
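# Illustration (not part of the patch) of the lookup fixed just above: with
# bin_inds sorted, searching [bin_ind, bin_ind + 1] returns the contiguous run
# of rows belonging to that bin. Toy values only.
import numpy as np

bin_inds = np.array([0, 0, 1, 1, 1, 2, 2])           # sorted bin index per spike/sample
bin_ind = 1
i0, i1 = np.searchsorted(bin_inds, [bin_ind, bin_ind + 1], side="left")
assert (i0, i1) == (2, 5)                            # rows 2..4 fall in bin 1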
# because the speed for a sparse matmul is not so good when we disable multi threaad (due multi processing From 9c6e6c1cef249d0382c6c441cdd7d2a7b0194cb1 Mon Sep 17 00:00:00 2001 From: Sebastien Date: Fri, 15 Sep 2023 13:30:36 +0200 Subject: [PATCH 42/90] Typos while copy/paste --- src/spikeinterface/core/node_pipeline.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/spikeinterface/core/node_pipeline.py b/src/spikeinterface/core/node_pipeline.py index 5627eba518..651804c995 100644 --- a/src/spikeinterface/core/node_pipeline.py +++ b/src/spikeinterface/core/node_pipeline.py @@ -124,7 +124,7 @@ def compute(self, traces, start_frame, end_frame, segment_index, max_margin): # get local peaks sl = self.segment_slices[segment_index] peaks_in_segment = self.peaks[sl] - i0, i1 = np.searchsorted(peaks_in_segment["segment_index"], [start_frame, end_frame]) + i0, i1 = np.searchsorted(peaks_in_segment["sample_index"], [start_frame, end_frame]) local_peaks = peaks_in_segment[i0:i1] # make sample index local to traces @@ -194,7 +194,7 @@ def compute(self, traces, start_frame, end_frame, segment_index, max_margin): # get local peaks sl = self.segment_slices[segment_index] peaks_in_segment = self.peaks[sl] - i0, i1 = np.searchsorted(peaks_in_segment["segment_index"], [start_frame, end_frame]) + i0, i1 = np.searchsorted(peaks_in_segment["sample_index"], [start_frame, end_frame]) local_peaks = peaks_in_segment[i0:i1] # make sample index local to traces From 646455a1054bf4cebed133c3197e8598ef75e59f Mon Sep 17 00:00:00 2001 From: Sebastien Date: Fri, 15 Sep 2023 13:38:26 +0200 Subject: [PATCH 43/90] Some more searchsorted --- .../postprocessing/amplitude_scalings.py | 15 +++++---------- .../widgets/_legacy_mpl_widgets/activity.py | 3 +-- 2 files changed, 6 insertions(+), 12 deletions(-) diff --git a/src/spikeinterface/postprocessing/amplitude_scalings.py b/src/spikeinterface/postprocessing/amplitude_scalings.py index bb97f246d9..73e75870f9 100644 --- a/src/spikeinterface/postprocessing/amplitude_scalings.py +++ b/src/spikeinterface/postprocessing/amplitude_scalings.py @@ -316,8 +316,7 @@ def _amplitude_scalings_chunk(segment_index, start_frame, end_frame, worker_ctx) spikes_in_segment = spikes[segment_slices[segment_index]] - i0 = np.searchsorted(spikes_in_segment["sample_index"], start_frame) - i1 = np.searchsorted(spikes_in_segment["sample_index"], end_frame) + i0, i1 = np.searchsorted(spikes_in_segment["sample_index"], [start_frame, end_frame]) if i0 != i1: local_spikes = spikes_in_segment[i0:i1] @@ -334,8 +333,7 @@ def _amplitude_scalings_chunk(segment_index, start_frame, end_frame, worker_ctx) # set colliding spikes apart (if needed) if handle_collisions: # local spikes with margin! 
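# Illustration (not part of the patch) of the two-step lookup that the
# node_pipeline typo fix above restores, and that the amplitude_scalings hunk
# here follows as well: slice per segment on "segment_index" first, then window
# on "sample_index" inside that segment. Toy structured array only.
import numpy as np

peaks = np.array(
    [(10, 0), (40, 0), (5, 1), (25, 1), (90, 1)],
    dtype=[("sample_index", "int64"), ("segment_index", "int64")],
)
s0, s1 = np.searchsorted(peaks["segment_index"], [1, 2])      # rows of segment 1
peaks_in_segment = peaks[s0:s1]
i0, i1 = np.searchsorted(peaks_in_segment["sample_index"], [10, 60])
assert peaks_in_segment[i0:i1]["sample_index"].tolist() == [25]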
- i0_margin = np.searchsorted(spikes_in_segment["sample_index"], start_frame - left) - i1_margin = np.searchsorted(spikes_in_segment["sample_index"], end_frame + right) + i0_margin, i1_margin = np.searchsorted(spikes_in_segment["sample_index"], [start_frame - left, end_frame + right]) local_spikes_w_margin = spikes_in_segment[i0_margin:i1_margin] collisions_local = find_collisions( local_spikes, local_spikes_w_margin, delta_collision_samples, unit_inds_to_channel_indices @@ -461,14 +459,11 @@ def find_collisions(spikes, spikes_w_margin, delta_collision_samples, unit_inds_ spike_index_w_margin = np.where(spikes_w_margin == spike)[0][0] # find the possible spikes per and post within delta_collision_samples - consecutive_window_pre = np.searchsorted( + consecutive_window_pre, consecutive_window_post = np.searchsorted( spikes_w_margin["sample_index"], - spike["sample_index"] - delta_collision_samples, - ) - consecutive_window_post = np.searchsorted( - spikes_w_margin["sample_index"], - spike["sample_index"] + delta_collision_samples, + [spike["sample_index"] - delta_collision_samples, spike["sample_index"] + delta_collision_samples] ) + # exclude the spike itself (it is included in the collision_spikes by construction) pre_possible_consecutive_spike_indices = np.arange(consecutive_window_pre, spike_index_w_margin) post_possible_consecutive_spike_indices = np.arange(spike_index_w_margin + 1, consecutive_window_post) diff --git a/src/spikeinterface/widgets/_legacy_mpl_widgets/activity.py b/src/spikeinterface/widgets/_legacy_mpl_widgets/activity.py index 939475c17d..9715b7ea87 100644 --- a/src/spikeinterface/widgets/_legacy_mpl_widgets/activity.py +++ b/src/spikeinterface/widgets/_legacy_mpl_widgets/activity.py @@ -95,8 +95,7 @@ def plot(self): num_frames = int(duration / self.bin_duration_s) def animate_func(i): - i0 = np.searchsorted(peaks["sample_index"], bin_size * i) - i1 = np.searchsorted(peaks["sample_index"], bin_size * (i + 1)) + i0, i1 = np.searchsorted(peaks["sample_index"], [bin_size * i, bin_size * (i + 1)]) local_peaks = peaks[i0:i1] artists = self._plot_one_bin(rec, probe, local_peaks, self.bin_duration_s) return artists From 4410d6e8d06a8f3db8004846152be90bf04b8615 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 15 Sep 2023 11:40:20 +0000 Subject: [PATCH 44/90] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/postprocessing/amplitude_scalings.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/spikeinterface/postprocessing/amplitude_scalings.py b/src/spikeinterface/postprocessing/amplitude_scalings.py index 73e75870f9..d4446e2289 100644 --- a/src/spikeinterface/postprocessing/amplitude_scalings.py +++ b/src/spikeinterface/postprocessing/amplitude_scalings.py @@ -333,7 +333,9 @@ def _amplitude_scalings_chunk(segment_index, start_frame, end_frame, worker_ctx) # set colliding spikes apart (if needed) if handle_collisions: # local spikes with margin! 
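# Illustration (not part of the patch) of the collision window computed in
# find_collisions above: a single call returns the index range of spikes lying
# within +/- delta_collision_samples of the current spike. Toy values only.
import numpy as np

sample_index = np.array([100, 140, 150, 155, 200, 400])    # sorted, with margin
spike_sample, delta = 150, 12
pre, post = np.searchsorted(sample_index, [spike_sample - delta, spike_sample + delta])
assert sample_index[pre:post].tolist() == [140, 150, 155]  # the spike itself is excluded later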
- i0_margin, i1_margin = np.searchsorted(spikes_in_segment["sample_index"], [start_frame - left, end_frame + right]) + i0_margin, i1_margin = np.searchsorted( + spikes_in_segment["sample_index"], [start_frame - left, end_frame + right] + ) local_spikes_w_margin = spikes_in_segment[i0_margin:i1_margin] collisions_local = find_collisions( local_spikes, local_spikes_w_margin, delta_collision_samples, unit_inds_to_channel_indices @@ -461,7 +463,7 @@ def find_collisions(spikes, spikes_w_margin, delta_collision_samples, unit_inds_ # find the possible spikes per and post within delta_collision_samples consecutive_window_pre, consecutive_window_post = np.searchsorted( spikes_w_margin["sample_index"], - [spike["sample_index"] - delta_collision_samples, spike["sample_index"] + delta_collision_samples] + [spike["sample_index"] - delta_collision_samples, spike["sample_index"] + delta_collision_samples], ) # exclude the spike itself (it is included in the collision_spikes by construction) From 334f178aaafc0cccbc81db9821749691b7d67da6 Mon Sep 17 00:00:00 2001 From: Sebastien Date: Fri, 15 Sep 2023 13:52:20 +0200 Subject: [PATCH 45/90] Fix --- src/spikeinterface/curation/remove_duplicated_spikes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/spikeinterface/curation/remove_duplicated_spikes.py b/src/spikeinterface/curation/remove_duplicated_spikes.py index 3badaa9402..d01ca1f6a1 100644 --- a/src/spikeinterface/curation/remove_duplicated_spikes.py +++ b/src/spikeinterface/curation/remove_duplicated_spikes.py @@ -82,7 +82,7 @@ def get_unit_spike_train( if end_frame == None: end_frame = spike_train[-1] if len(spike_train) > 0 else 0 - start, end = np.searchsorted(spike_train, [start_frame, end + 1], side="left") + start, end = np.searchsorted(spike_train, [start_frame, end_frame + 1], side="left") return spike_train[start:end] From 1ac47ffd3c2525b4fa406937b7d2391ee759e4ea Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Fri, 15 Sep 2023 14:12:04 +0200 Subject: [PATCH 46/90] in1d to isin --- src/spikeinterface/comparison/basecomparison.py | 4 ++-- src/spikeinterface/comparison/comparisontools.py | 2 +- src/spikeinterface/core/baserecording.py | 2 +- src/spikeinterface/core/basesnippets.py | 2 +- src/spikeinterface/core/basesorting.py | 2 +- src/spikeinterface/core/generate.py | 4 ++-- src/spikeinterface/core/tests/test_sparsity.py | 2 +- src/spikeinterface/curation/mergeunitssorting.py | 4 ++-- src/spikeinterface/extractors/bids.py | 2 +- .../postprocessing/amplitude_scalings.py | 4 ++-- src/spikeinterface/postprocessing/spike_amplitudes.py | 4 ++-- src/spikeinterface/postprocessing/spike_locations.py | 4 ++-- .../preprocessing/interpolate_bad_channels.py | 2 +- src/spikeinterface/qualitymetrics/misc_metrics.py | 2 +- src/spikeinterface/qualitymetrics/pca_metrics.py | 10 +++++----- .../sortingcomponents/benchmark/benchmark_matching.py | 4 ++-- .../benchmark/benchmark_peak_selection.py | 8 ++++---- .../sortingcomponents/clustering/clustering_tools.py | 10 +++++----- .../sortingcomponents/clustering/sliding_hdbscan.py | 10 +++++----- .../widgets/unit_waveforms_density_map.py | 2 +- 20 files changed, 42 insertions(+), 42 deletions(-) diff --git a/src/spikeinterface/comparison/basecomparison.py b/src/spikeinterface/comparison/basecomparison.py index 79c784491a..6f45f1497d 100644 --- a/src/spikeinterface/comparison/basecomparison.py +++ b/src/spikeinterface/comparison/basecomparison.py @@ -262,11 +262,11 @@ def get_ordered_agreement_scores(self): indexes = np.arange(scores.shape[1]) 
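# Illustration (not part of the patch): np.isin is the recommended replacement
# for np.in1d and returns the same boolean mask for the 1-D index arrays used
# in this commit. The hunks of this commit spell it `isin` without the `np.`
# prefix, which is why the change is reverted and redone two commits later.
import numpy as np

indexes = np.arange(6)
order1 = [2, 4]
assert np.array_equal(np.in1d(indexes, order1), np.isin(indexes, order1))
assert indexes[~np.isin(indexes, order1)].tolist() == [0, 1, 3, 5]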
order1 = [] for r in range(scores.shape[0]): - possible = indexes[~np.in1d(indexes, order1)] + possible = indexes[~isin(indexes, order1)] if possible.size > 0: ind = np.argmax(scores.iloc[r, possible].values) order1.append(possible[ind]) - remain = indexes[~np.in1d(indexes, order1)] + remain = indexes[~isin(indexes, order1)] order1.extend(remain) scores = scores.iloc[:, order1] diff --git a/src/spikeinterface/comparison/comparisontools.py b/src/spikeinterface/comparison/comparisontools.py index db45e2b25b..eb7b5c703c 100644 --- a/src/spikeinterface/comparison/comparisontools.py +++ b/src/spikeinterface/comparison/comparisontools.py @@ -538,7 +538,7 @@ def do_confusion_matrix(event_counts1, event_counts2, match_12, match_event_coun matched_units2 = match_12[match_12 != -1].values unmatched_units1 = match_12[match_12 == -1].index - unmatched_units2 = unit2_ids[~np.in1d(unit2_ids, matched_units2)] + unmatched_units2 = unit2_ids[~isin(unit2_ids, matched_units2)] ordered_units1 = np.hstack([matched_units1, unmatched_units1]) ordered_units2 = np.hstack([matched_units2, unmatched_units2]) diff --git a/src/spikeinterface/core/baserecording.py b/src/spikeinterface/core/baserecording.py index af4970a4ad..8c4a2941a0 100644 --- a/src/spikeinterface/core/baserecording.py +++ b/src/spikeinterface/core/baserecording.py @@ -592,7 +592,7 @@ def _channel_slice(self, channel_ids, renamed_channel_ids=None): def _remove_channels(self, remove_channel_ids): from .channelslice import ChannelSliceRecording - new_channel_ids = self.channel_ids[~np.in1d(self.channel_ids, remove_channel_ids)] + new_channel_ids = self.channel_ids[~isin(self.channel_ids, remove_channel_ids)] sub_recording = ChannelSliceRecording(self, new_channel_ids) return sub_recording diff --git a/src/spikeinterface/core/basesnippets.py b/src/spikeinterface/core/basesnippets.py index 737087abc1..7fd0823fc0 100644 --- a/src/spikeinterface/core/basesnippets.py +++ b/src/spikeinterface/core/basesnippets.py @@ -139,7 +139,7 @@ def _channel_slice(self, channel_ids, renamed_channel_ids=None): def _remove_channels(self, remove_channel_ids): from .channelslice import ChannelSliceSnippets - new_channel_ids = self.channel_ids[~np.in1d(self.channel_ids, remove_channel_ids)] + new_channel_ids = self.channel_ids[~isin(self.channel_ids, remove_channel_ids)] sub_recording = ChannelSliceSnippets(self, new_channel_ids) return sub_recording diff --git a/src/spikeinterface/core/basesorting.py b/src/spikeinterface/core/basesorting.py index 52f71c2399..423f974220 100644 --- a/src/spikeinterface/core/basesorting.py +++ b/src/spikeinterface/core/basesorting.py @@ -346,7 +346,7 @@ def remove_units(self, remove_unit_ids): """ from spikeinterface import UnitsSelectionSorting - new_unit_ids = self.unit_ids[~np.in1d(self.unit_ids, remove_unit_ids)] + new_unit_ids = self.unit_ids[~isin(self.unit_ids, remove_unit_ids)] new_sorting = UnitsSelectionSorting(self, new_unit_ids) return new_sorting diff --git a/src/spikeinterface/core/generate.py b/src/spikeinterface/core/generate.py index 401c498f03..44d62818f9 100644 --- a/src/spikeinterface/core/generate.py +++ b/src/spikeinterface/core/generate.py @@ -166,7 +166,7 @@ def generate_sorting( ) if empty_units is not None: - keep = ~np.in1d(labels, empty_units) + keep = ~isin(labels, empty_units) times = times[keep] labels = labels[keep] @@ -219,7 +219,7 @@ def add_synchrony_to_sorting(sorting, sync_event_ratio=0.3, seed=None): sample_index = spike["sample_index"] if sample_index not in units_used_for_spike: 
units_used_for_spike[sample_index] = np.array([spike["unit_index"]]) - units_not_used = unit_ids[~np.in1d(unit_ids, units_used_for_spike[sample_index])] + units_not_used = unit_ids[~isin(unit_ids, units_used_for_spike[sample_index])] if len(units_not_used) == 0: continue diff --git a/src/spikeinterface/core/tests/test_sparsity.py b/src/spikeinterface/core/tests/test_sparsity.py index a6b94c9b84..61c4179652 100644 --- a/src/spikeinterface/core/tests/test_sparsity.py +++ b/src/spikeinterface/core/tests/test_sparsity.py @@ -34,7 +34,7 @@ def test_ChannelSparsity(): for key, v in sparsity.unit_id_to_channel_ids.items(): assert key in unit_ids - assert np.all(np.in1d(v, channel_ids)) + assert np.all(isin(v, channel_ids)) for key, v in sparsity.unit_id_to_channel_indices.items(): assert key in unit_ids diff --git a/src/spikeinterface/curation/mergeunitssorting.py b/src/spikeinterface/curation/mergeunitssorting.py index 264ac3a56d..ccbaa32e7b 100644 --- a/src/spikeinterface/curation/mergeunitssorting.py +++ b/src/spikeinterface/curation/mergeunitssorting.py @@ -59,7 +59,7 @@ def __init__(self, parent_sorting, units_to_merge, new_unit_ids=None, properties else: # we cannot automatically find new names new_unit_ids = [f"merge{i}" for i in range(num_merge)] - if np.any(np.in1d(new_unit_ids, keep_unit_ids)): + if np.any(isin(new_unit_ids, keep_unit_ids)): raise ValueError( "Unable to find 'new_unit_ids' because it is a string and parents " "already contain merges. Pass a list of 'new_unit_ids' as an argument." @@ -68,7 +68,7 @@ def __init__(self, parent_sorting, units_to_merge, new_unit_ids=None, properties # dtype int new_unit_ids = list(max(parents_unit_ids) + 1 + np.arange(num_merge, dtype=dtype)) else: - if np.any(np.in1d(new_unit_ids, keep_unit_ids)): + if np.any(isin(new_unit_ids, keep_unit_ids)): raise ValueError("'new_unit_ids' already exist in the sorting.unit_ids. 
Provide new ones") assert len(new_unit_ids) == num_merge, "new_unit_ids must have the same size as units_to_merge" diff --git a/src/spikeinterface/extractors/bids.py b/src/spikeinterface/extractors/bids.py index 02e7d5677d..9de272c56e 100644 --- a/src/spikeinterface/extractors/bids.py +++ b/src/spikeinterface/extractors/bids.py @@ -76,7 +76,7 @@ def _read_probe_group(folder, bids_name, recording_channel_ids): contact_ids = channels["contact_id"].values.astype("U") # extracting information of requested channels - keep = np.in1d(channel_ids, recording_channel_ids) + keep = isin(channel_ids, recording_channel_ids) channel_ids = channel_ids[keep] contact_ids = contact_ids[keep] diff --git a/src/spikeinterface/postprocessing/amplitude_scalings.py b/src/spikeinterface/postprocessing/amplitude_scalings.py index 5a0148c5c4..af618cf4db 100644 --- a/src/spikeinterface/postprocessing/amplitude_scalings.py +++ b/src/spikeinterface/postprocessing/amplitude_scalings.py @@ -47,9 +47,9 @@ def _set_params( def _select_extension_data(self, unit_ids): old_unit_ids = self.waveform_extractor.sorting.unit_ids - unit_inds = np.flatnonzero(np.in1d(old_unit_ids, unit_ids)) + unit_inds = np.flatnonzero(isin(old_unit_ids, unit_ids)) - spike_mask = np.in1d(self.spikes["unit_index"], unit_inds) + spike_mask = isin(self.spikes["unit_index"], unit_inds) new_amplitude_scalings = self._extension_data["amplitude_scalings"][spike_mask] return dict(amplitude_scalings=new_amplitude_scalings) diff --git a/src/spikeinterface/postprocessing/spike_amplitudes.py b/src/spikeinterface/postprocessing/spike_amplitudes.py index 62a4e2c320..729dbd12bb 100644 --- a/src/spikeinterface/postprocessing/spike_amplitudes.py +++ b/src/spikeinterface/postprocessing/spike_amplitudes.py @@ -28,13 +28,13 @@ def _select_extension_data(self, unit_ids): # load filter and save amplitude files sorting = self.waveform_extractor.sorting spikes = sorting.to_spike_vector(concatenated=False) - (keep_unit_indices,) = np.nonzero(np.in1d(sorting.unit_ids, unit_ids)) + (keep_unit_indices,) = np.nonzero(isin(sorting.unit_ids, unit_ids)) new_extension_data = dict() for seg_index in range(sorting.get_num_segments()): amp_data_name = f"amplitude_segment_{seg_index}" amps = self._extension_data[amp_data_name] - filtered_idxs = np.in1d(spikes[seg_index]["unit_index"], keep_unit_indices) + filtered_idxs = isin(spikes[seg_index]["unit_index"], keep_unit_indices) new_extension_data[amp_data_name] = amps[filtered_idxs] return new_extension_data diff --git a/src/spikeinterface/postprocessing/spike_locations.py b/src/spikeinterface/postprocessing/spike_locations.py index c6f498f7e8..eb3f1255c8 100644 --- a/src/spikeinterface/postprocessing/spike_locations.py +++ b/src/spikeinterface/postprocessing/spike_locations.py @@ -32,9 +32,9 @@ def _set_params(self, ms_before=0.5, ms_after=0.5, method="center_of_mass", meth def _select_extension_data(self, unit_ids): old_unit_ids = self.waveform_extractor.sorting.unit_ids - unit_inds = np.flatnonzero(np.in1d(old_unit_ids, unit_ids)) + unit_inds = np.flatnonzero(isin(old_unit_ids, unit_ids)) - spike_mask = np.in1d(self.spikes["unit_index"], unit_inds) + spike_mask = isin(self.spikes["unit_index"], unit_inds) new_spike_locations = self._extension_data["spike_locations"][spike_mask] return dict(spike_locations=new_spike_locations) diff --git a/src/spikeinterface/preprocessing/interpolate_bad_channels.py b/src/spikeinterface/preprocessing/interpolate_bad_channels.py index e634d55e7f..5773b6a2ef 100644 --- 
a/src/spikeinterface/preprocessing/interpolate_bad_channels.py +++ b/src/spikeinterface/preprocessing/interpolate_bad_channels.py @@ -49,7 +49,7 @@ def __init__(self, recording, bad_channel_ids, sigma_um=None, p=1.3, weights=Non self.bad_channel_ids = bad_channel_ids self._bad_channel_idxs = recording.ids_to_indices(self.bad_channel_ids) - self._good_channel_idxs = ~np.in1d(np.arange(recording.get_num_channels()), self._bad_channel_idxs) + self._good_channel_idxs = ~isin(np.arange(recording.get_num_channels()), self._bad_channel_idxs) self._bad_channel_idxs.setflags(write=False) if sigma_um is None: diff --git a/src/spikeinterface/qualitymetrics/misc_metrics.py b/src/spikeinterface/qualitymetrics/misc_metrics.py index ee28485983..a51bfe9164 100644 --- a/src/spikeinterface/qualitymetrics/misc_metrics.py +++ b/src/spikeinterface/qualitymetrics/misc_metrics.py @@ -544,7 +544,7 @@ def compute_synchrony_metrics(waveform_extractor, synchrony_sizes=(2, 4, 8), **k # some segments/units might have no spikes if len(spikes_per_unit) == 0: continue - spike_complexity = complexity[np.in1d(unique_spike_index, spikes_per_unit["sample_index"])] + spike_complexity = complexity[isin(unique_spike_index, spikes_per_unit["sample_index"])] for synchrony_size in synchrony_sizes: synchrony_counts[synchrony_size][unit_index] += np.count_nonzero(spike_complexity >= synchrony_size) diff --git a/src/spikeinterface/qualitymetrics/pca_metrics.py b/src/spikeinterface/qualitymetrics/pca_metrics.py index 59000211d4..0702c8f35a 100644 --- a/src/spikeinterface/qualitymetrics/pca_metrics.py +++ b/src/spikeinterface/qualitymetrics/pca_metrics.py @@ -152,8 +152,8 @@ def calculate_pc_metrics( neighbor_unit_ids = unit_ids neighbor_channel_indices = we.channel_ids_to_indices(neighbor_channel_ids) - labels = all_labels[np.in1d(all_labels, neighbor_unit_ids)] - pcs = all_pcs[np.in1d(all_labels, neighbor_unit_ids)][:, :, neighbor_channel_indices] + labels = all_labels[isin(all_labels, neighbor_unit_ids)] + pcs = all_pcs[isin(all_labels, neighbor_unit_ids)][:, :, neighbor_channel_indices] pcs_flat = pcs.reshape(pcs.shape[0], -1) func_args = ( @@ -506,7 +506,7 @@ def nearest_neighbors_isolation( other_units_ids = [ unit_id for unit_id in other_units_ids - if np.sum(np.in1d(sparsity.unit_id_to_channel_indices[unit_id], closest_chans_target_unit)) + if np.sum(isin(sparsity.unit_id_to_channel_indices[unit_id], closest_chans_target_unit)) >= (n_channels_target_unit * min_spatial_overlap) ] @@ -536,10 +536,10 @@ def nearest_neighbors_isolation( if waveform_extractor.is_sparse(): # in this case, waveforms are sparse so we need to do some smart indexing waveforms_target_unit_sampled = waveforms_target_unit_sampled[ - :, :, np.in1d(closest_chans_target_unit, common_channel_idxs) + :, :, isin(closest_chans_target_unit, common_channel_idxs) ] waveforms_other_unit_sampled = waveforms_other_unit_sampled[ - :, :, np.in1d(closest_chans_other_unit, common_channel_idxs) + :, :, isin(closest_chans_other_unit, common_channel_idxs) ] else: waveforms_target_unit_sampled = waveforms_target_unit_sampled[:, :, common_channel_idxs] diff --git a/src/spikeinterface/sortingcomponents/benchmark/benchmark_matching.py b/src/spikeinterface/sortingcomponents/benchmark/benchmark_matching.py index 07c7db155c..ee8ace42ee 100644 --- a/src/spikeinterface/sortingcomponents/benchmark/benchmark_matching.py +++ b/src/spikeinterface/sortingcomponents/benchmark/benchmark_matching.py @@ -502,7 +502,7 @@ def plot_errors_matching(benchmark, comp, unit_id, nb_spikes=200, 
metric="cosine seg_num = 0 # TODO: make compatible with multiple segments idx_1 = np.where(comp.get_labels1(unit_id)[seg_num] == label) idx_2 = benchmark.we.get_sampled_indices(unit_id)["spike_index"] - intersection = np.where(np.in1d(idx_2, idx_1))[0] + intersection = np.where(isin(idx_2, idx_1))[0] intersection = np.random.permutation(intersection)[:nb_spikes] if len(intersection) == 0: print(f"No {label}s found for unit {unit_id}") @@ -552,7 +552,7 @@ def plot_errors_matching_all_neurons(benchmark, comp, nb_spikes=200, metric="cos for label in ["TP", "FN"]: idx_1 = np.where(comp.get_labels1(unit_id) == label)[0] - intersection = np.where(np.in1d(idx_2, idx_1))[0] + intersection = np.where(isin(idx_2, idx_1))[0] intersection = np.random.permutation(intersection)[:nb_spikes] wfs_sliced = wfs[intersection, :, :] diff --git a/src/spikeinterface/sortingcomponents/benchmark/benchmark_peak_selection.py b/src/spikeinterface/sortingcomponents/benchmark/benchmark_peak_selection.py index 1514a63dd4..ca18db58d6 100644 --- a/src/spikeinterface/sortingcomponents/benchmark/benchmark_peak_selection.py +++ b/src/spikeinterface/sortingcomponents/benchmark/benchmark_peak_selection.py @@ -133,7 +133,7 @@ def run(self, peaks=None, positions=None, delta=0.2): matches = make_matching_events(times2, spikes1["sample_index"], int(delta * self.sampling_rate / 1000)) self.good_matches = matches["index1"] - garbage_matches = ~np.in1d(np.arange(len(times2)), self.good_matches) + garbage_matches = ~isin(np.arange(len(times2)), self.good_matches) garbage_channels = self.peaks["channel_index"][garbage_matches] garbage_peaks = times2[garbage_matches] nb_garbage = len(garbage_peaks) @@ -365,7 +365,7 @@ def plot_clusters_amplitudes(self, title=None, show_probe=False, clim=(-100, 0), idx = self.waveforms["full_gt"].get_sampled_indices(unit_id)["spike_index"] all_spikes = self.waveforms["full_gt"].sorting.get_unit_spike_train(unit_id) - mask = np.in1d(self.gt_peaks["sample_index"], all_spikes[idx]) + mask = isin(self.gt_peaks["sample_index"], all_spikes[idx]) colors = scalarMap.to_rgba(self.gt_peaks["amplitude"][mask]) ax.scatter(self.gt_positions["x"][mask], self.gt_positions["y"][mask], c=colors, s=1, alpha=0.5) x_mean, y_mean = (self.gt_positions["x"][mask].mean(), self.gt_positions["y"][mask].mean()) @@ -391,7 +391,7 @@ def plot_clusters_amplitudes(self, title=None, show_probe=False, clim=(-100, 0), idx = self.waveforms["gt"].get_sampled_indices(unit_id)["spike_index"] all_spikes = self.waveforms["gt"].sorting.get_unit_spike_train(unit_id) - mask = np.in1d(self.sliced_gt_peaks["sample_index"], all_spikes[idx]) + mask = isin(self.sliced_gt_peaks["sample_index"], all_spikes[idx]) colors = scalarMap.to_rgba(self.sliced_gt_peaks["amplitude"][mask]) ax.scatter( self.sliced_gt_positions["x"][mask], self.sliced_gt_positions["y"][mask], c=colors, s=1, alpha=0.5 @@ -420,7 +420,7 @@ def plot_clusters_amplitudes(self, title=None, show_probe=False, clim=(-100, 0), idx = self.waveforms["garbage"].get_sampled_indices(unit_id)["spike_index"] all_spikes = self.waveforms["garbage"].sorting.get_unit_spike_train(unit_id) - mask = np.in1d(self.garbage_peaks["sample_index"], all_spikes[idx]) + mask = isin(self.garbage_peaks["sample_index"], all_spikes[idx]) colors = scalarMap.to_rgba(self.garbage_peaks["amplitude"][mask]) ax.scatter(self.garbage_positions["x"][mask], self.garbage_positions["y"][mask], c=colors, s=1, alpha=0.5) x_mean, y_mean = (self.garbage_positions["x"][mask].mean(), self.garbage_positions["y"][mask].mean()) diff --git 
a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py index 6edf5af16b..fb45e5fc3a 100644 --- a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py +++ b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py @@ -30,7 +30,7 @@ def _split_waveforms( local_labels_with_noise = clustering[0] cluster_probability = clustering[2] (persistent_clusters,) = np.nonzero(cluster_probability > probability_thr) - local_labels_with_noise[~np.in1d(local_labels_with_noise, persistent_clusters)] = -1 + local_labels_with_noise[~isin(local_labels_with_noise, persistent_clusters)] = -1 # remove super small cluster labels, count = np.unique(local_labels_with_noise[:valid_size], return_counts=True) @@ -43,7 +43,7 @@ def _split_waveforms( to_remove = labels[(count / valid_size) < minimum_cluster_size_ratio] # ~ print('to_remove', to_remove, count / valid_size) if to_remove.size > 0: - local_labels_with_noise[np.in1d(local_labels_with_noise, to_remove)] = -1 + local_labels_with_noise[isin(local_labels_with_noise, to_remove)] = -1 local_labels_with_noise[valid_size:] = -2 @@ -123,7 +123,7 @@ def _split_waveforms_nested( active_labels_with_noise = clustering[0] cluster_probability = clustering[2] (persistent_clusters,) = np.nonzero(clustering[2] > probability_thr) - active_labels_with_noise[~np.in1d(active_labels_with_noise, persistent_clusters)] = -1 + active_labels_with_noise[~isin(active_labels_with_noise, persistent_clusters)] = -1 active_labels = active_labels_with_noise[active_ind < valid_size] active_labels_set = np.unique(active_labels) @@ -381,9 +381,9 @@ def auto_clean_clustering( continue wfs0 = wfs_arrays[label0] - wfs0 = wfs0[:, :, np.in1d(channel_inds0, used_chans)] + wfs0 = wfs0[:, :, isin(channel_inds0, used_chans)] wfs1 = wfs_arrays[label1] - wfs1 = wfs1[:, :, np.in1d(channel_inds1, used_chans)] + wfs1 = wfs1[:, :, isin(channel_inds1, used_chans)] # TODO : remove assert wfs0.shape[2] == wfs1.shape[2] diff --git a/src/spikeinterface/sortingcomponents/clustering/sliding_hdbscan.py b/src/spikeinterface/sortingcomponents/clustering/sliding_hdbscan.py index aeec14158f..0f1d503bdf 100644 --- a/src/spikeinterface/sortingcomponents/clustering/sliding_hdbscan.py +++ b/src/spikeinterface/sortingcomponents/clustering/sliding_hdbscan.py @@ -198,7 +198,7 @@ def _find_clusters(cls, recording, peaks, wfs_arrays, sparsity_mask, noise, d): for chan_ind in prev_local_chan_inds: if total_count[chan_ind] == 0: continue - # ~ inds, = np.nonzero(np.in1d(peaks['channel_index'], closest_channels[chan_ind]) & (peak_labels==0)) + # ~ inds, = np.nonzero(isin(peaks['channel_index'], closest_channels[chan_ind]) & (peak_labels==0)) (inds,) = np.nonzero((peaks["channel_index"] == chan_ind) & (peak_labels == 0)) if inds.size <= d["min_spike_on_channel"]: chan_amps[chan_ind] = 0.0 @@ -235,12 +235,12 @@ def _find_clusters(cls, recording, peaks, wfs_arrays, sparsity_mask, noise, d): (wf_chans,) = np.nonzero(sparsity_mask[chan_ind]) # TODO: only for debug, remove later - assert np.all(np.in1d(local_chan_inds, wf_chans)) + assert np.all(isin(local_chan_inds, wf_chans)) # none label spikes wfs_chan = wfs_chan[inds, :, :] # only some channels - wfs_chan = wfs_chan[:, :, np.in1d(wf_chans, local_chan_inds)] + wfs_chan = wfs_chan[:, :, isin(wf_chans, local_chan_inds)] wfs.append(wfs_chan) # put noise to enhance clusters @@ -517,7 +517,7 @@ def _collect_sparse_waveforms(peaks, wfs_arrays, closest_channels, peak_labels, 
(wf_chans,) = np.nonzero(sparsity_mask[chan_ind]) # print('wf_chans', wf_chans) # TODO: only for debug, remove later - assert np.all(np.in1d(wanted_chans, wf_chans)) + assert np.all(isin(wanted_chans, wf_chans)) wfs_chan = wfs_arrays[chan_ind] # TODO: only for debug, remove later @@ -525,7 +525,7 @@ def _collect_sparse_waveforms(peaks, wfs_arrays, closest_channels, peak_labels, wfs_chan = wfs_chan[inds, :, :] # only some channels - wfs_chan = wfs_chan[:, :, np.in1d(wf_chans, wanted_chans)] + wfs_chan = wfs_chan[:, :, isin(wf_chans, wanted_chans)] wfs.append(wfs_chan) wfs = np.concatenate(wfs, axis=0) diff --git a/src/spikeinterface/widgets/unit_waveforms_density_map.py b/src/spikeinterface/widgets/unit_waveforms_density_map.py index e8a6868e92..2515d844eb 100644 --- a/src/spikeinterface/widgets/unit_waveforms_density_map.py +++ b/src/spikeinterface/widgets/unit_waveforms_density_map.py @@ -103,7 +103,7 @@ def __init__( if same_axis and not np.array_equal(chan_inds, shared_chan_inds): # add more channels if necessary wfs_ = np.zeros((wfs.shape[0], wfs.shape[1], shared_chan_inds.size), dtype=float) - mask = np.in1d(shared_chan_inds, chan_inds) + mask = isin(shared_chan_inds, chan_inds) wfs_[:, :, mask] = wfs wfs_[:, :, ~mask] = np.nan wfs = wfs_ From e947e09a9c3d397ceabfd8eae50ba8a5ed345cf5 Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Fri, 15 Sep 2023 14:20:32 +0200 Subject: [PATCH 47/90] Revert "in1d to isin" This reverts commit 1ac47ffd3c2525b4fa406937b7d2391ee759e4ea. --- src/spikeinterface/comparison/basecomparison.py | 4 ++-- src/spikeinterface/comparison/comparisontools.py | 2 +- src/spikeinterface/core/baserecording.py | 2 +- src/spikeinterface/core/basesnippets.py | 2 +- src/spikeinterface/core/basesorting.py | 2 +- src/spikeinterface/core/generate.py | 4 ++-- src/spikeinterface/core/tests/test_sparsity.py | 2 +- src/spikeinterface/curation/mergeunitssorting.py | 4 ++-- src/spikeinterface/extractors/bids.py | 2 +- .../postprocessing/amplitude_scalings.py | 4 ++-- src/spikeinterface/postprocessing/spike_amplitudes.py | 4 ++-- src/spikeinterface/postprocessing/spike_locations.py | 4 ++-- .../preprocessing/interpolate_bad_channels.py | 2 +- src/spikeinterface/qualitymetrics/misc_metrics.py | 2 +- src/spikeinterface/qualitymetrics/pca_metrics.py | 10 +++++----- .../sortingcomponents/benchmark/benchmark_matching.py | 4 ++-- .../benchmark/benchmark_peak_selection.py | 8 ++++---- .../sortingcomponents/clustering/clustering_tools.py | 10 +++++----- .../sortingcomponents/clustering/sliding_hdbscan.py | 10 +++++----- .../widgets/unit_waveforms_density_map.py | 2 +- 20 files changed, 42 insertions(+), 42 deletions(-) diff --git a/src/spikeinterface/comparison/basecomparison.py b/src/spikeinterface/comparison/basecomparison.py index 6f45f1497d..79c784491a 100644 --- a/src/spikeinterface/comparison/basecomparison.py +++ b/src/spikeinterface/comparison/basecomparison.py @@ -262,11 +262,11 @@ def get_ordered_agreement_scores(self): indexes = np.arange(scores.shape[1]) order1 = [] for r in range(scores.shape[0]): - possible = indexes[~isin(indexes, order1)] + possible = indexes[~np.in1d(indexes, order1)] if possible.size > 0: ind = np.argmax(scores.iloc[r, possible].values) order1.append(possible[ind]) - remain = indexes[~isin(indexes, order1)] + remain = indexes[~np.in1d(indexes, order1)] order1.extend(remain) scores = scores.iloc[:, order1] diff --git a/src/spikeinterface/comparison/comparisontools.py b/src/spikeinterface/comparison/comparisontools.py index eb7b5c703c..db45e2b25b 100644 
--- a/src/spikeinterface/comparison/comparisontools.py +++ b/src/spikeinterface/comparison/comparisontools.py @@ -538,7 +538,7 @@ def do_confusion_matrix(event_counts1, event_counts2, match_12, match_event_coun matched_units2 = match_12[match_12 != -1].values unmatched_units1 = match_12[match_12 == -1].index - unmatched_units2 = unit2_ids[~isin(unit2_ids, matched_units2)] + unmatched_units2 = unit2_ids[~np.in1d(unit2_ids, matched_units2)] ordered_units1 = np.hstack([matched_units1, unmatched_units1]) ordered_units2 = np.hstack([matched_units2, unmatched_units2]) diff --git a/src/spikeinterface/core/baserecording.py b/src/spikeinterface/core/baserecording.py index 8c4a2941a0..af4970a4ad 100644 --- a/src/spikeinterface/core/baserecording.py +++ b/src/spikeinterface/core/baserecording.py @@ -592,7 +592,7 @@ def _channel_slice(self, channel_ids, renamed_channel_ids=None): def _remove_channels(self, remove_channel_ids): from .channelslice import ChannelSliceRecording - new_channel_ids = self.channel_ids[~isin(self.channel_ids, remove_channel_ids)] + new_channel_ids = self.channel_ids[~np.in1d(self.channel_ids, remove_channel_ids)] sub_recording = ChannelSliceRecording(self, new_channel_ids) return sub_recording diff --git a/src/spikeinterface/core/basesnippets.py b/src/spikeinterface/core/basesnippets.py index 7fd0823fc0..737087abc1 100644 --- a/src/spikeinterface/core/basesnippets.py +++ b/src/spikeinterface/core/basesnippets.py @@ -139,7 +139,7 @@ def _channel_slice(self, channel_ids, renamed_channel_ids=None): def _remove_channels(self, remove_channel_ids): from .channelslice import ChannelSliceSnippets - new_channel_ids = self.channel_ids[~isin(self.channel_ids, remove_channel_ids)] + new_channel_ids = self.channel_ids[~np.in1d(self.channel_ids, remove_channel_ids)] sub_recording = ChannelSliceSnippets(self, new_channel_ids) return sub_recording diff --git a/src/spikeinterface/core/basesorting.py b/src/spikeinterface/core/basesorting.py index 423f974220..52f71c2399 100644 --- a/src/spikeinterface/core/basesorting.py +++ b/src/spikeinterface/core/basesorting.py @@ -346,7 +346,7 @@ def remove_units(self, remove_unit_ids): """ from spikeinterface import UnitsSelectionSorting - new_unit_ids = self.unit_ids[~isin(self.unit_ids, remove_unit_ids)] + new_unit_ids = self.unit_ids[~np.in1d(self.unit_ids, remove_unit_ids)] new_sorting = UnitsSelectionSorting(self, new_unit_ids) return new_sorting diff --git a/src/spikeinterface/core/generate.py b/src/spikeinterface/core/generate.py index 44d62818f9..401c498f03 100644 --- a/src/spikeinterface/core/generate.py +++ b/src/spikeinterface/core/generate.py @@ -166,7 +166,7 @@ def generate_sorting( ) if empty_units is not None: - keep = ~isin(labels, empty_units) + keep = ~np.in1d(labels, empty_units) times = times[keep] labels = labels[keep] @@ -219,7 +219,7 @@ def add_synchrony_to_sorting(sorting, sync_event_ratio=0.3, seed=None): sample_index = spike["sample_index"] if sample_index not in units_used_for_spike: units_used_for_spike[sample_index] = np.array([spike["unit_index"]]) - units_not_used = unit_ids[~isin(unit_ids, units_used_for_spike[sample_index])] + units_not_used = unit_ids[~np.in1d(unit_ids, units_used_for_spike[sample_index])] if len(units_not_used) == 0: continue diff --git a/src/spikeinterface/core/tests/test_sparsity.py b/src/spikeinterface/core/tests/test_sparsity.py index 61c4179652..a6b94c9b84 100644 --- a/src/spikeinterface/core/tests/test_sparsity.py +++ b/src/spikeinterface/core/tests/test_sparsity.py @@ -34,7 +34,7 @@ def 
test_ChannelSparsity(): for key, v in sparsity.unit_id_to_channel_ids.items(): assert key in unit_ids - assert np.all(isin(v, channel_ids)) + assert np.all(np.in1d(v, channel_ids)) for key, v in sparsity.unit_id_to_channel_indices.items(): assert key in unit_ids diff --git a/src/spikeinterface/curation/mergeunitssorting.py b/src/spikeinterface/curation/mergeunitssorting.py index ccbaa32e7b..264ac3a56d 100644 --- a/src/spikeinterface/curation/mergeunitssorting.py +++ b/src/spikeinterface/curation/mergeunitssorting.py @@ -59,7 +59,7 @@ def __init__(self, parent_sorting, units_to_merge, new_unit_ids=None, properties else: # we cannot automatically find new names new_unit_ids = [f"merge{i}" for i in range(num_merge)] - if np.any(isin(new_unit_ids, keep_unit_ids)): + if np.any(np.in1d(new_unit_ids, keep_unit_ids)): raise ValueError( "Unable to find 'new_unit_ids' because it is a string and parents " "already contain merges. Pass a list of 'new_unit_ids' as an argument." @@ -68,7 +68,7 @@ def __init__(self, parent_sorting, units_to_merge, new_unit_ids=None, properties # dtype int new_unit_ids = list(max(parents_unit_ids) + 1 + np.arange(num_merge, dtype=dtype)) else: - if np.any(isin(new_unit_ids, keep_unit_ids)): + if np.any(np.in1d(new_unit_ids, keep_unit_ids)): raise ValueError("'new_unit_ids' already exist in the sorting.unit_ids. Provide new ones") assert len(new_unit_ids) == num_merge, "new_unit_ids must have the same size as units_to_merge" diff --git a/src/spikeinterface/extractors/bids.py b/src/spikeinterface/extractors/bids.py index 9de272c56e..02e7d5677d 100644 --- a/src/spikeinterface/extractors/bids.py +++ b/src/spikeinterface/extractors/bids.py @@ -76,7 +76,7 @@ def _read_probe_group(folder, bids_name, recording_channel_ids): contact_ids = channels["contact_id"].values.astype("U") # extracting information of requested channels - keep = isin(channel_ids, recording_channel_ids) + keep = np.in1d(channel_ids, recording_channel_ids) channel_ids = channel_ids[keep] contact_ids = contact_ids[keep] diff --git a/src/spikeinterface/postprocessing/amplitude_scalings.py b/src/spikeinterface/postprocessing/amplitude_scalings.py index af618cf4db..5a0148c5c4 100644 --- a/src/spikeinterface/postprocessing/amplitude_scalings.py +++ b/src/spikeinterface/postprocessing/amplitude_scalings.py @@ -47,9 +47,9 @@ def _set_params( def _select_extension_data(self, unit_ids): old_unit_ids = self.waveform_extractor.sorting.unit_ids - unit_inds = np.flatnonzero(isin(old_unit_ids, unit_ids)) + unit_inds = np.flatnonzero(np.in1d(old_unit_ids, unit_ids)) - spike_mask = isin(self.spikes["unit_index"], unit_inds) + spike_mask = np.in1d(self.spikes["unit_index"], unit_inds) new_amplitude_scalings = self._extension_data["amplitude_scalings"][spike_mask] return dict(amplitude_scalings=new_amplitude_scalings) diff --git a/src/spikeinterface/postprocessing/spike_amplitudes.py b/src/spikeinterface/postprocessing/spike_amplitudes.py index 729dbd12bb..62a4e2c320 100644 --- a/src/spikeinterface/postprocessing/spike_amplitudes.py +++ b/src/spikeinterface/postprocessing/spike_amplitudes.py @@ -28,13 +28,13 @@ def _select_extension_data(self, unit_ids): # load filter and save amplitude files sorting = self.waveform_extractor.sorting spikes = sorting.to_spike_vector(concatenated=False) - (keep_unit_indices,) = np.nonzero(isin(sorting.unit_ids, unit_ids)) + (keep_unit_indices,) = np.nonzero(np.in1d(sorting.unit_ids, unit_ids)) new_extension_data = dict() for seg_index in range(sorting.get_num_segments()): amp_data_name = 
f"amplitude_segment_{seg_index}" amps = self._extension_data[amp_data_name] - filtered_idxs = isin(spikes[seg_index]["unit_index"], keep_unit_indices) + filtered_idxs = np.in1d(spikes[seg_index]["unit_index"], keep_unit_indices) new_extension_data[amp_data_name] = amps[filtered_idxs] return new_extension_data diff --git a/src/spikeinterface/postprocessing/spike_locations.py b/src/spikeinterface/postprocessing/spike_locations.py index eb3f1255c8..c6f498f7e8 100644 --- a/src/spikeinterface/postprocessing/spike_locations.py +++ b/src/spikeinterface/postprocessing/spike_locations.py @@ -32,9 +32,9 @@ def _set_params(self, ms_before=0.5, ms_after=0.5, method="center_of_mass", meth def _select_extension_data(self, unit_ids): old_unit_ids = self.waveform_extractor.sorting.unit_ids - unit_inds = np.flatnonzero(isin(old_unit_ids, unit_ids)) + unit_inds = np.flatnonzero(np.in1d(old_unit_ids, unit_ids)) - spike_mask = isin(self.spikes["unit_index"], unit_inds) + spike_mask = np.in1d(self.spikes["unit_index"], unit_inds) new_spike_locations = self._extension_data["spike_locations"][spike_mask] return dict(spike_locations=new_spike_locations) diff --git a/src/spikeinterface/preprocessing/interpolate_bad_channels.py b/src/spikeinterface/preprocessing/interpolate_bad_channels.py index 5773b6a2ef..e634d55e7f 100644 --- a/src/spikeinterface/preprocessing/interpolate_bad_channels.py +++ b/src/spikeinterface/preprocessing/interpolate_bad_channels.py @@ -49,7 +49,7 @@ def __init__(self, recording, bad_channel_ids, sigma_um=None, p=1.3, weights=Non self.bad_channel_ids = bad_channel_ids self._bad_channel_idxs = recording.ids_to_indices(self.bad_channel_ids) - self._good_channel_idxs = ~isin(np.arange(recording.get_num_channels()), self._bad_channel_idxs) + self._good_channel_idxs = ~np.in1d(np.arange(recording.get_num_channels()), self._bad_channel_idxs) self._bad_channel_idxs.setflags(write=False) if sigma_um is None: diff --git a/src/spikeinterface/qualitymetrics/misc_metrics.py b/src/spikeinterface/qualitymetrics/misc_metrics.py index a51bfe9164..ee28485983 100644 --- a/src/spikeinterface/qualitymetrics/misc_metrics.py +++ b/src/spikeinterface/qualitymetrics/misc_metrics.py @@ -544,7 +544,7 @@ def compute_synchrony_metrics(waveform_extractor, synchrony_sizes=(2, 4, 8), **k # some segments/units might have no spikes if len(spikes_per_unit) == 0: continue - spike_complexity = complexity[isin(unique_spike_index, spikes_per_unit["sample_index"])] + spike_complexity = complexity[np.in1d(unique_spike_index, spikes_per_unit["sample_index"])] for synchrony_size in synchrony_sizes: synchrony_counts[synchrony_size][unit_index] += np.count_nonzero(spike_complexity >= synchrony_size) diff --git a/src/spikeinterface/qualitymetrics/pca_metrics.py b/src/spikeinterface/qualitymetrics/pca_metrics.py index 0702c8f35a..59000211d4 100644 --- a/src/spikeinterface/qualitymetrics/pca_metrics.py +++ b/src/spikeinterface/qualitymetrics/pca_metrics.py @@ -152,8 +152,8 @@ def calculate_pc_metrics( neighbor_unit_ids = unit_ids neighbor_channel_indices = we.channel_ids_to_indices(neighbor_channel_ids) - labels = all_labels[isin(all_labels, neighbor_unit_ids)] - pcs = all_pcs[isin(all_labels, neighbor_unit_ids)][:, :, neighbor_channel_indices] + labels = all_labels[np.in1d(all_labels, neighbor_unit_ids)] + pcs = all_pcs[np.in1d(all_labels, neighbor_unit_ids)][:, :, neighbor_channel_indices] pcs_flat = pcs.reshape(pcs.shape[0], -1) func_args = ( @@ -506,7 +506,7 @@ def nearest_neighbors_isolation( other_units_ids = [ unit_id for unit_id 
in other_units_ids - if np.sum(isin(sparsity.unit_id_to_channel_indices[unit_id], closest_chans_target_unit)) + if np.sum(np.in1d(sparsity.unit_id_to_channel_indices[unit_id], closest_chans_target_unit)) >= (n_channels_target_unit * min_spatial_overlap) ] @@ -536,10 +536,10 @@ def nearest_neighbors_isolation( if waveform_extractor.is_sparse(): # in this case, waveforms are sparse so we need to do some smart indexing waveforms_target_unit_sampled = waveforms_target_unit_sampled[ - :, :, isin(closest_chans_target_unit, common_channel_idxs) + :, :, np.in1d(closest_chans_target_unit, common_channel_idxs) ] waveforms_other_unit_sampled = waveforms_other_unit_sampled[ - :, :, isin(closest_chans_other_unit, common_channel_idxs) + :, :, np.in1d(closest_chans_other_unit, common_channel_idxs) ] else: waveforms_target_unit_sampled = waveforms_target_unit_sampled[:, :, common_channel_idxs] diff --git a/src/spikeinterface/sortingcomponents/benchmark/benchmark_matching.py b/src/spikeinterface/sortingcomponents/benchmark/benchmark_matching.py index ee8ace42ee..07c7db155c 100644 --- a/src/spikeinterface/sortingcomponents/benchmark/benchmark_matching.py +++ b/src/spikeinterface/sortingcomponents/benchmark/benchmark_matching.py @@ -502,7 +502,7 @@ def plot_errors_matching(benchmark, comp, unit_id, nb_spikes=200, metric="cosine seg_num = 0 # TODO: make compatible with multiple segments idx_1 = np.where(comp.get_labels1(unit_id)[seg_num] == label) idx_2 = benchmark.we.get_sampled_indices(unit_id)["spike_index"] - intersection = np.where(isin(idx_2, idx_1))[0] + intersection = np.where(np.in1d(idx_2, idx_1))[0] intersection = np.random.permutation(intersection)[:nb_spikes] if len(intersection) == 0: print(f"No {label}s found for unit {unit_id}") @@ -552,7 +552,7 @@ def plot_errors_matching_all_neurons(benchmark, comp, nb_spikes=200, metric="cos for label in ["TP", "FN"]: idx_1 = np.where(comp.get_labels1(unit_id) == label)[0] - intersection = np.where(isin(idx_2, idx_1))[0] + intersection = np.where(np.in1d(idx_2, idx_1))[0] intersection = np.random.permutation(intersection)[:nb_spikes] wfs_sliced = wfs[intersection, :, :] diff --git a/src/spikeinterface/sortingcomponents/benchmark/benchmark_peak_selection.py b/src/spikeinterface/sortingcomponents/benchmark/benchmark_peak_selection.py index ca18db58d6..1514a63dd4 100644 --- a/src/spikeinterface/sortingcomponents/benchmark/benchmark_peak_selection.py +++ b/src/spikeinterface/sortingcomponents/benchmark/benchmark_peak_selection.py @@ -133,7 +133,7 @@ def run(self, peaks=None, positions=None, delta=0.2): matches = make_matching_events(times2, spikes1["sample_index"], int(delta * self.sampling_rate / 1000)) self.good_matches = matches["index1"] - garbage_matches = ~isin(np.arange(len(times2)), self.good_matches) + garbage_matches = ~np.in1d(np.arange(len(times2)), self.good_matches) garbage_channels = self.peaks["channel_index"][garbage_matches] garbage_peaks = times2[garbage_matches] nb_garbage = len(garbage_peaks) @@ -365,7 +365,7 @@ def plot_clusters_amplitudes(self, title=None, show_probe=False, clim=(-100, 0), idx = self.waveforms["full_gt"].get_sampled_indices(unit_id)["spike_index"] all_spikes = self.waveforms["full_gt"].sorting.get_unit_spike_train(unit_id) - mask = isin(self.gt_peaks["sample_index"], all_spikes[idx]) + mask = np.in1d(self.gt_peaks["sample_index"], all_spikes[idx]) colors = scalarMap.to_rgba(self.gt_peaks["amplitude"][mask]) ax.scatter(self.gt_positions["x"][mask], self.gt_positions["y"][mask], c=colors, s=1, alpha=0.5) x_mean, y_mean = 
(self.gt_positions["x"][mask].mean(), self.gt_positions["y"][mask].mean()) @@ -391,7 +391,7 @@ def plot_clusters_amplitudes(self, title=None, show_probe=False, clim=(-100, 0), idx = self.waveforms["gt"].get_sampled_indices(unit_id)["spike_index"] all_spikes = self.waveforms["gt"].sorting.get_unit_spike_train(unit_id) - mask = isin(self.sliced_gt_peaks["sample_index"], all_spikes[idx]) + mask = np.in1d(self.sliced_gt_peaks["sample_index"], all_spikes[idx]) colors = scalarMap.to_rgba(self.sliced_gt_peaks["amplitude"][mask]) ax.scatter( self.sliced_gt_positions["x"][mask], self.sliced_gt_positions["y"][mask], c=colors, s=1, alpha=0.5 @@ -420,7 +420,7 @@ def plot_clusters_amplitudes(self, title=None, show_probe=False, clim=(-100, 0), idx = self.waveforms["garbage"].get_sampled_indices(unit_id)["spike_index"] all_spikes = self.waveforms["garbage"].sorting.get_unit_spike_train(unit_id) - mask = isin(self.garbage_peaks["sample_index"], all_spikes[idx]) + mask = np.in1d(self.garbage_peaks["sample_index"], all_spikes[idx]) colors = scalarMap.to_rgba(self.garbage_peaks["amplitude"][mask]) ax.scatter(self.garbage_positions["x"][mask], self.garbage_positions["y"][mask], c=colors, s=1, alpha=0.5) x_mean, y_mean = (self.garbage_positions["x"][mask].mean(), self.garbage_positions["y"][mask].mean()) diff --git a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py index fb45e5fc3a..6edf5af16b 100644 --- a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py +++ b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py @@ -30,7 +30,7 @@ def _split_waveforms( local_labels_with_noise = clustering[0] cluster_probability = clustering[2] (persistent_clusters,) = np.nonzero(cluster_probability > probability_thr) - local_labels_with_noise[~isin(local_labels_with_noise, persistent_clusters)] = -1 + local_labels_with_noise[~np.in1d(local_labels_with_noise, persistent_clusters)] = -1 # remove super small cluster labels, count = np.unique(local_labels_with_noise[:valid_size], return_counts=True) @@ -43,7 +43,7 @@ def _split_waveforms( to_remove = labels[(count / valid_size) < minimum_cluster_size_ratio] # ~ print('to_remove', to_remove, count / valid_size) if to_remove.size > 0: - local_labels_with_noise[isin(local_labels_with_noise, to_remove)] = -1 + local_labels_with_noise[np.in1d(local_labels_with_noise, to_remove)] = -1 local_labels_with_noise[valid_size:] = -2 @@ -123,7 +123,7 @@ def _split_waveforms_nested( active_labels_with_noise = clustering[0] cluster_probability = clustering[2] (persistent_clusters,) = np.nonzero(clustering[2] > probability_thr) - active_labels_with_noise[~isin(active_labels_with_noise, persistent_clusters)] = -1 + active_labels_with_noise[~np.in1d(active_labels_with_noise, persistent_clusters)] = -1 active_labels = active_labels_with_noise[active_ind < valid_size] active_labels_set = np.unique(active_labels) @@ -381,9 +381,9 @@ def auto_clean_clustering( continue wfs0 = wfs_arrays[label0] - wfs0 = wfs0[:, :, isin(channel_inds0, used_chans)] + wfs0 = wfs0[:, :, np.in1d(channel_inds0, used_chans)] wfs1 = wfs_arrays[label1] - wfs1 = wfs1[:, :, isin(channel_inds1, used_chans)] + wfs1 = wfs1[:, :, np.in1d(channel_inds1, used_chans)] # TODO : remove assert wfs0.shape[2] == wfs1.shape[2] diff --git a/src/spikeinterface/sortingcomponents/clustering/sliding_hdbscan.py b/src/spikeinterface/sortingcomponents/clustering/sliding_hdbscan.py index 0f1d503bdf..aeec14158f 100644 --- 
a/src/spikeinterface/sortingcomponents/clustering/sliding_hdbscan.py +++ b/src/spikeinterface/sortingcomponents/clustering/sliding_hdbscan.py @@ -198,7 +198,7 @@ def _find_clusters(cls, recording, peaks, wfs_arrays, sparsity_mask, noise, d): for chan_ind in prev_local_chan_inds: if total_count[chan_ind] == 0: continue - # ~ inds, = np.nonzero(isin(peaks['channel_index'], closest_channels[chan_ind]) & (peak_labels==0)) + # ~ inds, = np.nonzero(np.in1d(peaks['channel_index'], closest_channels[chan_ind]) & (peak_labels==0)) (inds,) = np.nonzero((peaks["channel_index"] == chan_ind) & (peak_labels == 0)) if inds.size <= d["min_spike_on_channel"]: chan_amps[chan_ind] = 0.0 @@ -235,12 +235,12 @@ def _find_clusters(cls, recording, peaks, wfs_arrays, sparsity_mask, noise, d): (wf_chans,) = np.nonzero(sparsity_mask[chan_ind]) # TODO: only for debug, remove later - assert np.all(isin(local_chan_inds, wf_chans)) + assert np.all(np.in1d(local_chan_inds, wf_chans)) # none label spikes wfs_chan = wfs_chan[inds, :, :] # only some channels - wfs_chan = wfs_chan[:, :, isin(wf_chans, local_chan_inds)] + wfs_chan = wfs_chan[:, :, np.in1d(wf_chans, local_chan_inds)] wfs.append(wfs_chan) # put noise to enhance clusters @@ -517,7 +517,7 @@ def _collect_sparse_waveforms(peaks, wfs_arrays, closest_channels, peak_labels, (wf_chans,) = np.nonzero(sparsity_mask[chan_ind]) # print('wf_chans', wf_chans) # TODO: only for debug, remove later - assert np.all(isin(wanted_chans, wf_chans)) + assert np.all(np.in1d(wanted_chans, wf_chans)) wfs_chan = wfs_arrays[chan_ind] # TODO: only for debug, remove later @@ -525,7 +525,7 @@ def _collect_sparse_waveforms(peaks, wfs_arrays, closest_channels, peak_labels, wfs_chan = wfs_chan[inds, :, :] # only some channels - wfs_chan = wfs_chan[:, :, isin(wf_chans, wanted_chans)] + wfs_chan = wfs_chan[:, :, np.in1d(wf_chans, wanted_chans)] wfs.append(wfs_chan) wfs = np.concatenate(wfs, axis=0) diff --git a/src/spikeinterface/widgets/unit_waveforms_density_map.py b/src/spikeinterface/widgets/unit_waveforms_density_map.py index 2515d844eb..e8a6868e92 100644 --- a/src/spikeinterface/widgets/unit_waveforms_density_map.py +++ b/src/spikeinterface/widgets/unit_waveforms_density_map.py @@ -103,7 +103,7 @@ def __init__( if same_axis and not np.array_equal(chan_inds, shared_chan_inds): # add more channels if necessary wfs_ = np.zeros((wfs.shape[0], wfs.shape[1], shared_chan_inds.size), dtype=float) - mask = isin(shared_chan_inds, chan_inds) + mask = np.in1d(shared_chan_inds, chan_inds) wfs_[:, :, mask] = wfs wfs_[:, :, ~mask] = np.nan wfs = wfs_ From 5e420f3a847102c145c705dddfb01b140b318ec3 Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Fri, 15 Sep 2023 14:21:53 +0200 Subject: [PATCH 48/90] in1d to isin with correct alias (shame on me) --- src/spikeinterface/comparison/basecomparison.py | 4 ++-- src/spikeinterface/comparison/comparisontools.py | 2 +- src/spikeinterface/core/baserecording.py | 2 +- src/spikeinterface/core/basesnippets.py | 2 +- src/spikeinterface/core/basesorting.py | 2 +- src/spikeinterface/core/generate.py | 4 ++-- src/spikeinterface/core/tests/test_sparsity.py | 2 +- src/spikeinterface/curation/mergeunitssorting.py | 4 ++-- src/spikeinterface/extractors/bids.py | 2 +- .../postprocessing/amplitude_scalings.py | 4 ++-- src/spikeinterface/postprocessing/spike_amplitudes.py | 4 ++-- src/spikeinterface/postprocessing/spike_locations.py | 4 ++-- .../preprocessing/interpolate_bad_channels.py | 2 +- src/spikeinterface/qualitymetrics/misc_metrics.py | 2 +- 
src/spikeinterface/qualitymetrics/pca_metrics.py | 10 +++++----- .../sortingcomponents/benchmark/benchmark_matching.py | 4 ++-- .../benchmark/benchmark_peak_selection.py | 8 ++++---- .../sortingcomponents/clustering/clustering_tools.py | 10 +++++----- .../sortingcomponents/clustering/sliding_hdbscan.py | 10 +++++----- .../widgets/unit_waveforms_density_map.py | 2 +- 20 files changed, 42 insertions(+), 42 deletions(-) diff --git a/src/spikeinterface/comparison/basecomparison.py b/src/spikeinterface/comparison/basecomparison.py index 79c784491a..5af20d79b5 100644 --- a/src/spikeinterface/comparison/basecomparison.py +++ b/src/spikeinterface/comparison/basecomparison.py @@ -262,11 +262,11 @@ def get_ordered_agreement_scores(self): indexes = np.arange(scores.shape[1]) order1 = [] for r in range(scores.shape[0]): - possible = indexes[~np.in1d(indexes, order1)] + possible = indexes[~np.isin(indexes, order1)] if possible.size > 0: ind = np.argmax(scores.iloc[r, possible].values) order1.append(possible[ind]) - remain = indexes[~np.in1d(indexes, order1)] + remain = indexes[~np.isin(indexes, order1)] order1.extend(remain) scores = scores.iloc[:, order1] diff --git a/src/spikeinterface/comparison/comparisontools.py b/src/spikeinterface/comparison/comparisontools.py index db45e2b25b..20ee7910b4 100644 --- a/src/spikeinterface/comparison/comparisontools.py +++ b/src/spikeinterface/comparison/comparisontools.py @@ -538,7 +538,7 @@ def do_confusion_matrix(event_counts1, event_counts2, match_12, match_event_coun matched_units2 = match_12[match_12 != -1].values unmatched_units1 = match_12[match_12 == -1].index - unmatched_units2 = unit2_ids[~np.in1d(unit2_ids, matched_units2)] + unmatched_units2 = unit2_ids[~np.isin(unit2_ids, matched_units2)] ordered_units1 = np.hstack([matched_units1, unmatched_units1]) ordered_units2 = np.hstack([matched_units2, unmatched_units2]) diff --git a/src/spikeinterface/core/baserecording.py b/src/spikeinterface/core/baserecording.py index af4970a4ad..08f187895b 100644 --- a/src/spikeinterface/core/baserecording.py +++ b/src/spikeinterface/core/baserecording.py @@ -592,7 +592,7 @@ def _channel_slice(self, channel_ids, renamed_channel_ids=None): def _remove_channels(self, remove_channel_ids): from .channelslice import ChannelSliceRecording - new_channel_ids = self.channel_ids[~np.in1d(self.channel_ids, remove_channel_ids)] + new_channel_ids = self.channel_ids[~np.isin(self.channel_ids, remove_channel_ids)] sub_recording = ChannelSliceRecording(self, new_channel_ids) return sub_recording diff --git a/src/spikeinterface/core/basesnippets.py b/src/spikeinterface/core/basesnippets.py index 737087abc1..f35bc2b266 100644 --- a/src/spikeinterface/core/basesnippets.py +++ b/src/spikeinterface/core/basesnippets.py @@ -139,7 +139,7 @@ def _channel_slice(self, channel_ids, renamed_channel_ids=None): def _remove_channels(self, remove_channel_ids): from .channelslice import ChannelSliceSnippets - new_channel_ids = self.channel_ids[~np.in1d(self.channel_ids, remove_channel_ids)] + new_channel_ids = self.channel_ids[~np.isin(self.channel_ids, remove_channel_ids)] sub_recording = ChannelSliceSnippets(self, new_channel_ids) return sub_recording diff --git a/src/spikeinterface/core/basesorting.py b/src/spikeinterface/core/basesorting.py index 52f71c2399..056134a24e 100644 --- a/src/spikeinterface/core/basesorting.py +++ b/src/spikeinterface/core/basesorting.py @@ -346,7 +346,7 @@ def remove_units(self, remove_unit_ids): """ from spikeinterface import UnitsSelectionSorting - new_unit_ids = 
self.unit_ids[~np.in1d(self.unit_ids, remove_unit_ids)] + new_unit_ids = self.unit_ids[~np.isin(self.unit_ids, remove_unit_ids)] new_sorting = UnitsSelectionSorting(self, new_unit_ids) return new_sorting diff --git a/src/spikeinterface/core/generate.py b/src/spikeinterface/core/generate.py index 401c498f03..07837bcef7 100644 --- a/src/spikeinterface/core/generate.py +++ b/src/spikeinterface/core/generate.py @@ -166,7 +166,7 @@ def generate_sorting( ) if empty_units is not None: - keep = ~np.in1d(labels, empty_units) + keep = ~np.isin(labels, empty_units) times = times[keep] labels = labels[keep] @@ -219,7 +219,7 @@ def add_synchrony_to_sorting(sorting, sync_event_ratio=0.3, seed=None): sample_index = spike["sample_index"] if sample_index not in units_used_for_spike: units_used_for_spike[sample_index] = np.array([spike["unit_index"]]) - units_not_used = unit_ids[~np.in1d(unit_ids, units_used_for_spike[sample_index])] + units_not_used = unit_ids[~np.isin(unit_ids, units_used_for_spike[sample_index])] if len(units_not_used) == 0: continue diff --git a/src/spikeinterface/core/tests/test_sparsity.py b/src/spikeinterface/core/tests/test_sparsity.py index a6b94c9b84..75182bf532 100644 --- a/src/spikeinterface/core/tests/test_sparsity.py +++ b/src/spikeinterface/core/tests/test_sparsity.py @@ -34,7 +34,7 @@ def test_ChannelSparsity(): for key, v in sparsity.unit_id_to_channel_ids.items(): assert key in unit_ids - assert np.all(np.in1d(v, channel_ids)) + assert np.all(np.isin(v, channel_ids)) for key, v in sparsity.unit_id_to_channel_indices.items(): assert key in unit_ids diff --git a/src/spikeinterface/curation/mergeunitssorting.py b/src/spikeinterface/curation/mergeunitssorting.py index 264ac3a56d..2d20a58453 100644 --- a/src/spikeinterface/curation/mergeunitssorting.py +++ b/src/spikeinterface/curation/mergeunitssorting.py @@ -59,7 +59,7 @@ def __init__(self, parent_sorting, units_to_merge, new_unit_ids=None, properties else: # we cannot automatically find new names new_unit_ids = [f"merge{i}" for i in range(num_merge)] - if np.any(np.in1d(new_unit_ids, keep_unit_ids)): + if np.any(np.isin(new_unit_ids, keep_unit_ids)): raise ValueError( "Unable to find 'new_unit_ids' because it is a string and parents " "already contain merges. Pass a list of 'new_unit_ids' as an argument." @@ -68,7 +68,7 @@ def __init__(self, parent_sorting, units_to_merge, new_unit_ids=None, properties # dtype int new_unit_ids = list(max(parents_unit_ids) + 1 + np.arange(num_merge, dtype=dtype)) else: - if np.any(np.in1d(new_unit_ids, keep_unit_ids)): + if np.any(np.isin(new_unit_ids, keep_unit_ids)): raise ValueError("'new_unit_ids' already exist in the sorting.unit_ids. 
Provide new ones") assert len(new_unit_ids) == num_merge, "new_unit_ids must have the same size as units_to_merge" diff --git a/src/spikeinterface/extractors/bids.py b/src/spikeinterface/extractors/bids.py index 02e7d5677d..8b70722652 100644 --- a/src/spikeinterface/extractors/bids.py +++ b/src/spikeinterface/extractors/bids.py @@ -76,7 +76,7 @@ def _read_probe_group(folder, bids_name, recording_channel_ids): contact_ids = channels["contact_id"].values.astype("U") # extracting information of requested channels - keep = np.in1d(channel_ids, recording_channel_ids) + keep = np.isin(channel_ids, recording_channel_ids) channel_ids = channel_ids[keep] contact_ids = contact_ids[keep] diff --git a/src/spikeinterface/postprocessing/amplitude_scalings.py b/src/spikeinterface/postprocessing/amplitude_scalings.py index 5a0148c5c4..5a3542cdf9 100644 --- a/src/spikeinterface/postprocessing/amplitude_scalings.py +++ b/src/spikeinterface/postprocessing/amplitude_scalings.py @@ -47,9 +47,9 @@ def _set_params( def _select_extension_data(self, unit_ids): old_unit_ids = self.waveform_extractor.sorting.unit_ids - unit_inds = np.flatnonzero(np.in1d(old_unit_ids, unit_ids)) + unit_inds = np.flatnonzero(np.isin(old_unit_ids, unit_ids)) - spike_mask = np.in1d(self.spikes["unit_index"], unit_inds) + spike_mask = np.isin(self.spikes["unit_index"], unit_inds) new_amplitude_scalings = self._extension_data["amplitude_scalings"][spike_mask] return dict(amplitude_scalings=new_amplitude_scalings) diff --git a/src/spikeinterface/postprocessing/spike_amplitudes.py b/src/spikeinterface/postprocessing/spike_amplitudes.py index 62a4e2c320..b6f25cda95 100644 --- a/src/spikeinterface/postprocessing/spike_amplitudes.py +++ b/src/spikeinterface/postprocessing/spike_amplitudes.py @@ -28,13 +28,13 @@ def _select_extension_data(self, unit_ids): # load filter and save amplitude files sorting = self.waveform_extractor.sorting spikes = sorting.to_spike_vector(concatenated=False) - (keep_unit_indices,) = np.nonzero(np.in1d(sorting.unit_ids, unit_ids)) + (keep_unit_indices,) = np.nonzero(np.isin(sorting.unit_ids, unit_ids)) new_extension_data = dict() for seg_index in range(sorting.get_num_segments()): amp_data_name = f"amplitude_segment_{seg_index}" amps = self._extension_data[amp_data_name] - filtered_idxs = np.in1d(spikes[seg_index]["unit_index"], keep_unit_indices) + filtered_idxs = np.isin(spikes[seg_index]["unit_index"], keep_unit_indices) new_extension_data[amp_data_name] = amps[filtered_idxs] return new_extension_data diff --git a/src/spikeinterface/postprocessing/spike_locations.py b/src/spikeinterface/postprocessing/spike_locations.py index c6f498f7e8..4cbe4d665e 100644 --- a/src/spikeinterface/postprocessing/spike_locations.py +++ b/src/spikeinterface/postprocessing/spike_locations.py @@ -32,9 +32,9 @@ def _set_params(self, ms_before=0.5, ms_after=0.5, method="center_of_mass", meth def _select_extension_data(self, unit_ids): old_unit_ids = self.waveform_extractor.sorting.unit_ids - unit_inds = np.flatnonzero(np.in1d(old_unit_ids, unit_ids)) + unit_inds = np.flatnonzero(np.isin(old_unit_ids, unit_ids)) - spike_mask = np.in1d(self.spikes["unit_index"], unit_inds) + spike_mask = np.isin(self.spikes["unit_index"], unit_inds) new_spike_locations = self._extension_data["spike_locations"][spike_mask] return dict(spike_locations=new_spike_locations) diff --git a/src/spikeinterface/preprocessing/interpolate_bad_channels.py b/src/spikeinterface/preprocessing/interpolate_bad_channels.py index e634d55e7f..95ecd0fe52 100644 --- 
a/src/spikeinterface/preprocessing/interpolate_bad_channels.py +++ b/src/spikeinterface/preprocessing/interpolate_bad_channels.py @@ -49,7 +49,7 @@ def __init__(self, recording, bad_channel_ids, sigma_um=None, p=1.3, weights=Non self.bad_channel_ids = bad_channel_ids self._bad_channel_idxs = recording.ids_to_indices(self.bad_channel_ids) - self._good_channel_idxs = ~np.in1d(np.arange(recording.get_num_channels()), self._bad_channel_idxs) + self._good_channel_idxs = ~np.isin(np.arange(recording.get_num_channels()), self._bad_channel_idxs) self._bad_channel_idxs.setflags(write=False) if sigma_um is None: diff --git a/src/spikeinterface/qualitymetrics/misc_metrics.py b/src/spikeinterface/qualitymetrics/misc_metrics.py index ee28485983..4e871492f8 100644 --- a/src/spikeinterface/qualitymetrics/misc_metrics.py +++ b/src/spikeinterface/qualitymetrics/misc_metrics.py @@ -544,7 +544,7 @@ def compute_synchrony_metrics(waveform_extractor, synchrony_sizes=(2, 4, 8), **k # some segments/units might have no spikes if len(spikes_per_unit) == 0: continue - spike_complexity = complexity[np.in1d(unique_spike_index, spikes_per_unit["sample_index"])] + spike_complexity = complexity[np.isin(unique_spike_index, spikes_per_unit["sample_index"])] for synchrony_size in synchrony_sizes: synchrony_counts[synchrony_size][unit_index] += np.count_nonzero(spike_complexity >= synchrony_size) diff --git a/src/spikeinterface/qualitymetrics/pca_metrics.py b/src/spikeinterface/qualitymetrics/pca_metrics.py index 59000211d4..ed06f7d738 100644 --- a/src/spikeinterface/qualitymetrics/pca_metrics.py +++ b/src/spikeinterface/qualitymetrics/pca_metrics.py @@ -152,8 +152,8 @@ def calculate_pc_metrics( neighbor_unit_ids = unit_ids neighbor_channel_indices = we.channel_ids_to_indices(neighbor_channel_ids) - labels = all_labels[np.in1d(all_labels, neighbor_unit_ids)] - pcs = all_pcs[np.in1d(all_labels, neighbor_unit_ids)][:, :, neighbor_channel_indices] + labels = all_labels[np.isin(all_labels, neighbor_unit_ids)] + pcs = all_pcs[np.isin(all_labels, neighbor_unit_ids)][:, :, neighbor_channel_indices] pcs_flat = pcs.reshape(pcs.shape[0], -1) func_args = ( @@ -506,7 +506,7 @@ def nearest_neighbors_isolation( other_units_ids = [ unit_id for unit_id in other_units_ids - if np.sum(np.in1d(sparsity.unit_id_to_channel_indices[unit_id], closest_chans_target_unit)) + if np.sum(np.isin(sparsity.unit_id_to_channel_indices[unit_id], closest_chans_target_unit)) >= (n_channels_target_unit * min_spatial_overlap) ] @@ -536,10 +536,10 @@ def nearest_neighbors_isolation( if waveform_extractor.is_sparse(): # in this case, waveforms are sparse so we need to do some smart indexing waveforms_target_unit_sampled = waveforms_target_unit_sampled[ - :, :, np.in1d(closest_chans_target_unit, common_channel_idxs) + :, :, np.isin(closest_chans_target_unit, common_channel_idxs) ] waveforms_other_unit_sampled = waveforms_other_unit_sampled[ - :, :, np.in1d(closest_chans_other_unit, common_channel_idxs) + :, :, np.isin(closest_chans_other_unit, common_channel_idxs) ] else: waveforms_target_unit_sampled = waveforms_target_unit_sampled[:, :, common_channel_idxs] diff --git a/src/spikeinterface/sortingcomponents/benchmark/benchmark_matching.py b/src/spikeinterface/sortingcomponents/benchmark/benchmark_matching.py index 07c7db155c..772c99bc0a 100644 --- a/src/spikeinterface/sortingcomponents/benchmark/benchmark_matching.py +++ b/src/spikeinterface/sortingcomponents/benchmark/benchmark_matching.py @@ -502,7 +502,7 @@ def plot_errors_matching(benchmark, comp, unit_id, 
nb_spikes=200, metric="cosine seg_num = 0 # TODO: make compatible with multiple segments idx_1 = np.where(comp.get_labels1(unit_id)[seg_num] == label) idx_2 = benchmark.we.get_sampled_indices(unit_id)["spike_index"] - intersection = np.where(np.in1d(idx_2, idx_1))[0] + intersection = np.where(np.isin(idx_2, idx_1))[0] intersection = np.random.permutation(intersection)[:nb_spikes] if len(intersection) == 0: print(f"No {label}s found for unit {unit_id}") @@ -552,7 +552,7 @@ def plot_errors_matching_all_neurons(benchmark, comp, nb_spikes=200, metric="cos for label in ["TP", "FN"]: idx_1 = np.where(comp.get_labels1(unit_id) == label)[0] - intersection = np.where(np.in1d(idx_2, idx_1))[0] + intersection = np.where(np.isin(idx_2, idx_1))[0] intersection = np.random.permutation(intersection)[:nb_spikes] wfs_sliced = wfs[intersection, :, :] diff --git a/src/spikeinterface/sortingcomponents/benchmark/benchmark_peak_selection.py b/src/spikeinterface/sortingcomponents/benchmark/benchmark_peak_selection.py index 1514a63dd4..73497a59fd 100644 --- a/src/spikeinterface/sortingcomponents/benchmark/benchmark_peak_selection.py +++ b/src/spikeinterface/sortingcomponents/benchmark/benchmark_peak_selection.py @@ -133,7 +133,7 @@ def run(self, peaks=None, positions=None, delta=0.2): matches = make_matching_events(times2, spikes1["sample_index"], int(delta * self.sampling_rate / 1000)) self.good_matches = matches["index1"] - garbage_matches = ~np.in1d(np.arange(len(times2)), self.good_matches) + garbage_matches = ~np.isin(np.arange(len(times2)), self.good_matches) garbage_channels = self.peaks["channel_index"][garbage_matches] garbage_peaks = times2[garbage_matches] nb_garbage = len(garbage_peaks) @@ -365,7 +365,7 @@ def plot_clusters_amplitudes(self, title=None, show_probe=False, clim=(-100, 0), idx = self.waveforms["full_gt"].get_sampled_indices(unit_id)["spike_index"] all_spikes = self.waveforms["full_gt"].sorting.get_unit_spike_train(unit_id) - mask = np.in1d(self.gt_peaks["sample_index"], all_spikes[idx]) + mask = np.isin(self.gt_peaks["sample_index"], all_spikes[idx]) colors = scalarMap.to_rgba(self.gt_peaks["amplitude"][mask]) ax.scatter(self.gt_positions["x"][mask], self.gt_positions["y"][mask], c=colors, s=1, alpha=0.5) x_mean, y_mean = (self.gt_positions["x"][mask].mean(), self.gt_positions["y"][mask].mean()) @@ -391,7 +391,7 @@ def plot_clusters_amplitudes(self, title=None, show_probe=False, clim=(-100, 0), idx = self.waveforms["gt"].get_sampled_indices(unit_id)["spike_index"] all_spikes = self.waveforms["gt"].sorting.get_unit_spike_train(unit_id) - mask = np.in1d(self.sliced_gt_peaks["sample_index"], all_spikes[idx]) + mask = np.isin(self.sliced_gt_peaks["sample_index"], all_spikes[idx]) colors = scalarMap.to_rgba(self.sliced_gt_peaks["amplitude"][mask]) ax.scatter( self.sliced_gt_positions["x"][mask], self.sliced_gt_positions["y"][mask], c=colors, s=1, alpha=0.5 @@ -420,7 +420,7 @@ def plot_clusters_amplitudes(self, title=None, show_probe=False, clim=(-100, 0), idx = self.waveforms["garbage"].get_sampled_indices(unit_id)["spike_index"] all_spikes = self.waveforms["garbage"].sorting.get_unit_spike_train(unit_id) - mask = np.in1d(self.garbage_peaks["sample_index"], all_spikes[idx]) + mask = np.isin(self.garbage_peaks["sample_index"], all_spikes[idx]) colors = scalarMap.to_rgba(self.garbage_peaks["amplitude"][mask]) ax.scatter(self.garbage_positions["x"][mask], self.garbage_positions["y"][mask], c=colors, s=1, alpha=0.5) x_mean, y_mean = (self.garbage_positions["x"][mask].mean(), 
self.garbage_positions["y"][mask].mean()) diff --git a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py index 6edf5af16b..23fdbf1979 100644 --- a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py +++ b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py @@ -30,7 +30,7 @@ def _split_waveforms( local_labels_with_noise = clustering[0] cluster_probability = clustering[2] (persistent_clusters,) = np.nonzero(cluster_probability > probability_thr) - local_labels_with_noise[~np.in1d(local_labels_with_noise, persistent_clusters)] = -1 + local_labels_with_noise[~np.isin(local_labels_with_noise, persistent_clusters)] = -1 # remove super small cluster labels, count = np.unique(local_labels_with_noise[:valid_size], return_counts=True) @@ -43,7 +43,7 @@ def _split_waveforms( to_remove = labels[(count / valid_size) < minimum_cluster_size_ratio] # ~ print('to_remove', to_remove, count / valid_size) if to_remove.size > 0: - local_labels_with_noise[np.in1d(local_labels_with_noise, to_remove)] = -1 + local_labels_with_noise[np.isin(local_labels_with_noise, to_remove)] = -1 local_labels_with_noise[valid_size:] = -2 @@ -123,7 +123,7 @@ def _split_waveforms_nested( active_labels_with_noise = clustering[0] cluster_probability = clustering[2] (persistent_clusters,) = np.nonzero(clustering[2] > probability_thr) - active_labels_with_noise[~np.in1d(active_labels_with_noise, persistent_clusters)] = -1 + active_labels_with_noise[~np.isin(active_labels_with_noise, persistent_clusters)] = -1 active_labels = active_labels_with_noise[active_ind < valid_size] active_labels_set = np.unique(active_labels) @@ -381,9 +381,9 @@ def auto_clean_clustering( continue wfs0 = wfs_arrays[label0] - wfs0 = wfs0[:, :, np.in1d(channel_inds0, used_chans)] + wfs0 = wfs0[:, :, np.isin(channel_inds0, used_chans)] wfs1 = wfs_arrays[label1] - wfs1 = wfs1[:, :, np.in1d(channel_inds1, used_chans)] + wfs1 = wfs1[:, :, np.isin(channel_inds1, used_chans)] # TODO : remove assert wfs0.shape[2] == wfs1.shape[2] diff --git a/src/spikeinterface/sortingcomponents/clustering/sliding_hdbscan.py b/src/spikeinterface/sortingcomponents/clustering/sliding_hdbscan.py index aeec14158f..08ce9f6791 100644 --- a/src/spikeinterface/sortingcomponents/clustering/sliding_hdbscan.py +++ b/src/spikeinterface/sortingcomponents/clustering/sliding_hdbscan.py @@ -198,7 +198,7 @@ def _find_clusters(cls, recording, peaks, wfs_arrays, sparsity_mask, noise, d): for chan_ind in prev_local_chan_inds: if total_count[chan_ind] == 0: continue - # ~ inds, = np.nonzero(np.in1d(peaks['channel_index'], closest_channels[chan_ind]) & (peak_labels==0)) + # ~ inds, = np.nonzero(np.isin(peaks['channel_index'], closest_channels[chan_ind]) & (peak_labels==0)) (inds,) = np.nonzero((peaks["channel_index"] == chan_ind) & (peak_labels == 0)) if inds.size <= d["min_spike_on_channel"]: chan_amps[chan_ind] = 0.0 @@ -235,12 +235,12 @@ def _find_clusters(cls, recording, peaks, wfs_arrays, sparsity_mask, noise, d): (wf_chans,) = np.nonzero(sparsity_mask[chan_ind]) # TODO: only for debug, remove later - assert np.all(np.in1d(local_chan_inds, wf_chans)) + assert np.all(np.isin(local_chan_inds, wf_chans)) # none label spikes wfs_chan = wfs_chan[inds, :, :] # only some channels - wfs_chan = wfs_chan[:, :, np.in1d(wf_chans, local_chan_inds)] + wfs_chan = wfs_chan[:, :, np.isin(wf_chans, local_chan_inds)] wfs.append(wfs_chan) # put noise to enhance clusters @@ -517,7 +517,7 @@ def 
_collect_sparse_waveforms(peaks, wfs_arrays, closest_channels, peak_labels, (wf_chans,) = np.nonzero(sparsity_mask[chan_ind]) # print('wf_chans', wf_chans) # TODO: only for debug, remove later - assert np.all(np.in1d(wanted_chans, wf_chans)) + assert np.all(np.isin(wanted_chans, wf_chans)) wfs_chan = wfs_arrays[chan_ind] # TODO: only for debug, remove later @@ -525,7 +525,7 @@ def _collect_sparse_waveforms(peaks, wfs_arrays, closest_channels, peak_labels, wfs_chan = wfs_chan[inds, :, :] # only some channels - wfs_chan = wfs_chan[:, :, np.in1d(wf_chans, wanted_chans)] + wfs_chan = wfs_chan[:, :, np.isin(wf_chans, wanted_chans)] wfs.append(wfs_chan) wfs = np.concatenate(wfs, axis=0) diff --git a/src/spikeinterface/widgets/unit_waveforms_density_map.py b/src/spikeinterface/widgets/unit_waveforms_density_map.py index e8a6868e92..b3391c0712 100644 --- a/src/spikeinterface/widgets/unit_waveforms_density_map.py +++ b/src/spikeinterface/widgets/unit_waveforms_density_map.py @@ -103,7 +103,7 @@ def __init__( if same_axis and not np.array_equal(chan_inds, shared_chan_inds): # add more channels if necessary wfs_ = np.zeros((wfs.shape[0], wfs.shape[1], shared_chan_inds.size), dtype=float) - mask = np.in1d(shared_chan_inds, chan_inds) + mask = np.isin(shared_chan_inds, chan_inds) wfs_[:, :, mask] = wfs wfs_[:, :, ~mask] = np.nan wfs = wfs_ From 7aa96d3a81c685dfb9d242fc5e3057d352c376dd Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 15 Sep 2023 13:31:16 +0000 Subject: [PATCH 49/90] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/comparison/studytools.py | 5 +- src/spikeinterface/sorters/basesorter.py | 2 +- src/spikeinterface/sorters/launcher.py | 45 ++++++++--------- .../sorters/tests/test_launcher.py | 49 +++++++++---------- 4 files changed, 44 insertions(+), 57 deletions(-) diff --git a/src/spikeinterface/comparison/studytools.py b/src/spikeinterface/comparison/studytools.py index 00119c1586..26d2c1ad6f 100644 --- a/src/spikeinterface/comparison/studytools.py +++ b/src/spikeinterface/comparison/studytools.py @@ -29,9 +29,6 @@ from .paircomparisons import compare_sorter_to_ground_truth - - - # This is deprecated and will be removed def iter_working_folder(working_folder): working_folder = Path(working_folder) @@ -54,6 +51,7 @@ def iter_working_folder(working_folder): continue yield rec_name, sorter_name, output_folder + # This is deprecated and will be removed def iter_sorting_output(working_folder): """Iterator over output_folder to retrieve all triplets of (rec_name, sorter_name, sorting).""" @@ -63,7 +61,6 @@ def iter_sorting_output(working_folder): yield rec_name, sorter_name, sorting - def setup_comparison_study(study_folder, gt_dict, **job_kwargs): """ Based on a dict of (recording, sorting) create the study folder. 
diff --git a/src/spikeinterface/sorters/basesorter.py b/src/spikeinterface/sorters/basesorter.py index aa76809b58..c7581ba1e1 100644 --- a/src/spikeinterface/sorters/basesorter.py +++ b/src/spikeinterface/sorters/basesorter.py @@ -421,4 +421,4 @@ def is_log_ok(output_folder): run_time = log.get("run_time", None) ok = run_time is not None return ok - return False \ No newline at end of file + return False diff --git a/src/spikeinterface/sorters/launcher.py b/src/spikeinterface/sorters/launcher.py index b158eba22d..d04a89fdf1 100644 --- a/src/spikeinterface/sorters/launcher.py +++ b/src/spikeinterface/sorters/launcher.py @@ -11,7 +11,7 @@ import sys import warnings -from spikeinterface.core import aggregate_units +from spikeinterface.core import aggregate_units from .sorterlist import sorter_dict from .runsorter import run_sorter @@ -28,6 +28,7 @@ _implemented_engine = list(_default_engine_kwargs.keys()) + def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=False): """ Run several :py:func:`run_sorter()` sequentially or in parallel given a list of jobs. @@ -38,18 +39,18 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=Fal for job in job_list: run_sorter(**job) - + The following engines block the I/O: * "loop" * "joblib" * "multiprocessing" * "dask" - + The following engines are *asynchronous*: * "slurm" - + Where *blocking* means that this function is blocking until the results are returned. - This is in opposition to *asynchronous*, where the function returns `None` almost immediately (aka non-blocking), + This is in opposition to *asynchronous*, where the function returns `None` almost immediately (aka non-blocking), but the results must be retrieved by hand when jobs are finished. No mechanisim is provided here to be aware when jobs are finish. In this *asynchronous* case, the :py:func:read_sorter_folder() helps to retrieve individual results. @@ -61,7 +62,7 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=Fal A list a dict that are propagated to run_sorter(...) engine: str "loop", "joblib", "dask", "slurm" The engine to run the list. - * "loop": a simple loop. This engine is + * "loop": a simple loop. This engine is engine_kwargs: dict return_output: bool, dfault False @@ -79,8 +80,6 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=Fal engine_kwargs_.update(_default_engine_kwargs[engine]) engine_kwargs_.update(engine_kwargs) engine_kwargs = engine_kwargs_ - - if return_output: assert engine in ("loop", "joblib", "processpoolexecutor") @@ -109,7 +108,7 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=Fal max_workers = engine_kwargs["max_workers"] mp_context = engine_kwargs["mp_context"] - + with ProcessPoolExecutor(max_workers=max_workers, mp_context=mp_context) as executor: futures = [] for kwargs in job_list: @@ -173,6 +172,7 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=Fal return out + _slurm_script = """#! 
{python} from numpy import array from spikeinterface import load_extractor @@ -189,8 +189,6 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=Fal """ - - def run_sorter_by_property( sorter_name, recording, @@ -258,10 +256,10 @@ def run_sorter_by_property( """ if mode_if_folder_exists is not None: warnings.warn( - "run_sorter_by_property(): mode_if_folder_exists is not used anymore", - DeprecationWarning, - stacklevel=2, - ) + "run_sorter_by_property(): mode_if_folder_exists is not used anymore", + DeprecationWarning, + stacklevel=2, + ) working_folder = Path(working_folder).absolute() @@ -269,7 +267,7 @@ def run_sorter_by_property( f"The 'grouping_property' {grouping_property} is not " f"a recording property!" ) recording_dict = recording.split_by(grouping_property) - + job_list = [] for k, rec in recording_dict.items(): job = dict( @@ -279,10 +277,10 @@ def run_sorter_by_property( verbose=verbose, docker_image=docker_image, singularity_image=singularity_image, - **sorter_params + **sorter_params, ) job_list.append(job) - + sorting_list = run_sorter_jobs(job_list, engine=engine, engine_kwargs=engine_kwargs, return_output=True) unit_groups = [] @@ -298,7 +296,6 @@ def run_sorter_by_property( return aggregate_sorting - # This is deprecated and will be removed def run_sorters( sorter_list, @@ -316,7 +313,7 @@ def run_sorters( """ This function is deprecated and will be removed in version 0.100 Please use run_sorter_jobs() instead. - + Parameters ---------- sorter_list: list of str @@ -401,7 +398,6 @@ def run_sorters( elif mode_if_folder_exists == "overwrite": shutil.rmtree(str(output_folder)) elif mode_if_folder_exists == "keep": - if is_log_ok(output_folder): continue else: @@ -418,14 +414,13 @@ def run_sorters( verbose=verbose, docker_image=docker_image, singularity_image=singularity_image, - **params + **params, ) job_list.append(job) - + sorting_list = run_sorter_jobs(job_list, engine=engine, engine_kwargs=engine_kwargs, return_output=with_output) if with_output: - keys = [(rec_name, sorter_name) for rec_name in recording_dict for sorter_name in sorter_list ] + keys = [(rec_name, sorter_name) for rec_name in recording_dict for sorter_name in sorter_list] results = dict(zip(keys, sorting_list)) return results - diff --git a/src/spikeinterface/sorters/tests/test_launcher.py b/src/spikeinterface/sorters/tests/test_launcher.py index ecab64ede6..14c938f8ba 100644 --- a/src/spikeinterface/sorters/tests/test_launcher.py +++ b/src/spikeinterface/sorters/tests/test_launcher.py @@ -7,6 +7,7 @@ from pathlib import Path from spikeinterface.core import load_extractor + # from spikeinterface.extractors import toy_example from spikeinterface import generate_ground_truth_recording from spikeinterface.sorters import run_sorter_jobs, run_sorters, run_sorter_by_property @@ -17,12 +18,13 @@ else: cache_folder = Path("cache_folder") / "sorters" -base_output = cache_folder / 'sorter_output' +base_output = cache_folder / "sorter_output" # no need to have many num_recordings = 2 sorters = ["tridesclous2"] + def setup_module(): base_seed = 42 for i in range(num_recordings): @@ -44,16 +46,18 @@ def get_job_list(): for i in range(num_recordings): for sorter_name in sorters: recording = load_extractor(cache_folder / f"toy_rec_{i}") - kwargs = dict(sorter_name=sorter_name, - recording=recording, - output_folder=base_output / f"{sorter_name}_rec{i}", - verbose=True, - raise_error=False, - ) + kwargs = dict( + sorter_name=sorter_name, + recording=recording, + output_folder=base_output / 
f"{sorter_name}_rec{i}", + verbose=True, + raise_error=False, + ) jobs.append(kwargs) - + return jobs + @pytest.fixture(scope="module") def job_list(): return get_job_list() @@ -66,23 +70,24 @@ def test_run_sorter_jobs_loop(job_list): print(sortings) - - def test_run_sorter_jobs_joblib(job_list): if base_output.is_dir(): shutil.rmtree(base_output) - sortings = run_sorter_jobs(job_list, engine="joblib", engine_kwargs=dict(n_jobs=2, backend="loky"), return_output=True) + sortings = run_sorter_jobs( + job_list, engine="joblib", engine_kwargs=dict(n_jobs=2, backend="loky"), return_output=True + ) print(sortings) + def test_run_sorter_jobs_processpoolexecutor(job_list): if base_output.is_dir(): shutil.rmtree(base_output) - sortings = run_sorter_jobs(job_list, engine="processpoolexecutor", engine_kwargs=dict(max_workers=2), return_output=True) + sortings = run_sorter_jobs( + job_list, engine="processpoolexecutor", engine_kwargs=dict(max_workers=2), return_output=True + ) print(sortings) - - @pytest.mark.skipif(True, reason="This is tested locally") def test_run_sorter_jobs_dask(job_list): if base_output.is_dir(): @@ -92,12 +97,13 @@ def test_run_sorter_jobs_dask(job_list): from dask.distributed import Client test_mode = "local" - # test_mode = "client_slurm" + # test_mode = "client_slurm" if test_mode == "local": client = Client() elif test_mode == "client_slurm": from dask_jobqueue import SLURMCluster + cluster = SLURMCluster( processes=1, cores=1, @@ -133,7 +139,7 @@ def test_run_sorter_jobs_slurm(job_list): tmp_script_folder=tmp_script_folder, cpus_per_task=32, mem="32G", - ) + ), ) @@ -165,12 +171,9 @@ def test_run_sorter_by_property(): assert all([g in group_names1 for g in sorting1.get_property("group")]) - # run_sorters is deprecated # This will test will be removed in next release def test_run_sorters_with_list(): - - working_folder = cache_folder / "test_run_sorters_list" if working_folder.is_dir(): shutil.rmtree(working_folder) @@ -185,12 +188,9 @@ def test_run_sorters_with_list(): run_sorters(sorter_list, recording_list, working_folder, engine="loop", verbose=False, with_output=False) - - # run_sorters is deprecated # This will test will be removed in next release def test_run_sorters_with_dict(): - working_folder = cache_folder / "test_run_sorters_dict" if working_folder.is_dir(): shutil.rmtree(working_folder) @@ -232,9 +232,6 @@ def test_run_sorters_with_dict(): ) - - - if __name__ == "__main__": # setup_module() job_list = get_job_list() @@ -251,5 +248,3 @@ def test_run_sorters_with_dict(): # this deprecated # test_run_sorters_with_list() # test_run_sorters_with_dict() - - From 0bd70dd27b23e799696ef966d9b84a4eac3c3b22 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Fri, 15 Sep 2023 16:54:36 +0200 Subject: [PATCH 50/90] detect_bad_channels some recording is not ordered. Add more chunk default computation. 
--- .../preprocessing/detect_bad_channels.py | 26 +++++++++---------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/src/spikeinterface/preprocessing/detect_bad_channels.py b/src/spikeinterface/preprocessing/detect_bad_channels.py index 0f4800c6e8..35ed2c349b 100644 --- a/src/spikeinterface/preprocessing/detect_bad_channels.py +++ b/src/spikeinterface/preprocessing/detect_bad_channels.py @@ -17,8 +17,8 @@ def detect_bad_channels( n_neighbors=11, nyquist_threshold=0.8, direction="y", - chunk_duration_s=0.3, - num_random_chunks=10, + chunk_duration_s=.5, + num_random_chunks=100, welch_window_ms=10.0, highpass_filter_cutoff=300, neighborhood_r2_threshold=0.9, @@ -81,9 +81,10 @@ def detect_bad_channels( highpass_filter_cutoff : float If the recording is not filtered, the cutoff frequency of the highpass filter, by default 300 chunk_duration_s : float - Duration of each chunk, by default 0.3 + Duration of each chunk, by default 0.5 num_random_chunks : int - Number of random chunks, by default 10 + Number of random chunks, by default 100 + Having many chunks is important for reproducibility. welch_window_ms : float Window size for the scipy.signal.welch that will be converted to nperseg, by default 10ms neighborhood_r2_threshold : float, default 0.95 @@ -174,20 +175,18 @@ def detect_bad_channels( channel_locations = recording.get_channel_locations() dim = ["x", "y", "z"].index(direction) assert dim < channel_locations.shape[1], f"Direction {direction} is wrong" - locs_depth = channel_locations[:, dim] - if np.array_equal(np.sort(locs_depth), locs_depth): + order_f, order_r = order_channels_by_depth(recording=recording, dimensions=("x", "y")) + if np.all(np.diff(order_f) == 1): + # already ordered order_f = None order_r = None - else: - # sort by x, y to avoid ambiguity - order_f, order_r = order_channels_by_depth(recording=recording, dimensions=("x", "y")) # Create empty channel labels and fill with bad-channel detection estimate for each chunk chunk_channel_labels = np.zeros((recording.get_num_channels(), len(random_data)), dtype=np.int8) for i, random_chunk in enumerate(random_data): - random_chunk_sorted = random_chunk[order_f] if order_f is not None else random_chunk - chunk_channel_labels[:, i] = detect_bad_channels_ibl( + random_chunk_sorted = random_chunk[:, order_f] if order_f is not None else random_chunk + chunk_labels = detect_bad_channels_ibl( raw=random_chunk_sorted, fs=recording.sampling_frequency, psd_hf_threshold=psd_hf_threshold, @@ -198,11 +197,10 @@ def detect_bad_channels( nyquist_threshold=nyquist_threshold, welch_window_ms=welch_window_ms, ) + chunk_channel_labels[:, i] = chunk_labels[order_r] if order_r is not None else chunk_labels # Take the mode of the chunk estimates as final result. Convert to binary good / bad channel output. 
mode_channel_labels, _ = scipy.stats.mode(chunk_channel_labels, axis=1, keepdims=False) - if order_r is not None: - mode_channel_labels = mode_channel_labels[order_r] (bad_inds,) = np.where(mode_channel_labels != 0) bad_channel_ids = recording.channel_ids[bad_inds] @@ -306,7 +304,7 @@ def detect_bad_channels_ibl( n_neighbors : int, optional Number of neighbors to compute median fitler, by default 11 nyquist_threshold : float, optional - Threshold on Nyquist frequency to calculate HF noise band, by default 0.8 + Threshold on Nyquist frequency to calcureclate HF noise band, by default 0.8 welch_window_ms: float Window size for the scipy.signal.welch that will be converted to nperseg, by default 10ms Returns From 05ad95be8f9811ca86d6905edc13a5b5d4c2251b Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 15 Sep 2023 14:55:58 +0000 Subject: [PATCH 51/90] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/preprocessing/detect_bad_channels.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/spikeinterface/preprocessing/detect_bad_channels.py b/src/spikeinterface/preprocessing/detect_bad_channels.py index 35ed2c349b..fa61755aba 100644 --- a/src/spikeinterface/preprocessing/detect_bad_channels.py +++ b/src/spikeinterface/preprocessing/detect_bad_channels.py @@ -17,7 +17,7 @@ def detect_bad_channels( n_neighbors=11, nyquist_threshold=0.8, direction="y", - chunk_duration_s=.5, + chunk_duration_s=0.5, num_random_chunks=100, welch_window_ms=10.0, highpass_filter_cutoff=300, From bd26723e1cd1a86660abbe23d344cb299f9140ad Mon Sep 17 00:00:00 2001 From: Zach McKenzie <92116279+zm711@users.noreply.github.com> Date: Sun, 17 Sep 2023 09:40:10 -0400 Subject: [PATCH 52/90] fix folder --- .github/workflows/installation-tips-test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/installation-tips-test.yml b/.github/workflows/installation-tips-test.yml index 0e522e6baa..b3bf08954d 100644 --- a/.github/workflows/installation-tips-test.yml +++ b/.github/workflows/installation-tips-test.yml @@ -30,4 +30,4 @@ jobs: - name: Test Conda Environment Creation uses: conda-incubator/setup-miniconda@v2.2.0 with: - environment-file: ./installations_tips/full_spikeinterface_environment_${{ matrix.label }}.yml + environment-file: ./installation_tips/full_spikeinterface_environment_${{ matrix.label }}.yml From c57cfa71fae9e0cc4aada7e72435cb8f3667eecf Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Mon, 18 Sep 2023 11:03:29 +0200 Subject: [PATCH 53/90] Add an option to flip the order by depth --- src/spikeinterface/core/recording_tools.py | 7 ++++++- src/spikeinterface/core/tests/test_recording_tools.py | 2 ++ src/spikeinterface/preprocessing/depth_order.py | 8 ++++++-- 3 files changed, 14 insertions(+), 3 deletions(-) diff --git a/src/spikeinterface/core/recording_tools.py b/src/spikeinterface/core/recording_tools.py index e5901d7ee0..8236671a3b 100644 --- a/src/spikeinterface/core/recording_tools.py +++ b/src/spikeinterface/core/recording_tools.py @@ -302,7 +302,7 @@ def get_chunk_with_margin( return traces_chunk, left_margin, right_margin -def order_channels_by_depth(recording, channel_ids=None, dimensions=("x", "y")): +def order_channels_by_depth(recording, channel_ids=None, dimensions=("x", "y"), flip=False): """ Order channels by depth, by first ordering the x-axis, and then the y-axis. 
@@ -316,6 +316,9 @@ def order_channels_by_depth(recording, channel_ids=None, dimensions=("x", "y")): If str, it needs to be 'x', 'y', 'z'. If tuple or list, it sorts the locations in two dimensions using lexsort. This approach is recommended since there is less ambiguity, by default ('x', 'y') + flip: bool, default False + If flip is False then the order is bottom first (starting from tip of the probe). + If flip is True then the order is upper first. Returns ------- @@ -341,6 +344,8 @@ def order_channels_by_depth(recording, channel_ids=None, dimensions=("x", "y")): assert dim < ndim, "Invalid dimensions!" locations_to_sort += (locations[:, dim],) order_f = np.lexsort(locations_to_sort) + if flip: + order_f = order_f[::-1] order_r = np.argsort(order_f, kind="stable") return order_f, order_r diff --git a/src/spikeinterface/core/tests/test_recording_tools.py b/src/spikeinterface/core/tests/test_recording_tools.py index 6e92d155fe..1d99b192ee 100644 --- a/src/spikeinterface/core/tests/test_recording_tools.py +++ b/src/spikeinterface/core/tests/test_recording_tools.py @@ -138,11 +138,13 @@ def test_order_channels_by_depth(): order_1d, order_r1d = order_channels_by_depth(rec, dimensions="y") order_2d, order_r2d = order_channels_by_depth(rec, dimensions=("x", "y")) locations_rev = locations_copy[order_1d][order_r1d] + order_2d_fliped, order_r2d_fliped = order_channels_by_depth(rec, dimensions=("x", "y"), flip=True) assert np.array_equal(locations[:, 1], locations_copy[order_1d][:, 1]) assert np.array_equal(locations_copy[order_1d][:, 1], locations_copy[order_2d][:, 1]) assert np.array_equal(locations, locations_copy[order_2d]) assert np.array_equal(locations_copy, locations_copy[order_2d][order_r2d]) + assert np.array_equal(order_2d[::-1], order_2d_fliped) if __name__ == "__main__": diff --git a/src/spikeinterface/preprocessing/depth_order.py b/src/spikeinterface/preprocessing/depth_order.py index 0b8d8a730b..b9edded883 100644 --- a/src/spikeinterface/preprocessing/depth_order.py +++ b/src/spikeinterface/preprocessing/depth_order.py @@ -18,13 +18,16 @@ class DepthOrderRecording(ChannelSliceRecording): If str, it needs to be 'x', 'y', 'z'. If tuple or list, it sorts the locations in two dimensions using lexsort. This approach is recommended since there is less ambiguity, by default ('x', 'y') + flip: bool, default False + If flip is False then the order is bottom first (starting from tip of the probe). + If flip is True then the order is upper first. 
""" name = "depth_order" installed = True - def __init__(self, parent_recording, channel_ids=None, dimensions=("x", "y")): - order_f, order_r = order_channels_by_depth(parent_recording, channel_ids=channel_ids, dimensions=dimensions) + def __init__(self, parent_recording, channel_ids=None, dimensions=("x", "y"), flip=False): + order_f, order_r = order_channels_by_depth(parent_recording, channel_ids=channel_ids, dimensions=dimensions, flip=flip) reordered_channel_ids = parent_recording.channel_ids[order_f] ChannelSliceRecording.__init__( self, @@ -35,6 +38,7 @@ def __init__(self, parent_recording, channel_ids=None, dimensions=("x", "y")): parent_recording=parent_recording, channel_ids=channel_ids, dimensions=dimensions, + flip=flip, ) From ef165cb4a2d43df592a57a2c801c62ebe5ce780b Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 18 Sep 2023 09:03:59 +0000 Subject: [PATCH 54/90] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/preprocessing/depth_order.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/spikeinterface/preprocessing/depth_order.py b/src/spikeinterface/preprocessing/depth_order.py index b9edded883..43c43a5843 100644 --- a/src/spikeinterface/preprocessing/depth_order.py +++ b/src/spikeinterface/preprocessing/depth_order.py @@ -27,7 +27,9 @@ class DepthOrderRecording(ChannelSliceRecording): installed = True def __init__(self, parent_recording, channel_ids=None, dimensions=("x", "y"), flip=False): - order_f, order_r = order_channels_by_depth(parent_recording, channel_ids=channel_ids, dimensions=dimensions, flip=flip) + order_f, order_r = order_channels_by_depth( + parent_recording, channel_ids=channel_ids, dimensions=dimensions, flip=flip + ) reordered_channel_ids = parent_recording.channel_ids[order_f] ChannelSliceRecording.__init__( self, From d431e4ebe817993a74173f414eda139c21a83171 Mon Sep 17 00:00:00 2001 From: Garcia Samuel Date: Mon, 18 Sep 2023 17:49:10 +0200 Subject: [PATCH 55/90] Update src/spikeinterface/preprocessing/detect_bad_channels.py Co-authored-by: Alessio Buccino --- src/spikeinterface/preprocessing/detect_bad_channels.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/spikeinterface/preprocessing/detect_bad_channels.py b/src/spikeinterface/preprocessing/detect_bad_channels.py index fa61755aba..3c712946eb 100644 --- a/src/spikeinterface/preprocessing/detect_bad_channels.py +++ b/src/spikeinterface/preprocessing/detect_bad_channels.py @@ -304,7 +304,7 @@ def detect_bad_channels_ibl( n_neighbors : int, optional Number of neighbors to compute median fitler, by default 11 nyquist_threshold : float, optional - Threshold on Nyquist frequency to calcureclate HF noise band, by default 0.8 + Threshold on Nyquist frequency to calculate HF noise band, by default 0.8 welch_window_ms: float Window size for the scipy.signal.welch that will be converted to nperseg, by default 10ms Returns From ef0d66e6cfeea0b1f3392c5a0a8758194a9c884d Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Tue, 19 Sep 2023 09:27:18 +0200 Subject: [PATCH 56/90] Bringing back right searches --- src/spikeinterface/core/generate.py | 8 +++----- src/spikeinterface/curation/remove_duplicated_spikes.py | 3 ++- src/spikeinterface/postprocessing/spike_locations.py | 3 ++- .../sortingcomponents/motion_interpolation.py | 5 +++-- 4 files changed, 10 insertions(+), 9 deletions(-) diff --git 
a/src/spikeinterface/core/generate.py b/src/spikeinterface/core/generate.py index 6f85e76f1f..33f3dea923 100644 --- a/src/spikeinterface/core/generate.py +++ b/src/spikeinterface/core/generate.py @@ -1209,11 +1209,9 @@ def get_traces( else: traces = np.zeros([end_frame - start_frame, n_channels], dtype=self.dtype) - start, end = np.searchsorted( - self.spike_vector["sample_index"], - [start_frame - self.templates.shape[1], end_frame + self.templates.shape[1] + 1], - side="left", - ) + start = np.searchsorted(self.spike_vector["sample_index"], start_frame - self.templates.shape[1], side="left") + end = np.searchsorted(self.spike_vector["sample_index"], end_frame + self.templates.shape[1], side="right") + for i in range(start, end): spike = self.spike_vector[i] diff --git a/src/spikeinterface/curation/remove_duplicated_spikes.py b/src/spikeinterface/curation/remove_duplicated_spikes.py index d01ca1f6a1..04af69b37a 100644 --- a/src/spikeinterface/curation/remove_duplicated_spikes.py +++ b/src/spikeinterface/curation/remove_duplicated_spikes.py @@ -82,7 +82,8 @@ def get_unit_spike_train( if end_frame == None: end_frame = spike_train[-1] if len(spike_train) > 0 else 0 - start, end = np.searchsorted(spike_train, [start_frame, end_frame + 1], side="left") + start = np.searchsorted(spike_train, start_frame, side="left") + end = np.searchsorted(spike_train, end_frame, side="right") return spike_train[start:end] diff --git a/src/spikeinterface/postprocessing/spike_locations.py b/src/spikeinterface/postprocessing/spike_locations.py index 5f23e25b32..c6f498f7e8 100644 --- a/src/spikeinterface/postprocessing/spike_locations.py +++ b/src/spikeinterface/postprocessing/spike_locations.py @@ -77,7 +77,8 @@ def get_data(self, outputs="concatenated"): elif outputs == "by_unit": locations_by_unit = [] for segment_index in range(self.waveform_extractor.get_num_segments()): - i0, i1 = np.searchsorted(self.spikes["segment_index"], [segment_index, segment_index + 1], side="left") + i0 = np.searchsorted(self.spikes["segment_index"], segment_index, side="left") + i1 = np.searchsorted(self.spikes["segment_index"], segment_index, side="right") spikes = self.spikes[i0:i1] locations = self._extension_data["spike_locations"][i0:i1] diff --git a/src/spikeinterface/sortingcomponents/motion_interpolation.py b/src/spikeinterface/sortingcomponents/motion_interpolation.py index 18bb4f5a99..9a4cd688c5 100644 --- a/src/spikeinterface/sortingcomponents/motion_interpolation.py +++ b/src/spikeinterface/sortingcomponents/motion_interpolation.py @@ -155,8 +155,9 @@ def interpolate_motion_on_traces( **spatial_interpolation_kwargs, ) - i0, i1 = np.searchsorted(bin_inds, [bin_ind, bin_ind + 1], side="left") - + i0 = np.searchsorted(bin_inds, bin_ind, side="left") + i1 = np.searchsorted(bin_inds, bin_ind, side="right") + # here we use a simple np.matmul even if dirft_kernel can be super sparse. 
# because the speed for a sparse matmul is not so good when we disable multi threaad (due multi processing # in ChunkRecordingExecutor) From f2d702a7e20f7fb6459a18b17dd9a4881c1fe337 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 19 Sep 2023 07:27:40 +0000 Subject: [PATCH 57/90] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/core/generate.py | 1 - src/spikeinterface/sortingcomponents/motion_interpolation.py | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/src/spikeinterface/core/generate.py b/src/spikeinterface/core/generate.py index 33f3dea923..9adda4cb2b 100644 --- a/src/spikeinterface/core/generate.py +++ b/src/spikeinterface/core/generate.py @@ -1212,7 +1212,6 @@ def get_traces( start = np.searchsorted(self.spike_vector["sample_index"], start_frame - self.templates.shape[1], side="left") end = np.searchsorted(self.spike_vector["sample_index"], end_frame + self.templates.shape[1], side="right") - for i in range(start, end): spike = self.spike_vector[i] t = spike["sample_index"] diff --git a/src/spikeinterface/sortingcomponents/motion_interpolation.py b/src/spikeinterface/sortingcomponents/motion_interpolation.py index 9a4cd688c5..b4a44105e4 100644 --- a/src/spikeinterface/sortingcomponents/motion_interpolation.py +++ b/src/spikeinterface/sortingcomponents/motion_interpolation.py @@ -157,7 +157,7 @@ def interpolate_motion_on_traces( i0 = np.searchsorted(bin_inds, bin_ind, side="left") i1 = np.searchsorted(bin_inds, bin_ind, side="right") - + # here we use a simple np.matmul even if dirft_kernel can be super sparse. # because the speed for a sparse matmul is not so good when we disable multi threaad (due multi processing # in ChunkRecordingExecutor) From 9d07ec2fb467e4bc035f2e36566ea9a2aead772e Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Tue, 19 Sep 2023 09:31:02 +0200 Subject: [PATCH 58/90] One more --- src/spikeinterface/core/generate.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/spikeinterface/core/generate.py b/src/spikeinterface/core/generate.py index 9adda4cb2b..401c498f03 100644 --- a/src/spikeinterface/core/generate.py +++ b/src/spikeinterface/core/generate.py @@ -1109,9 +1109,8 @@ def __init__( num_samples = [num_samples] for segment_index in range(sorting.get_num_segments()): - start, end = np.searchsorted( - self.spike_vector["segment_index"], [segment_index, segment_index + 1], side="left" - ) + start = np.searchsorted(self.spike_vector["segment_index"], segment_index, side="left") + end = np.searchsorted(self.spike_vector["segment_index"], segment_index, side="right") spikes = self.spike_vector[start:end] amplitude_vec = amplitude_vector[start:end] if amplitude_vector is not None else None upsample_vec = upsample_vector[start:end] if upsample_vector is not None else None From e88b4b5da0b1d848bd910122a385b3f5fb01dc2c Mon Sep 17 00:00:00 2001 From: Garcia Samuel Date: Tue, 19 Sep 2023 11:04:43 +0200 Subject: [PATCH 59/90] Update src/spikeinterface/preprocessing/depth_order.py --- src/spikeinterface/preprocessing/depth_order.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/spikeinterface/preprocessing/depth_order.py b/src/spikeinterface/preprocessing/depth_order.py index 43c43a5843..55e34ba5dd 100644 --- a/src/spikeinterface/preprocessing/depth_order.py +++ b/src/spikeinterface/preprocessing/depth_order.py @@ -18,7 +18,7 @@ class 
DepthOrderRecording(ChannelSliceRecording): If str, it needs to be 'x', 'y', 'z'. If tuple or list, it sorts the locations in two dimensions using lexsort. This approach is recommended since there is less ambiguity, by default ('x', 'y') - flip: bool, default False + flip: bool, default: False If flip is False then the order is bottom first (starting from tip of the probe). If flip is True then the order is upper first. """ From b202c431a9f5d89bf7a5e92cf62acef64f040241 Mon Sep 17 00:00:00 2001 From: Garcia Samuel Date: Tue, 19 Sep 2023 11:05:23 +0200 Subject: [PATCH 60/90] Update src/spikeinterface/core/recording_tools.py --- src/spikeinterface/core/recording_tools.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/spikeinterface/core/recording_tools.py b/src/spikeinterface/core/recording_tools.py index 8236671a3b..ff9cd99389 100644 --- a/src/spikeinterface/core/recording_tools.py +++ b/src/spikeinterface/core/recording_tools.py @@ -316,7 +316,7 @@ def order_channels_by_depth(recording, channel_ids=None, dimensions=("x", "y"), If str, it needs to be 'x', 'y', 'z'. If tuple or list, it sorts the locations in two dimensions using lexsort. This approach is recommended since there is less ambiguity, by default ('x', 'y') - flip: bool, default False + flip: bool, default: False If flip is False then the order is bottom first (starting from tip of the probe). If flip is True then the order is upper first. From 73395fbd5a420be7d21e4017abcafb3d4a91d5ea Mon Sep 17 00:00:00 2001 From: Garcia Samuel Date: Tue, 19 Sep 2023 11:24:18 +0200 Subject: [PATCH 61/90] Update src/spikeinterface/preprocessing/detect_bad_channels.py --- src/spikeinterface/preprocessing/detect_bad_channels.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/spikeinterface/preprocessing/detect_bad_channels.py b/src/spikeinterface/preprocessing/detect_bad_channels.py index 3c712946eb..cc4e8601e2 100644 --- a/src/spikeinterface/preprocessing/detect_bad_channels.py +++ b/src/spikeinterface/preprocessing/detect_bad_channels.py @@ -17,7 +17,7 @@ def detect_bad_channels( n_neighbors=11, nyquist_threshold=0.8, direction="y", - chunk_duration_s=0.5, + chunk_duration_s=0.3, num_random_chunks=100, welch_window_ms=10.0, highpass_filter_cutoff=300, From 2f4d50a6651d4fc0ba568463df61a350d62ddd33 Mon Sep 17 00:00:00 2001 From: zm711 <92116279+zm711@users.noreply.github.com> Date: Tue, 19 Sep 2023 08:32:16 -0400 Subject: [PATCH 62/90] typo corrections, link corrections --- doc/development/development.rst | 6 +++--- doc/install_sorters.rst | 2 +- doc/modules/sorters.rst | 6 +++--- doc/modules/sortingcomponents.rst | 4 ++-- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/doc/development/development.rst b/doc/development/development.rst index f1371639c3..4704b9b1e6 100644 --- a/doc/development/development.rst +++ b/doc/development/development.rst @@ -14,7 +14,7 @@ There are various ways to contribute to SpikeInterface as a user or developer. S * Writing unit tests to expand code coverage and use case scenarios. * Reporting bugs and issues. -We use a forking workflow _ to manage contributions. Here's a summary of the steps involved, with more details available in the provided link: +We use a forking workflow ``_ to manage contributions. Here's a summary of the steps involved, with more details available in the provided link: * Fork the SpikeInterface repository. * Create a new branch (e.g., :code:`git switch -c my-contribution`). @@ -22,7 +22,7 @@ We use a forking workflow _ . 
+While we appreciate all the contributions please be mindful of the cost of reviewing pull requests ``_ . How to run tests locally @@ -201,7 +201,7 @@ Implement a new extractor SpikeInterface already supports over 30 file formats, but the acquisition system you use might not be among the supported formats list (***ref***). Most of the extractord rely on the `NEO `_ package to read information from files. -Therefore, to implement a new extractor to handle the unsupported format, we recommend make a new `neo.rawio `_ class. +Therefore, to implement a new extractor to handle the unsupported format, we recommend make a new :code:``neo.rawio ` class. Once that is done, the new class can be easily wrapped into SpikeInterface as an extension of the :py:class:`~spikeinterface.extractors.neoextractors.neobaseextractors.NeoBaseRecordingExtractor` (for :py:class:`~spikeinterface.core.BaseRecording` objects) or diff --git a/doc/install_sorters.rst b/doc/install_sorters.rst index 3fda05848c..10a3185c5c 100644 --- a/doc/install_sorters.rst +++ b/doc/install_sorters.rst @@ -117,7 +117,7 @@ Kilosort2.5 git clone https://github.com/MouseLand/Kilosort # provide installation path by setting the KILOSORT2_5_PATH environment variable - # or using Kilosort2_5Sorter.set_kilosort2_path() + # or using Kilosort2_5Sorter.set_kilosort2_5_path() * See also for Matlab/CUDA: https://www.mathworks.com/help/parallel-computing/gpu-support-by-release.html diff --git a/doc/modules/sorters.rst b/doc/modules/sorters.rst index 34ab3d1151..1b27ed442c 100644 --- a/doc/modules/sorters.rst +++ b/doc/modules/sorters.rst @@ -239,7 +239,7 @@ There are three options: 1. **released PyPi version**: if you installed :code:`spikeinterface` with :code:`pip install spikeinterface`, the latest released version will be installed in the container. -2. **development :code:`main` version**: if you installed :code:`spikeinterface` from source from the cloned repo +2. **development** :code:`main` **version**: if you installed :code:`spikeinterface` from source from the cloned repo (with :code:`pip install .`) or with :code:`pip install git+https://github.com/SpikeInterface/spikeinterface.git`, the current development version from the :code:`main` branch will be installed in the container. @@ -458,7 +458,7 @@ Here is the list of external sorters accessible using the run_sorter wrapper: * **Kilosort** :code:`run_sorter('kilosort')` * **Kilosort2** :code:`run_sorter('kilosort2')` * **Kilosort2.5** :code:`run_sorter('kilosort2_5')` -* **Kilosort3** :code:`run_sorter('Kilosort3')` +* **Kilosort3** :code:`run_sorter('kilosort3')` * **PyKilosort** :code:`run_sorter('pykilosort')` * **Klusta** :code:`run_sorter('klusta')` * **Mountainsort4** :code:`run_sorter('mountainsort4')` @@ -474,7 +474,7 @@ Here is the list of external sorters accessible using the run_sorter wrapper: Here a list of internal sorter based on `spikeinterface.sortingcomponents`; they are totally experimental for now: -* **Spyking circus2** :code:`run_sorter('spykingcircus2')` +* **Spyking Circus2** :code:`run_sorter('spykingcircus2')` * **Tridesclous2** :code:`run_sorter('tridesclous2')` In 2023, we expect to add many more sorters to this list. 
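The sorter names listed above are the strings passed to :code:`run_sorter`. A minimal sketch of a call (illustrative only: the :code:`recording` object and the output folder names are placeholders, and the chosen sorter must be installed locally or run through a container):

    from spikeinterface.sorters import run_sorter

    # external sorter, assuming a local Kilosort 2.5 installation
    sorting_ks = run_sorter(sorter_name="kilosort2_5", recording=recording, output_folder="ks25_output")

    # internal, sortingcomponents-based sorter shipped with spikeinterface itself
    sorting_sc2 = run_sorter(sorter_name="spykingcircus2", recording=recording, output_folder="sc2_output")
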
diff --git a/doc/modules/sortingcomponents.rst b/doc/modules/sortingcomponents.rst index aa62ea5b33..422eaea890 100644 --- a/doc/modules/sortingcomponents.rst +++ b/doc/modules/sortingcomponents.rst @@ -223,7 +223,7 @@ Here is a short example that depends on the output of "Motion interpolation": **Notes**: * :code:`spatial_interpolation_method` "kriging" or "iwd" do not play a big role. - * :code:`border_mode` is a very important parameter. It controls how to deal with the border because motion causes units on the + * :code:`border_mode` is a very important parameter. It controls dealing with the border because motion causes units on the border to not be present throughout the entire recording. We highly recommend the :code:`border_mode='remove_channels'` because this removes channels on the border that will be impacted by drift. Of course the larger the motion is the more channels are removed. @@ -278,7 +278,7 @@ At the moment, there are five methods implemented: * 'naive': a very naive implemenation used as a reference for benchmarks * 'tridesclous': the algorithm for template matching implemented in Tridesclous * 'circus': the algorithm for template matching implemented in SpyKING-Circus - * 'circus-omp': a updated algorithm similar to SpyKING-Circus but with OMP (orthogonal macthing + * 'circus-omp': a updated algorithm similar to SpyKING-Circus but with OMP (orthogonal matching pursuit) * 'wobble' : an algorithm loosely based on YASS that scales template amplitudes and shifts them in time to match detected spikes From 46c4ada52b95a7deeed4babf5bb40a9e775047d4 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Tue, 19 Sep 2023 14:45:53 +0200 Subject: [PATCH 63/90] Port plot_agreement_matrix to new widgets API --- .../widgets/_legacy_mpl_widgets/__init__.py | 2 +- .../_legacy_mpl_widgets/agreementmatrix.py | 91 ------------------- .../widgets/tests/test_widgets.py | 10 +- src/spikeinterface/widgets/widget_list.py | 3 + 4 files changed, 13 insertions(+), 93 deletions(-) delete mode 100644 src/spikeinterface/widgets/_legacy_mpl_widgets/agreementmatrix.py diff --git a/src/spikeinterface/widgets/_legacy_mpl_widgets/__init__.py b/src/spikeinterface/widgets/_legacy_mpl_widgets/__init__.py index c0dcd7ea6e..045b8acc8e 100644 --- a/src/spikeinterface/widgets/_legacy_mpl_widgets/__init__.py +++ b/src/spikeinterface/widgets/_legacy_mpl_widgets/__init__.py @@ -17,7 +17,7 @@ # comparison related from .confusionmatrix import plot_confusion_matrix, ConfusionMatrixWidget -from .agreementmatrix import plot_agreement_matrix, AgreementMatrixWidget + from .multicompgraph import ( plot_multicomp_graph, MultiCompGraphWidget, diff --git a/src/spikeinterface/widgets/_legacy_mpl_widgets/agreementmatrix.py b/src/spikeinterface/widgets/_legacy_mpl_widgets/agreementmatrix.py deleted file mode 100644 index 369746e99b..0000000000 --- a/src/spikeinterface/widgets/_legacy_mpl_widgets/agreementmatrix.py +++ /dev/null @@ -1,91 +0,0 @@ -import numpy as np - -from .basewidget import BaseWidget - - -class AgreementMatrixWidget(BaseWidget): - """ - Plots sorting comparison confusion matrix. - - Parameters - ---------- - sorting_comparison: GroundTruthComparison or SymmetricSortingComparison - The sorting comparison object. - Symetric or not. - ordered: bool - Order units with best agreement scores. - This enable to see agreement on a diagonal. - count_text: bool - If True counts are displayed as text - unit_ticks: bool - If True unit tick labels are displayed - figure: matplotlib figure - The figure to be used. 
If not given a figure is created - ax: matplotlib axis - The axis to be used. If not given an axis is created - """ - - def __init__(self, sorting_comparison, ordered=True, count_text=True, unit_ticks=True, figure=None, ax=None): - from matplotlib import pyplot as plt - - BaseWidget.__init__(self, figure, ax) - self._sc = sorting_comparison - self._ordered = ordered - self._count_text = count_text - self._unit_ticks = unit_ticks - self.name = "ConfusionMatrix" - - def plot(self): - self._do_plot() - - def _do_plot(self): - # a dataframe - if self._ordered: - scores = self._sc.get_ordered_agreement_scores() - else: - scores = self._sc.agreement_scores - - N1 = scores.shape[0] - N2 = scores.shape[1] - - unit_ids1 = scores.index.values - unit_ids2 = scores.columns.values - - # Using matshow here just because it sets the ticks up nicely. imshow is faster. - self.ax.matshow(scores.values, cmap="Greens") - - if self._count_text: - for i, u1 in enumerate(unit_ids1): - u2 = self._sc.best_match_12[u1] - if u2 != -1: - j = np.where(unit_ids2 == u2)[0][0] - - self.ax.text(j, i, "{:0.2f}".format(scores.at[u1, u2]), ha="center", va="center", color="white") - - # Major ticks - self.ax.set_xticks(np.arange(0, N2)) - self.ax.set_yticks(np.arange(0, N1)) - self.ax.xaxis.tick_bottom() - - # Labels for major ticks - if self._unit_ticks: - self.ax.set_yticklabels(scores.index, fontsize=12) - self.ax.set_xticklabels(scores.columns, fontsize=12) - - self.ax.set_xlabel(self._sc.name_list[1], fontsize=20) - self.ax.set_ylabel(self._sc.name_list[0], fontsize=20) - - self.ax.set_xlim(-0.5, N2 - 0.5) - self.ax.set_ylim( - N1 - 0.5, - -0.5, - ) - - -def plot_agreement_matrix(*args, **kwargs): - W = AgreementMatrixWidget(*args, **kwargs) - W.plot() - return W - - -plot_agreement_matrix.__doc__ = AgreementMatrixWidget.__doc__ diff --git a/src/spikeinterface/widgets/tests/test_widgets.py b/src/spikeinterface/widgets/tests/test_widgets.py index a5f75ebf50..2f11e5ee3c 100644 --- a/src/spikeinterface/widgets/tests/test_widgets.py +++ b/src/spikeinterface/widgets/tests/test_widgets.py @@ -324,6 +324,13 @@ def test_sorting_summary(self): sw.plot_sorting_summary(self.we, backend=backend, **self.backend_kwargs[backend]) sw.plot_sorting_summary(self.we_sparse, backend=backend, **self.backend_kwargs[backend]) + def test_plot_agreement_matrix(self): + possible_backends = list(sw.AgreementMatrixWidget.get_possible_backends()) + for backend in possible_backends: + if backend not in self.skip_backends: + sw.plot_agreement_matrix(self.gt_comp) + + if __name__ == "__main__": # unittest.main() @@ -344,7 +351,8 @@ def test_sorting_summary(self): # mytest.test_unit_locations() # mytest.test_quality_metrics() # mytest.test_template_metrics() - mytest.test_amplitudes() + # mytest.test_amplitudes() + mytest.test_plot_agreement_matrix() # plt.ion() plt.show() diff --git a/src/spikeinterface/widgets/widget_list.py b/src/spikeinterface/widgets/widget_list.py index 9c89b3981e..22b33e38aa 100644 --- a/src/spikeinterface/widgets/widget_list.py +++ b/src/spikeinterface/widgets/widget_list.py @@ -2,6 +2,7 @@ from .base import backend_kwargs_desc +from .agreement_matrix import AgreementMatrixWidget from .all_amplitudes_distributions import AllAmplitudesDistributionsWidget from .amplitudes import AmplitudesWidget from .autocorrelograms import AutoCorrelogramsWidget @@ -23,6 +24,7 @@ widget_list = [ + AgreementMatrixWidget, AllAmplitudesDistributionsWidget, AmplitudesWidget, AutoCorrelogramsWidget, @@ -76,6 +78,7 @@ # make function for all widgets 
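# (each widget class registered in widget_list is also exposed as a module-level
# functional alias; calling the alias instantiates the class, and BaseWidget then
# dispatches the actual rendering to the selected backend)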
+plot_agreement_matrix = AgreementMatrixWidget plot_all_amplitudes_distributions = AllAmplitudesDistributionsWidget plot_amplitudes = AmplitudesWidget plot_autocorrelograms = AutoCorrelogramsWidget From e49071e38394c039d70cbc083c8b5a2cbb785b1b Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Tue, 19 Sep 2023 14:53:01 +0200 Subject: [PATCH 64/90] Port plot_confusion_matrix to new API. --- .../widgets/_legacy_mpl_widgets/__init__.py | 3 - .../_legacy_mpl_widgets/confusionmatrix.py | 91 ------------------- .../widgets/tests/test_widgets.py | 9 +- src/spikeinterface/widgets/widget_list.py | 3 + 4 files changed, 11 insertions(+), 95 deletions(-) delete mode 100644 src/spikeinterface/widgets/_legacy_mpl_widgets/confusionmatrix.py diff --git a/src/spikeinterface/widgets/_legacy_mpl_widgets/__init__.py b/src/spikeinterface/widgets/_legacy_mpl_widgets/__init__.py index 045b8acc8e..6013512022 100644 --- a/src/spikeinterface/widgets/_legacy_mpl_widgets/__init__.py +++ b/src/spikeinterface/widgets/_legacy_mpl_widgets/__init__.py @@ -15,9 +15,6 @@ # units on probe from .unitprobemap import plot_unit_probe_map, UnitProbeMapWidget -# comparison related -from .confusionmatrix import plot_confusion_matrix, ConfusionMatrixWidget - from .multicompgraph import ( plot_multicomp_graph, MultiCompGraphWidget, diff --git a/src/spikeinterface/widgets/_legacy_mpl_widgets/confusionmatrix.py b/src/spikeinterface/widgets/_legacy_mpl_widgets/confusionmatrix.py deleted file mode 100644 index 942b613fbf..0000000000 --- a/src/spikeinterface/widgets/_legacy_mpl_widgets/confusionmatrix.py +++ /dev/null @@ -1,91 +0,0 @@ -import numpy as np - -from .basewidget import BaseWidget - - -class ConfusionMatrixWidget(BaseWidget): - """ - Plots sorting comparison confusion matrix. - - Parameters - ---------- - gt_comparison: GroundTruthComparison - The ground truth sorting comparison object - count_text: bool - If True counts are displayed as text - unit_ticks: bool - If True unit tick labels are displayed - figure: matplotlib figure - The figure to be used. If not given a figure is created - ax: matplotlib axis - The axis to be used. If not given an axis is created - - Returns - ------- - W: ConfusionMatrixWidget - The output widget - """ - - def __init__(self, gt_comparison, count_text=True, unit_ticks=True, figure=None, ax=None): - from matplotlib import pyplot as plt - - BaseWidget.__init__(self, figure, ax) - self._gtcomp = gt_comparison - self._count_text = count_text - self._unit_ticks = unit_ticks - self.name = "ConfusionMatrix" - - def plot(self): - self._do_plot() - - def _do_plot(self): - # a dataframe - confusion_matrix = self._gtcomp.get_confusion_matrix() - - N1 = confusion_matrix.shape[0] - 1 - N2 = confusion_matrix.shape[1] - 1 - - # Using matshow here just because it sets the ticks up nicely. imshow is faster. 
- self.ax.matshow(confusion_matrix.values, cmap="Greens") - - if self._count_text: - for (i, j), z in np.ndenumerate(confusion_matrix.values): - if z != 0: - if z > np.max(confusion_matrix.values) / 2.0: - self.ax.text(j, i, "{:d}".format(z), ha="center", va="center", color="white") - else: - self.ax.text(j, i, "{:d}".format(z), ha="center", va="center", color="black") - - self.ax.axhline(int(N1 - 1) + 0.5, color="black") - self.ax.axvline(int(N2 - 1) + 0.5, color="black") - - # Major ticks - self.ax.set_xticks(np.arange(0, N2 + 1)) - self.ax.set_yticks(np.arange(0, N1 + 1)) - self.ax.xaxis.tick_bottom() - - # Labels for major ticks - if self._unit_ticks: - self.ax.set_yticklabels(confusion_matrix.index, fontsize=12) - self.ax.set_xticklabels(confusion_matrix.columns, fontsize=12) - else: - self.ax.set_xticklabels(np.append([""] * N2, "FN"), fontsize=10) - self.ax.set_yticklabels(np.append([""] * N1, "FP"), fontsize=10) - - self.ax.set_xlabel(self._gtcomp.name_list[1], fontsize=20) - self.ax.set_ylabel(self._gtcomp.name_list[0], fontsize=20) - - self.ax.set_xlim(-0.5, N2 + 0.5) - self.ax.set_ylim( - N1 + 0.5, - -0.5, - ) - - -def plot_confusion_matrix(*args, **kwargs): - W = ConfusionMatrixWidget(*args, **kwargs) - W.plot() - return W - - -plot_confusion_matrix.__doc__ = ConfusionMatrixWidget.__doc__ diff --git a/src/spikeinterface/widgets/tests/test_widgets.py b/src/spikeinterface/widgets/tests/test_widgets.py index 2f11e5ee3c..0aa309f748 100644 --- a/src/spikeinterface/widgets/tests/test_widgets.py +++ b/src/spikeinterface/widgets/tests/test_widgets.py @@ -330,6 +330,12 @@ def test_plot_agreement_matrix(self): if backend not in self.skip_backends: sw.plot_agreement_matrix(self.gt_comp) + def test_plot_confusion_matrix(self): + possible_backends = list(sw.AgreementMatrixWidget.get_possible_backends()) + for backend in possible_backends: + if backend not in self.skip_backends: + sw.plot_confusion_matrix(self.gt_comp) + if __name__ == "__main__": @@ -352,7 +358,8 @@ def test_plot_agreement_matrix(self): # mytest.test_quality_metrics() # mytest.test_template_metrics() # mytest.test_amplitudes() - mytest.test_plot_agreement_matrix() + # mytest.test_plot_agreement_matrix() + mytest.test_plot_confusion_matrix() # plt.ion() plt.show() diff --git a/src/spikeinterface/widgets/widget_list.py b/src/spikeinterface/widgets/widget_list.py index 22b33e38aa..d02aa7de7a 100644 --- a/src/spikeinterface/widgets/widget_list.py +++ b/src/spikeinterface/widgets/widget_list.py @@ -6,6 +6,7 @@ from .all_amplitudes_distributions import AllAmplitudesDistributionsWidget from .amplitudes import AmplitudesWidget from .autocorrelograms import AutoCorrelogramsWidget +from .confusion_matrix import ConfusionMatrixWidget from .crosscorrelograms import CrossCorrelogramsWidget from .motion import MotionWidget from .quality_metrics import QualityMetricsWidget @@ -28,6 +29,7 @@ AllAmplitudesDistributionsWidget, AmplitudesWidget, AutoCorrelogramsWidget, + ConfusionMatrixWidget, CrossCorrelogramsWidget, MotionWidget, QualityMetricsWidget, @@ -82,6 +84,7 @@ plot_all_amplitudes_distributions = AllAmplitudesDistributionsWidget plot_amplitudes = AmplitudesWidget plot_autocorrelograms = AutoCorrelogramsWidget +plot_confusion_matrix = ConfusionMatrixWidget plot_crosscorrelograms = CrossCorrelogramsWidget plot_motion = MotionWidget plot_quality_metrics = QualityMetricsWidget From b90e35b9df6bb03bac2a7c3e76e36c79c3f68af3 Mon Sep 17 00:00:00 2001 From: Zach McKenzie <92116279+zm711@users.noreply.github.com> Date: Tue, 19 Sep 2023 
08:56:48 -0400 Subject: [PATCH 65/90] Update doc/development/development.rst Co-authored-by: Alessio Buccino --- doc/development/development.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/development/development.rst b/doc/development/development.rst index 4704b9b1e6..7656da11ab 100644 --- a/doc/development/development.rst +++ b/doc/development/development.rst @@ -201,7 +201,7 @@ Implement a new extractor SpikeInterface already supports over 30 file formats, but the acquisition system you use might not be among the supported formats list (***ref***). Most of the extractord rely on the `NEO `_ package to read information from files. -Therefore, to implement a new extractor to handle the unsupported format, we recommend make a new :code:``neo.rawio ` class. +Therefore, to implement a new extractor to handle the unsupported format, we recommend make a new :code:`neo.rawio.BaseRawIO` class (see `example `_). Once that is done, the new class can be easily wrapped into SpikeInterface as an extension of the :py:class:`~spikeinterface.extractors.neoextractors.neobaseextractors.NeoBaseRecordingExtractor` (for :py:class:`~spikeinterface.core.BaseRecording` objects) or From b8023d0733e48b8bc96d50c763753a7da1b3a5d5 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Tue, 19 Sep 2023 16:16:40 +0200 Subject: [PATCH 66/90] Add read_binary and read_zarr functions to extractord and docs API --- doc/api.rst | 11 ++++++----- src/spikeinterface/extractors/extractorlist.py | 2 ++ 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/doc/api.rst b/doc/api.rst index 43f79386e6..122c88d01b 100644 --- a/doc/api.rst +++ b/doc/api.rst @@ -19,6 +19,8 @@ spikeinterface.core .. autofunction:: extract_waveforms .. autofunction:: load_waveforms .. autofunction:: compute_sparsity + .. autoclass:: ChannelSparsity + :members: .. autoclass:: BinaryRecordingExtractor .. autoclass:: ZarrRecordingExtractor .. autoclass:: BinaryFolderRecording @@ -48,10 +50,6 @@ spikeinterface.core .. autofunction:: get_template_extremum_channel .. autofunction:: get_template_extremum_channel_peak_shift .. autofunction:: get_template_extremum_amplitude - -.. - .. autofunction:: read_binary - .. autofunction:: read_zarr .. autofunction:: append_recordings .. autofunction:: concatenate_recordings .. autofunction:: split_recording @@ -59,6 +57,8 @@ spikeinterface.core .. autofunction:: append_sortings .. autofunction:: split_sorting .. autofunction:: select_segment_sorting + .. autofunction:: read_binary + .. autofunction:: read_zarr Low-level ~~~~~~~~~ @@ -67,7 +67,6 @@ Low-level :noindex: .. autoclass:: BaseWaveformExtractorExtension - .. autoclass:: ChannelSparsity .. autoclass:: ChunkRecordingExecutor spikeinterface.extractors @@ -83,6 +82,7 @@ NEO-based .. autofunction:: read_alphaomega_event .. autofunction:: read_axona .. autofunction:: read_biocam + .. autofunction:: read_binary .. autofunction:: read_blackrock .. autofunction:: read_ced .. autofunction:: read_intan @@ -104,6 +104,7 @@ NEO-based .. autofunction:: read_spikegadgets .. autofunction:: read_spikeglx .. autofunction:: read_tdt + .. 
autofunction:: read_zarr Non-NEO-based diff --git a/src/spikeinterface/extractors/extractorlist.py b/src/spikeinterface/extractors/extractorlist.py index ebff40fae0..235dd705dc 100644 --- a/src/spikeinterface/extractors/extractorlist.py +++ b/src/spikeinterface/extractors/extractorlist.py @@ -11,6 +11,8 @@ NumpySorting, NpySnippetsExtractor, ZarrRecordingExtractor, + read_binary, + read_zarr, ) # sorting/recording/event from neo From 0b2ac19982024f61cdcb4dc886e54ea813b962b6 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Tue, 19 Sep 2023 17:04:43 +0200 Subject: [PATCH 67/90] Fix Kilosort Phy reader docstrings --- .../extractors/phykilosortextractors.py | 22 ++++++++++++++----- 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/src/spikeinterface/extractors/phykilosortextractors.py b/src/spikeinterface/extractors/phykilosortextractors.py index c91aed644d..2769e03344 100644 --- a/src/spikeinterface/extractors/phykilosortextractors.py +++ b/src/spikeinterface/extractors/phykilosortextractors.py @@ -17,6 +17,10 @@ class BasePhyKilosortSortingExtractor(BaseSorting): Cluster groups to exclude (e.g. "noise" or ["noise", "mua"]). keep_good_only : bool, default: True Whether to only keep good units. + remove_empty_units : bool, default: True + If True, empty units are removed from the sorting extractor. + load_all_cluster_properties : bool, default: True + If True, all cluster properties are loaded from the tsv/csv files. """ extractor_name = "BasePhyKilosortSorting" @@ -197,18 +201,26 @@ class PhySortingExtractor(BasePhyKilosortSortingExtractor): Path to the output Phy folder (containing the params.py). exclude_cluster_groups: list or str, optional Cluster groups to exclude (e.g. "noise" or ["noise", "mua"]). + load_all_cluster_properties : bool, default: True + If True, all cluster properties are loaded from the tsv/csv files. Returns ------- extractor : PhySortingExtractor - The loaded data. + The loaded Sorting object. """ extractor_name = "PhySorting" name = "phy" - def __init__(self, folder_path, exclude_cluster_groups=None): - BasePhyKilosortSortingExtractor.__init__(self, folder_path, exclude_cluster_groups, keep_good_only=False) + def __init__(self, folder_path, exclude_cluster_groups=None, load_all_cluster_properties=True): + BasePhyKilosortSortingExtractor.__init__( + self, + folder_path, + exclude_cluster_groups, + keep_good_only=False, + load_all_cluster_properties=load_all_cluster_properties, + ) self._kwargs = { "folder_path": str(Path(folder_path).absolute()), @@ -223,8 +235,6 @@ class KiloSortSortingExtractor(BasePhyKilosortSortingExtractor): ---------- folder_path: str or Path Path to the output Phy folder (containing the params.py). - exclude_cluster_groups: list or str, optional - Cluster groups to exclude (e.g. "noise" or ["noise", "mua"]). keep_good_only : bool, default: True Whether to only keep good units. If True, only Kilosort-labeled 'good' units are returned. @@ -234,7 +244,7 @@ class KiloSortSortingExtractor(BasePhyKilosortSortingExtractor): Returns ------- extractor : KiloSortSortingExtractor - The loaded data. + The loaded Sorting object. 
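    Examples
    --------
    >>> from spikeinterface.extractors import KiloSortSortingExtractor
    >>> # hypothetical output folder containing params.py; adjust to your data
    >>> sorting = KiloSortSortingExtractor("kilosort_output_folder", keep_good_only=True)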
""" extractor_name = "KiloSortSorting" From 3d792951a6036849b5d82ea523bb6cc20e784a07 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Tue, 19 Sep 2023 17:09:32 +0200 Subject: [PATCH 68/90] port plot_probe_map() to new widgets API --- .../widgets/_legacy_mpl_widgets/__init__.py | 1 - .../widgets/_legacy_mpl_widgets/probemap.py | 77 ------------------- .../widgets/tests/test_widgets.py | 8 +- src/spikeinterface/widgets/widget_list.py | 3 + 4 files changed, 10 insertions(+), 79 deletions(-) delete mode 100644 src/spikeinterface/widgets/_legacy_mpl_widgets/probemap.py diff --git a/src/spikeinterface/widgets/_legacy_mpl_widgets/__init__.py b/src/spikeinterface/widgets/_legacy_mpl_widgets/__init__.py index 6013512022..af1419fb11 100644 --- a/src/spikeinterface/widgets/_legacy_mpl_widgets/__init__.py +++ b/src/spikeinterface/widgets/_legacy_mpl_widgets/__init__.py @@ -1,7 +1,6 @@ # basics # from .timeseries import plot_timeseries, TracesWidget from .rasters import plot_rasters, RasterWidget -from .probemap import plot_probe_map, ProbeMapWidget # isi/ccg/acg from .isidistribution import plot_isi_distribution, ISIDistributionWidget diff --git a/src/spikeinterface/widgets/_legacy_mpl_widgets/probemap.py b/src/spikeinterface/widgets/_legacy_mpl_widgets/probemap.py deleted file mode 100644 index 6e6578a4c4..0000000000 --- a/src/spikeinterface/widgets/_legacy_mpl_widgets/probemap.py +++ /dev/null @@ -1,77 +0,0 @@ -import numpy as np - -from .basewidget import BaseWidget - - -class ProbeMapWidget(BaseWidget): - """ - Plot the probe of a recording. - - Parameters - ---------- - recording: RecordingExtractor - The recording extractor object - channel_ids: list - The channel ids to display - with_channel_ids: bool False default - Add channel ids text on the probe - figure: matplotlib figure - The figure to be used. If not given a figure is created - ax: matplotlib axis - The axis to be used. 
If not given an axis is created - **plot_probe_kwargs: keyword arguments for probeinterface.plotting.plot_probe_group() function - - Returns - ------- - W: ProbeMapWidget - The output widget - """ - - def __init__(self, recording, channel_ids=None, with_channel_ids=False, figure=None, ax=None, **plot_probe_kwargs): - import matplotlib.pylab as plt - from probeinterface.plotting import plot_probe, get_auto_lims - - BaseWidget.__init__(self, figure, ax) - - if channel_ids is not None: - recording = recording.channel_slice(channel_ids) - self._recording = recording - self._probegroup = recording.get_probegroup() - self.with_channel_ids = with_channel_ids - self._plot_probe_kwargs = plot_probe_kwargs - - def plot(self): - self._do_plot() - - def _do_plot(self): - from probeinterface.plotting import get_auto_lims - - xlims, ylims, zlims = get_auto_lims(self._probegroup.probes[0]) - for i, probe in enumerate(self._probegroup.probes): - xlims2, ylims2, _ = get_auto_lims(probe) - xlims = min(xlims[0], xlims2[0]), max(xlims[1], xlims2[1]) - ylims = min(ylims[0], ylims2[0]), max(ylims[1], ylims2[1]) - - self._plot_probe_kwargs["title"] = False - pos = 0 - text_on_contact = None - for i, probe in enumerate(self._probegroup.probes): - n = probe.get_contact_count() - if self.with_channel_ids: - text_on_contact = self._recording.channel_ids[pos : pos + n] - pos += n - from probeinterface.plotting import plot_probe - - plot_probe(probe, ax=self.ax, text_on_contact=text_on_contact, **self._plot_probe_kwargs) - - self.ax.set_xlim(*xlims) - self.ax.set_ylim(*ylims) - - -def plot_probe_map(*args, **kwargs): - W = ProbeMapWidget(*args, **kwargs) - W.plot() - return W - - -plot_probe_map.__doc__ = ProbeMapWidget.__doc__ diff --git a/src/spikeinterface/widgets/tests/test_widgets.py b/src/spikeinterface/widgets/tests/test_widgets.py index 0aa309f748..bc0ec68041 100644 --- a/src/spikeinterface/widgets/tests/test_widgets.py +++ b/src/spikeinterface/widgets/tests/test_widgets.py @@ -336,6 +336,11 @@ def test_plot_confusion_matrix(self): if backend not in self.skip_backends: sw.plot_confusion_matrix(self.gt_comp) + def test_plot_probe_map(self): + possible_backends = list(sw.ProbeMapWidget.get_possible_backends()) + for backend in possible_backends: + if backend not in self.skip_backends: + sw.plot_probe_map(self.recording, with_channel_ids=True, with_contact_id=True) if __name__ == "__main__": @@ -359,7 +364,8 @@ def test_plot_confusion_matrix(self): # mytest.test_template_metrics() # mytest.test_amplitudes() # mytest.test_plot_agreement_matrix() - mytest.test_plot_confusion_matrix() + # mytest.test_plot_confusion_matrix() + mytest.test_plot_probe_map() # plt.ion() plt.show() diff --git a/src/spikeinterface/widgets/widget_list.py b/src/spikeinterface/widgets/widget_list.py index d02aa7de7a..77db17029f 100644 --- a/src/spikeinterface/widgets/widget_list.py +++ b/src/spikeinterface/widgets/widget_list.py @@ -9,6 +9,7 @@ from .confusion_matrix import ConfusionMatrixWidget from .crosscorrelograms import CrossCorrelogramsWidget from .motion import MotionWidget +from .probe_map import ProbeMapWidget from .quality_metrics import QualityMetricsWidget from .sorting_summary import SortingSummaryWidget from .spike_locations import SpikeLocationsWidget @@ -32,6 +33,7 @@ ConfusionMatrixWidget, CrossCorrelogramsWidget, MotionWidget, + ProbeMapWidget, QualityMetricsWidget, SortingSummaryWidget, SpikeLocationsWidget, @@ -87,6 +89,7 @@ plot_confusion_matrix = ConfusionMatrixWidget plot_crosscorrelograms = CrossCorrelogramsWidget 
plot_motion = MotionWidget +plot_probe_map = ProbeMapWidget plot_quality_metrics = QualityMetricsWidget plot_sorting_summary = SortingSummaryWidget plot_spike_locations = SpikeLocationsWidget From 2d4f7692196388a0d9a27808c3c4f8002090247f Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Tue, 19 Sep 2023 17:40:37 +0200 Subject: [PATCH 69/90] For connoisseur only: add a simple "ephyviewer" backend plot_traces(). --- src/spikeinterface/widgets/base.py | 2 ++ .../widgets/tests/test_widgets.py | 4 +-- src/spikeinterface/widgets/traces.py | 26 +++++++++++++++++++ 3 files changed, 30 insertions(+), 2 deletions(-) diff --git a/src/spikeinterface/widgets/base.py b/src/spikeinterface/widgets/base.py index dea46b8f51..4ed83fcca9 100644 --- a/src/spikeinterface/widgets/base.py +++ b/src/spikeinterface/widgets/base.py @@ -39,12 +39,14 @@ def set_default_plotter_backend(backend): "height_cm": "Height of the figure in cm (default 6)", "display": "If True, widgets are immediately displayed", }, + "ephyviewer": {}, } default_backend_kwargs = { "matplotlib": {"figure": None, "ax": None, "axes": None, "ncols": 5, "figsize": None, "figtitle": None}, "sortingview": {"generate_url": True, "display": True, "figlabel": None, "height": None}, "ipywidgets": {"width_cm": 25, "height_cm": 10, "display": True}, + "ephyviewer": {}, } diff --git a/src/spikeinterface/widgets/tests/test_widgets.py b/src/spikeinterface/widgets/tests/test_widgets.py index a5f75ebf50..7386167d0b 100644 --- a/src/spikeinterface/widgets/tests/test_widgets.py +++ b/src/spikeinterface/widgets/tests/test_widgets.py @@ -72,7 +72,7 @@ def setUpClass(cls): else: cls.we_sparse = cls.we.save(folder=cache_folder / "mearec_test_sparse", sparsity=cls.sparsity_radius) - cls.skip_backends = ["ipywidgets"] + cls.skip_backends = ["ipywidgets", "ephyviewer"] if ON_GITHUB and not KACHERY_CLOUD_SET: cls.skip_backends.append("sortingview") @@ -344,7 +344,7 @@ def test_sorting_summary(self): # mytest.test_unit_locations() # mytest.test_quality_metrics() # mytest.test_template_metrics() - mytest.test_amplitudes() + # mytest.test_amplitudes() # plt.ion() plt.show() diff --git a/src/spikeinterface/widgets/traces.py b/src/spikeinterface/widgets/traces.py index e025f779c1..e046623eb7 100644 --- a/src/spikeinterface/widgets/traces.py +++ b/src/spikeinterface/widgets/traces.py @@ -523,6 +523,32 @@ def plot_sortingview(self, data_plot, **backend_kwargs): backend_kwargs["display"] = False self.url = handle_display_and_url(self, self.view, **backend_kwargs) + + def plot_ephyviewer(self, data_plot, **backend_kwargs): + import ephyviewer + from ..preprocessing import depth_order + + dp = to_attr(data_plot) + + app = ephyviewer.mkQApp() + win = ephyviewer.MainViewer(debug=False, show_auto_scale=True) + + for k, rec in dp.recordings.items(): + + if dp.order_channel_by_depth: + rec = depth_order(rec, flip=True) + + sig_source = ephyviewer.SpikeInterfaceRecordingSource(recording=rec) + view = ephyviewer.TraceViewer(source=sig_source, name=k) + view.params['scale_mode'] = 'by_channel' + if dp.show_channel_ids: + view.params['display_labels'] = True + view.auto_scale() + win.add_view(view) + + win.show() + app.exec() + def _get_trace_list(recordings, channel_ids, time_range, segment_index, order=None, return_scaled=False): From 5c0bdbb546fd121db38cc9c5123360f7534eb94a Mon Sep 17 00:00:00 2001 From: Garcia Samuel Date: Tue, 19 Sep 2023 17:59:48 +0200 Subject: [PATCH 70/90] Suggestions from Alessio Co-authored-by: Alessio Buccino --- src/spikeinterface/sorters/launcher.py | 8 
++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/spikeinterface/sorters/launcher.py b/src/spikeinterface/sorters/launcher.py index d04a89fdf1..d6506cade5 100644 --- a/src/spikeinterface/sorters/launcher.py +++ b/src/spikeinterface/sorters/launcher.py @@ -51,9 +51,9 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=Fal Where *blocking* means that this function is blocking until the results are returned. This is in opposition to *asynchronous*, where the function returns `None` almost immediately (aka non-blocking), - but the results must be retrieved by hand when jobs are finished. No mechanisim is provided here to be aware + but the results must be retrieved by hand when jobs are finished. No mechanisim is provided here to be know when jobs are finish. - In this *asynchronous* case, the :py:func:read_sorter_folder() helps to retrieve individual results. + In this *asynchronous* case, the :py:func:`~spikeinterface.sorters.read_sorter_folder()` helps to retrieve individual results. Parameters @@ -82,7 +82,7 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=Fal engine_kwargs = engine_kwargs_ if return_output: - assert engine in ("loop", "joblib", "processpoolexecutor") + assert engine in ("loop", "joblib", "processpoolexecutor"), "Only 'loop', 'joblib', and 'processpoolexecutor' support return_output=True." out = [] else: out = None @@ -355,7 +355,7 @@ def run_sorters( """ warnings.warn( - "run_sorters()is deprecated please use run_sorter_jobs() instead. This will be removed in 0.100", + "run_sorters() is deprecated please use run_sorter_jobs() instead. This will be removed in 0.100", DeprecationWarning, stacklevel=2, ) From 0ecf83b46dacf5426b7f55157f0d48497eb52245 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Tue, 19 Sep 2023 18:02:51 +0200 Subject: [PATCH 71/90] add read_sorter_folder in api.rst --- doc/api.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/api.rst b/doc/api.rst index b605127426..8b269fc685 100644 --- a/doc/api.rst +++ b/doc/api.rst @@ -219,6 +219,7 @@ spikeinterface.sorters .. autofunction:: run_sorter_jobs .. autofunction:: run_sorters .. autofunction:: run_sorter_by_property + .. autofunction:: read_sorter_folder Low level ~~~~~~~~~ From 60e8989d3207f9ad213d96484c767a53b7e535a2 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 19 Sep 2023 16:05:28 +0000 Subject: [PATCH 72/90] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/sorters/launcher.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/spikeinterface/sorters/launcher.py b/src/spikeinterface/sorters/launcher.py index d6506cade5..f32a468a22 100644 --- a/src/spikeinterface/sorters/launcher.py +++ b/src/spikeinterface/sorters/launcher.py @@ -82,7 +82,11 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=Fal engine_kwargs = engine_kwargs_ if return_output: - assert engine in ("loop", "joblib", "processpoolexecutor"), "Only 'loop', 'joblib', and 'processpoolexecutor' support return_output=True." + assert engine in ( + "loop", + "joblib", + "processpoolexecutor", + ), "Only 'loop', 'joblib', and 'processpoolexecutor' support return_output=True." 
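        # Typical call pattern for the blocking engines checked above (illustrative
        # sketch; the job_list entries are dicts of run_sorter() keyword arguments and
        # all names are placeholders):
        #   job_list = [
        #       dict(sorter_name="tridesclous2", recording=rec_a, output_folder="out_a"),
        #       dict(sorter_name="tridesclous2", recording=rec_b, output_folder="out_b"),
        #   ]
        #   sortings = run_sorter_jobs(job_list, engine="joblib", engine_kwargs={"n_jobs": 2}, return_output=True)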
out = [] else: out = None From 45012894a558a59903e7b87f235d5f85f7637711 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Tue, 19 Sep 2023 18:41:27 +0200 Subject: [PATCH 73/90] Port plot_raster() to new API. --- .../widgets/_legacy_mpl_widgets/__init__.py | 4 - .../widgets/_legacy_mpl_widgets/rasters.py | 120 --------- .../tests/test_widgets_legacy.py | 48 +--- .../_legacy_mpl_widgets/timeseries_.py | 233 ------------------ .../widgets/tests/test_widgets.py | 10 +- src/spikeinterface/widgets/widget_list.py | 3 + 6 files changed, 13 insertions(+), 405 deletions(-) delete mode 100644 src/spikeinterface/widgets/_legacy_mpl_widgets/rasters.py delete mode 100644 src/spikeinterface/widgets/_legacy_mpl_widgets/timeseries_.py diff --git a/src/spikeinterface/widgets/_legacy_mpl_widgets/__init__.py b/src/spikeinterface/widgets/_legacy_mpl_widgets/__init__.py index af1419fb11..9593f14d1c 100644 --- a/src/spikeinterface/widgets/_legacy_mpl_widgets/__init__.py +++ b/src/spikeinterface/widgets/_legacy_mpl_widgets/__init__.py @@ -1,7 +1,3 @@ -# basics -# from .timeseries import plot_timeseries, TracesWidget -from .rasters import plot_rasters, RasterWidget - # isi/ccg/acg from .isidistribution import plot_isi_distribution, ISIDistributionWidget diff --git a/src/spikeinterface/widgets/_legacy_mpl_widgets/rasters.py b/src/spikeinterface/widgets/_legacy_mpl_widgets/rasters.py deleted file mode 100644 index d05373103e..0000000000 --- a/src/spikeinterface/widgets/_legacy_mpl_widgets/rasters.py +++ /dev/null @@ -1,120 +0,0 @@ -import numpy as np - -from .basewidget import BaseWidget - - -class RasterWidget(BaseWidget): - """ - Plots spike train rasters. - - Parameters - ---------- - sorting: SortingExtractor - The sorting extractor object - segment_index: None or int - The segment index. - unit_ids: list - List of unit ids - time_range: list - List with start time and end time - color: matplotlib color - The color to be used - figure: matplotlib figure - The figure to be used. If not given a figure is created - ax: matplotlib axis - The axis to be used. 
If not given an axis is created - - Returns - ------- - W: RasterWidget - The output widget - """ - - def __init__(self, sorting, segment_index=None, unit_ids=None, time_range=None, color="k", figure=None, ax=None): - from matplotlib import pyplot as plt - - BaseWidget.__init__(self, figure, ax) - self._sorting = sorting - - if segment_index is None: - nseg = sorting.get_num_segments() - if nseg != 1: - raise ValueError("You must provide segment_index=...") - else: - segment_index = 0 - self.segment_index = segment_index - - self._unit_ids = unit_ids - self._figure = None - self._sampling_frequency = sorting.get_sampling_frequency() - self._color = color - self._max_frame = 0 - for unit_id in self._sorting.get_unit_ids(): - spike_train = self._sorting.get_unit_spike_train(unit_id, segment_index=self.segment_index) - if len(spike_train) > 0: - curr_max_frame = np.max(spike_train) - if curr_max_frame > self._max_frame: - self._max_frame = curr_max_frame - self._visible_trange = time_range - if self._visible_trange is None: - self._visible_trange = [0, self._max_frame] - else: - assert len(time_range) == 2, "'time_range' should be a list with start and end time in seconds" - self._visible_trange = [int(t * self._sampling_frequency) for t in time_range] - - self._visible_trange = self._fix_trange(self._visible_trange) - self.name = "Raster" - - def plot(self): - self._do_plot() - - def _do_plot(self): - units_ids = self._unit_ids - if units_ids is None: - units_ids = self._sorting.get_unit_ids() - import matplotlib.pyplot as plt - - with plt.rc_context({"axes.edgecolor": "gray"}): - for u_i, unit_id in enumerate(units_ids): - spiketrain = self._sorting.get_unit_spike_train( - unit_id, - start_frame=self._visible_trange[0], - end_frame=self._visible_trange[1], - segment_index=self.segment_index, - ) - spiketimes = spiketrain / float(self._sampling_frequency) - self.ax.plot( - spiketimes, - u_i * np.ones_like(spiketimes), - marker="|", - mew=1, - markersize=3, - ls="", - color=self._color, - ) - visible_start_frame = self._visible_trange[0] / self._sampling_frequency - visible_end_frame = self._visible_trange[1] / self._sampling_frequency - self.ax.set_yticks(np.arange(len(units_ids))) - self.ax.set_yticklabels(units_ids) - self.ax.set_xlim(visible_start_frame, visible_end_frame) - self.ax.set_xlabel("time (s)") - - def _fix_trange(self, trange): - if trange[1] > self._max_frame: - # trange[0] += max_t - trange[1] - trange[1] = self._max_frame - if trange[0] < 0: - # trange[1] += -trange[0] - trange[0] = 0 - # trange[0] = np.maximum(0, trange[0]) - # trange[1] = np.minimum(max_t, trange[1]) - return trange - - -def plot_rasters(*args, **kwargs): - W = RasterWidget(*args, **kwargs) - W.plot() - return W - - -plot_rasters.__doc__ = RasterWidget.__doc__ diff --git a/src/spikeinterface/widgets/_legacy_mpl_widgets/tests/test_widgets_legacy.py b/src/spikeinterface/widgets/_legacy_mpl_widgets/tests/test_widgets_legacy.py index 5004765251..defe10f0d4 100644 --- a/src/spikeinterface/widgets/_legacy_mpl_widgets/tests/test_widgets_legacy.py +++ b/src/spikeinterface/widgets/_legacy_mpl_widgets/tests/test_widgets_legacy.py @@ -43,43 +43,7 @@ def setUp(self): def tearDown(self): pass - # def test_timeseries(self): - # sw.plot_timeseries(self._rec, mode='auto') - # sw.plot_timeseries(self._rec, mode='line', show_channel_ids=True) - # sw.plot_timeseries(self._rec, mode='map', show_channel_ids=True) - # sw.plot_timeseries(self._rec, mode='map', show_channel_ids=True, order_channel_by_depth=True) - - def 
test_rasters(self): - sw.plot_rasters(self._sorting) - - def test_plot_probe_map(self): - sw.plot_probe_map(self._rec) - sw.plot_probe_map(self._rec, with_channel_ids=True) - - # TODO - # def test_spectrum(self): - # sw.plot_spectrum(self._rec) - - # TODO - # def test_spectrogram(self): - # sw.plot_spectrogram(self._rec, channel=0) - - # def test_unitwaveforms(self): - # w = sw.plot_unit_waveforms(self._we) - # unit_ids = self._sorting.unit_ids[:6] - # sw.plot_unit_waveforms(self._we, max_channels=5, unit_ids=unit_ids) - # sw.plot_unit_waveforms(self._we, radius_um=60, unit_ids=unit_ids) - - # def test_plot_unit_waveform_density_map(self): - # unit_ids = self._sorting.unit_ids[:3] - # sw.plot_unit_waveform_density_map(self._we, unit_ids=unit_ids, max_channels=4) - # sw.plot_unit_waveform_density_map(self._we, unit_ids=unit_ids, radius_um=50) - # - # sw.plot_unit_waveform_density_map(self._we, unit_ids=unit_ids, radius_um=25, same_axis=True) - # sw.plot_unit_waveform_density_map(self._we, unit_ids=unit_ids, max_channels=2, same_axis=True) - - # def test_unittemplates(self): - # sw.plot_unit_templates(self._we) + def test_plot_unit_probe_map(self): sw.plot_unit_probe_map(self._we, with_channel_ids=True) @@ -120,12 +84,6 @@ def test_plot_peak_activity_map(self): sw.plot_peak_activity_map(self._rec, with_channel_ids=True) sw.plot_peak_activity_map(self._rec, bin_duration_s=1.0) - def test_confusion(self): - sw.plot_confusion_matrix(self._gt_comp, count_text=True) - - def test_agreement(self): - sw.plot_agreement_matrix(self._gt_comp, count_text=True) - def test_multicomp_graph(self): msc = sc.compare_multiple_sorters([self._sorting, self._sorting, self._sorting]) sw.plot_multicomp_graph(msc, edge_cmap="viridis", node_cmap="rainbow", draw_labels=False) @@ -150,8 +108,6 @@ def test_sorting_performance(self): mytest.setUp() # ~ mytest.test_timeseries() - # ~ mytest.test_rasters() - mytest.test_plot_probe_map() # ~ mytest.test_unitwaveforms() # ~ mytest.test_plot_unit_waveform_density_map() # mytest.test_unittemplates() @@ -169,8 +125,6 @@ def test_sorting_performance(self): # ~ mytest.test_plot_drift_over_time() # ~ mytest.test_plot_peak_activity_map() - # mytest.test_confusion() - # mytest.test_agreement() # ~ mytest.test_multicomp_graph() #  mytest.test_sorting_performance() diff --git a/src/spikeinterface/widgets/_legacy_mpl_widgets/timeseries_.py b/src/spikeinterface/widgets/_legacy_mpl_widgets/timeseries_.py deleted file mode 100644 index ab6fa2ace5..0000000000 --- a/src/spikeinterface/widgets/_legacy_mpl_widgets/timeseries_.py +++ /dev/null @@ -1,233 +0,0 @@ -import numpy as np -from matplotlib import pyplot as plt -from matplotlib.ticker import MaxNLocator -from .basewidget import BaseWidget - -import scipy.spatial - - -class TracesWidget(BaseWidget): - """ - Plots recording timeseries. - - Parameters - ---------- - recording: RecordingExtractor - The recording extractor object - segment_index: None or int - The segment index. - channel_ids: list - The channel ids to display. - order_channel_by_depth: boolean - Reorder channel by depth. 
- time_range: list - List with start time and end time - mode: 'line' or 'map' or 'auto' - 2 possible mode: - * 'line' : classical for low channel count - * 'map' : for high channel count use color heat map - * 'auto' : auto switch depending the channel count <32ch - cmap: str default 'RdBu' - matplotlib colormap used in mode 'map' - show_channel_ids: bool - Set yticks with channel ids - color_groups: bool - If True groups are plotted with different colors - color: matplotlib color, default: None - The color used to draw the traces. - clim: None or tupple - When mode='map' this control color lims - with_colorbar: bool default True - When mode='map' add colorbar - figure: matplotlib figure - The figure to be used. If not given a figure is created - ax: matplotlib axis - The axis to be used. If not given an axis is created - - Returns - ------- - W: TracesWidget - The output widget - """ - - def __init__( - self, - recording, - segment_index=None, - channel_ids=None, - order_channel_by_depth=False, - time_range=None, - mode="auto", - cmap="RdBu", - show_channel_ids=False, - color_groups=False, - color=None, - clim=None, - with_colorbar=True, - figure=None, - ax=None, - **plot_kwargs, - ): - BaseWidget.__init__(self, figure, ax) - self.recording = recording - self._sampling_frequency = recording.get_sampling_frequency() - self.visible_channel_ids = channel_ids - self._plot_kwargs = plot_kwargs - - if segment_index is None: - nseg = recording.get_num_segments() - if nseg != 1: - raise ValueError("You must provide segment_index=...") - segment_index = 0 - self.segment_index = segment_index - - if self.visible_channel_ids is None: - self.visible_channel_ids = recording.get_channel_ids() - - if order_channel_by_depth: - locations = self.recording.get_channel_locations() - channel_inds = self.recording.ids_to_indices(self.visible_channel_ids) - locations = locations[channel_inds, :] - origin = np.array([np.max(locations[:, 0]), np.min(locations[:, 1])])[None, :] - dist = scipy.spatial.distance.cdist(locations, origin, metric="euclidean") - dist = dist[:, 0] - self.order = np.argsort(dist) - else: - self.order = None - - if channel_ids is None: - channel_ids = recording.get_channel_ids() - - fs = recording.get_sampling_frequency() - if time_range is None: - time_range = (0, 1.0) - time_range = np.array(time_range) - - assert mode in ("auto", "line", "map"), "Mode must be in auto/line/map" - if mode == "auto": - if len(channel_ids) <= 64: - mode = "line" - else: - mode = "map" - self.mode = mode - self.cmap = cmap - - self.show_channel_ids = show_channel_ids - - self._frame_range = (time_range * fs).astype("int64") - a_max = self.recording.get_num_frames(segment_index=self.segment_index) - self._frame_range = np.clip(self._frame_range, 0, a_max) - self._time_range = [e / fs for e in self._frame_range] - - self.clim = clim - self.with_colorbar = with_colorbar - - self._initialize_stats() - - # self._vspacing = self._mean_channel_std * 20 - self._vspacing = self._max_channel_amp * 1.5 - - if recording.get_channel_groups() is None: - color_groups = False - - self._color_groups = color_groups - self._color = color - if color_groups: - self._colors = [] - self._group_color_map = {} - all_groups = recording.get_channel_groups() - groups = np.unique(all_groups) - N = len(groups) - import colorsys - - HSV_tuples = [(x * 1.0 / N, 0.5, 0.5) for x in range(N)] - self._colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)) - color_idx = 0 - for group in groups: - self._group_color_map[group] = 
color_idx - color_idx += 1 - self.name = "TimeSeries" - - def plot(self): - self._do_plot() - - def _do_plot(self): - chunk0 = self.recording.get_traces( - segment_index=self.segment_index, - channel_ids=self.visible_channel_ids, - start_frame=self._frame_range[0], - end_frame=self._frame_range[1], - ) - if self.order is not None: - chunk0 = chunk0[:, self.order] - self.visible_channel_ids = np.array(self.visible_channel_ids)[self.order] - - ax = self.ax - - n = len(self.visible_channel_ids) - - if self.mode == "line": - ax.set_xlim( - self._frame_range[0] / self._sampling_frequency, self._frame_range[1] / self._sampling_frequency - ) - ax.set_ylim(-self._vspacing, self._vspacing * n) - ax.get_xaxis().set_major_locator(MaxNLocator(prune="both")) - ax.get_yaxis().set_ticks([]) - ax.set_xlabel("time (s)") - - self._plots = {} - self._plot_offsets = {} - offset0 = self._vspacing * (n - 1) - times = np.arange(self._frame_range[0], self._frame_range[1]) / self._sampling_frequency - for im, m in enumerate(self.visible_channel_ids): - self._plot_offsets[m] = offset0 - if self._color_groups: - group = self.recording.get_channel_groups(channel_ids=[m])[0] - group_color_idx = self._group_color_map[group] - color = self._colors[group_color_idx] - else: - color = self._color - self._plots[m] = ax.plot(times, self._plot_offsets[m] + chunk0[:, im], color=color, **self._plot_kwargs) - offset0 = offset0 - self._vspacing - - if self.show_channel_ids: - ax.set_yticks(np.arange(n) * self._vspacing) - ax.set_yticklabels([str(chan_id) for chan_id in self.visible_channel_ids[::-1]]) - - elif self.mode == "map": - extent = (self._time_range[0], self._time_range[1], 0, self.recording.get_num_channels()) - im = ax.imshow( - chunk0.T, interpolation="nearest", origin="upper", aspect="auto", extent=extent, cmap=self.cmap - ) - - if self.clim is None: - im.set_clim(-self._max_channel_amp, self._max_channel_amp) - else: - im.set_clim(*self.clim) - - if self.with_colorbar: - self.figure.colorbar(im, ax=ax) - - if self.show_channel_ids: - ax.set_yticks(np.arange(n) + 0.5) - ax.set_yticklabels([str(chan_id) for chan_id in self.visible_channel_ids[::-1]]) - - def _initialize_stats(self): - chunk0 = self.recording.get_traces( - segment_index=self.segment_index, - channel_ids=self.visible_channel_ids, - start_frame=self._frame_range[0], - end_frame=self._frame_range[1], - ) - - self._mean_channel_std = np.mean(np.std(chunk0, axis=0)) - self._max_channel_amp = np.max(np.max(np.abs(chunk0), axis=0)) - - -def plot_timeseries(*args, **kwargs): - W = TracesWidget(*args, **kwargs) - W.plot() - return W - - -plot_timeseries.__doc__ = TracesWidget.__doc__ diff --git a/src/spikeinterface/widgets/tests/test_widgets.py b/src/spikeinterface/widgets/tests/test_widgets.py index bc0ec68041..509194cb93 100644 --- a/src/spikeinterface/widgets/tests/test_widgets.py +++ b/src/spikeinterface/widgets/tests/test_widgets.py @@ -342,6 +342,13 @@ def test_plot_probe_map(self): if backend not in self.skip_backends: sw.plot_probe_map(self.recording, with_channel_ids=True, with_contact_id=True) + def test_plot_rasters(self): + possible_backends = list(sw.RasterWidget.get_possible_backends()) + for backend in possible_backends: + if backend not in self.skip_backends: + sw.plot_rasters(self.sorting) + + if __name__ == "__main__": # unittest.main() @@ -365,7 +372,8 @@ def test_plot_probe_map(self): # mytest.test_amplitudes() # mytest.test_plot_agreement_matrix() # mytest.test_plot_confusion_matrix() - mytest.test_plot_probe_map() + # 
mytest.test_plot_probe_map() + mytest.test_plot_rasters() # plt.ion() plt.show() diff --git a/src/spikeinterface/widgets/widget_list.py b/src/spikeinterface/widgets/widget_list.py index 77db17029f..6ea2593432 100644 --- a/src/spikeinterface/widgets/widget_list.py +++ b/src/spikeinterface/widgets/widget_list.py @@ -11,6 +11,7 @@ from .motion import MotionWidget from .probe_map import ProbeMapWidget from .quality_metrics import QualityMetricsWidget +from .rasters import RasterWidget from .sorting_summary import SortingSummaryWidget from .spike_locations import SpikeLocationsWidget from .spikes_on_traces import SpikesOnTracesWidget @@ -35,6 +36,7 @@ MotionWidget, ProbeMapWidget, QualityMetricsWidget, + RasterWidget, SortingSummaryWidget, SpikeLocationsWidget, SpikesOnTracesWidget, @@ -91,6 +93,7 @@ plot_motion = MotionWidget plot_probe_map = ProbeMapWidget plot_quality_metrics = QualityMetricsWidget +plot_rasters = RasterWidget plot_sorting_summary = SortingSummaryWidget plot_spike_locations = SpikeLocationsWidget plot_spikes_on_traces = SpikesOnTracesWidget From 625ff5e35219d397215413bebdb4f64dac8f0707 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Tue, 19 Sep 2023 18:44:28 +0200 Subject: [PATCH 74/90] Oups. --- .../widgets/agreement_matrix.py | 91 ++++++++++++++++++ .../widgets/confusion_matrix.py | 83 ++++++++++++++++ src/spikeinterface/widgets/probe_map.py | 78 +++++++++++++++ src/spikeinterface/widgets/rasters.py | 95 +++++++++++++++++++ 4 files changed, 347 insertions(+) create mode 100644 src/spikeinterface/widgets/agreement_matrix.py create mode 100644 src/spikeinterface/widgets/confusion_matrix.py create mode 100644 src/spikeinterface/widgets/probe_map.py create mode 100644 src/spikeinterface/widgets/rasters.py diff --git a/src/spikeinterface/widgets/agreement_matrix.py b/src/spikeinterface/widgets/agreement_matrix.py new file mode 100644 index 0000000000..55f38f078b --- /dev/null +++ b/src/spikeinterface/widgets/agreement_matrix.py @@ -0,0 +1,91 @@ +import numpy as np +from warnings import warn + +from .base import BaseWidget, to_attr +from .utils import get_unit_colors + + + +class AgreementMatrixWidget(BaseWidget): + """ + Plot unit depths + + Parameters + ---------- + sorting_comparison: GroundTruthComparison or SymmetricSortingComparison + The sorting comparison object. + Symetric or not. + ordered: bool + Order units with best agreement scores. + This enable to see agreement on a diagonal. + count_text: bool + If True counts are displayed as text + unit_ticks: bool + If True unit tick labels are displayed + + """ + + def __init__( + self, sorting_comparison, ordered=True, count_text=True, unit_ticks=True, + backend=None, **backend_kwargs + ): + plot_data = dict( + sorting_comparison=sorting_comparison, + ordered=ordered, + count_text=count_text, + unit_ticks=unit_ticks, + ) + BaseWidget.__init__(self, plot_data, backend=backend, **backend_kwargs) + + def plot_matplotlib(self, data_plot, **backend_kwargs): + import matplotlib.pyplot as plt + from .utils_matplotlib import make_mpl_figure + + dp = to_attr(data_plot) + + self.figure, self.axes, self.ax = make_mpl_figure(**backend_kwargs) + + comp = dp.sorting_comparison + + if dp.ordered: + scores = comp.get_ordered_agreement_scores() + else: + scores = comp.agreement_scores + + N1 = scores.shape[0] + N2 = scores.shape[1] + + unit_ids1 = scores.index.values + unit_ids2 = scores.columns.values + + # Using matshow here just because it sets the ticks up nicely. imshow is faster. 
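        # (scores is a pandas DataFrame with one row per unit of the first sorting and
        # one column per unit of the second; values are agreement scores in [0, 1], so
        # with ordered=True the best matches line up on the diagonal)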
+ self.ax.matshow(scores.values, cmap="Greens") + + if dp.count_text: + for i, u1 in enumerate(unit_ids1): + u2 = comp.best_match_12[u1] + if u2 != -1: + j = np.where(unit_ids2 == u2)[0][0] + + self.ax.text(j, i, "{:0.2f}".format(scores.at[u1, u2]), ha="center", va="center", color="white") + + # Major ticks + self.ax.set_xticks(np.arange(0, N2)) + self.ax.set_yticks(np.arange(0, N1)) + self.ax.xaxis.tick_bottom() + + # Labels for major ticks + if dp.unit_ticks: + self.ax.set_yticklabels(scores.index, fontsize=12) + self.ax.set_xticklabels(scores.columns, fontsize=12) + + self.ax.set_xlabel(comp.name_list[1], fontsize=20) + self.ax.set_ylabel(comp.name_list[0], fontsize=20) + + self.ax.set_xlim(-0.5, N2 - 0.5) + self.ax.set_ylim( + N1 - 0.5, + -0.5, + ) + + diff --git a/src/spikeinterface/widgets/confusion_matrix.py b/src/spikeinterface/widgets/confusion_matrix.py new file mode 100644 index 0000000000..da021092db --- /dev/null +++ b/src/spikeinterface/widgets/confusion_matrix.py @@ -0,0 +1,83 @@ +import numpy as np +from warnings import warn + +from .base import BaseWidget, to_attr +from .utils import get_unit_colors + + + +class ConfusionMatrixWidget(BaseWidget): + """ + Plot unit depths + + Parameters + ---------- + gt_comparison: GroundTruthComparison + The ground truth sorting comparison object + count_text: bool + If True counts are displayed as text + unit_ticks: bool + If True unit tick labels are displayed + + """ + + def __init__( + self, gt_comparison, count_text=True, unit_ticks=True, + backend=None, **backend_kwargs + ): + plot_data = dict( + gt_comparison=gt_comparison, + count_text=count_text, + unit_ticks=unit_ticks, + ) + BaseWidget.__init__(self, plot_data, backend=backend, **backend_kwargs) + + def plot_matplotlib(self, data_plot, **backend_kwargs): + import matplotlib.pyplot as plt + from .utils_matplotlib import make_mpl_figure + + dp = to_attr(data_plot) + + self.figure, self.axes, self.ax = make_mpl_figure(**backend_kwargs) + + comp = dp.gt_comparison + + confusion_matrix = comp.get_confusion_matrix() + N1 = confusion_matrix.shape[0] - 1 + N2 = confusion_matrix.shape[1] - 1 + + # Using matshow here just because it sets the ticks up nicely. imshow is faster. 
+ self.ax.matshow(confusion_matrix.values, cmap="Greens") + + if dp.count_text: + for (i, j), z in np.ndenumerate(confusion_matrix.values): + if z != 0: + if z > np.max(confusion_matrix.values) / 2.0: + self.ax.text(j, i, "{:d}".format(z), ha="center", va="center", color="white") + else: + self.ax.text(j, i, "{:d}".format(z), ha="center", va="center", color="black") + + self.ax.axhline(int(N1 - 1) + 0.5, color="black") + self.ax.axvline(int(N2 - 1) + 0.5, color="black") + + # Major ticks + self.ax.set_xticks(np.arange(0, N2 + 1)) + self.ax.set_yticks(np.arange(0, N1 + 1)) + self.ax.xaxis.tick_bottom() + + # Labels for major ticks + if dp.unit_ticks: + self.ax.set_yticklabels(confusion_matrix.index, fontsize=12) + self.ax.set_xticklabels(confusion_matrix.columns, fontsize=12) + else: + self.ax.set_xticklabels(np.append([""] * N2, "FN"), fontsize=10) + self.ax.set_yticklabels(np.append([""] * N1, "FP"), fontsize=10) + + self.ax.set_xlabel(comp.name_list[1], fontsize=20) + self.ax.set_ylabel(comp.name_list[0], fontsize=20) + + self.ax.set_xlim(-0.5, N2 + 0.5) + self.ax.set_ylim( + N1 + 0.5, + -0.5, + ) \ No newline at end of file diff --git a/src/spikeinterface/widgets/probe_map.py b/src/spikeinterface/widgets/probe_map.py new file mode 100644 index 0000000000..193711a34f --- /dev/null +++ b/src/spikeinterface/widgets/probe_map.py @@ -0,0 +1,78 @@ +import numpy as np +from warnings import warn + +from .base import BaseWidget, to_attr, default_backend_kwargs +from .utils import get_unit_colors + + + +class ProbeMapWidget(BaseWidget): + """ + Plot the probe of a recording. + + Parameters + ---------- + recording: RecordingExtractor + The recording extractor object + channel_ids: list + The channel ids to display + with_channel_ids: bool False default + Add channel ids text on the probe + **plot_probe_kwargs: keyword arguments for probeinterface.plotting.plot_probe_group() function + + """ + + def __init__( + self, recording, channel_ids=None, with_channel_ids=False, + backend=None, **backend_or_plot_probe_kwargs + ): + + # split backend_or_plot_probe_kwargs + backend_kwargs = dict() + plot_probe_kwargs = dict() + backend = self.check_backend(backend) + for k, v in backend_or_plot_probe_kwargs.items(): + if k in default_backend_kwargs[backend]: + backend_kwargs[k] = v + else: + plot_probe_kwargs[k] = v + + plot_data = dict( + recording=recording, + channel_ids=channel_ids, + with_channel_ids=with_channel_ids, + plot_probe_kwargs=plot_probe_kwargs, + ) + BaseWidget.__init__(self, plot_data, backend=backend, **backend_kwargs) + + def plot_matplotlib(self, data_plot, **backend_kwargs): + import matplotlib.pyplot as plt + from .utils_matplotlib import make_mpl_figure + from probeinterface.plotting import get_auto_lims, plot_probe + + dp = to_attr(data_plot) + + plot_probe_kwargs = dp.plot_probe_kwargs + + self.figure, self.axes, self.ax = make_mpl_figure(**backend_kwargs) + + probegroup = dp.recording.get_probegroup() + + xlims, ylims, zlims = get_auto_lims(probegroup.probes[0]) + for i, probe in enumerate(probegroup.probes): + xlims2, ylims2, _ = get_auto_lims(probe) + xlims = min(xlims[0], xlims2[0]), max(xlims[1], xlims2[1]) + ylims = min(ylims[0], ylims2[0]), max(ylims[1], ylims2[1]) + + plot_probe_kwargs["title"] = False + pos = 0 + text_on_contact = None + for i, probe in enumerate(probegroup.probes): + n = probe.get_contact_count() + if dp.with_channel_ids: + text_on_contact = dp.recording.channel_ids[pos : pos + n] + pos += n + plot_probe(probe, ax=self.ax, text_on_contact=text_on_contact, 
**plot_probe_kwargs) + + self.ax.set_xlim(*xlims) + self.ax.set_ylim(*ylims) diff --git a/src/spikeinterface/widgets/rasters.py b/src/spikeinterface/widgets/rasters.py new file mode 100644 index 0000000000..de855ebe45 --- /dev/null +++ b/src/spikeinterface/widgets/rasters.py @@ -0,0 +1,95 @@ +import numpy as np +from warnings import warn + +from .base import BaseWidget, to_attr, default_backend_kwargs + + + +class RasterWidget(BaseWidget): + """ + Plots spike train rasters. + + Parameters + ---------- + sorting: SortingExtractor + The sorting extractor object + segment_index: None or int + The segment index. + unit_ids: list + List of unit ids + time_range: list + List with start time and end time + color: matplotlib color + The color to be used + """ + + def __init__( + self, sorting, segment_index=None, unit_ids=None, time_range=None, color="k", + backend=None, **backend_kwargs + ): + + + if segment_index is None: + if sorting.get_num_segments() != 1: + raise ValueError("You must provide segment_index=...") + segment_index = 0 + + if time_range is None: + frame_range = [0, sorting.to_spike_vector()[-1]["sample_index"]] + time_range = [f / sorting.sampling_frequency for f in frame_range] + else: + assert len(time_range) == 2, "'time_range' should be a list with start and end time in seconds" + frame_range = [int(t * sorting.sampling_frequency) for t in time_range] + + plot_data = dict( + sorting=sorting, + segment_index=segment_index, + unit_ids=unit_ids, + color=color, + frame_range=frame_range, + time_range=time_range, + ) + BaseWidget.__init__(self, plot_data, backend=backend, **backend_kwargs) + + def plot_matplotlib(self, data_plot, **backend_kwargs): + import matplotlib.pyplot as plt + from .utils_matplotlib import make_mpl_figure + + dp = to_attr(data_plot) + sorting = dp.sorting + + self.figure, self.axes, self.ax = make_mpl_figure(**backend_kwargs) + + units_ids = dp.unit_ids + if units_ids is None: + units_ids = sorting.unit_ids + + with plt.rc_context({"axes.edgecolor": "gray"}): + for unit_index, unit_id in enumerate(units_ids): + spiketrain = sorting.get_unit_spike_train( + unit_id, + start_frame=dp.frame_range[0], + end_frame=dp.frame_range[1], + segment_index=dp.segment_index, + ) + spiketimes = spiketrain / float(sorting.sampling_frequency) + self.ax.plot( + spiketimes, + unit_index * np.ones_like(spiketimes), + marker="|", + mew=1, + markersize=3, + ls="", + color=dp.color, + ) + self.ax.set_yticks(np.arange(len(units_ids))) + self.ax.set_yticklabels(units_ids) + self.ax.set_xlim(*dp.time_range) + self.ax.set_xlabel("time (s)") + + + + + + + From 4b9149c663521c72b3a3a7915a18d920ddf51884 Mon Sep 17 00:00:00 2001 From: munahaf Date: Wed, 20 Sep 2023 06:55:04 +0000 Subject: [PATCH 75/90] Comment: Updated a test expression to remove two logical short circuits. --- src/spikeinterface/preprocessing/remove_artifacts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/spikeinterface/preprocessing/remove_artifacts.py b/src/spikeinterface/preprocessing/remove_artifacts.py index 3148539165..0e1940a45f 100644 --- a/src/spikeinterface/preprocessing/remove_artifacts.py +++ b/src/spikeinterface/preprocessing/remove_artifacts.py @@ -165,7 +165,7 @@ def __init__( for l in np.unique(labels): assert l in artifacts.keys(), f"Artefacts are provided but label {l} has no value!" 
else: - assert "ms_before" != None and "ms_after" != None, f"ms_before/after should not be None for mode {mode}" + assert "ms_before" is not None and "ms_after" is not None, f"ms_before/after should not be None for mode {mode}" sorting = NumpySorting.from_times_labels(list_triggers, list_labels, recording.get_sampling_frequency()) sorting = sorting.save() waveforms_kwargs.update({"ms_before": ms_before, "ms_after": ms_after}) From c362aac3837027c26e284e6670c03bcab8865fb8 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 20 Sep 2023 06:58:41 +0000 Subject: [PATCH 76/90] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/preprocessing/remove_artifacts.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/spikeinterface/preprocessing/remove_artifacts.py b/src/spikeinterface/preprocessing/remove_artifacts.py index 0e1940a45f..61f2f2eca1 100644 --- a/src/spikeinterface/preprocessing/remove_artifacts.py +++ b/src/spikeinterface/preprocessing/remove_artifacts.py @@ -165,7 +165,9 @@ def __init__( for l in np.unique(labels): assert l in artifacts.keys(), f"Artefacts are provided but label {l} has no value!" else: - assert "ms_before" is not None and "ms_after" is not None, f"ms_before/after should not be None for mode {mode}" + assert ( + "ms_before" is not None and "ms_after" is not None + ), f"ms_before/after should not be None for mode {mode}" sorting = NumpySorting.from_times_labels(list_triggers, list_labels, recording.get_sampling_frequency()) sorting = sorting.save() waveforms_kwargs.update({"ms_before": ms_before, "ms_after": ms_after}) From 2d1a33ad752480bef7b3d39bcc0619a8d8d0c127 Mon Sep 17 00:00:00 2001 From: Munawar Date: Wed, 20 Sep 2023 00:46:17 -0700 Subject: [PATCH 77/90] Update remove_artifacts.py to change string literals (probably used mistakenly) to actual variables. --- src/spikeinterface/preprocessing/remove_artifacts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/spikeinterface/preprocessing/remove_artifacts.py b/src/spikeinterface/preprocessing/remove_artifacts.py index 61f2f2eca1..7e84822c61 100644 --- a/src/spikeinterface/preprocessing/remove_artifacts.py +++ b/src/spikeinterface/preprocessing/remove_artifacts.py @@ -166,7 +166,7 @@ def __init__( assert l in artifacts.keys(), f"Artefacts are provided but label {l} has no value!" 
else: assert ( - "ms_before" is not None and "ms_after" is not None + ms_before is not None and ms_after is not None ), f"ms_before/after should not be None for mode {mode}" sorting = NumpySorting.from_times_labels(list_triggers, list_labels, recording.get_sampling_frequency()) sorting = sorting.save() From 468396a8832038c0779feba8f72e0794fdea8ab0 Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Wed, 20 Sep 2023 16:04:56 +0200 Subject: [PATCH 78/90] Add methods to sparsify and densify waveforms to `ChannelSparsity` (#1985) * add tests for densification and sparsification in ChannelSparsity * passing tests * fix docstrings * fix docstring * added checks * better assertion message * typo * base the implementation in unit_id instead of unit_index * better variable name * alessio suggestions * improve docstring --- src/spikeinterface/core/sparsity.py | 107 ++++++++++++++++-- .../core/tests/test_sparsity.py | 88 ++++++++++++++ 2 files changed, 184 insertions(+), 11 deletions(-) diff --git a/src/spikeinterface/core/sparsity.py b/src/spikeinterface/core/sparsity.py index 4c3680b021..455edcfc80 100644 --- a/src/spikeinterface/core/sparsity.py +++ b/src/spikeinterface/core/sparsity.py @@ -33,7 +33,9 @@ class ChannelSparsity: """ - Handle channel sparsity for a set of units. + Handle channel sparsity for a set of units. That is, for every unit, + it indicates which channels are used to represent the waveform and the rest + of the non-represented channels are assumed to be zero. Internally, sparsity is stored as a boolean mask. @@ -92,13 +94,17 @@ def __init__(self, mask, unit_ids, channel_ids): assert self.mask.shape[0] == self.unit_ids.shape[0] assert self.mask.shape[1] == self.channel_ids.shape[0] - # some precomputed dict + # Those are computed at first call self._unit_id_to_channel_ids = None self._unit_id_to_channel_indices = None + self.num_channels = self.channel_ids.size + self.num_units = self.unit_ids.size + self.max_num_active_channels = self.mask.sum(axis=1).max() + def __repr__(self): - ratio = np.mean(self.mask) - txt = f"ChannelSparsity - units: {self.unit_ids.size} - channels: {self.channel_ids.size} - ratio: {ratio:0.2f}" + density = np.mean(self.mask) + txt = f"ChannelSparsity - units: {self.num_units} - channels: {self.num_channels} - density, P(x=1): {density:0.2f}" return txt @property @@ -119,6 +125,85 @@ def unit_id_to_channel_indices(self): self._unit_id_to_channel_indices[unit_id] = channel_inds return self._unit_id_to_channel_indices + def sparsify_waveforms(self, waveforms: np.ndarray, unit_id: str) -> np.ndarray: + """ + Sparsify the waveforms according to a unit_id corresponding sparsity. + + + Given a unit_id, this method selects only the active channels for + that unit and removes the rest. + + Parameters + ---------- + waveforms : np.array + Dense waveforms with shape (num_waveforms, num_samples, num_channels) or a + single dense waveform (template) with shape (num_samples, num_channels). + unit_id : str + The unit_id for which to sparsify the waveform. + + Returns + ------- + sparsified_waveforms : np.array + Sparse waveforms with shape (num_waveforms, num_samples, num_active_channels) + or a single sparsified waveform (template) with shape (num_samples, num_active_channels). + """ + + assert_msg = ( + "Waveforms must be dense to sparsify them. 
" + f"Their last dimension {waveforms.shape[-1]} must be equal to the number of channels {self.num_channels}" + ) + assert self.are_waveforms_dense(waveforms=waveforms), assert_msg + + non_zero_indices = self.unit_id_to_channel_indices[unit_id] + sparsified_waveforms = waveforms[..., non_zero_indices] + + return sparsified_waveforms + + def densify_waveforms(self, waveforms: np.ndarray, unit_id: str) -> np.ndarray: + """ + Densify sparse waveforms that were sparisified according to a unit's channel sparsity. + + Given a unit_id its sparsified waveform, this method places the waveform back + into its original form within a dense array. + + Parameters + ---------- + waveforms : np.array + The sparsified waveforms array of shape (num_waveforms, num_samples, num_active_channels) or a single + sparse waveform (template) with shape (num_samples, num_active_channels). + unit_id : str + The unit_id that was used to sparsify the waveform. + + Returns + ------- + densified_waveforms : np.array + The densified waveforms array of shape (num_waveforms, num_samples, num_channels) or a single dense + waveform (template) with shape (num_samples, num_channels). + + """ + + non_zero_indices = self.unit_id_to_channel_indices[unit_id] + + assert_msg = ( + "Waveforms do not seem to be be in the sparsity shape of this unit_id. The number of active channels is " + f"{len(non_zero_indices)} but the waveform has {waveforms.shape[-1]} active channels." + ) + assert self.are_waveforms_sparse(waveforms=waveforms, unit_id=unit_id), assert_msg + + densified_shape = waveforms.shape[:-1] + (self.num_channels,) + densified_waveforms = np.zeros(densified_shape, dtype=waveforms.dtype) + densified_waveforms[..., non_zero_indices] = waveforms + + return densified_waveforms + + def are_waveforms_dense(self, waveforms: np.ndarray) -> bool: + return waveforms.shape[-1] == self.num_channels + + def are_waveforms_sparse(self, waveforms: np.ndarray, unit_id: str) -> bool: + non_zero_indices = self.unit_id_to_channel_indices[unit_id] + num_active_channels = len(non_zero_indices) + return waveforms.shape[-1] == num_active_channels + @classmethod def from_unit_id_to_channel_ids(cls, unit_id_to_channel_ids, unit_ids, channel_ids): """ @@ -144,16 +229,16 @@ def to_dict(self): ) @classmethod - def from_dict(cls, d): + def from_dict(cls, dictionary: dict): unit_id_to_channel_ids_corrected = {} - for unit_id in d["unit_ids"]: - if unit_id in d["unit_id_to_channel_ids"]: - unit_id_to_channel_ids_corrected[unit_id] = d["unit_id_to_channel_ids"][unit_id] + for unit_id in dictionary["unit_ids"]: + if unit_id in dictionary["unit_id_to_channel_ids"]: + unit_id_to_channel_ids_corrected[unit_id] = dictionary["unit_id_to_channel_ids"][unit_id] else: - unit_id_to_channel_ids_corrected[unit_id] = d["unit_id_to_channel_ids"][str(unit_id)] - d["unit_id_to_channel_ids"] = unit_id_to_channel_ids_corrected + unit_id_to_channel_ids_corrected[unit_id] = dictionary["unit_id_to_channel_ids"][str(unit_id)] + dictionary["unit_id_to_channel_ids"] = unit_id_to_channel_ids_corrected - return cls.from_unit_id_to_channel_ids(**d) + return cls.from_unit_id_to_channel_ids(**dictionary) ## Some convinient function to compute sparsity from several strategy @classmethod diff --git a/src/spikeinterface/core/tests/test_sparsity.py b/src/spikeinterface/core/tests/test_sparsity.py index 75182bf532..ac114ac161 100644 --- a/src/spikeinterface/core/tests/test_sparsity.py +++ b/src/spikeinterface/core/tests/test_sparsity.py @@ -55,5 +55,93 @@ def test_ChannelSparsity(): assert 
np.array_equal(sparsity.mask, sparsity4.mask) +def test_sparsify_waveforms(): + seed = 0 + rng = np.random.default_rng(seed=seed) + + num_units = 3 + num_samples = 5 + num_channels = 4 + + is_mask_valid = False + while not is_mask_valid: + sparsity_mask = rng.integers(0, 1, size=(num_units, num_channels), endpoint=True, dtype="bool") + is_mask_valid = np.all(sparsity_mask.sum(axis=1) > 0) + + unit_ids = np.arange(num_units) + channel_ids = np.arange(num_channels) + sparsity = ChannelSparsity(mask=sparsity_mask, unit_ids=unit_ids, channel_ids=channel_ids) + + for unit_id in unit_ids: + waveforms_dense = rng.random(size=(num_units, num_samples, num_channels)) + + # Test are_waveforms_dense + assert sparsity.are_waveforms_dense(waveforms_dense) + + # Test sparsify + waveforms_sparse = sparsity.sparsify_waveforms(waveforms_dense, unit_id=unit_id) + non_zero_indices = sparsity.unit_id_to_channel_indices[unit_id] + num_active_channels = len(non_zero_indices) + assert waveforms_sparse.shape == (num_units, num_samples, num_active_channels) + + # Test round-trip (note that this is loosy) + unit_id = unit_ids[unit_id] + non_zero_indices = sparsity.unit_id_to_channel_indices[unit_id] + waveforms_dense2 = sparsity.densify_waveforms(waveforms_sparse, unit_id=unit_id) + assert np.array_equal(waveforms_dense[..., non_zero_indices], waveforms_dense2[..., non_zero_indices]) + + # Test sparsify with one waveform (template) + template_dense = waveforms_dense.mean(axis=0) + template_sparse = sparsity.sparsify_waveforms(template_dense, unit_id=unit_id) + assert template_sparse.shape == (num_samples, num_active_channels) + + # Test round trip with template + template_dense2 = sparsity.densify_waveforms(template_sparse, unit_id=unit_id) + assert np.array_equal(template_dense[..., non_zero_indices], template_dense2[:, non_zero_indices]) + + +def test_densify_waveforms(): + seed = 0 + rng = np.random.default_rng(seed=seed) + + num_units = 3 + num_samples = 5 + num_channels = 4 + + is_mask_valid = False + while not is_mask_valid: + sparsity_mask = rng.integers(0, 1, size=(num_units, num_channels), endpoint=True, dtype="bool") + is_mask_valid = np.all(sparsity_mask.sum(axis=1) > 0) + + unit_ids = np.arange(num_units) + channel_ids = np.arange(num_channels) + sparsity = ChannelSparsity(mask=sparsity_mask, unit_ids=unit_ids, channel_ids=channel_ids) + + for unit_id in unit_ids: + non_zero_indices = sparsity.unit_id_to_channel_indices[unit_id] + num_active_channels = len(non_zero_indices) + waveforms_sparse = rng.random(size=(num_units, num_samples, num_active_channels)) + + # Test are waveforms sparse + assert sparsity.are_waveforms_sparse(waveforms_sparse, unit_id=unit_id) + + # Test densify + waveforms_dense = sparsity.densify_waveforms(waveforms_sparse, unit_id=unit_id) + assert waveforms_dense.shape == (num_units, num_samples, num_channels) + + # Test round-trip + waveforms_sparse2 = sparsity.sparsify_waveforms(waveforms_dense, unit_id=unit_id) + assert np.array_equal(waveforms_sparse, waveforms_sparse2) + + # Test densify with one waveform (template) + template_sparse = waveforms_sparse.mean(axis=0) + template_dense = sparsity.densify_waveforms(template_sparse, unit_id=unit_id) + assert template_dense.shape == (num_samples, num_channels) + + # Test round trip with template + template_sparse2 = sparsity.sparsify_waveforms(template_dense, unit_id=unit_id) + assert np.array_equal(template_sparse, template_sparse2) + + if __name__ == "__main__": test_ChannelSparsity() From 84051d1515a444a3174a4642029ed02aa69d755e Mon 
Sep 17 00:00:00 2001 From: Samuel Garcia Date: Thu, 21 Sep 2023 10:41:27 +0200 Subject: [PATCH 79/90] oups --- src/spikeinterface/widgets/agreement_matrix.py | 2 +- src/spikeinterface/widgets/confusion_matrix.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/spikeinterface/widgets/agreement_matrix.py b/src/spikeinterface/widgets/agreement_matrix.py index 55f38f078b..22617f6be0 100644 --- a/src/spikeinterface/widgets/agreement_matrix.py +++ b/src/spikeinterface/widgets/agreement_matrix.py @@ -8,7 +8,7 @@ class AgreementMatrixWidget(BaseWidget): """ - Plot unit depths + Plots sorting comparison agreement matrix. Parameters ---------- diff --git a/src/spikeinterface/widgets/confusion_matrix.py b/src/spikeinterface/widgets/confusion_matrix.py index da021092db..b76283b421 100644 --- a/src/spikeinterface/widgets/confusion_matrix.py +++ b/src/spikeinterface/widgets/confusion_matrix.py @@ -8,7 +8,7 @@ class ConfusionMatrixWidget(BaseWidget): """ - Plot unit depths + Plots sorting comparison confusion matrix. Parameters ---------- From 85c7755f3a3c4a93117ecb7fb842309e00e22915 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 21 Sep 2023 08:43:30 +0000 Subject: [PATCH 80/90] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../tests/test_widgets_legacy.py | 2 -- src/spikeinterface/widgets/agreement_matrix.py | 8 ++------ src/spikeinterface/widgets/confusion_matrix.py | 10 +++------- src/spikeinterface/widgets/probe_map.py | 5 +---- src/spikeinterface/widgets/rasters.py | 15 ++------------- src/spikeinterface/widgets/tests/test_widgets.py | 3 +-- 6 files changed, 9 insertions(+), 34 deletions(-) diff --git a/src/spikeinterface/widgets/_legacy_mpl_widgets/tests/test_widgets_legacy.py b/src/spikeinterface/widgets/_legacy_mpl_widgets/tests/test_widgets_legacy.py index defe10f0d4..39eb80e2e5 100644 --- a/src/spikeinterface/widgets/_legacy_mpl_widgets/tests/test_widgets_legacy.py +++ b/src/spikeinterface/widgets/_legacy_mpl_widgets/tests/test_widgets_legacy.py @@ -43,8 +43,6 @@ def setUp(self): def tearDown(self): pass - - def test_plot_unit_probe_map(self): sw.plot_unit_probe_map(self._we, with_channel_ids=True) sw.plot_unit_probe_map(self._we, animated=True) diff --git a/src/spikeinterface/widgets/agreement_matrix.py b/src/spikeinterface/widgets/agreement_matrix.py index 22617f6be0..ec6ea1c87c 100644 --- a/src/spikeinterface/widgets/agreement_matrix.py +++ b/src/spikeinterface/widgets/agreement_matrix.py @@ -5,7 +5,6 @@ from .utils import get_unit_colors - class AgreementMatrixWidget(BaseWidget): """ Plots sorting comparison agreement matrix. 
@@ -22,12 +21,11 @@ class AgreementMatrixWidget(BaseWidget): If True counts are displayed as text unit_ticks: bool If True unit tick labels are displayed - + """ def __init__( - self, sorting_comparison, ordered=True, count_text=True, unit_ticks=True, - backend=None, **backend_kwargs + self, sorting_comparison, ordered=True, count_text=True, unit_ticks=True, backend=None, **backend_kwargs ): plot_data = dict( sorting_comparison=sorting_comparison, @@ -87,5 +85,3 @@ def plot_matplotlib(self, data_plot, **backend_kwargs): N1 - 0.5, -0.5, ) - - diff --git a/src/spikeinterface/widgets/confusion_matrix.py b/src/spikeinterface/widgets/confusion_matrix.py index b76283b421..8eb58f30b2 100644 --- a/src/spikeinterface/widgets/confusion_matrix.py +++ b/src/spikeinterface/widgets/confusion_matrix.py @@ -5,7 +5,6 @@ from .utils import get_unit_colors - class ConfusionMatrixWidget(BaseWidget): """ Plots sorting comparison confusion matrix. @@ -18,13 +17,10 @@ class ConfusionMatrixWidget(BaseWidget): If True counts are displayed as text unit_ticks: bool If True unit tick labels are displayed - + """ - def __init__( - self, gt_comparison, count_text=True, unit_ticks=True, - backend=None, **backend_kwargs - ): + def __init__(self, gt_comparison, count_text=True, unit_ticks=True, backend=None, **backend_kwargs): plot_data = dict( gt_comparison=gt_comparison, count_text=count_text, @@ -80,4 +76,4 @@ def plot_matplotlib(self, data_plot, **backend_kwargs): self.ax.set_ylim( N1 + 0.5, -0.5, - ) \ No newline at end of file + ) diff --git a/src/spikeinterface/widgets/probe_map.py b/src/spikeinterface/widgets/probe_map.py index 193711a34f..7fb74abd7c 100644 --- a/src/spikeinterface/widgets/probe_map.py +++ b/src/spikeinterface/widgets/probe_map.py @@ -5,7 +5,6 @@ from .utils import get_unit_colors - class ProbeMapWidget(BaseWidget): """ Plot the probe of a recording. @@ -23,10 +22,8 @@ class ProbeMapWidget(BaseWidget): """ def __init__( - self, recording, channel_ids=None, with_channel_ids=False, - backend=None, **backend_or_plot_probe_kwargs + self, recording, channel_ids=None, with_channel_ids=False, backend=None, **backend_or_plot_probe_kwargs ): - # split backend_or_plot_probe_kwargs backend_kwargs = dict() plot_probe_kwargs = dict() diff --git a/src/spikeinterface/widgets/rasters.py b/src/spikeinterface/widgets/rasters.py index de855ebe45..4a1d76279f 100644 --- a/src/spikeinterface/widgets/rasters.py +++ b/src/spikeinterface/widgets/rasters.py @@ -4,7 +4,6 @@ from .base import BaseWidget, to_attr, default_backend_kwargs - class RasterWidget(BaseWidget): """ Plots spike train rasters. 
@@ -24,16 +23,13 @@ class RasterWidget(BaseWidget): """ def __init__( - self, sorting, segment_index=None, unit_ids=None, time_range=None, color="k", - backend=None, **backend_kwargs + self, sorting, segment_index=None, unit_ids=None, time_range=None, color="k", backend=None, **backend_kwargs ): - - if segment_index is None: if sorting.get_num_segments() != 1: raise ValueError("You must provide segment_index=...") segment_index = 0 - + if time_range is None: frame_range = [0, sorting.to_spike_vector()[-1]["sample_index"]] time_range = [f / sorting.sampling_frequency for f in frame_range] @@ -86,10 +82,3 @@ def plot_matplotlib(self, data_plot, **backend_kwargs): self.ax.set_yticklabels(units_ids) self.ax.set_xlim(*dp.time_range) self.ax.set_xlabel("time (s)") - - - - - - - diff --git a/src/spikeinterface/widgets/tests/test_widgets.py b/src/spikeinterface/widgets/tests/test_widgets.py index 509194cb93..2c583391c3 100644 --- a/src/spikeinterface/widgets/tests/test_widgets.py +++ b/src/spikeinterface/widgets/tests/test_widgets.py @@ -349,7 +349,6 @@ def test_plot_rasters(self): sw.plot_rasters(self.sorting) - if __name__ == "__main__": # unittest.main() @@ -371,7 +370,7 @@ def test_plot_rasters(self): # mytest.test_template_metrics() # mytest.test_amplitudes() # mytest.test_plot_agreement_matrix() - # mytest.test_plot_confusion_matrix() + # mytest.test_plot_confusion_matrix() # mytest.test_plot_probe_map() mytest.test_plot_rasters() From 11a9ce0dcccf0fb367a1d1eb5a9659fc1bb05e48 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Thu, 21 Sep 2023 10:53:39 +0200 Subject: [PATCH 81/90] Add doc for ephyviewer --- doc/images/plot_traces_ephyviewer.png | Bin 0 -> 102235 bytes doc/modules/widgets.rst | 40 ++++++++++++++++++++++++++ 2 files changed, 40 insertions(+) create mode 100644 doc/images/plot_traces_ephyviewer.png diff --git a/doc/images/plot_traces_ephyviewer.png b/doc/images/plot_traces_ephyviewer.png new file mode 100644 index 0000000000000000000000000000000000000000..9d926725a4f25e61fc6c1672d0b1876ae9d7f6d9 GIT binary patch literal 102235 zcmeFZcT`i^_ct7LY$!U8Ac6v;prSO9-mxG8(xi6R zJU21Y<>40O27y34dUtQ%2Z0Xn1A%_}{qRAcXLCfW5%@at_^wSL2z2}-=Z7mz;`mt* z=vR>5?HlHyj?2>_p}ZE8*fo+{%Fn;tJfvuRL`y=jGz# zGp|bG`{@AJfuAJBe=jmWt8+5c@j%HR_lNKPs(rI|Rm497_b{aH%A4gVEh5V5Is-kB zH73$}^Pk5rW79EQJ|IE+kCA+4@HJ(S7ycOUR= z5@mY$UzgD*3V;6BB}nAv`F~$(iVYt7{Gw{4EG;iZwA)#6uHzC|JNmbM;H~G2lCga?gA&URrmi}=k zs}M@xEGRi8d9!ywOXAmGwILL|1X`tgz}l}^1dnX?ghy4j~8*n1YC~1J6#gPo(b1i5BPX*sCu$tsgV?A=HXF3 z{u5_=;`<$8v+dbxA^8H*Zy$EVP!^n`6gx_s$KHOKXbp=O=`#OiO)b*+s&;s|D4wQIcvKHZ+yz^WpO#V5%1s5^6m>=x(?2cQLHM| zhFjz)`3;rQ;AsM<;VxM!wk=-h2?f8`f@#;o&T#GpN;RcXh{SCaIOpMy>mJN*o1vf8 zs2_eZ9+V%t>#yKDbvNOx((tJyEl0^t5RahzW0mv`)wA**pGWuSn;d?5!oafhDti-s zA=MSJ5XGqH4FHxL2q}-z&kNZM?ejRl>Z4Uea@<{I$Lf?2~iv_1GV;EVY*3 zG1OGftv^*vNImGT(lMdZ5`zr#&e@ns7@I3w-8Js|_3N8^F9Za&qHgB5jgSfo{b#WY z&-I4cERvt#^>0wtDvX^XcCqY@$L`h_8QEI`S?@y!)_ zU0oe*_uCagMenAa6X)JAMMAaNyBkSjKlEl*lQ`d9papy4;&cw!dq)fpalt8~u&^Tr z?$=J)Ty$|sOU+&WL~4s{xkC!9hSJH1Cr=M9P5Kd)C+MrBvPs|eTP$RY5uk(a`6tS` zn@P(tJZF)3xD|M`+-|gy1c81pj79meqlC1g>@m#EA*4f#5L!S$fIJRwjABP-i2gX^ zZ68J&m#A?&2x#5mcSz-UuD+Tn#HsD#pMMp352KoYYphF~aE>-FZfg!+MUrc3dX)X| zJ4qRsxLKwPD*8&sE0|4qr58W5VfPA3i>ZZVu@}x-VwjP0ojsJuMKHh8tSj z)@Dvn)4+wkkrP^bi;HqT6Y>t`>mJw|(&<{O&^ z^&2S#Fh|bv2;0U{iR!j_4~f30I=lMN=cPuGrbp*tMMq^?O^h{I^*3+c9Bp$5kHmOq ziTdFtP?Z*S>11O2V1*J#W?KhCNTrPuhg)W6bWT3HA8GIfR5yxjH~FS{6K zZlzwjm%A98_x;7q=!oA}>}3vELu%=K$(=>z@nV-H&#-m5{l$FQ zf!a1UeH@$v*o5hq=b!TN^1kUwySfx})^{))MX|uxDgdX``pA(Z$#0xsEQ`y=u4FPC z8L(-fo?MO-31;mQIi~k6v}rRs@vWx(|$9O 
z);{F%t4}8}s<2n>MrrT$zo;_~4EXs0Q?bUd5llkq!9eOr!%@xP%1yVdV1HoDY8zh(iCu*ABhrS^PS#jw zp}P%%RQcjN^be00Fca~Mu8CJ^muCyzrFL7_JODCzZEk}ZsM&^or^s(cagXXDHEG`v zzVc%q56Tq_nN8bFo-)W}>>FF|qCmV^Jz=NwM&qSB%S(k3uT*;GpHuahe7ddY!awD@-)fUA<{4x5I5 zBaVKavAq2ADdpNHCKsFD;~h+DfD+HRKd=?-A-y*?zGYiB*5W2ldCQ-CG%x?<1Ko4u zZQ$YmI3)SYdbC0u4(p{mwKM$k*{#(rqYGD>nhT`6#~RO`ia7CU-e{_;nhdc&bvohs zMfavw>iBqa5?Y$Fh)7RmsGKo!R_Q2Stc6dIuPwRYX-uJ*t72EKwE6o4{yG}qFrRvS zq$qwk?(4_?Q}7-CZ-Q^*pk)6i;V|TXk#6t*4>I5LUH_>_Z~Qk6`~SB-y13sYV2Z6@ z;PvmP;O39E5)CazM$fNXNY>^}+uppncNq3>4)xC?K+ZeZhIdD6Pa3becnFdH+rM2< zN}(n-A63>SXWu#~ufMc^r%yZwIJJNJd+Fh)e^17vX`{sJPd=QO=dMLdFC0cOyErSa zQROzSXMGry!pD1OL>>*9M*@bfJ`WmQWQ+2or$0-hRba+n1`xMVgUS&JP(VVzgtdAg z#LAKzXPArB25xd zXi>RZ4~Cct^0z06z$?)GzRqtY0$aZfOgGAf_nYrF>vtLhu`6cfrTj=E!QQ13sX$)2 z;x5P%ulwmJG%={FBS8dA-T=}Hdec$Uk6+K+J%~9c|LXugg)Z%#iB5X=VO;O&ps%_& ze?Oav%>h*g%nygq4;7OU@U};~n3~$hYPvap&X5H}qGTtxA3F}f4Q&Cv{lsGf@Ihm@ z6>byPyblDvjg4u$Wf!?siMREPb7oK^nis-I2B|VFH)TZN0qB_XpjIw3$@Zt--aJTXo((jChbA zX{LA>$SW+@omdEV$i}Y`b&R~40FFmPqJQ^8s~&w)L;iO1>==aBBBs#rMr%3gFt2#Q zSM{(8C>X5nNS_5JkieYt-Yj~idCXiJ6UOvq<02c#3I^t@UM7eF5EPytLU_mOX=jSqKY%*77W%-MKK4ebE$tK3`B(|}Uv)3U4RB~>lGE$pk;-B_@ug7s1YSoBRrlGv#Vrzqg7S2SIw z1O9}&?6V~?3CHpsTT|b6OXBL%S-Snegol!wO=-5UmWrD^P8urQPL2R_7fBo~@T$mx>fjtC4IgRN@L+YQhZNDE zpD@;{-Vt@u)@c`Tu4IND+m%x;+TRsP=xUY-`yJN`N}^R4Xr+CzIiPGg9fSA;F`(N5 zbkC*E++sn0jH%n|QzJH{zwAIV`0HoZm2Fn-3EbTAx@eMmOQmd8A#P!GI;Th}EM+uz zls1vlkvq{6%_Xj$!plla22bEul^pqsT53#FD|q)}GqeG^{K`^(Pvh(LI3;z37z6z- zdY3Q{;*L`bv_om0LIe_il3}EQ;G}IL0)xdq=IaK66z@DN2gC`&m`$J816uG9>K@9e z3AQj$Xb*EclgJRa_&fIyyn9lLf1Teiz zrXw!S)QTEF{7g=pI_J|ncyr+qh!M4Fbkydlzf#(`2^RWtrV7MBxrcPg;!lS6jC6-o zCo&#MO=%8NQaf|;t{t|glPfi{AKjP4-96y_<)uaHu1ODIflD0r_|4I0G%CUC@ z--n8OBL=x;%t9nxxEH?753_!7qPpk@`ar#AA$J4}H7w(KNwl{jMposx=J1%%?P=`l}CD_o3z3{KMX@Qy&L?kX3*FH=eKLzOZ zOH-x43)&&x_uxH_Vq>LzwyOuiR}2p3h{cTE|3C`I=s*l<;>~H z9lv@YMeSk>2#e2mdlF4T>$FZI8fo#=%FOKdNzb&#S#hRQa;Nu|^k#X)`nqebyDAXs zm@u(s?KApNq06#rR}~uG;L(_5y@cAGch{+PgHb;x$S>;FFKsgSL*-i=D4%*UY)Vl~T;Na@N*5_q_Y`afmpL z?``PKmvuu~FORRfH4Piq`GN9bnoH-uP2vbE)Bmcuow)k^w|W3JHNo|aEA@zVu~gPW zS}q%3RR=O`uy>q-oTgwBl_XY1u-%>c9V;ADO!w@FREN^HXINdOAQ8 zl_6T$vK-%-n$9tvCOAo(USDER3vCs{Z}5pjqq!qq+R!Gc?O%+v$lN>Qza^Q0fxIAX zbGZ0m85LyOwrx~V#p?77RcwcU-~4{9I2V8G`Cn~P{8S{x$UM;TxdvK z`{lRzSO4Xs{*?=$awB!IvD%|}8Ob#=pKH~0FBRRySU+qoU$zoz`EHu*N?DIHs&O+> zdKBN?emdItp6K=I1brO%HiC{rtAB74LNJ7nMgY(``9Mh1S6Dem`;aQxO$}{j23Q+^ zc_8!7t>Z^lw&O&!yTp{Ht>`Ivd0&m6iGl^UW%eBl&+)Nq5bnhnoDjyZtopyvJnX=9 z_B`y~%g~uok*WUDy^ZBpORzEH)BxAPLLQ*mC_F({h}HQK-u-6Jk@1IY#uHWT9f5SP zi+opIJVgkIDHQ?zmeDa54trRO%>=z-=b;~2`9t*zq}MUwiKvAFM+8_}*W8vsnn%`V z$F;-I^-AqNbz zZ79u!hoLd`NxR5HSK)OqI4k$Oz;lYWYQ_TT##u-fS1q=#XSW1+-Dk-6WogCh0mN#@ z6@-uyR+{U1erh0tb`%&8QWQ}BS);hMB{yr)2~q|U2orBF_?b~-`9P(4x8{&2+Hn#gGSGz4x}5+`+!**6O)oUX+bjhdR-$mMEwOz->DvyE^7Z!O+J&% zg!tARqLL>ET7U}fTw)H0yM9$h*a4^f9PtI(#f?#yH6^#OjI8&V}|rCY%MkC$Mlf#TJjGcAu5+eu_4@W$;A@k z_BvlR+s#YbWm(N^vYC3*}@*LcE2xJ4)^lOL6`R@d5zSs4`w)0#1Y$frMP1v`$8U)2c(cgci10 zPed8@%Pj<(qRMM*^XZ1D&)Pw~m?yoB@n$Z9(P&6)H1)wUYZuz@s3lwALPCtIQyzAl z?C?j49zh_CN$}(wx!F~fBUCrrNa+eFSA)nSsGdb;Ry-SUaxc49!jNu%obgC+Z|vNU ztOT9ep8n88o3dmlx>C5aH=91n)87YCH=t|9 z%c~@JkDy1XKycCx=0jqJLHV9hLSIEOBI1n5z=6I`H zb>(rX)pSNEhsn(F*6YLNV`LbxmCOrDkj)haCp0Q#dkx+(#yj)w%c;@piJ^s7t3OOw zxV0l1C|q4Rqnj9x6bdMN!mN1!iE^aD>YoJ@Co){h;K?;*8D`lHbZ!l6;v4+ZQoum5 z%q43s1Fl(A4={x*TZ8L|W-Z`YR(x+vjp8q~qqxbD^plWpB=b(7%$4@*gU$%$SYMYx zKffJenG3D{_9>?BGqDMny#vy$p_K?nlF$C&wp)t_XUHps4TAVEYPCA6j}_n|@;Sw+ zp;#kf1Mrj1%WOX3p;cSEZ|@f)DDd6z2_;71ulAcMct*lWTO4{m?UsmQ!0Z{XM7lU> 
zkx9LnQ1WzJW9g4VF$JX!V!959@n?jULNpI+=;97Y=j1!(t%!yS2aW5lHK&u}mc}EJ zaJYohzAG$5vaBDR#4yQOf;%%q09?KloTpl?e}kUy@4AQjr5^wwp` z^+~>yG;ISQDl@)oMAZM_2xzY)Qv}Q}k_Dru^SjD(+c#MF_i}WP#<;P)!qtVo7eRld z0xVDzt{BU!@<|Evt$Cc9yEb!UhNFcOt$v)@39CmI;+gQCIG*f>7%R)Z%doBys0}3b+XBI6tHjw zHzN8VDr~B2pMZsI08ILh$DX7L7^-<0c|rXv=U{)U<`uQf$GvYK&A~S@?I3nA?1d&} zB!4D(EFDUfN5)lYc)gf6_mlb3}TVhB$zTZ^L#tqoWmx1@@nSKR|!j zYUM07Le4?40zV8s; z;}d*_Z%BV(|9WkU%HgtKsiRwB)H^>v!rfS1;>hfKgJJ;DSy_?p)KJ|E~9(D=! zg z)St`b)FHpr8DLAYuN5nOk^&C8X|T{?l`;OsSPWq`$*0>;&w5xi_&)izuA%H>kjU+J z|J?!TEc9+2_(?xu?sw(Kx^Uv7vUu+^lRF!_-a~Ger^wGVeJ<9ewuB#8#})PTTvXrf z1`G7`3f9doKRgHjjQ#%=Bm4ghGH6@+No~*kaFtc#9p~JW<6Lg>{}s!t@u;$FyhBcs z68?rtuK!5W4E3og+q@amVYWCQI-Y}T2d{c2+0Zyz4J*Q-04Y+Q zj&s;+)Df4WLjj8CSF2%so*LD zRFPRdZpxL7EZOa9sNxhh;G#i@-o*efUV+&BvDdu6k-k+-(iM~L9>Z6JW^0@fBz`X$ z+p)8(a_5N?*60BFuef5W&Fsi)Ha8++Yx`ywcjT2P$7prRWWO(|ouyJ!4~0vfeJV>q zOdNRF`ipER+6)9)?Hd$P6%~mm2uqFJ;ia!UW-~_{g1sy(Nj)R;dySlsRuuofhpos< z9@|B5?bN1|?xSg3^3(rlQB{)5dcJZA|CpA28h&Hu3YfU~} zEVKtC0$|=gYBv?Q8x;U_+|+!+=ysUJb&-DzktNORmLCDLndZjXIpais2Bu5n zP-0Fs!dhm@HZ<@}eBvpUl5cC%kgC%b-+(-B2s(!5?w^b>A2A1Pc;OB!jtd#)BreGiQ`)YzM<%bNsW^xnK}q~3O_1oOTnpSTDZ}eL#g8rcrt6oo zS;H1>GC*&TSn})#S%9m$(Mu8tz5;T# zFy34Th^o83v_GGqOlPrdi%S3`%ks%#C&8+gI2`N=OJnJa)wV@{iUI{kmDlwop6N|+ z@3fiCPHYM`<(32R^$7(k)=`Da*I*^x$VNnram@@R%V8lQtGcs^am4HGlaU3ew3_b@ z+R)w9-qSF@FgSY!Vkr(x>Xf~*<#+%Xs2b`=yYm);$~DwhfEG8-t$VCP%xhr%ss@>O z=U5Gg^2-B9aT&o%iF*Xgi4KWDFeyiYuc9Ftm|Ul33EWW~C#vwkEB}aSbeSSZ16JVh zEBrt^2SR$IVzp(~Q``phcCvEw?1GauJZ_L#(wpaCEop+oMQr^sr{sfP0?Gh)Yr zFKz)M9v!BI&`jGhGjqS?8~TzxsjiMAvdev@2iu+OL|Z@-yQ!TMJ+PnY7zAm+*&PM| z-O+lu>*-BKf_;VvCED|<5=U%P`F7J?mc^@z(cAT5!wD4ibo$s!NqOc25#3F^AX=$7pNGs|>Qd#Pk0HQN_IeUHS90e8RXQ+hjeNr8QZZID zo%t>_hAAL6wy-^h4=il!?qET}229GDy@BUT@IVR zeW2sNcCL1tadfoJ50-dh@N7}kE@st_MDU`=2a}gUc6IxTmK#owoN6fG-57|(c6s8I zw6*%84xCmeu{O#T{vie-czPr#dh%q_!cKd&Nk=0YputxQ`qxT7c^3SZ{nJ2?pn3OZ z0S1N*5RwD+1Qm%{kqY@jr8wD$*aZnlgFu69!01c1B#cOx|( zvRND2I8?KgHL(%;{u8CdL5o)2H(A?ait8@mu2gz=_V>eARo~l7AvT(?wX$Y-Qvpr$ z#K-|Xpu-Kb)PEdpUtR<_P&cEbaTq~xNRVUfrD7fG`A)0a_-9r3jQQMT`v!8ab_@2) zn{S_gj(d0CX|axOY+=#BQ&M>ialoiBc5$rkJ$W{Hh3O>E(5eoL>UFqcyVm4P2tvK$ z61!HnEmo+Co@j3iA#C-l|9V$JJN;vcR!O$9XxJ7)nm}nE7%VP7pjRw-vf*QO zv{kVP8>GrfP5|DG%{Hz7=7Q=ybAPV=2lRnkvSL{FY=iz-bCRH$vouW?7Ksz9@FH}{ zaD8Rbw%{mLmBHe1tumr1ZYrGrym(3yS7I~4?Dd|KlsJUwwvpR;LX1* z*Sn&qUP(L3K&r?kKz&fEv2Tg z7-55yD%rBc4{#sB{A{+6x*wv(f#5J~aCB|%BBu8(q_ThNQ=;JbnpXh8J~L9dgri<{ z=N~sknpOePM+Isw-qi!=Rz!POK2vlZ6tyF0z$?9;g00LySE>=$ZW*q+B#wc5M zpv(N00j36OT;wzjAMYr}Q)V~23;gGA=x{I6=mjZ57cp8khsCJ(gKNuX2zD~kD0{lxWEQEpr;)EfXky9^>$O+E?erz!= z_hBc8(Qfo=p=~?a{n9+V+}1a_;Y{bxpsg(E-^>b@`wEc2g&T5j1g;MxIspt2(gCEK z<54V|bbU{OhfZS3=^T*p>)TjTM(Ii<(W9?4bG=)hul?a)Ci}}k#j{aW-6I%q!T@hN z%;fQfDk_X!VQb_BT*(erBB0*WWP)eG61p5pi8jr>iv%u)8hlML73=Ft=F!!yEP(Nu zO&>Aoj#7djB`JUD1Qlb7hGr_{%Vt2$zfyE2jBY=`k@alsI=BxR*=88K9nslpzOA ztST+JhM>L;fdPevy3_egIpv`BL-FvihkW-8i2eg=l>B0FV%P=&A&)_GY$+OYz8(uq zrL)4$k5N>|hUZScVX*V5Pva za>+1&V2wbHK9W*7#;VgCrz_;Tt@Im*Ep=C&mH2m5!`3Co1lh)`*b`+SEd|fNq7bwl z3~AEpd+kSeqv~;21fzfzmsf3_R&&{dARL%akaZqug&```^{t(*sF-9TIwQ92-O%Lb zCU99AScROIf8CxkM>*BUGqaoo^g~1%)xPI1&nj@Q%%Vzc3KnAD#$^1Q222;45f-S(?Y zFg6-ok)2%W6qsXORHyrb=ogMxi809|@nS&4usgpe`GD@+1zSl|mpQ7%u3y*%6H~UQ zNntadV9NS;nh8Ws_k2{J!ySf^>srlMxJ?TAJ3o*@Dg)Ee4}8*vnwljE@Q%1P*mk*G z>D&7omH${d5^eqt<5%G+3L{ZcWVZeC@a+ZHGLsbWk!avVurs4!%pc(V;YX>c(FGNi z9q7yuRMsm_h%$T`fNKbvAKw6`=##3-;1`Jqf+S^fxb|e4veP~3D1|cH;?;l!oKTbf zg6wX;l>RXWSG{2YAPS)6WIRv6uVjsd*D|kuo1Ipa>}!b?aT!kU=t*Mn~v%4 zix}AMERWjVt{F?9=7?yGwsMCM^1dFDeQfcIOR8MmAU8MX4ABnkNj3w4& 
zSlM*{3iplJX@Wx1Y9xLz^11C3pQ6R>D|6S`TSb#ukqSS=t zwez-O<&Q>%x|>gICw*kiu@*25cc4hXh%kJa%l>@*5K!TY)K8~qna#`*RP6wH#(KO7 z?)|RXA0@`fSOXPJaMR{ZD~gzVUB3$euMr<{!bWhWoPZox8yoG zcCo|LSFWyU)rQ=CQ7PXDTkjhE*B$*@g84hBeognAx0FAA=m^O)hD&a@9G-i_JKPa+ zYi$XS*&u)Oen4&5KP9eyqlu{8X7~*0-x2Tbm_9Z!F}yCfedhJn=Ex_t+mc*hZUw3~ zZ~&=x7ukQ;a|YrK$D823tpg^ney2M0mv6u1S^Us(_{Oey-X}gzD$b{Rrx%b~)4c^< zs`aqor(0J4b?aVo)Fph$r6Pyt$Jl=XcH;u}!VR+fdpKB$#d}_O?Y4C|@rIs>{|COA zzv82S3sLdZypVh0kx}GUF>*d64Pa;D8&c3rJo(Mk4SC5 z;(skK^2O(yu3AO5%^%uC{6)YfPmB`0t^PxcH1^e*zumZ?^Y#bt8^Gg#|KK^GJ2xL6 z)}MZaC13B*aSflxll7jRT9pE6_oo`=U-m-Q>9LfjD;r-`D5pOYS!@m*NACHP3-E;) za$Rz%%a(H_t*N)sw3eGfk-d*3kk0J=)!LCcQs^t6(F;3~~Gz(I5ph#P*$Fgtt zvNS2R_K#y9)b?SBH%&a)`F1Ajdt^oV-eHl9B_~nMQ6*CZ#_`tbN~>nTXe_O)wFn~F zG%|)1FZMz@0C$ksNmL!9$X@9zCW41)^qxUa4qeXje+X5#MUJ$Q+wyd4*WS({dsJ@OXhF0qHo0Xt|QEL;na0&oW? zK@Mu%$w$1(rFQUF$Jh(zF%HgbhH#Oqfy^bv)W#&aG`WgZU9TEwFqc>&#TG3HAdJjM zALStE9-HQypP&ZM3eKB@5arkdz?k^a`0w%7toVUnVcEp0XwJ85zoKzXt| zSV2prDphDQ1S>yvh{t&#a(F8@7t7#LlOGTOXu zq}gAYcw|UlU$LQ`JgmR<{t+ImYNto17pfkr>7yfI2@Q$VxyfPbYW$%mCi~kqVn?{d zLfL!1FBRD6kf{xJCGzPztgw!E^@hnS(HT1^ka?_o$%)FCqUy>!H z$#-1Ho=r3x$d0!~y2NtR&P6wzfuF^_oCQ3K1TJ0b9|*)NT&lqP8&0lqwu}gJlArU- zAglhiNYZoM3W@E=|8h5q^vE!aboj{<21Wp3bwwzujNQW0O>`XR;Pr5zn=hG23*oy{ zwP`o+S0k-2}<=^UUhyrXD@WhOMB#LFg&WD{$qx78HnYD_N z(EmX<99E4}&meBvRRN(*=NS;`%#1FT;QiOQ8t1S}bfQlT#EunsKcl=DM(CA>QM1-s z*u6kbv{GGXOqF05FpmP%PCmu$%5)gl1aAg;QnM7wctY9Oa2sF7sm@|$w<>nLe0xV2 z`ud1n@|$at2sA%$*w9e;Ce@@V)fjuB3k9-0;7>uo!}%_7O;!F+z-3ML*i@eEwic8w z{jID*D(j_)4b7gTtOdL%rh5TVFTqq3(`3ds(@;^?94Zu7G z35dSBvKm@EdWi{g1qsFM=O-BY70ThI%PAcvi5FrUUQOihZ1Fi=M=Bbp%y3U zqWFUvP8{uO)T)y~1iuBaZP-Lvu0veUfL$p6(KuT@ogT~3cZkYLWUI-~qmm^%lsWni zD0*ajq?xAsb7g~kDuP`+q0Rlrhk|7;dJL3=1t%0=rQf!mfezF$Y_YWs5wy+-N*X(2 z2WmUPA2o49d~>p3bh_9C(lJ|*!PZ9(^R^XqY9oL(q#Bf0tmj5i$>W{E5(tW+U2;*r z)xk!*_W=GvJH%(HCumY^`}9x8V--ebLV`O_MuXy~v$MVBeo$2{eku8MMDl7gJc^-x zGZ3rIO2#DXYOh<}O9h|d|6lFVv)J(!@w^MqA=6mL`7=w>98yl|(+1&5s@2Zb8rs@# z0jk>t!rozegt%?oqrG_?q}VX~`?-p{hlTyT&S(*SK1AGTrhQd|uiF-fXxJ7P(BPbW zBRwwM3+Pp0!N!g@s&EJ6UkL~>T4uA>~vbyIJ*5r(tNppdyUD4~KWoY2JC8r~8rsNAe zy&J<+_F}I`m-64M*JJGg6uj1ST5>vdmX|Q@+9}Ex?zL#=FMbjGM%&rl-!D>h8UR2W zpG?(`rqdTIXlH(`;w}A_mq!&j;JAayL=dd=tZh7-)<0M6lPiGd;#Yi{sicXd{b*4g zfaIsXa$fO&RVg(1b{gLKM;Yho9uuVQ(~*bdU4sqfbw$QHE;( zaEUBeSf^j|p(W;?qWKqmA$4$1na*RfjD?GfE6j zFFXO=2wE7nVNrmzkqDwMb|Nk_@17SRy>O{n{6owKXs*>% z2WN`!?t_4IYn_>zdF6^k{#?#1)=h@{#u6SGfOD8l5$a>w}e${xe~i}?EPF5 zJ-vJK7l7%9GHC*ib1UCfSkrOXr{&|7?nEV2NffAZ@GEgCd zlUYG@kOW*~w$eEo$_pp)y0*LP2l%08VeN+&$ZSNWdj$EEgP=4TR1A1UbYmqeNQGg) zWg6Zu&fm|$S$d=0GsfS?S7J8h_bpf%kH#NNmZ3_fJqaa@W+ZS10eK)G0IZ%Ds>SwS zJ15{+l`!p!Jo-kjbw;5!&G6R(zv%;60=Q+%DnJHoEe;f%fgP}yTfz&r32Q(n@~x(N zDYURCHyTAWHwSUGb(e&OX)qUS8U0ApcHtDvB8^S5`HlF74Eb%~co{zQM806!xATR@ z-g6o!7df4)`-hy2x|cm53~&@APMTN{6CYb2?hV4#GC{4?GZLgczI4Rk?t-OP5B5aN zne(jn=U8$LNGmBdFX9>I`ZUo)3rBalcUV=WmY5hdXxzh(@EqA*I<-jpb@r9ePA@`Z zY$z}ta+AZZX&&8OcK^~=^Nr51y?kDur`eb=utU++$(r|BOKi-{1PTzHFvM_XS2v1p zMYL^;T$zz1xyBk3r_Uu6?HRp}#O?Ym9mMbDRYz^=vLo7xk_gPgi+OQG>)v@QrJ*pl zNYK(hyq3@Q$vGZ9gDSUD7oISXTJ_1d+{Fe7j%I`ikp2wR+J)o1S8b#}fSDj>@tm|6 zL92451Ilc~l5~8swpkbDUIR<;6nV0ed(|(l6(j85;@B+nbO})dx2By##dOY6j~EEM zBx{TMk1&xalVD*Y(=UY89C?2<2m%;dv#YoY+a29kM%?Csi3}2VZ)MTGw_ub#wAR_w z6ujN!7|bnub)0&eU5X>$U2aQq_VN(i56B%S*bWF}4waE zY@1QTn&U&LBVRUx;)S}p=^VYYu9;GBuvO*uQ6Pv0cjM6h;X?5PfUG?90JK}Gd%zoX zn-hc`9Yd-+<;E{(e6C|D=@D|&Ujv@oTm3JwBWNJuzd$6hP;vZGo(fpN5_;uavKL_P zQU4)8%3p;iXJiGHspk?Kuvz<3WOrrk-*hzyP(Ty{+eUmshKhMS5K+E#NA~g>{G~Eb zU~?mzSDrQ6Qw-dCR^#0=GOgGL+N{aVRiXpfsqEQ4ANj*k$~9APRU;uW8d%DZ^7MI@ 
zqjEm}&Y~iwuzN)0s>YxGnN&5MzmH-?gpR8PObaJO9JQHZTmc0^@4t}7brw>=YScM4 zJlzSYrgGc%x24oN0qBw0a);!N^25!QqA|6q&N9wciPbpHVi-uE({(2JQ92Uc{`_s! zK?V0n%48H`+o+SkbqmOswq^-N+b*;kB<)?o__{EA4HR~B@Q!_Q+jf&*#P0UuJaM2e z{+k8&IcS;WM5S~_v%m%3SteyxHoYN&1u&z4=T?Df(5XiFJZei=x0OkTA1~LpqgyWW zoW0~b4C3bdTf7@(KAr3#+?~DaxTV*%O#Qz)bEbTQvz%87uMO&l6?19D8i*Bl2piQq zdGJfBH<;|oe&rZ*+~~fSq$4YauLxDvUuA$=ia#qA& zv~8DKNk`FDFId23tADN7Xu?^5sYR|GG3fSkk^d_&Ltl3Q0492zB72P0k-#GL8b3rX441{VN`FR^cE4Mq-( ziaR_QvQ(CX+I>@;#%;@~!&eTXrWpNCX3wr*3@6yU+i*u@)#-jr*{+^-edncDiZ>nE zh*w)tZfm#V9$7~)oJDbiT(s2iRmg>iEKfOV236S~@Jknm_kqlE0N#@Q26 zXi?c>$V+?>)w@ns>DNv5?KZS>CYu10)CpJec$SsHe9b4wb<}E4s{TJh$ESQX`1&ha z&_x&&5dFLh8nxkM-^9p3_B`nQps$1vQ^zV%uXLSmv=z;_{6H;C{ipuh zIV&mk6b4jcf~%uih@TYegXjS9ppNg|*72UruWfe!H2M}7xmPq1`$nq!yV;MCEB{ah zpV3_hK-)upC+42?`R(;r>;L{6vHC9?jTqc&^}%V*5Zix6?=L*&LnJQpiCqB!UlnPV>RO>V+F7 zr39>_GkULYxoC!(Ug9>gcJut6F`vm5*tZz4BEdgPoUPYaq4#Xx>)RH${)y0MZ6o(F ze@AW7le}SH*^8M_L7onNK3%RXSn|Lb0d zB!BS!n7QURofpB!$JljMjoJEhR_{3E19@04WgSGWwbFcj^|WYw(`4!%FN}#7W%EK# zl2MLGzjhl}JbVMNC2zgu-yVGpE&urWzd|2}(_C;F`e<4zMfM{R72AT!qP#24U8G(_ z`_tD2ae%<=izPeDOD4vtUE`8RR`h=JJq~7XaRcM>YS;{?T02aH4pnmnoG}#E6v7e~W zSuxqOj>44PUSCW}7_Tc7oKABBhIM90YB_ZkUF3~D2g-U3+|+wo zUI~dg^G#)$=G=Cm z0y9F&+F)5nc7}}xv0*nIM`ivaH{p$DMubp2?=ZLz&+0%+_{t-uU`SC6qyQIj!0-l@ zGP2XI%0Hw@Ja^^^yYt+`;b^mf;Swjos(-p{NW-iW5XknX;ebGdMr!*2>*J6$p=hI z=Of_vMo2@-zE4lSz?))T@Q;fXpO1K$-nd|ekhv2;W^n*&e_Q0;yiI~!A_A)vFeTMw zpj<8IUu3U<;u;iod+(C$d)LPHt`vuL6r#B4vNCj8U9pmS9Rh$rN11Pd-yXr43U(>m zqrhXZoN{KWtYQNus#Y4gWaY?nfVXa@9mY?6G1WZl zTx*RG`_|m;EGfe@hTN47dQ<$}{b(UttVsGCWyPds${srvdGx?0wn_;D66t*yT)BW* zXeBdbW5W$Y#=e(8qT@)z&>E9VseIm;3#Ocil7Wn~sDc?t%+!(MspO)9sbuim9q~Sw z;J`L*{Ok*p)1&1rVPl3`HXVA1~q$oHwHf9$7j!mp6Htwl$PP+L2izeDUd1O z<0PO`Rx3fwjIy^xz#LXSkQ(mK=ZiD4<`wW5Qwnv{T?*GEcq4zo_#q3IKa3hKZu5#r z0s-zxbGaQVr@jwD>r$Q2`iD7|Ulc`7)2d^Y!sdxo2i2SR-`xVSw`0cGxqX{}t~#k( z!*MEUIt*19>IAOvyW+hT%C_W0<_A6SeQ_Ia;Lhj*h^bDOnVKZ@K$USK3gBklL=5&X z0_5{=a?oyi4f#*HW{vMzlT-8;it9@iwOGKlP@AJGUs>raApC3=+ljT5=lRYn;d!Wm zR>h-%iN0E{8`_S01?`^-SlxX?3PTW0>oUwq1{$7F;gP0e;~d{aqkH2Mk8Ja5y?2rU z#PPs|%H0Qu8fXa&mNuI&SdQvTg7c?TaNt1!dA-ppBm5=N>>{zOg`0n+Bv&Alyt>NifbmaX+*YXxKjGb>9$qrCcRsu1=BRz znZFH*AAR;MH!hJLH-7rWMz zUo^6GLsvnGdNo3}rd_54volr751Q-?9u7}Uf}GUc2!S+8fY~vTgxAYFnS=)>H_}zH z7+Ex=*VDQ-TOiz+{C?vHV4F&nwXeEFVTX0L0E+4WTqM0eHvI6ag(BC)A(9^DFeX_- z*ab0?XAf)+C9pMoux5)_usKvucZx1ipBu(2a-gh#-uhR~%KVHA@UlLO` z_#nD8`*4rf^SVX+q(SIPss6YwCb%6K5UH2VwDlhxEnkgKO7Ui6ciYLjB)O}?#Y|Pz z-JU`F-jQ_j-jaKM2 zu54E~_FfVD07!SUa1^k|5BWPcI;k~xkY@%2@t+fXy23S>Sy4#SB}Q3uTvWhW{z7~; zS5B;=*ug72iAEquf%Owe9kT-ZgV}|05a!&U4t)0ZNbu1R!x%q3 zk-k7YK3i>6A54h7L>g`A;USVo(4$WzxHK7%f`FWb>$nkA#?6b<#8zKwu~QirzS9f{ z`dDWJGmcI*WTJrkgBvc_Cx&$KhZZAK{KC zW+(+qbr)iUL@=>k3Oy6!I__vKOH?hw{XMpg%6rnnqs}of@ts^9sAeWC7oecS=!{>u zdpy$#9HM zax5wOgb+-;uXv=d30BOLYnf1YlJ`h?HF4W~%~320qi^RX0RcicgQZYAS>4>O(U3BZ zS3mHMK%TkWx5L*(sECP}YH)}W+Vq7LEu&-LImaz4CDhuD3+t1c@^dC%mDOd1omBoX zhs6S5FQ1H z9@vA?Z1e!mmt^fG-!d9@^2yPa;mJ3$Ke+%@5_+YYZ{<$QsUDQSyat-Eze(sfNeH+l z%cnKYop)0-T|6jRy3?aJY|CUJ&-CJ$CKpF%QLgf@28NBtsVh!yXzU3-l&fg|p&Zk{ zS;n+8IzHv`%>n_t?CbI}2!xs#07k0pNl^&oAuC4`Dt326kYhTlqK$CwC7iZksVxMn z5JbPo_dPSMl#?LOe_eAAh|p&yNvml)*UH7de)L)g5+ug|nx-R14#UXBwt z>R@$LtV0LbCe3wZWTA}GnDheH^3!@hzxRNc8ogJtVfB&iZtd^i0190Q(gX^_3NV|r z*+*MpE?J>TO|jbrOnb7Icb{|!w>aN&u-)N)aa(8$vG0SYNVQk>~muLzwg zy_f|f!y@m!at3{YV>DN{C?ICwow|+>XtzF`E+1-5Dmr42K^M8OUuJFr$tBWVr)Zu9 zz~}WF0N?#G8wa$v7ZZ;ZJ)oYq^U9awMhbzGdd5w|VcRW56BZ#y+W?&rE`Z_CSbe0V zf4S{M@jTHYo$N_}l5Jx@)*Rw`{Y3`{+g5UknCjg}8IhGm8_9cFN9vAvhr04ZlQu&j z!M(P6279Rp 
z0j#04{4|}~>s*amJKnDJac&?CiUlLiG_Q6-+my&=Pk2`&l7j~WMJ(&ObhJ@&T=dY)~-yZix z`{8@0KVI4(B*n&I;>H6!1ExGN5~l=jp<;?6<1!{}8R$Xzfq_tp<0G zdAzxKf!$zP^>EJT6tYEovsV-Z8ynO00TUQ&Q59%%Z1gJf zFGmKE>+125%FOT8DFP4fBe@sa^CUY|Q*<#m+kF=xKgEtE+wd z>QtGuJpM6?>Pxc?oR_RwrC`W5l`cgMt-Az*Kbq#yCs|fil`M0f6vQOT{d@5xv7<|4hJaaE^#j$$Lf7~Eu?f%*#FP5ax^ zeF)8Ru9f8%O69i6OaN%z^3ChpGg2cr$0T1h5H9dXNyhT6?>UPn;mWl!8GFB7=aI}) zgA?B}*V5nO?~Q*U=@#5`dBgEDi~Mx#S0H!&P`5q15Fa?ciT{P*56WkHV;Gh1-}KtU zYRTF_>F<%R{|pKNrI8V>Uyqf}u)V5$pOCRN@}vHF?AxSUsV`0j>}ijw1qbH*hdiwB z`8p8f-|!nda2v_t(^$}&;DGGZd8Dy;Ee+xp1jr*i8kJv zdd^PPEw%r+G|`Br`tY{iGwatMB=_Kz1H!-2*gP*~(PdZvRw7o%p)}Degp1A%wTe4Nway;Jjmh?4xlU~a7`l{1& zw6{XPcQ^ri1qbW4zAG-cHS=1s!}%G;&U8?m_;H`~o_)4-yMAEWO1EQABR+1x>rJk$ z-mQq=OTxgG!xNLui2tp-zb@-P5`|rKxz8Xh`PXdjpPpZk6F_f_(TQM+2|Dnfny-hDj(koRAN>P@7O zQ9@S{x>N7!p706wa)iAxK;FrIj|@K?(ADWO8>su^k6xC;C*n-?uWBk4XNsSQeH5aon&gzSb_}&s0NdDSIU_X> z5?_Cbx-@DaS--mZ6kuq~3xZ7nadWDjZ7G?4(xMO+J`#mT1q}G$k**?yGL%`tDo6V% zC?g|Z79q;0lDgTgX2sXwu9T5)0+94mtb9ks2tcS>FGn0@Bkvg>^pKU*%Xhho*m9pf zx%qj9Iraqe*Xcq2=6Q=0TVcg{dDS!tHrqUMHl@F)d=7Ru(;`jZyfK8mc39rdnEyjS zG&ka3wDlyoBoHNUVHQ`MX*u~91e3tYulfBT@2f!R_~Qpkb!NF>FPWR3025e;)j|e| zS;wTNGyCzgwOvE_O+{JK^2vA;oDl-*9k-Ra~3)zdGwV@ zc5!Yz1kzSDmvEOA=H`~0xN?SX6m`XKxFXe<(41B1EgnVo@5!BI$;@;12^t5PlI-I% z7u$-<0b7C(0NsFj)vLOZo`e&-|=w8T0lox7<5PIdGjC~%@6!D`Ll zjC_#-?_YeaL1N%k1BRqmcTR46wf^5kETMcp3r5?MK*{-~{iK5@Wx3 z3jBmO%qh@4Vm=-Oq0UB=UHNm84%aYZ&&6o7O5Cv-iIZEH*v@siMRac3)-31vjEDA> zVMX2SZ;9Fpr;G&U_ifLYjTJsPl1V~Fk7Y?HV`Z0gAS`3HgCACFVknQdEh?a67P}?> zK+Ez?!pSG!!*n98ro{u09OyEq1Iq8dJc$!!ICtQyU%`&DUR8YqJ0K=dQK<7v&;1S? z^1&SZ`9woi#UW-f({?CcGdIAlgD#+>Ky&oBS8WOXeRE5n`c#=sifu ze+Yy#@!tzH4>-ncyBcy|72A1HI;sH4#AylIn48Hi4I0SFFJuW?jz@JG^o=fmUqgB* zD03^ZbmGVEMn#?gc^`n+0V_wHsn0~`3);>*( z@9Pm|H_9%dVFJJTgb^olZ1v@m%cSGvTm#gO0n?&}GY&tKwm=4d=PvXHveohQ*WJjN z%uG*H-u?yiQNggTEVYW!OrE80>-KExLBuchOFyUIoo-I1V&jCW&%Z68XXw$5JFZ4) zhSFm-&Q6PjlFlVghF&wGX+o>$i&Zp7){WlB<0c%GY3i9|Tsw6&nHw{un%qEpUs$Al z%RFxLmkvR1h8{q@)TIqFRn*kAT(jE6jAZ$__QW59k~Ix%69e5(`_|6+^S5|)PFBBs z9+O}(a@i(Ma3=F8E5GdU(=dPbn3w;AI$AUvM8EpE@Lo17hFQbg{e5ifyk~B@2MmdVNmcqsY#;bbfpg}t@v4GbU>hA zbcCGVT96xnhcpRWYykxM?V-p|AKi5}BQCzpb8I>`S#Yz9n!A`oQ>0Pfth;om2}h{-h8;Q)1k}*YE|}zuvOv zq6af46Xx4|UfO_IY~hfLQdUs{1$0UPhctSxMpfyye04saal5l6R z;BpC7mS=IL5%7z;9#Y$%2rF_B`P9*ltncNOfzMgdBvxGtr^%{nks2egvk|}!*AGhD zN!bhSg9YtS`f!MAdW|CUoUVRI8FbpsH$a1G-9qv%HFLtEUyFx+{m-C*`jQy8C*VeZWkw0QQx?iM%9G0R(X zYy$-iUY3W`4l@%nfDep?1HZdpC24E6J8hgUbFEXkm*h6GHkn_~*oPiaNjxt<8DOAu zJKY1eyICTzMHH6B<3nl9%d*-PA0*}=NS~H_v7wgSUVG~z0Af13lu_(l%i@niidA)} zD58AO&l0>OReZKF`ook6o3N-hY+ZI)dx_6bEZZ0`1CNU{(lQ}-WJ`Kq0)6NoY!~w^ z(AVqk2VuKHcX_F_wc2UxfQ9{OZ&W!bVEA<`D2xzI&hj={nw?7|A7Vn{VG>H4p?KG_ zlUWYm(s*V$`Yvl8O%5?O!@D4T5%YVe+y?jH>R^L=_)=~;>wL-x1zO36Cr2?V+WrS+ z?;e*_w*UXrbf)HReQ z`yEmIz(oU+V~<|5gTnf9CcIq?fI%>iE7!`SqL}tLB>V;0oog#;J73Blewg0fQ`gao zi14Dh_ynC+-wCm-_f!|)(eQFc@*~E}Oi?-00CQn4el81L??Nd{J;@BVInY+l>~+A* z^fVPRTWp{MZnlNcf&FGk4fa^IGg`NOEXagwnrFawamWT9Sl@F5Nx2S>Wd<%gQ7X}5 zn^I6@8A@HGO7{U*n6waK3Ju%EZ+tJt)!hh?wrzt-+F}=RJz($Y3fz1$&YosfGvKD> zxXSPH@QfYi&T2eg^A0sz6MwTn!9!0~*YN;){EkLZ_NEy2F%OB*L}p;Q_C``X{ub_> zuu@`KzU*z$reMk^9I`{gV!-{Jvk=bWfv8p{>V1Frmm#M=#vsS-Bx2`v$DPX;mx_|J z=*!1@Dx}|4DpmdgymKU2!O1!syDVE;)1GR(af|E&i-z>!Z6!fQjel|#xn~KP2-

GZNFKH`pVG6EGg4T6KaOD6yo~+x5vAY)i<%Dw zLHaas0flFMIvov&Zq5p#MBAeYU;w#lv!PV~Ou{3(O=~wznIrlPZ!UDmBG( z&Q0Q21V}Q)KX(!x#jZjp$(6noHC|+k@D$M_i#h62NnCLsO1jro;Is>l21#Dr<2KFX z&iL{7&9&tQds`K@0MAx zD#;G%kV=WvQwvLYO|cd*X|0tRg{F%yHdmf$njwET-}l67_&rp=82_!Yx9FNbYT?IW zU$&t_UhC)Eaj$LhU}>%#(VaQ4a4)A{jK@L!dzJC>g_3HOeq19A)p@jm;Uf-62PFFp zN0vpwtmNY=3wc!$%dVO` zjJj8zSXNqTPY<45%B%05dUC3!Di@uHe_&dwo|@{CffZ7ekXpXzo&4}2W{3J6orp{y z9aO|SovaBkD{zmeC>ar+W+oz-o1)@b?LeNqGWcdkJ9<`T3`{$W)oUhRnDCTlTq?M_}ZcRkVg(X9R_0wae<^#F0 zXVH4qp}!@uT#^6I9@O@}GvO`hq}|kG+6u>267!FurRz-5rhi(FpXU^YMcYnRa~QKe znV=ez;psBPV0gFH50cP;Zf-B>DQCI4|8gd89tlfg#WBr%XHh?le}hfG!ONPE_9t$k z<9eXC#6QnzPuNW<7h0^W#u|*dN2le6@in$m1Wmq3#EZ&;42vq_=P330iGpp=qu1go zR}(%F7U%ct47O6UL_vR`pZ&*qBEyPyzNR5}?f0{iKXX|H7tPqn5y@*O>f7L1Dr+Zg zg(_4ccdfFO$JWd%!DW-dr(=NObqjg&(iX(&OJDHE4!?KEB4PhFlfT#SIiC6cj}3-} z*D?*DQ2=vYM8ChT?yx_`pm|@~#@`3Il}RHXH*yKMenGMrB!LB1S@C};fSn63LZnAK z?7BGe%iibLnV|5UA-LO~|Cw~;ZxF>F{LqVuvPF`0^aS1z-BqW75P#9FQCRaN!a(BV zl2sz|_*kW#edV46&Nk{b7sAYu>adJb-@p!+9#fh%#DephHgZCeSl3Pe`$)g^S6Xw8 z^G13<5oH&{`a<#iBS(6k4FICtObw8R$1qPQpXsgZUL$F~r8t5-fh@YONFRJRjD5Ly z+?<-%Whh)C^`PeR@weQ^>;MXrcH&nbP5ayIKHM#WKii-9cDLm6W!mqfsb-e%{HiX|I;v7wSzc4qL5! ze)$vMdHQn8{qg)=B+oe);nA!H{PeMu)(E(nO!gpZ;yI{SOb@{04J% z70Mk+IrZ4@^GAK$>HmOehO)+QfjztJ)A_eVot7s z1X?Zc0jPK4md~mfRGrwOb z_)bqdAv0gJgclw_7(kk@(+GFB@dI3o8%3Wn-;Zruh`$Q)fm^zdfDcP+Sv@%CZ91qb ze(|p#O252o=W-gfW%KfA>6X<-q|t&TMFa8FFCmlJG@v}XXe4vq{Mhk-pyc!J34?DZ z|1AaGZ1Jf>k6k?D`X*QpO~>Bbnn2P*-*PX{IJm6;{=omhy^l$+VW*pY(mSx;_7weoT$V^WotAPb=a7j-n3yY8m}`U;XdTXAflNXbpeA^GKlt zGyw*3cgwx^BTtmHY~-v zGR6~(>-*~spBh{r5%W<+r09`LBWU^q;ZQ%qj(+(JrxX?ixs}dkj5)4vPGAn+szijJ zP?vB`hc`%nm-cV@gnUpbCj`S}I`!uq=cZ1nPm$CSbLfD2MDR;>QZB~&=gXNsD?Z&b z7ZGd*So%7zpoaXo`Gvbs%6nJMNc-j~(g$_zV8wgB2AK;y^FEXUMM!OBm0OLM86PBq zj5$*8kE_?MOp+KFFiV7O;<%z$TCe* zU9hF=%UqVT#=c=r8Pu;j-*Imd2fhCG)6Ms_w&+8z(f#B{_E|Vw9xcyx8cS+y-9{Ox zi<&}mB;_!SF?eIXuF9YPBn(d_bPA_%%g?On@fFW5j4Y#3UVb_33d6j4PLc3qa-&B! 
zrX{31R`jj3)Aa^Ws_hwv)Iu&H!V|1HN`1x;Qk!P#uJK=F4%By^A(%*a*Oyi2`r;_6 z0SRI}+10zn#=p=DMfB^@SS}pH(&KYrE&;V!vEHMPihW^Zz$XZr&Pea+ApEBE_HPjs z1NU<0hR#R|W^0!#!&DMmN5VG{r)v~23EVeLnBT4Xl@yCvALY}EA`k`leMi^G?nloj(PT@-b^2D!L# z;ZRo}F~6R7FP?0xQ0J3^LKr?)0k!VvcalR~GpK(B`kn009gLTb{urm+#Lk-rYSm8X z`C3l!SYtL8mcG2&;Vh>Qnfy|zov6o7VMb+qGyaC4lRpz*e-xS!ovq@QupL zH+fknrEWxKo$9q=o|;E0%y)p&>XIL6tteXoB3`Vtuk|M{*5|ixAIq5|?+6%nkK8hE z-4n9`LnI=~wE!&N?Tcp`K0OB3fFPq*ai25j9BE2?wjBS?#-AJjs}P=4*Rt<31GjZe zozOlIAe``2VR2%yCUDz8QcM;OJ*~K52bCxlJPxXq%7}nxiI{rJ}^aWpnd;#IEUIhoq@X zw^(zm@Yy;{E4QeAzLh~a$xh1`tZL^}TX!~(ctiF_nt`1y&7(*;|kC{&@ zUyR&hi0;v+P#3Di4efW9jsqn%Q|=v7G5Te`3~>2=#mE#pl7*ZHDG=b zS`3tX{4z9?Z-8*}W#xMAX@#HQ<2XTrknshN@t;#aq^Cv{Sizr|>UvD++!WQ*)%x4B zK(JQ_B&Hvl2J6;C2a8-6%1c&yw`!-WF--j4=WkI3vy)>5Ga{&oZO}aykPiZ?wrHsu zb!`xrrm@s$SEvi(z4E-JkrKcDt4+q$VgaxU1-1 ziDP27XPFBT4q(#pkF};xlqFZ_vYl(qI@4NkE_MSK`p5IdNr;XdrV-h{yq#^|F)PNB?-gCi|YKIo}ufYaH)*Wi-tv( zRqn~Jo|g_K3jSUc_;jF2-OzZL@V4!Uq05_>vXR#1>KAjvi`y?A?aqQsYg^{EGue)F zkC9qBvM-mQ|K8dL2PQ^Md{0hz`)Y3h<}ZI56KD4kq%;?Fss=g!Q@Cs5RhBNNdWM^- zN+8G*F)h@&b}e+M&0zCTqYmkXOx$}scCwvFP;gr=LUq!Lgz19I%~l`Hz280?sAR^0_S!sk0HtGas1PMt0*tasV z3*cAtRPsso${))({n{^ zshY;7S2`q2Rpt@rFr%oI`9=Z8V0q>tvCkEc&`%wow+)FW*X|ZyS#R>#6co9xClO~* z35)Vj52SLDn~FGb-T-;*=jxR3w~ zTfh3g+>SNnp=wdHxGBq$Q~|hwC{u{b_&ztsWKLdcdRTnixF=J~EYYq|ml7d~Oi8gZ zL*aukallJLo8OkA>EK_?5*>ck3;$!IWXTQ$F@ddftKAp3-fhApUDdtcvhrBhL0igj zURhEk2(#>lH`#4mano9+DtBr@B7|I)Zsl00zI&VB_y6Rm8vHR-H@h9S4u}W*N0v9_MZ30G1BQ#vP&m7BE58=*xz$q< zW{kenyFVM#X-hB2t_Cua>vy%wY1Y4#04P7QvD>l9P*M2x(*SZjmC;Qf>VYcI{61T!s_mH?yVM-?@zuOD*SFSSPXDaTp z>q_frNSSVqFpJ$XhWhvicX{rwBdtlIBjTIt(dh8yH&tXh!#zAeHP}4Y?BKG}wpaC1 zt>3qlFhgq5nrQz>U4{`v{ckUUulD?VRI+btV^a$W7@sG#?-sJE0s;!>YX*4o+g92e z(`VUV_Y6loBAti_4l94jN!d4NSt@^}Eqd`A7=bsV06H!Q_N7P#9Ps|En#EYDBUabm48HRFDW`Hv#LMs!V3mwa?JzhlLT3| zWvyk8Jzcg#<-hD8h{465F*5)u*<@x5aPmB}7>L|WIHnc+bGUei?kO0h9`J200X%KH6TwsSJxpQu82W^$pV z>*hpiAUQOf6D{mk zI5-~|7QS2ly;Lx2qtcjBUQO-kGN;TsxbhEkrL;OHz7p3Dm~`EDg~A>~V}gb^DT}hm zVo9xf0pOI1>-U#%BC->Mw?b*slk#Nul15rD^Ch{bRO3YRbqK&xxU5A6tff83w~Xw< z=AD&H9+Ob=<}EuKUoV=>PEiU<3AQ)enRV?>uz;W=D>riSxs%KYB!anRHV>o=6)=B@ zvsg^;Xr)S-cMi@2l!mNCELxw6$D4p+J+^{EOI<~h3okGu5hplpgXF)TUic6NO8$`u z*gQMeYG>B70xs4TF}{sf)xUSv16^+=F%F3I+@an$RbZMKAZ&ZRDo*)6aCuYe^4&hn zB&#r(0vwxj-;O8NQxh*WRROmrhu>RTNz9FC9cD7E3M*o|4tP8DpfZwd1uj8uN^fRW zg2^3Ni<^UBX)jmAEZhZ`o{#Q2C^2^TGm_ya1JQAhUaS z<{9#7`RT0qfx{q?xCe$*DcMR~~l;fdp% z!ze>R@Jt-zPCmWjLm*7s*q;|ASLCvrdR++QxzvP{bO42&D~ElkFN^r07)#qTAQy7; zLFGZww{Xcjr63sU-xuCJc$WWf11So#Q=Kufk0`#Ir|$W-(5UV0Dkb~vcOOB-#v#eC z3~4=;$m0Qr2d^pmjwv>73DM=}dho`_X-?`0t#+0x2Hf}>7dih&eqYQ3KfYbKKbI6e zCLM?y^WtmbFR)uQ)Z54vTO3?k>~a<;R$DZxM_ql)?1FNR?UR$fdq#FRrw2+>qFVP@ zimG_w(RylrqO64|KFLHNHU_+tggsK&_?y}$j9Y6eO)3$Vww7@%*#)(tW3VV#))4uD zRO0X66$t$2)YZmc+A+R_dL>TQb>lEZ3t7@xf|V%Yd>rt&8K4#>L&l`{qQg8E5@V5j zr9!wl;*nc4!g8Fw!@1yw;<2tBpT(Z;iyD)zX})OWN^6kab#iy?uADW9UjCNyQC@&P*86dSFY9JuEi`TH zcm79x;xm)^93BcoA4+!w`$Lt``9s1aeEO=VIAv zh3vuIDZsP7+bFdGe5Ub-BgT;HtK{#JZOSic_C+t0_UX=glLlbC;7=^>uJPp`A!&;f zXq$wtK>ku75zRHd<1UcUImV4cbFzK@aRE11R&Bj?@ zJhwW;D1Q_^6x{`!t40@hq0!wE0>S=K_7kL2te{`C(hmLy?vel*v|T;t#RQfAQd(4r zIDsl@FooRx0x6}k7YE@?RdYH&IT%&SwI-~A93`z1)0rJ9&%T6=WYT2M@zE3Fu6SC( zr6~B*E-K5i;q9KwFRURDB!?%x0jvy28bj!@7B(Y~e`3jQv1Hfz`4+F3;lCKXWmU$7 zM+JHGYzMxaEiKUBgMn~B-wQs{m?QT2fg(_&LOz` z6u2dB#a+-mBv+Fh5U5*Mq!Gkp{DYrC?5XxGC{X-ob5Gfn_Z|rBz3pF~JA7u=TR;3X zB$lm4UOnFU=`!iB88D;%Td-*;gT8}6uEU1Q_PLKhsh{<*X`6)WKa$Nf>s30xdMF4P z{eNpgpSQXCZd%>PV*7s)@uD?AHSxa^anSf<%lkg|q0|4~M6iC_zPztG^mE%*PxBeI 
zernjimdqjgn_R!8txdYRl6!(BTHzTce-tY8bIVSAc^0)tTZgl(t~3K*Odmqa$5I?Z@&J!P_g#;9hvRMK&XoZ zV$l|eq_t14eKcYf-26%0d-P{|;e7b3VzvH$?0&4IwxH`c_Q_gQ{1xEL~%5!2EnP(lsh6aXLqt1yqP;&mqv>3(~LXv6H*Eq z#z*g{vuEq4(6Io@_E1ZzDMyvGLTEiTgKC+&3L?m?zo&Tn{<*e3R6m`9G zY*+DBoE%{Q<*@^avAzq%SMRey>7T>?rcYx)=y|$x=kHD3C!$M0D)Q?3o3;f}R+6Yr zpc_YfRyj~zh}zD7UYcCo%veI1ytu{bL{ zq8G+LT_Au*Nq$hL#*UMeJ%?$PVRL8MKX%aHiwSkb;`m}ktYzKsv6~YnG}XHnQ#%=~f{QJtKOseu#zk=!DeA+DE6$C8k)uDiyw$Z(r1FsiLt(>*}3HP>74Od`T z-F+) z(>Vn&@F|+kK6?kNDW9T+dq?uD2xsU5X^gJI>l@W~OVP-#>eIBN z1_RTmEY&O2?T4Q1GwzryX`tI8RTeCDacB4=C?LzbgC{e|q>l-j)sfe)LQc%u;rk_w zgC$7(d?|0 z9?T{f>j0@5LuvU9IJy6?MR*cSqMe^jG^4vX`?X^z+K(^txx)0t40?Z-#XB=eyouiDprGvU`#w_ytdx-X%dxM(e!SwKoD1&T>&&J@@^7h z%c5c%%}UvAO5fi@d~mms?K3*_CSGzAcsgd@;H(wup7hea#*+A^bSFuhuSH=2StOTM z_@NVs2+3ryGXm3!H(^R}7EL_RsTj)n!akv2CF-^CydjN}$40ZOwY@RRcfmp}Y`xkK zKeoL~nu1;ou0J(loUS6B#6c0#p>2G0`B63pjOkL+jRJRxr}~~NRK!ZMqMZ>w2)+EB zXaVLCH{XP*w8lJQznw)&-0FTv2?kw#Jz|R?Ux)6tK)e8H&6x2)IP^oH?dHftk`t&( z31g`^S2GDhq)ebxZ5OCjjRoF;31xOR+o{pqbfUKCVFMYTn|K|B`v@B^(D-W+B*_8_4;sHDt_{Q`b@gZ;wh~$!Oyou zg!!}FF5JB#i=Atv_z=a4T4Y3yVbEhcssahON>K}g`@!@8n}?;`Fl$-nYaTEIByHl5 z?Szpk+v;hfdm$_yn$JnJJw>W{gvFp9iInd{0LDgGn2Dl*s>np!&COGJcjPUU6lCHf zg4rV>Gr|sSRuAEp$%!mRdOsP%ZBd-9nzX-Wtr{Axpj%DBNLQYjsWAoD+J& zJp)jW>i!#x1{jXmI;MWnDUR9u{Qt(mPQdR9G>-#_6=X?WP^)q$VToqEBubgPf=ue} zPL)jp{ws(l&H{9*;vsbzl+8uDsd462{akn)iV}SFtC6q^^1VSY7xH=&(r!)XR%8({ zj3thGTj=%Nuj?LvEouGbs!m3jf3m^Ln3rqZgc}zu+&7qUPnGw5oukexvNV(a3+KUqhPSO82MPPoTFy zcKndMULVGDasU6Y)kWQs(TZS$N|YK~#F8((mCC({OSTCrtX=Si`vM4%6LsY3S?oBW5$@Etyv2d3THig?H4cj7 zEUvHa?3|f|7tBz~l6cDt2j(9eq|WcJ(VHQkHjDFG%$>TeWh-9uPuQ(O7s68W9G%@N zMKs5We|27ltB2c;q|aoli=XaIMD&_b^$K3X%BQBb5QsG}iG8apfD zWSY_1Gescyoy2=5oLu}nQ8UTtH*xSMkgu}0b(x8BRepMhGU^x*Y2Q2G8aGT4B-;Evh-9n1c;-CseVjxqYt{uNPj zB#TO9F$@(GJ;zpF2Oy+P`CKr`A^UEIDsi{FkrhiwYT+9xQD zD`r>!T^W7zG`ouZTRaXS5i^4$OAU@>^7|xB7?554+%R_p1HvqvuRDR-eUT&=`JzUW z%N_|>aH?kKg;XZroO?m&UlyP?%=U#|?mc`O9XGb{(A3_zv6n6v(BtJz3zJ>1lwEFd z*>h$-9TN6^cHeO8NdjILLG-G3g;MLS+L%E3(2hny^XJEBI{5o3){eV5dB79$I6@w5>sptu0`-lv#$miw~^vXzM;2;g&czHY~;}gRl_D zvQ<1wd61l$??8FQE+osu4XQs$UMTz$&SY#)kA7^wo~Tn74M7n`jVVVE!+}@^bJ06|V+vv$gSd61K(5zPe860=wnn9lgF2)OYWU6w=#*K5;-%W3=86e=s zyreQW>)C=3OAvQ6jAk_rr|hjcIz0tyY)3Vmk_CL$0&vfla$NZ=E&)@V1X63CDHL{f z;TCr&sFpk1uuc4Ydt2`<7|JAe-0}1>W)>GPi)qGE|4Ubis zv|#`1By)!^;@kk>QxOTjjQKA*;5!$kenI5W)u)N3O`V7P3ckyu<&tdH9JQFvI*Qcsc3sw)XBn9j45dFhzJ8w(OAZ+2@_~8Sz$QrJj5{~D|_ni>)-qC`}xYbsY<5QzN zWpjV+Qj)XxXkVx)J^no9awj$*$XwDjUmj2Cb)cKjlh5!O6eS$FQiIb*WW`&@rJR{E z^zK>Y$5?Si=mBxum~W;PF=&8C6S*%WlNf5MwwJZhTk6eeJfs9xp%dRHM+!ss_L8~r zh@uMVbW)T6kFe+oHk8PQt+(Xy92lv;xZ1*{SUQ79YW70TGbJ{qWlyQ~0Z5cG@_Z02 zzwBSL@f?w@rLwj@-$459G(~v7Ni`A_-`ehu&W+vIvv@Oi+9sgciWzvOb{;R#^y;RL z8HmckYDk zNAlyT=MNVU?+&C||JXr!QN=e6`75DJVIpBRT3d)fxzDHWvx7$&Du)NcAWe*FEOi_g z_ChRfpMGkF9bI|tOz(sOfoqs@=|^9+X?a5<>wHjGQx-#fm-QH7NrSs*>E+QF>w5O< z#lWMhO4moS>tBZTA=mq=*-7H}Fb`IGjYd5?l4#N-42Eu}89_RIXTwfGMnXNy=CQ*Y z5ZBaJ1!akr`A$VCD_TmU9DH0D_-bh$F*AR)8&tc zrWyT9BOF~bxU*TVyd03deK0K9WbJd+TePG2*6NznOz`(LeQ&TEAj!$Zzclpiacz`5 zI{+ZaXNEKxvDx)Q87K<*n8MB*xrPE1^Z8{V0dZ0BNf~&{)JGpAv>f~;r2Y*zGv%y= zl3UcVpEtU$*zR1`$CEr(BniJfS3NHY7{^U!W3%GF7Q&1w)_)%mJPt|NiClqPH2j2? 
zHoVx<@z^p!1{1WQ+Eq1hHr+}5(+ocnW1HdynY>DK8@bPs4p#0O-1qT-w5%ferXW2< zNc>XrmycoX3hOrhJAeX@Ba=OVJhDLfHh69Gm5fDUkpSYf;OwyEcS`?3qo*};fl4=eaG;#k>*zzA|e0~!K@?88m6dYJbJK(7o zhUg}Pp~)?fK!=uW%Ev%2&^!4T>l?R< zv;y}C7B)VWN-%s<>R*$xzPFP;+t06Cw|RmIe7!GrYl2S`#T)52Z6#4Pl8_qhI>^PZ zR&PK5x9gn-6MTb(z6*bG3P#^G*N{rZn>zm&8evlCwE zU<7x#thqS#0pD!FLfp%1Q_wU84XWIY5Zu$5vF32ta zI*Q+lx!Wh>MuKdYk9;*pes$eY@NvlkDoZ|oU>EN`Y_1oHC`*b zY6-ICMnWb@wEK|j6Vk6BPa|7*-5MfvA|UOnS0b>uPVn>Q*&}f&uYlb(MjQ1b7F<{S=~sf|v0Id(YuE`l?JgqpD*ADeW^- zWiQu*R<(uZl>vNe4_!bEefD|wnH}@rPJ=Y^TyKs^Md3k&u&eKGHvgyX=*k&GdUUu5FW+77=kl8`h`)!4lt`YcF05H|(Z zDHXj*oIsSBK!iJkF1gogYK-pW8~|?GI}@6RbrC6`+0mqJSmw>sYup`scirfR~Hbu+wQos7>L~m8;Rd@hH$9{Q*$#3t56iXa_MuHvZxAy z@B8eU^JVt?P|6PNBx4~~5Qz1*c*VUIP%SxB>#bk16tw|>2m-rosa)rpL_hvEk{E3< zjQNfR1m`oB~?+5$W$prhJlk#KhSPm9@{C z`I7LWr`qcQJ#mL!rUX5|4O7~FmZ=%u^Av3h3mU!}P-GGhvW1oh+cd@_?Zi0 zo)p5_Hygut@OAkQ7ID!oJa(}imop$16K;qnVfL789^TdxC8E6TdWCIDERl%lRVUH) z1#+IERKnCfaPfA{lGsE2rw80P#gj<@7bT4GjeIXr4AG>^TTqDbjPxW#xYSUpNlm9fjsh%D_=zim zqg}URNI0sEXI*lNUxgee0M>q;(;P*r4}Ii=#95L$E3`zzYv?b^lb*HTQ!ie@p-T}- zvUAEO_AyHdo8S^^^W2lNrB`LEGBnLXBw z`ZunUjEDaj7fUDa@l;kC8ZVi_rbO$K+eMv};&=lK%r| z^qZ^pl}!XqrAh>SDlPn@wdy(xB+6~;1WNb%bJU)aSnBKr@mdM6{-cH2Bl5Q z7!56ZZbY#FoMe4o0d)+1offqL(iQrE0$p z8hea^E(<(92G{G;419|9zna>-@`rEw?0d?3SH+g`2vz>dvb}z1$#R!BL2kOIF`)Eq zyWV;Lc2d!Y-}S^ZEgDz`d+iCTM`KCCtN^y%SX>UAnbgDj>+_ios4j@9S^7rX={VP@ z%M44$j5W5(Wqk?rTUvicE?*m&S-=R&MQ&cnD`O3*J(mE9UpCd&vKA$X$J~{0+Q8wD zr~3Hg`M-fk;?}#N@G?4Kjv<-;ARx@M=34)@J7i@dj}3H;Agu#NkNcIHf1j%1E{8s) zEwx*+{<}(%IMZm&DftH>X=G%4If0;)A~E{bvb1CK4Q$iYV2*=htYn{K@=QnTp~p=> zje9O-DO7a!|0y?x84ooY&8=ukAYrQE`PIrzgKh-*c-q0p>@RP<_XY~Aselvo0MRSX_)K;OIadi#@ zqUu$8I=t+^GJRvnZY7aB;+Ue&WeI(&XLuN5Tj>(vKt0e`Uqtt(-0nScwG9-5}i-@Nj^@QTiBCFYysrg-ta@QLzFtBiT@Xpd2J*|7_4 zSG1-dLUj_kwQSi^+B!+P5x7kH$y$OIhN$ifQL2D;c9E;GafvkbAOApTG|vK;ByZvL z3+MuRn?ieOWB}3l#1c{Ja5T*HuuGV!gA2kLOAgfCez66zQ1S!=o4Z7&HNYInFzT(D zqVYK6z64gU9jK-g*do8}FD$8Hh3_=pkE;vTyTtQo>8?t)i=I zMP$*pQ4;Auj!drE(e^~fEX;^vWJK8x#3~ONlD9#JtKtaebxv-wZ*i#V-sGkhw?(`N zoTySBB@ema{7TX`eLmsVllYx(>~Hf5yik$N`Hmw;XMFp+$&{DmSDb<@w|)a|yIdan zK-Zg4df0ZLf4Xc;?M9Duimt9ZjM`Om9%+vBX!4wj_Iekc=zuUZA9lroT@A@q2k1j1 zcuhf1k!*U%i~c|fdZHzmGR9@d^OC$>@&XDB$di%O^2rZ=(jdzK_k`43;{z1;kqd4NW|AR!pu0Ao ze9$+iLz*)dj*AlQt(83Nviltbv~-k2i@ZjkE}m_Mm4SZ2q5RO*j3-qI`9TsJP{fcI zTB$RiEg!MTFYu~lFzb2~1cx&o5NpT0L?U@eO=C2o`dERBjz72M4W6v+y* z!W9EMM41y8==nJYnUex&UVCp{U$WQvcFGc``)$1yA&5~%>EeWrxb+VE(SvPq9Iikt z>Ps-Bz3fdIrtZVs%CIS=x+7YK8y-x*3TIPCy%aU&E<=H;h94YGmBzt)o9f{=ceeRP zMh|zl_a>FrH~6{u_8{MKr=0J8CLVu@xHR7c_IE{>cRIUr&LAJdN3y2H4O@Y>)K}EWZK#k3_ z!YetQQeRTNi(`W5sIH(Z94&vKlZQlFv5+yZgd z^+(AMsXWcjv9O(@-h_Smx`3OZ81ySLC-eNz*hu(rcP$TU-fS7^G@pX7=&GJA4}5uD z`14AoZKM@#|M?xHg9uN&Yo0hmbDm7zNv2;ON-lIqU2u~f&d^jza|$Y)41j#;^WD7?r9I}}?rOT+%WXjOI#eP>l^u&&NB;nvEh;(C5|Z+nh#PtwAUy#MRVNJ{=BgRp^8Yjw^JU4 z+~{>^to>1WFu3VGLlroBo~Vk!40IFRjg0noGKYivR}s~l4BD1`yMP;%?&KC9X%Wre zJ7Q;C?cZEjK5MbvU9l7N6~51|9KQl4i#VgqcLQKZ)-0UC5Xxto_&#C9mt1Hr(E&T! 
z_R86>&SE2*zs-`+rEve-ZbNpE_4|kfcoDq;ADoL=L7^5)R&y^r#SX6QTc{`sWL7M0xxCO~;W8=+Qqj-FC3aX45$aP$P3`cY{Dl zJ7~CBET$JLciN3ZxfAP|mO=XbQ3M~O1^)9?j1X{iZ`S%}XwuJ9=cC^L{|#W`wEP-4&` z#X@{pl-7w{nv961II)EP0w&VBT#uOEaRbgwrgiPqye#PV6iU+@^tf<$E`H-D@f@EKfp$gP&?*FA#10dyWky z_2?df|BVE4hWzkPh@skCigbe!>Ox-VrCXu4&0*u|ZX*FZ=5HS6=HRAljK9*v66Y^T z7gNYpykm6v+MeTq5@!4lq@Vn~_pJNd#{qko`Ok{HB&M2n<#DPMo{pE^rzw@F$22zT z4YpIZdy?V7m_I6d5@}&#{_}b57bXY8f6$BWep`7*{PniMRsT?zAEBhLFZwE+{sIx< z#zvEZzj}N#d7>yF+S^;s-A}!Tf;da;cdXlQ@WrKX zIR#L!6A*~UtD=9VUFmZB{`<%GDj>)A%5i_cgumMR+29;tyt#6v^V-Lk$6Mv!*h4n% zK@=F}2v4zsKczu^J(2wCtJlFFUs}Fg@A$xJ|HE|lhWd3_V+h2sYy-eLv;@a|4h{TI;WW~M5_LxUFi7hu;h`+n!BsS7@>D&1_gxPZ`MQI|$ zOYQQnG$A0bH!{2bg|ZG`^RPhQmWV?xd)BP`cUY2D)b;4Ju%BFd9~p}3#tNiE1*qu1 zWwk6nZ-2}9V%@scSv6WNw2)9`_04zx-m|t z+&xQ+xspX|R4g`0sqQCqhv4J&UC-a?zZ)bX*5$!{D9$O`Nyng(j!oRK@heykwo6;? z@8BXS%-QG9-{2yIvEM+923)i=IoBg%vvKIy(I66PGv)W-@r9!*Bk`4lPCJ#Vv3A*M zF8l5C{xUYR(JAb=`Hk-w2$Fv?VzTR~9Ve#RiOp0V6CdCDvE?>+#hS0X?%y_9n-?Gb zAb#D$h-(t6|KO>~4dTBk-zQC(s>~s{kJs$dZ}(h(h!RJmcV%ydEYSRuDYLUj)eWwY zg=<&d@sD0e7nRUfr^B4gdL=|F;JJt|+nLs;=PELLsO8wf}VZ;Ma18 zy?bJ1QsL8V%I%T5IBvnY5&`rKNxU-E^~6WiSrlVhz3{}4O;*qmB$^MBQ;hvMcfUE= zc2ND*>tA(*_g$QBrHA(FD;s)gl=lapY)ZE_-ycsV-%`@!z&21d9G8-KG+voK`p`Qg zsb$w2p7^%SQonIE#6cYBAElucy>L1;XXEtqI(g!nh|BF};9yfu1hL)GoSk(v{$=M@ zb(1~vmhbLZynl29`|kCxMnQv>Oh`X#eAI2}>QZ*eIdx0AG=%P=d+ykFNOFE{!a4Rt zqyPD_@Tt6U4LbJCeSIT*+ha*9HRgVf|5rZEBfWin!uQ!!%JF-;?B^Hi1Z91J@qU@n zX8W@@-cO&>)YLSJURmh&O}m8c%s(Rf&d?Z=aLfhp|Mk4|Rg*FZ z%x$WrGeKNA^Zu`jD(6q6h8pm1BICci=S(@BeThJ5j2*4`hwbtk27@e*j?2Vf>?j*6t z4=u4I0+kw7^lDGD;QAH=G9n5POG;AfE{e@!x7+jcwo#D3qC%Lf=VT-9P&M(d%T4)G zNwMDA%;hFOEEt1hoynb_cq7P* zlF7{J;zmI~sX1{xBhn!6(Psh=cV z>Hk&5hnEBziDzkQ*c)uX?=KsQSw&U?6WZfLZ20D(y@P{;Mo5b-UM-|&PE%{) zTGi5^wQqBVW>A&bwZM&`4avb@yMt<^ZN>gUK~%`5aN3@n#0|W>A{t8z;($7WT^;uD z@R)`hMJ;yQ*pr>la6FnNn_M*ts7cdTMLU~rGx^nDyy9_l96t>5m!%E7?@1jcTN$KU$fZt zwPl9y+thIAVvb#thz2hDp8`C-&D|NfFUi1D9s0XbYe2`(jN2r#HG*u!w7R1OvcKS0 z7AA-?=~Aw~`gc7fCQ-;?CwT;_h4m913NWmL+O!Ju{GY~F=0@s-(0VAy-o7aljdrDD z)a>Hq9rOKvAn@sl54~e71FM(E{0W4U?Izd0*rHr-7$b^zV_=W|A(aC6(r-ZptHUSr za@#r(Gol_ORTUQQm$w+cDiIgOYHm^9NB;YGaAJUXvXeM)h0pc~s(O&~k_e(%hx6>e zZbN;g6}v{<=Ut8Ridi02H4ntNZ}PId?mwc1i#*m z|57e%8bfuHZP^8{as?%-S7OONk~s%Lr@I5WQKN4jh(ug1uB*qT2YJ0MH@{R8pk>R> z>Ny*qP_M!B$}bPLhqnm{6!RL;b8YQvm<4ybr$m?@HhA2{Rw4dT^9_2QGu!vNf_CYcN#=tqr%k; z!vHbx&Obxx<>lpE26yKUL=mHhJBHhEI~nWDT$0T*02qWEBz0QrcXIp>*Zm?lwW#_! zlAcRU5f0GI!Cya8L^^cKF+y?(xJ~BwN|l)5UJ37tfqZGuux=uS=NFn5m-EXl@#KipaAZx3{l1 zZbm5~mbO*vP^E`sWMt&NbH$H60_KKm6%>dtEq1CY0b6_h3nLi4J7PRt*m>4_#*Woa@n4?9+H6y-p4YI>$M4r?DSzK`$ot@E|E-(6ax|Zacy9 zc;@WNGVoWlN%dlB5&Mzt4YQ{{=l5Fkb+maF!UV*w1r2(c383AUpY$0pTRMB%{|G2p z9IUrV_}p;#(mi_!Yk=WDM^7IM*=u-C>hAFmA8)xm-(ay3Y&)?gkBg9-Uy z+?hT0@2#XX1&g>Ur%6$K-DXx19^*!i>k)H zrz*i}JsG#(nh&9P;GHsOt5*fHv9ie29NV{mnw-QPd~pd*$fY*htefP#5fvWbrVY)H zHl9qcb4)(48{r^}k4?rpSZQ2?jsqnDid$L5th8r_uCj=mssS>f632yUrjX(}v)k^M z`rcAPbW6uo@WfiPfvy}JP|NS`DJY00pA~TcYH&qWi}NE!CTiosg=`jv0K3bn*!cZr z?u5xWe{|oY{_G2q-5?9tv_K zn-W~@3uMpXS|@Vtjm=zuQ&j-Zi(vW-9i^(*>f3E>N z2X+8_#@T)rqX07X!9+^(e&WLdY7Vg)mjz<60Cr1VlhpE;zu5i?cWsE2Bp^3!j;BOU5sc zg_HEH;^N5=h3H_BsYHAnV;@!V>=3)hNX2|E)ZP)k*;|yrj#tssIoNW>IlZD>tbE%DeV4ARe~DFU}O<3@h}nF!S{IN+Qq&dkg(`T{H1$XiK(TLsXV zFd9KRdiBaALj3*U{Cc9zPvG**_|V^BhxPPxyR)9I;N|S|eA%)6wcI55NfbmOaval? 
zgQ}N(`9(|!5uOt0&NK@nc)1E_BJB=C7CxH=?^{sH_nNxc9_|H5oqXWJku?byO)({O z)$34Ef@HIlEXxd=xISRsw<6u|L*!K$>#^#Hv(Qy=u6;&aSyg4zRp?P(OF$+5)@z9g zr5n1x*b`VO;vlMz?Ra_`@xs{q%lq&CA*YGe(j9@!-H0oeeJt^IDS^*Xaz-&~4L5H{ zNa-G~ED&a^EA|#e$2FkLmKe*9iw({dk>{M4_ zdxQS1>_mWk2=}YkDR~$FA?vv*8l(j literal 0 HcmV?d00001 diff --git a/doc/modules/widgets.rst b/doc/modules/widgets.rst index 86c541dfd0..4532b74b93 100644 --- a/doc/modules/widgets.rst +++ b/doc/modules/widgets.rst @@ -14,6 +14,9 @@ Since version 0.95.0, the :py:mod:`spikeinterface.widgets` module supports multi * | :code:`sortingview`: web-based and interactive rendering using the `sortingview `_ | and `FIGURL `_ packages. +Version 0.100.0, also come with this new backend: +* | :code:`ephyviewer`: interactive Qt based using the `ephyviewer `_ package + Installing backends ------------------- @@ -85,6 +88,28 @@ Finally, if you wish to set up another cloud provider, follow the instruction fr `kachery-cloud `_ package ("Using your own storage bucket"). +ephyviewer +^^^^^^^^^^ + +This backend is Qt based with PyQt5, PyQt6 or PySide6 support. Qt is sometimes tedious to install. + + +For pip based install, run: + +.. code-block:: bash + + pip install PySide6 ephyviewer + + +Anaconda user will have a better experience with this: + +.. code-block:: bash + + conda install pyqt=5 + pip install ephyviewer + + + Usage ----- @@ -215,6 +240,21 @@ For example, here is how to combine the timeseries and sorting summary generated print(url) +ephyviewer +^^^^^^^^^^ + + +The :code:`ephyviewer` backend is only available for :py:func:`~spikeinterface.widgets.plot_traces()` functions. + + +.. code-block:: python + + plot_traces(recording, backend="ephyviewer", mode="line", show_channel_ids=True) + + +.. image:: ../images/plot_traces_ephyviewer.png + + Available plotting functions ---------------------------- From 74ae24ea47393db3a90b6fc9bc9765c3b833bb89 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 21 Sep 2023 08:55:34 +0000 Subject: [PATCH 82/90] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/widgets/traces.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/src/spikeinterface/widgets/traces.py b/src/spikeinterface/widgets/traces.py index e046623eb7..7bb2126744 100644 --- a/src/spikeinterface/widgets/traces.py +++ b/src/spikeinterface/widgets/traces.py @@ -523,7 +523,7 @@ def plot_sortingview(self, data_plot, **backend_kwargs): backend_kwargs["display"] = False self.url = handle_display_and_url(self, self.view, **backend_kwargs) - + def plot_ephyviewer(self, data_plot, **backend_kwargs): import ephyviewer from ..preprocessing import depth_order @@ -534,15 +534,14 @@ def plot_ephyviewer(self, data_plot, **backend_kwargs): win = ephyviewer.MainViewer(debug=False, show_auto_scale=True) for k, rec in dp.recordings.items(): - if dp.order_channel_by_depth: rec = depth_order(rec, flip=True) sig_source = ephyviewer.SpikeInterfaceRecordingSource(recording=rec) view = ephyviewer.TraceViewer(source=sig_source, name=k) - view.params['scale_mode'] = 'by_channel' + view.params["scale_mode"] = "by_channel" if dp.show_channel_ids: - view.params['display_labels'] = True + view.params["display_labels"] = True view.auto_scale() win.add_view(view) @@ -550,7 +549,6 @@ def plot_ephyviewer(self, data_plot, **backend_kwargs): app.exec() - def _get_trace_list(recordings, channel_ids, time_range, segment_index, order=None, 
return_scaled=False): # function also used in ipywidgets plotter k0 = list(recordings.keys())[0] From 383040c5b063a7427ebe7dc7daf1945d5bf95a07 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Thu, 21 Sep 2023 10:59:29 +0200 Subject: [PATCH 83/90] doc --- doc/modules/widgets.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/modules/widgets.rst b/doc/modules/widgets.rst index 4532b74b93..426a1e02e6 100644 --- a/doc/modules/widgets.rst +++ b/doc/modules/widgets.rst @@ -269,7 +269,7 @@ Available plotting functions * :py:func:`~spikeinterface.widgets.plot_spikes_on_traces` (backends: :code:`matplotlib`, :code:`ipywidgets`) * :py:func:`~spikeinterface.widgets.plot_template_metrics` (backends: :code:`matplotlib`, :code:`ipywidgets`, :code:`sortingview`) * :py:func:`~spikeinterface.widgets.plot_template_similarity` (backends: ::code:`matplotlib`, :code:`sortingview`) -* :py:func:`~spikeinterface.widgets.plot_timeseries` (backends: :code:`matplotlib`, :code:`ipywidgets`, :code:`sortingview`) +* :py:func:`~spikeinterface.widgets.plot_traces` (backends: :code:`matplotlib`, :code:`ipywidgets`, :code:`sortingview`, :code:`ephyviewer`) * :py:func:`~spikeinterface.widgets.plot_unit_depths` (backends: :code:`matplotlib`) * :py:func:`~spikeinterface.widgets.plot_unit_locations` (backends: :code:`matplotlib`, :code:`ipywidgets`, :code:`sortingview`) * :py:func:`~spikeinterface.widgets.plot_unit_summary` (backends: :code:`matplotlib`) From 1c48ce2cdbbd32c8e317f621805132c30e0e5efd Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Thu, 21 Sep 2023 11:26:01 +0200 Subject: [PATCH 84/90] Update doc/modules/widgets.rst --- doc/modules/widgets.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/modules/widgets.rst b/doc/modules/widgets.rst index 426a1e02e6..5f71767a7d 100644 --- a/doc/modules/widgets.rst +++ b/doc/modules/widgets.rst @@ -101,7 +101,7 @@ For pip based install, run: pip install PySide6 ephyviewer -Anaconda user will have a better experience with this: +Anaconda users will have a better experience with this: .. code-block:: bash From b74bbae7a2944e220bde5cb4ff1fa63cb76c9d64 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Thu, 21 Sep 2023 11:26:13 +0200 Subject: [PATCH 85/90] Update doc/modules/widgets.rst --- doc/modules/widgets.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/modules/widgets.rst b/doc/modules/widgets.rst index 5f71767a7d..4c8d2f9258 100644 --- a/doc/modules/widgets.rst +++ b/doc/modules/widgets.rst @@ -244,7 +244,7 @@ ephyviewer ^^^^^^^^^^ -The :code:`ephyviewer` backend is only available for :py:func:`~spikeinterface.widgets.plot_traces()` functions. +The :code:`ephyviewer` backend is currently only available for the :py:func:`~spikeinterface.widgets.plot_traces()` function. .. code-block:: python From 36e197fd784d228ea6ee798ce1a1169e1c0c8a5a Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Thu, 21 Sep 2023 11:37:14 +0200 Subject: [PATCH 86/90] Update doc/modules/widgets.rst --- doc/modules/widgets.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/modules/widgets.rst b/doc/modules/widgets.rst index 4c8d2f9258..8565e94fce 100644 --- a/doc/modules/widgets.rst +++ b/doc/modules/widgets.rst @@ -94,7 +94,7 @@ ephyviewer This backend is Qt based with PyQt5, PyQt6 or PySide6 support. Qt is sometimes tedious to install. -For pip based install, run: +For a pip-based installation, run: .. 
code-block:: bash From df0504c2748e4086304447fafb857efd4a2110c2 Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Thu, 21 Sep 2023 11:38:46 +0200 Subject: [PATCH 87/90] adding some typing (#2031) --- src/spikeinterface/core/sparsity.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/spikeinterface/core/sparsity.py b/src/spikeinterface/core/sparsity.py index 455edcfc80..8c5c62d568 100644 --- a/src/spikeinterface/core/sparsity.py +++ b/src/spikeinterface/core/sparsity.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import numpy as np from .recording_tools import get_channel_distances, get_noise_levels @@ -125,7 +127,7 @@ def unit_id_to_channel_indices(self): self._unit_id_to_channel_indices[unit_id] = channel_inds return self._unit_id_to_channel_indices - def sparsify_waveforms(self, waveforms: np.ndarray, unit_id: str) -> np.ndarray: + def sparsify_waveforms(self, waveforms: np.ndarray, unit_id: str | int) -> np.ndarray: """ Sparsify the waveforms according to a unit_id corresponding sparsity. @@ -159,7 +161,7 @@ def sparsify_waveforms(self, waveforms: np.ndarray, unit_id: str) -> np.ndarray: return sparsified_waveforms - def densify_waveforms(self, waveforms: np.ndarray, unit_id: str) -> np.ndarray: + def densify_waveforms(self, waveforms: np.ndarray, unit_id: str | int) -> np.ndarray: """ Densify sparse waveforms that were sparisified according to a unit's channel sparsity. @@ -199,7 +201,7 @@ def densify_waveforms(self, waveforms: np.ndarray, unit_id: str) -> np.ndarray: def are_waveforms_dense(self, waveforms: np.ndarray) -> bool: return waveforms.shape[-1] == self.num_channels - def are_waveforms_sparse(self, waveforms: np.ndarray, unit_id: str) -> bool: + def are_waveforms_sparse(self, waveforms: np.ndarray, unit_id: str | int) -> bool: non_zero_indices = self.unit_id_to_channel_indices[unit_id] num_active_channels = len(non_zero_indices) return waveforms.shape[-1] == num_active_channels From e3cb9bb14ee56e07cbc251556482b9a861d465a2 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Thu, 21 Sep 2023 12:00:26 +0200 Subject: [PATCH 88/90] Typing and docstrings --- .../extractors/phykilosortextractors.py | 26 ++++++++++++------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/src/spikeinterface/extractors/phykilosortextractors.py b/src/spikeinterface/extractors/phykilosortextractors.py index 2769e03344..d32846dd79 100644 --- a/src/spikeinterface/extractors/phykilosortextractors.py +++ b/src/spikeinterface/extractors/phykilosortextractors.py @@ -1,3 +1,6 @@ +from __future__ import __annotations__ + +from typing import Optional, List from pathlib import Path import numpy as np @@ -13,7 +16,7 @@ class BasePhyKilosortSortingExtractor(BaseSorting): ---------- folder_path: str or Path Path to the output Phy folder (containing the params.py) - exclude_cluster_groups: list or str, optional + exclude_cluster_groups: list or str, default: None Cluster groups to exclude (e.g. "noise" or ["noise", "mua"]). keep_good_only : bool, default: True Whether to only keep good units. 
@@ -33,11 +36,11 @@ class BasePhyKilosortSortingExtractor(BaseSorting): def __init__( self, - folder_path, - exclude_cluster_groups=None, - keep_good_only=False, - remove_empty_units=False, - load_all_cluster_properties=True, + folder_path: Path | str, + exclude_cluster_groups: Optional[List[str] | str] = None, + keep_good_only: bool = False, + remove_empty_units: bool = False, + load_all_cluster_properties: bool = True, ): try: import pandas as pd @@ -199,7 +202,7 @@ class PhySortingExtractor(BasePhyKilosortSortingExtractor): ---------- folder_path: str or Path Path to the output Phy folder (containing the params.py). - exclude_cluster_groups: list or str, optional + exclude_cluster_groups: list or str, default: None Cluster groups to exclude (e.g. "noise" or ["noise", "mua"]). load_all_cluster_properties : bool, default: True If True, all cluster properties are loaded from the tsv/csv files. @@ -213,7 +216,12 @@ class PhySortingExtractor(BasePhyKilosortSortingExtractor): extractor_name = "PhySorting" name = "phy" - def __init__(self, folder_path, exclude_cluster_groups=None, load_all_cluster_properties=True): + def __init__( + self, + folder_path: Path | str, + exclude_cluster_groups: Optional[List[str] | str] = None, + load_all_cluster_properties: bool = True, + ): BasePhyKilosortSortingExtractor.__init__( self, folder_path, @@ -250,7 +258,7 @@ class KiloSortSortingExtractor(BasePhyKilosortSortingExtractor): extractor_name = "KiloSortSorting" name = "kilosort" - def __init__(self, folder_path, keep_good_only=False, remove_empty_units=True): + def __init__(self, folder_path: Path | str, keep_good_only: bool = False, remove_empty_units: bool = True): BasePhyKilosortSortingExtractor.__init__( self, folder_path, From 195f03c2a710dadc9978cc9f9369f571a7e31554 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Thu, 21 Sep 2023 12:17:12 +0200 Subject: [PATCH 89/90] oups --- src/spikeinterface/extractors/phykilosortextractors.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/spikeinterface/extractors/phykilosortextractors.py b/src/spikeinterface/extractors/phykilosortextractors.py index d32846dd79..96c0415c65 100644 --- a/src/spikeinterface/extractors/phykilosortextractors.py +++ b/src/spikeinterface/extractors/phykilosortextractors.py @@ -1,4 +1,4 @@ -from __future__ import __annotations__ +from __future__ import annotations from typing import Optional, List from pathlib import Path From 8e3324b77849a00467fc75f146663ee39201204c Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Thu, 21 Sep 2023 13:26:14 +0200 Subject: [PATCH 90/90] List -> list --- src/spikeinterface/extractors/phykilosortextractors.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/spikeinterface/extractors/phykilosortextractors.py b/src/spikeinterface/extractors/phykilosortextractors.py index 96c0415c65..05aee160f5 100644 --- a/src/spikeinterface/extractors/phykilosortextractors.py +++ b/src/spikeinterface/extractors/phykilosortextractors.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import Optional, List +from typing import Optional from pathlib import Path import numpy as np @@ -37,7 +37,7 @@ class BasePhyKilosortSortingExtractor(BaseSorting): def __init__( self, folder_path: Path | str, - exclude_cluster_groups: Optional[List[str] | str] = None, + exclude_cluster_groups: Optional[list[str] | str] = None, keep_good_only: bool = False, remove_empty_units: bool = False, load_all_cluster_properties: bool = True, @@ -219,7 +219,7 @@ class 
PhySortingExtractor(BasePhyKilosortSortingExtractor): def __init__( self, folder_path: Path | str, - exclude_cluster_groups: Optional[List[str] | str] = None, + exclude_cluster_groups: Optional[list[str] | str] = None, load_all_cluster_properties: bool = True, ): BasePhyKilosortSortingExtractor.__init__(
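For readers who want to try the ephyviewer backend introduced earlier in this series, a minimal end-to-end sketch follows. It only restates the call already shown in the widgets.rst example above; the toy recording from ``generate_recording`` is an assumption used purely for illustration (any recording extractor works the same way), and ``ephyviewer`` plus a Qt binding (e.g. PySide6) must be installed as described in the installation section.

.. code-block:: python

    import spikeinterface.full as si

    # Toy recording for illustration only; replace with any RecordingExtractor.
    recording = si.generate_recording(num_channels=16, durations=[10.0])

    # Opens an interactive Qt window (ephyviewer TraceViewer), with per-channel
    # scaling and channel labels as configured in TracesWidget.plot_ephyviewer above.
    si.plot_traces(recording, backend="ephyviewer", mode="line", show_channel_ids=True)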