diff --git a/src/spikeinterface/preprocessing/tests/test_whiten.py b/src/spikeinterface/preprocessing/tests/test_whiten.py
index 01c9051298..1abed61a79 100644
--- a/src/spikeinterface/preprocessing/tests/test_whiten.py
+++ b/src/spikeinterface/preprocessing/tests/test_whiten.py
@@ -218,7 +218,7 @@ def test_compute_covariance_matrix(self, dtype):
         is cast to float before computing the covariance matrix,
         otherwise it can overflow.
         """
-        eps = 1e-8
+        eps = 1e-16
         _, cov_mat, recording = self.get_float_test_data(num_segments=1, dtype=dtype)

         whitened_recording = whiten(
@@ -271,7 +271,7 @@ def test_compute_covariance_matrix_2_segments(self):
         the zeros do not affect the covariance estimation but the covariance
         matrix is scaled by 1 / N.
         """
-        eps = 1e-8
+        eps = 1e-16
         _, cov_mat, recording = self.get_float_test_data(num_segments=2, dtype=np.float32)

         all_zero_data = np.zeros(
@@ -309,7 +309,7 @@ def test_apply_mean(self, apply_mean):
         """
         means = np.array([10, 20, 30])

-        eps = 1e-8
+        eps = 1e-16
         _, cov_mat, recording = self.get_float_test_data(num_segments=1, dtype=np.float32, means=means)

         whitened_recording = whiten(
@@ -375,10 +375,10 @@ def test_whiten_regularisation_norm(self):
             apply_mean=True,
             num_chunks_per_segment=1,
             chunk_size=recording.get_num_samples(segment_index=0) - 1,
-            eps=1e-8,
+            eps=1e-16,
         )

-        test_cov_mat = self.cov_mat_from_whitening_mat(whitened_recording, eps=1e-8)
+        test_cov_mat = self.cov_mat_from_whitening_mat(whitened_recording, eps=1e-16)

         # Compute covariance matrix using sklearn directly and compare.
         X = recording.get_traces()[:-1, :]
@@ -413,7 +413,7 @@ def test_local_vs_global_whiten(self):
             apply_mean=True,
             num_chunks_per_segment=1,
             chunk_size=recording.get_num_samples(segment_index=0) - 1,
-            eps=1e-8,
+            eps=1e-16,
             mode=mode,
             radius_um=y_dist + 1e-01,
         )
diff --git a/src/spikeinterface/preprocessing/whiten.py b/src/spikeinterface/preprocessing/whiten.py
index 8e5c761012..03e56ae20b 100644
--- a/src/spikeinterface/preprocessing/whiten.py
+++ b/src/spikeinterface/preprocessing/whiten.py
@@ -198,12 +198,11 @@ def compute_whitening_matrix(
     # type and we estimate a more reasonable eps in the case
     # where the data is on a scale less than 1.
     if eps is None:
-        if data.dtype.kind == "f":
-            median_data_sqr = np.median(data**2)  # use the square because cov (and hence S) scales as the square
-            if median_data_sqr < 1 and median_data_sqr > 0:
-                eps = max(1e-16, median_data_sqr * 1e-3)  # use a small fraction of the median of the squared data
+        median_data_sqr = np.median(data**2)  # use the square because cov (and hence S) scales as the square
+        if median_data_sqr < 1 and median_data_sqr > 0:
+            eps = max(1e-16, median_data_sqr * 1e-3)  # use a small fraction of the median of the squared data
         else:
-            eps = 1e-8
+            eps = 1e-16

     if mode == "global":
         W = compute_whitening_from_covariance(cov, eps)
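
For reference, a minimal standalone sketch of the eps heuristic as it stands after this change. The pick_eps helper and the example arrays below are illustrative only, not part of spikeinterface; they mirror the branch in compute_whitening_matrix above: when the caller passes no eps, the median of the squared data drives the choice, with 1e-16 as both the floor and the fallback for data on a scale of 1 or larger (or all-zero data).

import numpy as np

def pick_eps(data, eps=None):
    # Hypothetical helper mirroring the heuristic in compute_whitening_matrix above.
    if eps is None:
        median_data_sqr = np.median(data**2)  # cov (and hence S) scales as the square of the data
        if 0 < median_data_sqr < 1:
            eps = max(1e-16, median_data_sqr * 1e-3)  # small fraction of the median squared sample
        else:
            eps = 1e-16  # data on a scale >= 1 (or all zeros): fixed tiny regulariser
    return eps

rng = np.random.default_rng(0)
unscaled = rng.normal(0, 50, size=10_000)  # e.g. int16-scale amplitudes cast to float
scaled = unscaled * 1e-4                   # e.g. traces already converted to physical units

print(pick_eps(unscaled))          # 1e-16, since median(data**2) >= 1
print(pick_eps(scaled))            # ~median(scaled**2) * 1e-3, floored at 1e-16
print(pick_eps(scaled, eps=1e-8))  # an explicit eps is always respected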