Merge pull request #2898 from samuelgarcia/rem_verbose_from_job_kwargs
remove verbose from job_kwargs
samuelgarcia authored May 23, 2024
2 parents bd88c1e + df515ce commit 0df2536
Showing 16 changed files with 36 additions and 31 deletions.
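In short: verbose no longer travels inside job_kwargs; callers now pass it as an explicit argument to the functions that still support it. A minimal before/after sketch of the migration, taken directly from the test updates below:

# before this PR: verbose was smuggled in with the execution options
job_kwargs = dict(verbose=False, n_jobs=1)
write_binary_recording(recording, file_paths=file_paths, dtype=dtype, **job_kwargs)

# after this PR: verbose is an explicit parameter, job_kwargs holds only execution options
job_kwargs = dict(n_jobs=1)
write_binary_recording(recording, file_paths=file_paths, dtype=dtype, verbose=False, **job_kwargs)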
1 change: 0 additions & 1 deletion src/spikeinterface/core/job_tools.py
@@ -47,7 +47,6 @@
     "chunk_duration",
     "progress_bar",
     "mp_context",
-    "verbose",
     "max_threads_per_process",
 )

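For context, job_keys is the whitelist that fix_job_kwargs validates incoming job kwargs against, so deleting "verbose" here is what turns a stray verbose=... inside job_kwargs into an error across the rest of this PR. Below is a standalone sketch of that check (validate_job_kwargs is a hypothetical name, the tuple is abridged to the keys visible in this diff, and the library's actual fix_job_kwargs may differ):

job_keys = (
    "n_jobs",
    "chunk_size",
    "chunk_memory",
    "chunk_duration",
    "progress_bar",
    "mp_context",
    "max_threads_per_process",
)

def validate_job_kwargs(job_kwargs):
    # After this PR, "verbose" is absent from job_keys, so
    # dict(n_jobs=2, verbose=True) would be rejected here.
    for key in job_kwargs:
        if key not in job_keys:
            raise ValueError(f"{key!r} is not a valid job keyword argument")
    return dict(job_kwargs)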
5 changes: 4 additions & 1 deletion src/spikeinterface/core/recording_tools.py
@@ -73,6 +73,7 @@ def write_binary_recording(
     add_file_extension: bool = True,
     byte_offset: int = 0,
     auto_cast_uint: bool = True,
+    verbose: bool = True,
     **job_kwargs,
 ):
     """
@@ -98,6 +99,8 @@ def write_binary_recording(
     auto_cast_uint: bool, default: True
         If True, unsigned integers are automatically cast to int if the specified dtype is signed
         .. deprecated:: 0.103, use the `unsigned_to_signed` function instead.
+    verbose: bool
+        If True, output is verbose
     {}
     """
     job_kwargs = fix_job_kwargs(job_kwargs)
@@ -138,7 +141,7 @@
     init_func = _init_binary_worker
     init_args = (recording, file_path_dict, dtype, byte_offset, cast_unsigned)
     executor = ChunkRecordingExecutor(
-        recording, func, init_func, init_args, job_name="write_binary_recording", **job_kwargs
+        recording, func, init_func, init_args, job_name="write_binary_recording", verbose=verbose, **job_kwargs
     )
     executor.run()

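The same pattern generalizes to any helper built on ChunkRecordingExecutor: accept verbose as a named parameter and forward it by keyword, keeping **job_kwargs strictly for execution options. A sketch under stated assumptions (my_chunked_operation, _process_chunk, and _init_worker are placeholders; the constructor call mirrors the write_binary_recording hunk above, and the worker signatures follow the convention suggested by _init_binary_worker there):

from spikeinterface.core.job_tools import ChunkRecordingExecutor, fix_job_kwargs

def _init_worker(recording):
    # Build the per-worker context once per process (placeholder).
    return dict(recording=recording)

def _process_chunk(segment_index, start_frame, end_frame, worker_ctx):
    # Per-chunk work goes here (placeholder).
    pass

def my_chunked_operation(recording, verbose=True, **job_kwargs):
    job_kwargs = fix_job_kwargs(job_kwargs)  # "verbose" would no longer be accepted here
    executor = ChunkRecordingExecutor(
        recording,
        _process_chunk,
        _init_worker,
        (recording,),
        job_name="my_chunked_operation",
        verbose=verbose,
        **job_kwargs,
    )
    executor.run()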
18 changes: 10 additions & 8 deletions src/spikeinterface/core/tests/test_recording_tools.py
@@ -37,8 +37,8 @@ def test_write_binary_recording(tmp_path):
     file_paths = [tmp_path / "binary01.raw"]

     # Write binary recording
-    job_kwargs = dict(verbose=False, n_jobs=1)
-    write_binary_recording(recording, file_paths=file_paths, dtype=dtype, **job_kwargs)
+    job_kwargs = dict(n_jobs=1)
+    write_binary_recording(recording, file_paths=file_paths, dtype=dtype, verbose=False, **job_kwargs)

     # Check if written data matches original data
     recorder_binary = BinaryRecordingExtractor(
@@ -64,9 +64,11 @@ def test_write_binary_recording_offset(tmp_path):
     file_paths = [tmp_path / "binary01.raw"]

     # Write binary recording
-    job_kwargs = dict(verbose=False, n_jobs=1)
+    job_kwargs = dict(n_jobs=1)
     byte_offset = 125
-    write_binary_recording(recording, file_paths=file_paths, dtype=dtype, byte_offset=byte_offset, **job_kwargs)
+    write_binary_recording(
+        recording, file_paths=file_paths, dtype=dtype, byte_offset=byte_offset, verbose=False, **job_kwargs
+    )

     # Check if written data matches original data
     recorder_binary = BinaryRecordingExtractor(
@@ -97,8 +99,8 @@ def test_write_binary_recording_parallel(tmp_path):
     file_paths = [tmp_path / "binary01.raw", tmp_path / "binary02.raw"]

     # Write binary recording
-    job_kwargs = dict(verbose=False, n_jobs=2, chunk_memory="100k", mp_context="spawn")
-    write_binary_recording(recording, file_paths=file_paths, dtype=dtype, **job_kwargs)
+    job_kwargs = dict(n_jobs=2, chunk_memory="100k", mp_context="spawn")
+    write_binary_recording(recording, file_paths=file_paths, dtype=dtype, verbose=False, **job_kwargs)

     # Check if written data matches original data
     recorder_binary = BinaryRecordingExtractor(
@@ -127,8 +129,8 @@ def test_write_binary_recording_multiple_segment(tmp_path):
     file_paths = [tmp_path / "binary01.raw", tmp_path / "binary02.raw"]

     # Write binary recording
-    job_kwargs = dict(verbose=False, n_jobs=2, chunk_memory="100k", mp_context="spawn")
-    write_binary_recording(recording, file_paths=file_paths, dtype=dtype, **job_kwargs)
+    job_kwargs = dict(n_jobs=2, chunk_memory="100k", mp_context="spawn")
+    write_binary_recording(recording, file_paths=file_paths, dtype=dtype, verbose=False, **job_kwargs)

     # Check if written data matches original data
     recorder_binary = BinaryRecordingExtractor(
2 changes: 1 addition & 1 deletion src/spikeinterface/sorters/internal/simplesorter.py
@@ -71,7 +71,7 @@ def get_sorter_version(cls):
     def _run_from_folder(cls, sorter_output_folder, params, verbose):
         job_kwargs = params["job_kwargs"]
         job_kwargs = fix_job_kwargs(job_kwargs)
-        job_kwargs.update({"verbose": verbose, "progress_bar": verbose})
+        job_kwargs.update({"progress_bar": verbose})

         from spikeinterface.sortingcomponents.peak_detection import detect_peaks
         from spikeinterface.sortingcomponents.tools import extract_waveform_at_max_channel
2 changes: 1 addition & 1 deletion src/spikeinterface/sorters/internal/spyking_circus2.py
@@ -113,7 +113,7 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose):

         job_kwargs = params["job_kwargs"]
         job_kwargs = fix_job_kwargs(job_kwargs)
-        job_kwargs.update({"verbose": verbose, "progress_bar": verbose})
+        job_kwargs.update({"progress_bar": verbose})

         recording = cls.load_recording_from_folder(sorter_output_folder.parent, with_warnings=False)

4 changes: 2 additions & 2 deletions src/spikeinterface/sortingcomponents/clustering/circus.py
@@ -63,6 +63,7 @@ class CircusClustering:
         "noise_levels": None,
         "tmp_folder": None,
         "job_kwargs": {},
+        "verbose": True,
     }

     @classmethod
@@ -72,7 +73,7 @@ def main_function(cls, recording, peaks, params):
         job_kwargs = fix_job_kwargs(params["job_kwargs"])

         d = params
-        verbose = job_kwargs.get("verbose", True)
+        verbose = d["verbose"]

         fs = recording.get_sampling_frequency()
         ms_before = params["ms_before"]
@@ -250,7 +251,6 @@ def main_function(cls, recording, peaks, params):
             cleaning_matching_params.pop(value)
         cleaning_matching_params["chunk_duration"] = "100ms"
         cleaning_matching_params["n_jobs"] = 1
-        cleaning_matching_params["verbose"] = False
         cleaning_matching_params["progress_bar"] = False

         cleaning_params = params["cleaning_kwargs"].copy()
@@ -42,7 +42,7 @@ class PositionAndFeaturesClustering:
         "ms_before": 1.5,
         "ms_after": 1.5,
         "cleaning_method": "dip",
-        "job_kwargs": {"n_jobs": -1, "chunk_memory": "10M", "verbose": True, "progress_bar": True},
+        "job_kwargs": {"n_jobs": -1, "chunk_memory": "10M", "progress_bar": True},
     }

     @classmethod
@@ -38,7 +38,7 @@ class PositionAndPCAClustering:
         "ms_after": 2.5,
         "n_components_by_channel": 3,
         "n_components": 5,
-        "job_kwargs": {"n_jobs": -1, "chunk_memory": "10M", "verbose": True, "progress_bar": True},
+        "job_kwargs": {"n_jobs": -1, "chunk_memory": "10M", "progress_bar": True},
         "hdbscan_global_kwargs": {"min_cluster_size": 20, "allow_single_cluster": True, "core_dist_n_jobs": -1},
         "hdbscan_local_kwargs": {"min_cluster_size": 20, "allow_single_cluster": True, "core_dist_n_jobs": -1},
         "waveform_mode": "shared_memory",
@@ -26,7 +26,7 @@ class PositionPTPScaledClustering:
         "ptps": None,
         "scales": (1, 1, 10),
         "peak_localization_kwargs": {"method": "center_of_mass"},
-        "job_kwargs": {"n_jobs": -1, "chunk_memory": "10M", "verbose": True, "progress_bar": True},
+        "job_kwargs": {"n_jobs": -1, "chunk_memory": "10M", "progress_bar": True},
         "hdbscan_kwargs": {
             "min_cluster_size": 20,
             "min_samples": 20,
@@ -56,6 +56,7 @@ class RandomProjectionClustering:
         "smoothing_kwargs": {"window_length_ms": 0.25},
         "tmp_folder": None,
         "job_kwargs": {},
+        "verbose": True,
     }

     @classmethod
@@ -65,7 +66,7 @@ def main_function(cls, recording, peaks, params):
         job_kwargs = fix_job_kwargs(params["job_kwargs"])

         d = params
-        verbose = job_kwargs.get("verbose", True)
+        verbose = d["verbose"]

         fs = recording.get_sampling_frequency()
         radius_um = params["radius_um"]
@@ -161,7 +162,6 @@ def main_function(cls, recording, peaks, params):
             cleaning_matching_params[value] = None
         cleaning_matching_params["chunk_duration"] = "100ms"
         cleaning_matching_params["n_jobs"] = 1
-        cleaning_matching_params["verbose"] = False
         cleaning_matching_params["progress_bar"] = False

         cleaning_params = params["cleaning_kwargs"].copy()
@@ -55,7 +55,7 @@ class SlidingHdbscanClustering:
         "auto_merge_quantile_limit": 0.8,
         "ratio_num_channel_intersect": 0.5,
         # ~ 'auto_trash_misalignment_shift' : 4,
-        "job_kwargs": {"n_jobs": -1, "chunk_memory": "10M", "verbose": True, "progress_bar": True},
+        "job_kwargs": {"n_jobs": -1, "chunk_memory": "10M", "progress_bar": True},
     }

     @classmethod
5 changes: 3 additions & 2 deletions src/spikeinterface/sortingcomponents/tests/test_clustering.py
@@ -13,7 +13,7 @@


 def job_kwargs():
-    return dict(n_jobs=1, chunk_size=10000, progress_bar=True, verbose=True, mp_context="spawn")
+    return dict(n_jobs=1, chunk_size=10000, progress_bar=True, mp_context="spawn")


 @pytest.fixture(name="job_kwargs", scope="module")
@@ -78,6 +78,7 @@ def test_find_cluster_from_peaks(clustering_method, recording, peaks, peak_locations):
     peak_locations = run_peak_locations(recording, peaks, job_kwargs)
     # method = "position_and_pca"
     # method = "circus"
-    method = "tdc_clustering"
+    # method = "tdc_clustering"
+    method = "random_projections"

     test_find_cluster_from_peaks(method, recording, peaks, peak_locations)
@@ -47,7 +47,6 @@ def setup_module():
         detect_threshold=5,
         exclude_sweep_ms=0.1,
         chunk_size=10000,
-        verbose=1,
         progress_bar=True,
         pipeline_nodes=pipeline_nodes,
     )
@@ -156,12 +155,14 @@ def test_estimate_motion():
         bin_um=10.0,
         margin_um=5,
         output_extra_check=True,
-        progress_bar=False,
-        verbose=False,
     )
     kwargs.update(cases_kwargs)

-    motion, temporal_bins, spatial_bins, extra_check = estimate_motion(recording, peaks, peak_locations, **kwargs)
+    job_kwargs = dict(progress_bar=False)
+
+    motion, temporal_bins, spatial_bins, extra_check = estimate_motion(
+        recording, peaks, peak_locations, **kwargs, **job_kwargs
+    )

     motions[name] = motion

@@ -60,7 +60,7 @@ def sorting(dataset):


 def job_kwargs():
-    return dict(n_jobs=1, chunk_size=10000, progress_bar=True, verbose=True, mp_context="spawn")
+    return dict(n_jobs=1, chunk_size=10000, progress_bar=True, mp_context="spawn")


 @pytest.fixture(name="job_kwargs", scope="module")
@@ -10,8 +10,8 @@
 def test_localize_peaks():
     recording, _ = make_dataset()

-    # job_kwargs = dict(n_jobs=2, chunk_size=10000, verbose=False, progress_bar=True)
-    job_kwargs = dict(n_jobs=1, chunk_size=10000, verbose=False, progress_bar=True)
+    # job_kwargs = dict(n_jobs=2, chunk_size=10000, progress_bar=True)
+    job_kwargs = dict(n_jobs=1, chunk_size=10000, progress_bar=True)

     peaks = detect_peaks(
         recording, method="locally_exclusive", peak_sign="neg", detect_threshold=5, exclude_sweep_ms=0.1, **job_kwargs
@@ -23,13 +23,12 @@ def test_select_peaks():
         detect_threshold=5,
         exclude_sweep_ms=0.1,
         chunk_size=10000,
-        verbose=1,
         progress_bar=False,
         noise_levels=noise_levels,
     )

     peak_locations = localize_peaks(
-        recording, peaks, method="center_of_mass", n_jobs=2, chunk_size=10000, verbose=True, progress_bar=True
+        recording, peaks, method="center_of_mass", n_jobs=2, chunk_size=10000, progress_bar=True
     )

     n_peaks = 100
