
Commit b273d08
Merge pull request #3275 from zm711/doc-spacing
More numpydoc fixes
alejoe91 authored Aug 1, 2024
2 parents 6f63f76 + 8574793
Showing 11 changed files with 95 additions and 95 deletions.
4 changes: 2 additions & 2 deletions src/spikeinterface/comparison/basecomparison.py
@@ -63,9 +63,9 @@ def compute_subgraphs(self):
         Computes subgraphs of connected components.

         Returns
         -------
-        sg_object_names: list
+        sg_object_names : list
             List of sorter names for each node in the connected component subgraph
-        sg_units: list
+        sg_units : list
             List of unit ids for each node in the connected component subgraph
         """
         if self.clean_graph is not None:
110 changes: 55 additions & 55 deletions src/spikeinterface/comparison/comparisontools.py
@@ -14,16 +14,16 @@ def count_matching_events(times1, times2, delta=10):
     Parameters
     ----------
-    times1: list
+    times1 : list
         List of spike train 1 frames
-    times2: list
+    times2 : list
         List of spike train 2 frames
-    delta: int
+    delta : int
         Number of frames for considering matching events

     Returns
     -------
-    matching_count: int
+    matching_count : int
         Number of matching events
     """
     times_concat = np.concatenate((times1, times2))
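For context, a minimal sketch of how this helper can be exercised; the frame values are made up for illustration, and the import path follows the file shown above:

    import numpy as np
    from spikeinterface.comparison.comparisontools import count_matching_events

    # Two spike trains given as frame indices (hypothetical values).
    times1 = np.array([100, 250, 400, 900])
    times2 = np.array([103, 258, 800])

    # Spikes from the two trains that fall within `delta` frames of each
    # other count as matching events: here 100~103 and 250~258.
    n_match = count_matching_events(times1, times2, delta=10)
    print(n_match)  # expected: 2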
@@ -45,16 +45,16 @@ def compute_agreement_score(num_matches, num1, num2):
     Parameters
     ----------
-    num_matches: int
+    num_matches : int
         Number of matches
-    num1: int
+    num1 : int
         Number of events in spike train 1
-    num2: int
+    num2 : int
         Number of events in spike train 2

     Returns
     -------
-    score: float
+    score : float
         Agreement score
     """
     denom = num1 + num2 - num_matches
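The visible line of the body gives the formula away: the score divides the number of matches by the size of the union of the two event sets, score = num_matches / (num1 + num2 - num_matches). A quick sanity check, assuming the division that presumably follows the `denom` line shown:

    from spikeinterface.comparison.comparisontools import compute_agreement_score

    # 80 coincident events out of 100 and 90 events respectively:
    # union = 100 + 90 - 80 = 110, so the score should be 80 / 110 ≈ 0.727.
    score = compute_agreement_score(num_matches=80, num1=100, num2=90)
    print(round(score, 3))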
@@ -71,12 +71,12 @@ def do_count_event(sorting):
     Parameters
     ----------
-    sorting: SortingExtractor
+    sorting : SortingExtractor
         A sorting extractor

     Returns
     -------
-    event_count: pd.Series
+    event_count : pd.Series
         Nb of spike by units.
     """
     import pandas as pd
@@ -90,14 +90,14 @@ def count_match_spikes(times1, all_times2, delta_frames): # , event_counts1, ev
     Parameters
     ----------
-    times1: array
+    times1 : array
         Spike train 1 frames
-    all_times2: list of array
+    all_times2 : list of array
         List of spike trains from sorting 2

     Returns
     -------
-    matching_events_count: list
+    matching_events_count : list
         List of counts of matching events
     """
     matching_event_counts = np.zeros(len(all_times2), dtype="int64")
@@ -337,18 +337,18 @@ def make_agreement_scores(sorting1, sorting2, delta_frames, ensure_symmetry=True):
     Parameters
     ----------
-    sorting1: SortingExtractor
+    sorting1 : SortingExtractor
         The first sorting extractor
-    sorting2: SortingExtractor
+    sorting2 : SortingExtractor
         The second sorting extractor
-    delta_frames: int
+    delta_frames : int
         Number of frames to consider spikes coincident
-    ensure_symmetry: bool, default: True
+    ensure_symmetry : bool, default: True
         If ensure_symmetry is True, then the algo is run two times by switching sorting1 and sorting2.
         And the minimum of the two results is taken.

     Returns
     -------
-    agreement_scores: array (float)
+    agreement_scores : array (float)
         The agreement score matrix.
     """
     import pandas as pd
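Typical usage, sketched under the assumption that `sorting_gt` and `sorting_tested` are two existing SortingExtractor objects sampled at the same rate (the pandas import visible above suggests the matrix comes back as a DataFrame):

    from spikeinterface.comparison.comparisontools import make_agreement_scores

    # `sorting_gt` and `sorting_tested` are assumed to already exist;
    # 10 frames is about 0.33 ms at a 30 kHz sampling rate.
    scores = make_agreement_scores(sorting_gt, sorting_tested, delta_frames=10)

    # Rows index units of sorting1, columns index units of sorting2;
    # each entry is the agreement score for that unit pair.
    print(scores)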
@@ -401,16 +401,16 @@ def make_possible_match(agreement_scores, min_score):
     Parameters
     ----------
-    agreement_scores: pd.DataFrame
+    agreement_scores : pd.DataFrame
-    min_score: float
+    min_score : float

     Returns
     -------
-    best_match_12: pd.Series
+    best_match_12 : pd.Series
-    best_match_21: pd.Series
+    best_match_21 : pd.Series
     """
     unit1_ids = np.array(agreement_scores.index)
@@ -442,16 +442,16 @@ def make_best_match(agreement_scores, min_score):
     Parameters
     ----------
-    agreement_scores: pd.DataFrame
+    agreement_scores : pd.DataFrame
-    min_score: float
+    min_score : float

     Returns
     -------
-    best_match_12: pd.Series
+    best_match_12 : pd.Series
-    best_match_21: pd.Series
+    best_match_21 : pd.Series
     """
     import pandas as pd
@@ -490,14 +490,14 @@ def make_hungarian_match(agreement_scores, min_score):
     ----------
     agreement_scores: pd.DataFrame
-    min_score: float
+    min_score : float

     Returns
     -------
-    hungarian_match_12: pd.Series
+    hungarian_match_12 : pd.Series
-    hungarian_match_21: pd.Series
+    hungarian_match_21 : pd.Series
     """
     import pandas as pd
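The three matchers above share one calling pattern: they take the agreement-score matrix plus a minimum score and return unit-to-unit mappings in both directions. A hedged sketch, reusing the `scores` DataFrame from the previous snippet:

    from spikeinterface.comparison.comparisontools import (
        make_possible_match,
        make_best_match,
        make_hungarian_match,
    )

    min_score = 0.5

    # Threshold-based and greedy variants:
    possible_12, possible_21 = make_possible_match(scores, min_score)
    best_12, best_21 = make_best_match(scores, min_score)

    # Optimal one-to-one assignment (Hungarian algorithm):
    hungarian_12, hungarian_21 = make_hungarian_match(scores, min_score)

    # Each *_12 result maps a sorting1 unit id to its matched sorting2
    # unit id (and vice versa for *_21).
    print(hungarian_12)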
@@ -541,22 +541,22 @@ def do_score_labels(sorting1, sorting2, delta_frames, unit_map12, label_misclassification):
     Parameters
     ----------
-    sorting1: SortingExtractor instance
+    sorting1 : SortingExtractor instance
         The ground truth sorting
-    sorting2: SortingExtractor instance
+    sorting2 : SortingExtractor instance
         The tested sorting
-    delta_frames: int
+    delta_frames : int
         Number of frames to consider spikes coincident
-    unit_map12: pd.Series
+    unit_map12 : pd.Series
         Dict of matching from sorting1 to sorting2
-    label_misclassification: bool
+    label_misclassification : bool
         If True, misclassification errors are labelled

     Returns
     -------
-    labels_st1: dict of lists of np.array of str
+    labels_st1 : dict of lists of np.array of str
         Contain score labels for units of sorting 1 for each segment
-    labels_st2: dict of lists of np.array of str
+    labels_st2 : dict of lists of np.array of str
         Contain score labels for units of sorting 2 for each segment
     """
     unit1_ids = sorting1.get_unit_ids()
@@ -647,12 +647,12 @@ def compare_spike_trains(spiketrain1, spiketrain2, delta_frames=10):
     Parameters
     ----------
-    spiketrain1, spiketrain2: numpy.array
+    spiketrain1, spiketrain2 : numpy.array
         Times of spikes for the 2 spike trains.

     Returns
     -------
-    lab_st1, lab_st2: numpy.array
+    lab_st1, lab_st2 : numpy.array
         Label of score for each spike
     """
     lab_st1 = np.array(["UNPAIRED"] * len(spiketrain1))
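A small sketch of the spike-train-level comparison; the frame values are hypothetical, and the exact label strings beyond the "UNPAIRED" initialization visible above are not shown in this diff:

    import numpy as np
    from spikeinterface.comparison.comparisontools import compare_spike_trains

    st1 = np.array([100, 250, 400])  # hypothetical spike frames
    st2 = np.array([102, 251, 900])

    lab_st1, lab_st2 = compare_spike_trains(st1, st2, delta_frames=10)
    # One label per spike; spikes with no partner within delta_frames
    # keep the "UNPAIRED" initialization.
    print(lab_st1, lab_st2)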
@@ -684,19 +684,19 @@ def do_confusion_matrix(event_counts1, event_counts2, match_12, match_event_count):
     Parameters
     ----------
-    event_counts1: pd.Series
+    event_counts1 : pd.Series
         Number of event per units 1
-    event_counts2: pd.Series
+    event_counts2 : pd.Series
         Number of event per units 2
-    match_12: pd.Series
+    match_12 : pd.Series
         Series of matching from sorting1 to sorting2.
         Can be the hungarian or best match.
-    match_event_count: pd.DataFrame
+    match_event_count : pd.DataFrame
         The match count matrix given by make_match_count_matrix

     Returns
     -------
-    confusion_matrix: pd.DataFrame
+    confusion_matrix : pd.DataFrame
         The confusion matrix
         index are units1 reordered
         columns are units2 redordered
@@ -746,19 +746,19 @@ def do_count_score(event_counts1, event_counts2, match_12, match_event_count):
     Parameters
     ----------
-    event_counts1: pd.Series
+    event_counts1 : pd.Series
         Number of event per units 1
-    event_counts2: pd.Series
+    event_counts2 : pd.Series
         Number of event per units 2
-    match_12: pd.Series
+    match_12 : pd.Series
         Series of matching from sorting1 to sorting2.
         Can be the hungarian or best match.
-    match_event_count: pd.DataFrame
+    match_event_count : pd.DataFrame
         The match count matrix given by make_match_count_matrix

     Returns
     -------
-    count_score: pd.DataFrame
+    count_score : pd.DataFrame
         A table with one line per GT units and columns
         are tp/fn/fp/...
     """
@@ -837,16 +837,16 @@ def make_matching_events(times1, times2, delta):
     Parameters
     ----------
-    times1: list
+    times1 : list
         List of spike train 1 frames
-    times2: list
+    times2 : list
         List of spike train 2 frames
-    delta: int
+    delta : int
         Number of frames for considering matching events

     Returns
     -------
-    matching_event: numpy array dtype = ["index1", "index2", "delta"]
+    matching_event : numpy array dtype = ["index1", "index2", "delta"]
         1d of collision
     """
     times_concat = np.concatenate((times1, times2))
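Unlike count_matching_events, this variant returns the collisions themselves as a structured array with the "index1"/"index2"/"delta" fields listed above. A short sketch with hypothetical frames:

    import numpy as np
    from spikeinterface.comparison.comparisontools import make_matching_events

    times1 = np.array([100, 250, 400])
    times2 = np.array([105, 260, 800])

    matching = make_matching_events(times1, times2, delta=20)
    # For each collision: the spike's index in times1, its partner's
    # index in times2, and their frame offset.
    print(matching["index1"], matching["index2"], matching["delta"])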
@@ -894,14 +894,14 @@ def make_collision_events(sorting, delta):
     Parameters
     ----------
-    sorting: SortingExtractor
+    sorting : SortingExtractor
         The sorting extractor object for counting collision events
-    delta: int
+    delta : int
         Number of frames for considering collision events

     Returns
     -------
-    collision_events: numpy array
+    collision_events : numpy array
         dtype = [('index1', 'int64'), ('unit_id1', 'int64'),
                  ('index2', 'int64'), ('unit_id2', 'int64'),
                  ('delta', 'int64')]
4 changes: 2 additions & 2 deletions src/spikeinterface/curation/curation_format.py
@@ -87,7 +87,7 @@ def convert_from_sortingview_curation_format_v0(sortingview_dict, destination_fo
     Returns
     -------
-    curation_dict: dict
+    curation_dict : dict
         A curation dictionary
     """

@@ -138,7 +138,7 @@ def curation_label_to_vectors(curation_dict):
     Returns
     -------
-    labels: dict of numpy vector
+    labels : dict of numpy vector
     """
     unit_ids = list(curation_dict["unit_ids"])
10 changes: 5 additions & 5 deletions src/spikeinterface/curation/curation_tools.py
@@ -106,18 +106,18 @@ def find_duplicated_spikes(
     Parameters
     ----------
-    spike_train: np.ndarray
+    spike_train : np.ndarray
         The spike train on which to look for duplicated spikes.
-    censored_period: int
+    censored_period : int
         The censored period for duplicates (in sample time).
-    method: "keep_first" |"keep_last" | "keep_first_iterative" | "keep_last_iterative" |random", default: "random"
+    method : "keep_first" |"keep_last" | "keep_first_iterative" | "keep_last_iterative" |random", default: "random"
         Method used to remove the duplicated spikes.
-    seed: int | None
+    seed : int | None
         The seed to use if method="random".

     Returns
     -------
-    indices_of_duplicates: np.ndarray
+    indices_of_duplicates : np.ndarray
         The indices of spikes considered to be duplicates.
     """

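A usage sketch for the duplicate finder; the spike train is made up, and removing the flagged indices is left to the caller since the function only returns them:

    import numpy as np
    from spikeinterface.curation.curation_tools import find_duplicated_spikes

    # A spike train (in samples) containing two near-duplicate pairs.
    spike_train = np.array([1000, 1002, 5000, 5001, 9000])

    # Spikes closer than the censored period are considered duplicates;
    # method="random" keeps a random spike of each pair, hence the seed.
    idx = find_duplicated_spikes(spike_train, censored_period=5, method="random", seed=42)

    clean_train = np.delete(spike_train, idx)
    print(clean_train)  # expected: three spikes remain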
2 changes: 1 addition & 1 deletion src/spikeinterface/curation/splitunitsorting.py
@@ -13,7 +13,7 @@ class SplitUnitSorting(BaseSorting):
     Parameters
     ----------
-    sorting: BaseSorting
+    sorting : BaseSorting
         The sorting object
     parent_unit_id : int
         Unit id of the unit to split