This repository has been archived by the owner on Jun 6, 2023. It is now read-only.

Commit 817fd11
Merge pull request #42 from SpikeInterface/fix_typo
compute_performence -> compute_performance
alejoe91 authored Mar 11, 2021
2 parents d53d279 + de84132 commit 817fd11
Showing 6 changed files with 9 additions and 9 deletions.
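The change is a pure rename: every occurrence of the misspelled compute_performence becomes compute_performance, together with a small install-order tweak in .travis.yml.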
.travis.yml (1 addition, 1 deletion)
@@ -6,8 +6,8 @@ python:
 - '3.6'
 install:
 - pip install https://github.com/SpikeInterface/spikeextractors/archive/master.zip
-- pip install https://github.com/SpikeInterface/spikesorters/archive/master.zip
 - pip install https://github.com/SpikeInterface/spiketoolkit/archive/master.zip
+- pip install https://github.com/SpikeInterface/spikesorters/archive/master.zip
 - pip install .
 - pip install pytest==4.3
 - pip install matplotlib==3.2.2
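The reorder presumably ensures the development build of spiketoolkit is already installed before spikesorters, so pip does not pull in a released spiketoolkit as a dependency instead.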
spikecomparison/__init__.py (1 addition, 1 deletion)
@@ -2,7 +2,7 @@
 
 from .comparisontools import (count_matching_events, compute_agreement_score, count_match_spikes,
                               make_agreement_scores, make_possible_match, make_best_match, make_hungarian_match,
-                              do_score_labels, compare_spike_trains, do_confusion_matrix, do_count_score, compute_performence)
+                              do_score_labels, compare_spike_trains, do_confusion_matrix, do_count_score, compute_performance)
 from .symmetricsortingcomparison import compare_two_sorters, SymmetricSortingComparison
 from .groundtruthcomparison import compare_sorter_to_ground_truth, GroundTruthComparison
 from .multisortingcomparison import compare_multiple_sorters, MultiSortingComparison
spikecomparison/collisioncomparison.py (1 addition, 1 deletion)
@@ -3,7 +3,7 @@
 from .groundtruthcomparison import GroundTruthComparison
 #~ from .comparisontools import (do_score_labels, make_possible_match,
 #~                               make_best_match, make_hungarian_match, do_confusion_matrix, do_count_score,
-#~                               compute_performence)
+#~                               compute_performance)
 from .comparisontools import make_collision_events
 
 
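In this file the rename only touches a commented-out import (the #~ prefix); it is updated anyway, presumably so the dormant code stays consistent with the new name.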
spikecomparison/comparisontools.py (2 additions, 2 deletions)
@@ -608,9 +608,9 @@ def do_count_score(event_counts1, event_counts2, match_12, match_event_count):
 _perf_keys = ['accuracy', 'recall', 'precision', 'false_discovery_rate', 'miss_rate']
 
 
-def compute_performence(count_score):
+def compute_performance(count_score):
     """
-    This compte perf formula.
+    This compute perf formula.
     this trick here is that it works both on pd.Series and pd.Dataframe
     line by line.
     This it is internally used by perf by psiketrain and poll_with_sum.
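For orientation, here is a minimal sketch of what a function with this signature plausibly computes, assuming count_score carries per-unit tp / fn / fp counts. The column names 'tp', 'fn', 'fp' and the exact formulas are assumptions for illustration, not taken from this diff; the real implementation lives further up in comparisontools.py.

import pandas as pd

def compute_performance_sketch(count_score):
    # Sketch only: the column names 'tp', 'fn', 'fp' are assumed.
    tp, fn, fp = count_score['tp'], count_score['fn'], count_score['fp']
    perf = {
        'accuracy': tp / (tp + fn + fp),          # keys mirror _perf_keys above
        'recall': tp / (tp + fn),
        'precision': tp / (tp + fp),
        'false_discovery_rate': fp / (tp + fp),
        'miss_rate': fn / (tp + fn),
    }
    # The docstring's "trick": the identical arithmetic works row by row on a
    # DataFrame (one row per unit) or on a single Series.
    if isinstance(count_score, pd.DataFrame):
        return pd.DataFrame(perf)
    return pd.Series(perf)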
spikecomparison/groundtruthcomparison.py (2 additions, 2 deletions)
@@ -3,7 +3,7 @@
 from .basecomparison import BaseTwoSorterComparison
 from .comparisontools import (do_score_labels, make_possible_match,
                               make_best_match, make_hungarian_match, do_confusion_matrix, do_count_score,
-                              compute_performence)
+                              compute_performance)
 
 
 class GroundTruthComparison(BaseTwoSorterComparison):
@@ -165,7 +165,7 @@ def get_performance(self, method='by_unit', output='pandas'):
             perf = self.count_score
 
         elif method == 'by_unit':
-            perf = compute_performence(self.count_score)
+            perf = compute_performance(self.count_score)
 
         elif method == 'pooled_with_average':
             perf = self.get_performance(method='by_unit').mean(axis=0)
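As a usage sketch of the renamed path through the public API: compare_sorter_to_ground_truth is re-exported in __init__.py above, and the NumpySortingExtractor / set_times_labels construction follows the legacy spikeextractors API used by this repo's tests. Treat the details as assumptions rather than a verbatim recipe.

import numpy as np
import spikeextractors as se
import spikecomparison as sc

# Build two tiny sorting objects (assumed legacy spikeextractors API).
gt_sorting = se.NumpySortingExtractor()
gt_sorting.set_times_labels(times=np.array([100, 200, 300]), labels=np.array([0, 0, 1]))
gt_sorting.set_sampling_frequency(30000)
tested_sorting = se.NumpySortingExtractor()
tested_sorting.set_times_labels(times=np.array([101, 201, 301]), labels=np.array([5, 5, 6]))
tested_sorting.set_sampling_frequency(30000)

comp = sc.compare_sorter_to_ground_truth(gt_sorting, tested_sorting)

# method='by_unit' goes through the renamed compute_performance();
# 'pooled_with_average' averages the per-unit rows, as the hunk above shows.
perf_by_unit = comp.get_performance(method='by_unit')
perf_pooled = comp.get_performance(method='pooled_with_average')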
spikecomparison/tests/test_comparisontools.py (2 additions, 2 deletions)
@@ -4,7 +4,7 @@
 from spikecomparison.comparisontools import (do_count_event, make_match_count_matrix, make_agreement_scores,
                                              make_possible_match, make_best_match, make_hungarian_match,
                                              do_score_labels, compare_spike_trains, do_confusion_matrix, do_count_score,
-                                             compute_performence)
+                                             compute_performance)
 
 
 def make_sorting(times1, labels1, times2, labels2):
@@ -206,7 +206,7 @@ def test_do_count_score_and_perf():
     assert count_score.at[0, 'tested_id'] == 0
     assert count_score.at[1, 'tested_id'] == 5
 
-    perf = compute_performence(count_score)
+    perf = compute_performance(count_score)
     #  print(perf)
 
     assert perf.at[0, 'accuracy'] == 2 / 3
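Under the accuracy definition sketched earlier, tp / (tp + fn + fp), the asserted 2 / 3 would correspond to two matched spikes against one miss or false positive; the exact counts come from the test fixture, which is not shown in this diff.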
