Plotting #19

Open · wants to merge 13 commits into base: top-level_refactor
2 changes: 1 addition & 1 deletion dacapo/experiments/arraytypes/binary.py
@@ -1,4 +1,4 @@
-from arraytype import ArrayType
+from .arraytype import ArrayType
 
 import attr
 
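A note on why this one-character change matters: binary.py sits inside the dacapo.experiments.arraytypes package, and a bare `from arraytype import ...` is resolved against sys.path rather than against the enclosing package, so it fails once dacapo is installed. A minimal sketch of the distinction, with the package layout assumed from the file path shown in this diff:

    # Inside dacapo/experiments/arraytypes/binary.py (layout assumed from
    # the file path above):

    # Absolute form: Python 3 looks for a top-level module named
    # "arraytype" on sys.path; none exists, so this raises
    # ModuleNotFoundError:
    #     from arraytype import ArrayType

    # Explicit relative form: resolved against the enclosing package as
    # dacapo.experiments.arraytypes.arraytype, which is what the PR fixes:
    from .arraytype import ArrayType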
(second changed file in this PR; its path header was not captured in this view)
@@ -2,7 +2,7 @@

 from .array_config import ArrayConfig
 from .cellmap_array import CellMapArray
-from .array_config import ArrayConfig
+from .array import Array
 
 from typing import List
 
@@ -14,7 +14,7 @@ class CellMapArrayConfig(ArrayConfig):

     array_type = CellMapArray
 
-    source_array_config: ArrayConfig = attr.ib(
+    source_array_config: Array = attr.ib(
         metadata={
             "help_text": "The Array from which to pull annotated data. Is expected to contain a volume with uint64 voxels and no channel dimension"
         }
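For readers unfamiliar with the attr pattern used here, the sketch below shows how a field declared with `attr.ib(metadata={...})` behaves. It is illustrative only: Array is reduced to an empty placeholder and the constructor call is hypothetical, not dacapo's real wiring.

    import attr

    class Array:
        """Placeholder standing in for dacapo's Array interface."""

    @attr.s
    class CellMapArrayConfig:
        # The annotation is what this PR changes (ArrayConfig -> Array);
        # the metadata dict is surfaced to config tooling as help text.
        source_array_config: Array = attr.ib(
            metadata={"help_text": "The Array from which to pull annotated data."}
        )

    config = CellMapArrayConfig(source_array_config=Array())
    field = attr.fields(CellMapArrayConfig).source_array_config
    print(field.metadata["help_text"])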
49 changes: 39 additions & 10 deletions dacapo/experiments/validation_scores.py
@@ -1,6 +1,8 @@
 from .validation_iteration_scores import ValidationIterationScores
 from typing import List
 import attr
+import inspect
+import numpy as np
 
 
 @attr.s
@@ -29,31 +31,58 @@ def validated_until(self):
             return 0
         return max([score.iteration for score in self.iteration_scores]) + 1
 
+    def get_attribute_names(self, class_instance):
+
+        attributes = inspect.getmembers(
+            class_instance, lambda a: not(inspect.isroutine(a)))
+        names = [a[0] for a in attributes if not(
+            a[0].startswith('__') and a[0].endswith('__'))]
+        return names
+
-    '''
     def get_score_names(self):
 
-        for scores in self.scores:
-            for parameters, sample_scores in scores.items():
-                return sample_scores['scores']['average'].keys()
+        if self.iteration_scores:
+            example_parameter_scores = self.iteration_scores[0].parameter_scores
+            score_class_instance = example_parameter_scores[0][1]
+            return self.get_attribute_names(score_class_instance)
 
         raise RuntimeError("No scores were added, yet")
 
+    def get_postprocessor_parameter_names(self):
+
+        if self.iteration_scores:
+            example_parameter_scores = self.iteration_scores[0].parameter_scores
+            postprocessor_class_instance = example_parameter_scores[0][0]
+            return self.get_attribute_names(postprocessor_class_instance)
+
+        raise RuntimeError("No scores were added, yet")
+
     def get_best(self, score_name=None, higher_is_better=True):
 
         names = self.get_score_names()
+        postprocessor_parameter_names = self.get_postprocessor_parameter_names()
 
         best_scores = {name: [] for name in names}
-        for iteration_scores in self.scores:
+        best_score_parameters = {name: []
+                                 for name in postprocessor_parameter_names}
+
+        for iteration_score in self.iteration_scores:
             ips = np.array([
-                parameter_scores['scores']['average'].get(score_name, np.nan)
-                for parameter_scores in iteration_scores.values()
+                getattr(parameter_score[1], score_name, np.nan)
+                for parameter_score in iteration_score.parameter_scores
             ], dtype=np.float32)
             ips[np.isnan(ips)] = -np.inf if higher_is_better else np.inf
             i = np.argmax(ips) if higher_is_better else np.argmin(ips)
+            best_score = iteration_score.parameter_scores[i]
 
             for name in names:
                 best_scores[name].append(
-                    list(iteration_scores.values())[i]['scores']['average'].get(name, 0)
+                    getattr(best_score[1], name)
                 )
-        return best_scores
-    '''
+
+            for name in postprocessor_parameter_names:
+                best_score_parameters[name].append(
+                    getattr(best_score[0], name)
+                )
+
+        return (best_score_parameters, best_scores)
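To make the new API concrete, here is a hedged usage sketch. DummyParameters, DummyScore, and the hand-built parameter_scores list are stand-ins invented for illustration, not dacapo's real classes; only the (parameters, scores) pair shape that get_best indexes with [0] and [1] is taken from the diff above.

    import inspect
    import numpy as np

    # Hypothetical stand-ins for dacapo's post-processor parameters and
    # evaluation scores; only the attribute shapes matter here.
    class DummyParameters:
        def __init__(self, threshold):
            self.threshold = threshold

    class DummyScore:
        def __init__(self, accuracy):
            self.accuracy = accuracy

    # Mirrors the (parameters, scores) pairs stored per iteration.
    parameter_scores = [
        (DummyParameters(0.3), DummyScore(accuracy=0.71)),
        (DummyParameters(0.5), DummyScore(accuracy=0.84)),
    ]

    # get_attribute_names discovers score names via inspect.getmembers,
    # keeping non-routine, non-dunder attributes:
    attributes = inspect.getmembers(
        parameter_scores[0][1], lambda a: not inspect.isroutine(a))
    names = [a[0] for a in attributes
             if not (a[0].startswith('__') and a[0].endswith('__'))]
    print(names)  # ['accuracy']

    # get_best then picks, per iteration, the parameter set whose chosen
    # score is best (argmax if higher_is_better, argmin otherwise):
    scores = np.array([getattr(ps[1], 'accuracy', np.nan)
                       for ps in parameter_scores], dtype=np.float32)
    best = parameter_scores[int(np.argmax(scores))]
    print(best[0].threshold)  # 0.5

Reading score names off the score object with inspect, rather than hard-coding a key list, is what lets get_best stay agnostic to which evaluator produced the scores.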