
Commit

feat: ⚡️ Incorporate validation-related changes from rhoadesj/dev
rhoadesScholar committed Feb 8, 2024
1 parent 5f50f9b commit 2a29afd
Showing 3 changed files with 235 additions and 124 deletions.
32 changes: 29 additions & 3 deletions dacapo/experiments/tasks/evaluators/instance_evaluator.py
@@ -3,22 +3,48 @@
 from .evaluator import Evaluator
 from .instance_evaluation_scores import InstanceEvaluationScores
 
-from funlib.evaluate import rand_voi
+from funlib.evaluate import rand_voi, detection_scores
+
+try:
+    from funlib.segment.arrays import relabel
+
+    iou = True
+except ImportError:
+    iou = False
 
 import numpy as np
 
 
 class InstanceEvaluator(Evaluator):
-    criteria = ["voi_merge", "voi_split", "voi"]
+    criteria = ["voi_merge", "voi_split", "voi", "avg_iou"]
 
     def evaluate(self, output_array_identifier, evaluation_array):
         output_array = ZarrArray.open_from_array_identifier(output_array_identifier)
         evaluation_data = evaluation_array[evaluation_array.roi].astype(np.uint64)
         output_data = output_array[output_array.roi].astype(np.uint64)
         results = rand_voi(evaluation_data, output_data)
+        if iou:
+            try:
+                output_data, _ = relabel(output_data)
+                results.update(
+                    detection_scores(
+                        evaluation_data,
+                        output_data,
+                        matching_score="iou",
+                    )
+                )
+            except Exception:
+                results["avg_iou"] = 0
+                logger.warning(
+                    "Could not compute IoU because of an unknown error. Sorry about that."
+                )
+        else:
+            results["avg_iou"] = 0
 
         return InstanceEvaluationScores(
-            voi_merge=results["voi_merge"], voi_split=results["voi_split"]
+            voi_merge=results["voi_merge"],
+            voi_split=results["voi_split"],
+            avg_iou=results["avg_iou"],
        )
 
     @property
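For context, the new avg_iou path can be exercised on its own. The sketch below is an illustration only, not part of the commit: the toy label arrays are made up, the funlib calls simply mirror the diff above (rand_voi, detection_scores, relabel), and a module-level logger is defined explicitly here since the hunk above does not show one.

import logging

import numpy as np
from funlib.evaluate import rand_voi, detection_scores

logger = logging.getLogger(__name__)

try:
    from funlib.segment.arrays import relabel

    iou = True
except ImportError:
    iou = False

# Toy instance labels: ground truth vs. prediction (hypothetical data).
evaluation_data = np.array([[1, 1, 0], [2, 2, 0]], dtype=np.uint64)
output_data = np.array([[3, 3, 0], [5, 0, 0]], dtype=np.uint64)

# VOI scores are computed unconditionally, as before this commit.
results = rand_voi(evaluation_data, output_data)

if iou:
    try:
        # relabel renumbers components so detection matching sees contiguous ids.
        output_data, _ = relabel(output_data)
        # detection_scores with matching_score="iou" adds keys such as "avg_iou".
        results.update(detection_scores(evaluation_data, output_data, matching_score="iou"))
    except Exception:
        results["avg_iou"] = 0
        logger.warning("Could not compute IoU; falling back to 0.")
else:
    results["avg_iou"] = 0

print(results["voi_merge"], results["voi_split"], results["avg_iou"])

If funlib.segment is not installed, iou stays False and avg_iou is reported as 0, matching the fallback in the commit.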
7 changes: 5 additions & 2 deletions dacapo/experiments/validation_scores.py
@@ -113,7 +113,7 @@ def get_best(
         best value in two seperate arrays.
         """
         if "criteria" in data.coords.keys():
-            if len(data.coords["criteria"].shape) == 1:
+            if len(data.coords["criteria"].shape) > 1:
                 criteria_bests: List[Tuple[xr.DataArray, xr.DataArray]] = []
                 for criterion in data.coords["criteria"].values:
                     if self.evaluation_scores.higher_is_better(criterion.item()):
@@ -142,7 +142,10 @@ def get_best(
                 return (da_best_indexes, da_best_scores)
             else:
                 if self.evaluation_scores.higher_is_better(
-                    data.coords["criteria"].item()
+                    list(data.coords["criteria"].values)[
+                        0
+                    ]  # TODO: what is the intended behavior here? (hot fix in place)
+                    # data.coords["criteria"].item()
                 ):
                     return (
                         data.idxmax(dim, skipna=True, fill_value=None),
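To make the get_best change easier to follow, here is a small standalone sketch of the per-criterion selection it performs, with a made-up scores array and a hard-coded dictionary standing in for self.evaluation_scores.higher_is_better; it illustrates the idea rather than reproducing the ValidationScores implementation.

import numpy as np
import xarray as xr

# Hypothetical validation scores: 3 iterations x 2 criteria.
data = xr.DataArray(
    np.array([[0.9, 0.5], [0.7, 0.2], [0.8, 0.4]]),
    coords={"iterations": [100, 200, 300], "criteria": ["avg_iou", "voi"]},
    dims=("iterations", "criteria"),
)

dim = "iterations"
higher_is_better = {"avg_iou": True, "voi": False}  # stand-in for self.evaluation_scores.higher_is_better

for criterion in data.coords["criteria"].values:
    scores = data.sel(criteria=criterion)
    if higher_is_better[criterion.item()]:
        best_index = scores.idxmax(dim, skipna=True, fill_value=None)
        best_score = scores.max(dim, skipna=True)
    else:
        best_index = scores.idxmin(dim, skipna=True, fill_value=None)
        best_score = scores.min(dim, skipna=True)
    print(str(criterion), int(best_index), float(best_score))
    # avg_iou -> iteration 100 (0.9 is best when higher is better)
    # voi     -> iteration 200 (0.2 is best when lower is better)

In the diff above, the analogous per-criterion results are gathered into da_best_indexes and da_best_scores; the hot fix in the scalar branch simply takes the first criterion value instead of calling .item(), as the TODO comment notes.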
