Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Keep track of defected slices #24

Open
wants to merge 30 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
30 commits
Select commit Hold shift + click to select a range
c941841
Add few temporary legacy fixes
abailoni Feb 6, 2018
fb9a686
Fix tiny bug MaskTransitionIgnoreLabel
abailoni Feb 15, 2018
2e7652f
Fix issues in m2fcn
constantinpape Mar 7, 2018
4baf228
Adapt fusionhed to new hed's (untested)
constantinpape Mar 7, 2018
68a2c0c
remove dilation parameter from denseHED
Steffen-Wolf Mar 7, 2018
7129a84
Add out channel member to all hed's
constantinpape Mar 7, 2018
6c6c66f
Merge branch 'HED' of https://github.com/nasimrahaman/neurofire into HED
constantinpape Mar 7, 2018
c5fcbff
Merge remote-tracking branch 'origin/HED' into legacy-MS-unet-2
abailoni Jul 2, 2018
6401f90
Fix MaskTransitionIgnoreLabel
abailoni Jul 2, 2018
2b1e1fc
Add opt. for residual connections to 3D UNet
abailoni Jul 2, 2018
2acbd12
Add option for plain unstruct. training
abailoni Jul 3, 2018
e26212c
Minor changes
abailoni Mar 7, 2019
b70bd6d
Merge master
abailoni Mar 7, 2019
8d321ba
Update InvertTarget (support for tensor with segm)
abailoni Jan 20, 2020
d6eaab4
Modify RawVolume to support defect mask
abailoni Jan 20, 2020
4c9a934
Add option for boundary label; add affinities from DynamicOffsets
abailoni Jan 20, 2020
115068e
Slide/shift only slices without neighboring defected ones
abailoni Jan 20, 2020
bdfca1e
Fix and extend defect augmentation
abailoni Jan 20, 2020
2c0e763
Add temporary hack-argument to ConnectedComponents
abailoni Jan 20, 2020
57e1794
Merge remote-tracking branch 'origin/master' into my_dev
abailoni Jan 20, 2020
d085b5f
Add support for glia- and boundary-masks
abailoni Jan 24, 2020
b081588
Update affinity computation and add support for glia-mask
abailoni Jan 27, 2020
9c1424b
Few mods to glia-mask and slide augmentation
abailoni Feb 10, 2020
edcb7b5
Improve glia option
abailoni Feb 23, 2020
cc056b2
More hacks
abailoni Mar 30, 2020
9bb5aef
Merge remote-tracking branch 'remotes/origin/master' into my_dev
abailoni Apr 3, 2020
1c7767c
Integrate easy changes from master
abailoni Apr 3, 2020
a781d3a
Polish implementations and delete obsolete stuff
abailoni Apr 3, 2020
6023903
Clean RandomSlide implementation
abailoni Apr 6, 2020
3376eec
Move custom implementation of affinities
abailoni Apr 6, 2020
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 10 additions & 1 deletion neurofire/datasets/loader/raw.py
Original file line number Diff line number Diff line change
Expand Up @@ -128,6 +128,9 @@ def __init__(self, path, defect_augmentation_config,
self.defect_augmentation = DefectAugmentation.from_config(defect_augmentation_config)
self.cast = Cast(self.dtype)

# Check if we should keep track of defected slices:
self.keep_track_defected_slices = len(self.defect_augmentation.keep_track_of) > 0

def __getitem__(self, index):
# Casting to int would allow index to be IndexSpec objects.
index = int(index)
Expand All @@ -145,7 +148,13 @@ def __getitem__(self, index):

# apply the normal transformations (including normalization)
if self.transforms is not None:
vol = self.transforms(vol)
if self.keep_track_defected_slices:
if isinstance(vol, tuple):
vol = list(vol)
assert isinstance(vol, list)
vol[0] = self.transforms(vol[0])
else:
vol = self.transforms(vol)

if self.return_index_spec:
return vol, IndexSpec(index=index, base_sequence_at_index=slices)
Expand Down
3 changes: 1 addition & 2 deletions neurofire/transform/artifact_source.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,8 +37,7 @@ def __init__(self, path, path_in_h5_dataset=None, data_slice=None,
self.transforms = self.get_transforms()

def get_transforms(self):
    """Build the transform pipeline applied to loaded artifact data.

    Returns:
        Compose: a pipeline that only casts the data to ``self.dtype``.
            NOTE(review): ``Normalize()`` was deliberately dropped from this
            pipeline in this change; the stale pre-change assignment that
            was left in the text has been removed here since it was dead
            (immediately overwritten).
    """
    transforms = Compose(Cast(self.dtype))
    return transforms


Expand Down
85 changes: 77 additions & 8 deletions neurofire/transform/defect_augmentation.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,15 +33,28 @@ class DefectAugmentation(Transform):
artifact_source: data source for additional artifacts
mean_val: mean value for artifact normalization
std_val: std value for artifact normalization
keep_track_of: list with possible values
'low_contrast', 'artifacts', 'missing_slice', 'deformed_slice'
min_distance_between_defects: minimum number of 'clean' slices between defects
nb_contiguous_artifacts: how many contiguous slices should have artifacts
(on CREMI test there are always two next to each other)
"""
def __init__(self, p_missing_slice, p_low_contrast,
p_deformed_slice, p_artifact_source=0,
ignore_slice_list=None, contrast_scale=0.1,
deformation_mode='undirected', deformation_strength=10,
artifact_source=None, mean_val=None, std_val=None,
keep_track_of=None,
min_distance_between_defects=0,
nb_contiguous_artifacts=2,
**super_kwargs):
super().__init__(**super_kwargs)

self.min_distance_between_defects = min_distance_between_defects
assert isinstance(nb_contiguous_artifacts, int)
assert nb_contiguous_artifacts > 0
self.nb_contiguous_artifacts = nb_contiguous_artifacts

# set the cumulative defect probabilities
self.p_missing_slice = p_missing_slice
self.p_low_contrast = self.p_missing_slice + p_low_contrast
Expand All @@ -51,6 +64,10 @@ def __init__(self, p_missing_slice, p_low_contrast,

self.ignore_slice_list = ignore_slice_list

keep_track_of = [] if keep_track_of is None else keep_track_of
assert isinstance(keep_track_of, list)
self.keep_track_of = keep_track_of

# set the parameters for deformation augments
if isinstance(deformation_mode, str):
assert deformation_mode in ('all', 'undirected', 'compress')
Expand Down Expand Up @@ -203,7 +220,7 @@ def apply_artifact_source(self, section):
section = section * (1. - alpha_mask) + artifact * alpha_mask
return section

def volume_function(self, tensor, z_offset=None):
def volume_function_defects(self, tensor, z_offset=None):

# we check for ignore slices if a z-offset is given and if we have
# a ignore slice list
Expand All @@ -212,27 +229,73 @@ def volume_function(self, tensor, z_offset=None):
have_ignore_slices = True

# we iterate over the slices and apply each defect trafo with the given probability
# defected_mask = np.zeros((tensor.shape[0]), dtype='bool')
defected_mask = np.zeros_like(tensor, dtype='bool')
previous_is_defected = False
next_to_be_skipped = 0
for z in range(tensor.shape[0]):
if next_to_be_skipped > 0:
next_to_be_skipped -= 1
previous_is_defected = False
continue

# check if this slice should be ignored
# check if this slice should be ignored because already defected:
if have_ignore_slices:
if z + z_offset in self.ignore_slice_list:
if z + z_offset + self.min_distance_between_defects in self.ignore_slice_list:
# tensor[z] *= 2
next_to_be_skipped = self.min_distance_between_defects * 2
continue


# We never apply defects to the first/last slice:
if z == 0 or z == tensor.shape[0]-1:
continue

# Check if we should leave some space between defected slices:
if previous_is_defected and self.min_distance_between_defects > 0:
previous_is_defected = False
next_to_be_skipped = self.min_distance_between_defects - 1
continue

r = np.random.random()
previous_is_defected = True

if r < self.p_missing_slice:
tensor[z] = self.apply_missing_slice(tensor[z])

if "missing_slice" in self.keep_track_of:
defected_mask[z] = True
elif r < self.p_low_contrast:
tensor[z] = self.apply_low_contrast(tensor[z])

if "low_contrast" in self.keep_track_of:
defected_mask[z] = True
elif r < self.p_deformed_slice:
tensor[z] = self.apply_deformed_slice(tensor[z])

if "deformed_slice" in self.keep_track_of:
defected_mask[z] = True
elif r < self.p_artifact_source:
tensor[z] = self.apply_artifact_source(tensor[z])
next_to_be_skipped = self.nb_contiguous_artifacts - 1 + self.min_distance_between_defects
for i in range(self.nb_contiguous_artifacts):
# Check if we are not at the end of the batch:
if z+i < tensor.shape[0]:
tensor[z+i] = self.apply_artifact_source(tensor[z+i])
if "artifacts" in self.keep_track_of:
defected_mask[z+i] = True
else:
previous_is_defected = False

return tensor, defected_mask


def batch_function(self, batch, z_offset=None):
    """Apply the defect augmentation to a single-tensor batch.

    Args:
        batch: sequence holding exactly one raw volume.
        z_offset: optional global z-offset forwarded to
            ``volume_function_defects`` (used to match entries of the
            ignore-slice list against absolute slice indices).

    Returns:
        The augmented volume, or a ``(volume, defected_mask)`` tuple when
        defect tracking was requested via ``keep_track_of``.
    """
    assert len(batch) == 1
    defected_raw, defected_mask = self.volume_function_defects(batch[0], z_offset)

    # Only expose the boolean mask of defected slices when the caller
    # asked to keep track of at least one defect type; otherwise keep the
    # original single-tensor contract. (The unreachable `return tensor`
    # left over from the previous implementation was removed — `tensor`
    # was not even defined in this scope.)
    if len(self.keep_track_of) > 0:
        return (defected_raw, defected_mask)
    return defected_raw

@classmethod
def from_config(cls, config):
Expand All @@ -246,6 +309,9 @@ def from_config(cls, config):
deformation_mode = config.get('deformation_mode', 'undirected')
deformation_strength = config.get('deformation_strength', 10)
artifact_source_config = config.get('artifact_source', None)
min_distance_between_defects = config.get('min_distance_between_defects', 0)
nb_contiguous_artifacts = config.get('nb_contiguous_artifacts', 2)
keep_track_of = config.get('keep_track_of')
if artifact_source_config is not None:
artifact_source = ArtifactSource.from_config(artifact_source_config)
else:
Expand All @@ -256,4 +322,7 @@ def from_config(cls, config):
contrast_scale=contrast_scale,
deformation_mode=deformation_mode,
deformation_strength=deformation_strength,
artifact_source=artifact_source)
min_distance_between_defects=min_distance_between_defects,
nb_contiguous_artifacts=nb_contiguous_artifacts,
artifact_source=artifact_source,
keep_track_of=keep_track_of)
49 changes: 32 additions & 17 deletions neurofire/transform/volume.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,12 @@ class RandomSlide(Transform):
"""Transform to randomly sample misalignments."""
def __init__(self, output_image_size=None, max_misalign=None,
shift_vs_slide_proba=0.5, apply_proba=0.8, **super_kwargs):
"""
Shift: move only one slice
Slide: from that point on, move all slices

By using the `apply_to` super_kwarg it is possible to shift only the raw image but not the GT segmentation.
"""
# Make sure that output image size and max misalign are not both None
assert not (output_image_size is None and max_misalign is None)
# Make sure that output image size and max misalign are not set both
Expand Down Expand Up @@ -52,7 +58,7 @@ def build_random_variables(self, num_planes, input_image_size):
shift_or_slide = 'shift' if np.random.uniform() < self.shift_vs_slide_proba else 'slide'
# Select from which plane on to slide
slide_from = np.random.randint(low=1, high=num_planes)
shift_at = np.random.randint(low=0, high=num_planes)
shift_at = np.random.randint(low=1, high=num_planes)
# Write to dict
self.set_random_variable('shifts', shifts)
self.set_random_variable('origin', originward_leeways)
Expand All @@ -76,19 +82,27 @@ def shift_and_crop(self, image, zero_shift=False):
# Crop and return
return image[slices]

def batch_function(self, volumes):
assert isinstance(volumes, (tuple, list))
shape = volumes[0].shape
if len(volumes) > 1:
assert all(vv.shape == shape for vv in volumes[1:]), "%s" % ", ".join(str(vv.shape) for vv in volumes)
def batch_function(self, tensors):
assert isinstance(tensors, (tuple, list))
tensors = list(tensors)

# Add channel dim to all tensors (if not present already):
assert all([vol.ndim in [3,4] for vol in tensors])
tensors = [vol if vol.ndim == 4 else np.expand_dims(vol, axis=0) for vol in tensors]

vol_shape = tensors[0].shape[1:]
if len(tensors) > 1:
assert all(tnsr.shape[1:] == vol_shape for tnsr in tensors[1:]), "%s" % ", ".join(str(tnsr.shape) for tnsr in tensors)
# Build random variables
self.build_random_variables(num_planes=shape[0],
input_image_size=shape[1:])
self.build_random_variables(num_planes=vol_shape[0],
input_image_size=vol_shape[1:])

# determine if we apply the transformation to the slide at all
# TODO would be cleaner to integrate into `build random variables` as well
apply_shift = np.random.rand() < self.apply_proba

apply_to = range(len(tensors)) if self._apply_to is None else self._apply_to

if apply_shift:
# Get random variables
shift_or_slide = self.get_random_variable('shift_or_slide')
Expand All @@ -97,21 +111,22 @@ def batch_function(self, volumes):
# Shift
shift_at = self.get_random_variable('shift_at')
# Don't shift if plane_num doesn't equal shift_at
out_volumes = tuple(np.array([self.shift_and_crop(image=plane,
zero_shift=(plane_num != shift_at))
for plane_num, plane in enumerate(volume)]) for volume in volumes)
out_tensors = tuple(np.array([[self.shift_and_crop(image=plane,
zero_shift=(plane_num != shift_at) or
(nb_tnsr not in apply_to))
for plane_num, plane in enumerate(vv)] for vv in tnsr])
for nb_tnsr, tnsr in enumerate(tensors))
else:
# Slide
slide_from = self.get_random_variable('slide_from')
# Don't shift if plane_num isn't larger than or equal to slide_from
out_volumes = tuple(np.array([self.shift_and_crop(image=plane,
out_tensors = tuple(np.array([[self.shift_and_crop(image=plane,
zero_shift=(plane_num < slide_from))
for plane_num, plane in enumerate(volume)]) for volume in volumes)

for plane_num, plane in enumerate(vv)] for vv in tnsr]) for nb_tnsr, tnsr in enumerate(tensors))
else:
out_volumes = tuple(np.array([self.shift_and_crop(image=plane, zero_shift=True)
for plane in volume]) for volume in volumes)
return out_volumes
out_tensors = tuple(np.array([[self.shift_and_crop(image=plane, zero_shift=True)
for plane in vv] for vv in tnsr]) for tnsr in tensors)
return out_tensors


class RejectNonZeroThreshold(object):
Expand Down