All modules for which code is available
-- mirp.deepLearningPreprocessing -
- mirp.extractFeaturesAndImages -
- mirp.extractImageParameters -
- mirp.extractMaskLabels -
- mirp.importData.importImageAndMask -
- mirp.settings.settingsFeatureExtraction -
- mirp.settings.settingsGeneral -
- mirp.settings.settingsGeneric -
- mirp.settings.settingsImageProcessing -
- mirp.settings.settingsImageTransformation -
- mirp.settings.settingsInterpolation -
- mirp.settings.settingsMaskResegmentation -
- mirp.settings.settingsPerturbation +
- mirp.data_import.import_image_and_mask +
- mirp.deep_learning_preprocessing +
- mirp.extract_features_and_images +
- mirp.extract_image_parameters +
- mirp.extract_mask_labels +
- mirp.settings.feature_parameters +
- mirp.settings.general_parameters +
- mirp.settings.generic +
- mirp.settings.image_processing_parameters +
- mirp.settings.interpolation_parameters +
- mirp.settings.perturbation_parameters +
- mirp.settings.resegmentation_parameters +
- mirp.settings.transformation_parameters
- mirp.utilities.config_utilities
Source code for mirp.data_import.import_image_and_mask
+from mirp.data_import.import_image import import_image
+from mirp.data_import.import_mask import import_mask
+from mirp._data_import.generic_file import ImageFile, MaskFile
+from mirp._data_import.dicom_file import ImageDicomFile, MaskDicomFile
+from mirp._data_import.dicom_file_stack import ImageDicomFileStack
+from mirp.utilities.utilities import random_string
+
+
+
+[docs]
def import_image_and_mask(
    image,
    mask=None,
    sample_name: None | str | list[str] = None,
    image_name: None | str | list[str] = None,
    image_file_type: None | str = None,
    image_modality: None | str | list[str] = None,
    image_sub_folder: None | str = None,
    mask_name: None | str | list[str] = None,
    mask_file_type: None | str = None,
    mask_modality: None | str | list[str] = None,
    mask_sub_folder: None | str = None,
    roi_name: None | str | list[str] | dict[str, str] = None,
    association_strategy: None | str | list[str] = None,
    stack_images: str = "auto",
    stack_masks: str = "auto"
) -> list[ImageFile]:
    """
    Creates and curates references to image and mask files. This function is usually called internally by other
    functions such as :func:`~mirp.extract_features_and_images.extract_features`.

    Parameters
    ----------
    image: Any
        A path to an image file, a path to a directory containing image files, a path to a config_data.xml
        file, a path to a csv file containing references to image files, a pandas.DataFrame containing references to
        image files, or a numpy.ndarray.

    mask: Any
        A path to a mask file, a path to a directory containing mask files, a path to a config_data.xml
        file, a path to a csv file containing references to mask files, a pandas.DataFrame containing references to
        mask files, or a numpy.ndarray. If None, masks are sought in the same location as ``image``.

    sample_name: str or list of str, default: None
        Name of expected sample names. This is used to select specific image files. If None, no image files are
        filtered based on the corresponding sample name (if known).

    image_name: str, optional, default: None
        Pattern to match image files against. The matches are exact. Use wildcard symbols ("*") to
        match varying structures. The sample name (if part of the file name) can also be specified using "#". For
        example, image_name = '#_*_image' would find John_Doe in John_Doe_CT_image.nii or John_Doe_001_image.nii.
        File extensions do not need to be specified. If None, file names are not used for filtering files and
        setting sample names.

    image_file_type: {"dicom", "nifti", "nrrd", "numpy", "itk"}, optional, default: None
        The type of file that is expected. If None, the file type is not used for filtering files.
        "itk" comprises "nifti" and "nrrd" file types.

    image_modality: {"ct", "pet", "pt", "mri", "mr", "rtdose", "generic"}, optional, default: None
        The type of modality that is expected. If None, modality is not used for filtering files. Note that only
        DICOM files contain metadata concerning modality.

    image_sub_folder: str, optional, default: None
        Fixed directory substructure where image files are located. If None, the directory substructure is not used
        for filtering files.

    mask_name: str or list of str, optional, default: None
        Pattern to match mask files against. The matches are exact. Use wildcard symbols ("*") to match varying
        structures. The sample name (if part of the file name) can also be specified using "#". For example,
        mask_name = '#_*_mask' would find John_Doe in John_Doe_CT_mask.nii or John_Doe_001_mask.nii. File extensions
        do not need to be specified. If None, file names are not used for filtering files and setting sample names.

    mask_file_type: {"dicom", "nifti", "nrrd", "numpy", "itk"}, optional, default: None
        The type of file that is expected. If None, the file type is not used for filtering files.
        "itk" comprises "nifti" and "nrrd" file types.

    mask_modality: {"rtstruct", "seg", "generic_mask"}, optional, default: None
        The type of modality that is expected. If None, modality is not used for filtering files.
        Note that only DICOM files contain metadata concerning modality. Masks from non-DICOM files are considered to
        be "generic_mask".

    mask_sub_folder: str, optional, default: None
        Fixed directory substructure where mask files are located. If None, the directory substructure is not used for
        filtering files.

    roi_name: str, list of str, or dict, optional, default: None
        Name of the regions of interest that should be assessed.

    association_strategy: {"frame_of_reference", "sample_name", "file_distance", "file_name_similarity", "list_order", "position", "single_image"}
        The preferred strategy for associating images and masks. File association is preferably done using frame of
        reference UIDs (DICOM), or sample name (NIfTI, numpy). Other options are relatively frail, except for
        `list_order` which may be applicable when a list with images and a list with masks is provided and both lists
        are of equal length.

    stack_images: {"auto", "yes", "no"}, optional, default: "auto"
        If image files in the same directory cannot be assigned to different samples, and are 2D (slices) of the same
        size, they might belong to the same 3D image stack. "auto" will stack 2D numpy arrays, but not other file types.
        "yes" will stack all files that contain 2D images, that have the same dimensions, orientation and spacing,
        except for DICOM files. "no" will not stack any files. DICOM files ignore this argument, because their stacking
        can be determined from metadata.

    stack_masks: {"auto", "yes", "no"}, optional, default: "auto"
        If mask files in the same directory cannot be assigned to different samples, and are 2D (slices) of the same
        size, they might belong to the same 3D mask stack. "auto" will stack 2D numpy arrays, but not other file
        types. "yes" will stack all files that contain 2D images, that have the same dimensions, orientation and
        spacing, except for DICOM files. "no" will not stack any files. DICOM files ignore this argument,
        because their stacking can be determined from metadata.

    Returns
    -------
    list[ImageFile]
        The functions returns a list of ImageFile objects, if any were found with the specified filters.

    Raises
    ------
    ValueError
        If no images or masks are found, if no association strategy is feasible, or if a user-specified
        association strategy is not feasible for the provided files.
    """
    # Without explicit masks, masks are sought in the same location as the images.
    if mask is None:
        mask = image

    # Generate list of images.
    image_list = import_image(
        image,
        sample_name=sample_name,
        image_name=image_name,
        image_file_type=image_file_type,
        image_modality=image_modality,
        image_sub_folder=image_sub_folder,
        stack_images=stack_images
    )

    # Generate list of masks.
    mask_list = import_mask(
        mask,
        sample_name=sample_name,
        mask_name=mask_name,
        mask_file_type=mask_file_type,
        mask_modality=mask_modality,
        mask_sub_folder=mask_sub_folder,
        stack_masks=stack_masks,
        roi_name=roi_name
    )

    if len(image_list) == 0:
        raise ValueError("No images were found. Possible reasons are lack of images with the preferred modality.")
    if len(mask_list) == 0:
        raise ValueError("No masks were found. Possible reasons are lack of masks with the preferred modality.")

    # Determine association strategy, if this is unset.
    possible_association_strategy = set_association_strategy(
        image_list=image_list,
        mask_list=mask_list
    )

    # Check that at least one strategy remains before the set is consumed below; this also yields a clearer error
    # than reporting a user-specified strategy as "unavailable" when no strategy is feasible at all.
    if len(possible_association_strategy) == 0:
        raise ValueError(
            "No strategies for associating images and masks are available, indicating that there is no clear way to "
            "establish an association."
        )

    if association_strategy is None:
        association_strategy = possible_association_strategy
    elif isinstance(association_strategy, str):
        association_strategy = [association_strategy]

    if not isinstance(association_strategy, set):
        association_strategy = set(association_strategy)

    # Test association strategy.
    unavailable_strategy = association_strategy - possible_association_strategy
    if len(unavailable_strategy) > 0:
        raise ValueError(
            f"One or more strategies for associating images and masks are not available for the provided image and "
            f"mask set: {', '.join(list(unavailable_strategy))}. Only the following strategies are available: "
            f"{'. '.join(list(possible_association_strategy))}"
        )

    # Start association.
    if association_strategy == {"list_order"}:
        # If only the list_order strategy is available, use this.
        for ii, image in enumerate(image_list):
            image.associated_masks = [mask_list[ii]]

    elif association_strategy == {"single_image"}:
        # If single_image is the only strategy, use this.
        image_list[0].associated_masks = mask_list

    else:
        for ii, image in enumerate(image_list):
            image.associate_with_mask(
                mask_list=mask_list,
                association_strategy=association_strategy
            )

        # Fall back to the weaker strategies when the primary ones did not yield any association.
        if all(image.associated_masks is None for image in image_list):
            if "single_image" in association_strategy:
                image_list[0].associated_masks = mask_list
            elif "list_order" in association_strategy:
                for ii, image in enumerate(image_list):
                    image.associated_masks = [mask_list[ii]]

    # Ensure that we are working with deep copies from this point - we don't want to propagate changes to masks,
    # images by reference.
    image_list = [image.copy() for image in image_list]

    # Set sample names. First we check if all sample names are missing.
    if all(image.sample_name is None for image in image_list):
        if isinstance(sample_name, str):
            sample_name = [sample_name]

        if isinstance(sample_name, list) and len(sample_name) == len(image_list):
            for ii, image in enumerate(image_list):
                image.set_sample_name(sample_name=sample_name[ii])
                if image.associated_masks is not None:
                    for mask in image.associated_masks:
                        mask.set_sample_name(sample_name=sample_name[ii])

        elif all(image.file_name is not None for image in image_list):
            for image in image_list:
                image.set_sample_name(sample_name=image.file_name)

                if image.associated_masks is not None:
                    for mask in image.associated_masks:
                        mask.set_sample_name(sample_name=image.file_name)

    # Then set any sample names for images that still miss them.
    if any(image.sample_name is None for image in image_list):
        for ii, image in enumerate(image_list):
            if image.sample_name is None:
                # Generate a unique placeholder name so downstream processing can always rely on a sample name.
                generated_sample_name = str(ii + 1) + "_" + random_string(16)
                image.set_sample_name(sample_name=generated_sample_name)
                if image.associated_masks is not None:
                    for mask in image.associated_masks:
                        mask.set_sample_name(sample_name=generated_sample_name)

    return image_list
+
+
+
def set_association_strategy(
    image_list: list[ImageFile] | list[ImageDicomFile],
    mask_list: list[MaskFile] | list[MaskDicomFile]
) -> set[str]:
    """
    Determine which image-mask association strategies are feasible for the provided file lists.

    Parameters
    ----------
    image_list: list of ImageFile or ImageDicomFile
        Image files that should be associated with masks.
    mask_list: list of MaskFile or MaskDicomFile
        Mask files that should be associated with images.

    Returns
    -------
    set[str]
        Subset of {"frame_of_reference", "sample_name", "file_distance", "file_name_similarity",
        "list_order", "position", "single_image"} containing the feasible strategies. May be empty.
    """
    # Association strategy is set by a process of elimination.
    possible_strategies = {
        "frame_of_reference", "sample_name", "file_distance", "file_name_similarity", "list_order", "position",
        "single_image"
    }

    # Check that images and masks are available.
    if not image_list or not mask_list:
        return set()

    # Association by list order requires lists of equal length.
    if len(image_list) != len(mask_list):
        possible_strategies.remove("list_order")

    # Association with a single image requires exactly one image.
    if len(image_list) > 1:
        possible_strategies.remove("single_image")

    # Association by frame of reference UID requires DICOM images and masks with known UIDs.
    dcm_image_list: list[ImageDicomFile | ImageDicomFileStack] = [
        image for image in image_list if isinstance(image, (ImageDicomFile, ImageDicomFileStack))
    ]
    dcm_mask_list: list[MaskDicomFile] = [mask for mask in mask_list if isinstance(mask, MaskDicomFile)]

    if (
            not dcm_image_list
            or not dcm_mask_list
            or all(image.frame_of_reference_uid is None for image in dcm_image_list)
            or all(mask.frame_of_reference_uid is None for mask in dcm_mask_list)
    ):
        possible_strategies.remove("frame_of_reference")

    # Association by sample name requires that at least some sample names are known on both sides.
    if all(image.sample_name is None for image in image_list) or all(mask.sample_name is None for mask in mask_list):
        possible_strategies.remove("sample_name")

    # File distance requires known directories, and more than one mask directory to discriminate between.
    image_dir_path = {image.dir_path for image in image_list} - {None}
    mask_dir_path = {mask.dir_path for mask in mask_list} - {None}
    if len(image_dir_path) == 0 or len(mask_dir_path) <= 1:
        possible_strategies.remove("file_distance")

    # File name similarity requires that file names are present on both sides.
    if all(image.file_name is None for image in image_list) or all(mask.file_name is None for mask in mask_list):
        possible_strategies.remove("file_name_similarity")

    # Association by position requires known origins, and sufficient variation in positional data.
    if all(image.image_origin is None for image in image_list) or all(mask.image_origin is None for mask in mask_list):
        possible_strategies.remove("position")
    else:
        image_position_data = {
            image.get_image_origin(as_str=True) + image.get_image_spacing(as_str=True) +
            image.get_image_dimension(as_str=True) + image.get_image_orientation(as_str=True)
            for image in image_list if image.image_origin is not None
        }
        mask_position_data = {
            mask.get_image_origin(as_str=True) + mask.get_image_spacing(as_str=True) +
            mask.get_image_dimension(as_str=True) + mask.get_image_orientation(as_str=True)
            for mask in mask_list if mask.image_origin is not None
        }

        # Positional data must differ between files to be discriminative.
        if len(image_position_data) <= 1 or len(mask_position_data) <= 1:
            possible_strategies.remove("position")

    return possible_strategies
+
Source code for mirp.deepLearningPreprocessing
from mirp.workflows.standardWorkflow import StandardWorkflow -
Source code for mirp.deepLearningPreprocessing
return results
Source code for mirp.deepLearningPreprocessing
) -
Source code for mirp.deepLearningPreprocessing
)
Source code for mirp.deep_learning_preprocessing
+from typing import Generator, Iterable, Any
+import copy
+import ray
+
+from mirp._data_import.generic_file import ImageFile
+from mirp.settings.generic import SettingsClass
+from mirp._workflows.standardWorkflow import StandardWorkflow
+
+
+
+[docs]
def deep_learning_preprocessing(
    output_slices: bool = False,
    crop_size: None | list[float] | list[int] = None,
    image_export_format: str = "numpy",
    write_file_format: str = "numpy",
    export_images: None | bool = None,
    write_images: None | bool = None,
    write_dir: None | str = None,
    num_cpus: None | int = None,
    **kwargs
) -> None | list[Any]:
    """
    Pre-processes images for deep learning.

    Parameters
    ----------
    output_slices: bool, optional, default: False
        Determines whether separate slices should be extracted.

    crop_size: list of float or list of int, optional, default: None
        Size to which the images and masks should be cropped. Images and masks are cropped around the center of the
        mask(s).

        .. note::
            MIRP follows the numpy convention for indexing (*z*, *y*, *x*). The final element always corresponds to the
            *x* dimension.

    image_export_format: {"dict", "native", "numpy"}, default: "numpy"
        Return format for processed images and masks. ``"dict"`` returns dictionaries of images and masks as numpy
        arrays and associated characteristics. ``"native"`` returns images and masks in their internal format.
        ``"numpy"`` returns images and masks in numpy format. This argument is only used if ``export_images=True``.

    write_file_format: {"nifti", "numpy"}, default: "numpy"
        File format for processed images and masks. ``"nifti"`` writes images and masks in the NIfTI file format,
        and ``"numpy"`` writes images and masks as numpy files. This argument is only used if ``write_images=True``.

    export_images: bool, optional
        Determines whether processed images and masks should be returned by the function.

    write_images: bool, optional
        Determines whether processed images and masks should be written to the directory indicated by the
        ``write_dir`` keyword argument.

    write_dir: str, optional
        Path to directory where processed images and masks should be written. If not set, processed images and masks
        are returned by this function. Required if ``write_images=True``.

    num_cpus: int, optional, default: None
        Number of CPU nodes that should be used for parallel processing. Image and mask processing can be
        parallelized using the ``ray`` package. If a ray cluster is defined by the user, this cluster will be used
        instead. By default, image and mask processing are processed sequentially.

    **kwargs:
        Keyword arguments passed for importing images and masks (
        :func:`~mirp._data_import.import_image_and_mask.import_image_and_mask`) and configuring settings (notably
        :class:`~mirp.settings.image_processing_parameters.ImagePostProcessingClass`,
        :class:`~mirp.settings.perturbation_parameters.ImagePerturbationSettingsClass`), among others.

    Returns
    -------
    None | list[Any]
        List of images and masks in the format indicated by ``image_export_format``, if ``export_images=True``.

    See Also
    --------
    Keyword arguments can be provided to configure the following:

    * image and mask import (:func:`~mirp._data_import.import_image_and_mask.import_image_and_mask`)
    * image post-processing (:class:`~mirp.settings.image_processing_parameters.ImagePostProcessingClass`)
    * image perturbation / augmentation (:class:`~mirp.settings.perturbation_parameters.ImagePerturbationSettingsClass`)
    * image interpolation / resampling (:class:`~mirp.settings.interpolation_parameters.ImageInterpolationSettingsClass` and
      :class:`~mirp.settings.interpolation_parameters.MaskInterpolationSettingsClass`)
    * mask resegmentation (:class:`~mirp.settings.resegmentation_parameters.ResegmentationSettingsClass`)

    """

    # Conditionally start a ray cluster. If the user already started one, reuse it and never shut it down here.
    external_ray = ray.is_initialized()
    if not external_ray and num_cpus is not None and num_cpus > 1:
        ray.init(num_cpus=num_cpus)

    if ray.is_initialized():
        # Parallel processing: submit one remote task per workflow.
        results = [
            _ray_extractor.remote(
                workflow=workflow,
                output_slices=output_slices,
                crop_size=crop_size,
                image_export_format=image_export_format,
                write_file_format=write_file_format
            )
            for workflow in _base_deep_learning_preprocessing(
                export_images=export_images,
                write_images=write_images,
                write_dir=write_dir,
                **kwargs
            )
        ]

        # Block until all remote tasks have completed.
        results = ray.get(results)
        # Only shut down a cluster that this function started itself.
        if not external_ray:
            ray.shutdown()
    else:
        # Sequential processing: run each workflow in this process.
        workflows = list(_base_deep_learning_preprocessing(
            export_images=export_images,
            write_images=write_images,
            write_dir=write_dir,
            **kwargs)
        )

        results = [
            workflow.deep_learning_conversion(
                output_slices=output_slices,
                crop_size=crop_size,
                image_export_format=image_export_format,
                write_file_format=write_file_format
            )
            for workflow in workflows
        ]

    return results
+
+
+
@ray.remote
def _ray_extractor(
    workflow: StandardWorkflow,
    output_slices: bool = False,
    crop_size: None | list[float] | list[int] = None,
    image_export_format: str = "numpy",
    write_file_format: str = "numpy"
):
    """
    Remote (ray) wrapper around ``StandardWorkflow.deep_learning_conversion``.

    Runs on a ray worker process; arguments mirror those of :func:`deep_learning_preprocessing`.
    """
    # Limit internal threading by third-party libraries, since ray already parallelizes across workers.
    from mirp.utilities.parallel import limit_inner_threads
    limit_inner_threads()

    return workflow.deep_learning_conversion(
        output_slices=output_slices,
        crop_size=crop_size,
        image_export_format=image_export_format,
        write_file_format=write_file_format
    )
+
+
+
+[docs]
def deep_learning_preprocessing_generator(
    output_slices: bool = False,
    crop_size: None | list[float] | list[int] = None,
    image_export_format: str = "numpy",
    write_file_format: str = "numpy",
    export_images: None | bool = None,
    write_images: None | bool = None,
    write_dir: None | str = None,
    **kwargs
) -> Generator[Any, None, None]:
    """
    Generator for pre-processing images for deep learning.

    Parameters
    ----------
    output_slices: bool, optional, default: False
        Determines whether separate slices should be extracted.

    crop_size: list of float or list of int, optional, default: None
        Size to which the images and masks should be cropped. Images and masks are cropped around the center of the
        mask(s).

        .. note::
            MIRP follows the numpy convention for indexing (*z*, *y*, *x*). The final element always corresponds to the
            *x* dimension.

    image_export_format: {"dict", "native", "numpy"}, default: "numpy"
        Return format for processed images and masks. ``"dict"`` returns dictionaries of images and masks as numpy
        arrays and associated characteristics. ``"native"`` returns images and masks in their internal format.
        ``"numpy"`` returns images and masks in numpy format. This argument is only used if ``export_images=True``.

    write_file_format: {"nifti", "numpy"}, default: "numpy"
        File format for processed images and masks. ``"nifti"`` writes images and masks in the NIfTI file format,
        and ``"numpy"`` writes images and masks as numpy files. This argument is only used if ``write_images=True``.

    export_images: bool, optional
        Determines whether processed images and masks should be returned by the function.

    write_images: bool, optional
        Determines whether processed images and masks should be written to the directory indicated by the
        ``write_dir`` keyword argument.

    write_dir: str, optional
        Path to directory where processed images and masks should be written. If not set, processed images and masks
        are returned by this function. Required if ``write_images=True``.

    **kwargs:
        Keyword arguments passed for importing images and masks (
        :func:`~mirp._data_import.import_image_and_mask.import_image_and_mask`) and configuring settings (notably
        :class:`~mirp.settings.image_processing_parameters.ImagePostProcessingClass`,
        :class:`~mirp.settings.perturbation_parameters.ImagePerturbationSettingsClass`), among others.

    Yields
    ------
    None | list[Any]
        List of images and masks in the format indicated by ``image_export_format``, if ``export_images=True``.

    See Also
    --------
    Keyword arguments can be provided to configure the following:

    * image and mask import (:func:`~mirp._data_import.import_image_and_mask.import_image_and_mask`)
    * image post-processing (:class:`~mirp.settings.image_processing_parameters.ImagePostProcessingClass`)
    * image perturbation / augmentation (:class:`~mirp.settings.perturbation_parameters.ImagePerturbationSettingsClass`)
    * image interpolation / resampling (:class:`~mirp.settings.interpolation_parameters.ImageInterpolationSettingsClass` and
      :class:`~mirp.settings.interpolation_parameters.MaskInterpolationSettingsClass`)
    * mask resegmentation (:class:`~mirp.settings.resegmentation_parameters.ResegmentationSettingsClass`)

    """
    # Materialize all workflows first, then process and yield them one by one.
    workflows = list(_base_deep_learning_preprocessing(
        export_images=export_images,
        write_images=write_images,
        write_dir=write_dir,
        **kwargs))

    for workflow in workflows:
        yield workflow.deep_learning_conversion(
            output_slices=output_slices,
            crop_size=crop_size,
            image_export_format=image_export_format,
            write_file_format=write_file_format
        )
+
+
+
def _base_deep_learning_preprocessing(
    image,
    mask=None,
    sample_name: None | str | list[str] = None,
    image_name: None | str | list[str] = None,
    image_file_type: None | str = None,
    image_modality: None | str | list[str] = None,
    image_sub_folder: None | str = None,
    mask_name: None | str | list[str] = None,
    mask_file_type: None | str = None,
    mask_modality: None | str | list[str] = None,
    mask_sub_folder: None | str = None,
    roi_name: None | str | list[str] | dict[str, str] = None,
    association_strategy: None | str | list[str] = None,
    settings: None | str | SettingsClass | list[SettingsClass] = None,
    stack_masks: str = "auto",
    stack_images: str = "auto",
    write_images: None | bool = None,
    export_images: None | bool = None,
    write_dir: None | str = None,
    **kwargs
):
    """
    Prepare ``StandardWorkflow`` objects for deep learning preprocessing.

    Imports images and masks, resolves the ``write_images`` / ``export_images`` flags from ``write_dir``,
    imports or validates ``settings``, and yields one configured workflow per combination of image file,
    settings object and perturbation parameters. Import-related parameters mirror those of
    :func:`~mirp.data_import.import_image_and_mask.import_image_and_mask`.

    Yields
    ------
    StandardWorkflow
        Configured workflow objects.

    Raises
    ------
    ValueError
        If ``write_images`` is requested without ``write_dir``, or if both ``write_images`` and
        ``export_images`` resolve to False.
    TypeError
        If ``settings`` is not a path, a SettingsClass object, or a list thereof.
    """
    # Local imports avoid circular imports at module load time.
    from mirp.data_import.import_image_and_mask import import_image_and_mask
    from mirp.settings.import_config_parameters import import_configuration_settings

    # Infer write_images, export_images based on write_dir.
    if write_images is None:
        write_images = write_dir is not None
    if export_images is None:
        export_images = write_dir is None

    if not write_images:
        write_dir = None

    if write_images and write_dir is None:
        raise ValueError("write_dir argument should be provided for writing images and masks to.")

    if not write_images and not export_images:
        raise ValueError("write_images and export_images arguments cannot both be False.")

    # Import settings (to provide immediate feedback if something is amiss).
    if isinstance(settings, str):
        settings = import_configuration_settings(
            compute_features=False,
            path=settings
        )
    elif isinstance(settings, SettingsClass):
        settings = [settings]
    elif isinstance(settings, Iterable) and all(isinstance(x, SettingsClass) for x in settings):
        settings = list(settings)
    elif settings is None:
        settings = import_configuration_settings(
            compute_features=False,
            **kwargs
        )
    else:
        raise TypeError(
            f"The 'settings' argument is expected to be a path to a configuration xml file, "
            f"a SettingsClass object, or a list thereof. Found: {type(settings)}."
        )

    image_list = import_image_and_mask(
        image=image,
        mask=mask,
        sample_name=sample_name,
        image_name=image_name,
        image_file_type=image_file_type,
        image_modality=image_modality,
        image_sub_folder=image_sub_folder,
        mask_name=mask_name,
        mask_file_type=mask_file_type,
        mask_modality=mask_modality,
        mask_sub_folder=mask_sub_folder,
        roi_name=roi_name,
        association_strategy=association_strategy,
        stack_images=stack_images,
        stack_masks=stack_masks
    )

    yield from _generate_dl_preprocessing_workflows(
        image_list=image_list,
        settings=settings,
        write_dir=write_dir,
        write_images=write_images,
        export_images=export_images
    )
+
+
def _generate_dl_preprocessing_workflows(
    image_list: list[ImageFile],
    settings: list[SettingsClass],
    write_dir: None | str,
    write_images: bool,
    export_images: bool
) -> Generator[StandardWorkflow, None, None]:
    """
    Yield one StandardWorkflow per combination of image file, settings object, and perturbation parameters
    (noise repetition x rotation x translation x spacing).

    NOTE(review): the settings objects are mutated in place (feature families disabled, resegmentation turned
    off) — these changes persist on the caller's settings objects after iteration.
    """

    for image_file in image_list:
        for current_settings in settings:

            # Update settings to remove settings that may cause problems.
            current_settings.feature_extr.families = "none"
            current_settings.img_transform.feature_settings.families = "none"
            current_settings.perturbation.crop_around_roi = False
            current_settings.roi_resegment.resegmentation_method = "none"

            # Noise perturbation: fall back to a single unperturbed pass when disabled.
            if current_settings.perturbation.noise_repetitions is None or \
                    current_settings.perturbation.noise_repetitions == 0:
                noise_repetition_ids = [None]
            else:
                noise_repetition_ids = list(range(current_settings.perturbation.noise_repetitions))

            # Rotation perturbation: all-zero angles count as disabled.
            if current_settings.perturbation.rotation_angles is None or len(
                    current_settings.perturbation.rotation_angles) == 0 or all(
                x == 0.0 for x in current_settings.perturbation.rotation_angles
            ):
                rotation_angles = [None]
            else:
                rotation_angles = copy.deepcopy(current_settings.perturbation.rotation_angles)

            # Translation perturbation: expand fractional translations into (z, y, x) tuples; by-slice
            # processing fixes the z-translation at 0.0.
            if current_settings.perturbation.translation_fraction is None or len(
                current_settings.perturbation.translation_fraction) == 0 or all(
                x == 0.0 for x in current_settings.perturbation.translation_fraction
            ):
                translations = [None]
            else:
                config_translation = copy.deepcopy(current_settings.perturbation.translation_fraction)
                translations = []
                for translation_x in config_translation:
                    for translation_y in config_translation:
                        if not current_settings.general.by_slice:
                            for translation_z in config_translation:
                                translations += [(translation_z, translation_y, translation_x)]
                        else:
                            translations += [(0.0, translation_y, translation_x)]

            # Interpolation: all-zero spacing counts as disabled.
            if current_settings.img_interpolate.new_spacing is None or len(
                    current_settings.img_interpolate.new_spacing) == 0 or all(
                x == 0.0 for x in current_settings.img_interpolate.new_spacing
            ):
                spacings = [None]
            else:
                spacings = copy.deepcopy(current_settings.img_interpolate.new_spacing)

            # Yield one workflow per perturbation combination; each gets its own deep copy of the image file.
            for noise_repetition_id in noise_repetition_ids:
                for rotation_angle in rotation_angles:
                    for translation in translations:
                        for spacing in spacings:
                            yield StandardWorkflow(
                                image_file=copy.deepcopy(image_file),
                                write_dir=write_dir,
                                settings=current_settings,
                                settings_name=current_settings.general.config_str,
                                write_features=False,
                                export_features=False,
                                write_images=write_images,
                                export_images=export_images,
                                noise_iteration_id=noise_repetition_id,
                                rotation=rotation_angle,
                                translation=translation,
                                new_image_spacing=spacing
                            )
+
Source code for mirp.extractFeaturesAndImages
Source code for mirp.extractFeaturesAndImages
Source code for mirp.extractFeaturesAndImages
Source code for mirp.extractFeaturesAndImages
Source code for mirp.extractFeaturesAndImages
Source code for mirp.extractFeaturesAndImages
Source code for mirp.extractFeaturesAndImages
Source code for mirp.extractImageParameters
from mirp.importData.imageGenericFile import ImageFile
-[docs]def extract_image_parameters(
+
+[docs]
+def extract_image_parameters(
image,
sample_name: None | str | list[str] = None,
image_name: None | str | list[str] = None,
@@ -179,6 +179,7 @@ Source code for mirp.extractImageParameters
return metadata
+
def _extract_image_parameters(index: int, image: ImageFile) -> dict[str, str]:
metadata = image.export_metadata()
diff --git a/docs/_modules/mirp/extractMaskLabels.html b/docs/_modules/mirp/extractMaskLabels.html
index a069e7b2..59794496 100644
--- a/docs/_modules/mirp/extractMaskLabels.html
+++ b/docs/_modules/mirp/extractMaskLabels.html
@@ -1,22 +1,20 @@
-
+
- mirp.extractMaskLabels — mirp 2.1.1 documentation
-
-
-
-
+ mirp.extractMaskLabels — mirp 2.0.1 documentation
+
+
-
-
-
-
-
+
+
+
+
+
@@ -84,7 +82,9 @@ Source code for mirp.extractMaskLabels
from mirp.importData.imageGenericFile import MaskFile
-[docs]def extract_mask_labels(
+
+[docs]
+def extract_mask_labels(
mask=None,
sample_name: None | str | list[str] = None,
mask_name: None | str | list[str] = None,
@@ -181,6 +181,7 @@ Source code for mirp.extractMaskLabels
return labels
+
def _extract_mask_labels(index: int, mask: MaskFile) -> dict[str, Any]:
labels = mask.export_roi_labels()
diff --git a/docs/_modules/mirp/extract_features_and_images.html b/docs/_modules/mirp/extract_features_and_images.html
new file mode 100644
index 00000000..13fe0469
--- /dev/null
+++ b/docs/_modules/mirp/extract_features_and_images.html
@@ -0,0 +1,630 @@
+
+
+
+
+
+ mirp.extract_features_and_images — mirp 2.2.0 documentation
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Source code for mirp.extract_features_and_images
+from typing import Generator, Iterable, Any
+import copy
+
+import ray
+
+from mirp._data_import.generic_file import ImageFile
+from mirp.settings.generic import SettingsClass
+from mirp._workflows.standardWorkflow import StandardWorkflow
+
+
+
+[docs]
+def extract_features(
+ write_features: None | bool = None,
+ export_features: None | bool = None,
+ write_dir: None | str = None,
+ **kwargs
+) -> None | list[Any]:
+ """
+ Compute features from regions of interest in images. This function is a wrapper around
+ :func:`~mirp.extractFeaturesAndImages.extract_features_and_images`.
+
+ Parameters
+ ----------
+ write_features: bool, optional
+ Determines whether features computed from images should be written to the directory indicated by the
+ ``write_dir`` keyword argument.
+
+ export_features: bool, optional
+ Determines whether features computed from images should be returned by the function.
+
+ write_dir: str, optional
+ Path to directory where feature tables should be written. If not set, feature tables are returned by this
+ function. Required if ``write_features=True``.
+
+ **kwargs:
+ Keyword arguments passed to :func:`~mirp.extractFeaturesAndImages.extract_features_and_images`.
+
+ Returns
+ -------
+ None | list[Any]
+ List of feature tables, if ``export_features=True``.
+
+ See Also
+ --------
+ :func:`~mirp.extractFeaturesAndImages.extract_features_and_images`
+
+ """
+ return extract_features_and_images(
+ write_features=write_features,
+ export_features=export_features,
+ write_images=False,
+ export_images=False,
+ write_dir=write_dir,
+ **kwargs
+ )
+
+
+
+
+[docs]
+def extract_features_generator(
+ write_features: bool = False,
+ export_features: bool = True,
+ **kwargs
+):
+ """
+ Compute features from regions of interest in images. This generator is a wrapper around
+ :func:`~mirp.extractFeaturesAndImages.extract_features_and_images_generator`.
+
+ Parameters
+ ----------
+ write_features: bool, default: False
+ Determines whether features computed from images should be written to the directory indicated by the
+ ``write_dir`` keyword argument.
+
+ export_features: bool, default: True
+ Determines whether features computed from images should be returned by the function.
+
+ **kwargs:
+ Keyword arguments passed to :func:`~mirp.extractFeaturesAndImages.extract_features_and_images_generator`.
+
+ Returns
+ -------
+ None | list[Any]
+ List of feature tables, if ``export_features=True``.
+
+ See Also
+ --------
+ :func:`~mirp.extractFeaturesAndImages.extract_features_and_images_generator`
+
+ """
+ yield from extract_features_and_images_generator(
+ write_features=write_features,
+ export_features=export_features,
+ write_images=False,
+ export_images=False,
+ **kwargs
+ )
+
+
+
+
+[docs]
+def extract_images(
+ write_images: None | bool = True,
+ export_images: None | bool = False,
+ write_dir: None | str = None,
+ **kwargs
+):
+ """
+ Process images and masks. This function is a wrapper around
+ :func:`~mirp.extractFeaturesAndImages.extract_features_and_images`.
+
+ Parameters
+ ----------
+ write_images: bool, optional
+ Determines whether processed images and masks should be written to the directory indicated by the
+ ``write_dir`` keyword argument.
+
+ export_images: bool, optional
+ Determines whether processed images and masks should be returned by the function.
+
+ write_dir: str, optional
+ Path to directory where processed images and masks should be written. If not set, processed images and masks
+ are returned by this function. Required if ``write_images=True``.
+
+ **kwargs:
+ Keyword arguments passed to :func:`~mirp.extractFeaturesAndImages.extract_features_and_images`.
+
+ Returns
+ -------
+ None | list[Any]
+ List of feature tables, if ``export_images=True``.
+
+ See Also
+ --------
+ :func:`~mirp.extractFeaturesAndImages.extract_features_and_images`
+
+ """
+ return extract_features_and_images(
+ write_features=False,
+ export_features=False,
+ write_images=write_images,
+ export_images=export_images,
+ write_dir=write_dir,
+ **kwargs
+ )
+
+
+
+
+[docs]
+def extract_images_generator(
+ write_images: bool = False,
+ export_images: bool = True,
+ **kwargs
+):
+ """
+ Process images and masks. This generator is a wrapper around
+ :func:`~mirp.extractFeaturesAndImages.extract_features_and_images_generator`.
+
+ Parameters
+ ----------
+ write_images: bool, default: True
+ Determines whether processed images and masks should be written to the directory indicated by the
+ ``write_dir`` keyword argument.
+
+ export_images: bool, default: False
+ Determines whether processed images and masks should be returned by the function.
+
+ **kwargs:
+ Keyword arguments passed to :func:`~mirp.extractFeaturesAndImages.extract_features_and_images_generator`.
+
+ Yields
+ ------
+ None | list[Any]
+ List of feature tables, if ``export_images=True``.
+
+ See Also
+ --------
+ :func:`~mirp.extractFeaturesAndImages.extract_features_and_images_generator`
+
+ """
+ yield from extract_features_and_images_generator(
+ write_features=False,
+ export_features=False,
+ write_images=write_images,
+ export_images=export_images,
+ **kwargs
+ )
+
+
+
+
+[docs]
+def extract_features_and_images(
+ image_export_format: str = "dict",
+ num_cpus: None | int = None,
+ **kwargs
+):
+ """
+ Processes images and computes features from regions of interest.
+
+ Parameters
+ ----------
+ image_export_format: {"dict", "native", "numpy"}, default: "numpy"
+ Return format for processed images and masks. ``"dict"`` returns dictionaries of images and masks as numpy
+ arrays and associated characteristics. ``"native"`` returns images and masks in their internal format.
+ ``"numpy"`` returns images and masks in numpy format. This argument is only used if ``export_images=True``.
+
+ num_cpus: int, optional, default: None
+ Number of CPU nodes that should be used for parallel processing. Image processing and feature computation can be
+ parallelized using the ``ray`` package. If a ray cluster is defined by the user, this cluster will be used
+ instead. By default, images are processed sequentially.
+
+ **kwargs:
+ Keyword arguments passed for importing images and masks (
+ :func:`mirp._data_import.import_image_and_mask.import_image_and_mask`) and configuring settings:
+
+ * general settings (:class:`~mirp.settings.general_parameters.GeneralSettingsClass`)
+ * image post-processing (:class:`~mirp.settings.image_processing_parameters.ImagePostProcessingClass`)
+ * image perturbation / augmentation (:class:`~mirp.settings.perturbation_parameters.ImagePerturbationSettingsClass`)
+ * image interpolation / resampling (
+ :class:`~mirp.settings.interpolation_parameters.ImageInterpolationSettingsClass` and
+ :class:`~mirp.settings.interpolation_parameters.MaskInterpolationSettingsClass`)
+ * mask resegmentation (:class:`~mirp.settings.resegmentation_parameters.ResegmentationSettingsClass`)
+ * image transformation (:class:`~mirp.settings.transformation_parameters.ImageTransformationSettingsClass`)
+ * feature computation / extraction (
+ :class:`~mirp.settings.feature_parameters.FeatureExtractionSettingsClass`)
+
+ Returns
+ -------
+ None | list[Any]
+ List of features, images and masks, depending on ``export_features`` and ``export_images``.
+
+ See Also
+ --------
+ Keyword arguments can be provided to configure the following:
+
+ * image and mask import (:func:`~mirp._data_import.import_image_and_mask.import_image_and_mask`)
+ * general settings (:class:`~mirp.settings.general_parameters.GeneralSettingsClass`)
+ * image post-processing (:class:`~mirp.settings.image_processing_parameters.ImagePostProcessingClass`)
+ * image perturbation / augmentation (:class:`~mirp.settings.perturbation_parameters.ImagePerturbationSettingsClass`)
+ * image interpolation / resampling (:class:`~mirp.settings.interpolation_parameters.ImageInterpolationSettingsClass` and
+ :class:`~mirp.settings.interpolation_parameters.MaskInterpolationSettingsClass`)
+ * mask resegmentation (:class:`~mirp.settings.resegmentation_parameters.ResegmentationSettingsClass`)
+ * image transformation (:class:`~mirp.settings.transformation_parameters.ImageTransformationSettingsClass`)
+ * feature computation / extraction (:class:`~mirp.settings.feature_parameters.FeatureExtractionSettingsClass`)
+
+ """
+
+ # Conditionally start a ray cluster.
+ external_ray = ray.is_initialized()
+ if not external_ray and num_cpus is not None and num_cpus > 1:
+ ray.init(num_cpus=num_cpus)
+
+ if ray.is_initialized():
+ # Parallel processing.
+ results = [
+ _ray_extractor.remote(workflow=workflow, image_export_format=image_export_format)
+ for workflow in _base_extract_features_and_images(**kwargs)
+ ]
+
+ results = ray.get(results)
+ if not external_ray:
+ ray.shutdown()
+
+ else:
+ # Sequential processing.
+ workflows = list(_base_extract_features_and_images(**kwargs))
+ results = [workflow.standard_extraction(image_export_format=image_export_format) for workflow in workflows]
+
+ return results
+
+
+
+
+[docs]
+def extract_features_and_images_generator(
+ image_export_format: str = "dict",
+ **kwargs
+):
+ """
+ Processes images and computes features from regions of interest as a generator.
+
+ Parameters
+ ----------
+ image_export_format: {"dict", "native", "numpy"}, default: "numpy"
+ Return format for processed images and masks. ``"dict"`` returns dictionaries of images and masks as numpy
+ arrays and associated characteristics. ``"native"`` returns images and masks in their internal format.
+ ``"numpy"`` returns images and masks in numpy format. This argument is only used if ``export_images=True``.
+
+ **kwargs:
+ Keyword arguments passed for importing images and masks (
+ :func:`mirp._data_import.import_image_and_mask.import_image_and_mask`) and configuring settings:
+
+ * general settings (:class:`~mirp.settings.general_parameters.GeneralSettingsClass`)
+ * image post-processing (:class:`~mirp.settings.image_processing_parameters.ImagePostProcessingClass`)
+ * image perturbation / augmentation (:class:`~mirp.settings.perturbation_parameters.ImagePerturbationSettingsClass`)
+ * image interpolation / resampling (
+ :class:`~mirp.settings.interpolation_parameters.ImageInterpolationSettingsClass` and
+ :class:`~mirp.settings.interpolation_parameters.MaskInterpolationSettingsClass`)
+ * mask resegmentation (:class:`~mirp.settings.resegmentation_parameters.ResegmentationSettingsClass`)
+ * image transformation (:class:`~mirp.settings.transformation_parameters.ImageTransformationSettingsClass`)
+ * feature computation / extraction (
+ :class:`~mirp.settings.feature_parameters.FeatureExtractionSettingsClass`)
+
+ Yields
+ ------
+ None | list[Any]
+ List of features, images and masks, depending on ``export_features`` and ``export_images``.
+
+ See Also
+ --------
+ Keyword arguments can be provided to configure the following:
+
+ * image and mask import (:func:`~mirp._data_import.import_image_and_mask.import_image_and_mask`)
+ * general settings (:class:`~mirp.settings.general_parameters.GeneralSettingsClass`)
+ * image post-processing (:class:`~mirp.settings.image_processing_parameters.ImagePostProcessingClass`)
+ * image perturbation / augmentation (:class:`~mirp.settings.perturbation_parameters.ImagePerturbationSettingsClass`)
+ * image interpolation / resampling (:class:`~mirp.settings.interpolation_parameters.ImageInterpolationSettingsClass`
+ and :class:`~mirp.settings.interpolation_parameters.MaskInterpolationSettingsClass`)
+ * mask resegmentation (:class:`~mirp.settings.resegmentation_parameters.ResegmentationSettingsClass`)
+ * image transformation (:class:`~mirp.settings.transformation_parameters.ImageTransformationSettingsClass`)
+ * feature computation / extraction (:class:`~mirp.settings.feature_parameters.FeatureExtractionSettingsClass`)
+
+ """
+
+ workflows = list(_base_extract_features_and_images(**kwargs))
+ for workflow in workflows:
+ yield workflow.standard_extraction(image_export_format=image_export_format)
+
+
+
+@ray.remote
+def _ray_extractor(workflow: StandardWorkflow, image_export_format="dict"):
+ # Limit internal threading by third-party libraries.
+ from mirp.utilities.parallel import limit_inner_threads
+ limit_inner_threads()
+
+ return workflow.standard_extraction(image_export_format=image_export_format)
+
+
+def _base_extract_features_and_images(
+ image,
+ mask=None,
+ sample_name: None | str | list[str] = None,
+ image_name: None | str | list[str] = None,
+ image_file_type: None | str = None,
+ image_modality: None | str | list[str] = None,
+ image_sub_folder: None | str = None,
+ mask_name: None | str | list[str] = None,
+ mask_file_type: None | str = None,
+ mask_modality: None | str | list[str] = None,
+ mask_sub_folder: None | str = None,
+ roi_name: None | str | list[str] | dict[str, str] = None,
+ association_strategy: None | str | list[str] = None,
+ settings: None | str | SettingsClass | list[SettingsClass] = None,
+ stack_masks: str = "auto",
+ stack_images: str = "auto",
+ write_features: None | bool = None,
+ export_features: None | bool = None,
+ write_images: None | bool = None,
+ export_images: None | bool = None,
+ write_dir: None | str = None,
+ **kwargs
+):
+ from mirp.data_import.import_image_and_mask import import_image_and_mask
+ from mirp.settings.import_config_parameters import import_configuration_settings
+
+ # Infer write_images, export_images, write_features, export_features based on write_dir.
+ if write_images is None:
+ write_images = write_dir is not None
+ if export_images is None:
+ export_images = write_dir is None
+ if write_features is None:
+ write_features = write_dir is not None
+ if export_features is None:
+ export_features = write_dir is None
+
+ if not write_images and not write_features:
+ write_dir = None
+
+ if write_images and write_dir is None:
+ raise ValueError("write_dir argument is required for writing images and masks, but not provided.")
+ if write_features and write_dir is None:
+ raise ValueError("write_dir argument is required for writing feature tables, but not provided.")
+
+ if not write_features and not write_images and not export_features and not export_images:
+ raise ValueError(
+ f"At least one of write_features, write_images, export_features and export_images should be True."
+ )
+
+ # Import settings (to provide immediate feedback if something is amiss).
+ if isinstance(settings, str):
+ settings = import_configuration_settings(
+ compute_features=write_features or export_features,
+ path=settings
+ )
+ elif isinstance(settings, SettingsClass):
+ settings = [settings]
+ elif isinstance(settings, Iterable) and all(isinstance(x, SettingsClass) for x in settings):
+ settings = list(settings)
+ elif settings is None:
+ settings = import_configuration_settings(
+ compute_features=write_features or export_features,
+ **kwargs
+ )
+ else:
+ raise TypeError(
+ f"The 'settings' argument is expected to be a path to a configuration xml file, a SettingsClass object, or "
+ f"a list thereof. Found: {type(settings)}."
+ )
+
+ image_list = import_image_and_mask(
+ image=image,
+ mask=mask,
+ sample_name=sample_name,
+ image_name=image_name,
+ image_file_type=image_file_type,
+ image_modality=image_modality,
+ image_sub_folder=image_sub_folder,
+ mask_name=mask_name,
+ mask_file_type=mask_file_type,
+ mask_modality=mask_modality,
+ mask_sub_folder=mask_sub_folder,
+ roi_name=roi_name,
+ association_strategy=association_strategy,
+ stack_images=stack_images,
+ stack_masks=stack_masks
+ )
+
+ yield from _generate_feature_and_image_extraction_workflows(
+ image_list=image_list,
+ settings=settings,
+ write_dir=write_dir,
+ write_features=write_features,
+ export_features=export_features,
+ write_images=write_images,
+ export_images=export_images
+ )
+
+
+def _generate_feature_and_image_extraction_workflows(
+ image_list: list[ImageFile],
+ settings: list[SettingsClass],
+ write_dir: None | str,
+ write_features: bool,
+ export_features: bool,
+ write_images: bool,
+ export_images: bool
+) -> Generator[StandardWorkflow, None, None]:
+
+ for image_file in image_list:
+ for current_settings in settings:
+
+ if not current_settings.feature_extr.has_any_feature_family() and (
+ current_settings.img_transform.spatial_filters is not None and not
+ current_settings.img_transform.feature_settings.has_any_feature_family()
+ ) and (export_features or write_features):
+ raise ValueError(
+ "No feature families were specified. Please set 'base_feature_families' or"
+ " 'response_map_feature_families'."
+ )
+
+ if current_settings.perturbation.noise_repetitions is None or \
+ current_settings.perturbation.noise_repetitions == 0:
+ noise_repetition_ids = [None]
+ else:
+ noise_repetition_ids = list(range(current_settings.perturbation.noise_repetitions))
+
+ if current_settings.perturbation.rotation_angles is None or len(
+ current_settings.perturbation.rotation_angles) == 0 or all(
+ x == 0.0 for x in current_settings.perturbation.rotation_angles
+ ):
+ rotation_angles = [None]
+ else:
+ rotation_angles = copy.deepcopy(current_settings.perturbation.rotation_angles)
+
+ if current_settings.perturbation.translation_fraction is None or len(
+ current_settings.perturbation.translation_fraction) == 0 or all(
+ x == 0.0 for x in current_settings.perturbation.translation_fraction
+ ):
+ translations = [None]
+ else:
+ config_translation = copy.deepcopy(current_settings.perturbation.translation_fraction)
+ translations = []
+ for translation_x in config_translation:
+ for translation_y in config_translation:
+ if not current_settings.general.by_slice:
+ for translation_z in config_translation:
+ translations += [(translation_z, translation_y, translation_x)]
+ else:
+ translations += [(0.0, translation_y, translation_x)]
+
+ if current_settings.img_interpolate.new_spacing is None or len(
+ current_settings.img_interpolate.new_spacing) == 0 or all(
+ x == 0.0 for x in current_settings.img_interpolate.new_spacing
+ ):
+ spacings = [None]
+ else:
+ spacings = copy.deepcopy(current_settings.img_interpolate.new_spacing)
+
+ for noise_repetition_id in noise_repetition_ids:
+ for rotation_angle in rotation_angles:
+ for translation in translations:
+ for spacing in spacings:
+ yield StandardWorkflow(
+ image_file=copy.deepcopy(image_file),
+ write_dir=write_dir,
+ settings=current_settings,
+ settings_name=current_settings.general.config_str,
+ write_features=write_features,
+ export_features=export_features,
+ write_images=write_images,
+ export_images=export_images,
+ noise_iteration_id=noise_repetition_id,
+ rotation=rotation_angle,
+ translation=translation,
+ new_image_spacing=spacing
+ )
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/_modules/mirp/extract_image_parameters.html b/docs/_modules/mirp/extract_image_parameters.html
new file mode 100644
index 00000000..017ff528
--- /dev/null
+++ b/docs/_modules/mirp/extract_image_parameters.html
@@ -0,0 +1,220 @@
+
+
+
+
+
+ mirp.extract_image_parameters — mirp 2.2.0 documentation
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Source code for mirp.extract_image_parameters
+import os
+import pandas as pd
+
+from mirp._data_import.generic_file import ImageFile
+
+
+
+[docs]
+def extract_image_parameters(
+ image,
+ sample_name: None | str | list[str] = None,
+ image_name: None | str | list[str] = None,
+ image_file_type: None | str = None,
+ image_modality: None | str | list[str] = None,
+ image_sub_folder: None | str = None,
+ stack_images: str = "auto",
+ write_file: bool = False,
+ write_dir: None | str = None
+) -> pd.DataFrame | None:
+ """
+ Extract parameters related to image acquisition and reconstruction from images. Not all metadata may
+ be available.
+
+ Parameters
+ ----------
+ image: Any
+ A path to an image file, a path to a directory containing image files, a path to a config_data.xml
+ file, a path to a csv file containing references to image files, a pandas.DataFrame containing references to
+ image files, or a numpy.ndarray.
+
+ sample_name: str or list of str, default: None
+ Name of expected sample names. This is used to select specific image files. If None, no image files are
+ filtered based on the corresponding sample name (if known).
+
+ image_name: str, optional, default: None
+ Pattern to match image files against. The matches are exact. Use wildcard symbols ("*") to
+ match varying structures. The sample name (if part of the file name) can also be specified using "#". For
+ example, image_name = '#_*_image' would find John_Doe in John_Doe_CT_image.nii or John_Doe_001_image.nii.
+ File extensions do not need to be specified. If None, file names are not used for filtering files and
+ setting sample names.
+
+ image_file_type: {"dicom", "nifti", "nrrd", "numpy", "itk"}, optional, default: None
+ The type of file that is expected. If None, the file type is not used for filtering files.
+ "itk" comprises "nifti" and "nrrd" file types.
+
+ image_modality: {"ct", "pet", "pt", "mri", "mr", "rtdose", "generic"}, optional, default: None
+ The type of modality that is expected. If None, modality is not used for filtering files. Note that only
+ DICOM files contain metadata concerning modality.
+
+ image_sub_folder: str, optional, default: None
+ Fixed directory substructure where image files are located. If None, the directory substructure is not used
+ for filtering files.
+
+ stack_images: {"auto", "yes", "no"}, optional, default: "str"
+ If image files in the same directory cannot be assigned to different samples, and are 2D (slices) of the same
+ size, they might belong to the same 3D image stack. "auto" will stack 2D numpy arrays, but not other file types.
+ "yes" will stack all files that contain 2D images, that have the same dimensions, orientation and spacing,
+ except for DICOM files. "no" will not stack any files. DICOM files ignore this argument, because their stacking
+ can be determined from metadata.
+
+ write_file: bool, optional, default: False
+ Determines whether image acquisition and reconstruction metadata should be written to a table.
+
+ write_dir: str, optional, default: None
+ Folder to which the table with image acquisition and reconstruction metadata is written.
+
+ Returns
+ -------
+ pd.DataFrame | None
+ The functions returns a table with metadata (`write_file == False`) or nothing (`write_file == True`)
+ """
+
+ from mirp.data_import.import_image import import_image
+
+ if not write_file:
+ write_dir = None
+
+ if write_file and write_dir is None:
+ raise ValueError("write_dir argument should be provided for writing a table with image metadata.")
+
+ image_list = import_image(
+ image=image,
+ sample_name=sample_name,
+ image_name=image_name,
+ image_file_type=image_file_type,
+ image_modality=image_modality,
+ image_sub_folder=image_sub_folder,
+ stack_images=stack_images
+ )
+
+ metadata = [_extract_image_parameters(ii, image) for ii, image in enumerate(image_list)]
+ metadata = pd.DataFrame(metadata)
+
+ if write_file:
+ write_dir = os.path.normpath(write_dir)
+ if not os.path.exists(write_dir):
+ os.makedirs(write_dir)
+
+ metadata.to_csv(
+ path_or_buf=os.path.join(write_dir, "mask_labels.csv")
+ )
+ else:
+ return metadata
+
+
+
+def _extract_image_parameters(index: int, image: ImageFile) -> dict[str, str]:
+
+ metadata = image.export_metadata()
+ metadata.update({"image_index": index})
+
+ return metadata
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/_modules/mirp/extract_mask_labels.html b/docs/_modules/mirp/extract_mask_labels.html
new file mode 100644
index 00000000..1747966a
--- /dev/null
+++ b/docs/_modules/mirp/extract_mask_labels.html
@@ -0,0 +1,222 @@
+
+
+
+
+
+ mirp.extract_mask_labels — mirp 2.2.0 documentation
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Source code for mirp.extract_mask_labels
+from typing import Any
+
+import os
+import pandas as pd
+
+from mirp._data_import.generic_file import MaskFile
+
+
+
+[docs]
+def extract_mask_labels(
+ mask=None,
+ sample_name: None | str | list[str] = None,
+ mask_name: None | str | list[str] = None,
+ mask_file_type: None | str = None,
+ mask_modality: None | str | list[str] = None,
+ mask_sub_folder: None | str = None,
+ stack_masks: str = "auto",
+ write_file: bool = False,
+ write_dir: None | str = None
+) -> pd.DataFrame | None:
+ """
+ Extract labels of regions of interest present in one or more mask files.
+
+ Parameters
+ ----------
+ mask: Any
+ A path to a mask file, a path to a directory containing mask files, a path to a config_data.xml
+ file, a path to a csv file containing references to mask files, a pandas.DataFrame containing references to
+ mask files, or a numpy.ndarray.
+
+ sample_name: str or list of str, optional, default: None
+ Name of expected sample names. This is used to select specific mask files. If None, no mask files are filtered
+ based on the corresponding sample name (if known).
+
+ mask_name: str, optional, default: None
+ Pattern to match mask files against. The matches are exact. Use wildcard symbols ("*") to match varying
+ structures. The sample name (if part of the file name) can also be specified using "#". For example,
+ mask_name = '#_*_mask' would find John_Doe in John_Doe_CT_mask.nii or John_Doe_001_mask.nii. File extensions
+ do not need to be specified. If None, file names are not used for filtering files and setting sample names.
+
+ mask_file_type: {"dicom", "nifti", "nrrd", "numpy", "itk"}, optional, default: None
+ The type of file that is expected. If None, the file type is not used for filtering files.
+ "itk" comprises "nifti" and "nrrd" file types.
+
+ mask_modality: {"rtstruct", "seg", "generic_mask"}, optional, default: None
+ The type of modality that is expected. If None, modality is not used for filtering files.
+ Note that only DICOM files contain metadata concerning modality. Masks from non-DICOM files are considered to
+ be "generic_mask".
+
+ mask_sub_folder: str, optional, default: None
+ Fixed directory substructure where mask files are located. If None, the directory substructure is not used for
+ filtering files.
+
+ stack_masks: {"auto", "yes", "no"}, optional, default: "str"
+ If mask files in the same directory cannot be assigned to different samples, and are 2D (slices) of the same
+ size, they might belong to the same 3D mask stack. "auto" will stack 2D numpy arrays, but not other file
+ types. "yes" will stack all files that contain 2D images, that have the same dimensions, orientation and
+ spacing, except for DICOM files. "no" will not stack any files. DICOM files ignore this argument,
+ because their stacking can be determined from metadata.
+
+ write_file: bool, optional, default: False
+ Determines whether the labels should be written to a table.
+
+ write_dir: str, optional, default: None
+ Folder to which a table with mask labels should be written.
+
+ Returns
+ -------
+ pd.DataFrame | None
+ The functions returns a table with labels extracted from mask files (`write_file == False`),
+ or None `(write_file == True)`.
+
+ """
+ from mirp.data_import.import_mask import import_mask
+
+ if not write_file:
+ write_dir = None
+
+ if write_file and write_dir is None:
+ raise ValueError("write_dir argument should be provided for writing a table with mask labels.")
+
+ mask_list = import_mask(
+ mask=mask,
+ sample_name=sample_name,
+ mask_name=mask_name,
+ mask_file_type=mask_file_type,
+ mask_modality=mask_modality,
+ mask_sub_folder=mask_sub_folder,
+ stack_masks=stack_masks
+ )
+
+ labels = [pd.DataFrame(_extract_mask_labels(ii, mask)) for ii, mask in enumerate(mask_list)]
+ labels = pd.concat(labels)
+
+ if write_file:
+ write_dir = os.path.normpath(write_dir)
+ if not os.path.exists(write_dir):
+ os.makedirs(write_dir)
+
+ labels.to_csv(
+ path_or_buf=os.path.join(write_dir, "mask_labels.csv")
+ )
+ else:
+ return labels
+
+
+
+def _extract_mask_labels(index: int, mask: MaskFile) -> dict[str, Any]:
+
+ labels = mask.export_roi_labels()
+ labels.update({"mask_index": index})
+
+ return labels
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/_modules/mirp/importData/importImageAndMask.html b/docs/_modules/mirp/importData/importImageAndMask.html
index 408891be..3af39d96 100644
--- a/docs/_modules/mirp/importData/importImageAndMask.html
+++ b/docs/_modules/mirp/importData/importImageAndMask.html
@@ -1,22 +1,20 @@
-
+
- mirp.importData.importImageAndMask — mirp 2.1.1 documentation
-
-
-
-
+ mirp.importData.importImageAndMask — mirp 2.1.0 documentation
+
+
-
-
-
-
-
+
+
+
+
+
@@ -84,7 +82,9 @@ Source code for mirp.importData.importImageAndMask
from mirp.utilities.utilities import random_string
-[docs]def import_image_and_mask(
+
+[docs]
+def import_image_and_mask(
image,
mask=None,
sample_name: None | str | list[str] = None,
@@ -214,9 +214,7 @@ Source code for mirp.importData.importImageAndMask
)
if len(image_list) == 0:
- raise ValueError(f"No images were found. Possible reasons are lack of images with the preferred modality.")
- if len(mask_list) == 0:
- raise ValueError(f"No masks were found. Possible reasons are lack of masks with the preferred modality.")
+ raise ValueError(f"No images were present.")
# Determine association strategy, if this is unset.
possible_association_strategy = set_association_strategy(
@@ -308,6 +306,7 @@ Source code for mirp.importData.importImageAndMask
return image_list
+
def set_association_strategy(
image_list: list[ImageFile] | list[ImageDicomFile],
mask_list: list[MaskFile] | list[MaskDicomFile]
diff --git a/docs/_modules/mirp/settings/feature_parameters.html b/docs/_modules/mirp/settings/feature_parameters.html
new file mode 100644
index 00000000..956027bf
--- /dev/null
+++ b/docs/_modules/mirp/settings/feature_parameters.html
@@ -0,0 +1,765 @@
+
+
+
+
+
+ mirp.settings.feature_parameters — mirp 2.2.0 documentation
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Source code for mirp.settings.feature_parameters
+from typing import Any
+from dataclasses import dataclass
+from mirp.settings.utilities import setting_def
+
+
+
+[docs]
+@dataclass
+class FeatureExtractionSettingsClass:
+ """
+ Parameters related to feature computation. Many are conditional on the type of features that will be computed (
+ ``base_feature_families``).
+
+ Parameters
+ ----------
+ by_slice: str or bool, optional, default: False
+ Defines whether calculations should be performed in 2D (True) or 3D (False), or alternatively only in the
+ largest slice ("largest"). See :class:`~mirp.settings.general_parameters.GeneralSettingsClass`.
+
+ no_approximation: bool, optional, default: False
+ Disables approximation of features, such as Geary's c-measure. Can be True or False (default). See
+ :class:`~mirp.settings.general_parameters.GeneralSettingsClass`.
+
+ ibsi_compliant: bool, optional, default: True
+ Limits computation of features to those features that have a reference value in the IBSI reference standard.
+
+ base_feature_families: str or list of str, optional, default: "none"
+ Determines the feature families for which features are computed. Radiomics features are implemented as
+ defined in the IBSI reference manual. The following feature families are currently present, and can be added
+ using the following tags:
+
+ * Morphological features: "mrp", "morph", "morphology", and "morphological".
+ * Local intensity features: "li", "loc.int", "loc_int", "local_int", and "local_intensity".
+ * Intensity-based statistical features: "st", "stat", "stats", "statistics", and "statistical".
+ * Intensity histogram features: "ih", "int_hist", "int_histogram", and "intensity_histogram".
+ * Intensity-volume histogram features: "ivh", "int_vol_hist", and "intensity_volume_histogram".
+ * Grey level co-occurrence matrix (GLCM) features: "cm", "glcm", "grey_level_cooccurrence_matrix",
+ and "cooccurrence_matrix".
+ * Grey level run length matrix (GLRLM) features: "rlm", "glrlm", "grey_level_run_length_matrix", and
+ "run_length_matrix".
+ * Grey level size zone matrix (GLSZM) features: "szm", "glszm", "grey_level_size_zone_matrix", and
+ "size_zone_matrix".
+ * Grey level distance zone matrix (GLDZM) features: "dzm", "gldzm", "grey_level_distance_zone_matrix", and
+ "distance_zone_matrix".
+ * Neighbourhood grey tone difference matrix (NGTDM) features: "tdm", "ngtdm",
+ "neighbourhood_grey_tone_difference_matrix", and "grey_tone_difference_matrix".
+ * Neighbouring grey level dependence matrix (NGLDM) features: "ldm", "ngldm",
+ "neighbouring_grey_level_dependence_matrix", and "grey_level_dependence_matrix".
+
+ In addition, the following tags can be used:
+
+ * "none": no features are computed.
+ * "all": all features are computed.
+
+ A list of strings may be provided to select multiple feature families.
+
+ .. note::
+ Even though ``"none"`` is the internal default, the :func:`~mirp.extractFeaturesAndImages.extract_features`
+ function overrides this, and sets the default to ``"all"``.
+
+ base_discretisation_method: {"fixed_bin_number", "fixed_bin_size", "fixed_bin_size_pyradiomics", "none"}
+ Method used for discretising intensities. Used to compute intensity histogram as well as texture features.
+ The setting is ignored if none of these feature families are being computed. The following options are
+ available:
+
+ * "fixed_bin_number": The intensity range within the mask is divided into a fixed number of bins,
+ defined by the ``base_discretisation_bin_width`` parameter.
+ * "fixed_bin_size": The intensity range is divided into bins with a fixed width, defined using the
+ ``base_discretisation_bin_width`` parameter. The lower bound of the range is determined from the lower
+ bound of the mask resegmentation range, see the ``resegmentation_intensity_range`` in
+ :class:`~mirp.settings.resegmentation_parameters.ResegmentationSettingsClass`. CT images have a default
+ lower bound of the initial bin at -1000.0 and PET images have a default lower bound at 0.0. Other images,
+ including MRI, normalised CT and PET images and filtered images, do not have a default value.
+ * "fixed_bin_size_pyradiomics": The intensity range is divided into bins with a fixed width. This follows the
+ non-IBSI compliant implementation in the pyradiomics package.
+ * "none": The intensity range is not discretised into bins. This method can only be used if the image
+ intensities are integer and strictly positive.
+
+ There is no default method. Multiple methods can be specified as a list to yield features according to each
+ method.
+
+ .. warning::
+ The "fixed_bin_size_pyradiomics" is not IBSI compliant, and should only be used when
+ reproducing results from studies that used pyradiomics.
+
+ base_discretisation_n_bins: int or list of int
+ Number of bins used for the "fixed_bin_number" discretisation method. No default value. Multiple values can
+ be specified in a list to yield features according to each number of bins.
+
+ base_discretisation_bin_width: float or list of float
+ Width of each bin in the "fixed_bin_size" discretisation method. No default value. Multiple values can be
+ specified in a list to yield features according to each bin width.
+
+ ivh_discretisation_method: {"fixed_bin_number", "fixed_bin_size", "none"}, optional, default: "none"
+ Method used for discretising intensities for computing intensity-volume histograms. The discretisation
+ methods follow those in ``base_discretisation_method``. The "none" method changes to "fixed_bin_number" if
+ the underlying data are not suitable.
+
+ ivh_discretisation_n_bins: int, optional, default: 1000
+ Number of bins used for the "fixed_bin_number" discretisation method.
+
+ ivh_discretisation_bin_width: float, optional
+ Width of each bin in the "fixed_bin_size" discretisation method. No default value.
+
+ glcm_distance: float or list of float, optional, default: 1.0
+ Distance (in voxels) for GLCM for determining the neighbourhood. Chebyshev, or checkerboard, distance is
+ used. A value of 1.0 will therefore consider all (diagonally) adjacent voxels as its neighbourhood. A list of
+ values can be provided to compute GLCM features at different scales.
+
+ glcm_spatial_method: {"2d_average", "2d_slice_merge", "2.5d_direction_merge", "2.5d_volume_merge", "3d_average", "3d_volume_merge"}, optional
+ Determines how co-occurrence matrices are formed and aggregated. One of the following:
+
+ * "2d_average": features are computed from all matrices then averaged [IBSI:BTW3].
+ * "2d_slice_merge": matrices in the same slice are merged, features computed and then averaged [IBSI:SUJT].
+ * "2.5d_direction_merge": matrices for the same direction are merged, features computed and then averaged
+ [IBSI:JJUI].
+ * "2.5d_volume_merge": all matrices are merged and a single feature is calculated [IBSI:ZW7Z].
+ * "3d_average": features are computed from all matrices then averaged [IBSI:ITBB].
+ * "3d_volume_merge": all matrices are merged and a single feature is computed from the merged matrix
+ [IBSI:IAZD].
+
+ A list of values may be provided to extract features for multiple spatial methods. Default: "2d_slice_merge"
+ (``by_slice = False``) or "3d_volume_merge" (``by_slice = True``).
+
+ glrlm_spatial_method: {"2d_average", "2d_slice_merge", "2.5d_direction_merge", "2.5d_volume_merge", "3d_average", "3d_volume_merge"}, optional
+ Determines how run length matrices are formed and aggregated. One of the following:
+
+ * "2d_average": features are calculated from all matrices then averaged [IBSI:BTW3].
+ * "2d_slice_merge": matrices in the same slice are merged, features computed and then averaged [IBSI:SUJT].
+ * "2.5d_direction_merge": matrices for the same direction are merged, features computed and then averaged
+ [IBSI:JJUI].
+ * "2.5d_volume_merge": all matrices are merged and a single feature is computed [IBSI:ZW7Z].
+ * "3d_average": features are computed from all matrices then averaged [IBSI:ITBB].
+ * "3d_volume_merge": all matrices are merged and a single feature is computed from the merged matrix
+ [IBSI:IAZD].
+
+ A list of values may be provided to extract features for multiple spatial methods. Default:
+ "2d_slice_merge" (``by_slice = False``) or "3d_volume_merge" (``by_slice = True``).
+
+ glszm_spatial_method: {"2d", "2.5d", "3d"}, optional
+ Determines how the size zone matrices are formed and aggregated. One of the following:
+
+ * "2d": features are computed from individual matrices and subsequently averaged [IBSI:8QNN].
+ * "2.5d": all 2D matrices are merged and features are computed from this single matrix [IBSI:62GR].
+ * "3d": features are computed from a single 3D matrix [IBSI:KOBO].
+
+ A list of values may be provided to extract features for multiple spatial methods. Default: "2d"
+ (``by_slice = False``) or "3d" (``by_slice = True``).
+
+ gldzm_spatial_method: {"2d", "2.5d", "3d"}, optional
+ Determines how the distance zone matrices are formed and aggregated. One of the following:
+
+ * "2d": features are computed from individual matrices and subsequently averaged [IBSI:8QNN].
+ * "2.5d": all 2D matrices are merged and features are computed from this single matrix [IBSI:62GR].
+ * "3d": features are computed from a single 3D matrix [IBSI:KOBO].
+
+ A list of values may be provided to extract features for multiple spatial methods. Default: "2d"
+ (``by_slice = False``) or "3d" (``by_slice = True``).
+
+ ngtdm_spatial_method: {"2d", "2.5d", "3d"}, optional
+ Determines how the neighbourhood grey tone difference matrices are formed and aggregated. One of the
+ following:
+
+ * "2d": features are computed from individual matrices and subsequently averaged [IBSI:8QNN].
+ * "2.5d": all 2D matrices are merged and features are computed from this single matrix [IBSI:62GR].
+ * "3d": features are computed from a single 3D matrix [IBSI:KOBO].
+
+ A list of values may be provided to extract features for multiple spatial methods. Default: "2d"
+ (``by_slice = False``) or "3d" (``by_slice = True``).
+
+ ngldm_distance: float or list of float, optional, default: 1.0
+ Distance (in voxels) for NGLDM for determining the neighbourhood. Chebyshev, or checkerboard, distance is
+ used. A value of 1.0 will therefore consider all (diagonally) adjacent voxels as its neighbourhood. A list of
+ values can be provided to compute NGLDM features at different scales.
+
+ ngldm_difference_level: float or list of float, optional, default: 0.0
+ Difference level (alpha) for NGLDM. Determines which bins are grouped together in the matrix.
+
+ ngldm_spatial_method: {"2d", "2.5d", "3d"}, optional
+ Determines how the neighbourhood grey level dependence matrices are formed and aggregated. One of the
+ following:
+
+ * "2d": features are computed from individual matrices and subsequently averaged [IBSI:8QNN].
+ * "2.5d": all 2D matrices are merged and features are computed from this single matrix [IBSI:62GR].
+ * "3d": features are computed from a single 3D matrix [IBSI:KOBO].
+
+ A list of values may be provided to extract features for multiple spatial methods. Default: "2d"
+ (``by_slice = False``) or "3d" (``by_slice = True``).
+
+ **kwargs: dict, optional
+ Unused keyword arguments.
+ """
+
+ def __init__(
+ self,
+ by_slice: bool = False,
+ no_approximation: bool = False,
+ ibsi_compliant: bool = True,
+ base_feature_families: None | str | list[str] = "none",
+ base_discretisation_method: None | str | list[str] = None,
+ base_discretisation_n_bins: None | int | list[int] = None,
+ base_discretisation_bin_width: None | float | list[float] = None,
+ ivh_discretisation_method: str = "none",
+ ivh_discretisation_n_bins: None | int = 1000,
+ ivh_discretisation_bin_width: None | float = None,
+ glcm_distance: float | list[float] = 1.0,
+ glcm_spatial_method: None | str | list[str] = None,
+ glrlm_spatial_method: None | str | list[str] = None,
+ glszm_spatial_method: None | str | list[str] = None,
+ gldzm_spatial_method: None | str | list[str] = None,
+ ngtdm_spatial_method: None | str | list[str] = None,
+ ngldm_distance: float | list[float] = 1.0,
+ ngldm_difference_level: float | list[float] = 0.0,
+ ngldm_spatial_method: None | str | list[str] = None,
+ **kwargs
+ ):
+ # Set by slice.
+ self.by_slice: bool = by_slice
+
+ # Set approximation flag.
+ self.no_approximation: bool = no_approximation
+
+ # Set IBSI-compliance flag.
+ self.ibsi_compliant: bool = ibsi_compliant
+
+ if base_feature_families is None:
+ base_feature_families = "none"
+
+ # Check families.
+ if not isinstance(base_feature_families, list):
+ base_feature_families = [base_feature_families]
+
+ # Check which entries are valid.
+ valid_families: list[bool] = [ii in self.get_available_families() for ii in base_feature_families]
+
+ if not all(valid_families):
+ raise ValueError(
+ f"One or more families in the base_feature_families parameter were not recognised: "
+ f"{', '.join([base_feature_families[ii] for ii, is_valid in enumerate(valid_families) if not is_valid])}")
+
+ # Set families.
+ self.families: list[str] = base_feature_families
+
+ if not self.has_any_feature_family():
+ self.families = ["none"]
+
+ if self.has_discretised_family():
+ # Check if discretisation_method is None.
+ if base_discretisation_method is None:
+ raise ValueError("The base_discretisation_method parameter has no default and must be set.")
+
+ if not isinstance(base_discretisation_method, list):
+ base_discretisation_method = [base_discretisation_method]
+
+ if not all(discretisation_method in [
+ "fixed_bin_size", "fixed_bin_number", "fixed_bin_size_pyradiomics", "none"
+ ] for discretisation_method in base_discretisation_method):
+ raise ValueError(
+ "Available values for the base_discretisation_method parameter are "
+ "'fixed_bin_number', 'fixed_bin_size', 'fixed_bin_size_pyradiomics' and 'none'. "
+ "One or more values were not recognised."
+ )
+
+ # Check discretisation_n_bins
+ if "fixed_bin_number" in base_discretisation_method:
+ if base_discretisation_n_bins is None:
+ raise ValueError("The base_discretisation_n_bins parameter has no default and must be set")
+
+ if not isinstance(base_discretisation_n_bins, list):
+ base_discretisation_n_bins = [base_discretisation_n_bins]
+
+ if not all(isinstance(n_bins, int) for n_bins in base_discretisation_n_bins):
+ raise TypeError(
+ "The base_discretisation_n_bins parameter is expected to contain integers with "
+ "value 2 or larger. Found one or more values that were not integers.")
+
+ if not all(n_bins >= 2 for n_bins in base_discretisation_n_bins):
+ raise ValueError(
+ "The base_discretisation_n_bins parameter is expected to contain integers with "
+ "value 2 or larger. Found one or more values that were less than 2.")
+
+ else:
+ base_discretisation_n_bins = None
+
+ # Check discretisation_bin_width
+ if "fixed_bin_size" in base_discretisation_method or "fixed_bin_size_pyradiomics" in base_discretisation_method:
+ if base_discretisation_bin_width is None:
+ raise ValueError(
+ "The base_discretisation_bin_width parameter has no default value and must be set.")
+
+ if not isinstance(base_discretisation_bin_width, list):
+ base_discretisation_bin_width = [base_discretisation_bin_width]
+
+ if not all(isinstance(bin_size, float) for bin_size in base_discretisation_bin_width):
+ raise TypeError(
+ "The base_discretisation_bin_width parameter is expected to contain floating "
+ "point values greater than 0.0. Found one or more values that were not floating "
+ "points.")
+
+ if not all(bin_size > 0.0 for bin_size in base_discretisation_bin_width):
+ raise ValueError(
+ "The base_discretisation_bin_width parameter is expected to contain floating "
+ "point values greater than 0.0. Found one or more values that were 0.0 or less.")
+
+ # Check ibsi_compliance
+ if self.ibsi_compliant and "fixed_bin_size_pyradiomics" in base_discretisation_method:
+ raise ValueError(
+ "The fixed_bin_size_pyradiomics method is not IBSI-compliant. If you are sure that you want to "
+ "use this method, use ibsi_compliant = False."
+ )
+
+ else:
+ base_discretisation_bin_width = None
+
+ else:
+ base_discretisation_method = None
+ base_discretisation_n_bins = None
+ base_discretisation_bin_width = None
+
+ # Set discretisation method-related parameters.
+ self.discretisation_method: None | list[str] = base_discretisation_method
+ self.discretisation_n_bins: None | list[int] = base_discretisation_n_bins
+ self.discretisation_bin_width: None | list[float] = base_discretisation_bin_width
+
+ if self.has_ivh_family():
+ if ivh_discretisation_method not in ["fixed_bin_size", "fixed_bin_number", "none"]:
+ raise ValueError(
+ "Available values for the ivh_discretisation_method parameter are 'fixed_bin_size', "
+ "'fixed_bin_number', and 'none'. One or more values were not recognised.")
+
+ # Check discretisation_n_bins
+ if "fixed_bin_number" in ivh_discretisation_method:
+
+ if not isinstance(ivh_discretisation_n_bins, int):
+ raise TypeError(
+ "The ivh_discretisation_n_bins parameter is expected to be an integer with "
+ "value 2 or greater. Found: a value that was not an integer.")
+
+ if not ivh_discretisation_n_bins >= 2:
+ raise ValueError(
+ "The ivh_discretisation_n_bins parameter is expected to be an integer with "
+ f"value 2 or greater. Found: {ivh_discretisation_n_bins}")
+
+ else:
+ ivh_discretisation_n_bins = None
+
+ # Check discretisation_bin_width
+ if "fixed_bin_size" in ivh_discretisation_method:
+
+ if not isinstance(ivh_discretisation_bin_width, float):
+ raise TypeError(
+ "The ivh_discretisation_bin_width parameter is expected to be a floating "
+ "point value greater than 0.0. Found a value that was not a floating point.")
+
+ if not ivh_discretisation_bin_width > 0.0:
+ raise ValueError(
+ "The ivh_discretisation_bin_width parameter is expected to be a floating "
+ f"point value greater than 0.0. Found: {ivh_discretisation_bin_width}")
+
+ else:
+ ivh_discretisation_bin_width = None
+
+ else:
+ ivh_discretisation_method = None
+ ivh_discretisation_n_bins = None
+ ivh_discretisation_bin_width = None
+
+ # Set parameters
+ self.ivh_discretisation_method: None | str = ivh_discretisation_method
+ self.ivh_discretisation_n_bins: None | int = ivh_discretisation_n_bins
+ self.ivh_discretisation_bin_width: None | float = ivh_discretisation_bin_width
+
+ # Set GLCM attributes.
+ if self.has_glcm_family():
+ # Check distance parameter.
+ if not isinstance(glcm_distance, list):
+ glcm_distance = [glcm_distance]
+
+ if not all(isinstance(distance, float) for distance in glcm_distance):
+ raise TypeError(
+ "The glcm_distance parameter is expected to contain floating point values of 1.0 "
+ "or greater. Found one or more values that were not floating points.")
+
+ if not all(distance >= 1.0 for distance in glcm_distance):
+ raise ValueError(
+ "The glcm_distance parameter is expected to contain floating point values of 1.0 "
+ "or greater. Found one or more values that were less than 1.0.")
+
+ # Check spatial method.
+ glcm_spatial_method = self.check_valid_directional_spatial_method(
+ glcm_spatial_method,
+ "glcm_spatial_method")
+
+ else:
+ glcm_distance = None
+ glcm_spatial_method = None
+
+ self.glcm_distance: None | list[float] = glcm_distance
+ self.glcm_spatial_method: None | list[str] = glcm_spatial_method
+
+ # Set GLRLM attributes.
+ if self.has_glrlm_family():
+ # Check spatial method.
+ glrlm_spatial_method = self.check_valid_directional_spatial_method(
+ glrlm_spatial_method, "glrlm_spatial_method")
+
+ else:
+ glrlm_spatial_method = None
+
+ self.glrlm_spatial_method: None | list[str] = glrlm_spatial_method
+
+ # Set GLSZM attributes.
+ if self.has_glszm_family():
+ # Check spatial method.
+ glszm_spatial_method = self.check_valid_omnidirectional_spatial_method(
+ glszm_spatial_method, "glszm_spatial_method")
+ else:
+ glszm_spatial_method = None
+
+ self.glszm_spatial_method: None | list[str] = glszm_spatial_method
+
+ # Set GLDZM attributes.
+ if self.has_gldzm_family():
+ # Check spatial method.
+ gldzm_spatial_method = self.check_valid_omnidirectional_spatial_method(
+ gldzm_spatial_method, "gldzm_spatial_method")
+
+ else:
+ gldzm_spatial_method = None
+
+ self.gldzm_spatial_method: None | list[str] = gldzm_spatial_method
+
+ # Set NGTDM attributes.
+ if self.has_ngtdm_family():
+ # Check spatial method
+ ngtdm_spatial_method = self.check_valid_omnidirectional_spatial_method(
+ ngtdm_spatial_method, "ngtdm_spatial_method")
+
+ else:
+ ngtdm_spatial_method = None
+
+ self.ngtdm_spatial_method: None | list[str] = ngtdm_spatial_method
+
+ # Set NGLDM attributes
+ if self.has_ngldm_family():
+
+ # Check distance.
+ if not isinstance(ngldm_distance, list):
+ ngldm_distance = [ngldm_distance]
+
+ if not all(isinstance(distance, float) for distance in ngldm_distance):
+ raise TypeError(
+ "The ngldm_distance parameter is expected to contain floating point values of 1.0 "
+ "or greater. Found one or more values that were not floating points.")
+
+ if not all(distance >= 1.0 for distance in ngldm_distance):
+ raise ValueError(
+ "The ngldm_distance parameter is expected to contain floating point values of 1.0 "
+ "or greater. Found one or more values that were less than 1.0.")
+
+ # Check spatial method
+ ngldm_spatial_method = self.check_valid_omnidirectional_spatial_method(
+ ngldm_spatial_method, "ngldm_spatial_method")
+
+ # Check difference level.
+ if not isinstance(ngldm_difference_level, list):
+ ngldm_difference_level = [ngldm_difference_level]
+
+ if not all(isinstance(difference, float) for difference in ngldm_difference_level):
+ raise TypeError(
+ "The ngldm_difference_level parameter is expected to contain floating point values of 0.0 "
+ "or greater. Found one or more values that were not floating points.")
+
+ if not all(difference >= 0.0 for difference in ngldm_difference_level):
+ raise ValueError(
+ "The ngldm_difference_level parameter is expected to contain floating point values "
+ "of 0.0 or greater. Found one or more values that were less than 0.0.")
+
+ else:
+ ngldm_spatial_method = None
+ ngldm_distance = None
+ ngldm_difference_level = None
+
+ self.ngldm_dist: None | list[float] = ngldm_distance
+ self.ngldm_diff_lvl: None | list[float] = ngldm_difference_level
+ self.ngldm_spatial_method: None | list[str] = ngldm_spatial_method
+
+ @staticmethod
+ def get_available_families():
+ return [
+ "mrp", "morph", "morphology", "morphological", "li", "loc.int", "loc_int", "local_int", "local_intensity",
+ "st", "stat", "stats", "statistics", "statistical", "ih", "int_hist", "int_histogram", "intensity_histogram",
+ "ivh", "int_vol_hist", "intensity_volume_histogram", "cm", "glcm", "grey_level_cooccurrence_matrix",
+ "cooccurrence_matrix", "rlm", "glrlm", "grey_level_run_length_matrix", "run_length_matrix",
+ "szm", "glszm", "grey_level_size_zone_matrix", "size_zone_matrix", "dzm", "gldzm",
+ "grey_level_distance_zone_matrix", "distance_zone_matrix", "tdm", "ngtdm",
+ "neighbourhood_grey_tone_difference_matrix", "grey_tone_difference_matrix", "ldm", "ngldm",
+ "neighbouring_grey_level_dependence_matrix", "grey_level_dependence_matrix", "all", "none"
+ ]
+
+ def has_any_feature_family(self):
+ return not any(family == "none" for family in self.families)
+
+ def has_discretised_family(self):
+ return self.has_ih_family() or self.has_glcm_family() or self.has_glrlm_family() or self.has_glszm_family() \
+ or self.has_gldzm_family() or self.has_ngtdm_family() or self.has_ngldm_family()
+
+ def has_morphology_family(self):
+ return any(family in ["mrp", "morph", "morphology", "morphological", "all"] for family in self.families)
+
+ def has_local_intensity_family(self):
+ return any(family in ["li", "loc.int", "loc_int", "local_int", "local_intensity", "all"] for family in self.families)
+
+ def has_stats_family(self):
+ return any(family in ["st", "stat", "stats", "statistics", "statistical", "all"] for family in self.families)
+
+ def has_ih_family(self):
+ return any(family in ["ih", "int_hist", "int_histogram", "intensity_histogram", "all"] for family in self.families)
+
+ def has_ivh_family(self):
+ return any(family in ["ivh", "int_vol_hist", "intensity_volume_histogram", "all"] for family in self.families)
+
+ def has_glcm_family(self):
+ return any(family in ["cm", "glcm", "grey_level_cooccurrence_matrix", "cooccurrence_matrix", "all"] for family in self.families)
+
+ def has_glrlm_family(self):
+ return any(family in ["rlm", "glrlm", "grey_level_run_length_matrix", "run_length_matrix", "all"] for family in self.families)
+
+ def has_glszm_family(self):
+ return any(family in ["szm", "glszm", "grey_level_size_zone_matrix", "size_zone_matrix", "all"] for family in self.families)
+
+ def has_gldzm_family(self):
+ return any(family in ["dzm", "gldzm", "grey_level_distance_zone_matrix", "distance_zone_matrix", "all"] for family in self.families)
+
+ def has_ngtdm_family(self):
+ return any(family in ["tdm", "ngtdm", "neighbourhood_grey_tone_difference_matrix", "grey_tone_difference_matrix", "all"] for family in self.families)
+
+ def has_ngldm_family(self):
+ return any(family in ["ldm", "ngldm", "neighbouring_grey_level_dependence_matrix", "grey_level_dependence_matrix", "all"] for family in self.families)
+
+ def check_valid_directional_spatial_method(self, x, var_name):
+
+ # Set defaults
+ if x is None and self.by_slice:
+ x = ["2d_slice_merge"]
+
+ elif x is None and not self.by_slice:
+ x = ["3d_volume_merge"]
+
+ # Check that x is a list.
+ if not isinstance(x, list):
+ x = [x]
+
+ all_spatial_method = ["2d_average", "2d_slice_merge", "2.5d_direction_merge", "2.5d_volume_merge"]
+ if not self.by_slice:
+ all_spatial_method += ["3d_average", "3d_volume_merge"]
+
+ # Check that x contains strings.
+ if not all(isinstance(spatial_method, str) for spatial_method in x):
+ raise TypeError(
+ f"The {var_name} parameter expects one or more of the following values: "
+ f"{', '.join(all_spatial_method)}. Found: one or more values that were not strings.")
+
+ # Check spatial method.
+ valid_spatial_method = [spatial_method in all_spatial_method for spatial_method in x]
+
+ if not all(valid_spatial_method):
+ raise ValueError(
+ f"The {var_name} parameter expects one or more of the following values: "
+ f"{', '.join(all_spatial_method)}. Found: "
+ f"{', '.join([spatial_method for spatial_method in x if spatial_method in all_spatial_method])}")
+
+ return x
+
+ def check_valid_omnidirectional_spatial_method(self, x, var_name):
+
+ # Set defaults
+ if x is None and self.by_slice:
+ x = ["2d"]
+
+ elif x is None and not self.by_slice:
+ x = ["3d"]
+
+ # Check that x is a list.
+ if not isinstance(x, list):
+ x = [x]
+
+ all_spatial_method = ["2d", "2.5d"]
+ if not self.by_slice:
+ all_spatial_method += ["3d"]
+
+ # Check that x contains strings.
+ if not all(isinstance(spatial_method, str) for spatial_method in x):
+ raise TypeError(
+ f"The {var_name} parameter expects one or more of the following values: "
+ f"{', '.join(all_spatial_method)}. Found: one or more values that were not strings.")
+
+ # Check spatial method.
+ valid_spatial_method = [spatial_method in all_spatial_method for spatial_method in x]
+
+ if not all(valid_spatial_method):
+ raise ValueError(
+ f"The {var_name} parameter expects one or more of the following values: "
+ f"{', '.join(all_spatial_method)}. Found: "
+ f"{', '.join([spatial_method for spatial_method in x if spatial_method in all_spatial_method])}")
+
+ return x
+
+
+
+def get_feature_extraction_settings() -> list[dict[str, Any]]:
+ return [
+ setting_def(
+ "base_feature_families", "str", to_list=True, xml_key=["feature_families", "families"],
+ class_key="families", test=["all"]
+ ),
+ setting_def(
+ "base_discretisation_method", "str", to_list=True, xml_key=["discretisation_method", "discr_method"],
+ class_key="discretisation_method", test=["fixed_bin_size", "fixed_bin_number"]
+ ),
+ setting_def(
+ "base_discretisation_n_bins", "int", to_list=True, xml_key=["discretisation_n_bins", "discr_n_bins"],
+ class_key="discretisation_n_bins", test=[10, 33]
+ ),
+ setting_def(
+ "base_discretisation_bin_width", "float", to_list=True,
+ xml_key=["discretisation_bin_width", "discr_bin_width"], class_key="discretisation_bin_width",
+ test=[10.0, 34.0]
+ ),
+ setting_def(
+ "ivh_discretisation_method", "str", xml_key=["ivh_discretisation_method", "ivh_discr_method"],
+ class_key="ivh_discretisation_method", test="fixed_bin_size"
+ ),
+ setting_def(
+ "ivh_discretisation_n_bins", "int", xml_key=["ivh_discretisation_n_bins", "ivh_discr_n_bins"],
+ test=20
+ ),
+ setting_def(
+ "ivh_discretisation_bin_width", "float", xml_key=["ivh_discretisation_bin_width", "ivh_discr_bin_width"],
+ test=30.0
+ ),
+ setting_def("glcm_distance", "float", to_list=True, xml_key=["glcm_distance", "glcm_dist"], test=[2.0, 3.0]),
+ setting_def("glcm_spatial_method", "str", to_list=True, test=["2d_average", "2d_slice_merge"]),
+ setting_def("glrlm_spatial_method", "str", to_list=True, test=["2d_average", "2d_slice_merge"]),
+ setting_def("glszm_spatial_method", "str", to_list=True, test=["2d", "2.5d"]),
+ setting_def("gldzm_spatial_method", "str", to_list=True, test=["2d", "2.5d"]),
+ setting_def("ngtdm_spatial_method", "str", to_list=True, test=["2d", "2.5d"]),
+ setting_def(
+ "ngldm_distance", "float", to_list=True, xml_key=["ngldm_distance", "ngldm_dist"],
+ class_key="ngldm_dist", test=[2.5, 3.5]
+ ),
+ setting_def(
+ "ngldm_difference_level", "float", to_list=True, xml_key=["ngldm_difference_level", "ngldm_diff_lvl"],
+ class_key="ngldm_diff_lvl", test=[1.0, 1.9]
+ ),
+ setting_def("ngldm_spatial_method", "str", to_list=True, test=["2d", "2.5d"])
+ ]
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/_modules/mirp/settings/general_parameters.html b/docs/_modules/mirp/settings/general_parameters.html
new file mode 100644
index 00000000..962f1303
--- /dev/null
+++ b/docs/_modules/mirp/settings/general_parameters.html
@@ -0,0 +1,206 @@
+
+
+
+
+
+ mirp.settings.general_parameters — mirp 2.2.0 documentation
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Source code for mirp.settings.general_parameters
+import warnings
+from dataclasses import dataclass
+from typing import Any
+from mirp.settings.utilities import setting_def
+
+
+
+[docs]
+@dataclass
+class GeneralSettingsClass:
+ """
+ Set of overall process parameters. The most important parameter here is ``by_slice`` which affects how images are
+ processed and features are computed.
+
+ Parameters
+ ----------
+ by_slice: bool, optional, default: False
+ Defines whether image processing and computations should be performed in 2D (True) or 3D (False).
+
+ ibsi_compliant: bool, optional, default: True
+ Limits use of methods and computation of features to those that exist in the IBSI reference standard.
+
+ mask_merge: bool, optional, default: False
+ Defines whether multiple mask objects should be combined into a single mask.
+
+ mask_split: bool, optional, default: False
+ Defines whether a mask that contains multiple regions should be split into separate mask objects.
+
+ mask_select_largest_region: bool, optional, default: False
+ Defines whether the largest region within a mask object should be selected. For example, in a mask that
+ contains multiple separate lesions. ``mask_select_largest_region = True`` will remove all but the largest
+ lesion.
+
+ mask_select_largest_slice: bool, optional, default: False
+ Defines whether the largest slice within a mask object should be selected.
+
+ config_str: str, optional
+ Sets a configuration string, which can be used to differentiate results obtained using other settings.
+
+ no_approximation: bool, optional, default: False
+ Disables approximation within MIRP. This currently only affects computation of features such as Geary's
+ c-measure. Can be True or False (default). False means that approximation is performed.
+
+ **kwargs: dict, optional
+ Unused keyword arguments.
+ """
+
+ def __init__(
+ self,
+ by_slice: bool = False,
+ ibsi_compliant: bool = True,
+ mask_merge: bool = False,
+ mask_split: bool = False,
+ mask_select_largest_region: bool = False,
+ mask_select_largest_slice: bool = False,
+ config_str: str = "",
+ no_approximation: bool = False,
+ **kwargs
+ ):
+
+ if not isinstance(by_slice, bool):
+ raise ValueError("The by_slice parameter should be a boolean.")
+
+ # Set by_slice and select_slice parameters.
+ self.by_slice: bool = by_slice
+
+ # Set IBSI-compliance flag.
+ self.ibsi_compliant: bool = ibsi_compliant
+
+ self.mask_merge = mask_merge
+ self.mask_split = mask_split
+ self.mask_select_largest_region = mask_select_largest_region
+
+ if mask_select_largest_slice and not by_slice:
+ warnings.warn("A 2D approach is used as the largest slice is selected.", UserWarning)
+ self.by_slice = True
+
+ self.mask_select_largest_slice = mask_select_largest_slice
+
+ # Set configuration string.
+ self.config_str: str = config_str
+
+ # Set approximation of features.
+ self.no_approximation: bool = no_approximation
+
+
+
+def get_general_settings() -> list[dict[str, Any]]:
+ return [
+ setting_def("by_slice", "bool", test=True),
+ setting_def("ibsi_compliant", "bool", test=True),
+ setting_def("mask_merge", "bool", test=True),
+ setting_def("mask_split", "bool", test=True),
+ setting_def("mask_select_largest_region", "bool", test=True),
+ setting_def("mask_select_largest_slice", "bool", test=True),
+ setting_def("config_str", "str", test="test_config"),
+ setting_def("no_approximation", "bool", test=True)
+ ]
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/_modules/mirp/settings/generic.html b/docs/_modules/mirp/settings/generic.html
new file mode 100644
index 00000000..97a18a33
--- /dev/null
+++ b/docs/_modules/mirp/settings/generic.html
@@ -0,0 +1,275 @@
+
+
+
+
+
+ mirp.settings.generic — mirp 2.2.0 documentation
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Source code for mirp.settings.generic
+import copy
+
+from mirp.settings.feature_parameters import FeatureExtractionSettingsClass
+from mirp.settings.general_parameters import GeneralSettingsClass
+from mirp.settings.image_processing_parameters import ImagePostProcessingClass
+from mirp.settings.transformation_parameters import ImageTransformationSettingsClass
+from mirp.settings.interpolation_parameters import ImageInterpolationSettingsClass, MaskInterpolationSettingsClass
+from mirp.settings.resegmentation_parameters import ResegmentationSettingsClass
+from mirp.settings.perturbation_parameters import ImagePerturbationSettingsClass
+
+
+
+[docs]
+class SettingsClass:
+ """
+ Container for objects used to configure the image processing and feature processing workflow. This object can be
+ initialised in two ways:
+
+ * By providing (already initialised) configuration objects as arguments.
+ * By passing arguments to configuration objects as keyword arguments. These configuration objects will then be
+ created while initialising this container.
+
+ Parameters
+ ----------
+ general_settings: GeneralSettingsClass, optional
+ Configuration object for parameters related to the general process. See
+ :class:`~mirp.settings.general_parameters.GeneralSettingsClass`.
+
+ post_process_settings: ImagePostProcessingClass, optional
+ Configuration object for parameters related to image (post-)processing. See
+ :class:`~mirp.settings.image_processing_parameters.ImagePostProcessingClass`.
+
+ perturbation_settings: ImagePerturbationSettingsClass, optional
+ Configuration object for parameters related to image perturbation / augmentation. See
+ :class:`~mirp.settings.perturbation_parameters.ImagePerturbationSettingsClass`.
+
+ img_interpolate_settings: ImageInterpolationSettingsClass, optional
+ Configuration object for parameters related to image resampling. See
+ :class:`~mirp.settings.interpolation_parameters.ImageInterpolationSettingsClass`.
+
+ roi_interpolate_settings: MaskInterpolationSettingsClass, optional
+ Configuration object for parameters related to mask resampling. See
+ :class:`~mirp.settings.interpolation_parameters.MaskInterpolationSettingsClass`.
+
+ roi_resegment_settings: ResegmentationSettingsClass, optional
+ Configuration object for parameters related to mask resegmentation. See
+ :class:`~mirp.settings.resegmentation_parameters.ResegmentationSettingsClass`.
+
+ feature_extr_settings: FeatureExtractionSettingsClass, optional
+ Configuration object for parameters related to feature computation. See
+ :class:`~mirp.settings.feature_parameters.FeatureExtractionSettingsClass`.
+
+ img_transform_settings: ImageTransformationSettingsClass, optional
+ Configuration object for parameters related to image transformation. See
+ :class:`~mirp.settings.transformation_parameters.ImageTransformationSettingsClass`.
+
+ **kwargs: dict, optional
+ Keyword arguments for initialising configuration objects stored in this container object.
+
+ See Also
+ --------
+
+ * general settings (:class:`~mirp.settings.general_parameters.GeneralSettingsClass`)
+ * image post-processing (:class:`~mirp.settings.image_processing_parameters.ImagePostProcessingClass`)
+ * image perturbation / augmentation (:class:`~mirp.settings.perturbation_parameters.ImagePerturbationSettingsClass`)
+ * image interpolation / resampling (:class:`~mirp.settings.interpolation_parameters.ImageInterpolationSettingsClass`
+ and :class:`~mirp.settings.interpolation_parameters.MaskInterpolationSettingsClass`)
+ * mask resegmentation (:class:`~mirp.settings.resegmentation_parameters.ResegmentationSettingsClass`)
+ * image transformation (:class:`~mirp.settings.transformation_parameters.ImageTransformationSettingsClass`)
+ * feature computation / extraction (
+ :class:`~mirp.settings.feature_parameters.FeatureExtractionSettingsClass`)
+
+ """
+ def __init__(
+ self,
+ general_settings: None | GeneralSettingsClass = None,
+ post_process_settings: None | ImagePostProcessingClass = None,
+ perturbation_settings: None | ImagePerturbationSettingsClass = None,
+ img_interpolate_settings: None | ImageInterpolationSettingsClass = None,
+ roi_interpolate_settings: None | MaskInterpolationSettingsClass = None,
+ roi_resegment_settings: None | ResegmentationSettingsClass = None,
+ feature_extr_settings: None | FeatureExtractionSettingsClass = None,
+ img_transform_settings: None | ImageTransformationSettingsClass = None,
+ **kwargs
+ ):
+ kwargs = copy.deepcopy(kwargs)
+
+ # General settings.
+ if general_settings is None:
+ general_settings = GeneralSettingsClass(**kwargs)
+ self.general = general_settings
+
+ # Remove by_slice and no_approximation from the keyword arguments to avoid double passing.
+ kwargs.pop("by_slice", None)
+ kwargs.pop("no_approximation", None)
+ kwargs.pop("ibsi_compliant", None)
+
+ # Image interpolation settings.
+ if img_interpolate_settings is None:
+ img_interpolate_settings = ImageInterpolationSettingsClass(
+ by_slice=general_settings.by_slice,
+ **kwargs
+ )
+ self.img_interpolate = img_interpolate_settings
+
+ # Mask interpolation settings.
+ if roi_interpolate_settings is None:
+ roi_interpolate_settings = MaskInterpolationSettingsClass(**kwargs)
+ self.roi_interpolate = roi_interpolate_settings
+
+ # Image (post-)processing settings.
+ if post_process_settings is None:
+ post_process_settings = ImagePostProcessingClass(**kwargs)
+ self.post_process = post_process_settings
+
+ # Image perturbation settings.
+ if perturbation_settings is None:
+ perturbation_settings = ImagePerturbationSettingsClass(**kwargs)
+ self.perturbation = perturbation_settings
+
+ # Mask resegmentation settings.
+ if roi_resegment_settings is None:
+ roi_resegment_settings = ResegmentationSettingsClass(**kwargs)
+ self.roi_resegment = roi_resegment_settings
+
+ # Feature extraction settings.
+ if feature_extr_settings is None:
+ feature_extr_settings = FeatureExtractionSettingsClass(
+ by_slice=general_settings.by_slice,
+ no_approximation=general_settings.no_approximation,
+ ibsi_compliant=general_settings.ibsi_compliant,
+ **kwargs
+ )
+ self.feature_extr = feature_extr_settings
+
+ # Image transformation settings
+ if img_transform_settings is None:
+ img_transform_settings = ImageTransformationSettingsClass(
+ by_slice=general_settings.by_slice,
+ ibsi_compliant=general_settings.ibsi_compliant,
+ **kwargs
+ )
+ self.img_transform = img_transform_settings
+
+ def __eq__(self, other):
+ if self.__class__ != other.__class__:
+ return False
+
+ if self.general != other.general:
+ return False
+ if self.img_interpolate != other.img_interpolate:
+ return False
+ if self.roi_interpolate != other.roi_interpolate:
+ return False
+ if self.post_process != other.post_process:
+ return False
+ if self.perturbation != other.perturbation:
+ return False
+ if self.roi_resegment != other.roi_resegment:
+ return False
+ if self.feature_extr != other.feature_extr:
+ return False
+ if self.img_transform != other.img_transform:
+ return False
+
+ return True
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/_modules/mirp/settings/image_processing_parameters.html b/docs/_modules/mirp/settings/image_processing_parameters.html
new file mode 100644
index 00000000..848076be
--- /dev/null
+++ b/docs/_modules/mirp/settings/image_processing_parameters.html
@@ -0,0 +1,475 @@
+
+
+
+
+
+ mirp.settings.image_processing_parameters — mirp 2.2.0 documentation
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Source code for mirp.settings.image_processing_parameters
+import numpy as np
+
+from typing import Any
+from dataclasses import dataclass
+from mirp.settings.utilities import setting_def
+
+
+
+[docs]
+@dataclass
+class ImagePostProcessingClass:
+ """
+ Parameters related to image processing. Note that parameters concerning image perturbation / augmentation and
+ resampling are set separately, see :class:`~mirp.settings.perturbation_parameters.ImagePerturbationSettingsClass` and
+ :class:`~mirp.settings.interpolation_parameters.ImageInterpolationSettingsClass`.
+
+ Parameters
+ ----------
+ bias_field_correction: bool, optional, default: False
+ Determines whether N4 bias field correction should be performed. When a tissue mask is present, bias field
+ correction is conducted using the information contained within the mask. Bias-field correction can only be
+ applied to MR imaging.
+
+ bias_field_correction_n_fitting_levels: int, optional, default: 1
+ The number of fitting levels for the N4 bias field correction algorithm.
+
+ bias_field_correction_n_max_iterations: int or list of int, optional, default: 50
+ The number of fitting iterations for the N4 bias field algorithm. A single integer, or a list of integers
+ with a length equal to the number of fitting levels is expected.
+
+ bias_field_convergence_threshold: float, optional, default: 0.001
+ Convergence threshold for N4 bias field correction algorithm.
+
+ intensity_normalisation: {"none", "range", "relative_range", "quantile_range", "standardisation"}, default: "none"
+ Specifies the algorithm used to normalise intensities in the image. Will use only intensities in voxels
+ masked by the tissue mask (of present). The following are possible:
+
+ * "none": no normalisation
+ * "range": normalises intensities based on a fixed mapping against the ``intensity_normalisation_range``
+ parameter, which is interpreted to represent an intensity range.
+ * "relative_range": normalises intensities based on a fixed mapping against the ``intensity_normalisation_range``
+ parameter, which is interpreted to represent a relative intensity range.
+ * "quantile_range": normalises intensities based on a fixed mapping against the
+ ``intensity_normalisation_range`` parameter, which is interpreted to represent a quantile range.
+ * "standardisation": normalises intensities by subtraction of the mean intensity and division by the standard
+ deviation of intensities.
+
+ .. note::
+ intensity normalisation may remove any physical meaning of intensity units.
+
+ intensity_normalisation_range: list of float, optional
+ Required for "range", "relative_range", and "quantile_range" intensity normalisation methods, and defines the
+ intensities that are mapped to the [0.0, 1.0] range during normalisation. The default range depends on the
+ type of normalisation method:
+
+ * "range": [np.nan, np.nan]: the minimum and maximum intensity value present in the image are used to set the
+ mapping range.
+ * "relative_range": [0.0. 1.0]: the minimum (0.0) and maximum (1.0) intensity value present in the image are
+ used to set the mapping range.
+ * "quantile_range": [0.025, 0.975] the 2.5th and 97.5th percentiles of the intensities in the image are used
+ to set the mapping range.
+
+ The lower end of the range is mapped to 0.0 and the upper end to 1.0. However, if intensities below the lower
+ end or above the upper end are present in the image, values below 0.0 or above 1.0 may be encountered after
+ normalisation. Use ``intensity_normalisation_saturation`` to cap intensities after normalisation to a
+ specific range.
+
+ intensity_normalisation_saturation: list of float, optional, default: [np.nan, np.nan]
+ Defines the start and endpoint for the saturation range. Normalised intensities that lie outside this
+ range are mapped to the limits of the saturation range, e.g. with a range of [0.0, 0.8] all values greater
+ than 0.8 are assigned a value of 0.8. np.nan can be used to define limits where the intensity values should
+ not be saturated.
+
+ intensity_scaling: float, optional
+ Defines scaling parameter to linearly scale intensities with. The scaling parameter is applied after
+ normalisation (if any). For example, `intensity_scaling = 1000.0`, combined with `intensity_normalisation =
+ "range"` results in intensities being mapped to a [0.0, 1000.0] range instead of [0.0, 1.0].
+
+ tissue_mask_type: {"none", "range", "relative_range"}, optional, default: "relative_range"
+ Type of algorithm used to produce an approximate tissue mask of the tissue. Such masks can be used to select
+ pixels for bias correction and intensity normalisation by excluding non-tissue voxels.
+
+ tissue_mask_range: list of float, optional
+ Range values for creating an approximate mask of the tissue. Required for "range" and "relative_range"
+ options. Default: [0.02, 1.00] (``"relative_range"``); [np.nan, np.nan] (``"range"``; effectively all voxels
+ are considered to represent tissue).
+
+ **kwargs:
+ Unused keyword arguments.
+ """
+
+ def __init__(
+ self,
+ bias_field_correction: bool = False,
+ bias_field_correction_n_fitting_levels: int = 1,
+ bias_field_correction_n_max_iterations: int | list[int] | None = None,
+ bias_field_convergence_threshold: float = 0.001,
+ intensity_normalisation: str = "none",
+ intensity_normalisation_range: list[float] | None = None,
+ intensity_normalisation_saturation: list[float] | None = None,
+ intensity_scaling: float | None = None,
+ tissue_mask_type: str = "relative_range",
+ tissue_mask_range: list[float] | None = None,
+ **kwargs
+ ):
+
+ # Set bias_field_correction parameter
+ self.bias_field_correction = bias_field_correction
+
+ # Check n_fitting_levels.
+ if bias_field_correction:
+ if not isinstance(bias_field_correction_n_fitting_levels, int):
+ raise TypeError("The bias_field_correction_n_fitting_levels should be integer with value 1 or larger.")
+
+ if bias_field_correction_n_fitting_levels < 1:
+ raise ValueError(
+ f"The bias_field_correction_n_fitting_levels should be integer with value 1 or larger. "
+ f"Found: {bias_field_correction_n_fitting_levels}")
+
+ else:
+ bias_field_correction_n_fitting_levels = None
+
+ # Set n_fitting_levels.
+ self.n_fitting_levels: None | int = bias_field_correction_n_fitting_levels
+
+ # Set default value for bias_field_correction_n_max_iterations. This is the number of iterations per fitting
+ # level.
+ if bias_field_correction_n_max_iterations is None and bias_field_correction:
+ bias_field_correction_n_max_iterations = [50 for ii in range(bias_field_correction_n_fitting_levels)]
+
+ if bias_field_correction:
+ # Parse to list, if a single value is provided.
+ if not isinstance(bias_field_correction_n_max_iterations, list):
+ bias_field_correction_n_max_iterations = [bias_field_correction_n_max_iterations]
+
+ # Ensure that the list of maximum iteration values equals the number of fitting levels.
+ if bias_field_correction_n_fitting_levels > 1 and len(bias_field_correction_n_max_iterations) == 1:
+ bias_field_correction_n_max_iterations = [
+ bias_field_correction_n_max_iterations[0]
+ for ii in range(bias_field_correction_n_fitting_levels)
+ ]
+
+ # Check that the list of maximum iteration values is equal to the number of fitting levels.
+ if len(bias_field_correction_n_max_iterations) != bias_field_correction_n_fitting_levels:
+ raise ValueError(
+ f"The bias_field_correction_n_max_iterations parameter should be a list with a length equal to the"
+ f" number of fitting levels ({bias_field_correction_n_fitting_levels}). Found list with "
+ f"{len(bias_field_correction_n_max_iterations)} values.")
+
+ # Check that all values are integers.
+ if not all(isinstance(ii, int) for ii in bias_field_correction_n_max_iterations):
+ raise TypeError(
+ f"The bias_field_correction_n_max_iterations parameter should be a list of positive "
+ f"integer values. At least one value was not an integer.")
+
+ # Check that all values are positive.
+ if not all([ii > 0 for ii in bias_field_correction_n_max_iterations]):
+ raise ValueError(
+ f"The bias_field_correction_n_max_iterations parameter should be a list of positive "
+ f"integer values. At least one value was zero or negative.")
+
+ else:
+ bias_field_correction_n_max_iterations = None
+
+ # Set n_max_iterations attribute.
+ self.n_max_iterations: list[int] | None = bias_field_correction_n_max_iterations
+
+ # Check that the convergence threshold is a non-negative number.
+ if bias_field_correction:
+
+ # Check that the value is a float.
+ if not isinstance(bias_field_convergence_threshold, float):
+ raise TypeError(
+ f"The bias_field_convergence_threshold parameter is expected to be a non-negative "
+ f"floating point value. Found: a value that was not a floating point value.")
+
+ if bias_field_convergence_threshold <= 0.0:
+ raise TypeError(
+ f"The bias_field_convergence_threshold parameter is expected to be a non-positive floating point "
+ f"value. Found: a value that was 0.0 or negative ({bias_field_convergence_threshold}).")
+
+ else:
+ bias_field_convergence_threshold = None
+
+ # Set convergence_threshold attribute.
+ self.convergence_threshold: None | float = bias_field_convergence_threshold
+
+ # Check that intensity_normalisation has the correct values.
+ if intensity_normalisation not in ["none", "range", "relative_range", "quantile_range", "standardisation"]:
+ raise ValueError(
+ f"The intensity_normalisation parameter is expected to have one of the following values: "
+ f"'none', 'range', 'relative_range', 'quantile_range', 'standardisation'. Found: "
+ f"{intensity_normalisation}.")
+
+ # Set intensity_normalisation parameter.
+ self.intensity_normalisation = intensity_normalisation
+
+ # Set default value.
+ if intensity_normalisation_range is None:
+ if intensity_normalisation == "range":
+ # Cannot define a proper range.
+ intensity_normalisation_range = [np.nan, np.nan]
+
+ elif intensity_normalisation == "relative_range":
+ intensity_normalisation_range = [0.0, 1.0]
+
+ elif intensity_normalisation == "quantile_range":
+ intensity_normalisation_range = [0.025, 0.975]
+
+ if intensity_normalisation == "range":
+ # Check that the range has length 2 and contains floating point values.
+ if not isinstance(intensity_normalisation_range, list):
+ raise TypeError(
+ f"The intensity_normalisation_range parameter for range-based normalisation should "
+ f"be a list with exactly two values, which are mapped to 0.0 and 1.0 respectively. "
+ f"Found: an object that is not a list.")
+
+ if len(intensity_normalisation_range) != 2:
+ raise ValueError(
+ f"The intensity_normalisation_range parameter for range-based normalisation should "
+ f"be a list with exactly two values, which are mapped to 0.0 and 1.0 respectively. "
+ f"Found: list with {len(intensity_normalisation_range)} values.")
+
+ if not all(isinstance(ii, float) for ii in intensity_normalisation_range):
+ raise TypeError(
+ f"The intensity_normalisation_range parameter for range-based normalisation should "
+ f"be a list with exactly two floating point values, which are mapped to 0.0 and 1.0 "
+ f"respectively. Found: one or more values that are not floating point values.")
+
+ elif intensity_normalisation in ["relative_range", "quantile_range"]:
+ # Check that the range has length 2 and contains floating point values between 0.0 and 1.0.
+ if intensity_normalisation == "relative_range":
+ intensity_normalisation_specifier = "relative range-based normalisation"
+ else:
+ intensity_normalisation_specifier = "quantile range-based normalisation"
+
+ if not isinstance(intensity_normalisation_range, list):
+ raise TypeError(
+ f"The intensity_normalisation_range parameter for {intensity_normalisation_specifier} "
+ f"should be a list with exactly two values, which are mapped to 0.0 and 1.0 "
+ f"respectively. Found: an object that is not a list.")
+
+ if len(intensity_normalisation_range) != 2:
+ raise ValueError(
+ f"The intensity_normalisation_range parameter for {intensity_normalisation_specifier} "
+ f"should be a list with exactly two values, which are mapped to 0.0 and 1.0 "
+ f"respectively. Found: list with {len(intensity_normalisation_range)} values.")
+
+ if not all(isinstance(ii, float) for ii in intensity_normalisation_range):
+ raise TypeError(
+ f"The intensity_normalisation_range parameter for {intensity_normalisation_specifier} "
+ f"should be a list with exactly two values, which are mapped to 0.0 and 1.0 "
+ f"respectively. Found: one or more values that are not floating point values.")
+
+ if not all([0.0 <= ii <= 1.0 for ii in intensity_normalisation_range]):
+ raise TypeError(
+ f"The intensity_normalisation_range parameter for {intensity_normalisation_specifier} "
+ f"should be a list with exactly two values, which are mapped to 0.0 and 1.0 "
+ f"respectively. Found: one or more values that are outside the [0.0, 1.0] range.")
+
+ else:
+ # None and standardisation do not use this range.
+ intensity_normalisation_range = None
+
+ # Set normalisation range.
+ self.intensity_normalisation_range: None | list[float] = intensity_normalisation_range
+
+ # Check intensity normalisation saturation range.
+ if intensity_normalisation_saturation is None:
+ intensity_normalisation_saturation = [np.nan, np.nan]
+
+ if not isinstance(intensity_normalisation_saturation, list):
+ raise TypeError(
+ "The intensity_normalisation_saturation parameter is expected to be a "
+ "list of two floating point values."
+ )
+
+ if not len(intensity_normalisation_saturation) == 2:
+ raise ValueError(
+ f"The intensity_normalisation_saturation parameter should consist of two values. Found: "
+ f"{len(intensity_normalisation_saturation)} values.")
+
+ if not all(isinstance(ii, float) for ii in intensity_normalisation_saturation):
+ raise TypeError(
+ "The intensity_normalisation_saturation parameter can only contain floating point or np.nan values."
+ )
+
+ # intensity_normalisation_saturation parameter
+ self.intensity_normalisation_saturation: None | list[float] = intensity_normalisation_saturation
+
+ # Check intensity_scaling
+ if intensity_scaling is not None:
+ if not isinstance(intensity_scaling, float):
+ raise TypeError("The intensity_scaling parameter is expected to be a single floating point.")
+ if intensity_scaling == 0.0:
+ raise ValueError("The intensity_scaling parameter cannot have a value of 0.0.")
+ else:
+ intensity_scaling = 1.0
+
+ self.intensity_scaling: float = intensity_scaling
+
+ # Check tissue_mask_type
+ if tissue_mask_type not in ["none", "range", "relative_range"]:
+ raise ValueError(
+ f"The tissue_mask_type parameter is expected to have one of the following values: "
+ f"'none', 'range', or 'relative_range'. Found: {tissue_mask_type}."
+ )
+
+ # Set tissue_mask_type
+ self.tissue_mask_type: str = tissue_mask_type
+
+ # Set the default value for tissue_mask_range.
+ if tissue_mask_range is None:
+ if tissue_mask_type == "relative_range":
+ tissue_mask_range = [0.02, 1.00]
+ elif tissue_mask_type == "range":
+ tissue_mask_range = [np.nan, np.nan]
+ else:
+ tissue_mask_range = [np.nan, np.nan]
+
+ # Perform checks on tissue_mask_range.
+ if tissue_mask_type != "none":
+ if not isinstance(tissue_mask_range, list):
+ raise TypeError(
+ "The tissue_mask_range parameter is expected to be a list of two floating point values.")
+
+ if not len(tissue_mask_range) == 2:
+ raise ValueError(
+ f"The tissue_mask_range parameter should consist of two values. Found: "
+ f"{len(tissue_mask_range)} values.")
+
+ if not all(isinstance(ii, float) for ii in tissue_mask_range):
+ raise TypeError("The tissue_mask_range parameter can only contain floating point or np.nan values.")
+
+ if tissue_mask_type == "relative_range":
+ if not all([(0.0 <= ii <= 1.0) or np.isnan(ii) for ii in tissue_mask_range]):
+ raise ValueError(
+ "The tissue_mask_range parameter should consist of two values between 0.0 and 1.0.")
+
+ # Set tissue_mask_range.
+ self.tissue_mask_range: tuple[float, ...] = tuple(tissue_mask_range)
+
+
+
+def get_post_processing_settings() -> list[dict[str, Any]]:
+
+ return [
+ setting_def("bias_field_correction", "bool", test=True),
+ setting_def(
+ "bias_field_correction_n_fitting_levels", "int", xml_key="n_fitting_levels",
+ class_key="n_fitting_levels", test=2
+ ),
+ setting_def(
+ "bias_field_correction_n_max_iterations", "int", xml_key="n_max_iterations",
+ class_key="n_max_iterations", to_list=True, test=[1000, 1000]
+ ),
+ setting_def(
+ "bias_field_convergence_threshold", "float", xml_key="convergence_threshold",
+ class_key="convergence_threshold", test=0.1
+ ),
+ setting_def("intensity_normalisation", "str", test="relative_range"),
+ setting_def("intensity_normalisation_range", "float", to_list=True, test=[0.10, 0.90]),
+ setting_def("intensity_normalisation_saturation", "float", to_list=True, test=[0.00, 10.00]),
+ setting_def("intensity_scaling", "float", test=3.0),
+ setting_def("tissue_mask_type", "str", test="range"),
+ setting_def("tissue_mask_range", "float", to_list=True, test=[0.00, 10.00])
+ ]
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/_modules/mirp/settings/interpolation_parameters.html b/docs/_modules/mirp/settings/interpolation_parameters.html
new file mode 100644
index 00000000..658a3c73
--- /dev/null
+++ b/docs/_modules/mirp/settings/interpolation_parameters.html
@@ -0,0 +1,324 @@
+
+
+
+
+
+ mirp.settings.interpolation_parameters — mirp 2.2.0 documentation
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Source code for mirp.settings.interpolation_parameters
+from typing import Iterable, Any
+from dataclasses import dataclass
+from mirp.settings.utilities import setting_def
+
+
+
+[docs]
+@dataclass
+class ImageInterpolationSettingsClass:
+ """
+ Parameters related to image interpolating / resampling. Images in a dataset are typically resampled to uniform
+ voxel spacing to ensure that their spatial representation does not vary between samples. Note that when the
+ voxel spacing in the original image is smaller than that in the resampled image (e.g., 0.5 mm sampled to 1.0 mm),
+ antialiasing may be recommended.
+
+ For parameters related to mask interpolation / resampling, see
+ :class:`~mirp.settings.interpolation_parameters.MaskInterpolationSettingsClass`.
+
+ Parameters
+ ----------
+ by_slice: str or bool, optional, default: False
+ Defines whether calculations should be performed in 2D (True) or 3D (False), or alternatively only in the
+ largest slice ("largest"). See :class:`~mirp.settings.general_parameters.GeneralSettingsClass`.
+
+ new_spacing: float or list of float or list of list of float, optional:
+ Sets voxel spacing after interpolation. A single value represents the spacing that will be applied in all
+ directions. Non-uniform voxel spacing may also be provided, but requires 3 values for z, y, and x directions
+ (if `by_slice = False`) or 2 values for y and x directions (otherwise).
+
+ Multiple spacings may be defined by creating a nested list, e.g. [[1.0], [1.5], [2.0]] to resample the
+ same image multiple times to different (here: isotropic) voxel spacings, namely 1.0, 1.5 and 2.0. Units
+ are defined by the headers of the image files. These are typically millimeters for radiological images.
+
+ spline_order: int, optional, default: 3
+ Sets the spline order used for spline interpolation. mirp uses `scipy.ndimage.map_coordinates
+ <https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html#scipy.ndimage
+ .map_coordinates>`_ internally. Spline orders 0, 1, and 3 refer to nearest neighbour, linear interpolation
+ and cubic interpolation, respectively.
+
+ anti_aliasing: bool, optional, default: true
+ Determines whether to perform antialiasing, which is done to mitigate aliasing artifacts when downsampling.
+
+ smoothing_beta: float, optional, default: 0.98
+ Determines the smoothness of the Gaussian filter used for anti-aliasing. A value of 1.00 equates to no
+ antialiasing, with lower values producing increasingly smooth imaging. Values above 0.90 are recommended.
+
+ **kwargs: dict, optional
+ Unused keyword arguments.
+ """
+
+ def __init__(
+ self,
+ by_slice: bool,
+ new_spacing: None | float | int | list[int] | list[float] | list[list[float]] | list[list[int]] = None,
+ spline_order: int = 3,
+ anti_aliasing: bool = True,
+ smoothing_beta: float = 0.98,
+ **kwargs
+ ):
+
+ # Set interpolate parameter.
+ self.interpolate: bool = new_spacing is not None
+
+ # Check if the spline order is valid.
+ if spline_order < 0 or spline_order > 5:
+ raise ValueError(
+ f"The interpolation spline order should be an integer between 0 and 5. Found: {spline_order}")
+
+ # Set spline order for the interpolating spline.
+ self.spline_order: int = spline_order
+
+ if self.interpolate:
+ # Parse value to list of floating point values to facilitate checks.
+ if isinstance(new_spacing, (int, float)):
+ new_spacing = [new_spacing]
+
+ # Check if nested list elements are present.
+ if any(isinstance(ii, Iterable) for ii in new_spacing):
+ new_spacing = [
+ self._check_new_sample_spacing(by_slice=by_slice, new_spacing=new_spacing_element)
+ for new_spacing_element in new_spacing
+ ]
+
+ else:
+ new_spacing = [self._check_new_sample_spacing(by_slice=by_slice, new_spacing=new_spacing)]
+
+ # Check that new spacing is now a nested list.
+ if not all(isinstance(ii, list) for ii in new_spacing):
+ raise TypeError(f"THe new_spacing variable should now be represented as a nested list.")
+
+ # Set spacing for resampling. Note that new_spacing should now either be None or a nested list, i.e. a list
+ # containing other lists.
+ self.new_spacing: None | list[list[float | None]] = new_spacing
+
+ # Set anti-aliasing.
+ self.anti_aliasing: bool = anti_aliasing
+
+ # Check that smoothing beta lies between 0.0 and 1.0.
+ if anti_aliasing:
+ if smoothing_beta <= 0.0 or smoothing_beta > 1.0:
+ raise ValueError(
+ f"The value of the smoothing_beta parameter should lie between 0.0 and 1.0, "
+ f"not including 0.0. Found: {smoothing_beta}")
+
+ # Set smoothing beta.
+ self.smoothing_beta: float = smoothing_beta
+
+ @staticmethod
+ def _check_new_sample_spacing(by_slice, new_spacing):
+ # Checks whether sample spacing is correctly set, and parses it.
+
+ # Parse value to list of floating point values to facilitate checks.
+ if isinstance(new_spacing, (int, float)):
+ new_spacing = [new_spacing]
+
+ # Convert to floating point values.
+ new_spacing: list[float | None] = [float(new_spacing_element) for new_spacing_element in new_spacing]
+
+ if by_slice:
+ # New spacing is expect to consist of at most two values when the experiment is based on slices. A
+ # placeholder for the z-direction is set here.
+ if len(new_spacing) == 1:
+ # This creates isotropic spacing.
+ new_spacing = [None, new_spacing[0], new_spacing[0]]
+
+ elif len(new_spacing) == 2:
+ # Insert a placeholder for the z-direction.
+ new_spacing.insert(0, None)
+
+ else:
+ raise ValueError(
+ f"The desired voxel spacing for in-slice resampling should consist of two "
+ f"elements. Found: {len(new_spacing)} elements.")
+ else:
+ if len(new_spacing) == 1:
+ # This creates isotropic spacing.
+ new_spacing = [new_spacing[0], new_spacing[0], new_spacing[0]]
+
+ elif len(new_spacing) == 3:
+ # Do nothing.
+ pass
+
+ else:
+ raise ValueError(
+ f"The desired voxel spacing for volumetric resampling should consist of three "
+ f"elements. Found: {len(new_spacing)} elements.")
+
+ return new_spacing
+
+
+
+def get_image_interpolation_settings() -> list[dict[str, Any]]:
+ return [
+ setting_def("new_spacing", "float", to_list=True, test=[1.0, 1.0, 1.0]),
+ setting_def("spline_order", "int", test=2),
+ setting_def("anti_aliasing", "bool", test=False),
+ setting_def("smoothing_beta", "float", test=0.75)
+ ]
+
+
+
+[docs]
+@dataclass
+class MaskInterpolationSettingsClass:
+ """
+ Parameters related to mask interpolation / resampling. MIRP registers the mask to an interpolated image based,
+ and fewer parameters can be set compared to image interpolation / resampling (
+ :class:`~mirp.settings.interpolation_parameters.ImageInterpolationSettingsClass`).
+
+ Parameters
+ ----------
+ roi_spline_order: int, optional, default: 1
+ Sets the spline order used for spline interpolation. mirp uses `scipy.ndimage.map_coordinates
+ <https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html#scipy.ndimage
+ .map_coordinates>`_ internally. Spline orders 0, 1, and 3 refer to nearest neighbour, linear interpolation
+ and cubic interpolation, respectively.
+
+ roi_interpolation_mask_inclusion_threshold: float, optional, default: 0.5
+ Threshold for partially masked voxels after interpolation. All voxels with a value equal to or greater than
+ this threshold are assigned to the mask.
+
+ **kwargs: dict, optional
+ Unused keyword arguments.
+ """
+
+ def __init__(
+ self,
+ roi_spline_order: int = 1,
+ roi_interpolation_mask_inclusion_threshold: float = 0.5,
+ **kwargs):
+
+ # Check if the spline order is valid.
+ if roi_spline_order < 0 or roi_spline_order > 5:
+ raise ValueError(
+ f"The interpolation spline order for the ROI should be an integer between 0 and 5. Found:"
+ f" {roi_spline_order}")
+
+ # Set spline order.
+ self.spline_order = roi_spline_order
+
+ # Check if the inclusion threshold is between 0 and 1.
+ if roi_interpolation_mask_inclusion_threshold <= 0.0 or roi_interpolation_mask_inclusion_threshold > 1.0:
+ raise ValueError(
+ f"The inclusion threshold for the ROI mask should be between 0.0 and 1.0, excluding 0.0. "
+ f"Found: {roi_interpolation_mask_inclusion_threshold}")
+
+ self.incl_threshold = roi_interpolation_mask_inclusion_threshold
+
+
+
+def get_mask_interpolation_settings() -> list[dict[str, Any]]:
+ return [
+ setting_def("roi_spline_order", "int", xml_key="spline_order", class_key="spline_order", test=2),
+ setting_def("roi_interpolation_mask_inclusion_threshold", "float", xml_key="incl_threshold",
+ class_key="incl_threshold", test=0.25)
+ ]
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/_modules/mirp/settings/perturbation_parameters.html b/docs/_modules/mirp/settings/perturbation_parameters.html
new file mode 100644
index 00000000..24dd32d3
--- /dev/null
+++ b/docs/_modules/mirp/settings/perturbation_parameters.html
@@ -0,0 +1,373 @@
+
+
+
+
+
+ mirp.settings.perturbation_parameters — mirp 2.2.0 documentation
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Source code for mirp.settings.perturbation_parameters
+from typing import Any
+from dataclasses import dataclass
+from mirp.settings.utilities import setting_def
+
+
+
+[docs]
+@dataclass
+class ImagePerturbationSettingsClass:
+ """
+ Parameters related to image and mask perturbation / augmentation. By default images and masks are not perturbed or
+ augmented.
+
+ Parameters
+ ----------
+
+ crop_around_roi: bool, optional, default: False
+ Determines whether the image may be cropped around the regions of interest. Setting
+ this to True may speed up computation and save memory.
+
+ crop_distance: float, optional, default: 150.0
+ Physical distance around the mask that should be maintained when cropping the image. When using convolutional
+ kernels for filtering an image, we recommend to leave some distance to prevent boundary effects. A crop
+ distance of 0.0 crops the image tightly around the mask.
+
+ perturbation_noise_repetitions: int, optional, default: 0
+ Number of repetitions where noise is randomly added to the image. A value of 0 means that no noise will be
+ added.
+
+ perturbation_noise_level: float, optional, default: None
+ Set the noise level in intensity units. This determines the width of the normal distribution used to generate
+ random noise. If None (default), noise is determined from the image itself.
+
+ perturbation_rotation_angles: float or list of float, optional, default: 0.0
+ Angles (in degrees) over which the image and mask are rotated. This rotation is only in the x-y (axial)
+ plane. Multiple angles can be provided to create images with different rotations.
+
+ perturbation_translation_fraction: float or list of float, optional, default: 0.0
+ Sub-voxel translation distance fractions of the interpolation grid. This forces the interpolation grid to
+ shift slightly and interpolate at different points. Multiple values can be provided. All values should be
+ between 0.0 and 1.0.
+
+ perturbation_roi_adapt_type: {"fraction", "distance"}, optional, default: "distance"
+ Determines how the mask is grown or shrunk. Can be either "fraction" or "distance". "fraction" is used to
+ grow or shrink the mask by a certain fraction (see the ``perturbation_roi_adapt_size`` parameter).
+ "distance" is used to grow or shrink the mask by a certain physical distance, defined using the
+ ``perturbation_roi_adapt_size`` parameter.
+
+ perturbation_roi_adapt_size: float or list of float, optional, default: 0.0
+ Determines the extent of growth/shrinkage of the ROI mask. The use of this parameter depends on the
+        growth/shrinkage type (``perturbation_roi_adapt_type``). For "distance", this parameter defines
+ growth/shrinkage in physical units, typically mm. For "fraction", this parameter defines growth/shrinkage in
+ volume fraction (e.g. a value of 0.2 grows the mask by 20%). For either type, positive values indicate growing
+ the mask, whereas negative values indicate its shrinkage. Multiple values can be provided to perturb the
+ volume of the mask.
+
+ perturbation_roi_adapt_max_erosion: float, optional, default: 0.8
+ Limits shrinkage of the mask by distance-based adaptations to avoid forming empty masks. Defined as fraction of
+ the original volume, e.g. a value of 0.8 prevents shrinking the mask below 80% of its original volume. Only
+ used when ``perturbation_roi_adapt_type=="distance"``.
+
+    perturbation_randomise_roi_repetitions: int, optional, default: 0
+ Number of repetitions where the mask is randomised using supervoxel-based randomisation.
+
+ roi_split_boundary_size: float or list of float, optional, default: 0.0
+ Width of the rim used for splitting the mask into bulk and rim masks, in physical dimensions. Multiple values
+ can be provided to generate rims of different widths.
+
+ roi_split_max_erosion: float, optional, default: 0.6
+ Determines the minimum volume of the bulk mask when splitting the original mask into bulk and rim sections.
+ Fraction of the original volume, e.g. 0.6 means that the bulk contains at least 60% of the original mask.
+
+ **kwargs: dict, optional
+ Unused keyword arguments.
+ """
+
+ def __init__(
+ self,
+ crop_around_roi: bool = False,
+ crop_distance: float = 150.0,
+ perturbation_noise_repetitions: int = 0,
+ perturbation_noise_level: None | float = None,
+ perturbation_rotation_angles: None | float | list[float] = 0.0,
+ perturbation_translation_fraction: None | float | list[float] = 0.0,
+ perturbation_roi_adapt_type: str = "distance",
+ perturbation_roi_adapt_size: None | float | list[float] = 0.0,
+ perturbation_roi_adapt_max_erosion: float = 0.8,
+ perturbation_randomise_roi_repetitions: int = 0,
+ roi_split_boundary_size: None | float | list[float] = 0.0,
+ roi_split_max_erosion: float = 0.6,
+ **kwargs
+ ):
+
+ # Set crop_around_roi
+ self.crop_around_roi: bool = crop_around_roi
+
+ # Check that crop distance is not negative.
+ if crop_distance < 0.0 and crop_around_roi:
+ raise ValueError(f"The cropping distance cannot be negative. Found: {crop_distance}")
+
+ # Set crop_distance.
+ self.crop_distance: float = crop_distance
+
+ # Check that noise repetitions is not negative.
+ perturbation_noise_repetitions = int(perturbation_noise_repetitions)
+ if perturbation_noise_repetitions < 0:
+ raise ValueError(f"The number of repetitions where noise is added to the image cannot be negative. Found: {perturbation_noise_repetitions}")
+
+ # Set noise repetitions.
+ self.add_noise: bool = perturbation_noise_repetitions > 0
+ self.noise_repetitions: int = perturbation_noise_repetitions
+
+ # Check noise level.
+ if perturbation_noise_level is not None:
+ if perturbation_noise_level < 0.0:
+ raise ValueError(f"The noise level cannot be negative. Found: {perturbation_noise_level}")
+
+ # Set noise level.
+ self.noise_level: None | float = perturbation_noise_level
+
+ # Convert perturbation_rotation_angles to list, if necessary.
+ if not isinstance(perturbation_rotation_angles, list):
+ perturbation_rotation_angles = [perturbation_rotation_angles]
+
+ # Check that the rotation angles are floating points.
+ if not all(isinstance(ii, float) for ii in perturbation_rotation_angles):
+ raise TypeError(f"Not all values for perturbation_rotation_angles are floating point values.")
+
+ # Set rotation_angles.
+ self.rotation_angles: list[float] = perturbation_rotation_angles
+
+ # Convert perturbation_translation_fraction to list, if necessary.
+ if not isinstance(perturbation_translation_fraction, list):
+ perturbation_translation_fraction = [perturbation_translation_fraction]
+
+ # Check that the translation fractions are floating points.
+ if not all(isinstance(ii, float) for ii in perturbation_translation_fraction):
+ raise TypeError(f"Not all values for perturbation_translation_fraction are floating point values.")
+
+        # Check that the translation fractions lie between 0.0 and 1.0, excluding 1.0.
+ if not all(0.0 <= ii < 1.0 for ii in perturbation_translation_fraction):
+ raise ValueError(
+ "Not all values for perturbation_translation_fraction lie between 0.0 and 1.0, not including 1.0."
+ )
+
+ # Set translation_fraction.
+ self.translation_fraction: list[float] = perturbation_translation_fraction
+
+ # Check roi adaptation type.
+ if perturbation_roi_adapt_type not in ["distance", "fraction"]:
+ raise ValueError(f"The perturbation ROI adaptation type should be one of 'distance' or 'fraction'. Found: {perturbation_roi_adapt_type}")
+
+ # Set roi_adapt_type
+ self.roi_adapt_type: str = perturbation_roi_adapt_type
+
+        # Convert perturbation_roi_adapt_size to list, if necessary.
+ if not isinstance(perturbation_roi_adapt_size, list):
+ perturbation_roi_adapt_size = [perturbation_roi_adapt_size]
+
+ # Check that the adapt sizes are floating points.
+ if not all(isinstance(ii, float) for ii in perturbation_roi_adapt_size):
+ raise TypeError(f"Not all values for perturbation_roi_adapt_size are floating point values.")
+
+        # Check that fraction-based adaptation values are greater than -1.0.
+ if perturbation_roi_adapt_type == "fraction" and any([ii <= -1.0 for ii in perturbation_roi_adapt_size]):
+ raise ValueError("All values for perturbation_roi_adapt_size should be greater than -1.0. However, "
+ "one or more values were less.")
+
+ # Set roi_adapt_size
+ self.roi_adapt_size: list[float] = perturbation_roi_adapt_size
+
+ # Check that perturbation_roi_adapt_max_erosion is between 0.0 and 1.0.
+ if not 0.0 <= perturbation_roi_adapt_max_erosion <= 1.0:
+ raise ValueError(f"The perturbation_roi_adapt_max_erosion parameter must have a value between 0.0 and "
+ f"1.0. Found: {perturbation_roi_adapt_max_erosion}")
+
+ # Set max volume erosion.
+ self.max_volume_erosion: float = perturbation_roi_adapt_max_erosion
+
+ # Check that ROI randomisation representation is not negative.
+ perturbation_randomise_roi_repetitions = int(perturbation_randomise_roi_repetitions)
+ if perturbation_randomise_roi_repetitions < 0:
+ raise ValueError(
+ f"The number of repetitions where the ROI mask is randomised cannot be negative. Found: "
+ f"{perturbation_randomise_roi_repetitions}")
+
+ # Set ROI mask randomisation repetitions.
+ self.randomise_roi: bool = perturbation_randomise_roi_repetitions > 0
+ self.roi_random_rep: int = perturbation_randomise_roi_repetitions
+
+ # Check that roi_split_max_erosion is between 0.0 and 1.0.
+ if not 0.0 <= roi_split_max_erosion <= 1.0:
+ raise ValueError(f"The roi_split_max_erosion parameter must have a value between 0.0 and "
+ f"1.0. Found: {roi_split_max_erosion}")
+
+ # Division of roi into bulk and boundary
+ self.max_bulk_volume_erosion: float = roi_split_max_erosion
+
+ # Convert roi_split_boundary_size to list, if necessary.
+ if not isinstance(roi_split_boundary_size, list):
+ roi_split_boundary_size = [roi_split_boundary_size]
+
+        # Check that the boundary sizes are floating points.
+ if not all(isinstance(ii, float) for ii in roi_split_boundary_size):
+ raise TypeError(f"Not all values for roi_split_boundary_size are floating point values.")
+
+        # Check that the boundary sizes are not negative.
+ if not all(ii >= 0.0 for ii in roi_split_boundary_size):
+ raise ValueError("Not all values for roi_split_boundary_size are positive.")
+
+ # Set roi_boundary_size.
+ self.roi_boundary_size: list[float] = roi_split_boundary_size
+
+        # Initialise local perturbation state (set later during processing).
+ self.translate_x: None | float = None
+ self.translate_y: None | float = None
+ self.translate_z: None | float = None
+
+
+
+def get_perturbation_settings() -> list[dict[str, Any]]:
+ return [
+ setting_def("crop_around_roi", "bool", xml_key=["crop_around_roi", "resect"], test=True),
+ setting_def("crop_distance", "float", test=10.0),
+ setting_def(
+ "perturbation_noise_repetitions", "int", xml_key="noise_repetitions",
+ class_key="noise_repetitions", test=10
+ ),
+ setting_def(
+ "perturbation_noise_level", "float", xml_key="noise_level", class_key="noise_level", test=0.75
+ ),
+ setting_def(
+ "perturbation_rotation_angles", "float", to_list=True, xml_key=["rotation_angles", "rot_angles"],
+ class_key="rotation_angles", test=[-33.0, 33.0]
+ ),
+ setting_def(
+ "perturbation_translation_fraction", "float", to_list=True,
+ xml_key=["translation_fraction", "translate_frac"], class_key="translation_fraction", test=[0.25, 0.75]
+ ),
+ setting_def(
+ "perturbation_roi_adapt_type", "str", xml_key="roi_adapt_type", class_key="roi_adapt_type",
+ test="fraction"
+ ),
+ setting_def(
+ "perturbation_roi_adapt_size", "float", to_list=True, xml_key="roi_adapt_size",
+ class_key="roi_adapt_size", test=[0.8, 1.0, 1.2]
+ ),
+ setting_def(
+ "perturbation_roi_adapt_max_erosion", "float", xml_key=["roi_adapt_max_erosion", "eroded_vol_fract"],
+ class_key="max_volume_erosion", test=0.2
+ ),
+ setting_def(
+ "perturbation_randomise_roi_repetitions", "int", xml_key="roi_random_rep",
+ class_key="roi_random_rep", test=100
+ ),
+ setting_def(
+ "roi_split_boundary_size", "float", to_list=True, xml_key="roi_boundary_size",
+ class_key="roi_boundary_size", test=[2.0, 5.0]
+ ),
+ setting_def(
+ "roi_split_max_erosion", "float", xml_key=["roi_split_max_erosion", "bulk_min_vol_fract"],
+ class_key="max_bulk_volume_erosion", test=0.2
+ )
+ ]
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/_modules/mirp/settings/resegmentation_parameters.html b/docs/_modules/mirp/settings/resegmentation_parameters.html
new file mode 100644
index 00000000..f232fba5
--- /dev/null
+++ b/docs/_modules/mirp/settings/resegmentation_parameters.html
@@ -0,0 +1,203 @@
+
+
+
+
+
+ mirp.settings.resegmentation_parameters — mirp 2.2.0 documentation
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Source code for mirp.settings.resegmentation_parameters
+from typing import Any
+from dataclasses import dataclass
+from mirp.settings.utilities import setting_def
+
+import numpy as np
+
+
+
+[docs]
+@dataclass
+class ResegmentationSettingsClass:
+ """
+ Parameters related to mask resegmentation. Resegmentation is used to remove parts of the mask that correspond to
+ undesired intensities that should be excluded, e.g. those corresponding to air. Resegmentation based on an
+ intensity range is also required for using *Fixed Bin Size* discretisation to set the lower bound of the first bin.
+
+ .. note::
+ Even though intensity range resegmentation is usually required to perform *Fixed Bin Size* discretisation,
+ default values are available for computed tomography (CT) and positron emission tomography (PET) imaging,
+ and are set to -1000.0 Hounsfield Units and 0.0 SUV, respectively.
+
+ Parameters
+ ----------
+ Sets parameters related to resegmentation of the segmentation mask.
+
+ resegmentation_intensity_range: list of float, optional
+ Intensity threshold for threshold-based re-segmentation ("threshold" and "range"). If set, requires two
+ values for lower and upper range respectively. The upper range value can also be np.nan for half-open ranges.
+
+ resegmentation_sigma: float, optional
+ Number of standard deviations for outlier-based intensity re-segmentation ("sigma" and "outlier").
+
+ **kwargs: dict, optional
+ Unused keyword arguments.
+ """
+ def __init__(
+ self,
+ resegmentation_intensity_range: None | list[float] = None,
+ resegmentation_sigma: None | float = None,
+ **kwargs
+ ):
+ resegmentation_method = []
+ if resegmentation_sigma is None and resegmentation_intensity_range is None:
+ resegmentation_method += ["none"]
+ if resegmentation_intensity_range is not None:
+ resegmentation_method += ["range"]
+ if resegmentation_sigma is not None:
+ resegmentation_method += ["sigma"]
+
+ # Set resegmentation method.
+ self.resegmentation_method: list[str] = resegmentation_method
+
+ # Set default value.
+ if resegmentation_intensity_range is None:
+ # Cannot define a proper range.
+ resegmentation_intensity_range = [np.nan, np.nan]
+
+ if not isinstance(resegmentation_intensity_range, list):
+ raise TypeError(
+ f"The resegmentation_intensity_range parameter should be a list with exactly two "
+ f"values. Found: an object that is not a list.")
+
+ if len(resegmentation_intensity_range) != 2:
+ raise ValueError(
+ f"The resegmentation_intensity_range parameter should be a list with exactly two "
+ f"values. Found: list with {len(resegmentation_intensity_range)} values.")
+
+ if not all(isinstance(ii, float) for ii in resegmentation_intensity_range):
+ raise TypeError(
+ f"The resegmentation_intensity_range parameter should be a list with exactly two "
+ f"values. Found: one or more values that are not floating point values.")
+
+ self.intensity_range: None | tuple[Any, Any] = tuple(resegmentation_intensity_range) if \
+ resegmentation_intensity_range is not None else None
+
+ # Set default value.
+ if resegmentation_sigma is None:
+ resegmentation_sigma = 3.0
+
+ # Check that sigma is not negative.
+ if resegmentation_sigma < 0.0:
+ raise ValueError(f"The resegmentation_sigma parameter can not be negative. Found: {resegmentation_sigma}")
+
+ self.sigma: float = resegmentation_sigma
+
+
+
+def get_mask_resegmentation_settings() -> list[dict[str, Any]]:
+ return [
+ setting_def(
+ "resegmentation_intensity_range", "float", to_list=True, xml_key=["intensity_range", "g_thresh"],
+ class_key="intensity_range", test=[-10.0, 30.0]
+ ),
+ setting_def("resegmentation_sigma", "float", xml_key="sigma", class_key="sigma", test=1.0)
+ ]
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/_modules/mirp/settings/settingsFeatureExtraction.html b/docs/_modules/mirp/settings/settingsFeatureExtraction.html
index f37e5d08..3bac3f26 100644
--- a/docs/_modules/mirp/settings/settingsFeatureExtraction.html
+++ b/docs/_modules/mirp/settings/settingsFeatureExtraction.html
@@ -1,22 +1,20 @@
-
+
- mirp.settings.settingsFeatureExtraction — mirp 2.1.1 documentation
-
-
-
-
+ mirp.settings.settingsFeatureExtraction — mirp 2.1.0 documentation
+
+
-
-
-
-
-
+
+
+
+
+
@@ -76,12 +74,14 @@
Source code for mirp.settings.settingsFeatureExtraction
-from typing import Any
+from typing import Union, List, Any
from dataclasses import dataclass
from mirp.settings.utilities import setting_def
-[docs]@dataclass
+
+[docs]
+@dataclass
class FeatureExtractionSettingsClass:
"""
Parameters related to feature computation. Many are conditional on the type of features that will be computed (
@@ -272,22 +272,22 @@ Source code for mirp.settings.settingsFeatureExtraction
by_slice: bool = False,
no_approximation: bool = False,
ibsi_compliant: bool = True,
- base_feature_families: None | str | list[str] = "none",
- base_discretisation_method: None | str | list[str] = None,
- base_discretisation_n_bins: None | int | list[int] = None,
- base_discretisation_bin_width: None | float | list[float] = None,
+ base_feature_families: Union[None, str, List[str]] = "none",
+ base_discretisation_method: Union[None, str, List[str]] = None,
+ base_discretisation_n_bins: Union[None, int, List[int]] = None,
+ base_discretisation_bin_width: Union[None, float, List[float]] = None,
ivh_discretisation_method: str = "none",
- ivh_discretisation_n_bins: None | int = 1000,
- ivh_discretisation_bin_width: None | float = None,
- glcm_distance: float | list[float] = 1.0,
- glcm_spatial_method: None | str | list[str] = None,
- glrlm_spatial_method: None | str | list[str] = None,
- glszm_spatial_method: None | str | list[str] = None,
- gldzm_spatial_method: None | str | list[str] = None,
- ngtdm_spatial_method: None | str | list[str] = None,
- ngldm_distance: float | list[float] = 1.0,
- ngldm_difference_level: float | list[float] = 0.0,
- ngldm_spatial_method: None | str | list[str] = None,
+ ivh_discretisation_n_bins: Union[None, int] = 1000,
+ ivh_discretisation_bin_width: Union[None, float] = None,
+ glcm_distance: Union[float, List[float]] = 1.0,
+ glcm_spatial_method: Union[None, str, List[str]] = None,
+ glrlm_spatial_method: Union[None, str, List[str]] = None,
+ glszm_spatial_method: Union[None, str, List[str]] = None,
+ gldzm_spatial_method: Union[None, str, List[str]] = None,
+ ngtdm_spatial_method: Union[None, str, List[str]] = None,
+ ngldm_distance: Union[float, List[float]] = 1.0,
+ ngldm_difference_level: Union[float, List[float]] = 0.0,
+ ngldm_spatial_method: Union[None, str, List[str]] = None,
**kwargs
):
# Set by slice.
@@ -307,7 +307,7 @@ Source code for mirp.settings.settingsFeatureExtraction
base_feature_families = [base_feature_families]
# Check which entries are valid.
- valid_families: list[bool] = [ii in self.get_available_families() for ii in base_feature_families]
+ valid_families: List[bool] = [ii in self.get_available_families() for ii in base_feature_families]
if not all(valid_families):
raise ValueError(
@@ -315,7 +315,7 @@ Source code for mirp.settings.settingsFeatureExtraction
f"{', '.join([base_feature_families[ii] for ii, is_valid in enumerate(valid_families) if not is_valid])}")
# Set families.
- self.families: list[str] = base_feature_families
+ self.families: List[str] = base_feature_families
if not self.has_any_feature_family():
self.families = ["none"]
@@ -386,9 +386,9 @@ Source code for mirp.settings.settingsFeatureExtraction
base_discretisation_bin_width = None
# Set discretisation method-related parameters.
- self.discretisation_method: None | list[str] = base_discretisation_method
- self.discretisation_n_bins: None | list[int] = base_discretisation_n_bins
- self.discretisation_bin_width: None | list[float] = base_discretisation_bin_width
+ self.discretisation_method: Union[None, List[str]] = base_discretisation_method
+ self.discretisation_n_bins: Union[None, List[int]] = base_discretisation_n_bins
+ self.discretisation_bin_width: Union[None, List[float]] = base_discretisation_bin_width
if self.has_ivh_family():
if ivh_discretisation_method not in ["fixed_bin_size", "fixed_bin_number", "none"]:
@@ -434,9 +434,9 @@ Source code for mirp.settings.settingsFeatureExtraction
ivh_discretisation_bin_width = None
# Set parameters
- self.ivh_discretisation_method: None | str = ivh_discretisation_method
- self.ivh_discretisation_n_bins: None | int = ivh_discretisation_n_bins
- self.ivh_discretisation_bin_width: None | float = ivh_discretisation_bin_width
+ self.ivh_discretisation_method: Union[None, str] = ivh_discretisation_method
+ self.ivh_discretisation_n_bins: Union[None, int] = ivh_discretisation_n_bins
+ self.ivh_discretisation_bin_width: Union[None, float] = ivh_discretisation_bin_width
# Set GLCM attributes.
if self.has_glcm_family():
@@ -463,8 +463,8 @@ Source code for mirp.settings.settingsFeatureExtraction
glcm_distance = None
glcm_spatial_method = None
- self.glcm_distance: None | list[float] = glcm_distance
- self.glcm_spatial_method: None | list[str] = glcm_spatial_method
+ self.glcm_distance: Union[None, List[float]] = glcm_distance
+ self.glcm_spatial_method: Union[None, List[str]] = glcm_spatial_method
# Set GLRLM attributes.
if self.has_glrlm_family():
@@ -475,7 +475,7 @@ Source code for mirp.settings.settingsFeatureExtraction
else:
glrlm_spatial_method = None
- self.glrlm_spatial_method: None | list[str] = glrlm_spatial_method
+ self.glrlm_spatial_method: Union[None, List[str]] = glrlm_spatial_method
# Set GLSZM attributes.
if self.has_glszm_family():
@@ -485,7 +485,7 @@ Source code for mirp.settings.settingsFeatureExtraction
else:
glszm_spatial_method = None
- self.glszm_spatial_method: None | list[str] = glszm_spatial_method
+ self.glszm_spatial_method: Union[None, List[str]] = glszm_spatial_method
# Set GLDZM attributes.
if self.has_gldzm_family():
@@ -496,7 +496,7 @@ Source code for mirp.settings.settingsFeatureExtraction
else:
gldzm_spatial_method = None
- self.gldzm_spatial_method: None | list[str] = gldzm_spatial_method
+ self.gldzm_spatial_method: Union[None, List[str]] = gldzm_spatial_method
# Set NGTDM attributes.
if self.has_ngtdm_family():
@@ -507,7 +507,7 @@ Source code for mirp.settings.settingsFeatureExtraction
else:
ngtdm_spatial_method = None
- self.ngtdm_spatial_method: None | list[str] = ngtdm_spatial_method
+ self.ngtdm_spatial_method: Union[None, List[str]] = ngtdm_spatial_method
# Set NGLDM attributes
if self.has_ngldm_family():
@@ -549,9 +549,9 @@ Source code for mirp.settings.settingsFeatureExtraction
ngldm_distance = None
ngldm_difference_level = None
- self.ngldm_dist: None | list[float] = ngldm_distance
- self.ngldm_diff_lvl: None | list[float] = ngldm_difference_level
- self.ngldm_spatial_method: None | list[str] = ngldm_spatial_method
+ self.ngldm_dist: Union[None, List[float]] = ngldm_distance
+ self.ngldm_diff_lvl: Union[None, List[float]] = ngldm_difference_level
+ self.ngldm_spatial_method: Union[None, List[str]] = ngldm_spatial_method
@staticmethod
def get_available_families():
@@ -675,6 +675,7 @@ Source code for mirp.settings.settingsFeatureExtraction
return x
+
def get_feature_extraction_settings() -> list[dict[str, Any]]:
return [
setting_def("ibsi_compliant", "bool", test=True),
diff --git a/docs/_modules/mirp/settings/settingsGeneral.html b/docs/_modules/mirp/settings/settingsGeneral.html
index 35c3e1a1..a2c35ea3 100644
--- a/docs/_modules/mirp/settings/settingsGeneral.html
+++ b/docs/_modules/mirp/settings/settingsGeneral.html
@@ -1,22 +1,20 @@
-
+
- mirp.settings.settingsGeneral — mirp 2.1.1 documentation
-
-
-
-
+ mirp.settings.settingsGeneral — mirp 2.1.0 documentation
+
+
-
-
-
-
-
+
+
+
+
+
@@ -82,7 +80,9 @@ Source code for mirp.settings.settingsGeneral
from mirp.settings.utilities import setting_def
-[docs]@dataclass
+
+[docs]
+@dataclass
class GeneralSettingsClass:
"""
Set of overall process parameters. The most important parameter here is ``by_slice`` which affects how images are
@@ -152,6 +152,7 @@ Source code for mirp.settings.settingsGeneral
self.no_approximation: bool = no_approximation
+
def get_general_settings() -> list[dict[str, Any]]:
return [
setting_def("by_slice", "bool", test=True),
diff --git a/docs/_modules/mirp/settings/settingsGeneric.html b/docs/_modules/mirp/settings/settingsGeneric.html
index ec5ff93f..bf005a13 100644
--- a/docs/_modules/mirp/settings/settingsGeneric.html
+++ b/docs/_modules/mirp/settings/settingsGeneric.html
@@ -1,22 +1,20 @@
-
+
- mirp.settings.settingsGeneric — mirp 2.1.1 documentation
-
-
-
-
+ mirp.settings.settingsGeneric — mirp 2.1.0 documentation
+
+
-
-
-
-
-
+
+
+
+
+
@@ -87,7 +85,9 @@ Source code for mirp.settings.settingsGeneric
from mirp.settings.settingsPerturbation import ImagePerturbationSettingsClass
-[docs]class SettingsClass:
+
+[docs]
+class SettingsClass:
"""
Container for objects used to configure the image processing and feature processing workflow. This object can be
initialised in two ways:
@@ -237,6 +237,7 @@ Source code for mirp.settings.settingsGeneric
return False
return True
+
diff --git a/docs/_modules/mirp/settings/settingsImageProcessing.html b/docs/_modules/mirp/settings/settingsImageProcessing.html
index 4ce5b9f8..83510015 100644
--- a/docs/_modules/mirp/settings/settingsImageProcessing.html
+++ b/docs/_modules/mirp/settings/settingsImageProcessing.html
@@ -1,22 +1,20 @@
-
+
- mirp.settings.settingsImageProcessing — mirp 2.1.1 documentation
-
-
-
-
+ mirp.settings.settingsImageProcessing — mirp 2.1.0 documentation
+
+
-
-
-
-
-
+
+
+
+
+
@@ -78,12 +76,14 @@
Source code for mirp.settings.settingsImageProcessing
import numpy as np
-from typing import Any
+from typing import Union, List, Tuple, Any
from dataclasses import dataclass
from mirp.settings.utilities import setting_def
-[docs]@dataclass
+
+[docs]
+@dataclass
class ImagePostProcessingClass:
"""
Parameters related to image processing. Note that parameters concerning image perturbation / augmentation and
@@ -164,13 +164,13 @@ Source code for mirp.settings.settingsImageProcessing
self,
bias_field_correction: bool = False,
bias_field_correction_n_fitting_levels: int = 1,
- bias_field_correction_n_max_iterations: int | list[int] | None = None,
+ bias_field_correction_n_max_iterations: Union[int, List[int], None] = None,
bias_field_convergence_threshold: float = 0.001,
intensity_normalisation: str = "none",
- intensity_normalisation_range: list[float] | None = None,
- intensity_normalisation_saturation: list[float] | None = None,
+ intensity_normalisation_range: Union[List[float], None] = None,
+ intensity_normalisation_saturation: Union[List[float], None] = None,
tissue_mask_type: str = "relative_range",
- tissue_mask_range: list[float] | None = None,
+ tissue_mask_range: Union[List[float], None] = None,
**kwargs
):
@@ -191,7 +191,7 @@ Source code for mirp.settings.settingsImageProcessing
bias_field_correction_n_fitting_levels = None
# Set n_fitting_levels.
- self.n_fitting_levels: None | int = bias_field_correction_n_fitting_levels
+ self.n_fitting_levels: Union[None, int] = bias_field_correction_n_fitting_levels
# Set default value for bias_field_correction_n_max_iterations. This is the number of iterations per fitting
# level.
@@ -233,7 +233,7 @@ Source code for mirp.settings.settingsImageProcessing
bias_field_correction_n_max_iterations = None
# Set n_max_iterations attribute.
- self.n_max_iterations: list[int] | None = bias_field_correction_n_max_iterations
+ self.n_max_iterations: Union[List[int], None] = bias_field_correction_n_max_iterations
# Check that the convergence threshold is a non-negative number.
if bias_field_correction:
@@ -253,7 +253,7 @@ Source code for mirp.settings.settingsImageProcessing
bias_field_convergence_threshold = None
# Set convergence_threshold attribute.
- self.convergence_threshold: None | float = bias_field_convergence_threshold
+ self.convergence_threshold: Union[None, float] = bias_field_convergence_threshold
# Check that intensity_normalisation has the correct values.
if intensity_normalisation not in ["none", "range", "relative_range", "quantile_range", "standardisation"]:
@@ -333,7 +333,7 @@ Source code for mirp.settings.settingsImageProcessing
intensity_normalisation_range = None
# Set normalisation range.
- self.intensity_normalisation_range: None | list[float] = intensity_normalisation_range
+ self.intensity_normalisation_range: Union[None, List[float]] = intensity_normalisation_range
# Check intensity normalisation saturation range.
if intensity_normalisation_saturation is None:
@@ -351,14 +351,13 @@ Source code for mirp.settings.settingsImageProcessing
raise TypeError("The tissue_mask_range parameter can only contain floating point or np.nan values.")
# intensity_normalisation_saturation parameter
- self.intensity_normalisation_saturation: None | list[float] = intensity_normalisation_saturation
+ self.intensity_normalisation_saturation: Union[None, List[float]] = intensity_normalisation_saturation
# Check tissue_mask_type
if tissue_mask_type not in ["none", "range", "relative_range"]:
raise ValueError(
f"The tissue_mask_type parameter is expected to have one of the following values: "
- f"'none', 'range', or 'relative_range'. Found: {tissue_mask_type}."
- )
+ f"'none', 'range', or 'relative_range'. Found: {tissue_mask_type}.")
# Set tissue_mask_type
self.tissue_mask_type: str = tissue_mask_type
@@ -392,7 +391,8 @@ Source code for mirp.settings.settingsImageProcessing
"The tissue_mask_range parameter should consist of two values between 0.0 and 1.0.")
# Set tissue_mask_range.
- self.tissue_mask_range: tuple[float, ...] = tuple(tissue_mask_range)
+ self.tissue_mask_range: Tuple[float, ...] = tuple(tissue_mask_range)
+
def get_post_processing_settings() -> list[dict[str, Any]]:
diff --git a/docs/_modules/mirp/settings/settingsImageTransformation.html b/docs/_modules/mirp/settings/settingsImageTransformation.html
index 1cecde19..363fff62 100644
--- a/docs/_modules/mirp/settings/settingsImageTransformation.html
+++ b/docs/_modules/mirp/settings/settingsImageTransformation.html
@@ -1,22 +1,20 @@
-
+
- mirp.settings.settingsImageTransformation — mirp 2.1.1 documentation
-
-
-
-
+ mirp.settings.settingsImageTransformation — mirp 2.1.0 documentation
+
+
-
-
-
-
-
+
+
+
+
+
@@ -77,7 +75,7 @@
Source code for mirp.settings.settingsImageTransformation
import copy
-from typing import Any
+from typing import Union, List, Any
from dataclasses import dataclass
import numpy as np
@@ -86,7 +84,9 @@ Source code for mirp.settings.settingsImageTransformation
from mirp.settings.utilities import setting_def
-[docs]@dataclass
+
+[docs]
+@dataclass
class ImageTransformationSettingsClass:
"""
Parameters related to image transformation using filters. Many parameters are conditional on the selected image
@@ -409,49 +409,49 @@ Source code for mirp.settings.settingsImageTransformation
self,
by_slice: bool,
response_map_feature_settings: FeatureExtractionSettingsClass | None = None,
- response_map_feature_families: None | str | list[str] = "statistical",
- response_map_discretisation_method: None | str | list[str] = "fixed_bin_number",
- response_map_discretisation_n_bins: None | int | list[int] = 16,
- response_map_discretisation_bin_width: None | int | list[float] = None,
- filter_kernels: None | str | list[str] = None,
- boundary_condition: None | str = "mirror",
- separable_wavelet_families: None | str | list[str] = None,
- separable_wavelet_set: None | str | list[str] = None,
+ response_map_feature_families: Union[None, str, List[str]] = "statistical",
+ response_map_discretisation_method: Union[None, str, List[str]] = "fixed_bin_number",
+ response_map_discretisation_n_bins: Union[None, int, List[int]] = 16,
+ response_map_discretisation_bin_width: Union[None, int, List[int]] = None,
+ filter_kernels: Union[None, str, List[str]] = None,
+ boundary_condition: Union[None, str] = "mirror",
+ separable_wavelet_families: Union[None, str, List[str]] = None,
+ separable_wavelet_set: Union[None, str, List[str]] = None,
separable_wavelet_stationary: bool = True,
- separable_wavelet_decomposition_level: None | int | list[int] = 1,
+ separable_wavelet_decomposition_level: Union[None, int, List[int]] = 1,
separable_wavelet_rotation_invariance: bool = True,
separable_wavelet_pooling_method: str = "max",
- separable_wavelet_boundary_condition: None | str = None,
- nonseparable_wavelet_families: None | str | list[str] = None,
- nonseparable_wavelet_decomposition_level: None | int | list[int] = 1,
- nonseparable_wavelet_response: None | str = "real",
- nonseparable_wavelet_boundary_condition: None | str = None,
- gaussian_sigma: None | float | list[float] = None,
- gaussian_kernel_truncate: None | float = 4.0,
- gaussian_kernel_boundary_condition: None | str = None,
- laplacian_of_gaussian_sigma: None | float | list[float] = None,
- laplacian_of_gaussian_kernel_truncate: None | float = 4.0,
+ separable_wavelet_boundary_condition: Union[None, str] = None,
+ nonseparable_wavelet_families: Union[None, str, List[str]] = None,
+ nonseparable_wavelet_decomposition_level: Union[None, int, List[int]] = 1,
+ nonseparable_wavelet_response: Union[None, str] = "real",
+ nonseparable_wavelet_boundary_condition: Union[None, str] = None,
+ gaussian_sigma: Union[None, float, List[float]] = None,
+ gaussian_kernel_truncate: Union[None, float] = 4.0,
+ gaussian_kernel_boundary_condition: Union[None, str] = None,
+ laplacian_of_gaussian_sigma: Union[None, float, List[float]] = None,
+ laplacian_of_gaussian_kernel_truncate: Union[None, float] = 4.0,
laplacian_of_gaussian_pooling_method: str = "none",
- laplacian_of_gaussian_boundary_condition: None | str = None,
- laws_kernel: None | str | list[str] = None,
- laws_delta: int | list[int] = 7,
+ laplacian_of_gaussian_boundary_condition: Union[None, str] = None,
+ laws_kernel: Union[None, str, List[str]] = None,
+ laws_delta: Union[int, List[int]] = 7,
laws_compute_energy: bool = True,
laws_rotation_invariance: bool = True,
laws_pooling_method: str = "max",
- laws_boundary_condition: None | str = None,
- gabor_sigma: None | float | list[float] = None,
- gabor_lambda: None | float | list[float] = None,
- gabor_gamma: None | float | list[float] = 1.0,
- gabor_theta: None | float | list[float] = 0.0,
- gabor_theta_step: None | float = None,
+ laws_boundary_condition: Union[None, str] = None,
+ gabor_sigma: Union[None, float, List[float]] = None,
+ gabor_lambda: Union[None, float, List[float]] = None,
+ gabor_gamma: Union[None, float, List[float]] = 1.0,
+ gabor_theta: Union[None, float, List[float]] = 0.0,
+ gabor_theta_step: Union[None, float] = None,
gabor_response: str = "modulus",
gabor_rotation_invariance: bool = False,
gabor_pooling_method: str = "max",
- gabor_boundary_condition: None | str = None,
- mean_filter_kernel_size: None | int | list[int] = None,
- mean_filter_boundary_condition: None | str = None,
- riesz_filter_order: None | int | list[int] = None,
- riesz_filter_tensor_sigma: None | float | list[float] = None,
+ gabor_boundary_condition: Union[None, str] = None,
+ mean_filter_kernel_size: Union[None, int, List[int]] = None,
+ mean_filter_boundary_condition: Union[None, str] = None,
+ riesz_filter_order: Union[None, int, List[int]] = None,
+ riesz_filter_tensor_sigma: Union[None, float, List[float]] = None,
**kwargs
):
# Set by slice
@@ -466,14 +466,14 @@ Source code for mirp.settings.settingsImageTransformation
if filter_kernels is not None:
# Check validity of the filter kernel names.
- valid_kernels: list[bool] = [ii in self.get_available_image_filters() for ii in filter_kernels]
+ valid_kernels: List[bool] = [ii in self.get_available_image_filters() for ii in filter_kernels]
if not all(valid_kernels):
raise ValueError(
f"One or more kernels are not implemented, or were spelled incorrectly: "
f"{', '.join([filter_kernel for ii, filter_kernel in enumerate(filter_kernels) if not valid_kernels[ii]])}")
- self.spatial_filters: None | list[str] = filter_kernels
+ self.spatial_filters: Union[None, List[str]] = filter_kernels
# Check families.
if response_map_feature_families is None:
@@ -483,7 +483,7 @@ Source code for mirp.settings.settingsImageTransformation
response_map_feature_families = [response_map_feature_families]
# Check which entries are valid.
- valid_families: list[bool] = [ii in [
+ valid_families: List[bool] = [ii in [
"li", "loc.int", "loc_int", "local_int", "local_intensity", "st", "stat", "stats", "statistics",
"statistical", "ih", "int_hist", "int_histogram", "intensity_histogram",
"ivh", "int_vol_hist", "intensity_volume_histogram", "cm", "glcm", "grey_level_cooccurrence_matrix",
@@ -552,8 +552,8 @@ Source code for mirp.settings.settingsImageTransformation
mean_filter_kernel_size = None
mean_filter_boundary_condition = None
- self.mean_filter_size: None | list[int] = mean_filter_kernel_size
- self.mean_filter_boundary_condition: None | str = mean_filter_boundary_condition
+ self.mean_filter_size: Union[None, List[int]] = mean_filter_kernel_size
+ self.mean_filter_boundary_condition: Union[None, str] = mean_filter_boundary_condition
# Check Gaussian kernel settings.
if self.has_gaussian_filter():
@@ -577,9 +577,9 @@ Source code for mirp.settings.settingsImageTransformation
gaussian_kernel_truncate = None
gaussian_kernel_boundary_condition = None
- self.gaussian_sigma: None | list[float] = gaussian_sigma
- self.gaussian_sigma_truncate: None | float = gaussian_kernel_truncate
- self.gaussian_boundary_condition: None | str = gaussian_kernel_boundary_condition
+ self.gaussian_sigma: Union[None, List[float]] = gaussian_sigma
+ self.gaussian_sigma_truncate: Union[None, float] = gaussian_kernel_truncate
+ self.gaussian_boundary_condition: Union[None, str] = gaussian_kernel_boundary_condition
# Check laplacian-of-gaussian filter settings
if self.has_laplacian_of_gaussian_filter():
@@ -609,15 +609,16 @@ Source code for mirp.settings.settingsImageTransformation
laplacian_of_gaussian_pooling_method = None
laplacian_of_gaussian_boundary_condition = None
- self.log_sigma: None | list[float] = laplacian_of_gaussian_sigma
- self.log_sigma_truncate: None | float = laplacian_of_gaussian_kernel_truncate
- self.log_pooling_method: None | str = laplacian_of_gaussian_pooling_method
- self.log_boundary_condition: None | str = laplacian_of_gaussian_boundary_condition
+ self.log_sigma: Union[None, List[float]] = laplacian_of_gaussian_sigma
+ self.log_sigma_truncate: Union[None, float] = laplacian_of_gaussian_kernel_truncate
+ self.log_pooling_method: Union[None, str] = laplacian_of_gaussian_pooling_method
+ self.log_boundary_condition: Union[None, str] = laplacian_of_gaussian_boundary_condition
# Check Laws kernel filter settings
if self.has_laws_filter():
# Check kernel.
- laws_kernel = self.check_laws_kernels(laws_kernel, "laws_kernel")
+ laws_kernel = self.check_laws_kernels(laws_kernel,
+ "laws_kernel")
# Check energy computation.
if not isinstance(laws_compute_energy, bool):
@@ -660,12 +661,12 @@ Source code for mirp.settings.settingsImageTransformation
laws_pooling_method = None
laws_boundary_condition = None
- self.laws_calculate_energy: None | bool = laws_compute_energy
- self.laws_kernel: None | list[str] = laws_kernel
- self.laws_delta: None | bool = laws_delta
- self.laws_rotation_invariance: None | bool = laws_rotation_invariance
- self.laws_pooling_method: None | str = laws_pooling_method
- self.laws_boundary_condition: None | str = laws_boundary_condition
+ self.laws_calculate_energy: Union[None, bool] = laws_compute_energy
+ self.laws_kernel: Union[None, List[str]] = laws_kernel
+ self.laws_delta: Union[None, bool] = laws_delta
+ self.laws_rotation_invariance: Union[None, bool] = laws_rotation_invariance
+ self.laws_pooling_method: Union[None, str] = laws_pooling_method
+ self.laws_boundary_condition: Union[None, str] = laws_boundary_condition
# Check Gabor filter settings.
if self.has_gabor_filter():
@@ -739,15 +740,15 @@ Source code for mirp.settings.settingsImageTransformation
gabor_pooling_method = None
gabor_boundary_condition = None
- self.gabor_sigma: None | list[float] = gabor_sigma
- self.gabor_gamma: None | list[float] = gabor_gamma
- self.gabor_lambda: None | list[float] = gabor_lambda
- self.gabor_theta: None | list[float] | list[int] = gabor_theta
- self.gabor_pool_theta: None | bool = gabor_pool_theta
- self.gabor_response: None | str = gabor_response
- self.gabor_rotation_invariance: None | str = gabor_rotation_invariance
- self.gabor_pooling_method: None | str = gabor_pooling_method
- self.gabor_boundary_condition: None | str = gabor_boundary_condition
+ self.gabor_sigma: Union[None, List[float]] = gabor_sigma
+ self.gabor_gamma: Union[None, List[float]] = gabor_gamma
+ self.gabor_lambda: Union[None, List[float]] = gabor_lambda
+ self.gabor_theta: Union[None, List[float], List[int]] = gabor_theta
+ self.gabor_pool_theta: Union[None, bool] = gabor_pool_theta
+ self.gabor_response: Union[None, str] = gabor_response
+ self.gabor_rotation_invariance: Union[None, str] = gabor_rotation_invariance
+ self.gabor_pooling_method: Union[None, str] = gabor_pooling_method
+ self.gabor_boundary_condition: Union[None, str] = gabor_boundary_condition
# Check separable wavelet settings.
if self.has_separable_wavelet_filter():
@@ -787,13 +788,13 @@ Source code for mirp.settings.settingsImageTransformation
separable_wavelet_pooling_method = None
separable_wavelet_boundary_condition = None
- self.separable_wavelet_families: None | list[str] = separable_wavelet_families
- self.separable_wavelet_filter_set: None | list[str] = separable_wavelet_set
- self.separable_wavelet_stationary: None | bool = separable_wavelet_stationary
- self.separable_wavelet_decomposition_level: None | list[int] = separable_wavelet_decomposition_level
- self.separable_wavelet_rotation_invariance: None | bool = separable_wavelet_rotation_invariance
- self.separable_wavelet_pooling_method: None | str = separable_wavelet_pooling_method
- self.separable_wavelet_boundary_condition: None | str = separable_wavelet_boundary_condition
+ self.separable_wavelet_families: Union[None, List[str]] = separable_wavelet_families
+ self.separable_wavelet_filter_set: Union[None, List[str]] = separable_wavelet_set
+ self.separable_wavelet_stationary: Union[None, bool] = separable_wavelet_stationary
+ self.separable_wavelet_decomposition_level: Union[None, List[int]] = separable_wavelet_decomposition_level
+ self.separable_wavelet_rotation_invariance: Union[None, bool] = separable_wavelet_rotation_invariance
+ self.separable_wavelet_pooling_method: Union[None, str] = separable_wavelet_pooling_method
+ self.separable_wavelet_boundary_condition: Union[None, str] = separable_wavelet_boundary_condition
# Set parameters for non-separable wavelets.
if self.has_nonseparable_wavelet_filter():
@@ -819,10 +820,10 @@ Source code for mirp.settings.settingsImageTransformation
nonseparable_wavelet_response = None
nonseparable_wavelet_boundary_condition = None
- self.nonseparable_wavelet_families: None | list[str] = nonseparable_wavelet_families
- self.nonseparable_wavelet_decomposition_level: None | list[int] = nonseparable_wavelet_decomposition_level
- self.nonseparable_wavelet_response: None | str = nonseparable_wavelet_response
- self.nonseparable_wavelet_boundary_condition: None | str = nonseparable_wavelet_boundary_condition
+ self.nonseparable_wavelet_families: Union[None, List[str]] = nonseparable_wavelet_families
+ self.nonseparable_wavelet_decomposition_level: Union[None, List[int]] = nonseparable_wavelet_decomposition_level
+ self.nonseparable_wavelet_response: Union[None, str] = nonseparable_wavelet_response
+ self.nonseparable_wavelet_boundary_condition: Union[None, str] = nonseparable_wavelet_boundary_condition
# Check Riesz filter orders.
if self.has_riesz_filter():
@@ -837,8 +838,8 @@ Source code for mirp.settings.settingsImageTransformation
else:
riesz_filter_tensor_sigma = None
- self.riesz_order: None | list[list[int]] = riesz_filter_order
- self.riesz_filter_tensor_sigma: None | list[float] = riesz_filter_tensor_sigma
+ self.riesz_order: Union[None, List[List[int]]] = riesz_filter_order
+ self.riesz_filter_tensor_sigma: Union[None, List[float]] = riesz_filter_tensor_sigma
@staticmethod
def get_available_image_filters():
@@ -982,7 +983,7 @@ Source code for mirp.settings.settingsImageTransformation
return x
- def check_separable_wavelet_sets(self, x: None | str | list[str], var_name: str):
+ def check_separable_wavelet_sets(self, x: Union[None, str, List[str]], var_name):
from itertools import product
if x is None:
@@ -1019,7 +1020,7 @@ Source code for mirp.settings.settingsImageTransformation
# Return lowercase values.
return [xx.lower() for xx in x]
- def check_laws_kernels(self, x: str | list[str], var_name: str):
+ def check_laws_kernels(self, x: Union[str, List[str]], var_name):
from itertools import product
# Set implemented kernels.
@@ -1185,6 +1186,7 @@ Source code for mirp.settings.settingsImageTransformation
return x is not None and any(filter_kernel.startswith("riesz_steered") for filter_kernel in x)
+
def get_image_transformation_settings() -> list[dict[str, Any]]:
return [
setting_def(
@@ -1204,7 +1206,7 @@ Source code for mirp.settings.settingsImageTransformation
class_key="discretisation_bin_width", test=[10.0, 34.0]
),
setting_def(
- "filter_kernels", "str", to_list=True, xml_key=["filter_kernels", "spatial_filters"],
+ "filter_kernels", "float", to_list=True, xml_key=["filter_kernels", "spatial_filters"],
class_key="spatial_filters", test=[
"separable_wavelet", "nonseparable_wavelet", "riesz_nonseparable_wavelet",
"riesz_steered_nonseparable_wavelet", "gaussian", "riesz_gaussian", "riesz_steered_gaussian",
diff --git a/docs/_modules/mirp/settings/settingsInterpolation.html b/docs/_modules/mirp/settings/settingsInterpolation.html
index c4765bf4..83fabf2c 100644
--- a/docs/_modules/mirp/settings/settingsInterpolation.html
+++ b/docs/_modules/mirp/settings/settingsInterpolation.html
@@ -1,22 +1,20 @@
-
+
- mirp.settings.settingsInterpolation — mirp 2.1.1 documentation
-
-
-
-
+ mirp.settings.settingsInterpolation — mirp 2.1.0 documentation
+
+
-
-
-
-
-
+
+
+
+
+
@@ -81,7 +79,9 @@ Source code for mirp.settings.settingsInterpolation
from mirp.settings.utilities import setting_def
-[docs]@dataclass
+
+[docs]
+@dataclass
class ImageInterpolationSettingsClass:
"""
Parameters related to image interpolating / resampling. Images in a dataset are typically resampled to uniform
@@ -224,6 +224,7 @@ Source code for mirp.settings.settingsInterpolation
return new_spacing
+
def get_image_interpolation_settings() -> list[dict[str, Any]]:
return [
setting_def("new_spacing", "float", to_list=True, test=[1.0, 1.0, 1.0]),
@@ -233,7 +234,9 @@ Source code for mirp.settings.settingsInterpolation
]
-[docs]@dataclass
+
+[docs]
+@dataclass
class MaskInterpolationSettingsClass:
"""
Parameters related to mask interpolation / resampling. MIRP registers the mask to an interpolated image based,
@@ -280,6 +283,7 @@ Source code for mirp.settings.settingsInterpolation
self.incl_threshold = roi_interpolation_mask_inclusion_threshold
+
def get_mask_interpolation_settings() -> list[dict[str, Any]]:
return [
setting_def("roi_spline_order", "int", xml_key="spline_order", class_key="spline_order", test=2),
diff --git a/docs/_modules/mirp/settings/settingsMaskResegmentation.html b/docs/_modules/mirp/settings/settingsMaskResegmentation.html
index 90cb9e11..e4b371e2 100644
--- a/docs/_modules/mirp/settings/settingsMaskResegmentation.html
+++ b/docs/_modules/mirp/settings/settingsMaskResegmentation.html
@@ -1,22 +1,20 @@
-
+
- mirp.settings.settingsMaskResegmentation — mirp 2.1.1 documentation
-
-
-
-
+ mirp.settings.settingsMaskResegmentation — mirp 2.1.0 documentation
+
+
-
-
-
-
-
+
+
+
+
+
@@ -83,7 +81,9 @@ Source code for mirp.settings.settingsMaskResegmentation
import numpy as np
-[docs]@dataclass
+
+[docs]
+@dataclass
class ResegmentationSettingsClass:
"""
Parameters related to mask resegmentation. Resegmentation is used to remove parts of the mask that correspond to
@@ -160,6 +160,7 @@ Source code for mirp.settings.settingsMaskResegmentation
self.sigma: float = resegmentation_sigma
+
def get_mask_resegmentation_settings() -> list[dict[str, Any]]:
return [
setting_def(
diff --git a/docs/_modules/mirp/settings/settingsPerturbation.html b/docs/_modules/mirp/settings/settingsPerturbation.html
index 2577df8b..6511dadb 100644
--- a/docs/_modules/mirp/settings/settingsPerturbation.html
+++ b/docs/_modules/mirp/settings/settingsPerturbation.html
@@ -1,22 +1,20 @@
-
+
- mirp.settings.settingsPerturbation — mirp 2.1.1 documentation
-
-
-
-
+ mirp.settings.settingsPerturbation — mirp 2.1.0 documentation
+
+
-
-
-
-
-
+
+
+
+
+
@@ -76,12 +74,14 @@
Source code for mirp.settings.settingsPerturbation
-from typing import Any
+from typing import Union, List, Any
from dataclasses import dataclass
from mirp.settings.utilities import setting_def
-[docs]@dataclass
+
+[docs]
+@dataclass
class ImagePerturbationSettingsClass:
"""
Parameters related to image and mask perturbation / augmentation. By default images and masks are not perturbed or
@@ -155,14 +155,14 @@ Source code for mirp.settings.settingsPerturbation
crop_around_roi: bool = False,
crop_distance: float = 150.0,
perturbation_noise_repetitions: int = 0,
- perturbation_noise_level: None | float = None,
- perturbation_rotation_angles: None | float | list[float] = 0.0,
- perturbation_translation_fraction: None | float | list[float] = 0.0,
+ perturbation_noise_level: Union[None, float] = None,
+ perturbation_rotation_angles: Union[None, List[float], float] = 0.0,
+ perturbation_translation_fraction: Union[None, List[float], float] = 0.0,
perturbation_roi_adapt_type: str = "distance",
- perturbation_roi_adapt_size: None | float | list[float] = 0.0,
+ perturbation_roi_adapt_size: Union[None, List[float], float] = 0.0,
perturbation_roi_adapt_max_erosion: float = 0.8,
perturbation_randomise_roi_repetitions: int = 0,
- roi_split_boundary_size: None | float | list[float] = 0.0,
+ roi_split_boundary_size: Union[None, List[float], float] = 0.0,
roi_split_max_erosion: float = 0.6,
**kwargs
):
@@ -192,7 +192,7 @@ Source code for mirp.settings.settingsPerturbation
raise ValueError(f"The noise level cannot be negative. Found: {perturbation_noise_level}")
# Set noise level.
- self.noise_level: None | float = perturbation_noise_level
+ self.noise_level: Union[None, float] = perturbation_noise_level
# Convert perturbation_rotation_angles to list, if necessary.
if not isinstance(perturbation_rotation_angles, list):
@@ -203,7 +203,7 @@ Source code for mirp.settings.settingsPerturbation
raise TypeError(f"Not all values for perturbation_rotation_angles are floating point values.")
# Set rotation_angles.
- self.rotation_angles: list[float] = perturbation_rotation_angles
+ self.rotation_angles: List[float] = perturbation_rotation_angles
# Convert perturbation_translation_fraction to list, if necessary.
if not isinstance(perturbation_translation_fraction, list):
@@ -215,12 +215,11 @@ Source code for mirp.settings.settingsPerturbation
# Check that the translation fractions lie between 0.0 and 1.0.
if not all(0.0 <= ii < 1.0 for ii in perturbation_translation_fraction):
- raise ValueError(
- "Not all values for perturbation_translation_fraction lie between 0.0 and 1.0, not including 1.0."
- )
+ raise ValueError("Not all values for perturbation_translation_fraction lie between 0.0 and 1.0, "
+ "not including 1.0.")
# Set translation_fraction.
- self.translation_fraction: list[float] = perturbation_translation_fraction
+ self.translation_fraction: List[float] = perturbation_translation_fraction
# Check roi adaptation type.
if perturbation_roi_adapt_type not in ["distance", "fraction"]:
@@ -243,7 +242,7 @@ Source code for mirp.settings.settingsPerturbation
"one or more values were less.")
# Set roi_adapt_size
- self.roi_adapt_size: list[float] = perturbation_roi_adapt_size
+ self.roi_adapt_size: List[float] = perturbation_roi_adapt_size
# Check that perturbation_roi_adapt_max_erosion is between 0.0 and 1.0.
if not 0.0 <= perturbation_roi_adapt_max_erosion <= 1.0:
@@ -285,12 +284,13 @@ Source code for mirp.settings.settingsPerturbation
raise ValueError("Not all values for roi_split_boundary_size are positive.")
# Set roi_boundary_size.
- self.roi_boundary_size: list[float] = roi_split_boundary_size
+ self.roi_boundary_size: List[float] = roi_split_boundary_size
# Initially local variables
- self.translate_x: None | float = None
- self.translate_y: None | float = None
- self.translate_z: None | float = None
+ self.translate_x: Union[None, float] = None
+ self.translate_y: Union[None, float] = None
+ self.translate_z: Union[None, float] = None
+
def get_perturbation_settings() -> list[dict[str, Any]]:
diff --git a/docs/_modules/mirp/settings/transformation_parameters.html b/docs/_modules/mirp/settings/transformation_parameters.html
new file mode 100644
index 00000000..3d80d331
--- /dev/null
+++ b/docs/_modules/mirp/settings/transformation_parameters.html
@@ -0,0 +1,1386 @@
+
+
+
+
+
+ mirp.settings.transformation_parameters — mirp 2.2.0 documentation
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Source code for mirp.settings.transformation_parameters
+import copy
+from typing import Any
+from dataclasses import dataclass
+
+import numpy as np
+
+from mirp.settings.feature_parameters import FeatureExtractionSettingsClass
+from mirp.settings.utilities import setting_def
+
+
+
+[docs]
+@dataclass
+class ImageTransformationSettingsClass:
+ """
+ Parameters related to image transformation using filters. Many parameters are conditional on the selected image
+ filter (``filter_kernels``). By default, only statistical features are computed from filtered images.
+
+ .. note::
+ Many feature extraction parameters are copied from
+ :class:`~mirp.settings.feature_parameters.FeatureExtractionSettingsClass`, except
+ ``response_map_feature_families``, ``response_map_discretisation_method`` and
+ ``response_map_discretisation_n_bins``. If other parameters need to be changed from their default settings,
+ first create an object of the current class (
+ :class:`~mirp.settings.transformation_parameters.ImageTransformationSettingsClass`), and then update the
+ attributes.
+
+ Parameters
+ ----------
+ by_slice: str or bool, optional, default: False
+ Defines whether calculations should be performed in 2D (True) or 3D (False), or alternatively only in the
+ largest slice ("largest"). See :class:`~mirp.settings.general_parameters.GeneralSettingsClass`.
+
+ ibsi_compliant: bool, optional, default: True
+ Limits use of filters to those that exist in the IBSI reference standard.
+
+ response_map_feature_families: str or list of str, optional, default: "statistics"
+ Determines the feature families for which features are computed from response maps (filtered images). Radiomics
+ features are implemented as defined in the IBSI reference manual. The following feature families can be
+ computed from response maps:
+
+ * Local intensity features: "li", "loc.int", "loc_int", "local_int", and "local_intensity".
+ * Intensity-based statistical features: "st", "stat", "stats", "statistics", and "statistical".
+ * Intensity histogram features: "ih", "int_hist", "int_histogram", and "intensity_histogram".
+ * Intensity-volume histogram features: "ivh", "int_vol_hist", and "intensity_volume_histogram".
+ * Grey level co-occurrence matrix (GLCM) features: "cm", "glcm", "grey_level_cooccurrence_matrix",
+ and "cooccurrence_matrix".
+ * Grey level run length matrix (GLRLM) features: "rlm", "glrlm", "grey_level_run_length_matrix", and
+ "run_length_matrix".
+ * Grey level size zone matrix (GLSZM) features: "szm", "glszm", "grey_level_size_zone_matrix", and
+ "size_zone_matrix".
+ * Grey level distance zone matrix (GLDZM) features: "dzm", "gldzm", "grey_level_distance_zone_matrix", and
+ "distance_zone_matrix".
+ * Neighbourhood grey tone difference matrix (NGTDM) features: "tdm", "ngtdm",
+ "neighbourhood_grey_tone_difference_matrix", and "grey_tone_difference_matrix".
+ * Neighbouring grey level dependence matrix (NGLDM) features: "ldm", "ngldm",
+ "neighbouring_grey_level_dependence_matrix", and "grey_level_dependence_matrix".
+
+ In addition, the following tags can be used:
+
+ * "none": no features are computed.
+ * "all": all features are computed.
+
+ A list of tags may be provided to select multiple feature families. Morphological features are not computed
+ from response maps, because these are mask-based and are invariant to filtering.
+
+ response_map_discretisation_method: {"fixed_bin_number", "fixed_bin_size", "fixed_bin_size_pyradiomics", "none"}, optional, default: "fixed_bin_number"
+ Method used for discretising intensities. Used to compute intensity histogram as well as texture features.
+ The setting is ignored if none of these feature families are being computed. The following options are
+ available:
+
+ * "fixed_bin_number": The intensity range within the mask is divided into a fixed number of bins,
+ defined by the ``base_discretisation_bin_width`` parameter.
+ * "fixed_bin_size": The intensity range is divided into bins with a fixed width, defined using the
+ ``base_discretisation_bin_width`` parameter. The lower bound of the range is determined from the lower
+ bound of the mask resegmentation range, see the ``resegmentation_intensity_range`` in
+ :class:`~mirp.settings.resegmentation_parameters.ResegmentationSettingsClass`. Other images,
+ including MRI, normalised CT and PET images and filtered images, do not have a default value, and bins are
+ created from using the minimum intensity as lower bound.
+ * "fixed_bin_size_pyradiomics": The intensity range is divided into bins with a fixed width. This follows the
+ non-IBSI compliant implementation in the pyradiomics package.
+ * "none": The intensity range is not discretised into bins. This method can only be used if the image
+ intensities are integer and strictly positive.
+
+ Multiple discretisation methods can be specified as a list to yield features according to each method.
+
+ .. note::
+ Use of the "fixed_bin_size", "fixed_bin_size_pyradiomics", and "none" discretisation methods is discouraged
+ for transformed images. Due to transformation, a direct link to any meaningful quantity represented by the
+ intensity of the original image (e.g. Hounsfield Units for CT, Standardised Uptake Value for PET) is lost.
+
+ response_map_discretisation_n_bins: int or list of int, optional, default: 16
+ Number of bins used for the "fixed_bin_number" discretisation method. Multiple values can be specified in a
+ list to yield features according to each number of bins.
+
+ response_map_discretisation_bin_width: float or list of float, optional
+ Width of each bin in the "fixed_bin_size" and "fixed_bin_size_pyradiomics" discretisation methods. Multiple
+ values can be specified in a list to yield features according to each bin width.
+
+ filter_kernels: str or list of str, optional, default: None
+ Names of the filters applied to the original image to create response maps (filtered images). Filter
+ implementation follows the IBSI reference manual. The following filters are supported:
+
+ * Mean filters: "mean"
+ * Gaussian filters: "gaussian", "riesz_gaussian", and "riesz_steered_gaussian"
+ * Laplacian-of-Gaussian filters: "laplacian_of_gaussian", "log", "riesz_laplacian_of_gaussian",
+ "riesz_log", "riesz_steered_laplacian_of_gaussian", and "riesz_steered_log".
+ * Laws kernels: "laws"
+ * Gabor kernels: "gabor", "riesz_gabor", and "riesz_steered_gabor"
+ * Separable wavelets: "separable_wavelet"
+ * Non-separable wavelets: "nonseparable_wavelet", "riesz_nonseparable_wavelet",
+ and "riesz_steered_nonseparable_wavelet"
+ * Function transformations: "pyradiomics_square", "pyradiomics_square_root",
+ and "pyradiomics_logarithm", "pyradiomics_exponential"
+
+ Filters with names that preceded by "riesz" undergo a Riesz transformation. If the filter name is preceded by
+ "riesz_steered", a steerable riesz filter is used.
+
+ More than one filter name can be provided. By default, no filters are selected, and image transformation is
+ skipped.
+
+ .. note::
+ There is no IBSI reference standard for Gaussian filters. However, the filter implementation is relatively
+ straightforward, and most likely reproducible.
+
+ .. warning::
+ Riesz transformation and steerable riesz transformations are experimental. The implementation of these
+ filter transformations is complex. Since there is no corresponding IBSI reference standard, any feature
+ derived from response maps of Riesz transformations is unlikely to be reproducible.
+
+ .. warning::
+ Function transformations (square, square root, logarithm, exponential) do not have an IBSI reference
+ standard. These transformations follow the definition in pyradiomics, and have been implemented for
+ validation purposes.
+
+ boundary_condition: {"reflect", "constant", "nearest", "mirror", "wrap"}, optional, default: "mirror"
+ Sets the boundary condition, which determines how filters behave at the edge of an image. MIRP uses
+ the same nomenclature for boundary conditions as scipy.ndimage. See the ``mode`` parameter of
+ `scipy.ndimage.convolve
+ <https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.convolve.html#scipy.ndimage.convolve>`_
+
+ separable_wavelet_families: str or list str
+ Name of separable wavelet kernel as implemented in the ``pywavelets`` package. See `pywt.wavelist(
+ kind="discrete") <https://pywavelets.readthedocs.io/en/latest/ref/wavelets.html#built-in-wavelets-wavelist>`_
+ for options.
+
+ separable_wavelet_set: str or list of str, optional
+ Filter orientation of separable wavelets. Allows for specifying combinations for high and low-pass filters.
+ For 2D (``by_slice=True``) filters, the following sets are possible: "hh", "hl", "lh", "ll" (y-x directions).
+ For 3D (``by_slice=False``) filters, the set of possibilities is larger: "hhh", "hhl", "hlh", "lhh", "hll",
+ "lhl", "llh", "lll". More than one orientation may be set. Default: "hh" (2d) or "hhh (3d).
+
+ separable_wavelet_stationary: bool, optional, default: True
+ Determines if wavelets are stationary or not. Stationary wavelets maintain the image dimensions after
+ decomposition.
+
+ separable_wavelet_decomposition_level: int or list of int, optional, default: 1
+ Sets the wavelet decomposition level. For the first decomposition level, the base image is used as input to
+ generate a response map. For decomposition levels greater than 1, the low-pass image from the previous level
+ is used as input. More than 1 value may be specified in a list.
+
+ separable_wavelet_rotation_invariance: bool, optional, default: True
+ Determines whether separable filters are applied in a pseudo-rotational invariant manner. This generates
+ permutations of the filter and, as a consequence, additional response maps. These maps are then merged using
+ the pooling method (``separable_wavelet_pooling_method``).
+
+ separable_wavelet_pooling_method: {"max", "min", "mean", "sum"}, optional, default: "max"
+ Response maps are pooled to create a rotationally invariant response map. This sets the method for
+ pooling.
+
+ * "max": Each voxel of the pooled response map represents the maximum value for that voxel in the underlying
+ response maps.
+ * "min": Each voxel of the pooled response map represents the minimum value for that voxel in the underlying
+ response maps.
+ * "mean": Each voxel of the pooled response map represents the mean value for that voxel in the underlying
+ response maps. For band-pass and high-pass filters, this will likely result in values close to 0.0,
+ and "max" or "min" pooling methods should be used instead.
+ * "sum": Each voxel of the pooled response map is the sum of intensities for that voxel in the underlying
+ response maps. Similar to the "mean" pooling method, but without the normalisation.
+
+ separable_wavelet_boundary_condition: str, optional, default: "mirror"
+ Sets the boundary condition for separable wavelets. This supersedes any value set by the general
+ ``boundary_condition`` parameter. See the ``boundary_condition`` parameter above for all valid options.
+
+ nonseparable_wavelet_families: {"shannon", "simoncelli"}
+ Name of non-separable wavelet kernels used for image transformation. Shannon and Simoncelli wavelets are
+ implemented.
+
+ nonseparable_wavelet_decomposition_level: int or list of int, optional, default: 1
+ Sets the wavelet decomposition level. Unlike the decomposition level in separable wavelets, decomposition of
+ non-separable wavelets is purely a filter-based operation.
+
+ nonseparable_wavelet_response: {"modulus", "abs", "magnitude", "angle", "phase", "argument", "real", "imaginary"}, optional, default: "real"
+ Nonseparable wavelets produce response maps with complex numbers. The complex-valued response map is
+ converted to a real-valued response map using the specified method. "modulus", "abs", "magnitude" are
+ synonymous, as are "angle", "phase", and "argument". "real" selects the real component of the complex values,
+ and "imaginary" selects the imaginary component.
+
+ nonseparable_wavelet_boundary_condition: str, optional, default: "mirror"
+ Sets the boundary condition for non-separable wavelets. This supersedes any value set by the general
+ ``boundary_condition`` parameter. See the ``boundary_condition`` parameter above for all valid options.
+
+ gaussian_sigma: float or list of float, optional
+ Width of the Gaussian filter in physical dimensions (e.g. mm). Multiple values can be specified.
+
+ gaussian_kernel_truncate: float, optional, default: 4.0
+ Width, in units of sigma, at which the filter is truncated.
+
+ gaussian_kernel_boundary_condition: str, optional, default: "mirror"
+ Sets the boundary condition for Gaussian filters. This supersedes any value set by the general
+ ``boundary_condition`` parameter. See the ``boundary_condition`` parameter above for all valid options.
+
+ laplacian_of_gaussian_sigma: float or list of float, optional
+ Width of the Gaussian filter in physical dimensions (e.g. mm). Multiple values can be specified.
+
+ laplacian_of_gaussian_kernel_truncate: float, optional, default: 4.0
+ Width, in sigma, at which the filter is truncated.
+
+ laplacian_of_gaussian_pooling_method: {"max", "min", "mean", "sum", "none"}, optional, default: "none"
+ Determines whether and how response maps for filters with different widths (``laplacian_of_gaussian_sigma``)
+ are pooled.
+
+ * "max": Each voxel of the pooled response map represents the maximum value for that voxel in the underlying
+ response maps.
+ * "min": Each voxel of the pooled response map represents the minimum value for that voxel in the underlying
+ response maps.
+ * "mean": Each voxel of the pooled response map represents the mean value for that voxel in the underlying
+ response maps. For band-pass and high-pass filters, this will likely result in values close to 0.0,
+ and "max" or "min" pooling methods should be used instead.
+ * "sum": Each voxel of the pooled response map is the sum of intensities for that voxel in the underlying
+ response maps. Similar to the "mean" pooling method, but without the normalisation.
+ * "none": Each Laplacian-of-Gaussian response map is treated separately, without pooling.
+
+ laplacian_of_gaussian_boundary_condition: str, optional, default: "mirror"
+ Sets the boundary condition for Laplacian-of-Gaussian filters. This supersedes any value set by the general
+ ``boundary_condition`` parameter. See the ``boundary_condition`` parameter above for all valid options.
+
+ laws_kernel: str or list of str, optional
+ Compute specific Laws kernels. These typically are specific combinations of kernels such as L5S5E5,
+ E5E5E5. The following kernels are available: 'l5', 'e5', 's5', 'w5', 'r5', 'l3', 'e3', 's3'. A combination of
+ two kernels is expected for 2D (``by_slice=True``), whereas a kernel triplet is expected for 3D filters (
+ ``by_slice=False``).
+
+ laws_compute_energy: bool, optional, default: True
+ Determine whether an energy image should be computed, or just the response map.
+
+ laws_delta: int or list of int, optional, default: 7
+ Delta for the Chebyshev distance between the center voxel and neighbourhood boundary used to calculate energy maps.
+
+ laws_rotation_invariance: bool, optional, default: True
+ Determines whether separable filters are applied in a pseudo-rotational invariant manner. This generates
+ permutations of the filter and, as a consequence, additional response maps. These maps are then merged using
+ the pooling method (``laws_pooling_method``).
+
+ laws_pooling_method: {"max", "min", "mean", "sum"}, optional, default: "max"
+ Response maps are pooled to create a rotationally invariant response map. This sets the method for
+ pooling.
+
+ * "max": Each voxel of the pooled response map represents the maximum value for that voxel in the underlying
+ response maps.
+ * "min": Each voxel of the pooled response map represents the minimum value for that voxel in the underlying
+ response maps.
+ * "mean": Each voxel of the pooled response map represents the mean value for that voxel in the underlying
+ response maps. For band-pass and high-pass filters, this will likely result in values close to 0.0,
+ and "max" or "min" pooling methods should be used instead.
+ * "sum": Each voxel of the pooled response map is the sum of intensities for that voxel in the underlying
+ response maps. Similar to the "mean" pooling method, but without the normalisation.
+
+ laws_boundary_condition: str, optional, default: "mirror"
+ Sets the boundary condition for Laws filters. This supersedes any value set by the general
+ ``boundary_condition`` parameter. See the ``boundary_condition`` parameter above for all valid options.
+
+ gabor_sigma: float or list of float, optional
+ Width of the Gaussian envelope in physical dimensions (e.g. mm). Multiple values can be specified.
+
+ gabor_lambda: float or list of float, optional
+ Wavelength of the oscillator component of the Gabor filter, in physical dimensions (e.g. mm).
+
+ gabor_gamma: float or list of float, optional, default: 1.0
+ Eccentricity parameter of the Gaussian envelope of the Gabor kernel. Defines width of y-axis relative to
+ x-axis for 0-angle Gabor kernel. Default: 1.0
+
+ gabor_theta: float or list of float, optional, default: 0.0
+ Initial angle of the Gabor filter in degrees (not radians). Multiple angles can be provided.
+
+ gabor_theta_step: float, optional, default: None
+ Angle step size in degrees for in-plane rotational invariance. A value of 0.0 or None (default) disables
+ stepping.
+
+ gabor_response: {"modulus", "abs", "magnitude", "angle", "phase", "argument", "real", "imaginary"}, optional, default: "modulus"
+ Type of response map created by Gabor filters. Gabor kernels consist of complex numbers, and the response map
+ will be complex as well. The complex-valued response map is converted to a real-valued response map using the
+ specified method.
+
+ gabor_rotation_invariance: bool, optional, default: False
+ Determines whether (2D) Gabor filters are applied in a pseudo-rotational invariant manner. If True,
+ Gabor filters are applied in each of the orthogonal planes.
+
+ gabor_pooling_method: {"max", "min", "mean", "sum"}, optional, default: "max"
+ Response maps are pooled to create a rotationally invariant response map. This sets the method for
+ pooling.
+
+ * "max": Each voxel of the pooled response map represents the maximum value for that voxel in the underlying
+ response maps.
+ * "min": Each voxel of the pooled response map represents the minimum value for that voxel in the underlying
+ response maps.
+ * "mean": Each voxel of the pooled response map represents the mean value for that voxel in the underlying
+ response maps. For band-pass and high-pass filters, this will likely result in values close to 0.0,
+ and "max" or "min" pooling methods should be used instead.
+ * "sum": Each voxel of the pooled response map is the sum of intensities for that voxel in the underlying
+ response maps. Similar to the "mean" pooling method, but without the normalisation.
+
+ gabor_boundary_condition: str, optional, default: "mirror"
+ Sets the boundary condition for Gabor filters. This supersedes any value set by the general
+ ``boundary_condition`` parameter. See the ``boundary_condition`` parameter above for all valid options.
+
+ mean_filter_kernel_size: int or list of int, optional
+ Length of the kernel in pixels. Multiple values can be specified to create multiple response maps.
+
+ mean_filter_boundary_condition: str, optional, default: "mirror"
+ Sets the boundary condition for mean filters. This supersedes any value set by the general
+ ``boundary_condition`` parameter. See the ``boundary_condition`` parameter above for all valid options.
+
+ riesz_filter_order: float, list of float or list of list of float, optional
+ Riesz-transformation order. If required, should be a 2 (2D filter), or 3-element (3D filter) integer
+ vector, e.g. [0,0,1]. Multiple sets can be provided by nesting the list, e.g. [[0, 0, 1],
+ [0, 1, 0]]. If an integer is provided, a set of filters is created. For example when
+ riesz_filter_order = 2 and a 2D filter is used, the following Riesz-transformations are performed: [2,
+ 0], [1, 1] and [0, 2].
+
+ .. note::
+ Riesz filter order uses the numpy coordinate ordering and represents (z, y, x) directions.
+
+ riesz_filter_tensor_sigma: float or list of float, optional
+ Determines width of Gaussian filter used with Riesz filter banks.
+
+ **kwargs: dict, optional
+ Unused keyword arguments.
+
+ """
+
+ def __init__(
+ self,
+ by_slice: bool,
+ ibsi_compliant: bool = True,
+ response_map_feature_settings: FeatureExtractionSettingsClass | None = None,
+ response_map_feature_families: None | str | list[str] = "statistical",
+ response_map_discretisation_method: None | str | list[str] = "fixed_bin_number",
+ response_map_discretisation_n_bins: None | int | list[int] = 16,
+ response_map_discretisation_bin_width: None | int | list[float] = None,
+ filter_kernels: None | str | list[str] = None,
+ boundary_condition: None | str = "mirror",
+ separable_wavelet_families: None | str | list[str] = None,
+ separable_wavelet_set: None | str | list[str] = None,
+ separable_wavelet_stationary: bool = True,
+ separable_wavelet_decomposition_level: None | int | list[int] = 1,
+ separable_wavelet_rotation_invariance: bool = True,
+ separable_wavelet_pooling_method: str = "max",
+ separable_wavelet_boundary_condition: None | str = None,
+ nonseparable_wavelet_families: None | str | list[str] = None,
+ nonseparable_wavelet_decomposition_level: None | int | list[int] = 1,
+ nonseparable_wavelet_response: None | str = "real",
+ nonseparable_wavelet_boundary_condition: None | str = None,
+ gaussian_sigma: None | float | list[float] = None,
+ gaussian_kernel_truncate: None | float = 4.0,
+ gaussian_kernel_boundary_condition: None | str = None,
+ laplacian_of_gaussian_sigma: None | float | list[float] = None,
+ laplacian_of_gaussian_kernel_truncate: None | float = 4.0,
+ laplacian_of_gaussian_pooling_method: str = "none",
+ laplacian_of_gaussian_boundary_condition: None | str = None,
+ laws_kernel: None | str | list[str] = None,
+ laws_delta: int | list[int] = 7,
+ laws_compute_energy: bool = True,
+ laws_rotation_invariance: bool = True,
+ laws_pooling_method: str = "max",
+ laws_boundary_condition: None | str = None,
+ gabor_sigma: None | float | list[float] = None,
+ gabor_lambda: None | float | list[float] = None,
+ gabor_gamma: None | float | list[float] = 1.0,
+ gabor_theta: None | float | list[float] = 0.0,
+ gabor_theta_step: None | float = None,
+ gabor_response: str = "modulus",
+ gabor_rotation_invariance: bool = False,
+ gabor_pooling_method: str = "max",
+ gabor_boundary_condition: None | str = None,
+ mean_filter_kernel_size: None | int | list[int] = None,
+ mean_filter_boundary_condition: None | str = None,
+ riesz_filter_order: None | int | list[int] = None,
+ riesz_filter_tensor_sigma: None | float | list[float] = None,
+ **kwargs
+ ):
+ # Set by slice
+ self.by_slice: bool = by_slice
+
+ # Set IBSI-compliance flag.
+ self.ibsi_compliant: bool = ibsi_compliant
+
+ # Check filter kernels
+ if not isinstance(filter_kernels, list):
+ filter_kernels = [filter_kernels]
+
+ if any(filter_kernel is None for filter_kernel in filter_kernels):
+ filter_kernels = None
+
+ if filter_kernels is not None:
+ # Check validity of the filter kernel names.
+ valid_kernels: list[bool] = [ii in self.get_available_image_filters() for ii in filter_kernels]
+
+ if not all(valid_kernels):
+ raise ValueError(
+ f"One or more kernels are not implemented, or were spelled incorrectly: "
+ f"{', '.join([filter_kernel for ii, filter_kernel in enumerate(filter_kernels) if not valid_kernels[ii]])}")
+
+ self.spatial_filters: None | list[str] = filter_kernels
+
+ # Check families.
+ if response_map_feature_families is None:
+ response_map_feature_families = "none"
+
+ if not isinstance(response_map_feature_families, list):
+ response_map_feature_families = [response_map_feature_families]
+
+ # Check which entries are valid.
+ valid_families: list[bool] = [ii in [
+ "li", "loc.int", "loc_int", "local_int", "local_intensity", "st", "stat", "stats", "statistics",
+ "statistical", "ih", "int_hist", "int_histogram", "intensity_histogram",
+ "ivh", "int_vol_hist", "intensity_volume_histogram", "cm", "glcm", "grey_level_cooccurrence_matrix",
+ "cooccurrence_matrix", "rlm", "glrlm", "grey_level_run_length_matrix", "run_length_matrix",
+ "szm", "glszm", "grey_level_size_zone_matrix", "size_zone_matrix", "dzm", "gldzm",
+ "grey_level_distance_zone_matrix", "distance_zone_matrix", "tdm", "ngtdm",
+ "neighbourhood_grey_tone_difference_matrix", "grey_tone_difference_matrix", "ldm", "ngldm",
+ "neighbouring_grey_level_dependence_matrix", "grey_level_dependence_matrix", "all", "none"
+ ] for ii in response_map_feature_families]
+
+ if not all(valid_families):
+ raise ValueError(
+ f"One or more families in the base_feature_families parameter were not recognised: "
+ f"{', '.join([response_map_feature_families[ii] for ii, is_valid in enumerate(valid_families) if not is_valid])}"
+ )
+
+ # Create a temporary feature settings object. If response_map_feature_settings is not present, this object is
+ # used. Otherwise, response_map_feature_settings is copied, and then updated.
+ if response_map_feature_settings is None:
+
+ kwargs = copy.deepcopy(kwargs)
+ kwargs.update({
+ "base_feature_families": response_map_feature_families,
+ "base_discretisation_method": response_map_discretisation_method,
+ "base_discretisation_bin_width": response_map_discretisation_bin_width,
+ "base_discretisation_n_bins": response_map_discretisation_n_bins
+ })
+
+ response_map_feature_settings = FeatureExtractionSettingsClass(
+ by_slice=by_slice,
+ no_approximation=False,
+ ibsi_compliant=ibsi_compliant,
+ **kwargs
+ )
+
+ # Set feature settings.
+ self.feature_settings: FeatureExtractionSettingsClass = response_map_feature_settings
+
+ # Check boundary condition.
+ self.boundary_condition = boundary_condition
+ self.boundary_condition: str = self.check_boundary_condition(
+ boundary_condition,
+ "boundary_condition")
+
+ # Check mean filter settings
+ if self.has_mean_filter():
+ # Check filter size.
+ if not isinstance(mean_filter_kernel_size, list):
+ mean_filter_kernel_size = [mean_filter_kernel_size]
+
+ if not all(isinstance(kernel_size, int) for kernel_size in mean_filter_kernel_size):
+ raise TypeError(
+ f"All kernel sizes for the mean filter are expected to be integer values equal or "
+ f"greater than 1. Found: one or more kernel sizes that were not integers.")
+
+ if not all(kernel_size >= 1 for kernel_size in mean_filter_kernel_size):
+ raise ValueError(
+ f"All kernel sizes for the mean filter are expected to be integer values equal or "
+ f"greater than 1. Found: one or more kernel sizes less then 1.")
+
+ # Check boundary condition
+ mean_filter_boundary_condition = self.check_boundary_condition(
+ mean_filter_boundary_condition,
+ "mean_filter_boundary_condition")
+
+ else:
+ mean_filter_kernel_size = None
+ mean_filter_boundary_condition = None
+
+ self.mean_filter_size: None | list[int] = mean_filter_kernel_size
+ self.mean_filter_boundary_condition: None | str = mean_filter_boundary_condition
+
+ # Check Gaussian kernel settings.
+ if self.has_gaussian_filter():
+ # Check sigma.
+ gaussian_sigma = self.check_sigma(
+ gaussian_sigma,
+ "gaussian_sigma")
+
+ # Check filter truncation.
+ gaussian_kernel_truncate = self.check_truncation(
+ gaussian_kernel_truncate,
+ "gaussian_kernel_truncate")
+
+ # Check boundary condition
+ gaussian_kernel_boundary_condition = self.check_boundary_condition(
+ gaussian_kernel_boundary_condition,
+ "gaussian_kernel_boundary_condition")
+
+ else:
+ gaussian_sigma = None
+ gaussian_kernel_truncate = None
+ gaussian_kernel_boundary_condition = None
+
+ self.gaussian_sigma: None | list[float] = gaussian_sigma
+ self.gaussian_sigma_truncate: None | float = gaussian_kernel_truncate
+ self.gaussian_boundary_condition: None | str = gaussian_kernel_boundary_condition
+
+ # Check laplacian-of-gaussian filter settings
+ if self.has_laplacian_of_gaussian_filter():
+ # Check sigma.
+ laplacian_of_gaussian_sigma = self.check_sigma(
+ laplacian_of_gaussian_sigma,
+ "laplacian_of_gaussian_sigma")
+
+ # Check filter truncation.
+ laplacian_of_gaussian_kernel_truncate = self.check_truncation(
+ laplacian_of_gaussian_kernel_truncate,
+ "laplacian_of_gaussian_kernel_truncate")
+
+ # Check pooling method.
+ laplacian_of_gaussian_pooling_method = self.check_pooling_method(
+ laplacian_of_gaussian_pooling_method,
+ "laplacian_of_gaussian_pooling_method",
+ allow_none=True)
+
+ # Check boundary condition.
+ laplacian_of_gaussian_boundary_condition = self.check_boundary_condition(
+ laplacian_of_gaussian_boundary_condition, "laplacian_of_gaussian_boundary_condition")
+
+ else:
+ laplacian_of_gaussian_sigma = None
+ laplacian_of_gaussian_kernel_truncate = None
+ laplacian_of_gaussian_pooling_method = None
+ laplacian_of_gaussian_boundary_condition = None
+
+ self.log_sigma: None | list[float] = laplacian_of_gaussian_sigma
+ self.log_sigma_truncate: None | float = laplacian_of_gaussian_kernel_truncate
+ self.log_pooling_method: None | str = laplacian_of_gaussian_pooling_method
+ self.log_boundary_condition: None | str = laplacian_of_gaussian_boundary_condition
+
+ # Check Laws kernel filter settings
+ if self.has_laws_filter():
+ # Check kernel.
+ laws_kernel = self.check_laws_kernels(laws_kernel, "laws_kernel")
+
+ # Check energy computation.
+ if not isinstance(laws_compute_energy, bool):
+ raise TypeError("The laws_compute_energy parameter is expected to be a boolean value.")
+
+ if laws_compute_energy:
+
+ # Check delta.
+ if not isinstance(laws_delta, list):
+ laws_delta = [laws_delta]
+
+ if not all(isinstance(delta, int) for delta in laws_delta):
+ raise TypeError(
+ "The laws_delta parameter is expected to be one or more integers with value 0 or "
+ "greater. Found: one or more values that are not integer.")
+
+ if not all(delta >= 0 for delta in laws_delta):
+ raise ValueError(
+ "The laws_delta parameter is expected to be one or more integers with value 0 or "
+ "greater. Found: one or more values that are less than 0.")
+
+ else:
+ laws_delta = None
+
+ # Check invariance.
+ if not isinstance(laws_rotation_invariance, bool):
+ raise TypeError("The laws_rotation_invariance parameter is expected to be a boolean value.")
+
+ # Check pooling method.
+ laws_pooling_method = self.check_pooling_method(laws_pooling_method, "laws_pooling_method")
+
+ # Check boundary condition
+ laws_boundary_condition = self.check_boundary_condition(laws_boundary_condition, "laws_boundary_condition")
+
+ else:
+ laws_kernel = None
+ laws_compute_energy = None,
+ laws_delta = None
+ laws_rotation_invariance = None
+ laws_pooling_method = None
+ laws_boundary_condition = None
+
+ self.laws_calculate_energy: None | bool = laws_compute_energy
+ self.laws_kernel: None | list[str] = laws_kernel
+ self.laws_delta: None | bool = laws_delta
+ self.laws_rotation_invariance: None | bool = laws_rotation_invariance
+ self.laws_pooling_method: None | str = laws_pooling_method
+ self.laws_boundary_condition: None | str = laws_boundary_condition
+
+ # Check Gabor filter settings.
+ if self.has_gabor_filter():
+ # Check sigma.
+ gabor_sigma = self.check_sigma(gabor_sigma, "gabor_sigma")
+
+ # Check gamma. Gamma behaves like sigma.
+ gabor_gamma = self.check_sigma(gabor_gamma, "gabor_gamma")
+
+ # Check lambda. Lambda behaves like sigma
+ gabor_lambda = self.check_sigma(gabor_lambda, "gabor_lambda")
+
+ # Check theta step.
+ if gabor_theta_step is not None:
+ if not isinstance(gabor_theta_step, (float, int)):
+ raise TypeError(
+ "The gabor_theta_step parameter is expected to be an angle, in degrees. Found a "
+ "value that was not a number.")
+
+ if gabor_theta_step == 0.0:
+ gabor_theta_step = None
+
+ if gabor_theta_step is not None:
+ # Check that the step would divide the 360-degree circle into an integer number of steps.
+ if not (360.0 / gabor_theta_step).is_integer():
+ raise ValueError(
+ f"The gabor_theta_step parameter should divide a circle into equal portions. "
+ f"The current settings would create {360.0 / gabor_theta_step} portions.")
+
+ # Check theta.
+ gabor_pool_theta = gabor_theta_step is not None
+
+ if not isinstance(gabor_theta, list):
+ gabor_theta = [gabor_theta]
+
+ if gabor_theta_step is not None and len(gabor_theta) > 1:
+ raise ValueError(
+ f"The gabor_theta parameter cannot have more than one value when used in conjunction"
+ f" with the gabor_theta_step parameter")
+
+ if not all(isinstance(theta, (float, int)) for theta in gabor_theta):
+ raise TypeError(
+ f"The gabor_theta parameter is expected to be one or more values indicating angles in"
+ f" degrees. Found: one or more values that were not numeric.")
+
+ if gabor_theta_step is not None:
+ gabor_theta = [gabor_theta[0] + ii for ii in np.arange(0.0, 360.0, gabor_theta_step)]
+
+ # Check filter response.
+ gabor_response = self.check_response(gabor_response, "gabor_response")
+
+ # Check rotation invariance
+ if not isinstance(gabor_rotation_invariance, bool):
+ raise TypeError("The gabor_rotation_invariance parameter is expected to be a boolean value.")
+
+ # Check pooling method
+ gabor_pooling_method = self.check_pooling_method(gabor_pooling_method, "gabor_pooling_method")
+
+ # Check boundary condition
+ gabor_boundary_condition = self.check_boundary_condition(
+ gabor_boundary_condition, "gabor_boundary_condition")
+
+ else:
+ gabor_sigma = None
+ gabor_gamma = None
+ gabor_lambda = None
+ gabor_theta = None
+ gabor_pool_theta = None
+ gabor_response = None
+ gabor_rotation_invariance = None
+ gabor_pooling_method = None
+ gabor_boundary_condition = None
+
+ self.gabor_sigma: None | list[float] = gabor_sigma
+ self.gabor_gamma: None | list[float] = gabor_gamma
+ self.gabor_lambda: None | list[float] = gabor_lambda
+ self.gabor_theta: None | list[float] | list[int] = gabor_theta
+ self.gabor_pool_theta: None | bool = gabor_pool_theta
+ self.gabor_response: None | str = gabor_response
+ self.gabor_rotation_invariance: None | str = gabor_rotation_invariance
+ self.gabor_pooling_method: None | str = gabor_pooling_method
+ self.gabor_boundary_condition: None | str = gabor_boundary_condition
+
+ # Check separable wavelet settings.
+ if self.has_separable_wavelet_filter():
+ # Check wavelet families.
+ separable_wavelet_families = self.check_separable_wavelet_families(
+ separable_wavelet_families, "separable_wavelet_families")
+
+ # Check wavelet filter sets.
+ separable_wavelet_set = self.check_separable_wavelet_sets(separable_wavelet_set, "separable_wavelet_set")
+
+ # Check if wavelet is stationary
+ if not isinstance(separable_wavelet_stationary, bool):
+ raise TypeError(f"The separable_wavelet_stationary parameter is expected to be a boolean value.")
+
+ # Check decomposition level
+ separable_wavelet_decomposition_level = self.check_decomposition_level(
+ separable_wavelet_decomposition_level, "separable_wavelet_decomposition_level")
+
+ # Check rotation invariance
+ if not isinstance(separable_wavelet_rotation_invariance, bool):
+ raise TypeError("The separable_wavelet_rotation_invariance parameter is expected to be a boolean value.")
+
+ # Check pooling method.
+ separable_wavelet_pooling_method = self.check_pooling_method(
+ separable_wavelet_pooling_method, "separable_wavelet_pooling_method")
+
+ # Check boundary condition.
+ separable_wavelet_boundary_condition = self.check_boundary_condition(
+ separable_wavelet_boundary_condition, "separable_wavelet_boundary_condition")
+
+ else:
+ separable_wavelet_families = None
+ separable_wavelet_set = None
+ separable_wavelet_stationary = None
+ separable_wavelet_decomposition_level = None
+ separable_wavelet_rotation_invariance = None
+ separable_wavelet_pooling_method = None
+ separable_wavelet_boundary_condition = None
+
+ self.separable_wavelet_families: None | list[str] = separable_wavelet_families
+ self.separable_wavelet_filter_set: None | list[str] = separable_wavelet_set
+ self.separable_wavelet_stationary: None | bool = separable_wavelet_stationary
+ self.separable_wavelet_decomposition_level: None | list[int] = separable_wavelet_decomposition_level
+ self.separable_wavelet_rotation_invariance: None | bool = separable_wavelet_rotation_invariance
+ self.separable_wavelet_pooling_method: None | str = separable_wavelet_pooling_method
+ self.separable_wavelet_boundary_condition: None | str = separable_wavelet_boundary_condition
+
+ # Set parameters for non-separable wavelets.
+ if self.has_nonseparable_wavelet_filter():
+ # Check wavelet families.
+ nonseparable_wavelet_families = self.check_nonseparable_wavelet_families(
+ nonseparable_wavelet_families, "nonseparable_wavelet_families")
+
+ # Check decomposition level.
+ nonseparable_wavelet_decomposition_level = self.check_decomposition_level(
+ nonseparable_wavelet_decomposition_level, "nonseparable_wavelet_decomposition_level")
+
+ # Check filter response.
+ nonseparable_wavelet_response = self.check_response(
+ nonseparable_wavelet_response, "nonseparable_wavelet_response")
+
+ # Check boundary condition.
+ nonseparable_wavelet_boundary_condition = self.check_boundary_condition(
+ nonseparable_wavelet_boundary_condition, "nonseparable_wavelet_boundary_condition")
+
+ else:
+ nonseparable_wavelet_families = None
+ nonseparable_wavelet_decomposition_level = None
+ nonseparable_wavelet_response = None
+ nonseparable_wavelet_boundary_condition = None
+
+ self.nonseparable_wavelet_families: None | list[str] = nonseparable_wavelet_families
+ self.nonseparable_wavelet_decomposition_level: None | list[int] = nonseparable_wavelet_decomposition_level
+ self.nonseparable_wavelet_response: None | str = nonseparable_wavelet_response
+ self.nonseparable_wavelet_boundary_condition: None | str = nonseparable_wavelet_boundary_condition
+
+ # Check Riesz filter orders.
+ if self.has_riesz_filter():
+ riesz_filter_order = self.check_riesz_filter_order(riesz_filter_order, "riesz_filter_order")
+
+ else:
+ riesz_filter_order = None
+
+ if self.has_steered_riesz_filter() and self.ibsi_compliant:
+ raise ValueError(
+ "The steered riesz filters are not part of the IBSI reference standard. If you are sure that you want "
+ "to use this method, use ibsi_compliant = False."
+ )
+
+ elif self.has_riesz_filter():
+ riesz_filter_tensor_sigma = self.check_sigma(riesz_filter_tensor_sigma, "riesz_filter_tensor_sigma")
+
+ else:
+ riesz_filter_tensor_sigma = None
+
+ self.riesz_order: None | list[list[int]] = riesz_filter_order
+ self.riesz_filter_tensor_sigma: None | list[float] = riesz_filter_tensor_sigma
+
+ if ibsi_compliant and self.has_square_transform_filter():
+ raise ValueError(
+ "The square transformation filter is not part of the IBSI reference standard. If you are sure that "
+ "you want to use this method, use ibsi_compliant = False."
+ )
+ if ibsi_compliant and self.has_square_root_transform_filter():
+ raise ValueError(
+ "The square root transformation filter is not part of the IBSI reference standard. If you are sure "
+ "that you want to use this method, use ibsi_compliant = False."
+ )
+ if ibsi_compliant and self.has_logarithm_transform_filter():
+ raise ValueError(
+ "The logarithmic transformation filter is not part of the IBSI reference standard. If you are sure "
+ "that you want to use this method, use ibsi_compliant = False."
+ )
+ if ibsi_compliant and self.has_exponential_transform_filter():
+ raise ValueError(
+ "The exponential transformation filter is not part of the IBSI reference standard. If you are sure "
+ "that you want to use this method, use ibsi_compliant = False."
+ )
+
+ @staticmethod
+ def get_available_image_filters():
+ return [
+ "separable_wavelet", "nonseparable_wavelet", "riesz_nonseparable_wavelet",
+ "riesz_steered_nonseparable_wavelet", "gaussian", "riesz_gaussian", "riesz_steered_gaussian",
+ "laplacian_of_gaussian", "log", "riesz_laplacian_of_gaussian", "riesz_steered_laplacian_of_gaussian",
+ "riesz_log", "riesz_steered_log", "laws", "gabor", "riesz_gabor", "riesz_steered_gabor", "mean",
+ "pyradiomics_square", "pyradiomics_square_root", "pyradiomics_logarithm", "pyradiomics_exponential"
+ ]
+
+ def check_boundary_condition(self, x, var_name):
+ if x is None:
+ if self.boundary_condition is not None:
+ # Avoid updating by reference.
+ x = copy.deepcopy(self.boundary_condition)
+
+ else:
+ raise ValueError(f"No value for the {var_name} parameter could be set, due to a lack of a default.")
+
+ # Check value
+ if x not in ["reflect", "constant", "nearest", "mirror", "wrap"]:
+ raise ValueError(
+ f"The provided value for the {var_name} is not valid. One of 'reflect', 'constant', "
+ f"'nearest', 'mirror' or 'wrap' was expected. Found: {x}")
+
+ return x
+
+ @staticmethod
+ def check_pooling_method(x, var_name, allow_none=False):
+
+ valid_pooling_method = ["max", "min", "mean", "sum"]
+ if allow_none:
+ valid_pooling_method += ["none"]
+
+ if x not in valid_pooling_method:
+ raise ValueError(
+ f"The {var_name} parameter expects one of the following values: "
+ f"{', '.join(valid_pooling_method)}. Found: {x}")
+
+ return x
+
+ @staticmethod
+ def check_sigma(x, var_name):
+ # Check sigma is a list.
+ if not isinstance(x, list):
+ x = [x]
+
+ # Check that the sigma values are floating points.
+ if not all(isinstance(sigma, float) for sigma in x):
+ raise TypeError(
+ f"The {var_name} parameter is expected to consists of floating points with values "
+ f"greater than 0.0. Found: one or more values that were not floating points.")
+
+ if not all(sigma > 0.0 for sigma in x):
+ raise ValueError(
+ f"The {var_name} parameter is expected to consists of floating points with values "
+ f"greater than 0.0. Found: one or more values with value 0.0 or less.")
+
+ return x
+
+ @staticmethod
+ def check_truncation(x, var_name):
+
+ # Check that the truncation values are floating points.
+ if not isinstance(x, float):
+ raise TypeError(
+ f"The {var_name} parameter is expected to be a floating point with value "
+ f"greater than 0.0. Found: a value that was not a floating point.")
+
+ if not x > 0.0:
+ raise ValueError(
+ f"The {var_name} parameter is expected to be a floating point with value "
+ f"greater than 0.0. Found: a value of 0.0 or less.")
+
+ return x
+
+ @staticmethod
+ def check_response(x, var_name):
+
+ valid_response = ["modulus", "abs", "magnitude", "angle", "phase", "argument", "real", "imaginary"]
+
+ # Check that response is correct.
+ if x not in valid_response:
+ raise ValueError(
+ f"The {var_name} parameter is not correct. Expected one of {', '.join(valid_response)}. "
+ f"Found: {x}")
+
+ return x
+
+ @staticmethod
+ def check_separable_wavelet_families(x, var_name):
+ # Import pywavelets.
+ import pywt
+
+ if not isinstance(x, list):
+ x = [x]
+
+ available_kernels = pywt.wavelist(kind="discrete")
+ valid_kernel = [kernel.lower() in available_kernels for kernel in x]
+
+ if not all(valid_kernel):
+ raise ValueError(
+ f"The {var_name} parameter requires wavelet families that match those defined in the "
+ f"pywavelets package. Could not match: "
+ f"{', '.join([kernel for ii, kernel in x if not valid_kernel[ii]])}")
+
+ # Return lowercase values.
+ return [xx.lower() for xx in x]
+
+ @staticmethod
+ def check_nonseparable_wavelet_families(x, var_name):
+ if not isinstance(x, list):
+ x = [x]
+
+ available_kernels = ["simoncelli", "shannon"]
+ valid_kernel = [kernel.lower() in available_kernels for kernel in x]
+
+ if not all(valid_kernel):
+ raise ValueError(
+ f"The {var_name} parameter expects one or more of the following values: "
+ f"{', '.join(available_kernels)}. Could not match: "
+ f"{', '.join([kernel for ii, kernel in x if not valid_kernel[ii]])}")
+
+ # Return lowercase values.
+ return [xx.lower() for xx in x]
+
+ @staticmethod
+ def check_decomposition_level(x, var_name):
+ if not isinstance(x, list):
+ x = [x]
+
+ if not all(isinstance(xx, int) for xx in x):
+ raise TypeError(
+ f"The {var_name} parameter should be one or more integer "
+ f"values of at least 1. Found: one or more values that was not an integer.")
+
+ if not all(xx >= 1 for xx in x):
+ raise ValueError(
+ f"The {var_name} parameter should be one or more integer "
+ f"values of at least 1. Found: one or more values that was not an integer.")
+
+ return x
+
+ def check_separable_wavelet_sets(self, x: None | str | list[str], var_name: str):
+ from itertools import product
+
+ if x is None:
+ if self.by_slice:
+ x = "hh"
+ else:
+ x = "hhh"
+
+ # Check if x is a list.
+ if not isinstance(x, list):
+ x = [x]
+
+ # Generate all potential combinations.
+ if self.by_slice:
+ possible_combinations = ["".join(combination) for combination in product(["l", "h"], repeat=2)]
+
+ else:
+ possible_combinations = ["".join(combination) for combination in product(["l", "h"], repeat=3)]
+
+ # Check for all.
+ if any(kernel == "all" for kernel in x):
+ x = possible_combinations
+
+ # Check which kernels are valid.
+ valid_kernel = [kernel.lower() in possible_combinations for kernel in x]
+
+ if not all(valid_kernel):
+ raise ValueError(
+ f"The {var_name} parameter requires combinations of low (l) and high-pass (h) kernels. "
+ f"Two kernels should be specified for 2D, and three for 3D. Found the following invalid "
+ f"combinations: "
+ f"{', '.join([kernel for ii, kernel in enumerate(x) if not valid_kernel[ii]])}")
+
+ # Return lowercase values.
+ return [xx.lower() for xx in x]
+
+ def check_laws_kernels(self, x: str | list[str], var_name: str):
+ from itertools import product
+
+ # Set implemented kernels.
+ kernels = ['l5', 'e5', 's5', 'w5', 'r5', 'l3', 'e3', 's3']
+
+ # Generate all valid combinations.
+ if self.by_slice:
+ possible_combinations = ["".join(combination) for combination in product(kernels, repeat=2)]
+
+ else:
+ possible_combinations = ["".join(combination) for combination in product(kernels, repeat=3)]
+
+ # Create list.
+ if not isinstance(x, list):
+ x = [x]
+
+ # Check which kernels are valid.
+ valid_kernel = [kernel.lower() in possible_combinations for kernel in x]
+
+ if not all(valid_kernel):
+ raise ValueError(
+ f"The {var_name} parameter requires combinations of Laws kernels. The follow kernels are "
+ f"implemented: {', '.join(kernels)}. Two kernels should be specified for 2D, "
+ f"and three for 3D. Found the following illegal combinations: "
+ f"{', '.join([kernel for ii, kernel in enumerate(x) if not valid_kernel[ii]])}")
+
+ # Return lowercase values.
+ return [xx.lower() for xx in x]
+
    def check_riesz_filter_order(self, x, var_name):
        """Validate and normalise Riesz filter orders.

        :param x: None (no Riesz transform), a single non-negative integer (expanded to
            every order tuple summing to that value), a single order (flat list of ints),
            or a list of orders.
        :param var_name: parameter name used in error messages.
        :return: None, or a nested list of filter orders, each with 2 (2D) or 3 (3D)
            non-negative integer elements.
        :raises TypeError: if x is not a list after integer expansion.
        :raises ValueError: if x is negative, or any order has the wrong number of
            elements, non-integer elements, or negative elements.
        """
        from itertools import product

        # Skip if None: no Riesz filter order was requested.
        if x is None:
            return x

        # Set number of elements that the filter order should have: two in 2D
        # (by-slice) filtering, three in 3D.
        if self.by_slice:
            n_elements = 2

        else:
            n_elements = 3

        # Create filterbank from a single integer: expand to all order tuples of
        # length n_elements whose elements sum to that integer.
        if isinstance(x, int):
            # Check that x is not negative.
            if x < 0:
                raise ValueError(f"The {var_name} parameter cannot be negative.")

            # Set the candidate values 0..x for each position of the filter order.
            single_filter_order = list(range(x+1))

            # Generate all valid combinations (the sum condition uses the original
            # integer value of x, evaluated before x is reassigned).
            x = [list(combination) for combination in product(single_filter_order, repeat=n_elements) if
                 sum(combination) == x]

        if not isinstance(x, list):
            raise TypeError(f"The {var_name} parameter is expected to be a list")

        # Wrap a single flat order (list of ints) into a nested list of orders.
        if not all(isinstance(xx, list) for xx in x):
            x = [x]

        # Check that all orders have the right length and contain no negative values.
        if not all(len(xx) == n_elements for xx in x):
            raise ValueError(
                f"The {var_name} parameter is expected to contain filter orders, each consisting of "
                f"{n_elements} non-negative integer values. One or more filter orders did not have the "
                f"expected number of elements.")

        if not all(all(isinstance(xxx, int) for xxx in xx) for xx in x):
            raise ValueError(
                f"The {var_name} parameter is expected to contain filter orders, each consisting of "
                f"{n_elements} non-negative integer values. One or more filter orders did not fully "
                f"consist of integer values.")

        if not all(all(xxx >= 0 for xxx in xx) for xx in x):
            raise ValueError(
                f"The {var_name} parameter is expected to contain filter orders, each consisting of "
                f"{n_elements} non-negative integer values. One or more filter orders contained negative values.")

        return x
+
+ def has_mean_filter(self, x=None):
+ if x is None:
+ x = self.spatial_filters
+ elif not isinstance(x, list):
+ x = [x]
+
+ return x is not None and any(filter_kernel in ["mean"] for filter_kernel in x)
+
+ def has_gaussian_filter(self, x=None):
+ if x is None:
+ x = self.spatial_filters
+ elif not isinstance(x, list):
+ x = [x]
+
+ return x is not None and any(
+ filter_kernel in ["gaussian", "riesz_gaussian", "riesz_steered_gaussian"] for filter_kernel in x)
+
+ def has_laplacian_of_gaussian_filter(self, x=None):
+ if x is None:
+ x = self.spatial_filters
+ elif not isinstance(x, list):
+ x = [x]
+
+ return x is not None and any(
+ filter_kernel in [
+ "laplacian_of_gaussian", "log", "riesz_laplacian_of_gaussian", "riesz_log",
+ "riesz_steered_laplacian_of_gaussian", "riesz_steered_log"
+ ] for filter_kernel in x)
+
+ def has_laws_filter(self, x=None):
+ if x is None:
+ x = self.spatial_filters
+ elif not isinstance(x, list):
+ x = [x]
+
+ return x is not None and any(filter_kernel in ["laws"] for filter_kernel in x)
+
+ def has_gabor_filter(self, x=None):
+ if x is None:
+ x = self.spatial_filters
+ elif not isinstance(x, list):
+ x = [x]
+
+ return x is not None and any(
+ filter_kernel in ["gabor", "riesz_gabor", "riesz_steered_gabor"] for filter_kernel in x)
+
+ def has_separable_wavelet_filter(self, x=None):
+ if x is None:
+ x = self.spatial_filters
+ elif not isinstance(x, list):
+ x = [x]
+
+ return x is not None and any(filter_kernel in ["separable_wavelet"] for filter_kernel in x)
+
+ def has_nonseparable_wavelet_filter(self, x=None):
+ if x is None:
+ x = self.spatial_filters
+ elif not isinstance(x, list):
+ x = [x]
+
+ return x is not None and any(
+ filter_kernel in [
+ "nonseparable_wavelet", "riesz_nonseparable_wavelet", "riesz_steered_nonseparable_wavelet"
+ ] for filter_kernel in x)
+
+ def has_riesz_filter(self, x=None):
+ if x is None:
+ x = self.spatial_filters
+ elif not isinstance(x, list):
+ x = [x]
+
+ return x is not None and any(filter_kernel.startswith("riesz") for filter_kernel in x)
+
+ def has_steered_riesz_filter(self, x=None):
+ if x is None:
+ x = self.spatial_filters
+ elif not isinstance(x, list):
+ x = [x]
+
+ return x is not None and any(filter_kernel.startswith("riesz_steered") for filter_kernel in x)
+
+ def has_square_transform_filter(self, x=None):
+ if x is None:
+ x = self.spatial_filters
+ elif not isinstance(x, list):
+ x = [x]
+
+ return x is not None and any(filter_kernel == "pyradiomics_square" for filter_kernel in x)
+
+ def has_square_root_transform_filter(self, x=None):
+ if x is None:
+ x = self.spatial_filters
+ elif not isinstance(x, list):
+ x = [x]
+
+ return x is not None and any(filter_kernel == "pyradiomics_square_root" for filter_kernel in x)
+
+ def has_logarithm_transform_filter(self, x=None):
+ if x is None:
+ x = self.spatial_filters
+ elif not isinstance(x, list):
+ x = [x]
+
+ return x is not None and any(filter_kernel == "pyradiomics_logarithm" for filter_kernel in x)
+
+ def has_exponential_transform_filter(self, x=None):
+ if x is None:
+ x = self.spatial_filters
+ elif not isinstance(x, list):
+ x = [x]
+
+ return x is not None and any(filter_kernel == "pyradiomics_exponential" for filter_kernel in x)
+
+
+
def get_image_transformation_settings() -> list[dict[str, Any]]:
    """Return setting definitions for image transformation (filter) parameters.

    Each ``setting_def`` entry maps a keyword-argument name to its type, optional
    xml key(s) and class attribute key, and test value(s). The table drives
    configuration parsing and testing of the image transformation settings.
    """
    return [
        # Response map (filtered image) feature computation and discretisation.
        setting_def(
            "response_map_feature_families", "str", to_list=True, xml_key="feature_families",
            class_key="families", test=["statistical", "glcm"]
        ),
        setting_def(
            "response_map_discretisation_method", "str", to_list=True, xml_key="discretisation_method",
            class_key="discretisation_method", test=["fixed_bin_size", "fixed_bin_number"]
        ),
        setting_def(
            "response_map_discretisation_n_bins", "int", to_list=True, xml_key="discretisation_n_bins",
            class_key="discretisation_n_bins", test=[10, 33]
        ),
        setting_def(
            "response_map_discretisation_bin_width", "float", to_list=True, xml_key="discretisation_bin_width",
            class_key="discretisation_bin_width", test=[10.0, 34.0]
        ),
        # Filter kernel selection and shared boundary condition.
        setting_def(
            "filter_kernels", "str", to_list=True, xml_key=["filter_kernels", "spatial_filters"],
            class_key="spatial_filters", test=[
                "separable_wavelet", "nonseparable_wavelet", "riesz_nonseparable_wavelet", "gaussian", "riesz_gaussian",
                "laplacian_of_gaussian", "log", "riesz_laplacian_of_gaussian", "riesz_log", "laws", "gabor", "riesz_gabor", "mean"
            ]
        ),
        setting_def("boundary_condition", "str", test="nearest"),
        # Separable wavelet filter parameters.
        setting_def("separable_wavelet_families", "str", to_list=True, test=["coif4", "coif5"]),
        setting_def(
            "separable_wavelet_set", "str", to_list=True, class_key="separable_wavelet_filter_set",
            test=["hhh", "lll"]
        ),
        setting_def("separable_wavelet_stationary", "bool", test=False),
        setting_def("separable_wavelet_decomposition_level", "int", to_list=True, test=[1, 2]),
        setting_def("separable_wavelet_rotation_invariance", "bool", test=False),
        setting_def("separable_wavelet_pooling_method", "str", test="mean"),
        setting_def("separable_wavelet_boundary_condition", "str", test="constant"),
        # Non-separable wavelet filter parameters.
        setting_def("nonseparable_wavelet_families", "str", to_list=True, test=["simoncelli", "shannon"]),
        setting_def("nonseparable_wavelet_decomposition_level", "int", to_list=True, test=[1, 2]),
        setting_def("nonseparable_wavelet_response", "str", test="magnitude"),
        setting_def("nonseparable_wavelet_boundary_condition", "str", test="constant"),
        # Gaussian filter parameters.
        setting_def("gaussian_sigma", "float", to_list=True, test=[1.0, 3.0]),
        setting_def("gaussian_kernel_truncate", "float", class_key="gaussian_sigma_truncate", test=10.0),
        setting_def(
            "gaussian_kernel_boundary_condition", "str", class_key="gaussian_boundary_condition", test="constant"
        ),
        # Laplacian-of-Gaussian filter parameters.
        setting_def(
            "laplacian_of_gaussian_sigma", "float", to_list=True,
            xml_key=["laplacian_of_gaussian_sigma", "log_sigma"], class_key="log_sigma", test=[1.0, 3.0]
        ),
        setting_def(
            "laplacian_of_gaussian_kernel_truncate", "float",
            xml_key=["laplacian_of_gaussian_kernel_truncate", "log_sigma_truncate"], class_key="log_sigma_truncate",
            test=10.0
        ),
        setting_def("laplacian_of_gaussian_pooling_method", "str", class_key="log_pooling_method", test="mean"),
        setting_def(
            "laplacian_of_gaussian_boundary_condition", "str", class_key="log_boundary_condition", test="constant"
        ),
        # Laws texture filter parameters.
        setting_def("laws_kernel", "str", to_list=True, test=["l5e5s5", "w5r5l3"]),
        setting_def(
            "laws_compute_energy", "bool", xml_key="laws_calculate_energy",
            class_key="laws_calculate_energy", test=True
        ),
        setting_def("laws_delta", "int", to_list=True, test=[3, 5]),
        setting_def(
            "laws_rotation_invariance", "bool", xml_key=["laws_rotation_invariance", "laws_rot_invar"], test=False
        ),
        setting_def("laws_pooling_method", "str", test="mean"),
        setting_def("laws_boundary_condition", "str", test="constant"),
        # Gabor filter parameters.
        setting_def("gabor_sigma", "float", to_list=True, test=[1.0, 3.0]),
        setting_def("gabor_lambda", "float", to_list=True, test=[0.5, 2.0]),
        setting_def("gabor_gamma", "float", to_list=True, test=[0.5, 0.75]),
        setting_def("gabor_theta", "float", to_list=True, test=[5.0, 15.0]),
        setting_def("gabor_theta_step", "float", test=None),
        setting_def("gabor_response", "str", test="magnitude"),
        setting_def(
            "gabor_rotation_invariance", "bool", xml_key=["gabor_rotation_invariance", "gabor_rot_invar"], test=False
        ),
        setting_def("gabor_pooling_method", "str", test="mean"),
        setting_def("gabor_boundary_condition", "str", test="constant"),
        # Mean filter parameters.
        setting_def(
            "mean_filter_kernel_size", "int", to_list=True, xml_key=["mean_filter_kernel_size", "mean_filter_size"],
            class_key="mean_filter_size", test=[3, 7]
        ),
        setting_def("mean_filter_boundary_condition", "str", test="constant"),
        # Riesz transform parameters.
        setting_def(
            "riesz_filter_order", "int", to_list=True, xml_key=["riesz_filter_order", "riesz_order"],
            class_key="riesz_order", test=[2, 1, 0]
        ),
        setting_def("riesz_filter_tensor_sigma", "float", to_list=True, test=[3.0, 5.0])
    ]
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/_modules/mirp/utilities/config_utilities.html b/docs/_modules/mirp/utilities/config_utilities.html
index 153b0f38..22733ef2 100644
--- a/docs/_modules/mirp/utilities/config_utilities.html
+++ b/docs/_modules/mirp/utilities/config_utilities.html
@@ -1,22 +1,22 @@
-
+
- mirp.utilities.config_utilities — mirp 2.1.1 documentation
-
-
+ mirp.utilities.config_utilities — mirp 2.2.0 documentation
+
+
-
-
-
-
-
+
+
+
+
+
@@ -82,7 +82,9 @@ Source code for mirp.utilities.config_utilities
<
import warnings
-[docs]def get_settings_xml(target_dir: str):
+
+[docs]
+def get_settings_xml(target_dir: str):
"""
Creates a local copy of the settings ``xml`` file. This file can be used to configure the image processing and
feature extraction workflow.
@@ -120,7 +122,10 @@ Source code for mirp.utilities.config_utilities
<
print(f"A copy of the settings xml file was created at {destination_file_path}.")
-[docs]def get_data_xml(target_dir: str):
+
+
+[docs]
+def get_data_xml(target_dir: str):
"""
Creates a local copy of the data ``xml`` file. This file can be used to configure import of images and masks.
@@ -155,6 +160,7 @@ Source code for mirp.utilities.config_utilities
<
shutil.copy(source_file_path, destination_file_path)
print(f"A copy of the data xml file was created at {destination_file_path}.")
+
diff --git a/docs/_sources/mirp.deepLearningPreprocessing.rst.txt b/docs/_sources/mirp.deepLearningPreprocessing.rst.txt
index 55fa3051..4b3db28e 100644
--- a/docs/_sources/mirp.deepLearningPreprocessing.rst.txt
+++ b/docs/_sources/mirp.deepLearningPreprocessing.rst.txt
@@ -6,8 +6,8 @@ processing workflow, with a final cropping step (if any).
The deep learning preprocessing function comes in two versions:
-* :func:`~mirp.deepLearningPreprocessing.deep_learning_preprocessing`: conventional function that processes images.
-* :func:`~mirp.deepLearningPreprocessing.deep_learning_preprocessing_generator`: generator that yields processed images.
+* :func:`~mirp.deep_learning_preprocessing.deep_learning_preprocessing`: conventional function that processes images.
+* :func:`~mirp.deep_learning_preprocessing.deep_learning_preprocessing_generator`: generator that yields processed images.
Example
-------
@@ -26,7 +26,7 @@ MIRP can be used to crop images, e.g. to make them conform to the input of convo
API documentation
-----------------
-.. automodule:: mirp.deepLearningPreprocessing
+.. automodule:: mirp.deep_learning_preprocessing
:members:
:undoc-members:
:show-inheritance:
diff --git a/docs/_sources/mirp.extractFeaturesAndImages.rst.txt b/docs/_sources/mirp.extractFeaturesAndImages.rst.txt
index 0675f703..fdd5b6c2 100644
--- a/docs/_sources/mirp.extractFeaturesAndImages.rst.txt
+++ b/docs/_sources/mirp.extractFeaturesAndImages.rst.txt
@@ -5,18 +5,18 @@ Two of the main uses for MIRP are to process images and compute quantitative fea
standardized, IBSI 1 and IBSI 2 compliant, workflow. Two versions of the image processing and feature computation
function exist:
-* :func:`~mirp.extractFeaturesAndImages.extract_features_and_images`: conventional function that processes images and
+* :func:`~mirp.extract_features_and_images.extract_features_and_images`: conventional function that processes images and
computes features.
-* :func:`~mirp.extractFeaturesAndImages.extract_features_and_images_generator`: generator that yields processed
+* :func:`~mirp.extract_features_and_images.extract_features_and_images_generator`: generator that yields processed
images and features computed therefrom.
For convenience, the above functions are wrapped to allow for only computing feature values (without exporting
images) and only processing images (without computing features):
-* :func:`~mirp.extractFeaturesAndImages.extract_features`: conventional function that only computes features.
-* :func:`~mirp.extractFeaturesAndImages.extract_features_generator`: generator that only yields feature values.
-* :func:`~mirp.extractFeaturesAndImages.extract_features_and_images`: conventional function that only processes images.
-* :func:`~mirp.extractFeaturesAndImages.extract_features_and_images_generator`: generator that yields processed images.
+* :func:`~mirp.extract_features_and_images.extract_features`: conventional function that only computes features.
+* :func:`~mirp.extract_features_and_images.extract_features_generator`: generator that only yields feature values.
+* :func:`~mirp.extract_features_and_images.extract_features_and_images`: conventional function that only processes images.
+* :func:`~mirp.extract_features_and_images.extract_features_and_images_generator`: generator that yields processed images.
Examples
--------
@@ -165,15 +165,15 @@ changed using the ``response_map_discretisation_method`` and ``response_map_disc
API documentation
-----------------
-.. autofunction:: mirp.extractFeaturesAndImages.extract_features_and_images
+.. autofunction:: mirp.extract_features_and_images.extract_features_and_images
-.. autofunction:: mirp.extractFeaturesAndImages.extract_features_and_images_generator
+.. autofunction:: mirp.extract_features_and_images.extract_features_and_images_generator
-.. autofunction:: mirp.extractFeaturesAndImages.extract_features
+.. autofunction:: mirp.extract_features_and_images.extract_features
-.. autofunction:: mirp.extractFeaturesAndImages.extract_features_generator
+.. autofunction:: mirp.extract_features_and_images.extract_features_generator
-.. autofunction:: mirp.extractFeaturesAndImages.extract_images
+.. autofunction:: mirp.extract_features_and_images.extract_images
-.. autofunction:: mirp.extractFeaturesAndImages.extract_images_generator
+.. autofunction:: mirp.extract_features_and_images.extract_images_generator
diff --git a/docs/_sources/mirp.extractImageParameters.rst.txt b/docs/_sources/mirp.extractImageParameters.rst.txt
index 15c659fd..350fd3bf 100644
--- a/docs/_sources/mirp.extractImageParameters.rst.txt
+++ b/docs/_sources/mirp.extractImageParameters.rst.txt
@@ -24,7 +24,7 @@ Parameters of a single image can be extracted from their metadata as follows:
API documentation
-----------------
-.. automodule:: mirp.extractImageParameters
+.. automodule:: mirp.extract_image_parameters
:members:
:undoc-members:
:show-inheritance:
diff --git a/docs/_sources/mirp.extractMaskLabels.rst.txt b/docs/_sources/mirp.extractMaskLabels.rst.txt
index 4c3323ed..fd6ba487 100644
--- a/docs/_sources/mirp.extractMaskLabels.rst.txt
+++ b/docs/_sources/mirp.extractMaskLabels.rst.txt
@@ -2,7 +2,7 @@ Extract mask labels
===================
Mask files can contain labels for multiple regions of interest. You can use the
-:func:`~mirp.extractMaskLabels.extract_mask_labels` function to obtain these labels.
+:func:`~mirp.extract_mask_labels.extract_mask_labels` function to obtain these labels.
Example
-------
@@ -20,7 +20,7 @@ Region of interest labels can be extract from mask files as follows:
API documentation
-----------------
-.. automodule:: mirp.extractMaskLabels
+.. automodule:: mirp.extract_mask_labels
:members:
:undoc-members:
:show-inheritance:
diff --git a/docs/_sources/mirp.importData.rst.txt b/docs/_sources/mirp.importData.rst.txt
index b9ff8279..adbc3da9 100644
--- a/docs/_sources/mirp.importData.rst.txt
+++ b/docs/_sources/mirp.importData.rst.txt
@@ -201,7 +201,7 @@ MIRP processes and analyses images and masks. There are multiple ways to provide
* By specifying the configuration in a stand-alone data ``xml`` file. An empty copy of the ``xml`` file can be
created using :func:`mirp.utilities.config_utilities.get_data_xml`. The tags of the``xml`` file are the same as the
- arguments of :func:`~mirp.importData.importImageAndMask.import_image_and_mask`, that are listed below.
+ arguments of :func:`~mirp.data_import.import_image_and_mask.import_image_and_mask`, that are listed below.
Selecting specific images and masks
-----------------------------------
@@ -294,8 +294,8 @@ API documentation
.. note:: The :func:`~mirp.importData.importImageAndMask.import_image_and_mask` function is called internally by other
functions. These function pass through keyword arguments to
- :func:`~mirp.importData.importImageAndMask.import_image_and_mask`.
+ :func:`~mirp.data_import.import_image_and_mask.import_image_and_mask`.
-.. autofunction:: mirp.importData.importImageAndMask.import_image_and_mask
+.. autofunction:: mirp.data_import.import_image_and_mask.import_image_and_mask
.. autofunction:: mirp.utilities.config_utilities.get_data_xml
diff --git a/docs/_sources/mirp.settings.rst.txt b/docs/_sources/mirp.settings.rst.txt
index 1fc41baf..3539cba0 100644
--- a/docs/_sources/mirp.settings.rst.txt
+++ b/docs/_sources/mirp.settings.rst.txt
@@ -6,16 +6,16 @@ Standardization Initiative. Many aspects of this workflow can be configured. Thi
* Using keyword arguments. The keyword arguments match the parameters used to initialise the various settings objects
documented below.
-* By creating a :class:`~mirp.settings.settingsGeneric.SettingsClass` object. This object can be initialised using the
+* By creating a :class:`~mirp.settings.generic.SettingsClass` object. This object can be initialised using the
same keyword arguments as above. Alternatively, the attributes of the
- :class:`~mirp.settings.settingsGeneric.SettingsClass` can be filled with the specific objects documented below.
+ :class:`~mirp.settings.generic.SettingsClass` can be filled with the specific objects documented below.
* By specifying the configuration in a stand-alone settings ``xml`` file. An empty copy of the ``xml`` file can be
created using :func:`~mirp.utilities.config_utilities.get_settings_xml`.
General settings
----------------
-.. automodule:: mirp.settings.settingsGeneral
+.. automodule:: mirp.settings.general_parameters
:members:
:no-undoc-members:
:show-inheritance:
@@ -23,7 +23,7 @@ General settings
Image processing settings
-------------------------
-.. automodule:: mirp.settings.settingsImageProcessing
+.. automodule:: mirp.settings.image_processing_parameters
:members:
:no-undoc-members:
:show-inheritance:
@@ -31,7 +31,7 @@ Image processing settings
Image perturbation settings
---------------------------
-.. automodule:: mirp.settings.settingsPerturbation
+.. automodule:: mirp.settings.perturbation_parameters
:members:
:no-undoc-members:
:show-inheritance:
@@ -39,7 +39,7 @@ Image perturbation settings
Image interpolation settings
----------------------------
-.. automodule:: mirp.settings.settingsInterpolation
+.. automodule:: mirp.settings.interpolation_parameters
:members:
:no-undoc-members:
:show-inheritance:
@@ -47,7 +47,7 @@ Image interpolation settings
Mask resegmentation settings
----------------------------
-.. automodule:: mirp.settings.settingsMaskResegmentation
+.. automodule:: mirp.settings.resegmentation_parameters
:members:
:no-undoc-members:
:show-inheritance:
@@ -55,7 +55,7 @@ Mask resegmentation settings
Feature computation settings
----------------------------
-.. automodule:: mirp.settings.settingsFeatureExtraction
+.. automodule:: mirp.settings.feature_parameters
:members:
:no-undoc-members:
:show-inheritance:
@@ -63,7 +63,7 @@ Feature computation settings
Image transformation settings
-----------------------------
-.. automodule:: mirp.settings.settingsImageTransformation
+.. automodule:: mirp.settings.transformation_parameters
:members:
:no-undoc-members:
:show-inheritance:
@@ -71,7 +71,7 @@ Image transformation settings
Generic settings object
-----------------------
-.. automodule:: mirp.settings.settingsGeneric
+.. automodule:: mirp.settings.generic
:members:
:no-undoc-members:
:show-inheritance:
diff --git a/docs/_static/_sphinx_javascript_frameworks_compat.js b/docs/_static/_sphinx_javascript_frameworks_compat.js
index 8549469d..81415803 100644
--- a/docs/_static/_sphinx_javascript_frameworks_compat.js
+++ b/docs/_static/_sphinx_javascript_frameworks_compat.js
@@ -1,20 +1,9 @@
-/*
- * _sphinx_javascript_frameworks_compat.js
- * ~~~~~~~~~~
- *
- * Compatability shim for jQuery and underscores.js.
- *
- * WILL BE REMOVED IN Sphinx 6.0
- * xref RemovedInSphinx60Warning
+/* Compatability shim for jQuery and underscores.js.
*
+ * Copyright Sphinx contributors
+ * Released under the two clause BSD licence
*/
-/**
- * select a different prefix for underscore
- */
-$u = _.noConflict();
-
-
/**
* small helper function to urldecode strings
*
diff --git a/docs/_static/basic.css b/docs/_static/basic.css
index 08896771..30fee9d0 100644
--- a/docs/_static/basic.css
+++ b/docs/_static/basic.css
@@ -4,7 +4,7 @@
*
* Sphinx stylesheet -- basic theme.
*
- * :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS.
+ * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details.
*
*/
@@ -237,6 +237,10 @@ a.headerlink {
visibility: hidden;
}
+a:visited {
+ color: #551A8B;
+}
+
h1:hover > a.headerlink,
h2:hover > a.headerlink,
h3:hover > a.headerlink,
@@ -324,17 +328,17 @@ aside.sidebar {
p.sidebar-title {
font-weight: bold;
}
+
nav.contents,
aside.topic,
-
div.admonition, div.topic, blockquote {
clear: left;
}
/* -- topics ---------------------------------------------------------------- */
+
nav.contents,
aside.topic,
-
div.topic {
border: 1px solid #ccc;
padding: 7px;
@@ -375,7 +379,6 @@ div.sidebar > :last-child,
aside.sidebar > :last-child,
nav.contents > :last-child,
aside.topic > :last-child,
-
div.topic > :last-child,
div.admonition > :last-child {
margin-bottom: 0;
@@ -385,7 +388,6 @@ div.sidebar::after,
aside.sidebar::after,
nav.contents::after,
aside.topic::after,
-
div.topic::after,
div.admonition::after,
blockquote::after {
@@ -611,25 +613,6 @@ ul.simple p {
margin-bottom: 0;
}
-/* Docutils 0.17 and older (footnotes & citations) */
-dl.footnote > dt,
-dl.citation > dt {
- float: left;
- margin-right: 0.5em;
-}
-
-dl.footnote > dd,
-dl.citation > dd {
- margin-bottom: 0em;
-}
-
-dl.footnote > dd:after,
-dl.citation > dd:after {
- content: "";
- clear: both;
-}
-
-/* Docutils 0.18+ (footnotes & citations) */
aside.footnote > span,
div.citation > span {
float: left;
@@ -654,8 +637,6 @@ div.citation > p:last-of-type:after {
clear: both;
}
-/* Footnotes & citations ends */
-
dl.field-list {
display: grid;
grid-template-columns: fit-content(30%) auto;
@@ -668,10 +649,6 @@ dl.field-list > dt {
padding-right: 5px;
}
-dl.field-list > dt:after {
- content: ":";
-}
-
dl.field-list > dd {
padding-left: 0.5em;
margin-top: 0em;
@@ -697,6 +674,16 @@ dd {
margin-left: 30px;
}
+.sig dd {
+ margin-top: 0px;
+ margin-bottom: 0px;
+}
+
+.sig dl {
+ margin-top: 0px;
+ margin-bottom: 0px;
+}
+
dl > dd:last-child,
dl > dd:last-child > :last-child {
margin-bottom: 0;
@@ -765,6 +752,14 @@ abbr, acronym {
cursor: help;
}
+.translated {
+ background-color: rgba(207, 255, 207, 0.2)
+}
+
+.untranslated {
+ background-color: rgba(255, 207, 207, 0.2)
+}
+
/* -- code displays --------------------------------------------------------- */
pre {
diff --git a/docs/_static/doctools.js b/docs/_static/doctools.js
index c3db08d1..d06a71d7 100644
--- a/docs/_static/doctools.js
+++ b/docs/_static/doctools.js
@@ -4,12 +4,19 @@
*
* Base JavaScript utilities for all Sphinx HTML documentation.
*
- * :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS.
+ * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details.
*
*/
"use strict";
+const BLACKLISTED_KEY_CONTROL_ELEMENTS = new Set([
+ "TEXTAREA",
+ "INPUT",
+ "SELECT",
+ "BUTTON",
+]);
+
const _ready = (callback) => {
if (document.readyState !== "loading") {
callback();
@@ -18,73 +25,11 @@ const _ready = (callback) => {
}
};
-/**
- * highlight a given string on a node by wrapping it in
- * span elements with the given class name.
- */
-const _highlight = (node, addItems, text, className) => {
- if (node.nodeType === Node.TEXT_NODE) {
- const val = node.nodeValue;
- const parent = node.parentNode;
- const pos = val.toLowerCase().indexOf(text);
- if (
- pos >= 0 &&
- !parent.classList.contains(className) &&
- !parent.classList.contains("nohighlight")
- ) {
- let span;
-
- const closestNode = parent.closest("body, svg, foreignObject");
- const isInSVG = closestNode && closestNode.matches("svg");
- if (isInSVG) {
- span = document.createElementNS("http://www.w3.org/2000/svg", "tspan");
- } else {
- span = document.createElement("span");
- span.classList.add(className);
- }
-
- span.appendChild(document.createTextNode(val.substr(pos, text.length)));
- parent.insertBefore(
- span,
- parent.insertBefore(
- document.createTextNode(val.substr(pos + text.length)),
- node.nextSibling
- )
- );
- node.nodeValue = val.substr(0, pos);
-
- if (isInSVG) {
- const rect = document.createElementNS(
- "http://www.w3.org/2000/svg",
- "rect"
- );
- const bbox = parent.getBBox();
- rect.x.baseVal.value = bbox.x;
- rect.y.baseVal.value = bbox.y;
- rect.width.baseVal.value = bbox.width;
- rect.height.baseVal.value = bbox.height;
- rect.setAttribute("class", className);
- addItems.push({ parent: parent, target: rect });
- }
- }
- } else if (node.matches && !node.matches("button, select, textarea")) {
- node.childNodes.forEach((el) => _highlight(el, addItems, text, className));
- }
-};
-const _highlightText = (thisNode, text, className) => {
- let addItems = [];
- _highlight(thisNode, addItems, text, className);
- addItems.forEach((obj) =>
- obj.parent.insertAdjacentElement("beforebegin", obj.target)
- );
-};
-
/**
* Small JavaScript module for the documentation.
*/
const Documentation = {
init: () => {
- Documentation.highlightSearchWords();
Documentation.initDomainIndexTable();
Documentation.initOnKeyListeners();
},
@@ -126,51 +71,6 @@ const Documentation = {
Documentation.LOCALE = catalog.locale;
},
- /**
- * highlight the search words provided in the url in the text
- */
- highlightSearchWords: () => {
- const highlight =
- new URLSearchParams(window.location.search).get("highlight") || "";
- const terms = highlight.toLowerCase().split(/\s+/).filter(x => x);
- if (terms.length === 0) return; // nothing to do
-
- // There should never be more than one element matching "div.body"
- const divBody = document.querySelectorAll("div.body");
- const body = divBody.length ? divBody[0] : document.querySelector("body");
- window.setTimeout(() => {
- terms.forEach((term) => _highlightText(body, term, "highlighted"));
- }, 10);
-
- const searchBox = document.getElementById("searchbox");
- if (searchBox === null) return;
- searchBox.appendChild(
- document
- .createRange()
- .createContextualFragment(
-        '<p class="highlight-link">' +
-          '<a href="javascript:Documentation.hideSearchWords()">' +
-          Documentation.gettext("Hide Search Matches") +
-          "</a></p>"
-      )
- );
- },
-
- /**
- * helper function to hide the search marks again
- */
- hideSearchWords: () => {
- document
- .querySelectorAll("#searchbox .highlight-link")
- .forEach((el) => el.remove());
- document
- .querySelectorAll("span.highlighted")
- .forEach((el) => el.classList.remove("highlighted"));
- const url = new URL(window.location);
- url.searchParams.delete("highlight");
- window.history.replaceState({}, "", url);
- },
-
/**
* helper function to focus on search bar
*/
@@ -210,15 +110,11 @@ const Documentation = {
)
return;
- const blacklistedElements = new Set([
- "TEXTAREA",
- "INPUT",
- "SELECT",
- "BUTTON",
- ]);
document.addEventListener("keydown", (event) => {
- if (blacklistedElements.has(document.activeElement.tagName)) return; // bail for input elements
- if (event.altKey || event.ctrlKey || event.metaKey) return; // bail with special keys
+ // bail for input elements
+ if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return;
+ // bail with special keys
+ if (event.altKey || event.ctrlKey || event.metaKey) return;
if (!event.shiftKey) {
switch (event.key) {
@@ -240,10 +136,6 @@ const Documentation = {
event.preventDefault();
}
break;
- case "Escape":
- if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break;
- Documentation.hideSearchWords();
- event.preventDefault();
}
}
diff --git a/docs/_static/documentation_options.js b/docs/_static/documentation_options.js
index 38a79217..14dc0d5c 100644
--- a/docs/_static/documentation_options.js
+++ b/docs/_static/documentation_options.js
@@ -1,6 +1,5 @@
-var DOCUMENTATION_OPTIONS = {
- URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'),
- VERSION: '2.1.1',
+const DOCUMENTATION_OPTIONS = {
+ VERSION: '2.2.0',
LANGUAGE: 'en',
COLLAPSE_INDEX: false,
BUILDER: 'html',
@@ -10,5 +9,5 @@ var DOCUMENTATION_OPTIONS = {
SOURCELINK_SUFFIX: '.txt',
NAVIGATION_WITH_KEYS: false,
SHOW_SEARCH_SUMMARY: true,
- ENABLE_SEARCH_SHORTCUTS: false,
+ ENABLE_SEARCH_SHORTCUTS: true,
};
\ No newline at end of file
diff --git a/docs/_static/jquery-3.6.0.js b/docs/_static/jquery-3.6.0.js
deleted file mode 100644
index fc6c299b..00000000
--- a/docs/_static/jquery-3.6.0.js
+++ /dev/null
@@ -1,10881 +0,0 @@
-/*!
- * jQuery JavaScript Library v3.6.0
- * https://jquery.com/
- *
- * Includes Sizzle.js
- * https://sizzlejs.com/
- *
- * Copyright OpenJS Foundation and other contributors
- * Released under the MIT license
- * https://jquery.org/license
- *
- * Date: 2021-03-02T17:08Z
- */
-( function( global, factory ) {
-
- "use strict";
-
- if ( typeof module === "object" && typeof module.exports === "object" ) {
-
- // For CommonJS and CommonJS-like environments where a proper `window`
- // is present, execute the factory and get jQuery.
- // For environments that do not have a `window` with a `document`
- // (such as Node.js), expose a factory as module.exports.
- // This accentuates the need for the creation of a real `window`.
- // e.g. var jQuery = require("jquery")(window);
- // See ticket #14549 for more info.
- module.exports = global.document ?
- factory( global, true ) :
- function( w ) {
- if ( !w.document ) {
- throw new Error( "jQuery requires a window with a document" );
- }
- return factory( w );
- };
- } else {
- factory( global );
- }
-
-// Pass this if window is not defined yet
-} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) {
-
-// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1
-// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode
-// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common
-// enough that all such attempts are guarded in a try block.
-"use strict";
-
-var arr = [];
-
-var getProto = Object.getPrototypeOf;
-
-var slice = arr.slice;
-
-var flat = arr.flat ? function( array ) {
- return arr.flat.call( array );
-} : function( array ) {
- return arr.concat.apply( [], array );
-};
-
-
-var push = arr.push;
-
-var indexOf = arr.indexOf;
-
-var class2type = {};
-
-var toString = class2type.toString;
-
-var hasOwn = class2type.hasOwnProperty;
-
-var fnToString = hasOwn.toString;
-
-var ObjectFunctionString = fnToString.call( Object );
-
-var support = {};
-
-var isFunction = function isFunction( obj ) {
-
- // Support: Chrome <=57, Firefox <=52
- // In some browsers, typeof returns "function" for HTML