diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 58656d15b..4413211c8 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -15,10 +15,22 @@ repos:
     hooks:
       - id: isort
         files: ^python/cucim/src/.*
-        args: ["--settings-path=python/cucim/setup.cfg"]
-  - repo: https://github.com/PyCQA/flake8
-    rev: 6.0.0
+        args: ["--settings-path=python/cucim/pyproject.toml"]
+  - repo: https://github.com/psf/black
+    rev: 23.3.0
     hooks:
-      - id: flake8
-        args: ["--config=python/cucim/setup.cfg"]
-        files: ^python/cucim/.*
+      - id: black
+        files: (python|legate)/.*
+        args: ["--config", "python/cucim/pyproject.toml"]
+  - repo: https://github.com/astral-sh/ruff-pre-commit
+    rev: v0.0.278
+    hooks:
+      - id: ruff
+        files: python/.*$
+  - repo: https://github.com/codespell-project/codespell
+    rev: v2.2.4
+    hooks:
+      - id: codespell
+        args: ["--toml", "python/cucim/pyproject.toml"]
+        additional_dependencies:
+          - tomli
diff --git a/CHANGELOG.md b/CHANGELOG.md
index c0c6faf84..e65a64a56 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -95,7 +95,7 @@
 ## 🐛 Bug Fixes
 
 - Fix bug in median filter with non-uniform footprint ([#521](https://github.com/rapidsai/cucim/pull/521)) [@grlee77](https://github.com/grlee77)
-- use cp.around instead of cp.round for CuPy 10.x compatiblity ([#508](https://github.com/rapidsai/cucim/pull/508)) [@grlee77](https://github.com/grlee77)
+- use cp.around instead of cp.round for CuPy 10.x compatibility ([#508](https://github.com/rapidsai/cucim/pull/508)) [@grlee77](https://github.com/grlee77)
 - Fix error in LZ4-compressed Zarr writing demo ([#506](https://github.com/rapidsai/cucim/pull/506)) [@grlee77](https://github.com/grlee77)
 - Normalize whitespace. ([#474](https://github.com/rapidsai/cucim/pull/474)) [@bdice](https://github.com/bdice)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 0ccffe49d..0bb7757fe 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -58,27 +58,50 @@ The following instructions are for developers and contributors to cuCIM OSS deve
 
 #### Python
 
-cuCIM uses [isort](https://readthedocs.org/projects/isort/), and
-[flake8](http://flake8.pycqa.org/en/latest/) to ensure a consistent code format
+
+cuCIM uses [isort](https://readthedocs.org/projects/isort/), [ruff](https://docs.astral.sh/ruff/), and [black](https://black.readthedocs.io/en/stable/) to ensure a consistent code format
-throughout the project. `isort`, and `flake8` can be installed with `conda` or `pip`:
+throughout the project. `isort`, `black`, and `ruff` can be installed with `conda` or `pip`:
 
 ```bash
-conda install isort flake8
+conda install isort black ruff
+```
+
+```bash
+pip install isort black ruff
+```
+
+These tools are used to auto-format the Python code in the repository. Additionally, there is a CI check in place to enforce that the committed code follows our standards. To run the tools only on the `python/cucim` folder, change to that folder and run:
+
+```bash
+isort .
+black .
+ruff .
 ```
 
+To also check formatting in top-level folders such as `benchmarks`, `examples`, and `experiments`, run the tools from the top level of the repository as follows:
+
 ```bash
-pip install isort flake8
+isort --settings-path="python/cucim/pyproject.toml" .
+black --config python/cucim/pyproject.toml .
+ruff .
 ```
 
-These tools are used to auto-format the Python code in the repository. Additionally, there is a CI check in place to enforce
-that the committed code follows our standards. You can use the tools to
-automatically format your python code by running:
+In addition to these tools, [codespell](https://github.com/codespell-project/codespell) can be used to help diagnose and interactively fix spelling errors in both Python and C++ code. It can also be run from the top level of the repository in interactive mode using:
 
 ```bash
-isort --atomic python/**/*.py
+codespell --toml python/cucim/pyproject.toml . -i 3 -w
 ```
 
+If codespell finds false positives in newly added code, the `ignore-words-list` entry of the `tool.codespell` section in `pyproject.toml` can be updated as needed.
+
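+Because these same tools are configured as hooks in `.pre-commit-config.yaml`, the full set of checks can also be run in one pass with [pre-commit](https://pre-commit.com/):
+
+```bash
+pip install pre-commit
+pre-commit run --all-files
+```
+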
 ### Get libcucim Dependencies
 
 Compiler requirements:
diff --git a/benchmarks/skimage/_image_bench.py b/benchmarks/skimage/_image_bench.py
index 8e6ea88fa..571588fee 100644
--- a/benchmarks/skimage/_image_bench.py
+++ b/benchmarks/skimage/_image_bench.py
@@ -1,10 +1,10 @@
 import itertools
 import math
+import re
+import subprocess
 import time
 import types
 from collections import abc
-import re
-import subprocess
 
 import cupy as cp
 import cupyx.scipy.ndimage
@@ -12,6 +12,7 @@
 import pandas as pd
 import scipy.ndimage
 import skimage.data
+
 from cucim.time import repeat
 
 
@@ -32,13 +33,11 @@ def __init__(
         fixed_kwargs={},
         var_kwargs={},
         index_str=None,  # extra string to append to dataframe index
-        # set_args_kwargs={},  # for passing additional arguments to custom set_args method
         module_cpu=scipy.ndimage,
         module_gpu=cupyx.scipy.ndimage,
         function_is_generator=False,
-        run_cpu=True
+        run_cpu=True,
     ):
-
         self.shape = shape
         self.function_name = function_name
         self.fixed_kwargs_cpu = self._update_kwargs_arrays(fixed_kwargs, "cpu")
@@ -171,7 +170,9 @@ def run_benchmark(self, duration=3, verbose=True):
                     self.func_gpu, self.args_gpu, kw_gpu, duration, cpu=False
                 )
                 print("Number of Repetitions : ", rep_kwargs_gpu)
-                perf_gpu = repeat(self.func_gpu, self.args_gpu, kw_gpu, **rep_kwargs_gpu)
+                perf_gpu = repeat(
+                    self.func_gpu, self.args_gpu, kw_gpu, **rep_kwargs_gpu
+                )
 
                 df.at[index, "shape"] = f"{self.shape}"
                 # df.at[index, "description"] = index
@@ -179,9 +180,13 @@ def run_benchmark(self, duration=3, verbose=True):
                 df.at[index, "dtype"] = np.dtype(dtype).name
                 df.at[index, "ndim"] = len(self.shape)
 
-                if self.run_cpu == True:
-                    perf = repeat(self.func_cpu, self.args_cpu, kw_cpu, **rep_kwargs_cpu)
-                    df.at[index, "GPU accel"] = perf.cpu_times.mean() / perf_gpu.gpu_times.mean()
+                if self.run_cpu is True:
+                    perf = repeat(
+                        self.func_cpu, self.args_cpu, kw_cpu, **rep_kwargs_cpu
+                    )
+                    df.at[index, "GPU accel"] = (
+                        perf.cpu_times.mean() / perf_gpu.gpu_times.mean()
+                    )
 
                     df.at[index, "CPU: host (mean)"] = perf.cpu_times.mean()
                     df.at[index, "CPU: host (std)"] = perf.cpu_times.std()
@@ -191,14 +196,22 @@ def run_benchmark(self, duration=3, verbose=True):
                 df.at[index, "GPU: device (std)"] = perf_gpu.gpu_times.std()
                 with cp.cuda.Device() as device:
                     props = cp.cuda.runtime.getDeviceProperties(device.id)
-                    gpu_name = props['name'].decode()
+                    gpu_name = props["name"].decode()
 
-                df.at[index, "GPU: DEV Name"] = [gpu_name for i in range(len(df))]
+                df.at[index, "GPU: DEV Name"] = [
+                    gpu_name for i in range(len(df))
+                ]
 
                 cmd = "cat /proc/cpuinfo"
                 cpuinfo = subprocess.check_output(cmd, shell=True).strip()
-                cpu_name = re.search("\nmodel name.*\n", cpuinfo.decode()).group(0).strip('\n')
-                cpu_name = cpu_name.replace('model name\t: ', '')
-                df.at[index, "CPU: DEV Name"] = [cpu_name for i in range(len(df))]
+                cpu_name = (
+                    re.search("\nmodel name.*\n", cpuinfo.decode())
+                    .group(0)
+                    .strip("\n")
+                )
+                cpu_name = 
cpu_name.replace("model name\t: ", "") + df.at[index, "CPU: DEV Name"] = [ + cpu_name for i in range(len(df)) + ] # accelerations[arr_index] = df.at[index, "GPU accel"] if verbose: diff --git a/benchmarks/skimage/bench_convolve.py b/benchmarks/skimage/bench_convolve.py index 5b2c9632d..f44714306 100644 --- a/benchmarks/skimage/bench_convolve.py +++ b/benchmarks/skimage/bench_convolve.py @@ -3,27 +3,33 @@ """ import cupy as cp import cupyx.scipy.ndimage as ndi -import pytest from cupyx.profiler import benchmark -from cucim.skimage._vendored.ndimage import ( - convolve1d, correlate1d, gaussian_filter, gaussian_filter1d, - gaussian_gradient_magnitude, gaussian_laplace, laplace, prewitt, sobel, - uniform_filter, uniform_filter1d, +from cucim.skimage._vendored.ndimage import ( # noqa: F401 + convolve1d, + correlate1d, + gaussian_filter, + gaussian_filter1d, + gaussian_gradient_magnitude, + gaussian_laplace, + laplace, + prewitt, + sobel, + uniform_filter, + uniform_filter1d, ) d = cp.cuda.Device() def _get_image(shape, dtype, seed=123): - rng = cp.random.default_rng(seed) dtype = cp.dtype(dtype) - if dtype.kind == 'b': + if dtype.kind == "b": image = rng.integers(0, 1, shape, dtype=cp.uint8).astype(bool) - elif dtype.kind in 'iu': + elif dtype.kind in "iu": image = rng.integers(0, 128, shape, dtype=dtype) - elif dtype.kind in 'c': + elif dtype.kind in "c": real_dtype = cp.asarray([], dtype=dtype).real.dtype image = rng.standard_normal(shape, dtype=real_dtype) image = image + 1j * rng.standard_normal(shape, dtype=real_dtype) @@ -36,9 +42,18 @@ def _get_image(shape, dtype, seed=123): def _compare_implementations( - shape, kernel_size, axis, dtype, mode, cval=0.0, origin=0, - output_dtype=None, kernel_dtype=None, output_preallocated=False, - function=convolve1d, max_duration=1 + shape, + kernel_size, + axis, + dtype, + mode, + cval=0.0, + origin=0, + output_dtype=None, + kernel_dtype=None, + output_preallocated=False, + function=convolve1d, + max_duration=1, ): dtype = cp.dtype(dtype) if kernel_dtype is None: @@ -55,21 +70,56 @@ def _compare_implementations( output1 = cp.empty(image.shape, dtype=output_dtype) output2 = cp.empty(image.shape, dtype=output_dtype) kwargs.update(dict(output=output1)) - perf1 = benchmark(function_ref, (image, kernel), kwargs=kwargs, n_warmup=10, n_repeat=10000, max_duration=max_duration) - kwargs.update(dict(output=output2, algorithm='shared_memory')) - perf2 = benchmark(function, (image, kernel), kwargs=kwargs, n_warmup=10, n_repeat=10000, max_duration=max_duration) + perf1 = benchmark( + function_ref, + (image, kernel), + kwargs=kwargs, + n_warmup=10, + n_repeat=10000, + max_duration=max_duration, + ) + kwargs.update(dict(output=output2, algorithm="shared_memory")) + perf2 = benchmark( + function, + (image, kernel), + kwargs=kwargs, + n_warmup=10, + n_repeat=10000, + max_duration=max_duration, + ) return perf1, perf2 kwargs.update(dict(output=output_dtype)) - perf1 = benchmark(function_ref, (image, kernel), kwargs=kwargs, n_warmup=10, n_repeat=10000, max_duration=max_duration) - kwargs.update(dict(output=output_dtype, algorithm='shared_memory')) - perf2 = benchmark(function, (image, kernel), kwargs=kwargs, n_warmup=10, n_repeat=10000, max_duration=max_duration) + perf1 = benchmark( + function_ref, + (image, kernel), + kwargs=kwargs, + n_warmup=10, + n_repeat=10000, + max_duration=max_duration, + ) + kwargs.update(dict(output=output_dtype, algorithm="shared_memory")) + perf2 = benchmark( + function, + (image, kernel), + kwargs=kwargs, + n_warmup=10, + n_repeat=10000, + 
max_duration=max_duration, + ) return perf1, perf2 def _compare_implementations_other( - shape, dtype, mode, cval=0.0, - output_dtype=None, kernel_dtype=None, output_preallocated=False, - function=convolve1d, func_kwargs={}, max_duration=1, + shape, + dtype, + mode, + cval=0.0, + output_dtype=None, + kernel_dtype=None, + output_preallocated=False, + function=convolve1d, + func_kwargs={}, + max_duration=1, ): dtype = cp.dtype(dtype) image = _get_image(shape, dtype) @@ -83,38 +133,84 @@ def _compare_implementations_other( if output_dtype is None: output_dtype = image.dtype output1 = cp.empty(image.shape, dtype=output_dtype) - output2 = cp.empty(image.shape, dtype=output_dtype) kwargs.update(dict(output=output1)) - perf1 = benchmark(function_ref, (image,), kwargs=kwargs, n_warmup=10, n_repeat=10000, max_duration=max_duration) - kwargs.update(dict(output=output1, algorithm='shared_memory')) - perf2 = benchmark(function, (image,), kwargs=kwargs, n_warmup=10, n_repeat=10000, max_duration=max_duration) + perf1 = benchmark( + function_ref, + (image,), + kwargs=kwargs, + n_warmup=10, + n_repeat=10000, + max_duration=max_duration, + ) + kwargs.update(dict(output=output1, algorithm="shared_memory")) + perf2 = benchmark( + function, + (image,), + kwargs=kwargs, + n_warmup=10, + n_repeat=10000, + max_duration=max_duration, + ) return perf1, perf2 kwargs.update(dict(output=output_dtype)) - perf1 = benchmark(function_ref, (image,), kwargs=kwargs, n_warmup=10, n_repeat=10000, max_duration=max_duration) - kwargs.update(dict(output=output_dtype, algorithm='shared_memory')) - perf2 = benchmark(function, (image,), kwargs=kwargs, n_warmup=10, n_repeat=10000, max_duration=max_duration) + perf1 = benchmark( + function_ref, + (image,), + kwargs=kwargs, + n_warmup=10, + n_repeat=10000, + max_duration=max_duration, + ) + kwargs.update(dict(output=output_dtype, algorithm="shared_memory")) + perf2 = benchmark( + function, + (image,), + kwargs=kwargs, + n_warmup=10, + n_repeat=10000, + max_duration=max_duration, + ) return perf1, perf2 print("\n\n") -print("function | shape | dtype | mode | kernel size | preallocated | axis | dur (ms), CuPy | dur (ms), cuCIM | acceleration ") -print("---------|-------|-------|------|-------------|--------------|------|----------------|-----------------|--------------") +print( + "function | shape | dtype | mode | kernel size | preallocated | axis | dur (ms), CuPy | dur (ms), cuCIM | acceleration " # noqa: E501 +) +print( + "---------|-------|-------|------|-------------|--------------|------|----------------|-----------------|--------------" # noqa: E501 +) for function in [convolve1d]: for shape in [(512, 512), (3840, 2160), (64, 64, 64), (256, 256, 256)]: for dtype in [cp.float32, cp.uint8]: - for mode in ['nearest']: + for mode in ["nearest"]: for kernel_size in [3, 7, 11, 41]: for output_preallocated in [False]: # , True]: for axis in range(len(shape)): output_dtype = dtype - perf1, perf2 = _compare_implementations(shape=shape, kernel_size=kernel_size, mode=mode, axis=axis, dtype=dtype, output_dtype=output_dtype, output_preallocated=output_preallocated, function=function) - t_elem = perf1.gpu_times * 1000. - t_shared = perf2.gpu_times * 1000. 
- print(f"{function.__name__} | {shape} | {cp.dtype(dtype).name} | {mode} | {kernel_size=} | prealloc={output_preallocated} | {axis=} | {t_elem.mean():0.3f} +/- {t_elem.std():0.3f} | {t_shared.mean():0.3f} +/- {t_shared.std():0.3f} | {t_elem.mean() / t_shared.mean():0.3f}") + perf1, perf2 = _compare_implementations( + shape=shape, + kernel_size=kernel_size, + mode=mode, + axis=axis, + dtype=dtype, + output_dtype=output_dtype, + output_preallocated=output_preallocated, + function=function, + ) + t_elem = perf1.gpu_times * 1000.0 + t_shared = perf2.gpu_times * 1000.0 + print( + f"{function.__name__} | {shape} | {cp.dtype(dtype).name} | {mode} | {kernel_size=} | prealloc={output_preallocated} | {axis=} | {t_elem.mean():0.3f} +/- {t_elem.std():0.3f} | {t_shared.mean():0.3f} +/- {t_shared.std():0.3f} | {t_elem.mean() / t_shared.mean():0.3f}" # noqa: E501 + ) -print("function | kwargs | shape | dtype | mode | preallocated | dur (ms), CuPy | dur (ms), cuCIM | acceleration ") -print("---------|--------|-------|-------|------|--------------|----------------|-----------------|--------------") +print( + "function | kwargs | shape | dtype | mode | preallocated | dur (ms), CuPy | dur (ms), cuCIM | acceleration " # noqa: E501 +) +print( + "---------|--------|-------|-------|------|--------------|----------------|-----------------|--------------" # noqa: E501 +) for function, func_kwargs in [ # (gaussian_filter1d, dict(sigma=1.0, axis=0)), # (gaussian_filter1d, dict(sigma=1.0, axis=-1)), @@ -129,10 +225,23 @@ def _compare_implementations_other( (sobel, dict(axis=-1)), ]: for shape in [(512, 512), (3840, 2160), (64, 64, 64), (256, 256, 256)]: - for (dtype, output_dtype) in [(cp.float32, cp.float32), (cp.uint8, cp.float32)]: - for mode in ['nearest']: + for dtype, output_dtype in [ + (cp.float32, cp.float32), + (cp.uint8, cp.float32), + ]: + for mode in ["nearest"]: for output_preallocated in [False, True]: - perf1, perf2 = _compare_implementations_other(shape=shape, mode=mode, dtype=dtype, output_dtype=output_dtype, output_preallocated=output_preallocated, function=function, func_kwargs=func_kwargs) - t_elem = perf1.gpu_times * 1000. - t_shared = perf2.gpu_times * 1000. 
- print(f"{function.__name__} | {func_kwargs} | {shape} | {cp.dtype(dtype).name} | {mode} | {output_preallocated} | {t_elem.mean():0.3f} +/- {t_elem.std():0.3f} | {t_shared.mean():0.3f} +/- {t_shared.std():0.3f} | {t_elem.mean() / t_shared.mean():0.3f}") + perf1, perf2 = _compare_implementations_other( + shape=shape, + mode=mode, + dtype=dtype, + output_dtype=output_dtype, + output_preallocated=output_preallocated, + function=function, + func_kwargs=func_kwargs, + ) + t_elem = perf1.gpu_times * 1000.0 + t_shared = perf2.gpu_times * 1000.0 + print( + f"{function.__name__} | {func_kwargs} | {shape} | {cp.dtype(dtype).name} | {mode} | {output_preallocated} | {t_elem.mean():0.3f} +/- {t_elem.std():0.3f} | {t_shared.mean():0.3f} +/- {t_shared.std():0.3f} | {t_elem.mean() / t_shared.mean():0.3f}" # noqa: E501 + ) diff --git a/benchmarks/skimage/cucim_color_bench.py b/benchmarks/skimage/cucim_color_bench.py index 87955dd0a..3e57e1afb 100644 --- a/benchmarks/skimage/cucim_color_bench.py +++ b/benchmarks/skimage/cucim_color_bench.py @@ -2,8 +2,6 @@ import os import pickle -import cucim.skimage -import cucim.skimage.color import cupy import cupy as cp import cupyx.scipy.ndimage @@ -12,16 +10,31 @@ import scipy import skimage import skimage.color - from _image_bench import ImageBench -func_name_choices = ['convert_colorspace', 'rgb2hed', 'hed2rgb', 'lab2lch', 'lch2lab', 'xyz2lab', 'lab2xyz', 'rgba2rgb', 'label2rgb'] +import cucim.skimage +import cucim.skimage.color + +func_name_choices = [ + "convert_colorspace", + "rgb2hed", + "hed2rgb", + "lab2lch", + "lch2lab", + "xyz2lab", + "lab2xyz", + "rgba2rgb", + "label2rgb", +] + class ColorBench(ImageBench): def set_args(self, dtype): if self.shape[-1] != 3: raise ValueError("shape must be 3 on the last axis") - imaged = cupy.testing.shaped_random(self.shape, xp=cp, dtype=dtype, scale=1.0) + imaged = cupy.testing.shaped_random( + self.shape, xp=cp, dtype=dtype, scale=1.0 + ) image = cp.asnumpy(imaged) self.args_cpu = (image,) self.args_gpu = (imaged,) @@ -31,7 +44,9 @@ class RGBABench(ImageBench): def set_args(self, dtype): if self.shape[-1] != 4: raise ValueError("shape must be 4 on the last axis") - imaged = cupy.testing.shaped_random(self.shape, xp=cp, dtype=dtype, scale=1.0) + imaged = cupy.testing.shaped_random( + self.shape, xp=cp, dtype=dtype, scale=1.0 + ) image = cp.asnumpy(imaged) self.args_cpu = (image,) self.args_gpu = (imaged,) @@ -79,21 +94,22 @@ def set_args(self, dtype): label = np.kron(a, np.ones(tiling, dtype=a.dtype)) else: label = np.tile(a, tiling) - labeld = cp.asarray(label) - imaged = cupy.testing.shaped_random(labeld.shape, xp=cp, dtype=dtype, scale=1.0) + labelled = cp.asarray(label) + imaged = cupy.testing.shaped_random( + labelled.shape, xp=cp, dtype=dtype, scale=1.0 + ) image = cp.asnumpy(imaged) self.args_cpu = ( label, image, ) self.args_gpu = ( - labeld, + labelled, imaged, ) def main(args): - pfile = "cucim_color_results.pickle" if os.path.exists(pfile): with open(pfile, "rb") as f: @@ -103,21 +119,28 @@ def main(args): dtypes = [np.dtype(args.dtype)] # image sizes/shapes - shape = tuple(list(map(int,(args.img_size.split(','))))) + shape = tuple(list(map(int, (args.img_size.split(","))))) run_cpu = not args.no_cpu all_colorspaces = False - ndim = len(shape) - for function_name in func_name_choices: - if function_name != args.func_name: continue - if function_name == 'convert_colorspace': + if function_name == "convert_colorspace": if all_colorspaces: - color_spaces = ["RGB", "HSV", "RGB CIE", "XYZ", "YUV", "YIQ", "YPbPr", 
"YCbCr", "YDbDr"] + color_spaces = [ + "RGB", + "HSV", + "RGB CIE", + "XYZ", + "YUV", + "YIQ", + "YPbPr", + "YCbCr", + "YDbDr", + ] else: color_spaces = ["RGB", "HSV", "YUV", "XYZ"] for fromspace in color_spaces: @@ -139,7 +162,7 @@ def main(args): results = B.run_benchmark(duration=args.duration) all_results = pd.concat([all_results, results["full"]]) - elif function_name == 'rgba2rgb': + elif function_name == "rgba2rgb": B = RGBABench( function_name="rgba2rgb", shape=shape[:-1] + (4,), @@ -153,8 +176,7 @@ def main(args): results = B.run_benchmark(duration=args.duration) all_results = pd.concat([all_results, results["full"]]) - elif function_name == 'label2rgb': - + elif function_name == "label2rgb": for contiguous_labels in [True, False]: if contiguous_labels: index_str = "contiguous" @@ -176,7 +198,12 @@ def main(args): all_results = pd.concat([all_results, results["full"]]) elif function_name in [ - 'rgb2hed', 'hed2rgb', 'lab2lch', 'lch2lab', 'xyz2lab', 'lab2xyz' + "rgb2hed", + "hed2rgb", + "lab2lch", + "lch2lab", + "xyz2lab", + "lab2xyz", ]: B = ColorBench( function_name=function_name, @@ -195,7 +222,7 @@ def main(args): all_results.to_csv(fbase + ".csv") all_results.to_pickle(pfile) try: - import tabular + import tabular # noqa: F401 with open(fbase + ".md", "wt") as f: f.write(all_results.to_markdown()) @@ -203,14 +230,59 @@ def main(args): pass -if __name__ == '__main__': - parser = argparse.ArgumentParser(description='Benchmarking cuCIM color conversion functions') - dtype_choices = ['float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', 'uint64'] - parser.add_argument('-i','--img_size', type=str, help='Size of input image (omit color channel, it will be appended as needed)', required=True) - parser.add_argument('-d','--dtype', type=str, help='Dtype of input image', choices = dtype_choices, required=True) - parser.add_argument('-f','--func_name', type=str, help='function to benchmark', choices = func_name_choices, required=True) - parser.add_argument('-t','--duration', type=int, help='time to run benchmark', required=True) - parser.add_argument('--no_cpu', action='store_true', help='disable cpu measurements', default=False) +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Benchmarking cuCIM color conversion functions" + ) + dtype_choices = [ + "float16", + "float32", + "float64", + "int8", + "int16", + "int32", + "int64", + "uint8", + "uint16", + "uint32", + "uint64", + ] + parser.add_argument( + "-i", + "--img_size", + type=str, + help="Size of input image (omit color channel, it will be appended as needed)", + required=True, + ) + parser.add_argument( + "-d", + "--dtype", + type=str, + help="Dtype of input image", + choices=dtype_choices, + required=True, + ) + parser.add_argument( + "-f", + "--func_name", + type=str, + help="function to benchmark", + choices=func_name_choices, + required=True, + ) + parser.add_argument( + "-t", + "--duration", + type=int, + help="time to run benchmark", + required=True, + ) + parser.add_argument( + "--no_cpu", + action="store_true", + help="disable cpu measurements", + default=False, + ) args = parser.parse_args() main(args) diff --git a/benchmarks/skimage/cucim_exposure_bench.py b/benchmarks/skimage/cucim_exposure_bench.py index d0fb4dbda..317c4efe6 100644 --- a/benchmarks/skimage/cucim_exposure_bench.py +++ b/benchmarks/skimage/cucim_exposure_bench.py @@ -2,17 +2,17 @@ import os import pickle -import cucim.skimage -import cucim.skimage.exposure import cupy import cupy as 
cp import numpy as np import pandas as pd import skimage import skimage.exposure - from _image_bench import ImageBench +import cucim.skimage +import cucim.skimage.exposure + class ExposureBench(ImageBench): def set_args(self, dtype): @@ -20,7 +20,9 @@ def set_args(self, dtype): scale = 256 else: scale = 1.0 - imaged = cupy.testing.shaped_random(self.shape, xp=cp, dtype=dtype, scale=scale) + imaged = cupy.testing.shaped_random( + self.shape, xp=cp, dtype=dtype, scale=scale + ) image = cp.asnumpy(imaged) self.args_cpu = (image,) self.args_gpu = (imaged,) @@ -28,13 +30,16 @@ def set_args(self, dtype): class MatchHistogramBench(ImageBench): def set_args(self, dtype): - if np.dtype(dtype).kind in "iu": scale = 256 else: scale = 1.0 - imaged = cupy.testing.shaped_random(self.shape, xp=cp, dtype=dtype, scale=scale) - imaged2 = cupy.testing.shaped_random(self.shape, xp=cp, dtype=dtype, scale=scale) + imaged = cupy.testing.shaped_random( + self.shape, xp=cp, dtype=dtype, scale=scale + ) + imaged2 = cupy.testing.shaped_random( + self.shape, xp=cp, dtype=dtype, scale=scale + ) image = cp.asnumpy(imaged) image2 = cp.asnumpy(imaged2) self.args_cpu = (image, image2) @@ -42,7 +47,6 @@ def set_args(self, dtype): def main(args): - pfile = "cucim_exposure_results.pickle" if os.path.exists(pfile): with open(pfile, "rb") as f: @@ -52,7 +56,7 @@ def main(args): dtypes = [np.dtype(args.dtype)] # image sizes/shapes - shape = tuple(list(map(int,(args.img_size.split(','))))) + shape = tuple(list(map(int, (args.img_size.split(","))))) run_cpu = not args.no_cpu for function_name, fixed_kwargs, var_kwargs, allow_color in [ @@ -65,20 +69,21 @@ def main(args): ), ("cumulative_distribution", dict(), dict(nbins=[16, 256]), False), ("equalize_hist", dict(mask=None), dict(nbins=[16, 256]), False), - ("rescale_intensity", dict(in_range="image", out_range="dtype"), dict(), False), + ( + "rescale_intensity", + dict(in_range="image", out_range="dtype"), + dict(), + False, + ), ("adjust_gamma", dict(), dict(), False), ("adjust_log", dict(), dict(), False), ("adjust_sigmoid", dict(), dict(inv=[False, True]), False), ("is_low_contrast", dict(), dict(), False), ]: - if function_name != args.func_name: continue - ndim = len(shape) - - if function_name == 'match_histograms': - + if function_name == "match_histograms": channel_axis = -1 if shape[-1] in [3, 4] else None B = MatchHistogramBench( @@ -95,16 +100,18 @@ def main(args): all_results = pd.concat([all_results, results["full"]]) else: - if shape[-1] == 3 and not allow_color: continue if function_name == "equalize_adapthist": - # TODO: fix equalize_adapthist for size (3840, 2160) and kernel_size = [16, 16] + # TODO: fix equalize_adapthist for size (3840, 2160) + # and kernel_size = [16, 16] size_factors = [4, 8, 16] kernel_sizes = [] for size_factor in size_factors: - kernel_sizes.append([max(s // size_factor, 1) for s in shape if s != 3]) + kernel_sizes.append( + [max(s // size_factor, 1) for s in shape if s != 3] + ) var_kwargs.update(dict(kernel_size=kernel_sizes)) B = ExposureBench( @@ -127,15 +134,66 @@ def main(args): f.write(all_results.to_markdown()) -if __name__ == '__main__': - parser = argparse.ArgumentParser(description='Benchmarking cuCIM exposure functions') - func_name_choices = ['equalize_adapthist', 'cumulative_distribution', 'equalize_hist', 'rescale_intensity', 'adjust_gamma', 'adjust_log', 'adjust_sigmoid', 'is_low_contrast', 'match_histograms'] - dtype_choices = ['float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', 
'uint64'] - parser.add_argument('-i','--img_size', type=str, help='Size of input image', required=True) - parser.add_argument('-d','--dtype', type=str, help='Dtype of input image', choices=dtype_choices, required=True) - parser.add_argument('-f','--func_name', type=str, help='function to benchmark', choices=func_name_choices, required=True) - parser.add_argument('-t','--duration', type=int, help='time to run benchmark', required=True) - parser.add_argument('--no_cpu', action='store_true', help='disable cpu measurements', default=False) +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Benchmarking cuCIM exposure functions" + ) + func_name_choices = [ + "equalize_adapthist", + "cumulative_distribution", + "equalize_hist", + "rescale_intensity", + "adjust_gamma", + "adjust_log", + "adjust_sigmoid", + "is_low_contrast", + "match_histograms", + ] + dtype_choices = [ + "float16", + "float32", + "float64", + "int8", + "int16", + "int32", + "int64", + "uint8", + "uint16", + "uint32", + "uint64", + ] + parser.add_argument( + "-i", "--img_size", type=str, help="Size of input image", required=True + ) + parser.add_argument( + "-d", + "--dtype", + type=str, + help="Dtype of input image", + choices=dtype_choices, + required=True, + ) + parser.add_argument( + "-f", + "--func_name", + type=str, + help="function to benchmark", + choices=func_name_choices, + required=True, + ) + parser.add_argument( + "-t", + "--duration", + type=int, + help="time to run benchmark", + required=True, + ) + parser.add_argument( + "--no_cpu", + action="store_true", + help="disable cpu measurements", + default=False, + ) args = parser.parse_args() main(args) diff --git a/benchmarks/skimage/cucim_feature_bench.py b/benchmarks/skimage/cucim_feature_bench.py index ac55f9f7d..d767fc6e0 100644 --- a/benchmarks/skimage/cucim_feature_bench.py +++ b/benchmarks/skimage/cucim_feature_bench.py @@ -1,21 +1,20 @@ +import argparse import math import os import pickle -import argparse import cupy as cp import numpy as np import pandas as pd import skimage import skimage.feature +from _image_bench import ImageBench from skimage import data, draw import cucim.skimage import cucim.skimage.feature from cucim.skimage import exposure -from _image_bench import ImageBench - class BlobDetectionBench(ImageBench): def set_args(self, dtype): @@ -24,7 +23,7 @@ def set_args(self, dtype): # create 2D image by tiling the coins image img = cp.array(data.coins()) img = exposure.equalize_hist(img) # improves detection - n_tile = (math.ceil(s/s0) for s, s0 in zip(self.shape, img.shape)) + n_tile = (math.ceil(s / s0) for s, s0 in zip(self.shape, img.shape)) img = cp.tile(img, n_tile) img = img[tuple(slice(s) for s in img.shape)] img = img.astype(dtype, copy=False) @@ -38,8 +37,10 @@ def set_args(self, dtype): offsets = rng.integers(0, np.prod(img.shape), num_ellipse) locs = np.unravel_index(offsets, img.shape) for loc in zip(*locs): - loc = tuple(min(l, s - es) for l, s, es in zip(loc, img.shape, e.shape)) - sl = tuple(slice(l, l + es) for l, es in zip(loc, e.shape)) + loc = tuple( + min(p, s - es) for p, s, es in zip(loc, img.shape, e.shape) + ) + sl = tuple(slice(p, p + es) for p, es in zip(loc, e.shape)) img[sl] = e else: raise NotImplementedError("unsupported ndim") @@ -63,7 +64,6 @@ def set_args(self, dtype): def main(args): - pfile = "cucim_feature_results.pickle" if os.path.exists(pfile): with open(pfile, "rb") as f: @@ -74,9 +74,16 @@ def main(args): dtypes = [np.dtype(args.dtype)] for function_name, fixed_kwargs, var_kwargs, 
allow_color, allow_nd in [ - ("multiscale_basic_features", dict(edges=True), dict(texture=[True, False]), True, True), + ( + "multiscale_basic_features", + dict(edges=True), + dict(texture=[True, False]), + True, + True, + ), ("canny", dict(sigma=1.8), dict(), False, False), - # reduced default rings, histograms, orientations to fit daisy at (3840, 2160) into GPU memory + # reduced default rings, histograms, orientations to fit daisy at + # (3840, 2160) into GPU memory ( "daisy", dict(step=4, radius=15, rings=2, histograms=5, orientations=4), @@ -84,29 +91,68 @@ def main(args): False, False, ), - ("structure_tensor", dict(sigma=1, mode="reflect", order="rc"), dict(), False, True), - ("hessian_matrix", dict(sigma=1, mode="reflect", order="rc"), dict(), False, True), - ("hessian_matrix_det", dict(sigma=1, approximate=False), dict(), False, True), + ( + "structure_tensor", + dict(sigma=1, mode="reflect", order="rc"), + dict(), + False, + True, + ), + ( + "hessian_matrix", + dict(sigma=1, mode="reflect", order="rc"), + dict(), + False, + True, + ), + ( + "hessian_matrix_det", + dict(sigma=1, approximate=False), + dict(), + False, + True, + ), ("shape_index", dict(sigma=1, mode="reflect"), dict(), False, False), - ("corner_kitchen_rosenfeld", dict(mode="reflect"), dict(), False, False), - ("corner_harris", dict(k=0.05, eps=1e-6, sigma=1), dict(method=["k", "eps"]), False, False), + ( + "corner_kitchen_rosenfeld", + dict(mode="reflect"), + dict(), + False, + False, + ), + ( + "corner_harris", + dict(k=0.05, eps=1e-6, sigma=1), + dict(method=["k", "eps"]), + False, + False, + ), ("corner_shi_tomasi", dict(sigma=1), dict(), False, False), ("corner_foerstner", dict(sigma=1), dict(), False, False), ("corner_peaks", dict(), dict(min_distance=(2, 3, 5)), False, True), - ("match_template", dict(), dict(pad_input=[False], mode=["reflect"]), False, True), + ( + "match_template", + dict(), + dict(pad_input=[False], mode=["reflect"]), + False, + True, + ), # blob detectors. 
fixed kwargs here are taken from the docstring examples - ("blob_dog", dict(threshold=.05, min_sigma=10, max_sigma=40), dict(), False, True), - ("blob_log", dict(threshold=.3), dict(), False, True), + ( + "blob_dog", + dict(threshold=0.05, min_sigma=10, max_sigma=40), + dict(), + False, + True, + ), + ("blob_log", dict(threshold=0.3), dict(), False, True), ("blob_doh", dict(), dict(), False, False), ]: - if function_name == args.func_name: - shape = tuple(list(map(int,(args.img_size.split(","))))) + shape = tuple(list(map(int, (args.img_size.split(","))))) else: continue - #if function_name in ["corner_peaks", "peak_local_max"] and np.prod(shape) > 1000000: - # skip any large sizes that take too long ndim = len(shape) run_cpu = not args.no_cpu if not allow_nd: @@ -139,7 +185,6 @@ def main(args): run_cpu=run_cpu, ) elif function_name != "match_template": - if function_name == "multiscale_basic_features": fixed_kwargs["channel_axis"] = -1 if shape[-1] == 3 else None if ndim == 3 and shape[-1] != 3: @@ -157,7 +202,6 @@ def main(args): run_cpu=run_cpu, ) else: - B = MatchTemplateBench( function_name=function_name, shape=shape, diff --git a/benchmarks/skimage/cucim_filters_bench.py b/benchmarks/skimage/cucim_filters_bench.py index e12a9a9e8..a6771f59b 100644 --- a/benchmarks/skimage/cucim_filters_bench.py +++ b/benchmarks/skimage/cucim_filters_bench.py @@ -1,18 +1,18 @@ +import argparse import os import pickle -import argparse -import cucim.skimage -import cucim.skimage.filters import numpy as np import pandas as pd import skimage import skimage.filters - from _image_bench import ImageBench -def main(args): +import cucim.skimage +import cucim.skimage.filters + +def main(args): pfile = "cucim_filters_results.pickle" if os.path.exists(pfile): with open(pfile, "rb") as f: @@ -62,7 +62,8 @@ def main(args): # lpi_filter.py # TODO: benchmark wiener # ridges.py - # TODO: had to set meijering, etc allow_nd to False just due to insufficient GPU memory + # TODO: Had to set meijering, etc allow_nd to False just due to + # insufficient GPU memory ( "meijering", dict(sigmas=range(1, 10, 2), alpha=None), @@ -107,16 +108,40 @@ def main(args): ("threshold_minimum", dict(), dict(nbins=[64, 256]), False, True), ("threshold_mean", dict(), dict(), False, True), ("threshold_triangle", dict(), dict(nbins=[64, 256]), False, True), - ("threshold_niblack", dict(), dict(window_size=[7, 15, 65]), False, True), - ("threshold_sauvola", dict(), dict(window_size=[7, 15, 65]), False, True), - ("apply_hysteresis_threshold", dict(low=0.15, high=0.6), dict(), False, True), - ("threshold_multiotsu", dict(), dict(nbins=[64, 256], classes=[3]), False, True), + ( + "threshold_niblack", + dict(), + dict(window_size=[7, 15, 65]), + False, + True, + ), + ( + "threshold_sauvola", + dict(), + dict(window_size=[7, 15, 65]), + False, + True, + ), + ( + "apply_hysteresis_threshold", + dict(low=0.15, high=0.6), + dict(), + False, + True, + ), + ( + "threshold_multiotsu", + dict(), + dict(nbins=[64, 256], classes=[3]), + False, + True, + ), ]: if function_name != args.func_name: continue else: # image sizes/shapes - shape = tuple(list(map(int,(args.img_size.split(','))))) + shape = tuple(list(map(int, (args.img_size.split(","))))) # for shape in [(512, 512), (3840, 2160), (3840, 2160, 3), (192, 192, 192)]: @@ -136,16 +161,16 @@ def main(args): if function_name == "gabor" and np.prod(shape) > 1000000: # avoid cases that are too slow on the CPU - var_kwargs["frequency"] = [f for f in var_kwargs["frequency"] if f >= 0.1] + 
var_kwargs["frequency"] = [ + f for f in var_kwargs["frequency"] if f >= 0.1 + ] if function_name == "median": footprints = [] ndim = len(shape) footprint_sizes = [3, 5, 7, 9] if ndim == 2 else [3, 5, 7] for footprint_size in footprint_sizes: - footprints.append( - np.ones((footprint_size,) * ndim, dtype=bool) - ) + footprints.append(np.ones((footprint_size,) * ndim, dtype=bool)) var_kwargs["footprint"] = footprints if function_name in ["gaussian", "unsharp_mask"]: @@ -171,15 +196,84 @@ def main(args): f.write(all_results.to_markdown()) -if __name__ == '__main__': - parser = argparse.ArgumentParser(description='Benchmarking cuCIM Filters') - func_name_choices = ['gabor', 'gaussian', 'median', 'rank_order', 'unsharp_mask', 'sobel', 'prewitt', 'scharr', 'roberts', 'roberts_pos_diag', 'roberts_neg_diag', 'farid', 'laplace', 'meijering', 'sato', 'frangi', 'hessian', 'threshold_isodata', 'threshold_otsu', 'threshold_yen', 'threshold_local', 'threshold_li', 'threshold_minimum', 'threshold_mean', 'threshold_triangle', 'threshold_niblack', 'threshold_sauvola', 'apply_hysteresis_threshold', 'threshold_multiotsu'] - dtype_choices = ['float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', 'uint64'] - parser.add_argument('-i','--img_size', type=str, help='Size of input image', required=True) - parser.add_argument('-d','--dtype', type=str, help='Dtype of input image', choices=dtype_choices, required=True) - parser.add_argument('-f','--func_name', type=str, help='function to benchmark', choices=func_name_choices, required=True) - parser.add_argument('-t','--duration', type=int, help='time to run benchmark', required=True) - parser.add_argument('--no_cpu', action='store_true', help='disable cpu measurements', default=False) +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Benchmarking cuCIM Filters") + func_name_choices = [ + "gabor", + "gaussian", + "median", + "rank_order", + "unsharp_mask", + "sobel", + "prewitt", + "scharr", + "roberts", + "roberts_pos_diag", + "roberts_neg_diag", + "farid", + "laplace", + "meijering", + "sato", + "frangi", + "hessian", + "threshold_isodata", + "threshold_otsu", + "threshold_yen", + "threshold_local", + "threshold_li", + "threshold_minimum", + "threshold_mean", + "threshold_triangle", + "threshold_niblack", + "threshold_sauvola", + "apply_hysteresis_threshold", + "threshold_multiotsu", + ] + dtype_choices = [ + "float16", + "float32", + "float64", + "int8", + "int16", + "int32", + "int64", + "uint8", + "uint16", + "uint32", + "uint64", + ] + parser.add_argument( + "-i", "--img_size", type=str, help="Size of input image", required=True + ) + parser.add_argument( + "-d", + "--dtype", + type=str, + help="Dtype of input image", + choices=dtype_choices, + required=True, + ) + parser.add_argument( + "-f", + "--func_name", + type=str, + help="function to benchmark", + choices=func_name_choices, + required=True, + ) + parser.add_argument( + "-t", + "--duration", + type=int, + help="time to run benchmark", + required=True, + ) + parser.add_argument( + "--no_cpu", + action="store_true", + help="disable cpu measurements", + default=False, + ) args = parser.parse_args() main(args) diff --git a/benchmarks/skimage/cucim_measure_bench.py b/benchmarks/skimage/cucim_measure_bench.py index 6a8f4d1cc..ef99a74d0 100644 --- a/benchmarks/skimage/cucim_measure_bench.py +++ b/benchmarks/skimage/cucim_measure_bench.py @@ -3,17 +3,17 @@ import os import pickle -import cucim.skimage -import cucim.skimage.measure import cupy as 
cp import numpy as np import pandas as pd import skimage import skimage.measure - from _image_bench import ImageBench from cucim_metrics_bench import MetricsBench +import cucim.skimage +import cucim.skimage.measure + class LabelBench(ImageBench): def __init__( @@ -29,7 +29,6 @@ def __init__( module_gpu=cucim.skimage.measure, run_cpu=True, ): - self.contiguous_labels = contiguous_labels super().__init__( @@ -77,7 +76,6 @@ def __init__( module_gpu=cucim.skimage.measure, run_cpu=True, ): - self.contiguous_labels = contiguous_labels super().__init__( @@ -160,7 +158,6 @@ def set_args(self, dtype): def main(args): - pfile = "cucim_measure_results.pickle" if os.path.exists(pfile): with open(pfile, "rb") as f: @@ -170,7 +167,7 @@ def main(args): dtypes = [np.dtype(args.dtype)] # image sizes/shapes - shape = tuple(list(map(int,(args.img_size.split(','))))) + shape = tuple(list(map(int, (args.img_size.split(","))))) run_cpu = not args.no_cpu for function_name, fixed_kwargs, var_kwargs, allow_color, allow_nd in [ @@ -187,7 +184,7 @@ def main(args): # _moments.py ("moments", dict(), dict(order=[1, 2, 3, 4]), False, False), ("moments_central", dict(), dict(order=[1, 2, 3]), False, True), - # omited from benchmarks (only tiny arrays): moments_normalized, moments_hu + # omitted from benchmarks (only tiny arrays): moments_normalized, moments_hu ("centroid", dict(), dict(), False, True), ("inertia_tensor", dict(), dict(), False, True), ("inertia_tensor_eigvals", dict(), dict(), False, True), @@ -215,14 +212,12 @@ def main(args): True, False, ), # variable block_size configured below - # binary image overlap measures ("intersection_coeff", dict(mask=None), dict(), False, True), ("manders_coloc_coeff", dict(mask=None), dict(), False, True), ("manders_overlap_coeff", dict(mask=None), dict(), False, True), ("pearson_corr_coeff", dict(mask=None), dict(), False, True), ]: - if function_name != args.func_name: continue @@ -237,13 +232,14 @@ def main(args): if shape[-1] == 3 and not allow_color: continue - if function_name in ['label', 'regionprops']: - - Tester = LabelBench if function_name == "label" else RegionpropsBench + if function_name in ["label", "regionprops"]: + Tester = ( + LabelBench if function_name == "label" else RegionpropsBench + ) for contiguous_labels in [True, False]: if contiguous_labels: - index_str = f"contiguous" + index_str = "contiguous" else: index_str = None B = Tester( @@ -258,9 +254,12 @@ def main(args): module_gpu=cucim.skimage.measure, run_cpu=run_cpu, ) - elif function_name in ['intersection_coeff', 'manders_coloc_coeff', - 'manders_overlap_coeff', 'pearson_corr_coeff']: - + elif function_name in [ + "intersection_coeff", + "manders_coloc_coeff", + "manders_overlap_coeff", + "pearson_corr_coeff", + ]: if function_name in ["pearson_corr_coeff", "manders_overlap_coeff"]: # arguments are two images of matching dtype Tester = MetricsBench @@ -282,16 +281,18 @@ def main(args): run_cpu=run_cpu, ) else: - - if function_name == "gabor" and np.prod(shape) > 1000000: # avoid cases that are too slow on the CPU - var_kwargs["frequency"] = [f for f in var_kwargs["frequency"] if f >= 0.1] + var_kwargs["frequency"] = [ + f for f in var_kwargs["frequency"] if f >= 0.1 + ] if function_name == "block_reduce": ndim = len(shape) if shape[-1] == 3: - block_sizes = [(b,) * (ndim - 1) + (3,) for b in (16, 32, 64)] + block_sizes = [ + (b,) * (ndim - 1) + (3,) for b in (16, 32, 64) + ] else: block_sizes = [(b,) * ndim for b in (16, 32, 64)] var_kwargs["block_size"] = block_sizes @@ -332,15 +333,72 @@ def 
main(args): f.write(all_results.to_markdown()) -if __name__ == '__main__': - parser = argparse.ArgumentParser(description='Benchmarking cuCIM measure functions') - func_name_choices = ['label', 'regionprops', 'moments', 'moments_central', 'centroid', 'inertia_tensor', 'inertia_tensor_eigvals', 'block_reduce', 'shannon_entropy', 'profile_line', 'intersection_coeff', 'manders_coloc_coeff', 'manders_overlap_coeff', 'pearson_corr_coeff'] - dtype_choices = ['bool', 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', 'uint64'] - parser.add_argument('-i','--img_size', type=str, help='Size of input image', required=True) - parser.add_argument('-d','--dtype', type=str, help='Dtype of input image', choices=dtype_choices, required=True) - parser.add_argument('-f','--func_name', type=str, help='function to benchmark', choices=func_name_choices, required=True) - parser.add_argument('-t','--duration', type=int, help='time to run benchmark', required=True) - parser.add_argument('--no_cpu', action='store_true', help='disable cpu measurements', default=False) +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Benchmarking cuCIM measure functions" + ) + func_name_choices = [ + "label", + "regionprops", + "moments", + "moments_central", + "centroid", + "inertia_tensor", + "inertia_tensor_eigvals", + "block_reduce", + "shannon_entropy", + "profile_line", + "intersection_coeff", + "manders_coloc_coeff", + "manders_overlap_coeff", + "pearson_corr_coeff", + ] + dtype_choices = [ + "bool", + "float16", + "float32", + "float64", + "int8", + "int16", + "int32", + "int64", + "uint8", + "uint16", + "uint32", + "uint64", + ] + parser.add_argument( + "-i", "--img_size", type=str, help="Size of input image", required=True + ) + parser.add_argument( + "-d", + "--dtype", + type=str, + help="Dtype of input image", + choices=dtype_choices, + required=True, + ) + parser.add_argument( + "-f", + "--func_name", + type=str, + help="function to benchmark", + choices=func_name_choices, + required=True, + ) + parser.add_argument( + "-t", + "--duration", + type=int, + help="time to run benchmark", + required=True, + ) + parser.add_argument( + "--no_cpu", + action="store_true", + help="disable cpu measurements", + default=False, + ) args = parser.parse_args() main(args) diff --git a/benchmarks/skimage/cucim_metrics_bench.py b/benchmarks/skimage/cucim_metrics_bench.py index f741877a8..760f7eedb 100644 --- a/benchmarks/skimage/cucim_metrics_bench.py +++ b/benchmarks/skimage/cucim_metrics_bench.py @@ -7,13 +7,12 @@ import pandas as pd import skimage import skimage.metrics +from _image_bench import ImageBench import cucim.skimage import cucim.skimage.metrics from cucim.skimage import data, measure -from _image_bench import ImageBench - class MetricsBench(ImageBench): def set_args(self, dtype): @@ -40,7 +39,6 @@ def __init__( module_gpu=cucim.skimage.metrics, run_cpu=True, ): - super().__init__( function_name=function_name, shape=shape, @@ -55,9 +53,9 @@ def __init__( def _generate_labels(self, dtype, seed=5): ndim = len(self.shape) - blobs_kwargs = dict(blob_size_fraction=0.05, - volume_fraction=0.35, - seed=seed) + blobs_kwargs = dict( + blob_size_fraction=0.05, volume_fraction=0.35, seed=seed + ) # binary blobs only creates square outputs labels = measure.label( data.binary_blobs(max(self.shape), n_dim=ndim, **blobs_kwargs) @@ -78,7 +76,6 @@ def set_args(self, dtype): def main(args): - pfile = "cucim_metrics_results.pickle" if os.path.exists(pfile): with 
open(pfile, "rb") as f: @@ -88,7 +85,7 @@ def main(args): dtypes = [np.dtype(args.dtype)] # image sizes/shapes - shape = tuple(list(map(int,(args.img_size.split(','))))) + shape = tuple(list(map(int, (args.img_size.split(","))))) run_cpu = not args.no_cpu for function_name, fixed_kwargs, var_kwargs, allow_color, allow_nd in [ @@ -112,7 +109,13 @@ def main(args): ("peak_signal_noise_ratio", dict(data_range=1.0), dict(), True, True), ("normalized_mutual_information", dict(bins=100), dict(), True, True), ("adapted_rand_error", dict(), dict(), False, True), - ("contingency_table", dict(), dict(normalize=[False, True]), False, True), + ( + "contingency_table", + dict(), + dict(normalize=[False, True]), + False, + True, + ), ("variation_of_information", dict(), dict(), False, True), ]: if function_name != args.func_name: @@ -137,7 +140,7 @@ def main(args): "mean_squared_error", "normalized_root_mse", "peak_signal_noise_ratio", - "normalized_mutual_information" + "normalized_mutual_information", ]: B = MetricsBench( function_name=function_name, @@ -171,7 +174,6 @@ def main(args): results = B.run_benchmark(duration=args.duration) all_results = pd.concat([all_results, results["full"]]) - fbase = os.path.splitext(pfile)[0] all_results.to_csv(fbase + ".csv") all_results.to_pickle(pfile) @@ -179,15 +181,65 @@ def main(args): f.write(all_results.to_markdown()) -if __name__ == '__main__': - parser = argparse.ArgumentParser(description='Benchmarking cuCIM metrics functions') - func_name_choices = ['structural_similarity', 'mean_squared_error', 'normalized_root_mse', 'peak_signal_noise_ratio', 'normalized_mutual_information', 'adapted_rand_error', 'contingency_table', 'variation_of_information'] # noqa - dtype_choices = ['float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', 'uint64'] - parser.add_argument('-i','--img_size', type=str, help='Size of input image', required=True) - parser.add_argument('-d','--dtype', type=str, help='Dtype of input image', choices=dtype_choices, required=True) - parser.add_argument('-f','--func_name', type=str, help='function to benchmark', choices=func_name_choices, required=True) - parser.add_argument('-t','--duration', type=int, help='time to run benchmark', required=True) - parser.add_argument('--no_cpu', action='store_true', help='disable cpu measurements', default=False) +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Benchmarking cuCIM metrics functions" + ) + func_name_choices = [ + "structural_similarity", + "mean_squared_error", + "normalized_root_mse", + "peak_signal_noise_ratio", + "normalized_mutual_information", + "adapted_rand_error", + "contingency_table", + "variation_of_information", + ] # noqa + dtype_choices = [ + "float16", + "float32", + "float64", + "int8", + "int16", + "int32", + "int64", + "uint8", + "uint16", + "uint32", + "uint64", + ] + parser.add_argument( + "-i", "--img_size", type=str, help="Size of input image", required=True + ) + parser.add_argument( + "-d", + "--dtype", + type=str, + help="Dtype of input image", + choices=dtype_choices, + required=True, + ) + parser.add_argument( + "-f", + "--func_name", + type=str, + help="function to benchmark", + choices=func_name_choices, + required=True, + ) + parser.add_argument( + "-t", + "--duration", + type=int, + help="time to run benchmark", + required=True, + ) + parser.add_argument( + "--no_cpu", + action="store_true", + help="disable cpu measurements", + default=False, + ) args = parser.parse_args() main(args) diff --git 
a/benchmarks/skimage/cucim_morphology_bench.py b/benchmarks/skimage/cucim_morphology_bench.py index 0b4f3b16b..a4877af8c 100644 --- a/benchmarks/skimage/cucim_morphology_bench.py +++ b/benchmarks/skimage/cucim_morphology_bench.py @@ -6,18 +6,18 @@ import os import pickle -import cucim.skimage -import cucim.skimage.morphology import cupy as cp import numpy as np import pandas as pd +import scipy.ndimage as ndi import skimage import skimage.data import skimage.morphology -import scipy.ndimage as ndi - from _image_bench import ImageBench +import cucim.skimage +import cucim.skimage.morphology + class BinaryMorphologyBench(ImageBench): def __init__( @@ -33,7 +33,6 @@ def __init__( module_gpu=cucim.skimage.morphology, run_cpu=True, ): - array_kwargs = dict(footprint=footprint) if "footprint" in fixed_kwargs: raise ValueError("fixed_kwargs cannot contain 'footprint'") @@ -72,7 +71,7 @@ def set_args(self, dtype): h = ~skimage.data.horse() nrow = math.ceil(self.shape[0] / h.shape[0]) ncol = math.ceil(self.shape[1] / h.shape[1]) - image = np.tile(h, (nrow, ncol))[:self.shape[0], :self.shape[1]] + image = np.tile(h, (nrow, ncol))[: self.shape[0], : self.shape[1]] imaged = cp.asarray(image) self.args_cpu = (image,) self.args_gpu = (imaged,) @@ -80,7 +79,9 @@ def set_args(self, dtype): class ReconstructionBench(ImageBench): def set_args(self, dtype): - coords = cp.meshgrid(*[cp.linspace(0, 6 * cp.pi, s) for s in self.shape], sparse=True) + coords = cp.meshgrid( + *[cp.linspace(0, 6 * cp.pi, s) for s in self.shape], sparse=True + ) bumps = functools.reduce(operator.add, [cp.sin(c) for c in coords]) h = 0.6 seed = bumps - h @@ -116,7 +117,6 @@ def set_args(self, dtype): def main(args): - pfile = "cucim_morphology_results.pickle" if os.path.exists(pfile): with open(pfile, "rb") as f: @@ -126,7 +126,7 @@ def main(args): dtypes = [np.dtype(args.dtype)] # image sizes/shapes - shape = tuple(list(map(int,(args.img_size.split(','))))) + shape = tuple(list(map(int, (args.img_size.split(","))))) run_cpu = not args.no_cpu for function_name, fixed_kwargs, var_kwargs, allow_color, allow_nd in [ @@ -150,14 +150,19 @@ def main(args): ("white_tophat", dict(), dict(), False, True), ("black_tophat", dict(), dict(), False, True), # _skeletonize.py - ("medial_axis", dict(random_state=123), dict(return_distance=[False, True]), False, False), + ( + "medial_axis", + dict(random_state=123), + dict(return_distance=[False, True]), + False, + False, + ), ("thin", dict(), dict(), False, True), # grayreconstruct.py ("reconstruction", dict(), dict(), False, True), # footprints.py # OMIT the functions from this file (each creates a structuring element) ]: - if function_name != args.func_name: continue @@ -166,7 +171,7 @@ def main(args): continue ndim = len(shape) - if function_name in ['thin', 'medial_axis']: + if function_name in ["thin", "medial_axis"]: if ndim != 2: raise ValueError("only 2d benchmark data has been implemented") @@ -184,8 +189,7 @@ def main(args): run_cpu=run_cpu, ) - if function_name.startswith('binary'): - + if function_name.startswith("binary"): if not allow_nd and ndim > 2: continue @@ -206,8 +210,7 @@ def main(args): run_cpu=run_cpu, ) - elif function_name.startswith('isotropic'): - + elif function_name.startswith("isotropic"): if not allow_nd and ndim > 2: continue @@ -222,7 +225,7 @@ def main(args): run_cpu=run_cpu, ) - elif function_name in ['remove_small_holes', 'remove_small_objects']: + elif function_name in ["remove_small_holes", "remove_small_objects"]: if not allow_nd and ndim > 2: continue @@ -244,7 
+247,6 @@ def main(args): run_cpu=run_cpu, ) else: - if not allow_nd: if not allow_color: if ndim > 2: @@ -278,7 +280,7 @@ def main(args): all_results.to_csv(fbase + ".csv") all_results.to_pickle(pfile) try: - import tabular + import tabular # noqa: F401 with open(fbase + ".md", "wt") as f: f.write(all_results.to_markdown()) @@ -286,8 +288,10 @@ def main(args): pass -if __name__ == '__main__': - parser = argparse.ArgumentParser(description='Benchmarking cuCIM morphology functions') +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Benchmarking cuCIM morphology functions" + ) # fmt: off func_name_choices = [ 'binary_erosion', 'binary_dilation', 'binary_opening', @@ -297,12 +301,55 @@ def main(args): 'white_tophat', 'black_tophat', 'thin', 'medial_axis', 'reconstruction' ] # fmt: on - dtype_choices = ['float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', 'uint64'] - parser.add_argument('-i','--img_size', type=str, help='Size of input image (omit color channel, it will be appended as needed)', required=True) - parser.add_argument('-d','--dtype', type=str, help='Dtype of input image', choices = dtype_choices, required=True) - parser.add_argument('-f','--func_name', type=str, help='function to benchmark', choices = func_name_choices, required=True) - parser.add_argument('-t','--duration', type=int, help='time to run benchmark', required=True) - parser.add_argument('--no_cpu', action='store_true', help='disable cpu measurements', default=False) + dtype_choices = [ + "float16", + "float32", + "float64", + "int8", + "int16", + "int32", + "int64", + "uint8", + "uint16", + "uint32", + "uint64", + ] + parser.add_argument( + "-i", + "--img_size", + type=str, + help="Size of input image (omit color channel, it will be appended as needed)", + required=True, + ) + parser.add_argument( + "-d", + "--dtype", + type=str, + help="Dtype of input image", + choices=dtype_choices, + required=True, + ) + parser.add_argument( + "-f", + "--func_name", + type=str, + help="function to benchmark", + choices=func_name_choices, + required=True, + ) + parser.add_argument( + "-t", + "--duration", + type=int, + help="time to run benchmark", + required=True, + ) + parser.add_argument( + "--no_cpu", + action="store_true", + help="disable cpu measurements", + default=False, + ) args = parser.parse_args() main(args) diff --git a/benchmarks/skimage/cucim_registration_bench.py b/benchmarks/skimage/cucim_registration_bench.py index 6d275dcec..5f9802bd1 100644 --- a/benchmarks/skimage/cucim_registration_bench.py +++ b/benchmarks/skimage/cucim_registration_bench.py @@ -3,16 +3,16 @@ import os import pickle -import cucim.skimage -import cucim.skimage.registration import cupy as cp import numpy as np import pandas as pd import skimage import skimage.registration - from _image_bench import ImageBench +import cucim.skimage +import cucim.skimage.registration + class RegistrationBench(ImageBench): def set_args(self, dtype): @@ -35,7 +35,6 @@ def set_args(self, dtype): def main(args): - pfile = "cucim_registration_results.pickle" if os.path.exists(pfile): with open(pfile, "rb") as f: @@ -45,29 +44,37 @@ def main(args): dtypes = [np.dtype(args.dtype)] # image sizes/shapes - shape = tuple(list(map(int,(args.img_size.split(','))))) + shape = tuple(list(map(int, (args.img_size.split(","))))) run_cpu = not args.no_cpu for function_name, fixed_kwargs, var_kwargs, allow_color, allow_nd in [ # _phase_cross_correlation.py ("phase_cross_correlation", dict(), dict(), False, 
True), # optical flow functions - ("optical_flow_tvl1", dict(), dict(num_iter=[10], num_warp=[5]), False, True), + ( + "optical_flow_tvl1", + dict(), + dict(num_iter=[10], num_warp=[5]), + False, + True, + ), ( "optical_flow_ilk", dict(), - dict(radius=[3, 7], num_warp=[10], gaussian=[False, True], prefilter=[False, True]), + dict( + radius=[3, 7], + num_warp=[10], + gaussian=[False, True], + prefilter=[False, True], + ), False, True, ), - ]: - if function_name != args.func_name: continue - if function_name == 'phase_cross_correlation': - + if function_name == "phase_cross_correlation": ndim = len(shape) if not allow_nd: if not allow_color: @@ -80,7 +87,6 @@ def main(args): continue for masked in [True, False]: - index_str = f"masked={masked}" if masked: moving_mask = cp.ones(shape, dtype=bool) @@ -110,7 +116,6 @@ def main(args): all_results = pd.concat([all_results, results["full"]]) else: - ndim = len(shape) if not allow_nd: if not allow_color: @@ -135,12 +140,11 @@ def main(args): results = B.run_benchmark(duration=args.duration) all_results = pd.concat([all_results, results["full"]]) - fbase = os.path.splitext(pfile)[0] all_results.to_csv(fbase + ".csv") all_results.to_pickle(pfile) try: - import tabular + import tabular # noqa: F401 with open(fbase + ".md", "wt") as f: f.write(all_results.to_markdown()) @@ -148,15 +152,64 @@ def main(args): pass -if __name__ == '__main__': - parser = argparse.ArgumentParser(description='Benchmarking cuCIM registration functions') - func_name_choices = ['phase_cross_correlation', 'optical_flow_tvl1', 'optical_flow_ilk'] - dtype_choices = ['float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', 'uint64'] - parser.add_argument('-i','--img_size', type=str, help='Size of input image (omit color channel, it will be appended as needed)', required=True) - parser.add_argument('-d','--dtype', type=str, help='Dtype of input image', choices = dtype_choices, required=True) - parser.add_argument('-f','--func_name', type=str, help='function to benchmark', choices = func_name_choices, required=True) - parser.add_argument('-t','--duration', type=int, help='time to run benchmark', required=True) - parser.add_argument('--no_cpu', action='store_true', help='disable cpu measurements', default=False) +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Benchmarking cuCIM registration functions" + ) + func_name_choices = [ + "phase_cross_correlation", + "optical_flow_tvl1", + "optical_flow_ilk", + ] + dtype_choices = [ + "float16", + "float32", + "float64", + "int8", + "int16", + "int32", + "int64", + "uint8", + "uint16", + "uint32", + "uint64", + ] + parser.add_argument( + "-i", + "--img_size", + type=str, + help="Size of input image (omit color channel, it will be appended as needed)", + required=True, + ) + parser.add_argument( + "-d", + "--dtype", + type=str, + help="Dtype of input image", + choices=dtype_choices, + required=True, + ) + parser.add_argument( + "-f", + "--func_name", + type=str, + help="function to benchmark", + choices=func_name_choices, + required=True, + ) + parser.add_argument( + "-t", + "--duration", + type=int, + help="time to run benchmark", + required=True, + ) + parser.add_argument( + "--no_cpu", + action="store_true", + help="disable cpu measurements", + default=False, + ) args = parser.parse_args() main(args) diff --git a/benchmarks/skimage/cucim_restoration_bench.py b/benchmarks/skimage/cucim_restoration_bench.py index a1523dbae..4e3711519 100644 --- 
a/benchmarks/skimage/cucim_restoration_bench.py +++ b/benchmarks/skimage/cucim_restoration_bench.py @@ -3,18 +3,18 @@ import os import pickle -import cucim.skimage -import cucim.skimage.restoration import cupy as cp import cupyx.scipy.ndimage as ndi import numpy as np import pandas as pd import skimage import skimage.restoration -from cucim.skimage.restoration import denoise_tv_chambolle as tv_gpu +from _image_bench import ImageBench from skimage.restoration import denoise_tv_chambolle as tv_cpu -from _image_bench import ImageBench +import cucim.skimage +import cucim.skimage.restoration +from cucim.skimage.restoration import denoise_tv_chambolle as tv_gpu class DenoiseBench(ImageBench): @@ -98,9 +98,7 @@ def set_args(self, dtype): self.args_gpu = (imaged, psfd) - def main(args): - pfile = "cucim_restoration_results.pickle" if os.path.exists(pfile): with open(pfile, "rb") as f: @@ -110,10 +108,9 @@ def main(args): dtypes = [np.dtype(args.dtype)] # image sizes/shapes - shape = tuple(list(map(int,(args.img_size.split(','))))) + shape = tuple(list(map(int, (args.img_size.split(","))))) run_cpu = not args.no_cpu - for function_name, fixed_kwargs, var_kwargs, allow_color, allow_nd in [ # _denoise.py ("denoise_tv_chambolle", dict(), dict(weight=[0.02]), True, True), @@ -124,7 +121,6 @@ def main(args): ("unsupervised_wiener", dict(), dict(), False, False), ("richardson_lucy", dict(), dict(num_iter=[5]), False, True), ]: - if function_name != args.func_name: continue @@ -139,8 +135,7 @@ def main(args): if shape[-1] == 3 and not allow_color: continue - if function_name in ['denoise_tv_chambolle', 'calibrate_denoiser']: - + if function_name in ["denoise_tv_chambolle", "calibrate_denoiser"]: if function_name == "denoise_tv_chambolle": fixed_kwargs["channel_axis"] = -1 if shape[-1] == 3 else None @@ -162,8 +157,11 @@ def main(args): results = B.run_benchmark(duration=args.duration) all_results = pd.concat([all_results, results["full"]]) - elif function_name in ['wiener', 'unsupervised_wiener', 'richardson_lucy']: - + elif function_name in [ + "wiener", + "unsupervised_wiener", + "richardson_lucy", + ]: B = DeconvolutionBench( function_name=function_name, shape=shape, @@ -181,7 +179,7 @@ def main(args): all_results.to_csv(fbase + ".csv") all_results.to_pickle(pfile) try: - import tabular + import tabular # noqa: F401 with open(fbase + ".md", "wt") as f: f.write(all_results.to_markdown()) @@ -189,15 +187,66 @@ def main(args): pass -if __name__ == '__main__': - parser = argparse.ArgumentParser(description='Benchmarking cuCIM restoration functions') - func_name_choices = ['denoise_tv_chambolle', 'calibrate_denoiser', 'wiener', 'unsupervised_wiener', 'richardson_lucy'] - dtype_choices = ['float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', 'uint64'] - parser.add_argument('-i','--img_size', type=str, help='Size of input image (omit color channel, it will be appended as needed)', required=True) - parser.add_argument('-d','--dtype', type=str, help='Dtype of input image', choices = dtype_choices, required=True) - parser.add_argument('-f','--func_name', type=str, help='function to benchmark', choices = func_name_choices, required=True) - parser.add_argument('-t','--duration', type=int, help='time to run benchmark', required=True) - parser.add_argument('--no_cpu', action='store_true', help='disable cpu measurements', default=False) +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Benchmarking cuCIM restoration functions" + ) + 
func_name_choices = [ + "denoise_tv_chambolle", + "calibrate_denoiser", + "wiener", + "unsupervised_wiener", + "richardson_lucy", + ] + dtype_choices = [ + "float16", + "float32", + "float64", + "int8", + "int16", + "int32", + "int64", + "uint8", + "uint16", + "uint32", + "uint64", + ] + parser.add_argument( + "-i", + "--img_size", + type=str, + help="Size of input image (omit color channel, it will be appended as needed)", + required=True, + ) + parser.add_argument( + "-d", + "--dtype", + type=str, + help="Dtype of input image", + choices=dtype_choices, + required=True, + ) + parser.add_argument( + "-f", + "--func_name", + type=str, + help="function to benchmark", + choices=func_name_choices, + required=True, + ) + parser.add_argument( + "-t", + "--duration", + type=int, + help="time to run benchmark", + required=True, + ) + parser.add_argument( + "--no_cpu", + action="store_true", + help="disable cpu measurements", + default=False, + ) args = parser.parse_args() main(args) diff --git a/benchmarks/skimage/cucim_segmentation_bench.py b/benchmarks/skimage/cucim_segmentation_bench.py index c34257e87..38add9d64 100644 --- a/benchmarks/skimage/cucim_segmentation_bench.py +++ b/benchmarks/skimage/cucim_segmentation_bench.py @@ -3,16 +3,16 @@ import os import pickle -import cucim.skimage -from cucim.skimage import data, exposure, measure, segmentation import cupy as cp import numpy as np import pandas as pd import skimage import skimage.segmentation - from _image_bench import ImageBench +import cucim.skimage +from cucim.skimage import data, measure + class LabelBench(ImageBench): def __init__( @@ -27,7 +27,6 @@ def __init__( module_gpu=cucim.skimage.measure, run_cpu=True, ): - super().__init__( function_name=function_name, shape=shape, @@ -42,9 +41,9 @@ def __init__( def _generate_labels(self, dtype): ndim = len(self.shape) - blobs_kwargs = dict(blob_size_fraction=0.05, - volume_fraction=0.35, - seed=5) + blobs_kwargs = dict( + blob_size_fraction=0.05, volume_fraction=0.35, seed=5 + ) # binary blobs only creates square outputs labels = measure.label( data.binary_blobs(max(self.shape), n_dim=ndim, **blobs_kwargs) @@ -63,7 +62,6 @@ def set_args(self, dtype): class LabelAndImageBench(LabelBench): - def set_args(self, dtype): labels_d = self._generate_labels(dtype) labels = cp.asnumpy(labels_d) @@ -100,12 +98,10 @@ def set_args(self, dtype): class RandomWalkerBench(ImageBench): - - def set_args(self, dtype): # Note: dtype only used for markers array, data is hard-coded as float32 - if np.dtype(dtype).kind not in 'iu': + if np.dtype(dtype).kind not in "iu": raise ValueError("random_walker markers require integer dtype") n_dim = len(self.shape) @@ -135,7 +131,6 @@ def set_args(self, dtype): def main(args): - pfile = "cucim_segmentation_results.pickle" if os.path.exists(pfile): with open(pfile, "rb") as f: @@ -146,7 +141,7 @@ def main(args): dtypes = [np.dtype(args.dtype)] dtypes_label = [np.dtype(args.dtype_label)] # image sizes/shapes - shape = tuple(list(map(int,(args.img_size.split(','))))) + shape = tuple(list(map(int, (args.img_size.split(","))))) run_cpu = not args.no_cpu for function_name, fixed_kwargs, var_kwargs, allow_color, allow_nd in [ @@ -178,7 +173,9 @@ def main(args): ( "find_boundaries", dict(), - dict(connectivity=[1], mode=["thick", "inner", "outer", "subpixel"]), + dict( + connectivity=[1], mode=["thick", "inner", "outer", "subpixel"] + ), False, True, ), @@ -191,8 +188,8 @@ def main(args): ), ( "random_walker", - dict(beta=4, tol=1.e-4, prob_tol=1.e-2), - dict(mode=['cg', 
'cg_j']), + dict(beta=4, tol=1.0e-4, prob_tol=1.0e-2), + dict(mode=["cg", "cg_j"]), False, True, ), @@ -225,7 +222,6 @@ def main(args): # omit: disk_level_set (simple array generation function) # omit: checkerboard_level_set (simple array generation function) ]: - if function_name != args.func_name: continue @@ -240,13 +236,20 @@ def main(args): if shape[-1] == 3 and not allow_color: continue - if function_name in ["clear_border", "expand_labels", "relabel_sequential", "find_boundaries", "mark_boundaries", "random_walker"]: - if function_name == 'random_walker': - fixed_kwargs['channel_axis'] = -1 if shape[-1] == 3 else None + if function_name in [ + "clear_border", + "expand_labels", + "relabel_sequential", + "find_boundaries", + "mark_boundaries", + "random_walker", + ]: + if function_name == "random_walker": + fixed_kwargs["channel_axis"] = -1 if shape[-1] == 3 else None - if function_name == 'mark_boundaries': + if function_name == "mark_boundaries": bench_func = LabelAndImageBench - elif function_name == 'random_walker': + elif function_name == "random_walker": bench_func = RandomWalkerBench else: bench_func = LabelBench @@ -259,13 +262,17 @@ def main(args): var_kwargs=var_kwargs, module_cpu=skimage.segmentation, module_gpu=cucim.skimage.segmentation, + run_cpu=run_cpu, ) results = B.run_benchmark(duration=args.duration) all_results = pd.concat([all_results, results["full"]]) - - elif function_name in ["inverse_gaussian_gradient", "morphological_geodesic_active_contour", "morphological_chan_vese", "chan_vese"]: - + elif function_name in [ + "inverse_gaussian_gradient", + "morphological_geodesic_active_contour", + "morphological_chan_vese", + "chan_vese", + ]: if function_name == "morphological_geodesic_active_contour": bench_class = MorphGeodesicBench else: @@ -279,6 +286,7 @@ def main(args): var_kwargs=var_kwargs, module_cpu=skimage.segmentation, module_gpu=cucim.skimage.segmentation, + run_cpu=run_cpu, ) results = B.run_benchmark(duration=args.duration) all_results = pd.concat([all_results, results["full"]]) @@ -287,7 +295,7 @@ def main(args): all_results.to_csv(fbase + ".csv") all_results.to_pickle(pfile) try: - import tabular + import tabular # noqa: F401 with open(fbase + ".md", "wt") as f: f.write(all_results.to_markdown()) @@ -295,17 +303,89 @@ def main(args): pass -if __name__ == '__main__': - parser = argparse.ArgumentParser(description='Benchmarking cuCIM segmentation functions') - func_name_choices = ["clear_border", "expand_labels", "relabel_sequential", "find_boundaries", "mark_boundaries", "random_walker", "inverse_gaussian_gradient", "morphological_geodesic_active_contour", "morphological_chan_vese", "chan_vese"] - label_dtype_choices = ['int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', 'uint64'] - dtype_choices = ['float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', 'uint64'] - parser.add_argument('-i','--img_size', type=str, help='Size of input image (omit color channel, it will be appended as needed)', required=True) - parser.add_argument('-d','--dtype', type=str, help='Dtype of input image', choices = dtype_choices, required=True) - parser.add_argument('--dtype_label', type=str, help='Dtype of input image', choices = label_dtype_choices, required=False, default='uint8') - parser.add_argument('-f','--func_name', type=str, help='function to benchmark', choices = func_name_choices, required=True) - parser.add_argument('-t','--duration', type=int, help='time to run benchmark', required=True) - 
parser.add_argument('--no_cpu', action='store_true', help='disable cpu measurements', default=False) +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Benchmarking cuCIM segmentation functions" + ) + func_name_choices = [ + "clear_border", + "expand_labels", + "relabel_sequential", + "find_boundaries", + "mark_boundaries", + "random_walker", + "inverse_gaussian_gradient", + "morphological_geodesic_active_contour", + "morphological_chan_vese", + "chan_vese", + ] + label_dtype_choices = [ + "int8", + "int16", + "int32", + "int64", + "uint8", + "uint16", + "uint32", + "uint64", + ] + dtype_choices = [ + "float16", + "float32", + "float64", + "int8", + "int16", + "int32", + "int64", + "uint8", + "uint16", + "uint32", + "uint64", + ] + parser.add_argument( + "-i", + "--img_size", + type=str, + help="Size of input image (omit color channel, it will be appended as needed)", + required=True, + ) + parser.add_argument( + "-d", + "--dtype", + type=str, + help="Dtype of input image", + choices=dtype_choices, + required=True, + ) + parser.add_argument( + "--dtype_label", + type=str, + help="Dtype of label image", + choices=label_dtype_choices, + required=False, + default="uint8", + ) + parser.add_argument( + "-f", + "--func_name", + type=str, + help="function to benchmark", + choices=func_name_choices, + required=True, + ) + parser.add_argument( + "-t", + "--duration", + type=int, + help="time to run benchmark", + required=True, + ) + parser.add_argument( + "--no_cpu", + action="store_true", + help="disable cpu measurements", + default=False, + ) args = parser.parse_args() main(args) diff --git a/benchmarks/skimage/cucim_transform_bench.py b/benchmarks/skimage/cucim_transform_bench.py index b6aa315f9..74a9b4d0c 100644 --- a/benchmarks/skimage/cucim_transform_bench.py +++ b/benchmarks/skimage/cucim_transform_bench.py @@ -2,18 +2,17 @@ import os import pickle -import cucim.skimage -import cucim.skimage.transform import numpy as np import pandas as pd import skimage import skimage.transform - from _image_bench import ImageBench +import cucim.skimage +import cucim.skimage.transform -def main(args): +def main(args): pfile = "cucim_transform_results.pickle" if os.path.exists(pfile): with open(pfile, "rb") as f: @@ -23,7 +22,7 @@ def main(args): dtypes = [np.dtype(args.dtype)] # image sizes/shapes - shape = tuple(list(map(int,(args.img_size.split(','))))) + shape = tuple(list(map(int, (args.img_size.split(","))))) run_cpu = not args.no_cpu for function_name, fixed_kwargs, var_kwargs, allow_color, allow_nd in [ @@ -56,7 +55,13 @@ def main(args): False, False, ), - ("downscale_local_mean", dict(), dict(), True, True), # factors handled in loop below + ( + "downscale_local_mean", + dict(), + dict(), + True, + True, + ), # factors handled in loop below ( "swirl", dict(strength=1, preserve_range=True), @@ -85,7 +90,6 @@ def main(args): True, ), ]: - if function_name != args.func_name: continue @@ -102,10 +106,18 @@ def main(args): ndim_spatial = ndim - 1 if shape[-1] == 3 else ndim - if function_name in ["rescale", "warp_polar", "pyramid_gaussian", "pyramid_laplacian"]: + if function_name in [ + "rescale", + "warp_polar", + "pyramid_gaussian", + "pyramid_laplacian", + ]: fixed_kwargs["channel_axis"] = -1 if ndim_spatial < ndim else None - function_is_generator = function_name in ["pyramid_gaussian", "pyramid_laplacian"] + function_is_generator = function_name in [ + "pyramid_gaussian", + "pyramid_laplacian", + ] if function_name in ["rescale", "resize", "resize_local_mean"]: scales = 
[0.75, 1.25] @@ -116,10 +128,13 @@ def main(args): if ndim_spatial < ndim: # don't resize along channels dimension out_shapes = [ - tuple([int(s_ * s) for s_ in shape[:-1]]) + (shape[-1],) for s in scales + tuple([int(s_ * s) for s_ in shape[:-1]]) + (shape[-1],) + for s in scales ] else: - out_shapes = [tuple([int(s_ * s) for s_ in shape]) for s in scales] + out_shapes = [ + tuple([int(s_ * s) for s_ in shape]) for s in scales + ] var_kwargs["output_shape"] = out_shapes elif function_name == "downscale_local_mean": @@ -139,6 +154,7 @@ def main(args): module_cpu=skimage.transform, module_gpu=cucim.skimage.transform, function_is_generator=function_is_generator, + run_cpu=run_cpu, ) results = B.run_benchmark(duration=args.duration) all_results = pd.concat([all_results, results["full"]]) @@ -147,7 +163,7 @@ def main(args): all_results.to_csv(fbase + ".csv") all_results.to_pickle(pfile) try: - import tabular + import tabular # noqa: F401 with open(fbase + ".md", "wt") as f: f.write(all_results.to_markdown()) @@ -155,15 +171,70 @@ def main(args): pass -if __name__ == '__main__': - parser = argparse.ArgumentParser(description='Benchmarking cuCIM transform functions') - func_name_choices = ['resize', 'resize_local_mean', 'rescale', 'rotate', 'downscale_local_mean', 'warp_polar', 'integral_image', 'pyramid_gaussian', 'pyramid_laplacian'] - dtype_choices = ['float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', 'uint64'] - parser.add_argument('-i','--img_size', type=str, help='Size of input image (omit color channel, it will be appended as needed)', required=True) - parser.add_argument('-d','--dtype', type=str, help='Dtype of input image', choices = dtype_choices, required=True) - parser.add_argument('-f','--func_name', type=str, help='function to benchmark', choices = func_name_choices, required=True) - parser.add_argument('-t','--duration', type=int, help='time to run benchmark', required=True) - parser.add_argument('--no_cpu', action='store_true', help='disable cpu measurements', default=False) +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Benchmarking cuCIM transform functions" + ) + func_name_choices = [ + "resize", + "resize_local_mean", + "rescale", + "rotate", + "downscale_local_mean", + "warp_polar", + "integral_image", + "pyramid_gaussian", + "pyramid_laplacian", + ] + dtype_choices = [ + "float16", + "float32", + "float64", + "int8", + "int16", + "int32", + "int64", + "uint8", + "uint16", + "uint32", + "uint64", + ] + parser.add_argument( + "-i", + "--img_size", + type=str, + help="Size of input image (omit color channel, it will be appended as needed)", + required=True, + ) + parser.add_argument( + "-d", + "--dtype", + type=str, + help="Dtype of input image", + choices=dtype_choices, + required=True, + ) + parser.add_argument( + "-f", + "--func_name", + type=str, + help="function to benchmark", + choices=func_name_choices, + required=True, + ) + parser.add_argument( + "-t", + "--duration", + type=int, + help="time to run benchmark", + required=True, + ) + parser.add_argument( + "--no_cpu", + action="store_true", + help="disable cpu measurements", + default=False, + ) args = parser.parse_args() main(args) diff --git a/benchmarks/skimage/cupyx_scipy_ndimage_filter_bench.py b/benchmarks/skimage/cupyx_scipy_ndimage_filter_bench.py index e09f3fe1d..a820c699e 100644 --- a/benchmarks/skimage/cupyx_scipy_ndimage_filter_bench.py +++ b/benchmarks/skimage/cupyx_scipy_ndimage_filter_bench.py @@ -7,7 +7,6 @@ import numpy as 
np import pandas as pd import scipy - from _image_bench import ImageBench @@ -23,7 +22,6 @@ def __init__( module_cpu=scipy.ndimage, module_gpu=cupyx.scipy.ndimage, ): - self.weights_shape = weights_shape super().__init__( @@ -78,22 +76,29 @@ def set_args(self, dtype): ("gaussian_filter", dict(mode=["nearest"], sigma=[0.33, 1, 3, 4, 9])), ( "gaussian_filter1d", - dict(mode=["nearest"], sigma=[0.33, 3, 9], axis=[0, -1], order=[0, 1]), + dict( + mode=["nearest"], sigma=[0.33, 3, 9], axis=[0, -1], order=[0, 1] + ), ), ("maximum_filter", dict(mode=["nearest"], size=[3, 5, 7])), ("maximum_filter1d", dict(mode=["nearest"], size=[3, 7], axis=[0, -1])), ("minimum_filter", dict(mode=["nearest"], size=[3, 5, 7])), ("minimum_filter1d", dict(mode=["nearest"], size=[3, 7], axis=[0, -1])), ("median_filter", dict(mode=["nearest"], size=[3, 5, 7])), - ("percentile_filter", dict(mode=["nearest"], size=[3, 5, 7], percentile=[30])), + ( + "percentile_filter", + dict(mode=["nearest"], size=[3, 5, 7], percentile=[30]), + ), ("rank_filter", dict(mode=["nearest"], size=[3, 5, 7], rank=[-2])), ("prewitt", dict(mode=["nearest"], axis=[0, -1])), ("sobel", dict(mode=["nearest"], axis=[0, -1])), ("laplace", dict(mode=["nearest"])), ("gaussian_laplace", dict(mode=["nearest"], sigma=[0.33, 3, 9])), - ("gaussian_gradient_magnitude", dict(mode=["nearest"], sigma=[0.33, 3, 9])), + ( + "gaussian_gradient_magnitude", + dict(mode=["nearest"], sigma=[0.33, 3, 9]), + ), ]: - B = FilterBench( function_name=fname, shape=shape, diff --git a/benchmarks/skimage/cupyx_scipy_ndimage_fourier_bench.py b/benchmarks/skimage/cupyx_scipy_ndimage_fourier_bench.py index 06eb19fc2..92a4795e0 100644 --- a/benchmarks/skimage/cupyx_scipy_ndimage_fourier_bench.py +++ b/benchmarks/skimage/cupyx_scipy_ndimage_fourier_bench.py @@ -5,7 +5,6 @@ import cupy as cp import numpy as np import pandas as pd - from _image_bench import ImageBench pfile = "fourier_results.pickle" @@ -24,7 +23,9 @@ class FourierBench(ImageBench): def set_args(self, dtype): cplx_dt = np.promote_types(dtype, np.complex64) - imaged = cupy.testing.shaped_random(self.shape, xp=cp, dtype=cplx_dt) + imaged = cupy.testing.shaped_random( + self.shape, xp=cp, dtype=cplx_dt + ) image = cp.asnumpy(imaged) self.args_cpu = (image,) self.args_gpu = (imaged,) diff --git a/benchmarks/skimage/cupyx_scipy_ndimage_interp_bench.py b/benchmarks/skimage/cupyx_scipy_ndimage_interp_bench.py index de518ab82..63773732c 100644 --- a/benchmarks/skimage/cupyx_scipy_ndimage_interp_bench.py +++ b/benchmarks/skimage/cupyx_scipy_ndimage_interp_bench.py @@ -6,7 +6,6 @@ import cupy as cp import numpy as np import pandas as pd - from _image_bench import ImageBench @@ -20,13 +19,14 @@ def set_args(self, dtype): class MapCoordinatesBench(ImageBench): def set_args(self, dtype): - imaged = cupy.testing.shaped_random(self.shape, xp=cp, dtype=dtype) image = cp.asnumpy(imaged) rstate = cp.random.RandomState(5) ndim = len(self.shape) - coordsd = cp.indices(self.shape) + 0.1 * rstate.standard_normal((ndim,) + self.shape) + coordsd = cp.indices(self.shape) + 0.1 * rstate.standard_normal( + (ndim,) + self.shape + ) coords = cupy.asnumpy(coordsd) self.args_cpu = (image, coords) @@ -69,7 +69,11 @@ def set_args(self, dtype): dict(output=None, output_shape=None, prefilter=prefilter), dict(mode=modes, order=orders), ), - ("zoom", dict(output=None, zoom=1.1, prefilter=prefilter), dict(mode=modes, order=orders)), + ( + "zoom", + dict(output=None, zoom=1.1, prefilter=prefilter), + dict(mode=modes, order=orders), + ), ( "shift", 
dict(output=None, shift=1.5, prefilter=prefilter), @@ -77,7 +81,13 @@ def set_args(self, dtype): ), ( "rotate", - dict(output=None, reshape=True, axes=(0, 1), angle=30, prefilter=prefilter), + dict( + output=None, + reshape=True, + axes=(0, 1), + angle=30, + prefilter=prefilter, + ), dict(mode=modes, order=orders), ), ( @@ -102,7 +112,6 @@ def set_args(self, dtype): ), ), ]: - if fname == "affine_transform1": # affine_transform case 1: the general affine matrix code path fname = fname[:-1] diff --git a/benchmarks/skimage/cupyx_scipy_ndimage_measurements_bench.py b/benchmarks/skimage/cupyx_scipy_ndimage_measurements_bench.py index bb1d479b1..61b3de5a4 100644 --- a/benchmarks/skimage/cupyx_scipy_ndimage_measurements_bench.py +++ b/benchmarks/skimage/cupyx_scipy_ndimage_measurements_bench.py @@ -9,7 +9,6 @@ import pandas as pd import scipy import scipy.ndimage as ndi - from _image_bench import ImageBench @@ -27,7 +26,6 @@ def __init__( module_cpu=scipy.ndimage, module_gpu=cupyx.scipy.ndimage, ): - self.contiguous_labels = contiguous_labels array_kwargs = dict(structure=structure) if "structure" in fixed_kwargs: @@ -78,7 +76,6 @@ def __init__( module_cpu=scipy.ndimage, module_gpu=cupyx.scipy.ndimage, ): - self.nlabels = nlabels self.use_index = use_index super().__init__( @@ -127,7 +124,10 @@ def set_args(self, dtype): ndim = len(shape) for fname, var_kwargs in [ - ("label", {}), # dict(greyscale_mode=[False, True]) not available in cupyx + ( + "label", + {}, + ), # dict(greyscale_mode=[False, True]) not available in cupyx ]: for contiguous_labels in [True, False]: if contiguous_labels: diff --git a/benchmarks/skimage/cupyx_scipy_ndimage_morphology_bench.py b/benchmarks/skimage/cupyx_scipy_ndimage_morphology_bench.py index 7fbe9e7c6..d83ee72f9 100644 --- a/benchmarks/skimage/cupyx_scipy_ndimage_morphology_bench.py +++ b/benchmarks/skimage/cupyx_scipy_ndimage_morphology_bench.py @@ -1,4 +1,3 @@ -import math import os import pickle @@ -9,7 +8,6 @@ import pandas as pd import scipy import scipy.ndimage as ndi - from _image_bench import ImageBench @@ -27,7 +25,6 @@ def __init__( module_cpu=scipy.ndimage, module_gpu=cupyx.scipy.ndimage, ): - array_kwargs = dict(structure=structure, mask=mask) if "structure" in fixed_kwargs: raise ValueError("fixed_kwargs cannot contain 'structure'") @@ -66,7 +63,6 @@ def __init__( module_cpu=scipy.ndimage, module_gpu=cupyx.scipy.ndimage, ): - array_kwargs = dict(structure=structure, footprint=footprint) if "structure" in fixed_kwargs: raise ValueError("fixed_kwargs cannot contain 'structure'") diff --git a/ci/release/update-version.sh b/ci/release/update-version.sh index 37f67015a..37cee5a67 100755 --- a/ci/release/update-version.sh +++ b/ci/release/update-version.sh @@ -34,8 +34,10 @@ function sed_runner() { sed_runner 's/version = .*/version = '"'${NEXT_SHORT_TAG}'"'/g' docs/source/conf.py sed_runner 's/release = .*/release = '"'${NEXT_FULL_TAG}'"'/g' docs/source/conf.py +sed_runner "s/^version = .*/version = \"${NEXT_FULL_TAG}\"/g" python/cucim/pyproject.toml sed_runner "s/${CURRENT_LONG_TAG}/${NEXT_FULL_TAG}/g" VERSION sed_runner "s/${CURRENT_LONG_TAG}/${NEXT_FULL_TAG}/g" python/cucim/VERSION +sed_runner "s/__version__ = .*/__version__ = \"${NEXT_FULL_TAG}\"/g" python/cucim/src/cucim/__init__.pyi sed_runner "s/${CURRENT_LONG_TAG}/${NEXT_FULL_TAG}/g" cpp/plugins/cucim.kit.cuslide/VERSION sed_runner "s/${CURRENT_LONG_TAG}/${NEXT_FULL_TAG}/g" cpp/plugins/cucim.kit.cumed/VERSION sed_runner "s#\[Version 
${CURRENT_LONG_TAG}\](release_notes/v${CURRENT_LONG_TAG}.md)#\[Version ${NEXT_FULL_TAG}\](release_notes/v${NEXT_FULL_TAG}.md)#g" python/cucim/docs/index.md diff --git a/conda/environments/all_cuda-118_arch-x86_64.yaml b/conda/environments/all_cuda-118_arch-x86_64.yaml index b9a4de842..19d15f9a6 100644 --- a/conda/environments/all_cuda-118_arch-x86_64.yaml +++ b/conda/environments/all_cuda-118_arch-x86_64.yaml @@ -12,13 +12,13 @@ dependencies: - cmake>=3.23.1,!=3.25.0 - cuda-version=11.8 - cudatoolkit -- cupy >=12.0.0 +- cupy>=12.0.0 - cxx-compiler - gcc_linux-64=11.* - imagecodecs>=2021.6.8 - ipython - jbig -- lazy_loader >=0.1 +- lazy_loader>=0.1 - libcufile-dev=1.4.0.31 - libcufile=1.4.0.31 - libnvjpeg-dev=11.6.0.55 @@ -26,7 +26,7 @@ dependencies: - libwebp-base - nbsphinx - ninja -- numpy >=1.21.3 +- numpy>=1.21.3 - numpydoc - nvcc_linux-64=11.8 - openslide-python>=1.1.2 @@ -40,7 +40,7 @@ dependencies: - pytest>=6.2.4 - python>=3.8,<3.11 - recommonmark -- scikit-image >=0.19.0,<0.22.0a0 +- scikit-image>=0.19.0,<0.22.0a0 - scipy - sphinx<6 - sysroot_linux-64==2.17 diff --git a/conda/environments/all_cuda-120_arch-x86_64.yaml b/conda/environments/all_cuda-120_arch-x86_64.yaml index a4cb89dcd..982a2c627 100644 --- a/conda/environments/all_cuda-120_arch-x86_64.yaml +++ b/conda/environments/all_cuda-120_arch-x86_64.yaml @@ -13,20 +13,20 @@ dependencies: - cuda-cudart-dev - cuda-nvcc - cuda-version=12.0 -- cupy >=12.0.0 +- cupy>=12.0.0 - cxx-compiler - gcc_linux-64=11.* - imagecodecs>=2021.6.8 - ipython - jbig -- lazy_loader >=0.1 +- lazy_loader>=0.1 - libcufile-dev - libnvjpeg-dev - libnvjpeg-static - libwebp-base - nbsphinx - ninja -- numpy >=1.21.3 +- numpy>=1.21.3 - numpydoc - openslide-python>=1.1.2 - pip @@ -39,7 +39,7 @@ dependencies: - pytest>=6.2.4 - python>=3.8,<3.11 - recommonmark -- scikit-image >=0.19.0,<0.22.0a0 +- scikit-image>=0.19.0,<0.22.0a0 - scipy - sphinx<6 - sysroot_linux-64==2.17 diff --git a/conda/recipes/cucim/build.sh b/conda/recipes/cucim/build.sh index 28b1112c2..3f26cb963 100644 --- a/conda/recipes/cucim/build.sh +++ b/conda/recipes/cucim/build.sh @@ -18,6 +18,6 @@ cp -P python/install/lib/* python/cucim/src/cucim/clara/ pushd python/cucim echo "PYTHON: ${PYTHON}" -$PYTHON setup.py install +$PYTHON -m pip install . -vv popd diff --git a/cpp/include/cucim/memory/dlpack.h b/cpp/include/cucim/memory/dlpack.h index 86ff74368..8981233ce 100644 --- a/cpp/include/cucim/memory/dlpack.h +++ b/cpp/include/cucim/memory/dlpack.h @@ -24,7 +24,7 @@ namespace cucim::memory { /** -* @brief Return a string providing the basic type of the homogenous array in NumPy. +* @brief Return a string providing the basic type of the homogeneous array in NumPy. * * Note: This method assumes little-endian for now. * @@ -108,7 +108,7 @@ class DLTContainer return tensor_->dtype; } /** - * @brief Return a string providing the basic type of the homogenous array in NumPy. + * @brief Return a string providing the basic type of the homogeneous array in NumPy. * * Note: This method assumes little-endian for now. * diff --git a/cpp/src/core/cucim_plugin.cpp b/cpp/src/core/cucim_plugin.cpp index f53f44ed6..6d7263806 100644 --- a/cpp/src/core/cucim_plugin.cpp +++ b/cpp/src/core/cucim_plugin.cpp @@ -400,7 +400,7 @@ bool Plugin::initialize() is_in_initialization_ = true; - // failed to load the plugin library iself => fail and allow the caller to try again later. + // failed to load the plugin library itself => fail and allow the caller to try again later. 
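// i.e. if load() below fails, initialize() returns without completing, and
// no permanent failure is recorded, so the caller can invoke initialize()
// again later to retry the load.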
if (load(next_version_++)) { // // run the pre-startup function for the plugin. diff --git a/dependencies.yaml b/dependencies.yaml index cdf60e9cf..fddf3add9 100644 --- a/dependencies.yaml +++ b/dependencies.yaml @@ -30,6 +30,44 @@ files: - cudatoolkit - docs - py_version + py_build: + output: pyproject + pyproject_dir: python/cucim + extras: + table: build-system + includes: + - build + py_run: + output: pyproject + pyproject_dir: python/cucim + extras: + table: project + includes: + - run + py_optional_test: + output: pyproject + pyproject_dir: python/cucim + extras: + table: project.optional-dependencies + key: test + includes: + - test_python + py_develop: + output: pyproject + pyproject_dir: python/cucim + extras: + table: project.optional-dependencies + key: developer + includes: + - develop + py_docs: + output: pyproject + pyproject_dir: python/cucim + extras: + table: project.optional-dependencies + key: docs + includes: + - docs channels: - rapidsai - rapidsai-nightly @@ -38,6 +76,11 @@ channels: dependencies: build: common: + # Note: + # Currently omit pyproject output_type from cmake, ninja. The python + # build doesn't use these, but assumes that the C++ libcucim shared + # library has already been built and the .so file copied to + # python/cucim/src/cucim/clara/ before calling `pip install .`. - output_types: [conda, requirements] packages: - cmake>=3.23.1,!=3.25.0 @@ -46,6 +89,10 @@ packages: - c-compiler - cxx-compiler + - output_types: [requirements, pyproject] + packages: + - setuptools>=24.2.0 + - wheel specific: - output_types: conda matrices: @@ -124,9 +171,17 @@ dependencies: - cuda-cudart-dev - libnvjpeg-dev - libcufile-dev + develop: + common: + - output_types: [conda, requirements, pyproject] + packages: + - pre-commit + - black + - ruff + - isort docs: common: - - output_types: [conda, requirements] + - output_types: [conda, requirements, pyproject] packages: - ipython - nbsphinx @@ -157,25 +212,37 @@ dependencies: - python>=3.8,<3.11 run: common: + - output_types: [conda, requirements, pyproject] + packages: + - lazy_loader>=0.1 + - scikit-image>=0.19.0,<0.22.0a0 + - scipy - output_types: conda packages: + - cupy>=12.0.0 + - numpy>=1.21.3 + # All dependencies below this point are specific to `cucim.clara` and + # are not needed for either `cucim.core` or `cucim.skimage`. I did + # not include these under a "pyproject" output so that it is still + # possible to run `pip install .` from the python/cucim folder + # without having built the C++ library at all. This allows usage of + # `cucim.skimage` on Windows, for example. - click - - cupy >=12.0.0 - jbig - - lazy_loader >=0.1 - libwebp-base - - numpy >=1.21.3 - - scikit-image >=0.19.0,<0.22.0a0 - - scipy - xz - zlib - zstd # Not sure where these go, if anywhere: # - openslide # - xorg-libxcb + - output_types: [requirements, pyproject] + packages: + - cupy-cuda11x>=12.0.0 + - numpy test_python: common: - - output_types: [conda, requirements] + - output_types: [conda, requirements, pyproject] packages: - GPUtil>=1.4.0 - imagecodecs>=2021.6.8 @@ -191,6 +258,19 @@ dependencies: - pip - pip: - opencv-python-headless>=4.6 - - output_types: [requirements] + - output_types: [requirements, pyproject] packages: - opencv-python-headless>=4.6 + # All dependencies below this point are specific to `cucim.clara` and + # are not needed for either `cucim.core` or `cucim.skimage`, so are + # listed as optional. 
They are needed in order to run the full test + # suite, including the `cucim.clara` package. + - click + - jbig + - libwebp-base + - xz + - zlib + - zstd + # Not sure where these go, if anywhere: + # - openslide + # - xorg-libxcb diff --git a/docs/adr/example.md b/docs/adr/example.md index 74cafd387..61ee5641e 100644 --- a/docs/adr/example.md +++ b/docs/adr/example.md @@ -76,12 +76,12 @@ We accept lower compile-time speeds in favor of compile-time safety and runtime ### Constraints -We have a strong constraint on languages that are usuable with major cloud provider services for functions, such as Amazon Lambda. +We have a strong constraint on languages that are usable with major cloud provider services for functions, such as Amazon Lambda. ### Positions -We considered these langauges: +We considered these languages: * C @@ -143,9 +143,9 @@ Summary per language: * JavaScript: most popular language ever; most widespread ecosystem. - * Kotlin: fixes so much of Java; excelent backing by JetBrains; good published cases of porting from Java to Kotlin. + * Kotlin: fixes so much of Java; excellent backing by JetBrains; good published cases of porting from Java to Kotlin. - * Python: most popular language for systems administration; great analytics tooling; good web frameworks; but abandonded by Google in favor of Go. + * Python: most popular language for systems administration; great analytics tooling; good web frameworks; but abandoned by Google in favor of Go. * Ruby: best developer experience ever; best web frameworks; nicest community; but very slow; somewhat hard to package. @@ -161,7 +161,7 @@ We believe that our core decision is driven by two cross-cutting concerns: * For close-to-fastest runtime speed and close-to-tightest system access, we choose TypeScript and Rust. -Honorable mentions go to the VM languages and web frameworks that we would choose if we wanted a VM lanauge: +Honorable mentions go to the VM languages and web frameworks that we would choose if we wanted a VM language: * Closure and Luminous @@ -176,7 +176,7 @@ Front-end developers will need to learn TypeScript. This is likely an easy learn Back-end developers will need to learn Rust. This is likely a moderate learning curve if the developer's primary experience is using C/C++, and a hard learning curve if the developer's primary experience is using Java, Python, Ruby, or similar memory-managed languages. -TypeScript and Rust are both relatively new. This means that many tools do not yet have documentation for these languages. For example, the devops pipeline will need to be set up for these languages, and so far, none of the devops tools that we are evaluating have default examples for these langauges. +TypeScript and Rust are both relatively new. This means that many tools do not yet have documentation for these languages. For example, the devops pipeline will need to be set up for these languages, and so far, none of the devops tools that we are evaluating have default examples for these languages. Compile times for TypeScript and Rust are quite slow. Some of this may be due to the newness of the languages. We may want to look at how to mitigate slow compile times, such as by compile-on-demand, compile-concurrency, etc. @@ -188,9 +188,9 @@ IDE support for these languages is not yet ubiquitous and not yet first-class. F ### Related decisions -We will aim toward ecosystem choices that align with these langauges. +We will aim toward ecosystem choices that align with these languages. 
-For example, we want to choose an IDE that has good capabilties for these languages. +For example, we want to choose an IDE that has good capabilities for these languages. For example, for our front-end web framework, we are more-likley to decide on a framework that tends to aim toward TypeScript (e.g. Vue) than a framework that tends to aim toward plain JavaScript (e.g. React). diff --git a/docs/source/conf.py b/docs/source/conf.py index a3112e9a2..084d85d70 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -22,9 +22,9 @@ import os import sys -sys.path.insert(0, os.path.abspath('../..')) +sys.path.insert(0, os.path.abspath("../..")) curpath = os.path.dirname(__file__) -sys.path.append(os.path.join(curpath, 'ext')) +sys.path.append(os.path.join(curpath, "ext")) # -- General configuration ------------------------------------------------ @@ -36,45 +36,45 @@ # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ - 'sphinx.ext.intersphinx', - 'sphinx.ext.autodoc', - 'sphinx.ext.autosummary', - 'sphinx.ext.mathjax', - 'numpydoc', - 'doi_role', - 'IPython.sphinxext.ipython_console_highlighting', - 'IPython.sphinxext.ipython_directive', - 'nbsphinx', - 'recommonmark', + "sphinx.ext.intersphinx", + "sphinx.ext.autodoc", + "sphinx.ext.autosummary", + "sphinx.ext.mathjax", + "numpydoc", + "doi_role", + "IPython.sphinxext.ipython_console_highlighting", + "IPython.sphinxext.ipython_directive", + "nbsphinx", + "recommonmark", ] -ipython_mplbackend = 'str' +ipython_mplbackend = "str" # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] -source_suffix = '.rst' +source_suffix = ".rst" # The master toctree document. -master_doc = 'index' +master_doc = "index" # General information about the project. -project = 'cuCIM' -copyright = '2020-2021, NVIDIA' -author = 'NVIDIA' +project = "cuCIM" +copyright = "2020-2021, NVIDIA" +author = "NVIDIA" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. -version = '23.12' +version = "23.12" # The full version, including alpha/beta/rc tags. -release = '23.12.00' +release = "23.12.00" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. @@ -89,7 +89,7 @@ exclude_patterns = [] # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = "sphinx" # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False @@ -101,7 +101,7 @@ # a list of builtin themes. # -html_theme = 'pydata_sphinx_theme' +html_theme = "pydata_sphinx_theme" # Theme options are theme-specific and customize the look and feel of a theme @@ -113,13 +113,13 @@ # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +html_static_path = ["_static"] # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. 
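# e.g. `sphinx-build -b htmlhelp` uses this value as the stem of the
# generated HTML Help project files (cucimdoc.hhp and companions).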
-htmlhelp_basename = 'cucimdoc' +htmlhelp_basename = "cucimdoc" # -- Options for LaTeX output --------------------------------------------- @@ -128,15 +128,12 @@ # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. # # 'preamble': '', - # Latex figure (float) alignment # # 'figure_align': 'htbp', @@ -146,8 +143,13 @@ # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - (master_doc, 'cucim.tex', 'cucim Documentation', - 'NVIDIA Corporation', 'manual'), + ( + master_doc, + "cucim.tex", + "cucim Documentation", + "NVIDIA Corporation", + "manual", + ), ] @@ -155,10 +157,7 @@ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [ - (master_doc, 'cucim', 'cucim Documentation', - [author], 1) -] +man_pages = [(master_doc, "cucim", "cucim Documentation", [author], 1)] # -- Options for Texinfo output ------------------------------------------- @@ -167,19 +166,25 @@ # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - (master_doc, 'cucim', 'cucim Documentation', - author, 'cucim', 'One line description of project.', - 'Miscellaneous'), + ( + master_doc, + "cucim", + "cucim Documentation", + author, + "cucim", + "One line description of project.", + "Miscellaneous", + ), ] # Configuration for intersphinx: refer to other projects documentation. intersphinx_mapping = { - 'python': ('https://docs.python.org/', None), - 'cupy': ('https://docs.cupy.dev/en/stable/', None), - 'numpy': ('https://numpy.org/doc/stable', None), - 'scipy': ('https://docs.scipy.org/doc/scipy/', None), - 'skimage': ('https://scikit-image.org/docs/stable/', None), + "python": ("https://docs.python.org/", None), + "cupy": ("https://docs.cupy.dev/en/stable/", None), + "numpy": ("https://numpy.org/doc/stable", None), + "scipy": ("https://docs.scipy.org/doc/scipy/", None), + "skimage": ("https://scikit-image.org/docs/stable/", None), } @@ -190,4 +195,6 @@ def setup(app): app.add_css_file("https://docs.rapids.ai/assets/css/custom.css") - app.add_js_file("https://docs.rapids.ai/assets/js/custom.js", loading_method="defer") + app.add_js_file( + "https://docs.rapids.ai/assets/js/custom.js", loading_method="defer" + ) diff --git a/docs/source/ext/doi_role.py b/docs/source/ext/doi_role.py index 86aebbda4..ab603df60 100644 --- a/docs/source/ext/doi_role.py +++ b/docs/source/ext/doi_role.py @@ -24,9 +24,9 @@ def doi_role(typ, rawtext, text, lineno, inliner, options={}, content=[]): text = utils.unescape(text) has_explicit_title, title, part = split_explicit_title(text) - full_url = 'https://doi.org/' + part + full_url = "https://doi.org/" + part if not has_explicit_title: - title = 'DOI:' + part + title = "DOI:" + part pnode = nodes.reference(title, title, internal=False, refuri=full_url) return [pnode], [] @@ -34,20 +34,20 @@ def doi_role(typ, rawtext, text, lineno, inliner, options={}, content=[]): def arxiv_role(typ, rawtext, text, lineno, inliner, options={}, content=[]): text = utils.unescape(text) has_explicit_title, title, part = split_explicit_title(text) - full_url = 'https://arxiv.org/abs/' + part + full_url = "https://arxiv.org/abs/" + part if not has_explicit_title: - title = 'arXiv:' + part + title = "arXiv:" + part pnode = nodes.reference(title, title, internal=False, refuri=full_url) return 
[pnode], [] def setup_link_role(app): - app.add_role('doi', doi_role) - app.add_role('DOI', doi_role) - app.add_role('arXiv', arxiv_role) - app.add_role('arxiv', arxiv_role) + app.add_role("doi", doi_role) + app.add_role("DOI", doi_role) + app.add_role("arXiv", arxiv_role) + app.add_role("arxiv", arxiv_role) def setup(app): - app.connect('builder-inited', setup_link_role) - return {'version': '0.1', 'parallel_read_safe': True} + app.connect("builder-inited", setup_link_role) + return {"version": "0.1", "parallel_read_safe": True} diff --git a/examples/python/distance_transform_edt_demo.py b/examples/python/distance_transform_edt_demo.py index 4a4f76e53..405cd0f23 100644 --- a/examples/python/distance_transform_edt_demo.py +++ b/examples/python/distance_transform_edt_demo.py @@ -1,4 +1,5 @@ import math + import cupy as cp import numpy as np @@ -7,7 +8,7 @@ import matplotlib.pyplot as plt except ImportError as e: print("This demo requires the matplotlib and colorcet packages.") - raise(e) + raise (e) from skimage import data @@ -32,7 +33,7 @@ def coords_to_labels(coords): shape = (200, 200) size = math.prod(shape) -ntrue = .001 * size +ntrue = 0.001 * size p_true = ntrue / size p_false = 1 - p_true @@ -67,23 +68,23 @@ def coords_to_labels(coords): fig, axes = plt.subplots(2, 3, figsize=(8, 7)) axes[0][0].imshow(image, cmap=plt.cm.gray) -axes[0][0].set_title('seed points') +axes[0][0].set_title("seed points") axes[0][1].imshow(distances, cmap=plt.cm.gray) -axes[0][1].set_title('Euclidean distance\n(to nearest seed)') +axes[0][1].set_title("Euclidean distance\n(to nearest seed)") axes[1][0].imshow(coords[0], cmap=plt.cm.gray) -axes[1][0].set_title('y coordindate\nof neareset seed') +axes[1][0].set_title("y coordinate\nof nearest seed") axes[1][1].imshow(coords[1], cmap=plt.cm.gray) -axes[1][1].set_title('x coordindate\nof neareset seed') +axes[1][1].set_title("x coordinate\nof nearest seed") axes[1][2].imshow(rgb_labels) -axes[1][2].set_title('discrete Voronoi') +axes[1][2].set_title("discrete Voronoi") for ax in axes.ravel(): ax.set_axis_off() # overlay larger markers at the seed points for better visibility for x, y in zip(xx, yy): # overlay in image - axes[0, 0].plot(y, x, 'w.') + axes[0, 0].plot(y, x, "w.") # overlay in rgb_labels - axes[1, 2].plot(y, x, 'w.') + axes[1, 2].plot(y, x, "w.") plt.tight_layout() @@ -108,13 +109,13 @@ def coords_to_labels(coords): fig, axes = plt.subplots(2, 2, figsize=(7, 7)) axes[0][0].imshow(horse_inv, cmap=plt.cm.gray) -axes[0][0].set_title('Foreground horse') +axes[0][0].set_title("Foreground horse") axes[0][1].imshow(horse, cmap=plt.cm.gray) -axes[0][1].set_title('Background horse') +axes[0][1].set_title("Background horse") axes[1][0].imshow(distances_inv) -axes[1][0].set_title('Distance\n(foreground horse)') +axes[1][0].set_title("Distance\n(foreground horse)") axes[1][1].imshow(distances) -axes[1][1].set_title('Distance\n(background horse)') +axes[1][1].set_title("Distance\n(background horse)") for ax in axes.ravel(): ax.set_axis_off() plt.tight_layout() diff --git a/examples/python/gds_whole_slide/benchmark_read.py b/examples/python/gds_whole_slide/benchmark_read.py index f25668e91..efbfa6c90 100644 --- a/examples/python/gds_whole_slide/benchmark_read.py +++ b/examples/python/gds_whole_slide/benchmark_read.py @@ -5,12 +5,17 @@ import kvikio.defaults import numpy as np from cupyx.profiler import benchmark +from demo_implementation import ( + get_n_tiles, + get_tile_buffers, + read_openslide, + read_tifffile, + read_tiled, +) from tifffile import 
TiffFile -from demo_implementation import read_openslide, read_tifffile, read_tiled, get_n_tiles, get_tile_buffers - -data_dir = os.environ.get('WHOLE_SLIDE_DATA_DIR', os.path.dirname('__file__')) -fname = os.path.join(data_dir, 'resize.tiff') +data_dir = os.environ.get("WHOLE_SLIDE_DATA_DIR", os.path.dirname("__file__")) +fname = os.path.join(data_dir, "resize.tiff") if not os.path.exists(fname): raise RuntimeError(f"Could not find data file: {fname}") @@ -55,10 +60,10 @@ (fname, level), n_warmup=0, n_repeat=100, - max_duration=max_duration + max_duration=max_duration, ) times.append(perf_openslide.gpu_times.mean()) -labels.append('openslide') +labels.append("openslide") print(f"duration ({labels[-1]}) = {times[-1]}") perf_tifffile = benchmark( @@ -66,10 +71,10 @@ (fname, level), n_warmup=0, n_repeat=100, - max_duration=max_duration + max_duration=max_duration, ) times.append(perf_tifffile.gpu_times.mean()) -labels.append('tifffile') +labels.append("tifffile") print(f"duration ({labels[-1]}) = {times[-1]}") for gds_enabled in [False, True]: @@ -79,7 +84,7 @@ p = benchmark( read_tiled, (fname, [level]), - kwargs=dict(backend='kvikio-raw_read', tile_buffers=tile_buffers), + kwargs=dict(backend="kvikio-raw_read", tile_buffers=tile_buffers), n_warmup=1, n_repeat=100, max_duration=max_duration, @@ -98,7 +103,7 @@ p = benchmark( read_tiled, (fname, [level]), - kwargs=dict(backend='kvikio-read', tile_buffers=tile_buffers), + kwargs=dict(backend="kvikio-read", tile_buffers=tile_buffers), n_warmup=1, n_repeat=100, max_duration=max_duration, @@ -125,12 +130,14 @@ p = benchmark( read_tiled, (fname, [level]), - kwargs=dict(backend='kvikio-pread', - n_buffer=n_buffer, - tile_buffers=tile_buffers), + kwargs=dict( + backend="kvikio-pread", + n_buffer=n_buffer, + tile_buffers=tile_buffers, + ), n_warmup=1, n_repeat=100, - max_duration=max_duration + max_duration=max_duration, ) if gds_enabled: perf_kvikio_pread.append(p) @@ -146,11 +153,11 @@ kvikio.defaults.compat_mode_reset(False) -out_name = 'read_times.npz' +out_name = "read_times.npz" # auto-increment filename to avoid overwriting old results cnt = 1 while os.path.exists(out_name): - out_name = f'read_times{cnt}.npz' + out_name = f"read_times{cnt}.npz" cnt += 1 np.savez(out_name, times=np.asarray(times), labels=np.asarray(labels)) @@ -237,4 +244,4 @@ duration (kvikio-pread (n_buffer=16) (gds_enabled=True)) = 0.9027577819824221 duration (kvikio-pread (n_buffer=64) (gds_enabled=True)) = 0.7827104492187501 duration (kvikio-pread (n_buffer=256) (gds_enabled=True)) = 0.756464599609375 -""" +""" # noqa: E501 diff --git a/examples/python/gds_whole_slide/benchmark_round_trip.py b/examples/python/gds_whole_slide/benchmark_round_trip.py index 05becaae3..21f4ec278 100644 --- a/examples/python/gds_whole_slide/benchmark_round_trip.py +++ b/examples/python/gds_whole_slide/benchmark_round_trip.py @@ -1,19 +1,18 @@ import os from time import time -import cucim.skimage.filters import cupy as cp -import numpy as np import kvikio import kvikio.defaults -from cucim.core.operations.color import image_to_absorbance +import numpy as np from cupyx.profiler import benchmark - from demo_implementation import cupy_to_zarr, read_tiled +import cucim.skimage.filters +from cucim.core.operations.color import image_to_absorbance -data_dir = os.environ.get('WHOLE_SLIDE_DATA_DIR', os.path.dirname('__file__')) -fname = os.path.join(data_dir, 'resize.tiff') +data_dir = os.environ.get("WHOLE_SLIDE_DATA_DIR", os.path.dirname("__file__")) +fname = os.path.join(data_dir, "resize.tiff") if 
not os.path.exists(fname): raise RuntimeError(f"Could not find data file: {fname}") @@ -41,9 +40,8 @@ def round_trip( zarr_kwargs=dict(overwrite=True, compressor=None), verbose_times=False, ): - if output_path is None: - output_path = f'./image-{cp.dtype(out_dtype).name}.zarr' + output_path = f"./image-{cp.dtype(out_dtype).name}.zarr" if apply_kernel_tilewise: tile_func = kernel_func @@ -57,11 +55,11 @@ def round_trip( data_gpu = read_tiled( fname, levels=[level], - backend='kvikio-pread', + backend="kvikio-pread", n_buffer=n_buffer, tile_func=tile_func, tile_func_kwargs=tile_func_kwargs, - out_dtype=out_dtype + out_dtype=out_dtype, )[0] if verbose_times: dur_read = time() - tstart @@ -84,7 +82,7 @@ def round_trip( tstart = time() cupy_to_zarr( data_gpu, - backend='dask', # 'kvikio-pwrite', + backend="dask", # 'kvikio-pwrite', output_path=output_path, chunk_shape=zarr_chunk_shape, zarr_kwargs=zarr_kwargs, @@ -101,26 +99,30 @@ def round_trip( times = [] labels = [] n_buffer = 32 -for zarr_chunk_shape in [(512, 512, 3), (1024, 1024, 3), (2048, 2048, 3), (4096, 4096, 3)]: - for computation in ['absorbance', 'median', 'gaussian', 'sobel']: - +for zarr_chunk_shape in [ + (512, 512, 3), + (1024, 1024, 3), + (2048, 2048, 3), + (4096, 4096, 3), +]: + for computation in ["absorbance", "median", "gaussian", "sobel"]: if computation is None: kernel_func = None kernel_func_kwargs = {} out_dtype = cp.uint8 - elif computation == 'absorbance': + elif computation == "absorbance": kernel_func = image_to_absorbance kernel_func_kwargs = {} out_dtype = cp.float32 - elif computation == 'median': + elif computation == "median": kernel_func = cucim.skimage.filters.median kernel_func_kwargs = dict(footprint=cp.ones((5, 5, 1), dtype=bool)) out_dtype = cp.uint8 - elif computation == 'gaussian': + elif computation == "gaussian": kernel_func = cucim.skimage.filters.gaussian kernel_func_kwargs = dict(sigma=2.5, channel_axis=-1) out_dtype = cp.uint8 - elif computation == 'sobel': + elif computation == "sobel": kernel_func = cucim.skimage.filters.sobel kernel_func_kwargs = dict(axis=(0, 1)) out_dtype = cp.float32 @@ -152,19 +154,21 @@ def round_trip( ) t = perf.gpu_times - kernel_description = 'tiled' if apply_kernel_tilewise else 'global' - gds_description = 'with GDS' if gds_enabled else 'without GDS' - label = f"{computation=}, {kernel_description}, chunk_shape={zarr_chunk_shape}, {gds_description}" + kernel_description = ( + "tiled" if apply_kernel_tilewise else "global" + ) + gds_description = "with GDS" if gds_enabled else "without GDS" + label = f"{computation=}, {kernel_description}, chunk_shape={zarr_chunk_shape}, {gds_description}" # noqa: E501 print(f"Duration ({label}): {t.mean()} s +/- {t.std()} s") times.append(t.mean()) labels.append(label) -out_name = 'round_trip_times.npz' +out_name = "round_trip_times.npz" # auto-increment filename to avoid overwriting old results cnt = 1 while os.path.exists(out_name): - out_name = f'round_trip_times{cnt}.npz' + out_name = f"round_trip_times{cnt}.npz" cnt += 1 np.savez(out_name, times=np.asarray(times), labels=np.asarray(labels)) @@ -238,4 +242,4 @@ def round_trip( Duration (computation='sobel', tiled, chunk_shape=(4096, 4096, 3), without GDS): 5.430656982421875 s +/- 0.01277270507812478 s Duration (computation='sobel', tiled, chunk_shape=(4096, 4096, 3), with GDS): 3.1940713500976563 s +/- 0.19852391299904837 s -""" +""" # noqa: E501 diff --git a/examples/python/gds_whole_slide/benchmark_zarr_write.py b/examples/python/gds_whole_slide/benchmark_zarr_write.py index 
e681e723b..13376f410 100644 --- a/examples/python/gds_whole_slide/benchmark_zarr_write.py +++ b/examples/python/gds_whole_slide/benchmark_zarr_write.py @@ -4,15 +4,14 @@ import cupy as cp import kvikio.defaults import numpy as np -from cucim.core.operations.color import image_to_absorbance from cupyx.profiler import benchmark +from demo_implementation import cupy_to_zarr, get_n_tiles, read_tiled from tifffile import TiffFile -from demo_implementation import read_tiled, get_n_tiles, cupy_to_zarr - +from cucim.core.operations.color import image_to_absorbance -data_dir = os.environ.get('WHOLE_SLIDE_DATA_DIR', os.path.dirname('__file__')) -fname = os.path.join(data_dir, 'resize.tiff') +data_dir = os.environ.get("WHOLE_SLIDE_DATA_DIR", os.path.dirname("__file__")) +fname = os.path.join(data_dir, "resize.tiff") if not os.path.exists(fname): raise RuntimeError(f"Could not find data file: {fname}") @@ -45,28 +44,33 @@ # read the uint8 TIFF -kwargs = dict(levels=[level], backend='kvikio-pread', n_buffer=n_buffer) +kwargs = dict(levels=[level], backend="kvikio-pread", n_buffer=n_buffer) image_gpu = read_tiled(fname, **kwargs)[0] # read the uint8 TIFF applying tile-wise processing to give a float32 array -kwargs = dict(levels=[level], backend='kvikio-pread', n_buffer=n_buffer, - tile_func=image_to_absorbance, out_dtype=cp.float32) +kwargs = dict( + levels=[level], + backend="kvikio-pread", + n_buffer=n_buffer, + tile_func=image_to_absorbance, + out_dtype=cp.float32, +) preprocessed_gpu = read_tiled(fname, **kwargs)[0] # benchmark writing these CuPy outputs to Zarr with various chunk sizes -dtypes = ['uint8', 'float32'] +dtypes = ["uint8", "float32"] chunk_shapes = [(512, 512, 3), (1024, 1024, 3), (2048, 2048, 3)] -backends = ['dask', 'kvikio-raw_write', 'kvikio-pwrite'] +backends = ["dask", "kvikio-raw_write", "kvikio-pwrite"] kvikio.defaults.num_threads_reset(16) write_time_means = np.zeros( ((len(dtypes), len(chunk_shapes), len(backends), 2)), dtype=float ) write_time_stds = np.zeros_like(write_time_means) for i, dtype in enumerate(dtypes): - if dtype == 'uint8': + if dtype == "uint8": img = image_gpu assert img.dtype == cp.uint8 - elif dtype == 'float32': + elif dtype == "float32": img = preprocessed_gpu assert img.dtype == cp.float32 else: @@ -74,7 +78,7 @@ for j, chunk_shape in enumerate(chunk_shapes): for k, backend in enumerate(backends): kwargs = dict( - output_path=f'./image-{dtype}.zarr', + output_path=f"./image-{dtype}.zarr", chunk_shape=chunk_shape, zarr_kwargs=dict(overwrite=True, compressor=compressor), n_buffer=64, @@ -82,20 +86,31 @@ ) for m, gds_enabled in enumerate([False, True]): kvikio.defaults.compat_mode_reset(not gds_enabled) - perf_write_float32 = benchmark(cupy_to_zarr, (img,), kwargs=kwargs, n_warmup=1, n_repeat=7, max_duration=15) + perf_write_float32 = benchmark( + cupy_to_zarr, + (img,), + kwargs=kwargs, + n_warmup=1, + n_repeat=7, + max_duration=15, + ) t = perf_write_float32.gpu_times write_time_means[i, j, k, m] = t.mean() write_time_stds[i, j, k, m] = t.std() - print(f"Duration ({cp.dtype(dtype).name} write, {chunk_shape=}, {backend=}, {gds_enabled=}): " - f"{t.mean()} s +/- {t.std()} s") -out_name = 'write_times.npz' + print( + f"Duration ({cp.dtype(dtype).name} write, {chunk_shape=}, {backend=}, {gds_enabled=}): " # noqa: E501 + f"{t.mean()} s +/- {t.std()} s" + ) +out_name = "write_times.npz" # auto-increment filename to avoid overwriting old results cnt = 1 while os.path.exists(out_name): - out_name = f'write_times{cnt}.npz' + out_name = f"write_times{cnt}.npz" 
cnt += 1 -np.savez(out_name, write_time_means=write_time_means, write_time_stds=write_time_stds) +np.savez( + out_name, write_time_means=write_time_means, write_time_stds=write_time_stds +) """ diff --git a/examples/python/gds_whole_slide/benchmark_zarr_write_lz4_via_dask.py b/examples/python/gds_whole_slide/benchmark_zarr_write_lz4_via_dask.py index 398a4083d..46ef057c0 100644 --- a/examples/python/gds_whole_slide/benchmark_zarr_write_lz4_via_dask.py +++ b/examples/python/gds_whole_slide/benchmark_zarr_write_lz4_via_dask.py @@ -10,16 +10,13 @@ import cupy as cp import kvikio.defaults import numpy as np -from cucim.core.operations.color import image_to_absorbance from cupyx.profiler import benchmark -from tifffile import TiffFile - -from demo_implementation import read_tiled, get_n_tiles, cupy_to_zarr +from demo_implementation import cupy_to_zarr, get_n_tiles, read_tiled from lz4_nvcomp import LZ4NVCOMP +from tifffile import TiffFile - -data_dir = os.environ.get('WHOLE_SLIDE_DATA_DIR', os.path.dirname('__file__')) -fname = os.path.join(data_dir, 'resize.tiff') +data_dir = os.environ.get("WHOLE_SLIDE_DATA_DIR", os.path.dirname("__file__")) +fname = os.path.join(data_dir, "resize.tiff") if not os.path.exists(fname): raise RuntimeError(f"Could not find data file: {fname}") @@ -51,19 +48,26 @@ # read the uint8 TIFF -kwargs = dict(levels=[level], backend='kvikio-pread', n_buffer=n_buffer) +kwargs = dict(levels=[level], backend="kvikio-pread", n_buffer=n_buffer) image_gpu = read_tiled(fname, **kwargs)[0] # benchmark writing these CuPy outputs to Zarr with various chunk sizes # Note: nvcomp only supports integer and unsigned dtypes. -# https://github.com/rapidsai/kvikio/blob/b0c6cedf43d1bc240c3ef1b38ebb9d89574a08ee/python/kvikio/nvcomp.py#L12-L21 # noqa - -dtypes = ['uint16'] -chunk_shapes = [(512, 512, 3), (1024, 1024, 3), (2048, 2048, 3), (4096, 4096, 3)] -backend = 'dask' +# https://github.com/rapidsai/kvikio/blob/b0c6cedf43d1bc240c3ef1b38ebb9d89574a08ee/python/kvikio/nvcomp.py#L12-L21 # noqa: E501 + +dtypes = ["uint16"] +chunk_shapes = [ + (512, 512, 3), + (1024, 1024, 3), + (2048, 2048, 3), + (4096, 4096, 3), +] +backend = "dask" compressors = [None, LZ4NVCOMP()] kvikio.defaults.num_threads_reset(16) -write_time_means = np.zeros(((len(dtypes), len(chunk_shapes), len(compressors), 2)), dtype=float) +write_time_means = np.zeros( + ((len(dtypes), len(chunk_shapes), len(compressors), 2)), dtype=float +) write_time_stds = np.zeros_like(write_time_means) for i, dtype in enumerate(dtypes): dtype = np.dtype(dtype) @@ -79,7 +83,9 @@ for j, chunk_shape in enumerate(chunk_shapes): for k, compressor in enumerate(compressors): kwargs = dict( - output_path=f'./image-{dtype}-chunk{chunk_shape[0]}.zarr' if compressor is None else f'./image-{dtype}-chunk{chunk_shape[0]}-lz4.zarr', + output_path=f"./image-{dtype}-chunk{chunk_shape[0]}.zarr" + if compressor is None + else f"./image-{dtype}-chunk{chunk_shape[0]}-lz4.zarr", chunk_shape=chunk_shape, zarr_kwargs=dict(overwrite=True, compressor=compressor), n_buffer=64, @@ -87,21 +93,32 @@ ) for m, gds_enabled in enumerate([False, True]): kvikio.defaults.compat_mode_reset(not gds_enabled) - perf_write_float32 = benchmark(cupy_to_zarr, (img,), kwargs=kwargs, n_warmup=1, n_repeat=7, max_duration=max_duration) + perf_write_float32 = benchmark( + cupy_to_zarr, + (img,), + kwargs=kwargs, + n_warmup=1, + n_repeat=7, + max_duration=max_duration, + ) t = perf_write_float32.gpu_times write_time_means[i, j, k, m] = t.mean() write_time_stds[i, j, k, m] = t.std() - 
print(f"Duration ({cp.dtype(dtype).name} write, {chunk_shape=}, {compressor=}, {gds_enabled=}): " - f"{t.mean()} s +/- {t.std()} s") + print( + f"Duration ({cp.dtype(dtype).name} write, {chunk_shape=}, {compressor=}, {gds_enabled=}): " # noqa: E501 + f"{t.mean()} s +/- {t.std()} s" + ) -out_name = 'write_times_lz4.npz' +out_name = "write_times_lz4.npz" # auto-increment filename to avoid overwriting old results cnt = 1 while os.path.exists(out_name): - out_name = f'write_times_lz4{cnt}.npz' + out_name = f"write_times_lz4{cnt}.npz" cnt += 1 -np.savez(out_name, write_time_means=write_time_means, write_time_stds=write_time_stds) +np.savez( + out_name, write_time_means=write_time_means, write_time_stds=write_time_stds +) """ @@ -152,4 +169,4 @@ Duration (uint16 write, chunk_shape=(4096, 4096, 3), compressor=LZ4NVCOMP, gds_enabled=False): 0.8166022583007813 s +/- 0.19712617258152443 s Duration (uint16 write, chunk_shape=(4096, 4096, 3), compressor=LZ4NVCOMP, gds_enabled=True): 0.7342889607747396 s +/- 0.025796103217999546 s -""" +""" # noqa: E501 diff --git a/examples/python/gds_whole_slide/demo_implementation.py b/examples/python/gds_whole_slide/demo_implementation.py index b71845812..7d02c8407 100644 --- a/examples/python/gds_whole_slide/demo_implementation.py +++ b/examples/python/gds_whole_slide/demo_implementation.py @@ -1,4 +1,5 @@ import os +import warnings import cupy as cp import dask.array as da @@ -7,9 +8,9 @@ import numpy as np import openslide import tifffile -from tifffile import TiffFile from kvikio.cufile import IOFuture from kvikio.zarr import GDSStore +from tifffile import TiffFile from zarr import DirectoryStore from zarr.creation import init_array @@ -19,7 +20,7 @@ Developed with Dask 2022.05.2 zarr >= 2.13.2 kvikio >= 2022.10.00 (but had to use a recent development branch on my system to properly find libcufile.so) -""" +""" # noqa: E501 def get_n_tiles(page): @@ -59,13 +60,13 @@ def _get_tile_multiindex(page, index, n_tiles): multi_index : tuple of int Starting index for the tile along each axis in the output array. """ - d, l, w = n_tiles - wl = w * l - wld = wl * d + d, h, w = n_tiles + wh = w * h + whd = wh * d multi_index = ( - index // wld, - (index // wl) % d * page.tiledepth, - (index // w) % l * page.tilelength, + index // whd, + (index // wh) % d * page.tiledepth, + (index // w) % h * page.tilelength, index % w * page.tilewidth, 0, ) @@ -86,7 +87,7 @@ def _decode(data, tile_shape, truncation_slices): last tile along a given dimension when the page size is not an even multiple of the tile width. """ - if not hasattr(data, '__cuda_array_interface__'): + if not hasattr(data, "__cuda_array_interface__"): data = np.frombuffer(data, np.uint8) data.shape = tile_shape # truncate any tiles that extend past the image boundary @@ -112,7 +113,7 @@ def _truncation_slices(check_needed, page_shape, offsets, tile_shape, n_tiles): Parameters ---------- check_needed : 3-tuple of bool - Any axis whos page size is not evenly divisible by the tile size will + Any axis whose page size is not evenly divisible by the tile size will have a True entry in this tuple. 
page_shape : tuple of int The shape of the current TIFF page (depth, length, width[, channels]) @@ -155,9 +156,7 @@ def read_openslide(fname, level, clear_cache=True): slide = openslide.OpenSlide(fname) out = slide.read_region( - location=(0, 0), - level=level, - size=slide.level_dimensions[level] + location=(0, 0), level=level, size=slide.level_dimensions[level] ) # convert from PIL image to NumPy array out = np.asarray(out) @@ -202,7 +201,9 @@ def _get_aligned_read_props(offsets, bytecounts, alignment=4096): rounded_offsets = (offsets // alignment) * alignment buffer_offsets = offsets - rounded_offsets rounded_bytecounts = buffer_offsets + bytecounts - rounded_bytecounts = np.ceil(rounded_bytecounts / alignment).astype(int) * alignment + rounded_bytecounts = ( + np.ceil(rounded_bytecounts / alignment).astype(int) * alignment + ) # truncate last bytecounts entry to avoid possibly exceeding file extent last = offsets[-1] + bytecounts[-1] @@ -262,12 +263,15 @@ def read_alltiles_bulk(fname, level, clear_cache=True): def get_tile_buffers(fname, level, n_buffer): with TiffFile(fname) as tif: page = tif.pages[level] - n_chan = page.shaped[-1] - rounded_offsets, rounded_bytecounts, buffer_offsets = _get_aligned_read_props( + ( + rounded_offsets, + rounded_bytecounts, + buffer_offsets, + ) = _get_aligned_read_props( offsets=page.dataoffsets, bytecounts=page.databytecounts, - alignment=4096 + alignment=4096, ) # Allocate buffer based on size of the largest tile after rounding # up to the nearest multiple of 4096. @@ -283,10 +287,18 @@ def get_tile_buffers(fname, level, n_buffer): return tile_buffers -def read_tiled(fname, levels=[0], backend='kvikio-pread', n_buffer=100, - tile_func=None, tile_func_kwargs={}, out_dtype=None, - clear_cache=True, preregister_memory_buffers=False, - tile_buffers=None): +def read_tiled( + fname, + levels=[0], + backend="kvikio-pread", + n_buffer=100, + tile_func=None, + tile_func_kwargs={}, + out_dtype=None, + clear_cache=True, + preregister_memory_buffers=False, + tile_buffers=None, +): """Read an uncompressed, tiled multiresolution TIFF image to GPU memory. Parameters @@ -304,7 +316,7 @@ def read_tiled(fname, levels=[0], backend='kvikio-pread', n_buffer=100, reads. n_buffer : int, optional Scratch space equal to `n_buffer` TIFF tiles will be allocated. - Providing scratch space for multiple tiles helps the peformance in the + Providing scratch space for multiple tiles helps the performance in the recommended asynchronous 'kvikio-pread' mode. tile_func : function, optional A CuPy-based function to apply to each tile after it is read. Must @@ -354,16 +366,16 @@ def read_tiled(fname, levels=[0], backend='kvikio-pread', n_buffer=100, n_buffer = len(tile_buffers) with TiffFile(fname) as tif: - if isinstance(levels, int): levels = (levels,) - if levels == 'all': + if levels == "all": pages = tuple(tif.pages[n] for n in range(len(tif.pages))) elif isinstance(levels, (tuple, list)): pages = tuple(tif.pages[n] for n in levels) else: - raise ValueError("pages must be a tuple or list of int or the " - "string 'all'") + raise ValueError( + "pages must be a tuple or list of int or the " "string 'all'" + ) # sanity check: identical tile size for all TIFF pages # todo: is this always true? 
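For reference, a minimal sketch (not part of this commit) of the arithmetic that `_get_aligned_read_props` applies before each kvikio read, as seen in the hunks above and below: offsets are rounded down and bytecounts rounded up to 4096-byte boundaries, presumably to satisfy cuFile/O_DIRECT alignment constraints, and each tile's payload is later sliced back out of its scratch buffer at `buffer_offset`. The offset and bytecount values here are made up for illustration.

```python
import numpy as np

# hypothetical tile: starts 10000 bytes into the file, 6000 bytes long
offsets = np.array([10_000])
bytecounts = np.array([6_000])
alignment = 4096

# round the read start down to a 4096-byte boundary...
rounded_offsets = (offsets // alignment) * alignment  # -> [8192]
# ...remember how far into the aligned buffer the real data begins...
buffer_offsets = offsets - rounded_offsets  # -> [1808]
# ...and widen the read length so the aligned window covers the whole tile
rounded_bytecounts = (
    np.ceil((buffer_offsets + bytecounts) / alignment).astype(int) * alignment
)  # -> [8192]

# the decoded tile is then recovered as in read_tiled:
# tile_buffer[buffer_offsets[0] : buffer_offsets[0] + bytecounts[0]]
```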
@@ -381,10 +393,14 @@ def read_tiled(fname, levels=[0], backend='kvikio-pread', n_buffer=100, out_dtype = page.dtype out_array = cp.ndarray(shape=page.shape, dtype=out_dtype) - rounded_offsets, rounded_bytecounts, buffer_offsets = _get_aligned_read_props( + ( + rounded_offsets, + rounded_bytecounts, + buffer_offsets, + ) = _get_aligned_read_props( offsets=page.dataoffsets, bytecounts=page.databytecounts, - alignment=4096 + alignment=4096, ) # Allocate buffer based on size of the largest tile after rounding # up to the nearest multiple of 4096. @@ -397,12 +413,16 @@ def read_tiled(fname, levels=[0], backend='kvikio-pread', n_buffer=100, # note: tile_buffer is C-contiguous so tile_buffer[i] is contiguous if tile_buffers is None: tile_buffers = tuple( - cp.empty(buffer_bytecount, dtype=cp.uint8) for n in range(n_buffer) + cp.empty(buffer_bytecount, dtype=cp.uint8) + for n in range(n_buffer) ) elif tile_buffers[0].size < buffer_bytecount: - warning.warn("reallocating tile buffers to accomodate data size") + warnings.warn( + "reallocating tile buffers to accommodate data size" + ) tile_buffers = tuple( - cp.empty(buffer_bytecount, dtype=cp.uint8) for n in range(n_buffer) + cp.empty(buffer_bytecount, dtype=cp.uint8) + for n in range(n_buffer) ) else: buffer_bytecount = tile_buffers[0].size @@ -418,22 +438,26 @@ def read_tiled(fname, levels=[0], backend='kvikio-pread', n_buffer=100, page.tiledepth, page.tilelength, page.tilewidth, - page.samplesperpixel + page.samplesperpixel, ) keyframe = page.keyframe truncation_check_needed = ( (keyframe.imagedepth % page.tiledepth != 0), (keyframe.imagelength % page.tilelength != 0), - (keyframe.imagewidth % page.tilewidth != 0) + (keyframe.imagewidth % page.tilewidth != 0), ) any_truncated = any(truncation_check_needed) - page_shape = page.shaped[1:] # # Any reason to prefer page.keyframe.imagedepth, etc. here as opposed to page.shape or page.shaped? + page_shape = page.shaped[ + 1: + ] # Any reason to prefer page.keyframe.imagedepth, etc. here as opposed to page.shape or page.shaped? # noqa: E501 - if backend == 'kvikio-raw_read': + if backend == "kvikio-raw_read": def read_tile_raw(fh, tile_buffer, bytecount, offset): """returns the # of bytes read""" - size = fh.raw_read(tile_buffer[:bytecount], file_offset=offset) + size = fh.raw_read( + tile_buffer[:bytecount], file_offset=offset + ) if size != bytecount: raise ValueError( "failed to read the expected number of bytes" @@ -442,7 +466,7 @@ def read_tile_raw(fh, tile_buffer, bytecount, offset): kv_read = read_tile_raw - elif backend == 'kvikio-read': + elif backend == "kvikio-read": def read_tile(fh, tile_buffer, bytecount, offset): """returns the # of bytes read""" @@ -455,11 +479,13 @@ def read_tile(fh, tile_buffer, bytecount, offset): kv_read = read_tile - elif backend == 'kvikio-pread': + elif backend == "kvikio-pread": def read_tile_async(fh, tile_buffer, bytecount, offset): """returns a future""" - future = fh.pread(tile_buffer[:bytecount], file_offset=offset) + future = fh.pread( + tile_buffer[:bytecount], file_offset=offset + ) # future.get() return future @@ -469,23 +495,36 @@ def read_tile_async(fh, tile_buffer, bytecount, offset): # note: page.databytecounts contains the size of all tiles in # bytes. 
It will only vary in the case of compressed data - for index, (offset, tile_bytecount, rounded_bytecount, buffer_offset) in enumerate( - zip(rounded_offsets, page.databytecounts, rounded_bytecounts, buffer_offsets) + for index, ( + offset, + tile_bytecount, + rounded_bytecount, + buffer_offset, + ) in enumerate( + zip( + rounded_offsets, + page.databytecounts, + rounded_bytecounts, + buffer_offsets, + ) ): index_mod = index % n_buffer if index == 0: - # intialize lists for storage of future results + # initialize lists for storage of future results all_futures = [] all_tiles = [] all_slices = [] elif index_mod == 0: # process the prior group of n_buffer futures - for tile, sl, future in zip(all_tiles, all_slices, all_futures): + for tile, sl, future in zip( + all_tiles, all_slices, all_futures + ): if isinstance(future, IOFuture): size = future.get() if size != rounded_bytecount: raise ValueError( - "failed to read the expected number of bytes" + "failed to read the expected number of " + "bytes" ) tile = tile[0] # omit depth axis if tile_func is None: @@ -509,14 +548,14 @@ def read_tile_async(fh, tile_buffer, bytecount, offset): # Determine offsets into `out_array` for the current tile # and determine slices to truncate the tile if needed. offset_indices = _get_tile_multiindex(page, index, n_tiles) - (s, d, l, w, _) = offset_indices + (s, d, h, w, _) = offset_indices if any_truncated: trunc_sl = _truncation_slices( truncation_check_needed, page_shape, offset_indices[1:4], tile_shape, - n_tiles + n_tiles, ) else: trunc_sl = None @@ -526,10 +565,16 @@ def read_tile_async(fh, tile_buffer, bytecount, offset): # tile start. buffer_start = buffer_offset buffer_end = buffer_start + tile_bytecount - tile = _decode(tile_buffers[index_mod][buffer_start:buffer_end], tile_shape, trunc_sl) + tile = _decode( + tile_buffers[index_mod][buffer_start:buffer_end], + tile_shape, + trunc_sl, + ) all_futures.append(read_output) all_tiles.append(tile) - all_slices.append((slice(l, l + tile_shape[1]), slice(w, w + tile_shape[2]))) + all_slices.append( + (slice(h, h + tile_shape[1]), slice(w, w + tile_shape[2])) + ) for tile, sl, future in zip(all_tiles, all_slices, all_futures): if isinstance(future, IOFuture): @@ -551,9 +596,9 @@ def read_tile_async(fh, tile_buffer, bytecount, offset): def _cupy_to_zarr_via_dask( image, - output_path='./example-output.zarr', + output_path="./example-output.zarr", chunk_shape=(2048, 2048, 3), - zarr_kwargs=dict(overwrite=False, compressor=None) + zarr_kwargs=dict(overwrite=False, compressor=None), ): """Write output to Zarr via GDSStore""" store = GDSStore(output_path) @@ -565,10 +610,10 @@ def _cupy_to_zarr_via_dask( def _cupy_to_zarr_kvikio_write_sync( image, - output_path='./example-output.zarr', + output_path="./example-output.zarr", chunk_shape=(2048, 2048, 3), zarr_kwargs=dict(overwrite=False, compressor=None), - backend='kvikio-raw_write', + backend="kvikio-raw_write", ): """Write output to Zarr via GDSStore""" @@ -579,17 +624,24 @@ def _cupy_to_zarr_kvikio_write_sync( output_path = os.path.realpath(output_path) store = DirectoryStore(output_path) - init_array(store, shape=image.shape, chunks=chunk_shape, dtype=image.dtype, - **zarr_kwargs) + init_array( + store, + shape=image.shape, + chunks=chunk_shape, + dtype=image.dtype, + **zarr_kwargs, + ) c0, c1, c2 = chunk_shape s0, s1, s2 = image.shape for i0, start0 in enumerate(range(0, s0, c0)): for i1, start1 in enumerate(range(0, s1, c1)): for i2, start2 in enumerate(range(0, s2, c2)): - tile = image[start0:start0 + c0, - 
start1:start1 + c1, - start2:start2 + c2] + tile = image[ + start0 : start0 + c0, + start1 : start1 + c1, + start2 : start2 + c2, + ] if tile.shape == chunk_shape: # copy so the tile is contiguous in memory tile = tile.copy() @@ -600,15 +652,15 @@ def _cupy_to_zarr_kvikio_write_sync( (0, c2 - tile.shape[2]), ) tile = cp.pad( - tile, pad_width, mode='constant', constant_values=0 + tile, pad_width, mode="constant", constant_values=0 ) - chunk_key = '.'.join(map(str, (i0, i1, i2))) + chunk_key = ".".join(map(str, (i0, i1, i2))) fname = os.path.join(output_path, chunk_key) with kvikio.CuFile(fname, "w") as fh: - if backend == 'kvikio-raw_write': + if backend == "kvikio-raw_write": size = fh.raw_write(tile) - elif backend == 'kvikio-write': + elif backend == "kvikio-write": size = fh.write(tile) else: raise ValueError(f"unknown backend {backend}") @@ -618,17 +670,14 @@ def _cupy_to_zarr_kvikio_write_sync( def cupy_to_zarr( image, - output_path='./example-output.zarr', + output_path="./example-output.zarr", chunk_shape=(512, 512, 3), n_buffer=16, zarr_kwargs=dict(overwrite=False, compressor=None), - backend='kvikio-pwrite', + backend="kvikio-pwrite", ): - """Write output to Zarr via GDSStore - - - """ - if backend == 'dask': + """Write output to Zarr via GDSStore""" + if backend == "dask": return _cupy_to_zarr_via_dask( image, output_path=output_path, @@ -636,7 +685,7 @@ def cupy_to_zarr( zarr_kwargs=zarr_kwargs, ) - elif backend in ['kvikio-write', 'kvikio-raw_write']: + elif backend in ["kvikio-write", "kvikio-raw_write"]: return _cupy_to_zarr_kvikio_write_sync( image, output_path=output_path, @@ -644,7 +693,7 @@ def cupy_to_zarr( zarr_kwargs=zarr_kwargs, backend=backend, ) - elif backend != 'kvikio-pwrite': + elif backend != "kvikio-pwrite": raise ValueError(f"unrecognized backend: {backend}") # 1.) create a zarr store @@ -653,8 +702,13 @@ def cupy_to_zarr( # overwrite = True. 
output_path = os.path.realpath(output_path) store = DirectoryStore(output_path) - init_array(store, shape=image.shape, chunks=chunk_shape, dtype=image.dtype, - **zarr_kwargs) + init_array( + store, + shape=image.shape, + chunks=chunk_shape, + dtype=image.dtype, + **zarr_kwargs, + ) # asynchronous write using pwrite index = 0 @@ -664,11 +718,9 @@ def cupy_to_zarr( for i0, start0 in enumerate(range(0, s0, c0)): for i1, start1 in enumerate(range(0, s1, c1)): for i2, start2 in enumerate(range(0, s2, c2)): - index_mod = index % n_buffer if index == 0: - # intialize lists for storage of future results - all_tiles = [] + # initialize lists for storage of future results all_handles = [] all_futures = [] elif index_mod == 0: @@ -684,11 +736,12 @@ def cupy_to_zarr( # reset the lists to prepare for the next n_buffer tiles all_futures = [] all_handles = [] - all_tiles = [] - tile = image[start0:start0 + c0, - start1:start1 + c1, - start2:start2 + c2] + tile = image[ + start0 : start0 + c0, + start1 : start1 + c1, + start2 : start2 + c2, + ] if tile.shape == chunk_shape: # copy so the tile is contiguous in memory tile_cache[index_mod] = tile @@ -699,10 +752,10 @@ def cupy_to_zarr( (0, c2 - tile.shape[2]), ) tile_cache[index_mod] = cp.pad( - tile, pad_width, mode='constant', constant_values=0 + tile, pad_width, mode="constant", constant_values=0 ) - chunk_key = '.'.join(map(str, (i0, i1, i2))) + chunk_key = ".".join(map(str, (i0, i1, i2))) fname = os.path.join(output_path, chunk_key) fh = kvikio.CuFile(fname, "w") @@ -715,8 +768,6 @@ def cupy_to_zarr( if isinstance(future, IOFuture): size = future.get() if size != tile_cache[0].nbytes: - raise ValueError( - "failed to write the expected number of bytes" - ) + raise ValueError("failed to write the expected number of bytes") fh.close() return diff --git a/examples/python/gds_whole_slide/lz4_nvcomp.py b/examples/python/gds_whole_slide/lz4_nvcomp.py index 58fc4c39d..fb94ed535 100644 --- a/examples/python/gds_whole_slide/lz4_nvcomp.py +++ b/examples/python/gds_whole_slide/lz4_nvcomp.py @@ -8,23 +8,21 @@ def ensure_ndarray(buf): if isinstance(buf, cp.ndarray): arr = buf - elif hasattr(buf, '__cuda_array_interface__'): + elif hasattr(buf, "__cuda_array_interface__"): arr = cp.asarray(buf, copy=False) - elif hasattr(buf, '__array_interface__'): + elif hasattr(buf, "__array_interface__"): arr = cp.asarray(np.asarray(buf)) else: raise ValueError("expected a cupy.ndarray") return arr -def ensure_contiguous_ndarray( - buf, max_buffer_size=None, flatten=True -): +def ensure_contiguous_ndarray(buf, max_buffer_size=None, flatten=True): """Convenience function to coerce `buf` to ndarray-like array. Also ensures that the returned value exports fully contiguous memory, - and supports the new-style buffer interface. If the optional max_buffer_size is - provided, raise a ValueError if the number of bytes consumed by the returned - array exceeds this value. + and supports the new-style buffer interface. If the optional max_buffer_size + is provided, raise a ValueError if the number of bytes consumed by the + returned array exceeds this value. 
Parameters ---------- @@ -54,7 +52,8 @@ def ensure_contiguous_ndarray( if arr.dtype == object: raise TypeError("object arrays are not supported") - # check for datetime or timedelta ndarray, the buffer interface doesn't support those + # check for datetime or timedelta ndarray, the buffer interface doesn't + # support those if arr.dtype.kind in "Mm": arr = arr.view(np.int64) @@ -67,7 +66,9 @@ def ensure_contiguous_ndarray( raise ValueError("an array with contiguous memory is required") if max_buffer_size is not None and arr.nbytes > max_buffer_size: - msg = "Codec does not support buffers of > {} bytes".format(max_buffer_size) + msg = "Codec does not support buffers of > {} bytes".format( + max_buffer_size + ) raise ValueError(msg) return arr @@ -120,28 +121,34 @@ class LZ4NVCOMP(Codec): """ - codec_id = 'lz4nvcomp' + codec_id = "lz4nvcomp" max_buffer_size = 0x7E000000 - def __init__(self, compressor=None): # , acceleration=1 (nvcomp lz4 doesn't take an acceleration argument) + def __init__( + self, compressor=None + ): # , acceleration=1 (nvcomp lz4 doesn't take an acceleration argument) # self.acceleration = acceleration self._compressor = None def encode(self, buf): buf = ensure_contiguous_ndarray(buf, self.max_buffer_size) - if self._compressor is None: # or self._compressor.data_type != buf.dtype: + if ( + self._compressor is None + ): # or self._compressor.data_type != buf.dtype: self._compressor = LZ4Manager(data_type=buf.dtype) return self._compressor.compress(buf) # , self.acceleration) def decode(self, buf, out=None): buf = ensure_contiguous_ndarray(buf, self.max_buffer_size) - if self._compressor is None: # or self._compressor.data_type != buf.dtype: + if ( + self._compressor is None + ): # or self._compressor.data_type != buf.dtype: self._compressor = LZ4Manager(data_type=buf.dtype) decompressed = self._compressor.decompress(buf) return ndarray_copy(decompressed, out) def __repr__(self): - r = '%s' % type(self).__name__ + r = "%s" % type(self).__name__ return r diff --git a/examples/python/tiff_image/main.py b/examples/python/tiff_image/main.py index 4147bfc4f..c36149a9e 100644 --- a/examples/python/tiff_image/main.py +++ b/examples/python/tiff_image/main.py @@ -20,22 +20,24 @@ from cucim import CuImage -img = CuImage('image.tif') - -print(img.is_loaded) # True if image data is loaded & available. -print(img.device) # A device type. -print(img.ndim) # The number of dimensions. -print(img.dims) # A string containing a list of dimensions being requested. -print(img.shape) # A tuple of dimension sizes (in the order of `dims`). -print(img.size('XYC')) # Returns size as a tuple for the given dimension order. -print(img.dtype) # The data type of the image. -print(img.channel_names) # A channel name list. -print(img.spacing()) # Returns physical size in tuple. -print(img.spacing_units()) # Units for each spacing element (size is same with `ndim`). -print(img.origin) # Physical location of (0, 0, 0) (size is always 3). -print(img.direction) # Direction cosines (size is always 3x3). -print(img.coord_sys) # Coordinate frame in which the direction cosines are - # measured. Available Coordinate frame is not finalized yet. +img = CuImage("image.tif") + +print(img.is_loaded) # True if image data is loaded & available. +print(img.device) # A device type. +print(img.ndim) # The number of dimensions. +print(img.dims) # A string containing a list of dimensions being requested. +print(img.shape) # A tuple of dimension sizes (in the order of `dims`). 
+print(img.size("XYC")) # Returns size as a tuple for the given dimension order. +print(img.dtype) # The data type of the image. +print(img.channel_names) # A channel name list. +print(img.spacing()) # Returns physical size in tuple. +print( + img.spacing_units() +) # Units for each spacing element (size is same with `ndim`). +print(img.origin) # Physical location of (0, 0, 0) (size is always 3). +print(img.direction) # Direction cosines (size is always 3x3). +print(img.coord_sys) # Coordinate frame in which the direction cosines are +# measured. Available Coordinate frame is not finalized yet. # Returns a set of associated image names. print(img.associated_images) @@ -50,7 +52,9 @@ resolutions = img.resolutions level_count = resolutions["level_count"] # Note: 'level' is at 3rd parameter (OpenSlide has it at 2nd parameter) -region = img.read_region(location=(10000, 10000), size=(512, 512), level=level_count-1) +region = img.read_region( + location=(10000, 10000), size=(512, 512), level=level_count - 1 +) region.save("test.ppm") diff --git a/experiments/Supporting_Aperio_SVS_Format/benchmark.py b/experiments/Supporting_Aperio_SVS_Format/benchmark.py index 6c0a7e1a5..ec65920c2 100644 --- a/experiments/Supporting_Aperio_SVS_Format/benchmark.py +++ b/experiments/Supporting_Aperio_SVS_Format/benchmark.py @@ -19,11 +19,11 @@ from itertools import repeat from time import perf_counter -import numpy as np -from cucim import CuImage -from cucim.clara.filesystem import discard_page_cache from openslide import OpenSlide +from cucim import CuImage +from cucim.clara.filesystem import discard_page_cache # noqa: F401 + class Timer(ContextDecorator): def __init__(self, message): @@ -51,7 +51,7 @@ def load_tile_openslide(slide, start_loc, patch_size): def load_tile_openslide_chunk(inp_file, start_loc_list, patch_size): with OpenSlide(inp_file) as slide: for start_loc in start_loc_list: - region = slide.read_region(start_loc, 0, [patch_size, patch_size]) + _ = slide.read_region(start_loc, 0, [patch_size, patch_size]) def load_tile_cucim(slide, start_loc, patch_size): @@ -62,7 +62,7 @@ def load_tile_cucim_chunk(inp_file, start_loc_list, patch_size): try: slide = CuImage(inp_file) for start_loc in start_loc_list: - region = slide.read_region(start_loc, [patch_size, patch_size], 0) + _ = slide.read_region(start_loc, [patch_size, patch_size], 0) except Exception as e: print(e) @@ -70,33 +70,40 @@ def load_tile_cucim_chunk(inp_file, start_loc_list, patch_size): def load_tile_openslide_chunk_mp(inp_file, start_loc_list, patch_size): with OpenSlide(inp_file) as slide: for start_loc in start_loc_list: - region = slide.read_region(start_loc, 0, [patch_size, patch_size]) + _ = slide.read_region(start_loc, 0, [patch_size, patch_size]) def load_tile_cucim_chunk_mp(inp_file, start_loc_list, patch_size): slide = CuImage(inp_file) for start_loc in start_loc_list: - region = slide.read_region(start_loc, [patch_size, patch_size], 0) + _ = slide.read_region(start_loc, [patch_size, patch_size], 0) -def experiment_thread(cache_strategy, input_file, num_threads, start_location, patch_size): +def experiment_thread( + cache_strategy, input_file, num_threads, start_location, patch_size +): import psutil + print(" ", psutil.virtual_memory()) for num_workers in (1, 3, 6, 9, 12): # range(1, num_threads + 1): openslide_time = 1 cucim_time = 1 rasterio_time = 1 - #discard_page_cache(input_file) + # discard_page_cache(input_file) with OpenSlide(input_file) as slide: width, height = slide.dimensions - start_loc_data = [(sx, sy) - for sy 
in range(start_location, height, patch_size) - for sx in range(start_location, width, patch_size)] + start_loc_data = [ + (sx, sy) + for sy in range(start_location, height, patch_size) + for sx in range(start_location, width, patch_size) + ] chunk_size = len(start_loc_data) // num_workers - start_loc_list_iter = [start_loc_data[i:i+chunk_size] - for i in range(0, len(start_loc_data), chunk_size)] + start_loc_list_iter = [ + start_loc_data[i : i + chunk_size] + for i in range(0, len(start_loc_data), chunk_size) + ] with Timer(" Thread elapsed time (OpenSlide)") as timer: with concurrent.futures.ThreadPoolExecutor( max_workers=num_workers @@ -105,23 +112,28 @@ def experiment_thread(cache_strategy, input_file, num_threads, start_location, p load_tile_openslide_chunk, repeat(input_file), start_loc_list_iter, - repeat(patch_size) + repeat(patch_size), ) openslide_time = timer.elapsed_time() print(" ", psutil.virtual_memory()) cache_size = psutil.virtual_memory().available // 1024 // 1024 // 20 cache = CuImage.cache( - cache_strategy, memory_capacity=cache_size, record_stat=True) + cache_strategy, memory_capacity=cache_size, record_stat=True + ) cucim_time = 0 - #discard_page_cache(input_file) + # discard_page_cache(input_file) slide = CuImage(input_file) - start_loc_data = [(sx, sy) - for sy in range(start_location, height, patch_size) - for sx in range(start_location, width, patch_size)] + start_loc_data = [ + (sx, sy) + for sy in range(start_location, height, patch_size) + for sx in range(start_location, width, patch_size) + ] chunk_size = len(start_loc_data) // num_workers - start_loc_list_iter = [start_loc_data[i:i+chunk_size] - for i in range(0, len(start_loc_data), chunk_size)] + start_loc_list_iter = [ + start_loc_data[i : i + chunk_size] + for i in range(0, len(start_loc_data), chunk_size) + ] with Timer(" Thread elapsed time (cuCIM)") as timer: with concurrent.futures.ThreadPoolExecutor( max_workers=num_workers @@ -130,36 +142,43 @@ def experiment_thread(cache_strategy, input_file, num_threads, start_location, p load_tile_cucim_chunk, repeat(input_file), start_loc_list_iter, - repeat(patch_size) + repeat(patch_size), ) cucim_time = timer.elapsed_time() print(f" hit: {cache.hit_count} miss: {cache.miss_count}") print(" ", psutil.virtual_memory()) - output_text = f"{datetime.now().strftime('%Y-%m-%d %H:%M:%S')},thread,{cache_strategy},{input_file},{start_location},{patch_size},{num_workers},{openslide_time},{cucim_time},{rasterio_time},{openslide_time / cucim_time},{rasterio_time / cucim_time},{cache_size},{cache.hit_count},{cache.miss_count}\n" + output_text = f"{datetime.now().strftime('%Y-%m-%d %H:%M:%S')},thread,{cache_strategy},{input_file},{start_location},{patch_size},{num_workers},{openslide_time},{cucim_time},{rasterio_time},{openslide_time / cucim_time},{rasterio_time / cucim_time},{cache_size},{cache.hit_count},{cache.miss_count}\n" # noqa: E501 with open("experiment.txt", "a+") as f: f.write(output_text) print(output_text) -def experiment_process(cache_strategy, input_file, num_processes, start_location, patch_size): +def experiment_process( + cache_strategy, input_file, num_processes, start_location, patch_size +): import psutil + print(" ", psutil.virtual_memory()) for num_workers in (1, 3, 6, 9, 12): # range(1, num_processes + 1): openslide_time = 1 cucim_time = 1 rasterio_time = 1 - #discard_page_cache(input_file) + # discard_page_cache(input_file) with OpenSlide(input_file) as slide: width, height = slide.dimensions - start_loc_data = [(sx, sy) - for sy in 
range(start_location, height, patch_size) - for sx in range(start_location, width, patch_size)] + start_loc_data = [ + (sx, sy) + for sy in range(start_location, height, patch_size) + for sx in range(start_location, width, patch_size) + ] chunk_size = len(start_loc_data) // num_workers - start_loc_list_iter = [start_loc_data[i:i+chunk_size] - for i in range(0, len(start_loc_data), chunk_size)] + start_loc_list_iter = [ + start_loc_data[i : i + chunk_size] + for i in range(0, len(start_loc_data), chunk_size) + ] with Timer(" Process elapsed time (OpenSlide)") as timer: with concurrent.futures.ProcessPoolExecutor( @@ -169,7 +188,7 @@ def experiment_process(cache_strategy, input_file, num_processes, start_location load_tile_openslide_chunk_mp, repeat(input_file), start_loc_list_iter, - repeat(patch_size) + repeat(patch_size), ) openslide_time = timer.elapsed_time() print(" ", psutil.virtual_memory()) @@ -178,16 +197,21 @@ def experiment_process(cache_strategy, input_file, num_processes, start_location if cache_strategy == "shared_memory": cache_size = cache_size * num_workers cache = CuImage.cache( - cache_strategy, memory_capacity=cache_size, record_stat=True) + cache_strategy, memory_capacity=cache_size, record_stat=True + ) cucim_time = 0 - #discard_page_cache(input_file) + # discard_page_cache(input_file) slide = CuImage(input_file) - start_loc_data = [(sx, sy) - for sy in range(start_location, height, patch_size) - for sx in range(start_location, width, patch_size)] + start_loc_data = [ + (sx, sy) + for sy in range(start_location, height, patch_size) + for sx in range(start_location, width, patch_size) + ] chunk_size = len(start_loc_data) // num_workers - start_loc_list_iter = [start_loc_data[i:i+chunk_size] - for i in range(0, len(start_loc_data), chunk_size)] + start_loc_list_iter = [ + start_loc_data[i : i + chunk_size] + for i in range(0, len(start_loc_data), chunk_size) + ] with Timer(" Process elapsed time (cuCIM)") as timer: with concurrent.futures.ProcessPoolExecutor( @@ -197,21 +221,25 @@ def experiment_process(cache_strategy, input_file, num_processes, start_location load_tile_cucim_chunk_mp, repeat(input_file), start_loc_list_iter, - repeat(patch_size) + repeat(patch_size), ) cucim_time = timer.elapsed_time() print(" ", psutil.virtual_memory()) rasterio_time = 0 - start_loc_data = [(sx, sy) - for sy in range(start_location, height, patch_size) - for sx in range(start_location, width, patch_size)] + start_loc_data = [ + (sx, sy) + for sy in range(start_location, height, patch_size) + for sx in range(start_location, width, patch_size) + ] chunk_size = len(start_loc_data) // num_workers - start_loc_list_iter = [start_loc_data[i:i+chunk_size] - for i in range(0, len(start_loc_data), chunk_size)] + start_loc_list_iter = [ + start_loc_data[i : i + chunk_size] + for i in range(0, len(start_loc_data), chunk_size) + ] print(" ", psutil.virtual_memory()) - output_text = f"{datetime.now().strftime('%Y-%m-%d %H:%M:%S')},process,{cache_strategy},{input_file},{start_location},{patch_size},{num_workers},{openslide_time},{cucim_time},{rasterio_time},{openslide_time / cucim_time},{rasterio_time / cucim_time},{cache_size},{cache.hit_count},{cache.miss_count}\n" + output_text = f"{datetime.now().strftime('%Y-%m-%d %H:%M:%S')},process,{cache_strategy},{input_file},{start_location},{patch_size},{num_workers},{openslide_time},{cucim_time},{rasterio_time},{openslide_time / cucim_time},{rasterio_time / cucim_time},{cache_size},{cache.hit_count},{cache.miss_count}\n" # noqa: E501 with 
open("experiment.txt", "a+") as f: f.write(output_text) print(output_text) @@ -219,14 +247,20 @@ def experiment_process(cache_strategy, input_file, num_processes, start_location for i in range(10): experiment_thread( - "per_process", "notebooks/input/TUPAC-TR-488.svs", 12, 120, 240) + "per_process", "notebooks/input/TUPAC-TR-488.svs", 12, 120, 240 + ) experiment_thread( - "per_process", "notebooks/input/JP2K-33003-2.svs", 12, 128, 256) + "per_process", "notebooks/input/JP2K-33003-2.svs", 12, 128, 256 + ) experiment_thread( - "per_process", "notebooks/input/CMU-1-JP2K-33005.svs", 12, 120, 240) + "per_process", "notebooks/input/CMU-1-JP2K-33005.svs", 12, 120, 240 + ) experiment_process( - "per_process", "notebooks/input/TUPAC-TR-488.svs", 12, 120, 240) + "per_process", "notebooks/input/TUPAC-TR-488.svs", 12, 120, 240 + ) experiment_process( - "per_process", "notebooks/input/JP2K-33003-2.svs", 12, 128, 256) + "per_process", "notebooks/input/JP2K-33003-2.svs", 12, 128, 256 + ) experiment_process( - "per_process", "notebooks/input/CMU-1-JP2K-33005.svs", 12, 120, 240) + "per_process", "notebooks/input/CMU-1-JP2K-33005.svs", 12, 120, 240 + ) diff --git a/experiments/Using_Cache/benchmark.py b/experiments/Using_Cache/benchmark.py index 7659ab2c7..d9667a44b 100644 --- a/experiments/Using_Cache/benchmark.py +++ b/experiments/Using_Cache/benchmark.py @@ -21,10 +21,11 @@ import numpy as np import rasterio -from cucim import CuImage from openslide import OpenSlide from rasterio.windows import Window +from cucim import CuImage + class Timer(ContextDecorator): def __init__(self, message): @@ -52,7 +53,7 @@ def load_tile_openslide(slide, start_loc, patch_size): def load_tile_openslide_chunk(inp_file, start_loc_list, patch_size): with OpenSlide(inp_file) as slide: for start_loc in start_loc_list: - region = slide.read_region(start_loc, 0, [patch_size, patch_size]) + _ = slide.read_region(start_loc, 0, [patch_size, patch_size]) def load_tile_cucim(slide, start_loc, patch_size): @@ -63,7 +64,7 @@ def load_tile_cucim_chunk(inp_file, start_loc_list, patch_size): try: slide = CuImage(inp_file) for start_loc in start_loc_list: - region = slide.read_region(start_loc, [patch_size, patch_size], 0) + _ = slide.read_region(start_loc, [patch_size, patch_size], 0) except Exception as e: print(e) @@ -72,39 +73,69 @@ def load_tile_cucim_chunk(inp_file, start_loc_list, patch_size): def load_tile_rasterio(slide, start_loc, tile_size): - _ = np.moveaxis(slide.read([1, 2, 3], - window=Window.from_slices((start_loc[0], start_loc[0] + tile_size), (start_loc[1], start_loc[1] + tile_size))), 0, -1) + _ = np.moveaxis( + slide.read( + [1, 2, 3], + window=Window.from_slices( + (start_loc[0], start_loc[0] + tile_size), + (start_loc[1], start_loc[1] + tile_size), + ), + ), + 0, + -1, + ) def load_tile_rasterio_chunk(input_file, start_loc_list, patch_size): identity = rasterio.Affine(1, 0, 0, 0, 1, 0) slide = rasterio.open(input_file, transform=identity, num_threads=1) for start_loc in start_loc_list: - _ = np.moveaxis(slide.read([1, 2, 3], - window=Window.from_slices((start_loc[0], start_loc[0] + patch_size), (start_loc[1], start_loc[1] + patch_size))), 0, -1) + _ = np.moveaxis( + slide.read( + [1, 2, 3], + window=Window.from_slices( + (start_loc[0], start_loc[0] + patch_size), + (start_loc[1], start_loc[1] + patch_size), + ), + ), + 0, + -1, + ) def load_tile_openslide_chunk_mp(inp_file, start_loc_list, patch_size): with OpenSlide(inp_file) as slide: for start_loc in start_loc_list: - region = slide.read_region(start_loc, 0, 
[patch_size, patch_size]) + _ = slide.read_region(start_loc, 0, [patch_size, patch_size]) def load_tile_cucim_chunk_mp(inp_file, start_loc_list, patch_size): slide = CuImage(inp_file) for start_loc in start_loc_list: - region = slide.read_region(start_loc, [patch_size, patch_size], 0) + _ = slide.read_region(start_loc, [patch_size, patch_size], 0) def load_tile_rasterio_chunk_mp(input_file, start_loc_list, patch_size): slide = rasterio.open(input_file, num_threads=1) for start_loc in start_loc_list: - region = np.moveaxis(slide.read([1, 2, 3], - window=Window.from_slices((start_loc[0], start_loc[0] + patch_size), (start_loc[1], start_loc[1] + patch_size))), 0, -1) - - -def experiment_thread(cache_strategy, input_file, num_threads, start_location, patch_size): + _ = np.moveaxis( + slide.read( + [1, 2, 3], + window=Window.from_slices( + (start_loc[0], start_loc[0] + patch_size), + (start_loc[1], start_loc[1] + patch_size), + ), + ), + 0, + -1, + ) + + +def experiment_thread( + cache_strategy, input_file, num_threads, start_location, patch_size +): import psutil + print(" ", psutil.virtual_memory()) # range(1, num_threads + 1): # (num_threads,): for num_workers in range(1, num_threads + 1): @@ -115,12 +146,16 @@ def experiment_thread(cache_strategy, input_file, num_threads, start_location, p with OpenSlide(input_file) as slide: width, height = slide.dimensions - start_loc_data = [(sx, sy) - for sy in range(start_location, height, patch_size) - for sx in range(start_location, width, patch_size)] + start_loc_data = [ + (sx, sy) + for sy in range(start_location, height, patch_size) + for sx in range(start_location, width, patch_size) + ] chunk_size = len(start_loc_data) // num_workers - start_loc_list_iter = [start_loc_data[i:i+chunk_size] - for i in range(0, len(start_loc_data), chunk_size)] + start_loc_list_iter = [ + start_loc_data[i : i + chunk_size] + for i in range(0, len(start_loc_data), chunk_size) + ] with Timer(" Thread elapsed time (OpenSlide)") as timer: with concurrent.futures.ThreadPoolExecutor( max_workers=num_workers @@ -129,22 +164,27 @@ def experiment_thread(cache_strategy, input_file, num_threads, start_location, p load_tile_openslide_chunk, repeat(input_file), start_loc_list_iter, - repeat(patch_size) + repeat(patch_size), ) openslide_time = timer.elapsed_time() print(" ", psutil.virtual_memory()) cache_size = psutil.virtual_memory().available // 1024 // 1024 // 20 cache = CuImage.cache( - cache_strategy, memory_capacity=cache_size, record_stat=True) + cache_strategy, memory_capacity=cache_size, record_stat=True + ) cucim_time = 0 slide = CuImage(input_file) - start_loc_data = [(sx, sy) - for sy in range(start_location, height, patch_size) - for sx in range(start_location, width, patch_size)] + start_loc_data = [ + (sx, sy) + for sy in range(start_location, height, patch_size) + for sx in range(start_location, width, patch_size) + ] chunk_size = len(start_loc_data) // num_workers - start_loc_list_iter = [start_loc_data[i:i+chunk_size] - for i in range(0, len(start_loc_data), chunk_size)] + start_loc_list_iter = [ + start_loc_data[i : i + chunk_size] + for i in range(0, len(start_loc_data), chunk_size) + ] with Timer(" Thread elapsed time (cuCIM)") as timer: with concurrent.futures.ThreadPoolExecutor( max_workers=num_workers @@ -153,18 +193,22 @@ def experiment_thread(cache_strategy, input_file, num_threads, start_location, p load_tile_cucim_chunk, repeat(input_file), start_loc_list_iter, - repeat(patch_size) + repeat(patch_size), ) cucim_time = timer.elapsed_time() print(f" 
hit: {cache.hit_count} miss: {cache.miss_count}") print(" ", psutil.virtual_memory()) - start_loc_data = [(sx, sy) - for sy in range(start_location, height, patch_size) - for sx in range(start_location, width, patch_size)] + start_loc_data = [ + (sx, sy) + for sy in range(start_location, height, patch_size) + for sx in range(start_location, width, patch_size) + ] chunk_size = len(start_loc_data) // num_workers - start_loc_list_iter = [start_loc_data[i:i+chunk_size] - for i in range(0, len(start_loc_data), chunk_size)] + start_loc_list_iter = [ + start_loc_data[i : i + chunk_size] + for i in range(0, len(start_loc_data), chunk_size) + ] with Timer(" Thread elapsed time (rasterio)") as timer: with concurrent.futures.ThreadPoolExecutor( @@ -174,19 +218,22 @@ def experiment_thread(cache_strategy, input_file, num_threads, start_location, p load_tile_rasterio_chunk, repeat(input_file), start_loc_list_iter, - repeat(patch_size) + repeat(patch_size), ) rasterio_time = timer.elapsed_time() print(" ", psutil.virtual_memory()) - output_text = f"{datetime.now().strftime('%Y-%m-%d %H:%M:%S')},thread,{cache_strategy},{input_file},{start_location},{patch_size},{num_workers},{openslide_time},{cucim_time},{rasterio_time},{openslide_time / cucim_time},{rasterio_time / cucim_time},{cache_size},{cache.hit_count},{cache.miss_count}\n" + output_text = f"{datetime.now().strftime('%Y-%m-%d %H:%M:%S')},thread,{cache_strategy},{input_file},{start_location},{patch_size},{num_workers},{openslide_time},{cucim_time},{rasterio_time},{openslide_time / cucim_time},{rasterio_time / cucim_time},{cache_size},{cache.hit_count},{cache.miss_count}\n" # noqa: E501 with open("experiment.txt", "a+") as f: f.write(output_text) print(output_text) -def experiment_process(cache_strategy, input_file, num_processes, start_location, patch_size): +def experiment_process( + cache_strategy, input_file, num_processes, start_location, patch_size +): import psutil + print(" ", psutil.virtual_memory()) for num_workers in range(1, num_processes + 1): openslide_time = 1 @@ -196,12 +243,16 @@ def experiment_process(cache_strategy, input_file, num_processes, start_location with OpenSlide(input_file) as slide: width, height = slide.dimensions - start_loc_data = [(sx, sy) - for sy in range(start_location, height, patch_size) - for sx in range(start_location, width, patch_size)] + start_loc_data = [ + (sx, sy) + for sy in range(start_location, height, patch_size) + for sx in range(start_location, width, patch_size) + ] chunk_size = len(start_loc_data) // num_workers - start_loc_list_iter = [start_loc_data[i:i+chunk_size] - for i in range(0, len(start_loc_data), chunk_size)] + start_loc_list_iter = [ + start_loc_data[i : i + chunk_size] + for i in range(0, len(start_loc_data), chunk_size) + ] with Timer(" Process elapsed time (OpenSlide)") as timer: with concurrent.futures.ProcessPoolExecutor( @@ -211,7 +262,7 @@ def experiment_process(cache_strategy, input_file, num_processes, start_location load_tile_openslide_chunk_mp, repeat(input_file), start_loc_list_iter, - repeat(patch_size) + repeat(patch_size), ) openslide_time = timer.elapsed_time() print(" ", psutil.virtual_memory()) @@ -220,15 +271,20 @@ def experiment_process(cache_strategy, input_file, num_processes, start_location if cache_strategy == "shared_memory": cache_size = cache_size * num_workers cache = CuImage.cache( - cache_strategy, memory_capacity=cache_size, record_stat=True) + cache_strategy, memory_capacity=cache_size, record_stat=True + ) cucim_time = 0 slide = CuImage(input_file) - 
start_loc_data = [(sx, sy) - for sy in range(start_location, height, patch_size) - for sx in range(start_location, width, patch_size)] + start_loc_data = [ + (sx, sy) + for sy in range(start_location, height, patch_size) + for sx in range(start_location, width, patch_size) + ] chunk_size = len(start_loc_data) // num_workers - start_loc_list_iter = [start_loc_data[i:i+chunk_size] - for i in range(0, len(start_loc_data), chunk_size)] + start_loc_list_iter = [ + start_loc_data[i : i + chunk_size] + for i in range(0, len(start_loc_data), chunk_size) + ] with Timer(" Process elapsed time (cuCIM)") as timer: with concurrent.futures.ProcessPoolExecutor( @@ -238,18 +294,22 @@ def experiment_process(cache_strategy, input_file, num_processes, start_location load_tile_cucim_chunk_mp, repeat(input_file), start_loc_list_iter, - repeat(patch_size) + repeat(patch_size), ) cucim_time = timer.elapsed_time() print(" ", psutil.virtual_memory()) rasterio_time = 0 - start_loc_data = [(sx, sy) - for sy in range(start_location, height, patch_size) - for sx in range(start_location, width, patch_size)] + start_loc_data = [ + (sx, sy) + for sy in range(start_location, height, patch_size) + for sx in range(start_location, width, patch_size) + ] chunk_size = len(start_loc_data) // num_workers - start_loc_list_iter = [start_loc_data[i:i+chunk_size] - for i in range(0, len(start_loc_data), chunk_size)] + start_loc_list_iter = [ + start_loc_data[i : i + chunk_size] + for i in range(0, len(start_loc_data), chunk_size) + ] with Timer(" Process elapsed time (rasterio)") as timer: with concurrent.futures.ProcessPoolExecutor( @@ -259,12 +319,12 @@ def experiment_process(cache_strategy, input_file, num_processes, start_location load_tile_rasterio_chunk_mp, repeat(input_file), start_loc_list_iter, - repeat(patch_size) + repeat(patch_size), ) rasterio_time = timer.elapsed_time() print(" ", psutil.virtual_memory()) - output_text = f"{datetime.now().strftime('%Y-%m-%d %H:%M:%S')},process,{cache_strategy},{input_file},{start_location},{patch_size},{num_workers},{openslide_time},{cucim_time},{rasterio_time},{openslide_time / cucim_time},{rasterio_time / cucim_time},{cache_size},{cache.hit_count},{cache.miss_count}\n" + output_text = f"{datetime.now().strftime('%Y-%m-%d %H:%M:%S')},process,{cache_strategy},{input_file},{start_location},{patch_size},{num_workers},{openslide_time},{cucim_time},{rasterio_time},{openslide_time / cucim_time},{rasterio_time / cucim_time},{cache_size},{cache.hit_count},{cache.miss_count}\n" # noqa: E501 with open("experiment.txt", "a+") as f: f.write(output_text) print(output_text) diff --git a/python/cucim/MANIFEST.in b/python/cucim/MANIFEST.in index 3c2e31645..756e96d3c 100644 --- a/python/cucim/MANIFEST.in +++ b/python/cucim/MANIFEST.in @@ -19,7 +19,6 @@ include README.md include tox.ini .travis.yml .appveyor.yml .readthedocs.yml -include versioneer.py include src/cucim/clara/*.so* recursive-include src/cucim *.py *.pyi *.cu *.h *.npy *.txt *.md diff --git a/python/cucim/ci/bootstrap.py b/python/cucim/ci/bootstrap.py index 75a2f67b5..05dbf0b3f 100755 --- a/python/cucim/ci/bootstrap.py +++ b/python/cucim/ci/bootstrap.py @@ -35,7 +35,7 @@ def exec_in_env(): check_call([join(bin_path, "pip"), "install", "jinja2", "tox"]) python_executable = join(bin_path, "python") if not os.path.exists(python_executable): - python_executable += '.exe' + python_executable += ".exe" print("Re-executing with: {0}".format(python_executable)) print("+ exec", python_executable, __file__, "--no-env") @@ -51,7 +51,7 @@ def 
main(): loader=jinja2.FileSystemLoader(join(base_path, "ci", "templates")), trim_blocks=True, lstrip_blocks=True, - keep_trailing_newline=True + keep_trailing_newline=True, ) tox_environments = [ @@ -62,17 +62,20 @@ def main(): # cookiecutter-pylibrary/hooks/post_gen_project.py # invokes this bootstrap.py itself. for line in subprocess.check_output( - [sys.executable, '-m', 'tox', '--listenvs'], - universal_newlines=True).splitlines() + [sys.executable, "-m", "tox", "--listenvs"], universal_newlines=True + ).splitlines() + ] + tox_environments = [ + line for line in tox_environments if line.startswith("py") ] - tox_environments = [line for line in tox_environments - if line.startswith('py')] for name in os.listdir(join("ci", "templates")): with open(join(base_path, name), "w") as fh: fh.write( jinja.get_template(name).render( - tox_environments=tox_environments)) + tox_environments=tox_environments + ) + ) print("Wrote {}".format(name)) print("DONE.") diff --git a/python/cucim/docs/conf.py b/python/cucim/docs/conf.py index 06e804d83..0dbe8b296 100644 --- a/python/cucim/docs/conf.py +++ b/python/cucim/docs/conf.py @@ -8,57 +8,57 @@ version_long = f.readline().strip() extensions = [ - 'sphinx.ext.autodoc', - 'sphinx.ext.autosummary', - 'sphinx.ext.coverage', - 'sphinx.ext.doctest', - 'sphinx.ext.extlinks', - 'sphinx.ext.ifconfig', - 'sphinx.ext.napoleon', - 'sphinx.ext.todo', - 'sphinx.ext.viewcode', - 'sphinx.ext.intersphinx', - 'sphinxcontrib.bibtex', - 'myst_nb', - 'sphinx_copybutton', - 'sphinx_togglebutton', - 'sphinx_panels', - 'ablog', - 'sphinxemoji.sphinxemoji', + "sphinx.ext.autodoc", + "sphinx.ext.autosummary", + "sphinx.ext.coverage", + "sphinx.ext.doctest", + "sphinx.ext.extlinks", + "sphinx.ext.ifconfig", + "sphinx.ext.napoleon", + "sphinx.ext.todo", + "sphinx.ext.viewcode", + "sphinx.ext.intersphinx", + "sphinxcontrib.bibtex", + "myst_nb", + "sphinx_copybutton", + "sphinx_togglebutton", + "sphinx_panels", + "ablog", + "sphinxemoji.sphinxemoji", ] # source_suffix = { # '.rst': 'restructuredtext', # '.ipynb': 'myst-nb', # '.myst': 'myst-nb', # } -master_doc = 'index' -project = 'cuCIM' -year = '2020-2021' -author = 'NVIDIA' -copyright = '{0}, {1}'.format(year, author) +master_doc = "index" +project = "cuCIM" +year = "2020-2021" +author = "NVIDIA" +copyright = "{0}, {1}".format(year, author) version = release = version_long -pygments_style = 'trac' -templates_path = ['.'] +pygments_style = "trac" +templates_path = ["."] extlinks = { - 'issue': ('https://github.com/rapidsai/cucim/issues/%s', '#'), - 'pr': ('https://github.com/rapidsai/cucim/pull/%s', 'PR #'), + "issue": ("https://github.com/rapidsai/cucim/issues/%s", "#"), + "pr": ("https://github.com/rapidsai/cucim/pull/%s", "PR #"), } # on_rtd is whether we are on readthedocs.org -on_rtd = os.environ.get('READTHEDOCS', None) == 'True' +on_rtd = os.environ.get("READTHEDOCS", None) == "True" if not on_rtd: # only set the theme if we're building docs locally - html_theme = 'pydata_sphinx_theme' # 'sphinx_book_theme' + html_theme = "pydata_sphinx_theme" # 'sphinx_book_theme' # https://github.com/pandas-dev/pydata-sphinx-theme # https://pydata-sphinx-theme.readthedocs.io/en/latest/user_guide/index.html html_use_smartypants = True -html_last_updated_fmt = '%b %d, %Y' +html_last_updated_fmt = "%b %d, %Y" html_split_index = False # html_sidebars = { # '**': ['searchbox.html', 'globaltoc.html', 'sourcelink.html'], # } -html_short_title = '%s-%s' % (project, version) +html_short_title = "%s-%s" % (project, version) napoleon_use_ivar = 
True napoleon_use_rtype = False @@ -70,7 +70,7 @@ # # Reference # : https://www.sphinx-doc.org/en/master/usage/configuration.html?highlight=linkcheck#options-for-the-linkcheck-builder) # noqa -linkcheck_ignore = [r'^\/', r'^\.\.'] +linkcheck_ignore = [r"^\/", r"^\.\."] # Options for sphinx.ext.todo # (reference: https://www.sphinx-doc.org/en/master/usage/extensions/todo.html) @@ -90,9 +90,9 @@ # Options for pydata-sphinx-theme # (reference: https://pydata-sphinx-theme.readthedocs.io/en/latest/user_guide/configuring.html) # noqa -html_static_path = ['_static'] +html_static_path = ["_static"] html_css_files = [ - 'css/custom.css', + "css/custom.css", ] html_theme_options = { @@ -123,9 +123,7 @@ # Prevent the following error # MyST NB Configuration Error: # `nb_render_priority` not set for builder: doctest -nb_render_priority = { - "doctest": () -} +nb_render_priority = {"doctest": ()} # Prevent creating jupyter_execute folder in dist # https://myst-nb.readthedocs.io/en/latest/use/execute.html#executing-in-temporary-folders # noqa diff --git a/python/cucim/pyproject.toml b/python/cucim/pyproject.toml new file mode 100644 index 000000000..2b3143ff7 --- /dev/null +++ b/python/cucim/pyproject.toml @@ -0,0 +1,233 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +# See file LICENSE for terms. + +[build-system] +build-backend = "setuptools.build_meta" +requires = [ + "setuptools>=24.2.0", + "wheel", +] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`. + +[project] +name = "cucim" +version = "23.12.00" +description = "cuCIM - an extensible toolkit designed to provide GPU accelerated I/O, computer vision & image processing primitives for N-Dimensional images with a focus on biomedical imaging." +# TODO: tried also adding CHANGELOG.md as in setup.py's long_description, but ruff complained about it +readme = { file = "README.md", content-type = "text/markdown" } +# readme = [ +# { file = "README.md", content-type = "text/markdown" }, +# { file = "CHANGELOG.md", content-type = "text/markdown" }, +# ] +authors = [ + { name = "NVIDIA Corporation" }, +] +license = { text = "Apache 2.0" } +requires-python = ">=3.8" +dependencies = [ + "cupy-cuda11x>=12.0.0", + "lazy_loader>=0.1", + "numpy", + "scikit-image>=0.19.0,<0.22.0a0", + "scipy", +] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`. 
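The `[project]` metadata above takes over what setup.py/setup.cfg used to carry (setup.cfg is deleted later in this patch). A quick sanity check that the new file parses as expected — a sketch, not part of the diff, assuming Python 3.11+ for the standard-library `tomllib` (older interpreters can substitute the `tomli` backport, which exposes the same API):

```python
import tomllib  # stdlib TOML parser, Python 3.11+

with open("python/cucim/pyproject.toml", "rb") as f:
    meta = tomllib.load(f)

print(meta["project"]["name"])             # cucim
print(meta["project"]["version"])          # 23.12.00
print(meta["project"]["requires-python"])  # >=3.8
```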
+classifiers = [ + "Development Status :: 4 - Beta", + "Intended Audience :: Developers", + "Intended Audience :: Education", + "Intended Audience :: Science/Research", + "Intended Audience :: Healthcare Industry", + "Topic :: Scientific/Engineering", + "Operating System :: POSIX :: Linux", + "Environment :: Console", + "Environment :: GPU :: NVIDIA CUDA :: 11.0", + "Environment :: GPU :: NVIDIA CUDA :: 12.0", + "License :: OSI Approved :: Apache Software License", + "Programming Language :: C++", + "Programming Language :: Python", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", +] + +[project.urls] +Homepage = "https://developer.nvidia.com/multidimensional-image-processing" +Documentation = "https://docs.rapids.ai/api/cucim/stable/" +Changelog = "https://github.com/rapidsai/cucim/blob/branch-23.12/CHANGELOG.md" +Source = "https://github.com/rapidsai/cucim" +Tracker = "https://github.com/rapidsai/cucim/issues" + +[project.optional-dependencies] +test = [ + "GPUtil>=1.4.0", + "click", + "imagecodecs>=2021.6.8", + "jbig", + "libwebp-base", + "opencv-python-headless>=4.6", + "openslide-python>=1.1.2", + "psutil>=5.8.0", + "pytest-cov>=2.12.1", + "pytest-lazy-fixture>=0.6.3", + "pytest-xdist", + "pytest>=6.2.4", + "tifffile>=2022.7.28", + "xz", + "zlib", + "zstd", +] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`. +developer = [ + "black", + "isort", + "pre-commit", + "ruff", +] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`. +docs = [ + "ipython", + "nbsphinx", + "numpydoc", + "pydata-sphinx-theme", + "recommonmark", + "sphinx<6", +] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`. + +[project.entry-points."console_scripts"] +cucim = "cucim.clara.cli:main" + +[tool.setuptools] +license-files = ["LICENSE"] +# By default, include-package-data is true in pyproject.toml, so you do +# NOT have to specify this line. +include-package-data = true + +[tool.setuptools.packages.find] +where = ["src"] + +[tool.setuptools.package-data] +mypkg = ["*.pyi", "*.h", "*.cu"] + +[tool.isort] +profile = "black" +force_single_line = false +line_length = 80 +forced_separate = "test_cucim" +force_grid_wrap = 0 +multi_line_output = 3 +order_by_type = true +combine_as_imports = true +include_trailing_comma = true +known_first_party = [ + "cucim", +] +default_section = "THIRDPARTY" +sections = [ + "FUTURE", + "STDLIB", + "THIRDPARTY", + "FIRSTPARTY", + "LOCALFOLDER", +] +skip = [ + "3rdparty", + "thirdparty", + ".eggs", + ".git", + ".hg", + ".mypy_cache", + ".tox", + ".venv", + "ci", + "cpp", + "_build", + "build", + "build-debug", + "build-release", + "_deps", + "dist", +] + +[tool.pytest.ini_options] +# If a pytest section is found in one of the possible config files +# (pytest.ini, tox.ini or setup.cfg), then pytest will not look for any others, +# so if you add a pytest config section elsewhere, +# you will need to delete this section from setup.cfg. 
+norecursedirs = "migrations" +python_files = [ + "test_*.py", + "*_test.py", + "tests.py", +] +addopts = [ + "-ra", + "--strict-markers", + # --doctest-modules", + # --doctest-glob=\*.rst", + "--tb=short", + "--ignore-glob=build*", +] +testpaths = [ + "src", + "tests", +] + +[tool.ruff] +# see: https://docs.astral.sh/ruff/rules/ +select = ["E", "F", "W"] +fixable = ["ALL"] +exclude = [ + # TODO: Remove this in a follow-up where we fix __all__. + ".tox", + ".eggs", + "ci/templates", + "build", + "dist", + ".git", + "__pycache__", + "doc/conf.py", + "doc/sphinxext", + "__init__.py", +] +line-length = 80 + +[tool.ruff.per-file-ignores] +# "src/cucim/skimage/util/tests/test_shape.py" = ["E201", "E202"] + +[tool.black] +line-length = 80 +target-version = ["py39"] +include = '\.py?$' +exclude = ''' +/( + 3rdparty | + thirdparty | + \.eggs | + \.git | + \.hg | + \.mypy_cache | + \.tox | + \.venv | + _build | + _deps | + cpp | + ci | + build | + build-debug | + build-release | + dist | + docker | + docs | +)/ +''' + +[tool.codespell] +# note: pre-commit passes explicit lists of files here, which this skip file list doesn't override - +# this is only to allow you to run codespell interactively +# e.g. via +# codespell --toml python/cucim/pyproject.toml . -i 3 -w +skip = "build*,dist,.cache,html,_build,_deps,3rdparty,_static,generated,latex,.git,*.ipynb,test_data/input/LICENSE-3rdparty,jitify_testing" +# ignore-regex = "" +ignore-words-list = "ans,coo,boun,bui,gool,hart,lond,nd,paeth,unser,wronly" +quiet-level = 3 + +# to undo: ./test_data/input/LICENSE-3rdparty diff --git a/python/cucim/pyproject_.toml b/python/cucim/pyproject_.toml new file mode 100644 index 000000000..c5351f058 --- /dev/null +++ b/python/cucim/pyproject_.toml @@ -0,0 +1,129 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +# See file LICENSE for terms. + +[tool.isort] +profile = "black" +force_single_line = false +line_length = 80 +forced_separate = "test_cucim" +force_grid_wrap = 0 +multi_line_output = 3 +order_by_type = true +combine_as_imports = true +include_trailing_comma = true +known_first_party = [ + "cucim", +] +default_section = "THIRDPARTY" +sections = [ + "FUTURE", + "STDLIB", + "THIRDPARTY", + "FIRSTPARTY", + "LOCALFOLDER", +] +skip = [ + "3rdparty", + "thirdparty", + ".eggs", + ".git", + ".hg", + ".mypy_cache", + ".tox", + ".venv", + "ci", + "cpp", + "_build", + "build", + "build-debug", + "build-release", + "_deps", + "dist", +] + +[tool.pytest.ini_options] +# If a pytest section is found in one of the possible config files +# (pytest.ini, tox.ini or setup.cfg), then pytest will not look for any others, +# so if you add a pytest config section elsewhere, +# you will need to delete this section from setup.cfg. +norecursedirs = "migrations" +python_files = [ + "test_*.py", + "*_test.py", + "tests.py", +] +addopts = [ + "-ra", + "--strict-markers", + # --doctest-modules", + # --doctest-glob=\*.rst", + "--tb=short", + "--ignore-glob=build*", +] +testpaths = [ + "src", + "tests", +] + +[tool.ruff] +select = ["E", "F", "W"] +fixable = ["ALL"] +exclude = [ + # TODO: Remove this in a follow-up where we fix __all__. 
+ ".tox", + ".eggs", + "ci/templates", + "build", + "dist", + ".git", + "__pycache__", + "doc/conf.py", + "doc/sphinxext", + "__init__.py", +] +line-length = 80 + +[tool.ruff.per-file-ignores] +# "src/cucim/skimage/util/tests/test_shape.py" = ["E201", "E202"] + +[tool.black] +line-length = 80 +target-version = ["py39"] +include = '\.py?$' +exclude = ''' +/( + 3rdparty | + thirdparty | + \.eggs | + \.git | + \.hg | + \.mypy_cache | + \.tox | + \.venv | + _build | + _deps | + cpp | + ci | + build | + build-debug | + build-release | + dist | + docker | + docs | +)/ +''' + +[tool.setuptools] +license-files = ["LICENSE"] + +[tool.codespell] +# note: pre-commit passes explicit lists of files here, which this skip file list doesn't override - +# this is only to allow you to run codespell interactively +# e.g. via +# codespell --toml python/cucim/pyproject.toml . -i 3 -w +skip = "build*,dist,.cache,html,_build,_deps,3rdparty,_static,generated,latex,.git,*.ipynb,test_data/input/LICENSE-3rdparty,jitify_testing" +# ignore-regex = "" +ignore-words-list = "ans,coo,boun,bui,gool,hart,lond,nd,paeth,unser,wronly" +quiet-level = 3 + +# to undo: ./test_data/input/LICENSE-3rdparty diff --git a/python/cucim/setup.cfg b/python/cucim/setup.cfg deleted file mode 100644 index 7743540c2..000000000 --- a/python/cucim/setup.cfg +++ /dev/null @@ -1,70 +0,0 @@ -[versioneer] -VCS = git -style = pep440 -versionfile_source = src/cucim/_version.py -versionfile_build = cucim/_version.py -tag_prefix = v -parentdir_prefix = cucim- - -[bdist_wheel] -universal = 0 - -[egg_info] -egg_base = src - -[flake8] -max-line-length = 80 -ignore = - # line break before binary operator - W503 - # line break after binary operator - W504 - # whitespace before : - E203 - # f-string is missing placeholders - F541 -exclude = .tox,.eggs,ci/templates,build,dist,.git,__pycache__,doc/conf.py,doc/sphinxext,build,dist,__init__.py -per-file-ignores = - setup.py:F821 - versioneer.py:W605 - src/localtest.py:E127 - src/cucim/skimage/__init__.py:F401 - src/cucim/skimage/measure/tests/test_block.py:E201,E202,E241 - src/cucim/skimage/transform/_geometric.py:E201,E202,E241 - src/cucim/skimage/transform/tests/test_warps.py:E201,E202,E241,W605 - src/cucim/skimage/util/tests/test_shape.py:E201,E202,E241 - src/cucim/core/operations/expose/transform.py:F401 - -[tool:pytest] -# If a pytest section is found in one of the possible config files -# (pytest.ini, tox.ini or setup.cfg), then pytest will not look for any others, -# so if you add a pytest config section elsewhere, -# you will need to delete this section from setup.cfg. -norecursedirs = - migrations - -python_files = - test_*.py - *_test.py - tests.py -# PytestDeprecationWarning: The --strict option is deprecated, use --strict-markers instead. 
-addopts = - -ra - --strict-markers - # --doctest-modules - # --doctest-glob=\*.rst - --tb=short - --ignore-glob build -testpaths = - src - tests - -[tool:isort] -force_single_line = False -line_length = 80 -known_first_party = cucim -default_section = THIRDPARTY -forced_separate = test_cucim -skip = .tox,.eggs,ci/templates,build,dist,versioneer.py,ndimage.py -sections = FUTURE,STDLIB,THIRDPARTY,FIRSTPARTY,LOCALFOLDER -multi_line_output = GRID diff --git a/python/cucim/setup.py b/python/cucim/setup.py deleted file mode 100755 index 214a44f25..000000000 --- a/python/cucim/setup.py +++ /dev/null @@ -1,106 +0,0 @@ -#!/usr/bin/env python -# -*- encoding: utf-8 -*- - -import io -import sys -from os.path import dirname, join - -import versioneer -from setuptools import find_packages, setup - -# Give setuptools a hint to complain if it's too old a version -# 24.2.0 added the python_requires option -# Should match pyproject.toml -SETUP_REQUIRES = ['setuptools >= 24.2.0'] -# This enables setuptools to install wheel on-the-fly -SETUP_REQUIRES += ['wheel'] if 'bdist_wheel' in sys.argv else [] - - -def read(*names, **kwargs): - with io.open( - join(dirname(__file__), *names), - encoding=kwargs.get('encoding', 'utf8') - ) as fh: - return fh.read() - - -opts = dict( - name='cucim', - version=read('VERSION').strip(), # versioneer.get_version(), - cmdclass=versioneer.get_cmdclass(), - license='Apache-2.0', - description='cuCIM - an extensible toolkit designed to provide GPU accelerated I/O, computer vision & image processing primitives for N-Dimensional images with a focus on biomedical imaging.', # noqa - long_description='%s\n%s' % ( - read('README.md'), - read('CHANGELOG.md') - ), - long_description_content_type='text/markdown', - author='NVIDIA Corporation', - url='https://developer.nvidia.com/multidimensional-image-processing', - packages=find_packages('src'), - package_dir={'cucim': 'src/cucim'}, - package_data={"": ["*.pyi", "*.h", "*.cu"]}, - include_package_data=True, - zip_safe=False, - classifiers=[ - # complete classifier list: - # http://pypi.python.org/pypi?%3Aaction=list_classifiers - 'Development Status :: 4 - Beta', - 'Intended Audience :: Developers', - 'Intended Audience :: Education', - 'Intended Audience :: Science/Research', - 'Intended Audience :: Healthcare Industry', - 'Operating System :: POSIX :: Linux', - 'Environment :: Console', - 'Environment :: GPU :: NVIDIA CUDA :: 11.0', - 'License :: OSI Approved :: Apache Software License', - 'Programming Language :: C++', - 'Programming Language :: Python', - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.8', - 'Programming Language :: Python :: 3.9', - 'Programming Language :: Python :: 3.10', - 'Programming Language :: Python :: 3.11', - # 'Operating System :: OS Independent', - # 'Operating System :: Unix', - # 'Operating System :: POSIX', - # 'Operating System :: Microsoft :: Windows', - # 'Programming Language :: Python :: Implementation :: CPython', - # uncomment if you test on these interpreters: - # 'Programming Language :: Python :: Implementation :: PyPy', - # 'Programming Language :: Python :: Implementation :: IronPython', - # 'Programming Language :: Python :: Implementation :: Jython', - # 'Programming Language :: Python :: Implementation :: Stackless', - 'Topic :: Scientific/Engineering :: Image Processing', - ], - project_urls={ - 'Source': 'https://github.com/rapidsai/cucim', - 'Documentation': 'https://docs.rapids.ai/api/cucim/stable/', - 'Changelog': 
'https://github.com/rapidsai/cucim/blob/main/CHANGELOG.md', - 'Issue Tracker': 'https://github.com/rapidsai/cucim/issues', - }, - keywords=[ - # eg: 'keyword1', 'keyword2', 'keyword3', - ], - python_requires='>= 3.6', - platforms=['manylinux2014_x86_64'], - setup_requires=SETUP_REQUIRES, - install_requires=[ - # TODO: Check cupy dependency based on cuda version - 'click', 'numpy', "lazy_loader>=0.1", # 'scipy', 'scikit-image' - # eg: 'aspectlib==1.1.1', 'six>=1.7', - ], - extras_require={ - # eg: - # 'rst': ['docutils>=0.11'], - # ':python_version=="2.6"': ['argparse'], - }, - entry_points={ - 'console_scripts': [ - 'cucim = cucim.clara.cli:main', - ] - }, -) - -if __name__ == '__main__': - setup(**opts) diff --git a/python/cucim/src/cucim/__init__.py b/python/cucim/src/cucim/__init__.py index 37bfa2cb3..ed21dc97d 100644 --- a/python/cucim/src/cucim/__init__.py +++ b/python/cucim/src/cucim/__init__.py @@ -42,19 +42,22 @@ try: import cupy + _is_cupy_available = True - submodules += ['core', 'skimage'] + submodules += ["core", "skimage"] except ImportError: pass try: from .clara import CuImage, __version__, cli + _is_clara_available = True - submodules += ['clara'] - submod_attrs['clara'] = ['CuImage', 'cli'] + submodules += ["clara"] + submod_attrs["clara"] = ["CuImage", "cli"] except ImportError: from ._version import get_versions - __version__ = get_versions()['version'] + + __version__ = get_versions()["version"] del get_versions del _version @@ -65,7 +68,7 @@ def __dir__(): - return __lazy_dir__() + ['__version__', 'is_available'] + return __lazy_dir__() + ["__version__", "is_available"] def is_available(module_name: str = "") -> bool: @@ -87,7 +90,7 @@ def is_available(module_name: str = "") -> bool: """ if module_name in ("skimage", "core"): return _is_cupy_available - elif module_name == 'clara': + elif module_name == "clara": return _is_clara_available else: return _is_cupy_available and _is_clara_available diff --git a/python/cucim/src/cucim/__init__.pyi b/python/cucim/src/cucim/__init__.pyi index 985551d7e..8c7b467f9 100644 --- a/python/cucim/src/cucim/__init__.pyi +++ b/python/cucim/src/cucim/__init__.pyi @@ -17,19 +17,19 @@ submodules = [] try: import cupy + _is_cupy_available = True - submodules += ['core', 'skimage'] + submodules += ["core", "skimage"] + del cupy except ImportError: pass try: - from .clara import CuImage, __version__, cli + from .clara import CuImage, __version__, cli # noqa: F401 + _is_clara_available = True - submodules += ['clara'] + submodules += ["clara"] except ImportError: - from ._version import get_versions - __version__ = get_versions()['version'] - del get_versions - del _version + __version__ = "23.12.00" -__all__ = submodules + ['__version__', 'is_available'] +__all__ = submodules + ["__version__", "is_available"] # noqa: F822 diff --git a/python/cucim/src/cucim/_misc.py b/python/cucim/src/cucim/_misc.py index 78eaf2de7..fae77a7cc 100644 --- a/python/cucim/src/cucim/_misc.py +++ b/python/cucim/src/cucim/_misc.py @@ -5,7 +5,7 @@ import numpy -if hasattr(math, 'prod'): +if hasattr(math, "prod"): prod = math.prod # available in Python 3.8+ only else: prod = numpy.prod diff --git a/python/cucim/src/cucim/_version.py b/python/cucim/src/cucim/_version.py deleted file mode 100644 index 8eac307ec..000000000 --- a/python/cucim/src/cucim/_version.py +++ /dev/null @@ -1,520 +0,0 @@ - -# This file helps to compute a version number in source trees obtained from -# git-archive tarball (such as those provided by githubs download-from-tag -# feature). 
Distribution tarballs (built by setup.py sdist) and build -# directories (produced by setup.py build) will contain a much shorter file -# that just contains the computed version number. - -# This file is released into the public domain. Generated by -# versioneer-0.18 (https://github.com/warner/python-versioneer) - -"""Git implementation of _version.py.""" - -import errno -import os -import re -import subprocess -import sys - - -def get_keywords(): - """Get the keywords needed to look up the version information.""" - # these strings will be replaced by git during git-archive. - # setup.py/versioneer.py will grep for the variable names, so they must - # each be defined on a line of their own. _version.py will just call - # get_keywords(). - git_refnames = "$Format:%d$" - git_full = "$Format:%H$" - git_date = "$Format:%ci$" - keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} - return keywords - - -class VersioneerConfig: - """Container for Versioneer configuration parameters.""" - - -def get_config(): - """Create, populate and return the VersioneerConfig() object.""" - # these strings are filled in when 'setup.py versioneer' creates - # _version.py - cfg = VersioneerConfig() - cfg.VCS = "git" - cfg.style = "pep440" - cfg.tag_prefix = "v" - cfg.parentdir_prefix = "cucim-" - cfg.versionfile_source = "cucim/_version.py" - cfg.verbose = False - return cfg - - -class NotThisMethod(Exception): - """Exception raised if a method is not valid for the current scenario.""" - - -LONG_VERSION_PY = {} -HANDLERS = {} - - -def register_vcs_handler(vcs, method): # decorator - """Decorator to mark a method as the handler for a particular VCS.""" - def decorate(f): - """Store f in HANDLERS[vcs][method].""" - if vcs not in HANDLERS: - HANDLERS[vcs] = {} - HANDLERS[vcs][method] = f - return f - return decorate - - -def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, - env=None): - """Call the given command(s).""" - assert isinstance(commands, list) - p = None - for c in commands: - try: - dispcmd = str([c] + args) - # remember shell=False, so use git.cmd on windows, not just git - p = subprocess.Popen([c] + args, cwd=cwd, env=env, - stdout=subprocess.PIPE, - stderr=(subprocess.PIPE if hide_stderr - else None)) - break - except EnvironmentError: - e = sys.exc_info()[1] - if e.errno == errno.ENOENT: - continue - if verbose: - print("unable to run %s" % dispcmd) - print(e) - return None, None - else: - if verbose: - print("unable to find command, tried %s" % (commands,)) - return None, None - stdout = p.communicate()[0].strip() - if sys.version_info[0] >= 3: - stdout = stdout.decode() - if p.returncode != 0: - if verbose: - print("unable to run %s (error)" % dispcmd) - print("stdout was %s" % stdout) - return None, p.returncode - return stdout, p.returncode - - -def versions_from_parentdir(parentdir_prefix, root, verbose): - """Try to determine the version from the parent directory name. - - Source tarballs conventionally unpack into a directory that includes both - the project name and a version string. 
We will also support searching up - two directory levels for an appropriately named parent directory - """ - rootdirs = [] - - for i in range(3): - dirname = os.path.basename(root) - if dirname.startswith(parentdir_prefix): - return {"version": dirname[len(parentdir_prefix):], - "full-revisionid": None, - "dirty": False, "error": None, "date": None} - else: - rootdirs.append(root) - root = os.path.dirname(root) # up a level - - if verbose: - print("Tried directories %s but none started with prefix %s" % - (str(rootdirs), parentdir_prefix)) - raise NotThisMethod("rootdir doesn't start with parentdir_prefix") - - -@register_vcs_handler("git", "get_keywords") -def git_get_keywords(versionfile_abs): - """Extract version information from the given file.""" - # the code embedded in _version.py can just fetch the value of these - # keywords. When used from setup.py, we don't want to import _version.py, - # so we do it with a regexp instead. This function is not used from - # _version.py. - keywords = {} - try: - f = open(versionfile_abs, "r") - for line in f.readlines(): - if line.strip().startswith("git_refnames ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["refnames"] = mo.group(1) - if line.strip().startswith("git_full ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["full"] = mo.group(1) - if line.strip().startswith("git_date ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["date"] = mo.group(1) - f.close() - except EnvironmentError: - pass - return keywords - - -@register_vcs_handler("git", "keywords") -def git_versions_from_keywords(keywords, tag_prefix, verbose): - """Get version information from git keywords.""" - if not keywords: - raise NotThisMethod("no keywords at all, weird") - date = keywords.get("date") - if date is not None: - # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant - # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 - # -like" string, which we must then edit to make compliant), because - # it's been around since git-1.5.3, and it's too difficult to - # discover which version we're using, or to work around using an - # older one. - date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) - refnames = keywords["refnames"].strip() - if refnames.startswith("$Format"): - if verbose: - print("keywords are unexpanded, not using") - raise NotThisMethod("unexpanded keywords, not a git-archive tarball") - refs = set([r.strip() for r in refnames.strip("()").split(",")]) - # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of - # just "foo-1.0". If we see a "tag: " prefix, prefer those. - TAG = "tag: " - tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) - if not tags: - # Either we're using git < 1.8.3, or there really are no tags. We use - # a heuristic: assume all version tags have a digit. The old git %d - # expansion behaves like git log --decorate=short and strips out the - # refs/heads/ and refs/tags/ prefixes that would let us distinguish - # between branches and tags. By ignoring refnames without digits, we - # filter out many common branch names like "release" and - # "stabilization", as well as "HEAD" and "master". - tags = set([r for r in refs if re.search(r'\d', r)]) - if verbose: - print("discarding '%s', no digits" % ",".join(refs - tags)) - if verbose: - print("likely tags: %s" % ",".join(sorted(tags))) - for ref in sorted(tags): - # sorting will prefer e.g. 
"2.0" over "2.0rc1" - if ref.startswith(tag_prefix): - r = ref[len(tag_prefix):] - if verbose: - print("picking %s" % r) - return {"version": r, - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": None, - "date": date} - # no suitable tags, so version is "0+unknown", but full hex is still there - if verbose: - print("no suitable tags, using unknown + full revision id") - return {"version": "0+unknown", - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": "no suitable tags", "date": None} - - -@register_vcs_handler("git", "pieces_from_vcs") -def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): - """Get version from 'git describe' in the root of the source tree. - - This only gets called if the git-archive 'subst' keywords were *not* - expanded, and _version.py hasn't already been rewritten with a short - version string, meaning we're inside a checked out source tree. - """ - GITS = ["git"] - if sys.platform == "win32": - GITS = ["git.cmd", "git.exe"] - - out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, - hide_stderr=True) - if rc != 0: - if verbose: - print("Directory %s not under git control" % root) - raise NotThisMethod("'git rev-parse --git-dir' returned error") - - # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] - # if there isn't one, this yields HEX[-dirty] (no NUM) - describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", - "--always", "--long", - "--match", "%s*" % tag_prefix], - cwd=root) - # --long was added in git-1.5.5 - if describe_out is None: - raise NotThisMethod("'git describe' failed") - describe_out = describe_out.strip() - full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) - if full_out is None: - raise NotThisMethod("'git rev-parse' failed") - full_out = full_out.strip() - - pieces = {} - pieces["long"] = full_out - pieces["short"] = full_out[:7] # maybe improved later - pieces["error"] = None - - # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] - # TAG might have hyphens. - git_describe = describe_out - - # look for -dirty suffix - dirty = git_describe.endswith("-dirty") - pieces["dirty"] = dirty - if dirty: - git_describe = git_describe[:git_describe.rindex("-dirty")] - - # now we have TAG-NUM-gHEX or HEX - - if "-" in git_describe: - # TAG-NUM-gHEX - mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) - if not mo: - # unparseable. Maybe git-describe is misbehaving? 
- pieces["error"] = ("unable to parse git-describe output: '%s'" - % describe_out) - return pieces - - # tag - full_tag = mo.group(1) - if not full_tag.startswith(tag_prefix): - if verbose: - fmt = "tag '%s' doesn't start with prefix '%s'" - print(fmt % (full_tag, tag_prefix)) - pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" - % (full_tag, tag_prefix)) - return pieces - pieces["closest-tag"] = full_tag[len(tag_prefix):] - - # distance: number of commits since tag - pieces["distance"] = int(mo.group(2)) - - # commit: short hex revision ID - pieces["short"] = mo.group(3) - - else: - # HEX: no tags - pieces["closest-tag"] = None - count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], - cwd=root) - pieces["distance"] = int(count_out) # total number of commits - - # commit date: see ISO-8601 comment in git_versions_from_keywords() - date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], - cwd=root)[0].strip() - pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) - - return pieces - - -def plus_or_dot(pieces): - """Return a + if we don't already have one, else return a .""" - if "+" in pieces.get("closest-tag", ""): - return "." - return "+" - - -def render_pep440(pieces): - """Build up version string, with post-release "local version identifier". - - Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you - get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty - - Exceptions: - 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += plus_or_dot(pieces) - rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - else: - # exception #1 - rendered = "0+untagged.%d.g%s" % (pieces["distance"], - pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - return rendered - - -def render_pep440_pre(pieces): - """TAG[.post.devDISTANCE] -- No -dirty. - - Exceptions: - 1: no tags. 0.post.devDISTANCE - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"]: - rendered += ".post.dev%d" % pieces["distance"] - else: - # exception #1 - rendered = "0.post.dev%d" % pieces["distance"] - return rendered - - -def render_pep440_post(pieces): - """TAG[.postDISTANCE[.dev0]+gHEX] . - - The ".dev0" means dirty. Note that .dev0 sorts backwards - (a dirty tree will appear "older" than the corresponding clean one), - but you shouldn't be releasing software with -dirty anyways. - - Exceptions: - 1: no tags. 0.postDISTANCE[.dev0] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - rendered += plus_or_dot(pieces) - rendered += "g%s" % pieces["short"] - else: - # exception #1 - rendered = "0.post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - rendered += "+g%s" % pieces["short"] - return rendered - - -def render_pep440_old(pieces): - """TAG[.postDISTANCE[.dev0]] . - - The ".dev0" means dirty. - - Eexceptions: - 1: no tags. 
0.postDISTANCE[.dev0] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - else: - # exception #1 - rendered = "0.post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - return rendered - - -def render_git_describe(pieces): - """TAG[-DISTANCE-gHEX][-dirty]. - - Like 'git describe --tags --dirty --always'. - - Exceptions: - 1: no tags. HEX[-dirty] (note: no 'g' prefix) - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"]: - rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) - else: - # exception #1 - rendered = pieces["short"] - if pieces["dirty"]: - rendered += "-dirty" - return rendered - - -def render_git_describe_long(pieces): - """TAG-DISTANCE-gHEX[-dirty]. - - Like 'git describe --tags --dirty --always -long'. - The distance/hash is unconditional. - - Exceptions: - 1: no tags. HEX[-dirty] (note: no 'g' prefix) - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) - else: - # exception #1 - rendered = pieces["short"] - if pieces["dirty"]: - rendered += "-dirty" - return rendered - - -def render(pieces, style): - """Render the given version pieces into the requested style.""" - if pieces["error"]: - return {"version": "unknown", - "full-revisionid": pieces.get("long"), - "dirty": None, - "error": pieces["error"], - "date": None} - - if not style or style == "default": - style = "pep440" # the default - - if style == "pep440": - rendered = render_pep440(pieces) - elif style == "pep440-pre": - rendered = render_pep440_pre(pieces) - elif style == "pep440-post": - rendered = render_pep440_post(pieces) - elif style == "pep440-old": - rendered = render_pep440_old(pieces) - elif style == "git-describe": - rendered = render_git_describe(pieces) - elif style == "git-describe-long": - rendered = render_git_describe_long(pieces) - else: - raise ValueError("unknown style '%s'" % style) - - return {"version": rendered, "full-revisionid": pieces["long"], - "dirty": pieces["dirty"], "error": None, - "date": pieces.get("date")} - - -def get_versions(): - """Get version information or return default if unable to do so.""" - # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have - # __file__, we can work backwards from there to the root. Some - # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which - # case we can only use expanded keywords. - - cfg = get_config() - verbose = cfg.verbose - - try: - return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, - verbose) - except NotThisMethod: - pass - - try: - root = os.path.realpath(__file__) - # versionfile_source is the relative path from the top of the source - # tree (where the .git directory might live) to this file. Invert - # this to find the root from __file__. 
- for i in cfg.versionfile_source.split('/'): - root = os.path.dirname(root) - except NameError: - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, - "error": "unable to find root of source tree", - "date": None} - - try: - pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) - return render(pieces, cfg.style) - except NotThisMethod: - pass - - try: - if cfg.parentdir_prefix: - return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) - except NotThisMethod: - pass - - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, - "error": "unable to compute version", "date": None} diff --git a/python/cucim/src/cucim/clara/__init__.py b/python/cucim/src/cucim/clara/__init__.py index 13f96d801..4c9fea2f6 100644 --- a/python/cucim/src/cucim/clara/__init__.py +++ b/python/cucim/src/cucim/clara/__init__.py @@ -16,15 +16,33 @@ import os from . import cli, converter + # import hidden methods -from ._cucim import (CuImage, DLDataType, DLDataTypeCode, __version__, cache, - filesystem, io) +from ._cucim import ( + CuImage, + DLDataType, + DLDataTypeCode, + __version__, + cache, + filesystem, + io, +) -__all__ = ['cli', 'CuImage', 'DLDataType', 'DLDataTypeCode', 'filesystem', - 'io', 'cache', 'converter', '__version__'] +__all__ = [ + "cli", + "CuImage", + "DLDataType", + "DLDataTypeCode", + "filesystem", + "io", + "cache", + "converter", + "__version__", +] from ._cucim import _get_plugin_root # isort:skip from ._cucim import _set_plugin_root # isort:skip + # Set plugin root path _set_plugin_root(os.path.dirname(os.path.realpath(__file__))) diff --git a/python/cucim/src/cucim/clara/cache/__init__.py b/python/cucim/src/cucim/clara/cache/__init__.py index 8d36d8fb5..b3a275d58 100644 --- a/python/cucim/src/cucim/clara/cache/__init__.py +++ b/python/cucim/src/cucim/clara/cache/__init__.py @@ -13,7 +13,10 @@ # limitations under the License. 
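The `clara` hunks above and below only reorder imports; for orientation, a minimal sketch of the API they re-export. The slide path is a placeholder, and the snippet assumes the compiled `_cucim` extension is installed.

```python
# Sketch of the cucim.clara surface re-exported above; "slide.tif" is a
# hypothetical input file, not something shipped in this repository.
from cucim import CuImage

img = CuImage("slide.tif")
print(img.resolutions["level_count"])            # number of pyramid levels
region = img.read_region((0, 0), (512, 512), 0)  # (x, y), (width, height), level
```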
# -from cucim.clara._cucim.cache import (CacheType, ImageCache, - preferred_memory_capacity) +from cucim.clara._cucim.cache import ( + CacheType, + ImageCache, + preferred_memory_capacity, +) -__all__ = ['CacheType', 'ImageCache', 'preferred_memory_capacity'] +__all__ = ["CacheType", "ImageCache", "preferred_memory_capacity"] diff --git a/python/cucim/src/cucim/clara/cli.py b/python/cucim/src/cucim/clara/cli.py index c6011798b..0b4c9a8a9 100644 --- a/python/cucim/src/cucim/clara/cli.py +++ b/python/cucim/src/cucim/clara/cli.py @@ -43,23 +43,41 @@ def main(): @main.command() -@click.argument('src_file', type=click.Path(exists=True)) -@click.argument('dest_folder', type=click.Path( - exists=True, dir_okay=True, file_okay=False), default=Path('.')) -@click.option('--tile-size', type=int, default=256) -@click.option('--overlap', type=int, default=0) -@click.option('--num-workers', type=int, default=os.cpu_count()) -@click.option('--compression', type=str, default='jpeg') -@click.option('--output-filename', type=str, default='image.tif') -def convert(src_file, dest_folder, tile_size, overlap, num_workers, - compression, output_filename): +@click.argument("src_file", type=click.Path(exists=True)) +@click.argument( + "dest_folder", + type=click.Path(exists=True, dir_okay=True, file_okay=False), + default=Path("."), +) +@click.option("--tile-size", type=int, default=256) +@click.option("--overlap", type=int, default=0) +@click.option("--num-workers", type=int, default=os.cpu_count()) +@click.option("--compression", type=str, default="jpeg") +@click.option("--output-filename", type=str, default="image.tif") +def convert( + src_file, + dest_folder, + tile_size, + overlap, + num_workers, + compression, + output_filename, +): """Convert file format""" from .converter import tiff + logging.basicConfig(level=logging.INFO) compression = compression.lower() - if compression in ['raw', 'none']: + if compression in ["raw", "none"]: compression = None - tiff.svs2tif(src_file, Path(dest_folder), tile_size, overlap, num_workers, - compression, output_filename) + tiff.svs2tif( + src_file, + Path(dest_folder), + tile_size, + overlap, + num_workers, + compression, + output_filename, + ) diff --git a/python/cucim/src/cucim/clara/converter/tiff.py b/python/cucim/src/cucim/clara/converter/tiff.py index feb6012a5..801818305 100644 --- a/python/cucim/src/cucim/clara/converter/tiff.py +++ b/python/cucim/src/cucim/clara/converter/tiff.py @@ -32,9 +32,7 @@ logger = logging.getLogger(__name__) -def filter_tile( - tiles, dim_index, index, tile_size, output_array -): +def filter_tile(tiles, dim_index, index, tile_size, output_array): try: x, y = index tile = tiles.get_tile(dim_index, index) @@ -49,15 +47,22 @@ def filter_tile( tile_arr = np.array(tile) # H x W x C - output_array[ay: ay + tile_height, ax: ax + tile_width, :] = tile_arr[ - :tile_height, :tile_width] + output_array[ay : ay + tile_height, ax : ax + tile_width, :] = tile_arr[ + :tile_height, :tile_width + ] except Exception as e: logger.exception(e) -def svs2tif(input_file, output_folder, tile_size, overlap, - num_workers=os.cpu_count(), compression="jpeg", - output_filename="image.tif"): +def svs2tif( + input_file, + output_folder, + tile_size, + overlap, + num_workers=os.cpu_count(), + compression="jpeg", + output_filename="image.tif", +): output_folder = str(output_folder) logger.info("Parameters") @@ -76,7 +81,8 @@ def svs2tif(input_file, output_folder, tile_size, overlap, else: raise ValueError( f"Unsupported compression: {compression}." 
- + " Should be 'jpeg' or None.") + + " Should be 'jpeg' or None." + ) with OpenSlide(input_file) as slide: properties = slide.properties @@ -105,7 +111,6 @@ def svs2tif(input_file, output_folder, tile_size, overlap, if max(img_w, img_h) < tile_size: break try: - # Multithread processing for each tile in the largest # image (index 0) logger.info("Processing tiles...") @@ -113,7 +118,8 @@ def svs2tif(input_file, output_folder, tile_size, overlap, tile_pos_x, tile_pos_y = tiles.level_tiles[dim_index] index_iter = np.ndindex(tile_pos_x, tile_pos_y) with concurrent.futures.ThreadPoolExecutor( - max_workers=num_workers) as executor: + max_workers=num_workers + ) as executor: executor.map( filter_tile, repeat(tiles), @@ -128,8 +134,11 @@ def svs2tif(input_file, output_folder, tile_size, overlap, src_arr = np_memmap[index - 1] target_arr = np_memmap[index] target_arr[:] = cv2.resize( - src_arr, (0, 0), fx=0.5, fy=0.5, - interpolation=cv2.INTER_AREA + src_arr, + (0, 0), + fx=0.5, + fy=0.5, + interpolation=cv2.INTER_AREA, ) # th, tw = target_arr.shape[:2] # target_arr[:] = src_arr[ @@ -148,17 +157,19 @@ def svs2tif(input_file, output_folder, tile_size, overlap, y_resolution = float(properties.get("tiff.YResolution")) else: resolution_unit = properties.get("tiff.ResolutionUnit", "inch") - if properties.get("tiff.ResolutionUnit", - "inch").lower() == "inch": + if ( + properties.get("tiff.ResolutionUnit", "inch").lower() + == "inch" + ): numerator = 25400 # Microns in Inch else: numerator = 10000 # Microns in CM - x_resolution = int(numerator - // float(properties.get('openslide.mpp-x', - 1))) - y_resolution = int(numerator - // float(properties.get('openslide.mpp-y', - 1))) + x_resolution = int( + numerator // float(properties.get("openslide.mpp-x", 1)) + ) + y_resolution = int( + numerator // float(properties.get("openslide.mpp-y", 1)) + ) # Write TIFF file with TiffWriter(output_file, bigtiff=True) as tif: @@ -166,8 +177,12 @@ def svs2tif(input_file, output_folder, tile_size, overlap, for level in range(len(np_memmap)): src_arr = np_memmap[level] height, width = src_arr.shape[:2] - logger.info("Saving Level %d image (%d x %d)...", - level, width, height) + logger.info( + "Saving Level %d image (%d x %d)...", + level, + width, + height, + ) if level: subfiletype = SUBFILETYPE_REDUCEDIMAGE else: @@ -181,8 +196,8 @@ def svs2tif(input_file, output_folder, tile_size, overlap, photometric="RGB", planarconfig="CONTIG", resolution=( - x_resolution // 2 ** level, - y_resolution // 2 ** level, + x_resolution // 2**level, + y_resolution // 2**level, resolution_unit, ), compression=compression, # requires imagecodecs diff --git a/python/cucim/src/cucim/clara/filesystem/__init__.py b/python/cucim/src/cucim/clara/filesystem/__init__.py index fba23f7c9..6ac6cd654 100644 --- a/python/cucim/src/cucim/clara/filesystem/__init__.py +++ b/python/cucim/src/cucim/clara/filesystem/__init__.py @@ -15,4 +15,11 @@ from cucim.clara._cucim.filesystem import * -__all__ = ['open', 'pread', 'pwrite', 'close', 'discard_page_cache', 'CuFileDriver'] +__all__ = [ + "open", + "pread", + "pwrite", + "close", + "discard_page_cache", + "CuFileDriver", +] diff --git a/python/cucim/src/cucim/clara/io/__init__.py b/python/cucim/src/cucim/clara/io/__init__.py index 33693fa78..7656660e6 100644 --- a/python/cucim/src/cucim/clara/io/__init__.py +++ b/python/cucim/src/cucim/clara/io/__init__.py @@ -15,4 +15,4 @@ from cucim.clara._cucim.io import * -__all__ = ['DeviceType', 'Device'] +__all__ = ["DeviceType", "Device"] diff --git 
a/python/cucim/src/cucim/core/operations/color/__init__.py b/python/cucim/src/cucim/core/operations/color/__init__.py index 90ae3cce3..030fa6817 100644 --- a/python/cucim/src/cucim/core/operations/color/__init__.py +++ b/python/cucim/src/cucim/core/operations/color/__init__.py @@ -1,12 +1,16 @@ from .jitter import color_jitter, rand_color_jitter -from .stain_normalizer import (absorbance_to_image, image_to_absorbance, - normalize_colors_pca, stain_extraction_pca) +from .stain_normalizer import ( + absorbance_to_image, + image_to_absorbance, + normalize_colors_pca, + stain_extraction_pca, +) __all__ = [ "color_jitter", - "rand_color_jitter" - "absorbance_to_image", + "rand_color_jitter", + "absorbance_to_image", "image_to_absorbance", - 'stain_extraction_pca', - 'normalize_colors_pca', + "stain_extraction_pca", + "normalize_colors_pca", ] diff --git a/python/cucim/src/cucim/core/operations/color/jitter.py b/python/cucim/src/cucim/core/operations/color/jitter.py index fbae3dcbc..4d3f47b5b 100755 --- a/python/cucim/src/cucim/core/operations/color/jitter.py +++ b/python/cucim/src/cucim/core/operations/color/jitter.py @@ -23,22 +23,32 @@ CUDA_KERNELS = cupy.RawModule(code=cuda_kernel_code) -def _check_input(value, name, center=1, bound=(0, float('inf')), - clip_first_on_zero=True): +def _check_input( + value, name, center=1, bound=(0, float("inf")), clip_first_on_zero=True +): if isinstance(value, numbers.Number): if value < 0: - raise ValueError("If {} is a single number, \ - it must be non negative.".format(name)) + raise ValueError( + "If {} is a single number, \ + it must be non negative.".format( + name + ) + ) value = [center - float(value), center + float(value)] if clip_first_on_zero: value[0] = max(value[0], 0.0) elif isinstance(value, (tuple, list)) and len(value) == 2: if not bound[0] <= value[0] <= value[1] <= bound[1]: - raise ValueError("{} values should be between {}" - .format(name, bound)) + raise ValueError( + "{} values should be between {}".format(name, bound) + ) else: - raise TypeError("{} should be a single number or a \ - list/tuple with length 2.".format(name)) + raise TypeError( + "{} should be a single number or a \ + list/tuple with length 2.".format( + name + ) + ) # if value is 0 or (1., 1.) for brightness/contrast/saturation # or (0., 0.)
for hue, do nothing if value[0] == value[1] == center: @@ -46,14 +56,18 @@ def _check_input(value, name, center=1, bound=(0, float('inf')), return value -def _get_params(brightness: Optional[List[float]], - contrast: Optional[List[float]], - saturation: Optional[List[float]], - hue: Optional[List[float]] - ) -> Tuple[np.ndarray, Optional[float], - Optional[float], Optional[float], - Optional[float]]: - +def _get_params( + brightness: Optional[List[float]], + contrast: Optional[List[float]], + saturation: Optional[List[float]], + hue: Optional[List[float]], +) -> Tuple[ + np.ndarray, + Optional[float], + Optional[float], + Optional[float], + Optional[float], +]: fn_idx = np.random.permutation(4) b = None @@ -83,15 +97,20 @@ def _adjust_brightness(input_arr, brightness): block = (128, 1, 1) length = N * C * H * W length = (length + 1) >> 2 - grid = (int((length - 1) / block[0] + 1) , 1, 1) + grid = (int((length - 1) / block[0] + 1), 1, 1) - result = cupy.ndarray(shape=input_arr.shape, - dtype=input_arr.dtype) + result = cupy.ndarray(shape=input_arr.shape, dtype=input_arr.dtype) kernel = CUDA_KERNELS.get_function("brightnessjitter_kernel") - kernel(grid, block, args=(input_arr, - result, - np.int32(N * C * H * W), - np.float32(brightness))) + kernel( + grid, + block, + args=( + input_arr, + result, + np.int32(N * C * H * W), + np.float32(brightness), + ), + ) return result @@ -110,13 +129,11 @@ def _adjust_contrast(input_arr, contrast): N = 1 block = (128, 1, 1) pitch = W * H - grid = (int((pitch - 1) / block[0] + 1) , N, 1) + grid = (int((pitch - 1) / block[0] + 1), N, 1) output_L32 = cupy.empty((N, H, W), dtype=cupy.uint32) kernel_rgb2l = CUDA_KERNELS.get_function("rgb2l_kernel") - kernel_rgb2l(grid, block, args=(input_arr, - output_L32, - np.int32(pitch))) + kernel_rgb2l(grid, block, args=(input_arr, output_L32, np.int32(pitch))) L32_mean = output_L32.mean(axis=[1, 2], dtype=cupy.float32) @@ -124,13 +141,18 @@ def _adjust_contrast(input_arr, contrast): output_rgb = cupy.empty((C, H, W), dtype=cupy.uint8) else: output_rgb = cupy.empty((N, C, H, W), dtype=cupy.uint8) - kernel_blendconstant = \ - CUDA_KERNELS.get_function("blendconstant_kernel") - kernel_blendconstant(grid, block, args=(input_arr, - output_rgb, - np.int32(pitch), - L32_mean, - np.float32(contrast))) + kernel_blendconstant = CUDA_KERNELS.get_function("blendconstant_kernel") + kernel_blendconstant( + grid, + block, + args=( + input_arr, + output_rgb, + np.int32(pitch), + L32_mean, + np.float32(contrast), + ), + ) return output_rgb @@ -149,12 +171,12 @@ def _adjust_saturation(input_arr, saturation): grid = (int((pitch - 1) / block[0] + 1), N, 1) output_rgb = cupy.empty(input_arr.shape, dtype=cupy.uint8) - kernel_satjitter = \ - CUDA_KERNELS.get_function("saturationjitter_kernel") - kernel_satjitter(grid, block, args=(input_arr, - output_rgb, - np.int32(pitch), - np.float32(saturation))) + kernel_satjitter = CUDA_KERNELS.get_function("saturationjitter_kernel") + kernel_satjitter( + grid, + block, + args=(input_arr, output_rgb, np.int32(pitch), np.float32(saturation)), + ) return output_rgb @@ -162,8 +184,7 @@ def _adjust_saturation(input_arr, saturation): # hue jitter def _adjust_hue(input_arr, hue): if not (-0.5 <= hue <= 0.5): - raise ValueError('hue factor({}) is not in [-0.5, 0.5].'. 
- format(hue)) + raise ValueError("hue factor({}) is not in [-0.5, 0.5].".format(hue)) if len(input_arr.shape) == 4: N, C, H, W = input_arr.shape @@ -176,21 +197,16 @@ def _adjust_hue(input_arr, hue): grid = (int((pitch - 1) / block[0] + 1), N, 1) output_rgb = cupy.empty(input_arr.shape, dtype=cupy.uint8) kernel_huejitter = CUDA_KERNELS.get_function("huejitter_kernel") - kernel_huejitter(grid, block, args=(input_arr, - output_rgb, - np.int32(pitch), - np.float32(hue))) + kernel_huejitter( + grid, + block, + args=(input_arr, output_rgb, np.int32(pitch), np.float32(hue)), + ) return output_rgb -def color_jitter( - img: Any, - brightness=0, - contrast=0, - saturation=0, - hue=0 -): +def color_jitter(img: Any, brightness=0, contrast=0, saturation=0, hue=0): """Applies color jitter by random sequential application of 4 operations (brightness, contrast, saturation, hue). @@ -246,11 +262,12 @@ def color_jitter( # once instead of checking every time # execution - f_brightness = _check_input(brightness, 'brightness') - f_contrast = _check_input(contrast, 'contrast') - f_saturation = _check_input(saturation, 'saturation') - f_hue = _check_input(hue, 'hue', center=0, bound=(-0.5, 0.5), - clip_first_on_zero=False) + f_brightness = _check_input(brightness, "brightness") + f_contrast = _check_input(contrast, "contrast") + f_saturation = _check_input(saturation, "saturation") + f_hue = _check_input( + hue, "hue", center=0, bound=(-0.5, 0.5), clip_first_on_zero=False + ) to_numpy = False if isinstance(img, np.ndarray): @@ -262,7 +279,7 @@ def color_jitter( cupy_img = cupy.ascontiguousarray(img) if cupy_img.dtype != cupy.uint8: - if cupy.can_cast(cupy_img.dtype, cupy.uint8, 'unsafe') is False: + if cupy.can_cast(cupy_img.dtype, cupy.uint8, "unsafe") is False: raise ValueError( "Cannot cast type {cupy_img.dtype.name} to 'uint8'" ) @@ -275,9 +292,13 @@ def color_jitter( "dimensions (C, H, W) or (N, C, H, W)." ) - fn_idx, brightness_factor, contrast_factor, saturation_factor, \ - hue_factor = _get_params(f_brightness, f_contrast, - f_saturation, f_hue) + ( + fn_idx, + brightness_factor, + contrast_factor, + saturation_factor, + hue_factor, + ) = _get_params(f_brightness, f_contrast, f_saturation, f_hue) for fn_id in fn_idx: if fn_id == 0 and brightness_factor is not None: @@ -306,7 +327,7 @@ def rand_color_jitter( saturation=0, hue=0, prob: float = 0.1, - whole_batch: bool = False + whole_batch: bool = False, ): """Randomly applies color jitter by random sequential application of 4 operations (brightness, contrast, saturation, hue). @@ -373,11 +394,9 @@ def rand_color_jitter( for i in range(shape[0]): if image_wise_probs[i] < prob: - img[i] = color_jitter(img[i], - brightness, - contrast, - saturation, - hue) + img[i] = color_jitter( + img[i], brightness, contrast, saturation, hue + ) return img else: return color_jitter(img, brightness, contrast, saturation, hue) diff --git a/python/cucim/src/cucim/core/operations/color/kernel/cuda_kernel_source.py b/python/cucim/src/cucim/core/operations/color/kernel/cuda_kernel_source.py index ac60de508..ae27df3c2 100644 --- a/python/cucim/src/cucim/core/operations/color/kernel/cuda_kernel_source.py +++ b/python/cucim/src/cucim/core/operations/color/kernel/cuda_kernel_source.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
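The jitter hunks above only re-wrap `color_jitter` and `rand_color_jitter`; their signatures are unchanged. A minimal usage sketch with synthetic data, assuming the uint8 (C, H, W) layout the docstrings require (argument values mirror the tests later in this diff):

```python
import cupy as cp

from cucim.core.operations.color import color_jitter, rand_color_jitter

# Synthetic uint8 image in (C, H, W) layout.
img = cp.random.randint(0, 256, (3, 224, 224)).astype(cp.uint8)
out = color_jitter(img, brightness=0.25, contrast=0.75, saturation=0.25, hue=0.04)

# Batched (N, C, H, W) input; prob=1.0 forces the jitter to be applied.
batch = cp.stack([img] * 8)
out_batch = rand_color_jitter(batch, 0.25, 0.75, 0.25, 0.04, prob=1.0)
```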
-cuda_kernel_code = r''' +cuda_kernel_code = r""" extern "C" { __global__ void brightnessjitter_kernel(unsigned char *input_rgb, \ unsigned char *output_rgb, \ @@ -224,7 +224,7 @@ float uq = (q <= 0.0f) ? 0.0f : ((q >= 255.0f) ? 255.0f : q); float ut = (t <= 0.0f) ? 0.0f : ((t >= 255.0f) ? 255.0f : t); - // todo: make atleast 16-bit stores + // todo: make at least 16-bit stores switch ((int)i % 6) { case 0: output_rgb[idx] = (unsigned char)__float2uint_rn(v); @@ -260,4 +260,4 @@ } } } -}''' +}""" diff --git a/python/cucim/src/cucim/core/operations/color/stain_normalizer.py b/python/cucim/src/cucim/core/operations/color/stain_normalizer.py index 34475f8fe..84638b4e0 100644 --- a/python/cucim/src/cucim/core/operations/color/stain_normalizer.py +++ b/python/cucim/src/cucim/core/operations/color/stain_normalizer.py @@ -80,9 +80,13 @@ def image_to_absorbance(image, source_intensity=255.0, dtype=cp.float32): return absorbance -def _image_to_absorbance_matrix(image, source_intensity=240, - image_type="intensity", channel_axis=0, - dtype=cp.float32): +def _image_to_absorbance_matrix( + image, + source_intensity=240, + image_type="intensity", + channel_axis=0, + dtype=cp.float32, +): """Convert image to an absorbance and reshape to (3, n_pixels). See ``image_to_absorbance`` for parameter descriptions @@ -104,9 +108,7 @@ def _image_to_absorbance_matrix(image, source_intensity=240, # reshape to form a (n_channels, n_pixels) matrix if channel_axis != 0: - absorbance = cp.moveaxis( - absorbance, source=channel_axis, destination=0 - ) + absorbance = cp.moveaxis(absorbance, source=channel_axis, destination=0) return absorbance.reshape((c, -1)) @@ -233,8 +235,15 @@ def _prep_channel_axis(channel_axis, ndim): return channel_axis % ndim -def stain_extraction_pca(image, source_intensity=240, alpha=1, beta=0.345, - *, channel_axis=0, image_type="intensity"): +def stain_extraction_pca( + image, + source_intensity=240, + alpha=1, + beta=0.345, + *, + channel_axis=0, + image_type="intensity", +): """Extract the matrix of H & E stain coefficient from an image. Uses a method that selects stain vectors based on the angle distribution @@ -351,7 +360,6 @@ def stain_extraction_pca(image, source_intensity=240, alpha=1, beta=0.345, def _get_raw_concentrations(src_stain_coeff, absorbance): - if absorbance.ndim != 2 or absorbance.shape[0] != 3: raise ValueError("`absorbance` must be shape (3, n_pixels)") @@ -360,22 +368,26 @@ def _get_raw_concentrations(src_stain_coeff, absorbance): # pseudo-inverse coeff_pinv = cp.dot( cp.linalg.inv(cp.dot(src_stain_coeff.T, src_stain_coeff)), - src_stain_coeff.T + src_stain_coeff.T, ) if cp.any(cp.isnan(coeff_pinv)): # fall back to cp.linalg.lstsq if pseudo-inverse above failed - conc_raw = cp.linalg.lstsq( - src_stain_coeff, absorbance, rcond=None - )[0] + conc_raw = cp.linalg.lstsq(src_stain_coeff, absorbance, rcond=None)[0] else: conc_raw = cp.dot(cp.asarray(coeff_pinv, order="F"), absorbance) return conc_raw -def _normalized_from_concentrations(conc_raw, max_percentile, ref_stain_coeff, - ref_max_conc, source_intensity, - original_shape, channel_axis): +def _normalized_from_concentrations( + conc_raw, + max_percentile, + ref_stain_coeff, + ref_max_conc, + source_intensity, + original_shape, + channel_axis, +): """Determine normalized image from concentrations. Note: This function will also modify conc_raw in-place. 
@@ -402,8 +414,10 @@ def _normalized_from_concentrations(conc_raw, max_percentile, ref_stain_coeff, # Note: calling percentile separately for each channel is faster than: # max_conc = cp.percentile(conc_raw, 100 - alpha, axis=1) max_conc = cp.concatenate( - [cp.percentile(ch_raw, max_percentile)[np.newaxis] - for ch_raw in conc_raw] + [ + cp.percentile(ch_raw, max_percentile)[np.newaxis] + for ch_raw in conc_raw + ] ) normalization_factors = ref_max_conc / max_conc conc_raw *= normalization_factors[:, cp.newaxis] @@ -416,32 +430,30 @@ def _normalized_from_concentrations(conc_raw, max_percentile, ref_stain_coeff, # restore original shape for each channel spatial_shape = ( - original_shape[:channel_axis] + original_shape[channel_axis + 1:] + original_shape[:channel_axis] + original_shape[channel_axis + 1 :] ) image_norm = cp.reshape(image_norm, (3,) + spatial_shape) # move channels from axis 0 to channel_axis if channel_axis != 0: - image_norm = cp.moveaxis( - image_norm, source=0, destination=channel_axis - ) + image_norm = cp.moveaxis(image_norm, source=0, destination=channel_axis) # restore original shape return image_norm def normalize_colors_pca( - image, - source_intensity: float = 240.0, - alpha: float = 1.0, - beta: float = 0.345, - ref_stain_coeff: Union[tuple, cp.ndarray] = ( - (0.5626, 0.2159), - (0.7201, 0.8012), - (0.4062, 0.5581), - ), - ref_max_conc: Union[tuple, cp.ndarray] = (1.9705, 1.0308), - image_type: str = "intensity", - channel_axis: int = 0, + image, + source_intensity: float = 240.0, + alpha: float = 1.0, + beta: float = 0.345, + ref_stain_coeff: Union[tuple, cp.ndarray] = ( + (0.5626, 0.2159), + (0.7201, 0.8012), + (0.4062, 0.5581), + ), + ref_max_conc: Union[tuple, cp.ndarray] = (1.9705, 1.0308), + image_type: str = "intensity", + channel_axis: int = 0, ): """Extract the matrix of stain coefficient from the image. 
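Between these hunks, a short sketch of how the reformatted stain-normalization entry points are called, using synthetic data and the default parameters shown in the signatures above:

```python
import cupy as cp

from cucim.core.operations.color import (
    normalize_colors_pca,
    stain_extraction_pca,
)

# Synthetic H&E-like intensity image, channel axis first (the default).
image = cp.random.randint(60, 240, (3, 256, 256)).astype(cp.uint8)

he_stains = stain_extraction_pca(image)   # 3 x 2 H&E stain coefficient matrix
normalized = normalize_colors_pca(image)  # stain-normalized intensity image
```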
@@ -505,7 +517,7 @@ def normalize_colors_pca( image, source_intensity=source_intensity, image_type=image_type, - channel_axis=channel_axis + channel_axis=channel_axis, ) # channels_axis=0 for the shape (3, n_pixels) absorbance matrix diff --git a/python/cucim/src/cucim/core/operations/color/tests/test_color_jitter.py b/python/cucim/src/cucim/core/operations/color/tests/test_color_jitter.py index e481480e5..c79aa8266 100644 --- a/python/cucim/src/cucim/core/operations/color/tests/test_color_jitter.py +++ b/python/cucim/src/cucim/core/operations/color/tests/test_color_jitter.py @@ -24,22 +24,22 @@ def test_color_jitter_bad_params(): arr = get_image_array() with pytest.raises(ValueError): arr1 = arr.flatten() - ccl.color_jitter(arr1, .25, .75, .25, .04) + ccl.color_jitter(arr1, 0.25, 0.75, 0.25, 0.04) with pytest.raises(TypeError): - img = Image.fromarray(arr.T, 'RGB') - ccl.color_jitter(img, .25, .75, .25, .04) + img = Image.fromarray(arr.T, "RGB") + ccl.color_jitter(img, 0.25, 0.75, 0.25, 0.04) def test_color_jitter_numpyinput(): arr = get_image_array() - np_output = ccl.color_jitter(arr, .25, .75, .25, .04) + np_output = ccl.color_jitter(arr, 0.25, 0.75, 0.25, 0.04) verify_result(np_output, arr) def test_color_jitter_cupyinput(): arr = get_image_array() cupy_arr = cupy.asarray(arr) - cupy_output = ccl.color_jitter(cupy_arr, .25, .75, .25, .04) + cupy_output = ccl.color_jitter(cupy_arr, 0.25, 0.75, 0.25, 0.04) np_output = cupy.asnumpy(cupy_output) verify_result(np_output, arr) @@ -48,7 +48,7 @@ def test_color_jitter_cupy_cast(): arr = get_image_array() cupy_arr = cupy.asarray(arr) cupy_arr = cupy_arr.astype(cupy.float32) - cupy_output = ccl.color_jitter(cupy_arr, .25, .75, .25, .04) + cupy_output = ccl.color_jitter(cupy_arr, 0.25, 0.75, 0.25, 0.04) assert cupy_output.dtype == cupy.float32 @@ -61,7 +61,7 @@ def test_color_jitter_factor(): def test_color_jitter_batchinput(): arr = get_image_array() arr_batch = np.stack((arr,) * 8, axis=0) - np_output = ccl.color_jitter(arr_batch, .25, .75, .25, .04) + np_output = ccl.color_jitter(arr_batch, 0.25, 0.75, 0.25, 0.04) assert np_output.shape[0] == 8 verify_result(np_output, arr_batch) @@ -69,9 +69,8 @@ def test_color_jitter_batchinput(): def test_rand_color_jitter_batchinput(): arr = get_image_array() arr_batch = np.stack((arr,) * 8, axis=0) - np_output = ccl.rand_color_jitter(arr_batch, - .25, .75, .25, .04, - prob=1.0, - whole_batch=True) + np_output = ccl.rand_color_jitter( + arr_batch, 0.25, 0.75, 0.25, 0.04, prob=1.0, whole_batch=True + ) assert np_output.shape[0] == 8 verify_result(np_output, arr_batch) diff --git a/python/cucim/src/cucim/core/operations/expose/tests/test_expose.py b/python/cucim/src/cucim/core/operations/expose/tests/test_expose.py index ae7dc298d..4baf0e6ef 100644 --- a/python/cucim/src/cucim/core/operations/expose/tests/test_expose.py +++ b/python/cucim/src/cucim/core/operations/expose/tests/test_expose.py @@ -1,11 +1,15 @@ -from cucim.core.operations.expose.transform import (color_jitter, image_flip, - image_rotate_90, - normalize_data, - rand_color_jitter, - rand_image_flip, - rand_image_rotate_90, - rand_zoom, - scale_intensity_range, zoom) +from cucim.core.operations.expose.transform import ( + color_jitter, + image_flip, + image_rotate_90, + normalize_data, + rand_color_jitter, + rand_image_flip, + rand_image_rotate_90, + rand_zoom, + scale_intensity_range, + zoom, +) def test_exposed_transforms(): diff --git a/python/cucim/src/cucim/core/operations/expose/transform.py 
b/python/cucim/src/cucim/core/operations/expose/transform.py index 3fbc2a1f3..af25f2332 100644 --- a/python/cucim/src/cucim/core/operations/expose/transform.py +++ b/python/cucim/src/cucim/core/operations/expose/transform.py @@ -13,8 +13,15 @@ # limitations under the License. from cucim.core.operations.color import color_jitter, rand_color_jitter # noqa -from cucim.core.operations.intensity import (normalize_data, rand_zoom, # noqa - scale_intensity_range, zoom) -from cucim.core.operations.spatial import (image_flip, image_rotate_90, # noqa - rand_image_flip, - rand_image_rotate_90) +from cucim.core.operations.intensity import ( # noqa + normalize_data, + rand_zoom, + scale_intensity_range, + zoom, +) +from cucim.core.operations.spatial import ( # noqa + image_flip, + image_rotate_90, + rand_image_flip, + rand_image_rotate_90, +) diff --git a/python/cucim/src/cucim/core/operations/intensity/__init__.py b/python/cucim/src/cucim/core/operations/intensity/__init__.py index 000c987bc..404c5168a 100644 --- a/python/cucim/src/cucim/core/operations/intensity/__init__.py +++ b/python/cucim/src/cucim/core/operations/intensity/__init__.py @@ -2,9 +2,4 @@ from .scaling import scale_intensity_range from .zoom import rand_zoom, zoom -__all__ = [ - "normalize_data", - "scale_intensity_range", - "zoom", - "rand_zoom" -] +__all__ = ["normalize_data", "scale_intensity_range", "zoom", "rand_zoom"] diff --git a/python/cucim/src/cucim/core/operations/intensity/kernel/cuda_kernel_source.py b/python/cucim/src/cucim/core/operations/intensity/kernel/cuda_kernel_source.py index a5e1e80aa..6686555ac 100644 --- a/python/cucim/src/cucim/core/operations/intensity/kernel/cuda_kernel_source.py +++ b/python/cucim/src/cucim/core/operations/intensity/kernel/cuda_kernel_source.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -cuda_kernel_code = r''' +cuda_kernel_code = r""" extern "C" { __global__ void normalize_data_by_range(float *in, float *out, \ float norm_factor, \ @@ -260,4 +260,4 @@ } } } -}''' +}""" diff --git a/python/cucim/src/cucim/core/operations/intensity/normalize.py b/python/cucim/src/cucim/core/operations/intensity/normalize.py index fc084ec06..6e283a776 100644 --- a/python/cucim/src/cucim/core/operations/intensity/normalize.py +++ b/python/cucim/src/cucim/core/operations/intensity/normalize.py @@ -27,7 +27,7 @@ def normalize_data( norm_constant: float, min_value: float, max_value: float, - type: str = 'range' + type: str = "range", ) -> Any: """ Apply intensity normalization to the input array. @@ -69,19 +69,23 @@ def normalize_data( 10, 0 , 255) """ if max_value - min_value == 0.0: - raise ValueError("Minimum and Maximum intensity \ - same in input data") - - if type not in ['range', 'atan']: - raise ValueError("Incorrect normalization type. \ + raise ValueError( + "Minimum and Maximum intensity \ + same in input data" + ) + + if type not in ["range", "atan"]: + raise ValueError( + "Incorrect normalization type. 
\ Supported types are: \ range based: 1,\ - atangent based: 2") + atangent based: 2" + ) to_numpy = False if isinstance(img, np.ndarray): to_numpy = True - cupy_img = cupy.asarray(img, dtype=cupy.float32, order='C') + cupy_img = cupy.asarray(img, dtype=cupy.float32, order="C") elif not isinstance(img, cupy.ndarray): raise TypeError("img must be a cupy.ndarray or numpy.ndarray") else: @@ -98,7 +102,7 @@ def normalize_data( normalize = CUDA_KERNELS.get_function("normalize_data_by_range") - if type == 'atan': + if type == "atan": normalize = CUDA_KERNELS.get_function("normalize_data_by_atan") value_range = max_value - min_value @@ -110,10 +114,17 @@ def normalize_data( result = cupy.empty(img.shape, dtype=cupy_img.dtype) - normalize((gridx, 1, 1), (blockx, 1, 1), - (cupy_img, result, np.float32(norm_factor), - np.float32(min_value), - np.int32(total_size))) + normalize( + (gridx, 1, 1), + (blockx, 1, 1), + ( + cupy_img, + result, + np.float32(norm_factor), + np.float32(min_value), + np.int32(total_size), + ), + ) if img.dtype != cupy.float32: result = result.astype(img.dtype) diff --git a/python/cucim/src/cucim/core/operations/intensity/scaling.py b/python/cucim/src/cucim/core/operations/intensity/scaling.py index 0728acbdc..c875cdad2 100755 --- a/python/cucim/src/cucim/core/operations/intensity/scaling.py +++ b/python/cucim/src/cucim/core/operations/intensity/scaling.py @@ -28,7 +28,7 @@ def scale_intensity_range( b_min: float, a_max: float, a_min: float, - clip: bool = False + clip: bool = False, ) -> Any: """ Apply intensity scaling to the input array. @@ -76,7 +76,7 @@ def scale_intensity_range( to_numpy = False if isinstance(img, np.ndarray): to_numpy = True - cupy_img = cupy.asarray(img, dtype=cupy.float32, order='C') + cupy_img = cupy.asarray(img, dtype=cupy.float32, order="C") elif not isinstance(img, cupy.ndarray): raise TypeError("img must be a cupy.ndarray or numpy.ndarray") else: @@ -96,8 +96,8 @@ def scale_intensity_range( x = (b_max - b_min) / (a_max - a_min) y = a_min * x - b_min if clip is False: - b_max = float('inf') - b_min = float('-inf') + b_max = float("inf") + b_min = float("-inf") sh = img.shape total_size = np.prod(sh) @@ -106,10 +106,19 @@ def scale_intensity_range( result = cupy.empty(img.shape, dtype=cupy_img.dtype) - scale((gridx, 1, 1), (blockx, 1, 1), - (cupy_img, result, np.float32(x), np.float32(y), - np.float32(b_min), np.float32(b_max), - np.int32(total_size))) + scale( + (gridx, 1, 1), + (blockx, 1, 1), + ( + cupy_img, + result, + np.float32(x), + np.float32(y), + np.float32(b_min), + np.float32(b_max), + np.int32(total_size), + ), + ) if img.dtype != cupy.float32: result = result.astype(img.dtype) diff --git a/python/cucim/src/cucim/core/operations/intensity/tests/test_normalize.py b/python/cucim/src/cucim/core/operations/intensity/tests/test_normalize.py index 16b7c5f99..fc8db4b12 100644 --- a/python/cucim/src/cucim/core/operations/intensity/tests/test_normalize.py +++ b/python/cucim/src/cucim/core/operations/intensity/tests/test_normalize.py @@ -18,8 +18,7 @@ def get_input_arr(): def get_norm_data(): dirname = os.path.dirname(__file__) - img1 = Image.open(os.path.join(os.path.abspath(dirname), - "normalized.png")) + img1 = Image.open(os.path.join(os.path.abspath(dirname), "normalized.png")) arr_o = np.asarray(img1) arr_o = np.transpose(arr_o) return arr_o @@ -27,8 +26,9 @@ def get_norm_data(): def get_norm_atan_data(): dirname = os.path.dirname(__file__) - img1 = Image.open(os.path.join(os.path.abspath(dirname), - "normalized_atan.png")) + img1 = 
Image.open( + os.path.join(os.path.abspath(dirname), "normalized_atan.png") + ) arr_o = np.asarray(img1) arr_o = np.transpose(arr_o) return arr_o @@ -39,9 +39,9 @@ def test_norm_param(): with pytest.raises(ValueError): its.normalize_data(arr, 10.0, 255, 255) with pytest.raises(ValueError): - its.normalize_data(arr, 10.0, 0, 255, 'invalid') + its.normalize_data(arr, 10.0, 0, 255, "invalid") with pytest.raises(TypeError): - img = Image.fromarray(arr.T, 'RGB') + img = Image.fromarray(arr.T, "RGB") its.normalize_data(img, 10.0, 0, 255) @@ -52,7 +52,7 @@ def test_norm_numpy_input(): assert np.allclose(output, norm_arr) norm_atan_arr = get_norm_atan_data() - output = its.normalize_data(arr, 10000, 0, 255, 'atan') + output = its.normalize_data(arr, 10000, 0, 255, "atan") assert np.allclose(output, norm_atan_arr) diff --git a/python/cucim/src/cucim/core/operations/intensity/tests/test_rand_zoom.py b/python/cucim/src/cucim/core/operations/intensity/tests/test_rand_zoom.py index d6ebc2467..e49695390 100644 --- a/python/cucim/src/cucim/core/operations/intensity/tests/test_rand_zoom.py +++ b/python/cucim/src/cucim/core/operations/intensity/tests/test_rand_zoom.py @@ -20,8 +20,9 @@ def get_zoomed_data(zoomout=False): if not zoomout: img1 = Image.open(os.path.join(os.path.abspath(dirname), "zoomed.png")) else: - img1 = Image.open(os.path.join(os.path.abspath(dirname), - "zoomout_padded.png")) + img1 = Image.open( + os.path.join(os.path.abspath(dirname), "zoomout_padded.png") + ) arr_o = np.asarray(img1) arr_o = np.transpose(arr_o) return arr_o diff --git a/python/cucim/src/cucim/core/operations/intensity/tests/test_scaling.py b/python/cucim/src/cucim/core/operations/intensity/tests/test_scaling.py index 97b4a0da6..e59618c8c 100644 --- a/python/cucim/src/cucim/core/operations/intensity/tests/test_scaling.py +++ b/python/cucim/src/cucim/core/operations/intensity/tests/test_scaling.py @@ -29,7 +29,7 @@ def test_scale_param(): with pytest.raises(ValueError): its.scale_intensity_range(arr, 0.0, 255.0, 1.0, 1.0, False) with pytest.raises(TypeError): - img = Image.fromarray(arr.T, 'RGB') + img = Image.fromarray(arr.T, "RGB") its.scale_intensity_range(img, 0.0, 255.0, -1.0, 1.0, False) @@ -44,8 +44,9 @@ def test_scale_cupy_input(): arr = get_input_arr() scaled_arr = get_scaled_data() cupy_arr = cupy.asarray(arr) - cupy_output = its.scale_intensity_range(cupy_arr, - 0.0, 255.0, -1.0, 1.0, False) + cupy_output = its.scale_intensity_range( + cupy_arr, 0.0, 255.0, -1.0, 1.0, False + ) np_output = cupy.asnumpy(cupy_output) assert np.allclose(np_output, scaled_arr) diff --git a/python/cucim/src/cucim/core/operations/intensity/tests/test_zoom.py b/python/cucim/src/cucim/core/operations/intensity/tests/test_zoom.py index a0507faa1..db8adec1f 100644 --- a/python/cucim/src/cucim/core/operations/intensity/tests/test_zoom.py +++ b/python/cucim/src/cucim/core/operations/intensity/tests/test_zoom.py @@ -21,8 +21,9 @@ def get_zoomed_data(zoomout=False): if not zoomout: img1 = Image.open(os.path.join(os.path.abspath(dirname), "zoomed.png")) else: - img1 = Image.open(os.path.join(os.path.abspath(dirname), - "zoomout_padded.png")) + img1 = Image.open( + os.path.join(os.path.abspath(dirname), "zoomout_padded.png") + ) arr_o = np.asarray(img1) arr_o = np.transpose(arr_o) return arr_o @@ -34,7 +35,7 @@ def test_zoom_param(): arr1 = arr.flatten() its.zoom(arr1, [1.1, 1.1]) with pytest.raises(TypeError): - img = Image.fromarray(arr.T, 'RGB') + img = Image.fromarray(arr.T, "RGB") its.zoom(img, [1.1, 1.1]) diff --git 
a/python/cucim/src/cucim/core/operations/intensity/zoom.py b/python/cucim/src/cucim/core/operations/intensity/zoom.py index 866a06442..d01c866f1 100644 --- a/python/cucim/src/cucim/core/operations/intensity/zoom.py +++ b/python/cucim/src/cucim/core/operations/intensity/zoom.py @@ -23,10 +23,7 @@ CUDA_KERNELS = cupy.RawModule(code=cuda_kernel_code) -def zoom( - img: Any, - zoom_factor: Sequence[float] -): +def zoom(img: Any, zoom_factor: Sequence[float]): """Zooms an ND image Parameters @@ -83,8 +80,12 @@ def zoom( C, H, W = img.shape N = 1 - output_size_cu = [N, C, int(math.floor(H * zoom_factor[0])), - int(math.floor(W * zoom_factor[1]))] + output_size_cu = [ + N, + C, + int(math.floor(H * zoom_factor[0])), + int(math.floor(W * zoom_factor[1])), + ] if output_size_cu[2] == H and output_size_cu[3] == W: return img @@ -95,34 +96,43 @@ def get_block_size(output_size_cu, H, W): # compare for 48KB for standard CC optimal occupancy # array is H, W but kernel is x--> W, y-->H for param in cu_block_options: - h_stretch = [math.floor((0 * H) / output_size_cu[2]), - math.ceil((param[1] * H) / output_size_cu[2])] - w_stretch = [math.floor((0 * W) / output_size_cu[3]), - math.ceil((param[0] * W) / output_size_cu[3])] + h_stretch = [ + math.floor((0 * H) / output_size_cu[2]), + math.ceil((param[1] * H) / output_size_cu[2]), + ] + w_stretch = [ + math.floor((0 * W) / output_size_cu[3]), + math.ceil((param[0] * W) / output_size_cu[3]), + ] smem_size = (h_stretch[1] + 1) * (w_stretch[1] + 1) * 4 if smem_size < max_smem: return param, smem_size - raise Exception("Random Zoom couldnt find a \ - shared memory configuration") + raise Exception( + "Random Zoom couldn't find a \ + shared memory configuration" + ) # input pitch pitch = H * W # get block size block_config, smem_size = get_block_size(output_size_cu, H, W) - grid = (int((output_size_cu[3] - 1) / block_config[0] + 1), - int((output_size_cu[2] - 1) / block_config[1] + 1), C * N) + grid = ( + int((output_size_cu[3] - 1) / block_config[0] + 1), + int((output_size_cu[2] - 1) / block_config[1] + 1), + C * N, + ) is_zoom_out = output_size_cu[2] < H and output_size_cu[3] < W is_zoom_in = output_size_cu[2] > H and output_size_cu[3] > W pad_dims = [[0, 0]] * 2 # zoom out slice_dims = [[0, 0]] * 2 # zoom in - for idx, (orig, zoom) in enumerate(zip((H, W), - (output_size_cu[2], - output_size_cu[3]))): + for idx, (orig, zoom) in enumerate( + zip((H, W), (output_size_cu[2], output_size_cu[3])) + ): diff = orig - zoom half = abs(diff) // 2 if diff > 0: @@ -135,42 +145,73 @@ def get_block_size(output_size_cu, H, W): if is_zoom_in: # slice kernel = CUDA_KERNELS.get_function("zoom_in_kernel") - kernel(grid, block_config, - args=(cupy_img, result, np.int32(H), np.int32(W), - np.int32(output_size_cu[2]), - np.int32(output_size_cu[3]), - np.int32(pitch), np.int32(slice_dims[0][0]), - np.int32(slice_dims[0][1]), - np.int32(slice_dims[1][0]), - np.int32(slice_dims[1][1])), - shared_mem=smem_size) + kernel( + grid, + block_config, + args=( + cupy_img, + result, + np.int32(H), + np.int32(W), + np.int32(output_size_cu[2]), + np.int32(output_size_cu[3]), + np.int32(pitch), + np.int32(slice_dims[0][0]), + np.int32(slice_dims[0][1]), + np.int32(slice_dims[1][0]), + np.int32(slice_dims[1][1]), + ), + shared_mem=smem_size, + ) elif is_zoom_out: # pad kernel = CUDA_KERNELS.get_function("zoom_out_kernel") - kernel(grid, block_config, - args=(cupy_img, result, np.int32(H), np.int32(W), - np.int32(output_size_cu[2]), - np.int32(output_size_cu[3]), - np.int32(pitch), 
np.int32(pad_dims[0][0]), - np.int32(pad_dims[0][1]), - np.int32(pad_dims[1][0]), - np.int32(pad_dims[1][1])), - shared_mem=smem_size) + kernel( + grid, + block_config, + args=( + cupy_img, + result, + np.int32(H), + np.int32(W), + np.int32(output_size_cu[2]), + np.int32(output_size_cu[3]), + np.int32(pitch), + np.int32(pad_dims[0][0]), + np.int32(pad_dims[0][1]), + np.int32(pad_dims[1][0]), + np.int32(pad_dims[1][1]), + ), + shared_mem=smem_size, + ) # padding kernel kernel = CUDA_KERNELS.get_function("zoomout_edge_pad") - grid = (int((W - 1) / block_config[0] + 1), - int((H - 1) / block_config[1] + 1), - C * N) - kernel(grid, block_config, - args=(result, np.int32(H), np.int32(W), np.int32(pitch), - np.int32(pad_dims[0][0]), np.int32(pad_dims[1][0]), - np.int32(pad_dims[0][0] + output_size_cu[2]), - np.int32(pad_dims[1][0] + output_size_cu[3]))) + grid = ( + int((W - 1) / block_config[0] + 1), + int((H - 1) / block_config[1] + 1), + C * N, + ) + kernel( + grid, + block_config, + args=( + result, + np.int32(H), + np.int32(W), + np.int32(pitch), + np.int32(pad_dims[0][0]), + np.int32(pad_dims[1][0]), + np.int32(pad_dims[0][0] + output_size_cu[2]), + np.int32(pad_dims[1][0] + output_size_cu[3]), + ), + ) else: - raise Exception("Can only handle simultaneous \ + raise Exception( + "Can only handle simultaneous \ expansion(or shrinkage) in both H,W dimension, \ - check zoom factors") + check zoom factors" + ) if img.dtype != np.float32: result = result.astype(img.dtype) @@ -187,8 +228,9 @@ def get_zoom_factor( ): R = np.random.RandomState() try: - zoom_factor = [R.uniform(low, high) - for low, high in zip(min_zoom, max_zoom)] + zoom_factor = [ + R.uniform(low, high) for low, high in zip(min_zoom, max_zoom) + ] except Exception: zoom_factor = [R.uniform(min_zoom, max_zoom)] @@ -203,7 +245,7 @@ def rand_zoom( min_zoom: Union[Sequence[float], float] = 0.9, max_zoom: Union[Sequence[float], float] = 1.1, prob: float = 0.1, - whole_batch: bool = False + whole_batch: bool = False, ): """ Randomly Calls zoom with random zoom factor diff --git a/python/cucim/src/cucim/core/operations/morphology/_distance_transform.py b/python/cucim/src/cucim/core/operations/morphology/_distance_transform.py index 7991fb9f8..a0ccee29a 100644 --- a/python/cucim/src/cucim/core/operations/morphology/_distance_transform.py +++ b/python/cucim/src/cucim/core/operations/morphology/_distance_transform.py @@ -7,9 +7,17 @@ # support chamfer/chessboard and taxicab/manhattan distances too? -def distance_transform_edt(image, sampling=None, return_distances=True, - return_indices=False, distances=None, indices=None, - *, block_params=None, float64_distances=False): +def distance_transform_edt( + image, + sampling=None, + return_distances=True, + return_indices=False, + distances=None, + indices=None, + *, + block_params=None, + float64_distances=False, +): r"""Exact Euclidean distance transform. This function calculates the distance transform of the `input`, by @@ -158,7 +166,8 @@ def distance_transform_edt(image, sampling=None, return_distances=True, pba_func = _pba_2d else: raise NotImplementedError( - "Only 2D and 3D distance transforms are supported.") + "Only 2D and 3D distance transforms are supported." 
+ ) vals = pba_func( image, diff --git a/python/cucim/src/cucim/core/operations/morphology/_pba_2d.py b/python/cucim/src/cucim/core/operations/morphology/_pba_2d.py index 85aacd06e..4d1f9965d 100644 --- a/python/cucim/src/cucim/core/operations/morphology/_pba_2d.py +++ b/python/cucim/src/cucim/core/operations/morphology/_pba_2d.py @@ -12,7 +12,6 @@ # math.lcm was introduced in Python 3.9 from math import lcm except ImportError: - """Fallback implementation of least common multiple (lcm) TODO: remove once minimum Python requirement is >= 3.9 @@ -47,7 +46,7 @@ def lcm(*args): #define pixel_int2_t {pixel_int2_t} // typically short2 (int2 for images with > 32k pixels per side) #define make_pixel(x, y) {make_pixel_func}(x, y) // typically make_short2 (make_int2 images with > 32k pixels per side -""" # noqa +""" # noqa: E501 def _init_marker(int_dtype): @@ -74,7 +73,7 @@ def get_pba2d_src(block_size_2d=64, marker=-32768, pixel_int2_t="short2"): block_size_2d=block_size_2d, marker=marker, pixel_int2_t=pixel_int2_t, - make_pixel_func=make_pixel_func + make_pixel_func=make_pixel_func, ) kernel_directory = os.path.join(os.path.dirname(__file__), "cuda") with open(os.path.join(kernel_directory, "pba_kernels_2d.h"), "rt") as f: @@ -126,13 +125,12 @@ def _get_pack_kernel(int_type, marker=-32768): def _pack_int2(arr, marker=-32768, int_dtype=cupy.int16): if arr.ndim != 2: - raise ValueError("only 2d arr suppported") + raise ValueError("only 2d arr supported") int2_dtype = cupy.dtype({"names": ["x", "y"], "formats": [int_dtype] * 2}) out = cupy.zeros(arr.shape + (2,), dtype=int_dtype) assert out.size == 2 * arr.size pack_kernel = _get_pack_kernel( - int_type="short" if int_dtype == cupy.int16 else "int", - marker=marker + int_type="short" if int_dtype == cupy.int16 else "int", marker=marker ) pack_kernel(arr, out, size=arr.size) out = cupy.squeeze(out.view(int2_dtype)) @@ -151,9 +149,7 @@ def _determine_padding(shape, padded_size, block_size): # shape is not isotropic orig_sy, orig_sx = shape if orig_sx != padded_size or orig_sy != padded_size: - padding_width = ( - (0, padded_size - orig_sy), (0, padded_size - orig_sx) - ) + padding_width = ((0, padded_size - orig_sy), (0, padded_size - orig_sx)) else: padding_width = None return padding_width @@ -212,7 +208,7 @@ def _get_aniso_distance_kernel_code(int_type, raw_out_var=True): ndim=2, int_type=int_type, var_name="dist", raw_var=raw_out_var ) code += _generate_indices_ops(ndim=2, int_type=int_type) - code += f""" + code += """ F tmp; F sq_dist; tmp = static_cast(y[i] - ind_0) * sampling[0]; @@ -236,13 +232,15 @@ def _get_aniso_distance_kernel(int_type): ) -def _distance_tranform_arg_check(distances_out, indices_out, - return_distances, return_indices): +def _distance_tranform_arg_check( + distances_out, indices_out, return_distances, return_indices +): """Raise a RuntimeError if the arguments are invalid""" error_msgs = [] if (not return_distances) and (not return_indices): error_msgs.append( - "at least one of return_distances/return_indices must be True") + "at least one of return_distances/return_indices must be True" + ) if distances_out and not return_distances: error_msgs.append( "return_distances must be True if distances is supplied" @@ -257,27 +255,30 @@ def _check_distances(distances, shape, dtype): if distances.shape != shape: raise RuntimeError("distances array has wrong shape") if distances.dtype != dtype: - raise RuntimeError( - f"distances array must have dtype: {dtype}") + raise RuntimeError(f"distances array must have dtype: {dtype}") 
def _check_indices(indices, shape, itemsize): if indices.shape != shape: raise RuntimeError("indices array has wrong shape") - if indices.dtype.kind not in 'iu': - raise RuntimeError( - f"indices array must have an integer dtype" - ) + if indices.dtype.kind not in "iu": + raise RuntimeError("indices array must have an integer dtype") elif indices.dtype.itemsize < itemsize: - raise RuntimeError( - f"indices dtype must have itemsize > {itemsize}" - ) - - -def _pba_2d(arr, sampling=None, return_distances=True, return_indices=False, - block_params=None, check_warp_size=False, *, - float64_distances=False, distances=None, indices=None): - + raise RuntimeError(f"indices dtype must have itemsize > {itemsize}") + + +def _pba_2d( + arr, + sampling=None, + return_distances=True, + return_indices=False, + block_params=None, + check_warp_size=False, + *, + float64_distances=False, + distances=None, + indices=None, +): indices_inplace = isinstance(indices, cupy.ndarray) dt_inplace = isinstance(distances, cupy.ndarray) _distance_tranform_arg_check( @@ -301,7 +302,7 @@ def _pba_2d(arr, sampling=None, return_distances=True, return_indices=False, # size must be a multiple of m2 m2 = max(1, min(padded_size // block_size, block_size)) # m2 must also be a power of two - m2 = 2**math.floor(math.log2(m2)) + m2 = 2 ** math.floor(math.log2(m2)) if padded_size % m2 != 0: raise RuntimeError("error in setting default m2") m3 = min(min(m1, m2), 2) @@ -442,14 +443,15 @@ def _pba_2d(arr, sampling=None, return_distances=True, return_indices=False, block, (input_arr, input_arr, size, bandSize2), ) - # Repeatly merging two bands into one + # Repeatedly merging two bands into one noBand = m2 while noBand > 1: grid = (math.ceil(size / block[0]), noBand // 2) kernelMergeBands( grid, block, - (output, input_arr, input_arr, size, size // noBand) + sampling_args, # noqa + (output, input_arr, input_arr, size, size // noBand) + + sampling_args, # noqa ) noBand //= 2 # Replace the forward link with the X coordinate of the seed to remove @@ -484,7 +486,7 @@ def _pba_2d(arr, sampling=None, return_distances=True, return_indices=False, distances = cupy.zeros(y.shape, dtype=dtype_out) # make sure maximum possible distance doesn"t overflow - max_possible_dist = sum((s - 1)**2 for s in y.shape) + max_possible_dist = sum((s - 1) ** 2 for s in y.shape) dist_int_type = "int" if max_possible_dist < 2**31 else "ptrdiff_t" if sampling is None: diff --git a/python/cucim/src/cucim/core/operations/morphology/_pba_3d.py b/python/cucim/src/cucim/core/operations/morphology/_pba_3d.py index f7c572bfe..142bd4a71 100644 --- a/python/cucim/src/cucim/core/operations/morphology/_pba_3d.py +++ b/python/cucim/src/cucim/core/operations/morphology/_pba_3d.py @@ -7,9 +7,15 @@ from cucim.skimage._vendored import pad from cucim.skimage._vendored._ndimage_util import _get_inttype -from ._pba_2d import (_check_distances, _check_indices, - _distance_tranform_arg_check, _generate_indices_ops, - _generate_shape, _get_block_size, lcm) +from ._pba_2d import ( + _check_distances, + _check_indices, + _distance_tranform_arg_check, + _generate_indices_ops, + _generate_shape, + _get_block_size, + lcm, +) pba3d_defines_template = """ @@ -41,7 +47,7 @@ #define GET_Y(value) (((value) >> 10) & 0x3ff) #define GET_Z(value) ((NOTSITE((value))) ? MAX_INT : ((value) & 0x3ff)) -""" # noqa +""" # noqa # 64bit version of ENCODE/DECODE to allow a 20-bit integer per coordinate axis. 
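The comment line just above introduces the 64-bit ENCODE/DECODE variant, and the hunk that follows shows its `GET_Y`/`GET_Z` macros: each of the three voxel coordinates is stored in 20 bits of a single integer, which is what the `0xfffff` masks and the shifts by 20 implement. A minimal sketch of that packing scheme in plain Python; the `encode3d_py`/`decode3d_py` helpers are hypothetical illustrations under the assumption of non-negative, in-range coordinates with `x` in the high bits, not functions from this patch:

```python
# Hypothetical sketch of the 64-bit PBA coordinate packing:
# 20 bits per axis, z in the lowest bits (as GET_Z reads them),
# y shifted by 20 (as GET_Y reads it), x assumed in the high bits.
# MARKER/NOTSITE handling is omitted for brevity.
BITS = 20
MASK = (1 << BITS) - 1  # 0xfffff


def encode3d_py(x: int, y: int, z: int) -> int:
    return ((x & MASK) << (2 * BITS)) | ((y & MASK) << BITS) | (z & MASK)


def decode3d_py(value: int) -> tuple:
    x = (value >> (2 * BITS)) & MASK
    y = (value >> BITS) & MASK
    z = value & MASK
    return x, y, z


# 20 bits bound each axis by 2**20; the 32-bit variant above uses
# 10-bit fields (0x3ff), which is why it is limited to sides <= 1024.
assert decode3d_py(encode3d_py(7, 1023, 99999)) == (7, 1023, 99999)
```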
@@ -64,12 +70,13 @@ #define GET_Y(value) (((value) >> 20) & 0xfffff) #define GET_Z(value) ((NOTSITE((value))) ? MAX_INT : ((value) & 0xfffff)) -""" # noqa +""" # noqa @cupy.memoize(True) -def get_pba3d_src(block_size_3d=32, marker=-2147483648, max_int=2147483647, - size_max=1024): +def get_pba3d_src( + block_size_3d=32, marker=-2147483648, max_int=2147483647, size_max=1024 +): pba3d_code = pba3d_defines_template.format( block_size_3d=block_size_3d, marker=marker, max_int=max_int ) @@ -90,7 +97,7 @@ def _get_encode3d_kernel(size_max, marker=-2147483648): if size_max > 1024: int_type = "ptrdiff_t" # int64_t else: - int_type = "int" # int32_t + int_type = "int" # int32_t # value must match TOID macro in the C++ code! if size_max > 1024: @@ -123,7 +130,7 @@ def _get_encode3d_kernel(size_max, marker=-2147483648): def encode3d(arr, marker=-2147483648, bit_depth=32, size_max=1024): if arr.ndim != 3: - raise ValueError("only 3d arr suppported") + raise ValueError("only 3d arr supported") if bit_depth not in [32, 64]: raise ValueError("only bit_depth of 32 or 64 is supported") if size_max > 1024: @@ -207,7 +214,9 @@ def _determine_padding(shape, block_size, m1, m2, m3, blockx, blocky): if aniso or round_up: smax = max(sz, sy, sx) padding_width = ( - (0, smax - orig_sz), (0, smax - orig_sy), (0, smax - orig_sx) + (0, smax - orig_sz), + (0, smax - orig_sy), + (0, smax - orig_sx), ) else: padding_width = None @@ -257,7 +266,7 @@ def _generate_aniso_distance_computation(): """ Compute euclidean distance from current coordinate (ind_0, ind_1, ind_2) to the coordinates of the nearest point (z, y, x).""" - return f""" + return """ F tmp = static_cast(z - ind_0) * sampling[0]; F sq_dist = tmp * tmp; tmp = static_cast(y - ind_1) * sampling[1]; @@ -281,9 +290,7 @@ def _get_aniso_distance_kernel_code(int_type, raw_out_var=True): def _get_aniso_distance_kernel(int_type): """Returns kernel computing the Euclidean distance from coordinates with axis spacing != 1.""" - operation = _get_aniso_distance_kernel_code( - int_type, raw_out_var=True - ) + operation = _get_aniso_distance_kernel_code(int_type, raw_out_var=True) return cupy.ElementwiseKernel( in_params="I z, I y, I x, raw F sampling", out_params="raw F dist", @@ -328,10 +335,18 @@ def _get_decode_as_distance_kernel(size_max, large_dist=False, sampling=None): ) -def _pba_3d(arr, sampling=None, return_distances=True, return_indices=False, - block_params=None, check_warp_size=False, *, - float64_distances=False, distances=None, indices=None): - +def _pba_3d( + arr, + sampling=None, + return_distances=True, + return_indices=False, + block_params=None, + check_warp_size=False, + *, + float64_distances=False, + distances=None, + indices=None, +): indices_inplace = isinstance(indices, cupy.ndarray) dt_inplace = isinstance(distances, cupy.ndarray) _distance_tranform_arg_check( @@ -396,9 +411,7 @@ def _pba_3d(arr, sampling=None, return_distances=True, return_indices=False, sampling_args = (sampling[2], sampling[1], sampling[0]) kernelFloodZ( - grid, - block, - (pba_images[buffer_idx], pba_images[1 - buffer_idx], size) + grid, block, (pba_images[buffer_idx], pba_images[1 - buffer_idx], size) ) buffer_idx = 1 - buffer_idx @@ -407,7 +420,8 @@ def _pba_3d(arr, sampling=None, return_distances=True, return_indices=False, kernelMaurerAxis( grid, block, - (pba_images[buffer_idx], pba_images[1 - buffer_idx], size) + sampling_args, # noqa + (pba_images[buffer_idx], pba_images[1 - buffer_idx], size) + + sampling_args, # noqa ) block = (block_size, m3, 1) @@ -415,7 +429,8 @@ 
def _pba_3d(arr, sampling=None, return_distances=True, return_indices=False, kernelColorAxis( grid, block, - (pba_images[1 - buffer_idx], pba_images[buffer_idx], size) + sampling_args, # noqa + (pba_images[1 - buffer_idx], pba_images[buffer_idx], size) + + sampling_args, # noqa ) if sampling is not None: @@ -428,7 +443,8 @@ def _pba_3d(arr, sampling=None, return_distances=True, return_indices=False, kernelMaurerAxis( grid, block, - (pba_images[buffer_idx], pba_images[1 - buffer_idx], size) + sampling_args, # noqa + (pba_images[buffer_idx], pba_images[1 - buffer_idx], size) + + sampling_args, # noqa ) block = (block_size, m3, 1) @@ -436,7 +452,8 @@ def _pba_3d(arr, sampling=None, return_distances=True, return_indices=False, kernelColorAxis( grid, block, - (pba_images[1 - buffer_idx], pba_images[buffer_idx], size) + sampling_args, # noqa + (pba_images[1 - buffer_idx], pba_images[buffer_idx], size) + + sampling_args, # noqa ) output = pba_images[buffer_idx] @@ -449,15 +466,13 @@ def _pba_3d(arr, sampling=None, return_distances=True, return_indices=False, distances = cupy.zeros(out_shape, dtype=dtype_out) # make sure maximum possible distance doesn't overflow - max_possible_dist = sum((s - 1)**2 for s in out_shape) + max_possible_dist = sum((s - 1) ** 2 for s in out_shape) large_dist = max_possible_dist >= 2**31 if not return_indices: # Compute distances without forming explicit coordinate arrays. kern = _get_decode_as_distance_kernel( - size_max=size_max, - large_dist=large_dist, - sampling=sampling + size_max=size_max, large_dist=large_dist, sampling=sampling ) if sampling is None: kern(output[:orig_sz, :orig_sy, :orig_sx], distances) @@ -467,13 +482,15 @@ def _pba_3d(arr, sampling=None, return_distances=True, return_indices=False, return (distances,) if return_indices: - x, y, z = decode3d(output[:orig_sz, :orig_sy, :orig_sx], - size_max=size_max) + x, y, z = decode3d( + output[:orig_sz, :orig_sy, :orig_sx], size_max=size_max + ) vals = () if return_distances: if sampling is None: kern = _get_distance_kernel( - int_type=_get_inttype(distances), large_dist=large_dist, + int_type=_get_inttype(distances), + large_dist=large_dist, ) kern(z, y, x, distances) else: diff --git a/python/cucim/src/cucim/core/operations/morphology/tests/test_distance_transform.py b/python/cucim/src/cucim/core/operations/morphology/tests/test_distance_transform.py index 0515c9a71..50b6335db 100644 --- a/python/cucim/src/cucim/core/operations/morphology/tests/test_distance_transform.py +++ b/python/cucim/src/cucim/core/operations/morphology/tests/test_distance_transform.py @@ -23,26 +23,25 @@ def assert_percentile_equal(arr1, arr2, pct=95): assert mismatch < pct_mismatch -@pytest.mark.parametrize('return_indices', [False, True]) -@pytest.mark.parametrize('return_distances', [False, True]) +@pytest.mark.parametrize("return_indices", [False, True]) +@pytest.mark.parametrize("return_distances", [False, True]) @pytest.mark.parametrize( - 'shape, sampling', + "shape, sampling", [ ((256, 128), None), ((384, 256), (1.5, 1.5)), ((384, 256), (3, 2)), # integer-valued anisotropic - ((384, 256), (2.25, .85)), + ((384, 256), (2.25, 0.85)), ((14, 32, 50), None), - ((50, 32, 24), (2., 2., 2.)), + ((50, 32, 24), (2.0, 2.0, 2.0)), ((50, 32, 24), (3, 1, 2)), # integer-valued anisotropic ], ) -@pytest.mark.parametrize('density', ['single_point', 5, 50, 95]) -@pytest.mark.parametrize('block_params', [None, (1, 1, 1)]) +@pytest.mark.parametrize("density", ["single_point", 5, 50, 95]) +@pytest.mark.parametrize("block_params", [None, (1, 1, 
1)]) def test_distance_transform_edt( shape, sampling, return_distances, return_indices, density, block_params ): - if not (return_indices or return_distances): return @@ -52,8 +51,8 @@ def test_distance_transform_edt( return_indices=return_indices, ) kwargs_cucim = copy(kwargs_scipy) - kwargs_cucim['block_params'] = block_params - if density == 'single_point': + kwargs_cucim["block_params"] = block_params + if density == "single_point": img = cp.ones(shape, dtype=bool) img[tuple(s // 2 for s in shape)] = 0 else: @@ -78,7 +77,7 @@ def test_distance_transform_edt( @pytest.mark.parametrize( - 'shape', + "shape", ( [(s,) * 2 for s in range(512, 512 + 32)] + [(s,) * 2 for s in range(1024, 1024 + 16)] @@ -86,9 +85,8 @@ def test_distance_transform_edt( + [(s,) * 2 for s in range(4100, 4100)] ), ) -@pytest.mark.parametrize('density', [2, 98]) +@pytest.mark.parametrize("density", [2, 98]) def test_distance_transform_edt_additional_shapes(shape, density): - kwargs_scipy = dict(return_distances=True, return_indices=False) kwargs_cucim = copy(kwargs_scipy) img = binary_image(shape, pct_true=density) @@ -98,18 +96,17 @@ def test_distance_transform_edt_additional_shapes(shape, density): @pytest.mark.parametrize( - 'shape', + "shape", [(s,) * 2 for s in range(1024, 1024 + 4)], ) @pytest.mark.parametrize( - 'block_params', - [(1, 1, 1), (5, 4, 2), (3, 8, 4), (7, 16, 1), (11, 32, 3), (1, 1, 16)] + "block_params", + [(1, 1, 1), (5, 4, 2), (3, 8, 4), (7, 16, 1), (11, 32, 3), (1, 1, 16)], ) def test_distance_transform_edt_block_params(shape, block_params): - kwargs_scipy = dict(return_distances=True, return_indices=False) kwargs_cucim = copy(kwargs_scipy) - kwargs_cucim['block_params'] = block_params + kwargs_cucim["block_params"] = block_params img = binary_image(shape, pct_true=4) distances = distance_transform_edt(img, **kwargs_cucim) expected = ndi_cpu.distance_transform_edt(cp.asnumpy(img), **kwargs_scipy) @@ -117,13 +114,18 @@ def test_distance_transform_edt_block_params(shape, block_params): @pytest.mark.parametrize( - 'block_params', [ - (0, 1, 1), (1, 0, 1), (1, 1, 0), # no elements can be < 1 - (1, 3, 1), (1, 5, 1), (1, 7, 1), # 2nd element must be a power of 2 + "block_params", + [ + (0, 1, 1), + (1, 0, 1), + (1, 1, 0), # no elements can be < 1 + (1, 3, 1), + (1, 5, 1), + (1, 7, 1), # 2nd element must be a power of 2 (128, 1, 1), # m1 too large for the array size (1, 128, 1), # m2 too large for the array size (1, 1, 128), # m3 too large for the array size - ] + ], ) def test_distance_transform_edt_block_params_invalid(block_params): img = binary_image((512, 512), pct_true=4) @@ -131,11 +133,11 @@ def test_distance_transform_edt_block_params_invalid(block_params): distance_transform_edt(img, block_params=block_params) -@pytest.mark.parametrize('value', [0, 1, 3]) -@pytest.mark.parametrize('ndim', [2, 3]) +@pytest.mark.parametrize("value", [0, 1, 3]) +@pytest.mark.parametrize("ndim", [2, 3]) def test_distance_transform_edt_uniform_valued(value, ndim): """ensure default block_params is robust to anisotropic shape.""" - img = cp.full((48, ) * ndim, value, dtype=cp.uint8) + img = cp.full((48,) * ndim, value, dtype=cp.uint8) # ensure there is at least 1 pixel at background intensity img[(slice(24, 25),) * ndim] = 0 out = distance_transform_edt(img) @@ -143,8 +145,8 @@ def test_distance_transform_edt_uniform_valued(value, ndim): cp.testing.assert_allclose(out, expected) -@pytest.mark.parametrize('sx', list(range(16))) -@pytest.mark.parametrize('sy', list(range(16))) +@pytest.mark.parametrize("sx", 
list(range(16))) +@pytest.mark.parametrize("sy", list(range(16))) def test_distance_transform_edt_2d_aniso(sx, sy): """ensure default block_params is robust to anisotropic shape.""" shape = (128 + sy, 128 + sx) @@ -154,14 +156,14 @@ def test_distance_transform_edt_2d_aniso(sx, sy): cp.testing.assert_allclose(out, expected) -@pytest.mark.parametrize('ndim', [2, 3]) -@pytest.mark.parametrize('sampling', [None, 'iso', 'aniso']) +@pytest.mark.parametrize("ndim", [2, 3]) +@pytest.mark.parametrize("sampling", [None, "iso", "aniso"]) def test_distance_transform_inplace_distance(ndim, sampling): - img = binary_image((32, ) * ndim, pct_true=80) + img = binary_image((32,) * ndim, pct_true=80) distances = cp.empty(img.shape, dtype=cp.float32) - if sampling == 'iso': + if sampling == "iso": sampling = (1.5,) * ndim - elif sampling == 'aniso': + elif sampling == "aniso": sampling = tuple(range(1, ndim + 1)) distance_transform_edt(img, sampling=sampling, distances=distances) expected = ndi_cpu.distance_transform_edt( @@ -170,9 +172,9 @@ def test_distance_transform_inplace_distance(ndim, sampling): cp.testing.assert_allclose(distances, expected) -@pytest.mark.parametrize('ndim', [2, 3]) +@pytest.mark.parametrize("ndim", [2, 3]) def test_distance_transform_inplace_distance_errors(ndim): - img = binary_image((32, ) * ndim, pct_true=80) + img = binary_image((32,) * ndim, pct_true=80) # for binary input, distances output is float32. Other dtypes raise with pytest.raises(RuntimeError): @@ -190,28 +192,34 @@ def test_distance_transform_inplace_distance_errors(ndim): # can't provide indices array when return_indices is False with pytest.raises(RuntimeError): distances = cp.empty(img.shape, dtype=cp.float32) - distance_transform_edt(img, distances=distances, - return_distances=False, return_indices=True) + distance_transform_edt( + img, + distances=distances, + return_distances=False, + return_indices=True, + ) -@pytest.mark.parametrize('ndim', [2, 3]) -@pytest.mark.parametrize('sampling', [None, 'iso', 'aniso']) -@pytest.mark.parametrize('dtype', [cp.int16, cp.uint16, cp.uint32, cp.int32, - cp.uint64, cp.int64]) -@pytest.mark.parametrize('return_distances', [False, True]) +@pytest.mark.parametrize("ndim", [2, 3]) +@pytest.mark.parametrize("sampling", [None, "iso", "aniso"]) +@pytest.mark.parametrize( + "dtype", [cp.int16, cp.uint16, cp.uint32, cp.int32, cp.uint64, cp.int64] +) +@pytest.mark.parametrize("return_distances", [False, True]) def test_distance_transform_inplace_indices( ndim, sampling, dtype, return_distances ): - img = binary_image((32, ) * ndim, pct_true=80) + img = binary_image((32,) * ndim, pct_true=80) if ndim == 3 and dtype in [cp.int16, cp.uint16]: pytest.skip(reason="3d case requires at least 32-bit integer output") - if sampling == 'iso': + if sampling == "iso": sampling = (1.5,) * ndim - elif sampling == 'aniso': + elif sampling == "aniso": sampling = tuple(range(1, ndim + 1)) common_kwargs = dict( - sampling=sampling, return_distances=return_distances, - return_indices=True + sampling=sampling, + return_distances=return_distances, + return_indices=True, ) # verify that in-place and out-of-place results agree indices = cp.empty((ndim,) + img.shape, dtype=dtype) @@ -223,9 +231,9 @@ def test_distance_transform_inplace_indices( cp.testing.assert_array_equal(indices, expected) -@pytest.mark.parametrize('ndim', [2, 3]) +@pytest.mark.parametrize("ndim", [2, 3]) def test_distance_transform_inplace_indices_errors(ndim): - img = binary_image((32, ) * ndim, pct_true=80) + img = binary_image((32,) * 
ndim, pct_true=80) common_kwargs = dict(return_distances=False, return_indices=True) # int8 has itemsize too small @@ -249,9 +257,9 @@ def test_distance_transform_inplace_indices_errors(ndim): distance_transform_edt(img, indices=indices, return_indices=False) -@pytest.mark.parametrize('sx', list(range(4))) -@pytest.mark.parametrize('sy', list(range(4))) -@pytest.mark.parametrize('sz', list(range(4))) +@pytest.mark.parametrize("sx", list(range(4))) +@pytest.mark.parametrize("sy", list(range(4))) +@pytest.mark.parametrize("sz", list(range(4))) def test_distance_transform_edt_3d_aniso(sx, sy, sz): """ensure default block_params is robust to anisotropic shape.""" shape = (16 + sz, 32 + sy, 48 + sx) @@ -262,7 +270,7 @@ def test_distance_transform_edt_3d_aniso(sx, sy, sz): cp.testing.assert_allclose(out, expected) -@pytest.mark.parametrize('ndim', [1, 4, 5]) +@pytest.mark.parametrize("ndim", [1, 4, 5]) def test_distance_transform_edt_unsupported_ndim(ndim): with pytest.raises(NotImplementedError): distance_transform_edt(cp.zeros((8,) * ndim)) diff --git a/python/cucim/src/cucim/core/operations/spatial/__init__.py b/python/cucim/src/cucim/core/operations/spatial/__init__.py index 19fbc08e2..b5ad7edb2 100644 --- a/python/cucim/src/cucim/core/operations/spatial/__init__.py +++ b/python/cucim/src/cucim/core/operations/spatial/__init__.py @@ -1,9 +1,13 @@ -from .rotate_and_flip import (image_flip, image_rotate_90, rand_image_flip, - rand_image_rotate_90) +from .rotate_and_flip import ( + image_flip, + image_rotate_90, + rand_image_flip, + rand_image_rotate_90, +) __all__ = [ "image_rotate_90", "image_flip", "rand_image_flip", - "rand_image_rotate_90" + "rand_image_rotate_90", ] diff --git a/python/cucim/src/cucim/core/operations/spatial/rotate_and_flip.py b/python/cucim/src/cucim/core/operations/spatial/rotate_and_flip.py index dc99e72af..a22ddfb31 100755 --- a/python/cucim/src/cucim/core/operations/spatial/rotate_and_flip.py +++ b/python/cucim/src/cucim/core/operations/spatial/rotate_and_flip.py @@ -18,10 +18,7 @@ import numpy as np -def image_flip( - img: Any, - spatial_axis: tuple() -) -> Any: +def image_flip(img: Any, spatial_axis: tuple()) -> Any: """ Shape preserving order reversal of elements in input array along the given spatial axis @@ -65,13 +62,9 @@ def image_flip( return result -def image_rotate_90( - img: Any, - k: int, - spatial_axis: tuple() -) -> Any: +def image_rotate_90(img: Any, k: int, spatial_axis: tuple()) -> Any: """ - Rotate input array by 90 degress along the given axis + Rotate input array by 90 degrees along the given axis Parameters ---------- @@ -117,7 +110,7 @@ def rand_image_flip( img: Any, spatial_axis: tuple(), prob: float = 0.1, - whole_batch: bool = False + whole_batch: bool = False, ) -> Any: """ Randomly flips the image along axis. 
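The hunks on either side of this point reformat cuCIM's spatial transforms (`image_flip`, `image_rotate_90`, and their `rand_*` wrappers). A short usage sketch consistent with the signatures and test calls visible in this patch; the array values are invented for illustration, and this is not code from the repository:

```python
# Illustrative usage of the spatial operations touched by this patch.
# Assumption: channel-first (C, H, W) arrays, as in the tests above.
import numpy as np

import cucim.core.operations.spatial as spt

arr = np.arange(2 * 4 * 4, dtype=np.float32).reshape(2, 4, 4)

flipped = spt.image_flip(arr, (1, 2))  # reverse along H and W
rotated = spt.image_rotate_90(arr, 1, [1, 2])  # one 90-degree turn

# rand_* variants apply the transform with probability `prob`;
# `whole_batch=True` makes a single draw cover the entire batch.
maybe_flipped = spt.rand_image_flip(arr, spatial_axis=(1, 2), prob=0.5)
```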
@@ -170,7 +163,7 @@ def rand_image_rotate_90( spatial_axis: tuple(), prob: float = 0.1, max_k: int = 3, - whole_batch: bool = False + whole_batch: bool = False, ) -> Any: """ With probability `prob`, input arrays are rotated by 90 degrees diff --git a/python/cucim/src/cucim/core/operations/spatial/tests/test_flip.py b/python/cucim/src/cucim/core/operations/spatial/tests/test_flip.py index 08be7829f..a1c9a28fa 100644 --- a/python/cucim/src/cucim/core/operations/spatial/tests/test_flip.py +++ b/python/cucim/src/cucim/core/operations/spatial/tests/test_flip.py @@ -27,7 +27,7 @@ def get_flipped_data(): def test_flip_param(): arr = get_input_arr() with pytest.raises(TypeError): - img = Image.fromarray(arr.T, 'RGB') + img = Image.fromarray(arr.T, "RGB") spt.image_flip(img, (1, 2)) diff --git a/python/cucim/src/cucim/core/operations/spatial/tests/test_random_flip.py b/python/cucim/src/cucim/core/operations/spatial/tests/test_random_flip.py index 397603f57..d6076f9d3 100644 --- a/python/cucim/src/cucim/core/operations/spatial/tests/test_random_flip.py +++ b/python/cucim/src/cucim/core/operations/spatial/tests/test_random_flip.py @@ -49,10 +49,9 @@ def test_rand_flip_batchinput(): arr = get_input_arr() flip_arr = get_flipped_data() arr_batch = np.stack((arr,) * 8, axis=0) - np_output = spt.rand_image_flip(arr_batch, - prob=1.0, - spatial_axis=(2, 3), - whole_batch=True) + np_output = spt.rand_image_flip( + arr_batch, prob=1.0, spatial_axis=(2, 3), whole_batch=True + ) assert np_output.shape[0] == 8 diff --git a/python/cucim/src/cucim/core/operations/spatial/tests/test_random_rotate90.py b/python/cucim/src/cucim/core/operations/spatial/tests/test_random_rotate90.py index 74d8b13af..9fe1279bf 100644 --- a/python/cucim/src/cucim/core/operations/spatial/tests/test_random_rotate90.py +++ b/python/cucim/src/cucim/core/operations/spatial/tests/test_random_rotate90.py @@ -26,19 +26,17 @@ def get_rotated_data(): def test_rand_rotate90_numpy_input(): arr = get_input_arr() rotate90_arr = get_rotated_data() - output = spt.rand_image_rotate_90(arr, - max_k=1, - prob=1.0, - spatial_axis=[1, 2]) + output = spt.rand_image_rotate_90( + arr, max_k=1, prob=1.0, spatial_axis=[1, 2] + ) assert np.allclose(output, rotate90_arr) def test_rand_rotate90_zero_prob(): arr = get_input_arr() - output = spt.rand_image_rotate_90(arr, - max_k=1, - prob=0.0, - spatial_axis=[1, 2]) + output = spt.rand_image_rotate_90( + arr, max_k=1, prob=0.0, spatial_axis=[1, 2] + ) assert np.allclose(output, arr) @@ -46,10 +44,9 @@ def test_rand_rotate90_cupy_input(): arr = get_input_arr() rotate90_arr = get_rotated_data() cupy_arr = cupy.asarray(arr) - cupy_output = spt.rand_image_rotate_90(cupy_arr, - max_k=1, - prob=1.0, - spatial_axis=[1, 2]) + cupy_output = spt.rand_image_rotate_90( + cupy_arr, max_k=1, prob=1.0, spatial_axis=[1, 2] + ) np_output = cupy.asnumpy(cupy_output) assert np.allclose(np_output, rotate90_arr) @@ -58,11 +55,9 @@ def test_rand_rotate90_batchinput(): arr = get_input_arr() rotate90_arr = get_rotated_data() arr_batch = np.stack((arr,) * 8, axis=0) - np_output = spt.rand_image_rotate_90(arr_batch, - max_k=1, - prob=1.0, - spatial_axis=[2, 3], - whole_batch=True) + np_output = spt.rand_image_rotate_90( + arr_batch, max_k=1, prob=1.0, spatial_axis=[2, 3], whole_batch=True + ) assert np_output.shape[0] == 8 for i in range(np_output.shape[0]): diff --git a/python/cucim/src/cucim/core/operations/spatial/tests/test_rotate90.py b/python/cucim/src/cucim/core/operations/spatial/tests/test_rotate90.py index 333b2c983..9bf2d6dca 
100644 --- a/python/cucim/src/cucim/core/operations/spatial/tests/test_rotate90.py +++ b/python/cucim/src/cucim/core/operations/spatial/tests/test_rotate90.py @@ -27,7 +27,7 @@ def get_rotated_data(): def test_rotate90_param(): arr = get_input_arr() with pytest.raises(TypeError): - img = Image.fromarray(arr.T, 'RGB') + img = Image.fromarray(arr.T, "RGB") spt.image_rotate_90(img, 1, [1, 2]) diff --git a/python/cucim/src/cucim/skimage/__init__.py b/python/cucim/src/cucim/skimage/__init__.py index c6cf7d4eb..70ec2c791 100644 --- a/python/cucim/src/cucim/skimage/__init__.py +++ b/python/cucim/src/cucim/skimage/__init__.py @@ -70,7 +70,14 @@ def __dir__(): # Legacy imports into the root namespace; not advertised in __all__ -from .util.dtype import (dtype_limits, img_as_bool, img_as_float, - img_as_float32, img_as_float64, img_as_int, - img_as_ubyte, img_as_uint) +from .util.dtype import ( + dtype_limits, + img_as_bool, + img_as_float, + img_as_float32, + img_as_float64, + img_as_int, + img_as_ubyte, + img_as_uint, +) from .util.lookfor import lookfor diff --git a/python/cucim/src/cucim/skimage/__init__.pyi b/python/cucim/src/cucim/skimage/__init__.pyi index 1c3c07ea6..8b608db5b 100644 --- a/python/cucim/src/cucim/skimage/__init__.pyi +++ b/python/cucim/src/cucim/skimage/__init__.pyi @@ -1,23 +1,35 @@ -import lazy_loader as lazy +import lazy_loader as lazy # noqa: F401 submodules = [ - 'color', - 'data', - 'exposure', - 'feature', - 'filters', - 'measure', - 'metrics', - 'morphology', - 'registration', - 'restoration', - 'segmentation', - 'transform', - 'util', + "color", + "data", + "exposure", + "feature", + "filters", + "measure", + "metrics", + "morphology", + "registration", + "restoration", + "segmentation", + "transform", + "util", ] __all__ = submodules -from . import (color, data, exposure, feature, filters, measure, metrics, - morphology, registration, restoration, segmentation, transform, - util) +from . import ( # noqa: F401, E402 + color, + data, + exposure, + feature, + filters, + measure, + metrics, + morphology, + registration, + restoration, + segmentation, + transform, + util, +) diff --git a/python/cucim/src/cucim/skimage/_shared/_warnings.py b/python/cucim/src/cucim/skimage/_shared/_warnings.py index 4af6bae68..e163456d3 100644 --- a/python/cucim/src/cucim/skimage/_shared/_warnings.py +++ b/python/cucim/src/cucim/skimage/_shared/_warnings.py @@ -5,7 +5,7 @@ import warnings from contextlib import contextmanager -__all__ = ['all_warnings', 'expected_warnings', 'warn'] +__all__ = ["all_warnings", "expected_warnings", "warn"] # A version of `warnings.warn` with a default stacklevel of 2. @@ -57,7 +57,7 @@ def all_warnings(): frame = inspect.currentframe() if frame: for f in inspect.getouterframes(frame): - f[0].f_locals['__warningregistry__'] = {} + f[0].f_locals["__warningregistry__"] = {} del frame for mod_name, mod in list(sys.modules.items()): @@ -108,18 +108,20 @@ def expected_warnings(matching): """ if isinstance(matching, str): - raise ValueError('``matching`` should be a list of strings and not ' - 'a string itself.') + raise ValueError( + "``matching`` should be a list of strings and not " + "a string itself." 
+ ) # Special case for disabling the context manager if matching is None: yield None return - strict_warnings = os.environ.get('SKIMAGE_TEST_STRICT_WARNINGS', '1') - if strict_warnings.lower() == 'true': + strict_warnings = os.environ.get("SKIMAGE_TEST_STRICT_WARNINGS", "1") + if strict_warnings.lower() == "true": strict_warnings = True - elif strict_warnings.lower() == 'false': + elif strict_warnings.lower() == "false": strict_warnings = False else: strict_warnings = bool(int(strict_warnings)) @@ -131,7 +133,7 @@ def expected_warnings(matching): # Allow users to provide None while None in matching: matching.remove(None) - remaining = [m for m in matching if r'\A\Z' not in m.split('|')] + remaining = [m for m in matching if r"\A\Z" not in m.split("|")] for warn in w: found = False for match in matching: @@ -140,7 +142,10 @@ def expected_warnings(matching): if match in remaining: remaining.remove(match) if strict_warnings and not found: - raise ValueError(f'Unexpected warning: {str(warn.message)}') + raise ValueError(f"Unexpected warning: {str(warn.message)}") if strict_warnings and (len(remaining) > 0): - msg = f"No warning raised matching:\n{{'\n'.join(remaining)}}" + newline = "\n" + msg = ( + f"No warning raised matching:{newline}{newline.join(remaining)}" + ) raise ValueError(msg) diff --git a/python/cucim/src/cucim/skimage/_shared/coord.py b/python/cucim/src/cucim/skimage/_shared/coord.py index 125f5106b..f95ddd2aa 100644 --- a/python/cucim/src/cucim/skimage/_shared/coord.py +++ b/python/cucim/src/cucim/skimage/_shared/coord.py @@ -39,12 +39,10 @@ def _ensure_spacing(coord, spacing, p_norm, max_out): if idx not in rejected_peaks_indices: # keep current point and the points at exactly spacing from it candidates.remove(idx) - dist = distance.cdist([coord[idx]], - coord[candidates], - distance.minkowski, - p=p_norm).reshape(-1) - candidates = [c for c, d in zip(candidates, dist) - if d < spacing] + dist = distance.cdist( + [coord[idx]], coord[candidates], distance.minkowski, p=p_norm + ).reshape(-1) + candidates = [c for c, d in zip(candidates, dist) if d < spacing] # candidates.remove(keep) rejected_peaks_indices.update(candidates) @@ -60,8 +58,15 @@ def _ensure_spacing(coord, spacing, p_norm, max_out): return output -def ensure_spacing(coords, spacing=1, p_norm=np.inf, min_split_size=50, - max_out=None, *, max_split_size=2000): +def ensure_spacing( + coords, + spacing=1, + p_norm=np.inf, + min_split_size=50, + max_out=None, + *, + max_split_size=2000, +): """Returns a subset of coord where a minimum spacing is guaranteed. 
Parameters @@ -99,7 +104,6 @@ def ensure_spacing(coords, spacing=1, p_norm=np.inf, min_split_size=50, output = coords if len(coords): - coords = cp.atleast_2d(coords) coords = cp.asnumpy(coords) if min_split_size is None: @@ -110,14 +114,16 @@ def ensure_spacing(coords, spacing=1, p_norm=np.inf, min_split_size=50, split_size = min_split_size while coord_count - split_idx[-1] > max_split_size: split_size *= 2 - split_idx.append(split_idx[-1] + min(split_size, - max_split_size)) + split_idx.append( + split_idx[-1] + min(split_size, max_split_size) + ) batch_list = np.array_split(coords, split_idx) output = np.zeros((0, coords.shape[1]), dtype=coords.dtype) for batch in batch_list: - output = _ensure_spacing(np.vstack([output, batch]), - spacing, p_norm, max_out) + output = _ensure_spacing( + np.vstack([output, batch]), spacing, p_norm, max_out + ) if max_out is not None and len(output) >= max_out: break diff --git a/python/cucim/src/cucim/skimage/_shared/fft.py b/python/cucim/src/cucim/skimage/_shared/fft.py index 3c9fe7b63..1c503a9c8 100644 --- a/python/cucim/src/cucim/skimage/_shared/fft.py +++ b/python/cucim/src/cucim/skimage/_shared/fft.py @@ -10,4 +10,4 @@ fftmodule = cupyx.scipy.fft -__all__ = ['fftmodule', 'next_fast_len'] +__all__ = ["fftmodule", "next_fast_len"] diff --git a/python/cucim/src/cucim/skimage/_shared/filters.py b/python/cucim/src/cucim/skimage/_shared/filters.py index bc0ea4905..f15c6fca4 100644 --- a/python/cucim/src/cucim/skimage/_shared/filters.py +++ b/python/cucim/src/cucim/skimage/_shared/filters.py @@ -15,6 +15,7 @@ class _PatchClassRepr(type): """Control class representations in rendered signatures.""" + def __repr__(cls): return f"<{cls.__name__}>" @@ -31,9 +32,17 @@ class ChannelAxisNotSet(metaclass=_PatchClassRepr): """ -def gaussian(image, sigma=1, output=None, mode='nearest', cval=0, - preserve_range=False, truncate=4.0, *, - channel_axis=ChannelAxisNotSet): +def gaussian( + image, + sigma=1, + output=None, + mode="nearest", + cval=0, + preserve_range=False, + truncate=4.0, + *, + channel_axis=ChannelAxisNotSet, +): """Multi-dimensional Gaussian filter. 
Parameters @@ -162,6 +171,7 @@ def gaussian(image, sigma=1, output=None, mode='nearest', cval=0, output = cp.empty_like(image) elif not cp.issubdtype(output.dtype, cp.floating): raise ValueError("Provided output data type is not float") - ndi.gaussian_filter(image, sigma, output=output, mode=mode, cval=cval, - truncate=truncate) + ndi.gaussian_filter( + image, sigma, output=output, mode=mode, cval=cval, truncate=truncate + ) return output diff --git a/python/cucim/src/cucim/skimage/_shared/testing.py b/python/cucim/src/cucim/skimage/_shared/testing.py index 2442db08d..f73bf94bf 100644 --- a/python/cucim/src/cucim/skimage/_shared/testing.py +++ b/python/cucim/src/cucim/skimage/_shared/testing.py @@ -1,9 +1,17 @@ import pytest -from numpy.testing import (TestCase, assert_, assert_allclose, # noqa - assert_almost_equal, assert_array_almost_equal, - assert_array_almost_equal_nulp, assert_array_equal, - assert_array_less, assert_equal, assert_no_warnings, - assert_warns) +from numpy.testing import ( # noqa + TestCase, + assert_, + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_almost_equal_nulp, + assert_array_equal, + assert_array_less, + assert_equal, + assert_no_warnings, + assert_warns, +) from ._warnings import expected_warnings # noqa @@ -29,6 +37,6 @@ def fetch(data_filename): # CuPy Backend: TODO: avoid call to non-public _fetch method return _fetch(data_filename) except (ConnectionError, ModuleNotFoundError): - pytest.skip(f'Unable to download {data_filename}') + pytest.skip(f"Unable to download {data_filename}") else: - pytest.skip('skimage _fetch utility not found') + pytest.skip("skimage _fetch utility not found") diff --git a/python/cucim/src/cucim/skimage/_shared/tests/test_utils.py b/python/cucim/src/cucim/skimage/_shared/tests/test_utils.py index c7d817b84..a2ae1729c 100644 --- a/python/cucim/src/cucim/skimage/_shared/tests/test_utils.py +++ b/python/cucim/src/cucim/skimage/_shared/tests/test_utils.py @@ -5,33 +5,40 @@ import numpy as np import pytest -from cucim.skimage._shared.utils import (_supported_float_type, - _validate_interpolation_order, - change_default_value, - channel_as_last_axis, check_nD, - deprecate_kwarg) +from cucim.skimage._shared.utils import ( + _supported_float_type, + _validate_interpolation_order, + change_default_value, + channel_as_last_axis, + check_nD, + deprecate_kwarg, +) complex_dtypes = [np.complex64, np.complex128] -if hasattr(np, 'complex256'): +if hasattr(np, "complex256"): complex_dtypes += [np.complex256] have_numpydoc = False try: import numpydoc # noqa + have_numpydoc = True except ImportError: pass def test_change_default_value(): - - @change_default_value('arg1', new_value=-1, changed_version='0.12') + @change_default_value("arg1", new_value=-1, changed_version="0.12") def foo(arg0, arg1=0, arg2=1): """Expected docstring""" return arg0, arg1, arg2 - @change_default_value('arg1', new_value=-1, changed_version='0.12', - warning_msg="Custom warning message") + @change_default_value( + "arg1", + new_value=-1, + changed_version="0.12", + warning_msg="Custom warning message", + ) def bar(arg0, arg1=0, arg2=1): """Expected docstring""" return arg0, arg1, arg2 @@ -41,10 +48,12 @@ def bar(arg0, arg1=0, arg2=1): assert foo(0) == (0, 0, 1) assert bar(0) == (0, 0, 1) - expected_msg = ("The new recommended value for arg1 is -1. Until " - "version 0.12, the default arg1 value is 0. From " - "version 0.12, the arg1 default value will be -1. 
" - "To avoid this warning, please explicitly set arg1 value.") + expected_msg = ( + "The new recommended value for arg1 is -1. Until " + "version 0.12, the default arg1 value is 0. From " + "version 0.12, the arg1 default value will be -1. " + "To avoid this warning, please explicitly set arg1 value." + ) assert str(record[0].message) == expected_msg assert str(record[1].message) == "Custom warning message" @@ -56,24 +65,25 @@ def bar(arg0, arg1=0, arg2=1): assert foo(0, arg1=0) == (0, 0, 1) # Function name and doc is preserved - assert foo.__name__ == 'foo' + assert foo.__name__ == "foo" if sys.flags.optimize < 2: # if PYTHONOPTIMIZE is set to 2, docstrings are stripped - assert foo.__doc__ == 'Expected docstring' + assert foo.__doc__ == "Expected docstring" # Assert no warnings were raised assert len(recorded) == 0 def test_deprecate_kwarg(): - - @deprecate_kwarg({'old_arg1': 'new_arg1'}, '22.02.00') + @deprecate_kwarg({"old_arg1": "new_arg1"}, "22.02.00") def foo(arg0, new_arg1=1, arg2=None): """Expected docstring""" return arg0, new_arg1, arg2 - @deprecate_kwarg({'old_arg1': 'new_arg1'}, - deprecated_version='22.02.00', - warning_msg="Custom warning message") + @deprecate_kwarg( + {"old_arg1": "new_arg1"}, + deprecated_version="22.02.00", + warning_msg="Custom warning message", + ) def bar(arg0, new_arg1=1, arg2=None): """Expected docstring""" return arg0, new_arg1, arg2 @@ -84,8 +94,10 @@ def bar(arg0, new_arg1=1, arg2=None): assert foo(0, old_arg1=1) == (0, 1, None) assert bar(0, old_arg1=1) == (0, 1, None) - msg = ("`old_arg1` is a deprecated argument name " - "for `foo`. Please use `new_arg1` instead.") + msg = ( + "`old_arg1` is a deprecated argument name " + "for `foo`. Please use `new_arg1` instead." + ) assert str(record[0].message) == msg assert str(record[1].message) == "Custom warning message" @@ -102,13 +114,15 @@ def bar(arg0, new_arg1=1, arg2=None): assert foo(0, arg2=2) == (0, 1, 2) assert foo(0, 1, arg2=2) == (0, 1, 2) # Function name and doc is preserved - assert foo.__name__ == 'foo' + assert foo.__name__ == "foo" if sys.flags.optimize < 2: # if PYTHONOPTIMIZE is set to 2, docstrings are stripped if not have_numpydoc: assert foo.__doc__ == """Expected docstring""" else: - assert foo.__doc__ == """Expected docstring + assert ( + foo.__doc__ + == """Expected docstring Other Parameters @@ -118,25 +132,30 @@ def bar(arg0, new_arg1=1, arg2=None): .. 
deprecated:: 22.02.00 """ + ) assert len(recorded) == 0 def test_check_nD(): - z = np.random.random(200 ** 2).reshape((200, 200)) + z = np.random.random(200**2).reshape((200, 200)) x = z[10:30, 30:10] with pytest.raises(ValueError): check_nD(x, 2) -@pytest.mark.parametrize('dtype', [bool, int, np.uint8, np.uint16, - float, np.float32, np.float64]) -@pytest.mark.parametrize('order', [None, -1, 0, 1, 2, 3, 4, 5, 6]) +@pytest.mark.parametrize( + "dtype", [bool, int, np.uint8, np.uint16, float, np.float32, np.float64] +) +@pytest.mark.parametrize("order", [None, -1, 0, 1, 2, 3, 4, 5, 6]) def test_validate_interpolation_order(dtype, order): if order is None: # Default order - assert (_validate_interpolation_order(dtype, None) == 0 - if dtype == bool else 1) + assert ( + _validate_interpolation_order(dtype, None) == 0 + if dtype == bool + else 1 + ) elif order < 0 or order > 5: # Order not in valid range with pytest.raises(ValueError): @@ -151,21 +170,40 @@ def test_validate_interpolation_order(dtype, order): @pytest.mark.parametrize( - 'dtype', - [bool, np.float16, np.float32, np.float64, np.uint8, np.uint16, np.uint32, - np.uint64, np.int8, np.int16, np.int32, np.int64] + "dtype", + [ + bool, + np.float16, + np.float32, + np.float64, + np.uint8, + np.uint16, + np.uint32, + np.uint64, + np.int8, + np.int16, + np.int32, + np.int64, + ], ) def test_supported_float_dtype_real(dtype): float_dtype = _supported_float_type(dtype) - if dtype in [np.float16, np.float32, np.int8, np.uint8, np.int16, - np.uint16, bool]: + if dtype in [ + np.float16, + np.float32, + np.int8, + np.uint8, + np.int16, + np.uint16, + bool, + ]: assert float_dtype == np.float32 else: assert float_dtype == np.float64 -@pytest.mark.parametrize('dtype', complex_dtypes) -@pytest.mark.parametrize('allow_complex', [False, True]) +@pytest.mark.parametrize("dtype", complex_dtypes) +@pytest.mark.parametrize("allow_complex", [False, True]) def test_supported_float_dtype_complex(dtype, allow_complex): if allow_complex: float_dtype = _supported_float_type(dtype, allow_complex=allow_complex) @@ -179,21 +217,21 @@ def test_supported_float_dtype_complex(dtype, allow_complex): @pytest.mark.parametrize( - 'dtype', ['f', 'float32', np.float32, np.dtype(np.float32)] + "dtype", ["f", "float32", np.float32, np.dtype(np.float32)] ) def test_supported_float_dtype_input_kinds(dtype): assert _supported_float_type(dtype) == np.float32 @pytest.mark.parametrize( - 'dtypes, expected', + "dtypes, expected", [ ((np.float16, np.float64), np.float64), ([np.float32, np.uint16, np.int8], np.float32), ([np.float32, bool], np.float32), ([np.float32, np.uint32, np.int16], np.float64), ({np.float32, np.float16}, np.float32), - ] + ], ) def test_supported_float_dtype_sequence(dtypes, expected): float_dtype = _supported_float_type(dtypes) @@ -208,7 +246,7 @@ def _decorated_channel_axis_size(x, *, channel_axis=None): return x.shape[-1] -@pytest.mark.parametrize('channel_axis', [None, 0, 1, 2, -1, -2, -3]) +@pytest.mark.parametrize("channel_axis", [None, 0, 1, 2, -1, -2, -3]) def test_decorated_channel_axis_shape(channel_axis): # Verify that channel_as_last_axis modifies the channel_axis as expected diff --git a/python/cucim/src/cucim/skimage/_shared/tests/test_warnings.py b/python/cucim/src/cucim/skimage/_shared/tests/test_warnings.py index 30d697580..9e5db9662 100644 --- a/python/cucim/src/cucim/skimage/_shared/tests/test_warnings.py +++ b/python/cucim/src/cucim/skimage/_shared/tests/test_warnings.py @@ -5,35 +5,35 @@ from cucim.skimage._shared._warnings import 
expected_warnings


-@pytest.fixture(scope='function')
+@pytest.fixture(scope="function")
 def setup():
     # Remove any environment variable if it exists
-    old_strictness = os.environ.pop('SKIMAGE_TEST_STRICT_WARNINGS', None)
+    old_strictness = os.environ.pop("SKIMAGE_TEST_STRICT_WARNINGS", None)
     yield
     # Add the user's desired strictness
     if old_strictness is not None:
-        os.environ['SKIMAGE_TEST_STRICT_WARNINGS'] = old_strictness
+        os.environ["SKIMAGE_TEST_STRICT_WARNINGS"] = old_strictness


 def test_strict_warnigns_default(setup):
     # By default we should fail on missing expected warnings
     with pytest.raises(ValueError):
-        with expected_warnings(['some warnings']):
+        with expected_warnings(["some warnings"]):
             pass


-@pytest.mark.parametrize('strictness', ['1', 'true', 'True', 'TRUE'])
+@pytest.mark.parametrize("strictness", ["1", "true", "True", "TRUE"])
 def test_strict_warning_true(setup, strictness):
-    os.environ['SKIMAGE_TEST_STRICT_WARNINGS'] = strictness
+    os.environ["SKIMAGE_TEST_STRICT_WARNINGS"] = strictness
     with pytest.raises(ValueError):
-        with expected_warnings(['some warnings']):
+        with expected_warnings(["some warnings"]):
             pass


-@pytest.mark.parametrize('strictness', ['0', 'false', 'False', 'FALSE'])
+@pytest.mark.parametrize("strictness", ["0", "false", "False", "FALSE"])
 def test_strict_warning_false(setup, strictness):
-    # If the user doesnn't wish to be strict about warnigns
+    # If the user doesn't wish to be strict about warnings
     # the following shouldn't raise any error
-    os.environ['SKIMAGE_TEST_STRICT_WARNINGS'] = strictness
-    with expected_warnings(['some warnings']):
+    os.environ["SKIMAGE_TEST_STRICT_WARNINGS"] = strictness
+    with expected_warnings(["some warnings"]):
         pass
diff --git a/python/cucim/src/cucim/skimage/_shared/utils.py b/python/cucim/src/cucim/skimage/_shared/utils.py
index 51b25e198..b355c8b17 100644
--- a/python/cucim/src/cucim/skimage/_shared/utils.py
+++ b/python/cucim/src/cucim/skimage/_shared/utils.py
@@ -9,9 +9,18 @@
 from ._warnings import all_warnings, warn  # noqa

-__all__ = ['deprecate_func', 'get_bound_method_class', 'all_warnings',
-           'safe_as_int', 'check_shape_equality', 'check_nD', 'warn',
-           'reshape_nd', 'identity', 'slice_at_axis']
+__all__ = [
+    "deprecate_func",
+    "get_bound_method_class",
+    "all_warnings",
+    "safe_as_int",
+    "check_shape_equality",
+    "check_nD",
+    "warn",
+    "reshape_nd",
+    "identity",
+    "slice_at_axis",
+]


 def _get_stack_rank(func):
@@ -46,8 +55,7 @@ class _DecoratorBaseClass:
     _stack_length = {}

     def get_stack_length(self, func):
-        return self._stack_length.get(func.__name__,
-                                      _get_stack_length(func))
+        return self._stack_length.get(func.__name__, _get_stack_length(func))


 class change_default_value(_DecoratorBaseClass):
@@ -67,8 +75,9 @@ class change_default_value(_DecoratorBaseClass):

     """

-    def __init__(self, arg_name, *, new_value, changed_version,
-                 warning_msg=None):
+    def __init__(
+        self, arg_name, *, new_value, changed_version, warning_msg=None
+    ):
         self.arg_name = arg_name
         self.new_value = new_value
         self.warning_msg = warning_msg
@@ -87,15 +96,17 @@ def __call__(self, func):
                 f"the default {self.arg_name} value is {old_value}. "
                 f"From version {self.changed_version}, the {self.arg_name} "
                 f"default value will be {self.new_value}. To avoid "
-                f"this warning, please explicitly set {self.arg_name} value."
+            )

         @functools.wraps(func)
         def fixed_func(*args, **kwargs):
             stacklevel = 1 + self.get_stack_length(func) - stack_rank
             if len(args) < arg_idx + 1 and self.arg_name not in kwargs.keys():
                 # warn that arg_name default value changed:
-                warnings.warn(self.warning_msg, FutureWarning,
-                              stacklevel=stacklevel)
+                warnings.warn(
+                    self.warning_msg, FutureWarning, stacklevel=stacklevel
+                )
             return func(*args, **kwargs)

         return fixed_func
@@ -141,8 +152,7 @@ def fixed_func(*args, **kwargs):
             stacklevel = 1 + self.get_stack_length(func) - stack_rank
             if len(args) > arg_idx or self.arg_name in kwargs.keys():
                 # warn that arg_name is deprecated
-                warnings.warn(warning_msg, FutureWarning,
-                              stacklevel=stacklevel)
+                warnings.warn(warning_msg, FutureWarning, stacklevel=stacklevel)
             return func(*args, **kwargs)

         return fixed_func
@@ -178,13 +188,13 @@ def _docstring_add_deprecated(func, kwarg_mapping, deprecated_version):
     Doc = FunctionDoc(func)
     for old_arg, new_arg in kwarg_mapping.items():
-        desc = [f'Deprecated in favor of `{new_arg}`.',
-                '',
-                f'.. deprecated:: {deprecated_version}']
-        Doc['Other Parameters'].append(
-            Parameter(name=old_arg,
-                      type='DEPRECATED',
-                      desc=desc)
+        desc = [
+            f"Deprecated in favor of `{new_arg}`.",
+            "",
+            f".. deprecated:: {deprecated_version}",
+        ]
+        Doc["Other Parameters"].append(
+            Parameter(name=old_arg, type="DEPRECATED", desc=desc)
         )
     new_docstring = str(Doc)
@@ -193,7 +203,7 @@ def _docstring_add_deprecated(func, kwarg_mapping, deprecated_version):
     # .. function:: func.__name__
     #
     # and some additional blank lines. We strip these off below.
-    split = new_docstring.split('\n')
+    split = new_docstring.split("\n")
     no_header = split[1:]
     while not no_header[0].strip():
         no_header.pop(0)
@@ -203,13 +213,13 @@ def _docstring_add_deprecated(func, kwarg_mapping, deprecated_version):
     # where it is not.
     descr = no_header.pop(0)
     while no_header[0].strip():
-        descr += '\n    ' + no_header.pop(0)
-    descr += '\n\n'
+        descr += "\n    " + no_header.pop(0)
+    descr += "\n\n"
     # '\n    ' rather than '\n' here to restore the original indentation.
-    final_docstring = descr + '\n    '.join(no_header)
+    final_docstring = descr + "\n    ".join(no_header)
     # strip any extra spaces from ends of lines
-    final_docstring = '\n'.join(
-        [line.rstrip() for line in final_docstring.split('\n')]
+    final_docstring = "\n".join(
+        [line.rstrip() for line in final_docstring.split("\n")]
     )
     return final_docstring
@@ -234,15 +244,24 @@ class deprecate_kwarg(_DecoratorBaseClass):

     """

-    def __init__(self, kwarg_mapping, deprecated_version, warning_msg=None,
-                 removed_version=None):
+    def __init__(
+        self,
+        kwarg_mapping,
+        deprecated_version,
+        warning_msg=None,
+        removed_version=None,
+    ):
         self.kwarg_mapping = kwarg_mapping
         if warning_msg is None:
-            self.warning_msg = ("`{old_arg}` is a deprecated argument name "
-                                "for `{func_name}`. ")
+            self.warning_msg = (
+                "`{old_arg}` is a deprecated argument name "
+                "for `{func_name}`. "
+            )
             if removed_version is not None:
-                self.warning_msg += (f'It will be removed in cuCIM '
-                                     f'version {removed_version}.')
+                self.warning_msg += (
+                    f"It will be removed in cuCIM "
+                    f"version {removed_version}. "
+                )
             self.warning_msg += "Please use `{new_arg}` instead."
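(As an aside to the reformatting: the decorator assembled here is easiest to follow in use. Below is a minimal sketch mirroring the `test_deprecate_kwarg` cases earlier in this patch; the function `scale` and its arguments are hypothetical, invented for illustration.)

```python
import warnings

from cucim.skimage._shared.utils import deprecate_kwarg


@deprecate_kwarg({"old_size": "size"}, deprecated_version="22.02.00")
def scale(image, size=1):
    return image, size


with warnings.catch_warnings(record=True) as record:
    warnings.simplefilter("always")
    # The old keyword still works: it is remapped onto `size`, and a
    # FutureWarning built from `warning_msg` above is emitted.
    assert scale("img", old_size=3) == ("img", 3)

assert issubclass(record[0].category, FutureWarning)
```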
else: self.warning_msg = warning_msg @@ -250,7 +269,6 @@ def __init__(self, kwarg_mapping, deprecated_version, warning_msg=None, self.deprecated_version = deprecated_version def __call__(self, func): - stack_rank = _get_stack_rank(func) @functools.wraps(func) @@ -259,10 +277,15 @@ def fixed_func(*args, **kwargs): for old_arg, new_arg in self.kwarg_mapping.items(): if old_arg in kwargs: # warn that the function interface has changed: - warnings.warn(self.warning_msg.format( - old_arg=old_arg, func_name=func.__name__, - new_arg=new_arg), FutureWarning, - stacklevel=stacklevel) + warnings.warn( + self.warning_msg.format( + old_arg=old_arg, + func_name=func.__name__, + new_arg=new_arg, + ), + FutureWarning, + stacklevel=stacklevel, + ) # Substitute new_arg to old_arg kwargs[new_arg] = kwargs.pop(old_arg) @@ -270,8 +293,9 @@ def fixed_func(*args, **kwargs): return func(*args, **kwargs) if func.__doc__ is not None: - newdoc = _docstring_add_deprecated(func, self.kwarg_mapping, - self.deprecated_version) + newdoc = _docstring_add_deprecated( + func, self.kwarg_mapping, self.deprecated_version + ) fixed_func.__doc__ = newdoc return fixed_func @@ -299,8 +323,13 @@ class channel_as_last_axis: where some or all are multichannel. """ - def __init__(self, channel_arg_positions=(0,), channel_kwarg_names=(), - multichannel_output=True): + + def __init__( + self, + channel_arg_positions=(0,), + channel_kwarg_names=(), + multichannel_output=True, + ): self.arg_positions = set(channel_arg_positions) self.kwarg_names = set(channel_kwarg_names) self.multichannel_output = multichannel_output @@ -308,8 +337,7 @@ def __init__(self, channel_arg_positions=(0,), channel_kwarg_names=(), def __call__(self, func): @functools.wraps(func) def fixed_func(*args, **kwargs): - - channel_axis = kwargs.get('channel_axis', None) + channel_axis = kwargs.get("channel_axis", None) if channel_axis is None: return func(*args, **kwargs) @@ -321,7 +349,8 @@ def fixed_func(*args, **kwargs): channel_axis = (channel_axis,) if len(channel_axis) > 1: raise ValueError( - "only a single channel axis is currently supported") + "only a single channel axis is currently supported" + ) if channel_axis == (-1,) or channel_axis == -1: return func(*args, **kwargs) @@ -387,7 +416,6 @@ def __init__(self, *, deprecated_version, removed_version=None, hint=None): self.hint = hint def __call__(self, func): - message = ( f"`{func.__name__}` is deprecated since version " f"{self.deprecated_version}" @@ -405,26 +433,22 @@ def __call__(self, func): def wrapped(*args, **kwargs): stacklevel = 1 + self.get_stack_length(func) - stack_rank warnings.warn( - message, - category=FutureWarning, - stacklevel=stacklevel + message, category=FutureWarning, stacklevel=stacklevel ) return func(*args, **kwargs) # modify doc string to display deprecation warning - doc = f'**Deprecated:** {message}' + doc = f"**Deprecated:** {message}" if wrapped.__doc__ is None: wrapped.__doc__ = doc else: - wrapped.__doc__ = doc + '\n\n ' + wrapped.__doc__ + wrapped.__doc__ = doc + "\n\n " + wrapped.__doc__ return wrapped def get_bound_method_class(m): - """Return the class for a bound method. 
- - """ + """Return the class for a bound method.""" return m.__self__.__class__ @@ -477,20 +501,21 @@ def safe_as_int(val, atol=1e-3): 53 """ - mod = np.asarray(val) % 1 # Extract mantissa + mod = np.asarray(val) % 1 # Extract mantissa # Check for and subtract any mod values > 0.5 from 1 - if mod.ndim == 0: # Scalar input, cannot be indexed + if mod.ndim == 0: # Scalar input, cannot be indexed if mod > 0.5: mod = 1 - mod - else: # Iterable input, now ndarray + else: # Iterable input, now ndarray mod[mod > 0.5] = 1 - mod[mod > 0.5] # Test on each side of nearest int try: np.testing.assert_allclose(mod, 0, atol=atol) except AssertionError: - raise ValueError(f'Integer argument required but received ' - f'{val}, check inputs.') + raise ValueError( + f"Integer argument required but received " f"{val}, check inputs." + ) return np.round(val).astype(np.int64) @@ -499,7 +524,7 @@ def check_shape_equality(*images): """Check that all images have the same shape""" image0 = images[0] if not all(image0.shape == image.shape for image in images[1:]): - raise ValueError('Input images must have the same dimensions.') + raise ValueError("Input images must have the same dimensions.") return @@ -563,7 +588,7 @@ def reshape_nd(arr, ndim, dim): return np.reshape(arr, new_shape) -def check_nD(array, ndim, arg_name='image'): +def check_nD(array, ndim, arg_name="image"): """ Verify an array meets the desired ndims and array isn't empty. @@ -585,7 +610,7 @@ def check_nD(array, ndim, arg_name='image'): raise ValueError(msg_empty_array % (arg_name)) if array.ndim not in ndim: raise ValueError( - msg_incorrect_dim % (arg_name, '-or-'.join([str(n) for n in ndim])) + msg_incorrect_dim % (arg_name, "-or-".join([str(n) for n in ndim])) ) @@ -612,8 +637,10 @@ def check_random_state(seed): return cp.random.RandomState(seed) if isinstance(seed, cp.random.RandomState): return seed - raise ValueError('%r cannot be used to seed a numpy.random.RandomState' - ' instance' % seed) + raise ValueError( + "%r cannot be used to seed a numpy.random.RandomState" + " instance" % seed + ) def convert_to_float(image, preserve_range): @@ -643,7 +670,7 @@ def convert_to_float(image, preserve_range): if preserve_range: # Convert image to double only if it is not single or double # precision float - if image.dtype.char not in 'df': + if image.dtype.char not in "df": image = image.astype(_supported_float_type(image.dtype)) else: from ..util.dtype import img_as_float @@ -677,22 +704,25 @@ def _validate_interpolation_order(image_dtype, order): return 0 if image_dtype == bool else 1 if order < 0 or order > 5: - raise ValueError("Spline interpolation order has to be in the " - "range 0-5.") + raise ValueError( + "Spline interpolation order has to be in the " "range 0-5." + ) if image_dtype == bool and order != 0: raise ValueError( "Input image dtype is bool. Interpolation is not defined " "with bool data type. Please set order to 0 or explicitly " - "cast input image to another data type.") + "cast input image to another data type." 
+ ) return order def _to_np_mode(mode): """Convert padding modes from `ndi.correlate` to `np.pad`.""" - mode_translation_dict = dict(nearest='edge', reflect='symmetric', - mirror='reflect') + mode_translation_dict = dict( + nearest="edge", reflect="symmetric", mirror="reflect" + ) if mode in mode_translation_dict: mode = mode_translation_dict[mode] return mode @@ -700,42 +730,47 @@ def _to_np_mode(mode): def _to_ndimage_mode(mode): """Convert from `numpy.pad` mode name to the corresponding ndimage mode.""" - mode_translation_dict = dict(constant='constant', edge='nearest', - symmetric='reflect', reflect='mirror', - wrap='wrap') + mode_translation_dict = dict( + constant="constant", + edge="nearest", + symmetric="reflect", + reflect="mirror", + wrap="wrap", + ) if mode not in mode_translation_dict: raise ValueError( f"Unknown mode: '{mode}', or cannot translate mode. The " f"mode should be one of 'constant', 'edge', 'symmetric', " f"'reflect', or 'wrap'. See the documentation of numpy.pad for " - f"more info.") + f"more info." + ) return _fix_ndimage_mode(mode_translation_dict[mode]) def _fix_ndimage_mode(mode): # SciPy 1.6.0 introduced grid variants of constant and wrap which # have less surprising behavior for images. Use these when available - grid_modes = {'constant': 'grid-constant', 'wrap': 'grid-wrap'} + grid_modes = {"constant": "grid-constant", "wrap": "grid-wrap"} return grid_modes.get(mode, mode) new_float_type = { # preserved types - 'f': cp.float32, # float32 - 'd': cp.float64, # float64 - 'F': cp.complex64, # complex64 - 'D': cp.complex128, # complex128 + "f": cp.float32, # float32 + "d": cp.float64, # float64 + "F": cp.complex64, # complex64 + "D": cp.complex128, # complex128 # promoted float types - 'e': cp.float32, # float16 + "e": cp.float32, # float16 # truncated float types - 'g': cp.float64, # float128 (doesn't exist on windows) - 'G': cp.complex128, # complex256 (doesn't exist on windows) + "g": cp.float64, # float128 (doesn't exist on windows) + "G": cp.complex128, # complex256 (doesn't exist on windows) # integer types that can be exactly represented in float32 - 'b': cp.float32, # int8 - 'B': cp.float32, # uint8 - 'h': cp.float32, # int16 - 'H': cp.float32, # uint16 - '?': cp.float32, # bool + "b": cp.float32, # int8 + "B": cp.float32, # uint8 + "h": cp.float32, # int16 + "H": cp.float32, # uint16 + "?": cp.float32, # bool } @@ -765,7 +800,7 @@ def _supported_float_type(input_dtype, allow_complex=False): if isinstance(input_dtype, Iterable) and not isinstance(input_dtype, str): return cp.result_type(*(_supported_float_type(d) for d in input_dtype)) input_dtype = cp.dtype(input_dtype) - if not allow_complex and input_dtype.kind == 'c': + if not allow_complex and input_dtype.kind == "c": raise ValueError("complex valued input is not supported") return new_float_type.get(input_dtype.char, cp.float64) diff --git a/python/cucim/src/cucim/skimage/_shared/version_requirements.py b/python/cucim/src/cucim/skimage/_shared/version_requirements.py index acb302e76..17c7afcc5 100644 --- a/python/cucim/src/cucim/skimage/_shared/version_requirements.py +++ b/python/cucim/src/cucim/skimage/_shared/version_requirements.py @@ -17,13 +17,13 @@ def _check_version(actver, version, cmp_op): Distributed under the terms of the BSD License. 
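(Aside: the `new_float_type` table above is what `_supported_float_type` consults. A small sketch of the resulting promotion rules follows; it assumes cuCIM and a CUDA device are available and exercises a private helper, so treat it as illustration only.)

```python
import cupy as cp

from cucim.skimage._shared.utils import _supported_float_type

# "Small" dtypes promote to float32; everything else falls back to float64.
assert _supported_float_type(cp.uint8) == cp.float32
assert _supported_float_type(cp.int32) == cp.float64
# A sequence of dtypes resolves via cp.result_type over the per-dtype picks.
assert _supported_float_type([cp.float16, cp.float64]) == cp.float64
```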
""" try: - if cmp_op == '>': + if cmp_op == ">": return _version.parse(actver) > _version.parse(version) - elif cmp_op == '>=': + elif cmp_op == ">=": return _version.parse(actver) >= _version.parse(version) - elif cmp_op == '=': + elif cmp_op == "=": return _version.parse(actver) == _version.parse(version) - elif cmp_op == '<': + elif cmp_op == "<": return _version.parse(actver) < _version.parse(version) else: return False @@ -33,9 +33,8 @@ def _check_version(actver, version, cmp_op): def get_module_version(module_name): """Return module version or None if version can't be retrieved.""" - mod = __import__(module_name, - fromlist=[module_name.rpartition('.')[-1]]) - return getattr(mod, '__version__', getattr(mod, 'VERSION', None)) + mod = __import__(module_name, fromlist=[module_name.rpartition(".")[-1]]) + return getattr(mod, "__version__", getattr(mod, "VERSION", None)) def is_installed(name, version=None): @@ -61,7 +60,7 @@ def is_installed(name, version=None): Original Copyright (C) 2009-2011 Pierre Raybaut Licensed under the terms of the MIT License. """ - if name.lower() == 'python': + if name.lower() == "python": actver = sys.version[:6] else: try: @@ -75,14 +74,15 @@ def is_installed(name, version=None): # we lazy import re import re - match = re.search('[0-9]', version) + match = re.search("[0-9]", version) assert match is not None, "Invalid version number" - symb = version[:match.start()] + symb = version[: match.start()] if not symb: - symb = '=' - assert symb in ('>=', '>', '=', '<'),\ + symb = "=" + assert symb in (">=", ">", "=", "<"), ( "Invalid version condition '%s'" % symb - version = version[match.start():] + ) + version = version[match.start() :] return _check_version(actver, version, symb) @@ -120,7 +120,9 @@ def func_wrapped(*args, **kwargs): if version is not None: msg += " %s" % version raise ImportError(msg + '"') + return func_wrapped + return decorator @@ -145,5 +147,4 @@ def get_module(module_name, version=None): """ if not is_installed(module_name, version): return None - return __import__(module_name, - fromlist=[module_name.rpartition('.')[-1]]) + return __import__(module_name, fromlist=[module_name.rpartition(".")[-1]]) diff --git a/python/cucim/src/cucim/skimage/_vendored/_internal.py b/python/cucim/src/cucim/skimage/_vendored/_internal.py index 196486d94..9ac4cef9a 100644 --- a/python/cucim/src/cucim/skimage/_vendored/_internal.py +++ b/python/cucim/src/cucim/skimage/_vendored/_internal.py @@ -7,7 +7,7 @@ try: # try importing Cython-based private axis handling functions from CuPy - if hasattr(cupy, '_core'): + if hasattr(cupy, "_core"): # CuPy 10 renames core->_core from cupy._core.internal import _normalize_axis_index # NOQA from cupy._core.internal import _normalize_axis_indices # NOQA @@ -67,7 +67,7 @@ def _normalize_axis_indices(axes, ndim): # NOQA return tuple(sorted(res)) -if hasattr(math, 'prod'): +if hasattr(math, "prod"): prod = math.prod else: diff --git a/python/cucim/src/cucim/skimage/_vendored/_ndimage_filters.py b/python/cucim/src/cucim/skimage/_vendored/_ndimage_filters.py index 29d3f6707..c9d4880d9 100644 --- a/python/cucim/src/cucim/skimage/_vendored/_ndimage_filters.py +++ b/python/cucim/src/cucim/skimage/_vendored/_ndimage_filters.py @@ -4,20 +4,25 @@ import cupy import numpy -from cucim.skimage._vendored import _internal as internal -from cucim.skimage._vendored import _ndimage_filters_core as _filters_core -from cucim.skimage._vendored import _ndimage_util as _util -from cucim.skimage.filters._separable_filtering import 
(ResourceLimitError, - _shmem_convolve1d) +from cucim.skimage._vendored import ( + _internal as internal, + _ndimage_filters_core as _filters_core, + _ndimage_util as _util, +) +from cucim.skimage.filters._separable_filtering import ( + ResourceLimitError, + _shmem_convolve1d, +) try: from cupy.cuda.compiler import CompileException + compile_errors = (ResourceLimitError, CompileException) except ImportError: compile_errors = (ResourceLimitError,) -def correlate(input, weights, output=None, mode='reflect', cval=0.0, origin=0): +def correlate(input, weights, output=None, mode="reflect", cval=0.0, origin=0): """Multi-dimensional correlate. The array is correlated with the given kernel. @@ -51,7 +56,7 @@ def correlate(input, weights, output=None, mode='reflect', cval=0.0, origin=0): return _correlate_or_convolve(input, weights, output, mode, cval, origin) -def convolve(input, weights, output=None, mode='reflect', cval=0.0, origin=0): +def convolve(input, weights, output=None, mode="reflect", cval=0.0, origin=0): """Multi-dimensional convolution. The array is convolved with the given kernel. @@ -82,12 +87,22 @@ def convolve(input, weights, output=None, mode='reflect', cval=0.0, origin=0): and input is integral) the results may not perfectly match the results from SciPy due to floating-point rounding of intermediate results. """ - return _correlate_or_convolve(input, weights, output, mode, cval, origin, - True) + return _correlate_or_convolve( + input, weights, output, mode, cval, origin, True + ) -def correlate1d(input, weights, axis=-1, output=None, mode="reflect", cval=0.0, - origin=0, *, algorithm=None): +def correlate1d( + input, + weights, + axis=-1, + output=None, + mode="reflect", + cval=0.0, + origin=0, + *, + algorithm=None, +): """One-dimensional correlate. The array is correlated with the given kernel. @@ -122,8 +137,17 @@ def correlate1d(input, weights, axis=-1, output=None, mode="reflect", cval=0.0, ) -def convolve1d(input, weights, axis=-1, output=None, mode="reflect", cval=0.0, - origin=0, *, algorithm=None): +def convolve1d( + input, + weights, + axis=-1, + output=None, + mode="reflect", + cval=0.0, + origin=0, + *, + algorithm=None, +): """One-dimensional convolution. The array is convolved with the given kernel. @@ -157,10 +181,12 @@ def convolve1d(input, weights, axis=-1, output=None, mode="reflect", cval=0.0, ) -def _correlate_or_convolve(input, weights, output, mode, cval, origin, - convolution=False): - origins, int_type = _filters_core._check_nd_args(input, weights, - mode, origin) +def _correlate_or_convolve( + input, weights, output, mode, cval, origin, convolution=False +): + origins, int_type = _filters_core._check_nd_args( + input, weights, mode, origin + ) if weights.size == 0: return cupy.zeros_like(input) @@ -177,41 +203,59 @@ def _correlate_or_convolve(input, weights, output, mode, cval, origin, elif weights.dtype.kind == "c": # numpy.correlate conjugates weights rather than input. 
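(Aside: the comment above cites NumPy's convention for complex weights. A plain-NumPy illustration of that convention, which the `weights.conj()` branch just below compensates for:)

```python
import numpy as np

a = np.array([1.0 + 1.0j])
w = np.array([1.0j])

# np.correlate conjugates its second argument (the weights):
# (1+1j) * conj(1j) = 1 - 1j
print(np.correlate(a, w))  # [1.-1.j]

# np.convolve does not conjugate:
# (1+1j) * 1j = -1 + 1j
print(np.convolve(a, w))  # [-1.+1.j]
```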
weights = weights.conj()
-    weights_dtype = _util._get_weights_dtype(input, weights, use_cucim_casting=True)  # noqa
+    weights_dtype = _util._get_weights_dtype(
+        input, weights, use_cucim_casting=True
+    )  # noqa
     offsets = _filters_core._origins_to_offsets(origins, weights.shape)
-    kernel = _get_correlate_kernel(mode, weights.shape, int_type,
-                                   offsets, cval)
-    output = _filters_core._call_kernel(kernel, input, weights, output,
-                                        weights_dtype=weights_dtype)
+    kernel = _get_correlate_kernel(mode, weights.shape, int_type, offsets, cval)
+    output = _filters_core._call_kernel(
+        kernel, input, weights, output, weights_dtype=weights_dtype
+    )
     return output


-def _correlate_or_convolve1d(input, weights, axis, output, mode, cval, origin,
-                             convolution=False, algorithm=None):
+def _correlate_or_convolve1d(
+    input,
+    weights,
+    axis,
+    output,
+    mode,
+    cval,
+    origin,
+    convolution=False,
+    algorithm=None,
+):
     # Calls fast shared-memory convolution when possible, otherwise falls back
     # to the vendored elementwise _correlate_or_convolve
     default_algorithm = False
     if algorithm is None:
         default_algorithm = True
         if input.ndim == 2 and weights.size <= 256:
-            algorithm = 'shared_memory'
+            algorithm = "shared_memory"
         else:
-            algorithm = 'elementwise'
-    elif algorithm not in ['shared_memory', 'elementwise']:
+            algorithm = "elementwise"
+    elif algorithm not in ["shared_memory", "elementwise"]:
         raise ValueError(
             "algorithm must be 'shared_memory', 'elementwise' or None"
         )
-    if mode == 'wrap':
-        mode = 'grid-wrap'
-    if algorithm == 'shared_memory':
+    if mode == "wrap":
+        mode = "grid-wrap"
+    if algorithm == "shared_memory":
         if input.ndim not in [2, 3]:
             raise NotImplementedError(
                 f"shared_memory not implemented for ndim={input.ndim}"
             )
         try:
-            out = _shmem_convolve1d(input, weights, axis=axis, output=output,
-                                    mode=mode, cval=cval, origin=origin,
-                                    convolution=convolution)
+            out = _shmem_convolve1d(
+                input,
+                weights,
+                axis=axis,
+                output=output,
+                mode=mode,
+                cval=cval,
+                origin=origin,
+                convolution=convolution,
+            )
             return out
         except compile_errors:
             # fallback to elementwise if inadequate shared memory available
@@ -221,8 +265,8 @@ def _correlate_or_convolve1d(input, weights, axis, output, mode, cval, origin,
                 "Inadequate resources for algorithm='shared_memory': "
                 "falling back to the elementwise implementation"
             )
-        algorithm = 'elementwise'
-    if algorithm == 'elementwise':
+        algorithm = "elementwise"
+    if algorithm == "elementwise":
         weights, origins = _filters_core._convert_1d_args(
             input.ndim, weights, origin, axis
         )
@@ -234,15 +278,22 @@ def _correlate_or_convolve1d(input, weights, axis, output, mode, cval, origin,
 @cupy.memoize(for_each_device=True)
 def _get_correlate_kernel(mode, w_shape, int_type, offsets, cval):
     return _filters_core._generate_nd_kernel(
-        'correlate',
-        'W sum = (W)0;',
-        'sum += cast<W>({value}) * wval;',
-        'y = cast<Y>(sum);',
-        mode, w_shape, int_type, offsets, cval, ctype='W')
+        "correlate",
+        "W sum = (W)0;",
+        "sum += cast<W>({value}) * wval;",
+        "y = cast<Y>(sum);",
+        mode,
+        w_shape,
+        int_type,
+        offsets,
+        cval,
+        ctype="W",
+    )


-def _run_1d_correlates(input, params, get_weights, output, mode, cval,
-                       origin=0, **filter_kwargs):
+def _run_1d_correlates(
+    input, params, get_weights, output, mode, cval, origin=0, **filter_kwargs
+):
     """
     Enhanced version of _run_1d_filters that uses correlate1d as the filter
     function.
The params are a list of values to pass to the get_weights @@ -257,11 +308,27 @@ def _run_1d_correlates(input, params, get_weights, output, mode, cval, wghts = [wghts[param] for param in params] return _filters_core._run_1d_filters( [None if w is None else correlate1d for w in wghts], - input, wghts, output, mode, cval, origin, **filter_kwargs) + input, + wghts, + output, + mode, + cval, + origin, + **filter_kwargs, + ) -def uniform_filter1d(input, size, axis=-1, output=None, mode="reflect", - cval=0.0, origin=0, *, algorithm=None): +def uniform_filter1d( + input, + size, + axis=-1, + output=None, + mode="reflect", + cval=0.0, + origin=0, + *, + algorithm=None, +): """One-dimensional uniform filter along the given axis. The lines of the array along the given axis are filtered with a uniform @@ -294,12 +361,21 @@ def uniform_filter1d(input, size, axis=-1, output=None, mode="reflect", """ weights_dtype = cupy.promote_types(input.dtype, cupy.float32) weights = cupy.full(size, 1 / size, dtype=weights_dtype) - return correlate1d(input, weights, axis, output, mode, cval, - origin, algorithm=algorithm) + return correlate1d( + input, weights, axis, output, mode, cval, origin, algorithm=algorithm + ) -def uniform_filter(input, size=3, output=None, mode="reflect", cval=0.0, - origin=0, *, algorithm=None): +def uniform_filter( + input, + size=3, + output=None, + mode="reflect", + cval=0.0, + origin=0, + *, + algorithm=None, +): """Multi-dimensional uniform filter. Args: @@ -328,20 +404,33 @@ def uniform_filter(input, size=3, output=None, mode="reflect", cval=0.0, and input is integral) the results may not perfectly match the results from SciPy due to floating-point rounding of intermediate results. """ - sizes = _util._fix_sequence_arg(size, input.ndim, 'size', int) + sizes = _util._fix_sequence_arg(size, input.ndim, "size", int) weights_dtype = cupy.promote_types(input.dtype, cupy.float32) def get(size): - return None if size <= 1 else cupy.full(size, 1 / size, dtype=weights_dtype) # noqa + return ( + None + if size <= 1 + else cupy.full(size, 1 / size, dtype=weights_dtype) + ) # noqa return _run_1d_correlates( input, sizes, get, output, mode, cval, origin, algorithm=algorithm ) -def gaussian_filter1d(input, sigma, axis=-1, order=0, output=None, - mode="reflect", cval=0.0, truncate=4.0, *, - algorithm=None): +def gaussian_filter1d( + input, + sigma, + axis=-1, + order=0, + output=None, + mode="reflect", + cval=0.0, + truncate=4.0, + *, + algorithm=None, +): """One-dimensional Gaussian filter along the given axis. The lines of the array along the given axis are filtered with a Gaussian @@ -382,8 +471,17 @@ def gaussian_filter1d(input, sigma, axis=-1, order=0, output=None, ) -def gaussian_filter(input, sigma, order=0, output=None, mode="reflect", - cval=0.0, truncate=4.0, *, algorithm=None): +def gaussian_filter( + input, + sigma, + order=0, + output=None, + mode="reflect", + cval=0.0, + truncate=4.0, + *, + algorithm=None, +): """Multi-dimensional Gaussian filter. Args: @@ -414,8 +512,8 @@ def gaussian_filter(input, sigma, order=0, output=None, mode="reflect", and input is integral) the results may not perfectly match the results from SciPy due to floating-point rounding of intermediate results. 
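(Aside: `uniform_filter` and `gaussian_filter` above are both assembled from per-axis 1-D passes via `_run_1d_correlates`. A CPU sketch with SciPy, which this vendored code tracks, showing the separable decomposition:)

```python
import numpy as np
from scipy import ndimage as ndi

rng = np.random.default_rng(0)
img = rng.random((64, 64)).astype(np.float32)

# Filtering once per axis with the 1-D kernel reproduces the n-D filter.
full = ndi.uniform_filter(img, size=5)
by_axis = ndi.uniform_filter1d(img, 5, axis=0)
by_axis = ndi.uniform_filter1d(by_axis, 5, axis=1)
np.testing.assert_allclose(full, by_axis, rtol=1e-5)
```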
""" - sigmas = _util._fix_sequence_arg(sigma, input.ndim, 'sigma', float) - orders = _util._fix_sequence_arg(order, input.ndim, 'order', int) + sigmas = _util._fix_sequence_arg(sigma, input.ndim, "sigma", float) + orders = _util._fix_sequence_arg(order, input.ndim, "order", int) truncate = float(truncate) weights_dtype = cupy.promote_types(input, cupy.float32) @@ -426,8 +524,16 @@ def get(param, dtype=weights_dtype): return None return _gaussian_kernel1d(sigma, order, radius, dtype) - return _run_1d_correlates(input, list(zip(sigmas, orders)), get, output, - mode, cval, 0, algorithm=algorithm) + return _run_1d_correlates( + input, + list(zip(sigmas, orders)), + get, + output, + mode, + cval, + 0, + algorithm=algorithm, + ) def _gaussian_kernel1d(sigma, order, radius, dtype=cupy.float64): @@ -435,10 +541,10 @@ def _gaussian_kernel1d(sigma, order, radius, dtype=cupy.float64): Computes a 1-D Gaussian correlation kernel. """ if order < 0: - raise ValueError('order must be non-negative') + raise ValueError("order must be non-negative") sigma2 = sigma * sigma x = numpy.arange(-radius, radius + 1) - phi_x = numpy.exp(-0.5 / sigma2 * x ** 2) + phi_x = numpy.exp(-0.5 / sigma2 * x**2) phi_x /= phi_x.sum() if order == 0: @@ -458,11 +564,12 @@ def _gaussian_kernel1d(sigma, order, radius, dtype=cupy.float64): for _ in range(order): q = Q_deriv.dot(q) q = (x[:, None] ** exponent_range).dot(q) - return cupy.asarray((q * phi_x)[::-1], order='C', dtype=dtype) + return cupy.asarray((q * phi_x)[::-1], order="C", dtype=dtype) -def prewitt(input, axis=-1, output=None, mode="reflect", cval=0.0, *, - algorithm=None): +def prewitt( + input, axis=-1, output=None, mode="reflect", cval=0.0, *, algorithm=None +): """Compute a Prewitt filter along the given axis. Args: @@ -488,13 +595,12 @@ def prewitt(input, axis=-1, output=None, mode="reflect", cval=0.0, *, """ weights_dtype = cupy.promote_types(input.dtype, cupy.float32) smooth = cupy.ones(3, dtype=weights_dtype) - return _prewitt_or_sobel( - input, axis, output, mode, cval, smooth, algorithm - ) + return _prewitt_or_sobel(input, axis, output, mode, cval, smooth, algorithm) -def sobel(input, axis=-1, output=None, mode="reflect", cval=0.0, *, - algorithm=None): +def sobel( + input, axis=-1, output=None, mode="reflect", cval=0.0, *, algorithm=None +): """Compute a Sobel filter along the given axis. 
Args: @@ -520,9 +626,7 @@ def sobel(input, axis=-1, output=None, mode="reflect", cval=0.0, *, """ weights_dtype = cupy.promote_types(input.dtype, cupy.float32) smooth = cupy.array([1, 2, 1], dtype=weights_dtype) - return _prewitt_or_sobel( - input, axis, output, mode, cval, smooth, algorithm - ) + return _prewitt_or_sobel(input, axis, output, mode, cval, smooth, algorithm) def _prewitt_or_sobel(input, axis, output, mode, cval, weights, algorithm): @@ -531,14 +635,30 @@ def _prewitt_or_sobel(input, axis, output, mode, cval, weights, algorithm): weights_dtype = cupy.promote_types(input.dtype, cupy.float32) def get(is_diff, dtype=weights_dtype): - return cupy.array([-1, 0, 1], dtype=dtype) if is_diff else weights # noqa + return ( + cupy.array([-1, 0, 1], dtype=dtype) if is_diff else weights + ) # noqa - return _run_1d_correlates(input, [a == axis for a in range(input.ndim)], - get, output, mode, cval, algorithm=algorithm) + return _run_1d_correlates( + input, + [a == axis for a in range(input.ndim)], + get, + output, + mode, + cval, + algorithm=algorithm, + ) -def generic_laplace(input, derivative2, output=None, mode="reflect", - cval=0.0, extra_arguments=(), extra_keywords=None): +def generic_laplace( + input, + derivative2, + output=None, + mode="reflect", + cval=0.0, + extra_arguments=(), + extra_keywords=None, +): """Multi-dimensional Laplace filter using a provided second derivative function. @@ -579,19 +699,26 @@ def generic_laplace(input, derivative2, output=None, mode="reflect", if extra_keywords is None: extra_keywords = {} ndim = input.ndim - modes = _util._fix_sequence_arg(mode, ndim, 'mode', - _util._check_mode) + modes = _util._fix_sequence_arg(mode, ndim, "mode", _util._check_mode) output = _util._get_output(output, input) if ndim == 0: output[:] = input return output - derivative2(input, 0, output, modes[0], cval, - *extra_arguments, **extra_keywords) + derivative2( + input, 0, output, modes[0], cval, *extra_arguments, **extra_keywords + ) if ndim > 1: tmp = _util._get_output(output.dtype, input) for i in range(1, ndim): - derivative2(input, i, tmp, modes[i], cval, - *extra_arguments, **extra_keywords) + derivative2( + input, + i, + tmp, + modes[i], + cval, + *extra_arguments, + **extra_keywords, + ) output += tmp return output @@ -630,8 +757,16 @@ def derivative2(input, axis, output, mode, cval): return generic_laplace(input, derivative2, output, mode, cval) -def gaussian_laplace(input, sigma, output=None, mode="reflect", - cval=0.0, *, algorithm=None, **kwargs): +def gaussian_laplace( + input, + sigma, + output=None, + mode="reflect", + cval=0.0, + *, + algorithm=None, + **kwargs, +): """Multi-dimensional Laplace filter using Gaussian second derivatives. Args: @@ -658,17 +793,33 @@ def gaussian_laplace(input, sigma, output=None, mode="reflect", and input is integral) the results may not perfectly match the results from SciPy due to floating-point rounding of intermediate results. 
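(Aside: `gaussian_laplace` is the sum over axes of Gaussian second-derivative filters, which is exactly what the `derivative2` callback defined just below hands to `generic_laplace`. A CPU sketch using SciPy as a stand-in for the CuPy implementation:)

```python
import numpy as np
from scipy import ndimage as ndi

rng = np.random.default_rng(0)
img = rng.random((32, 32))

log = ndi.gaussian_laplace(img, sigma=2.0)

# Sum the per-axis second-derivative Gaussian filters by hand.
manual = np.zeros_like(img)
for axis in range(img.ndim):
    order = [0] * img.ndim
    order[axis] = 2
    manual += ndi.gaussian_filter(img, sigma=2.0, order=order)

np.testing.assert_allclose(log, manual, rtol=1e-7, atol=1e-12)
```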
""" + def derivative2(input, axis, output, mode, cval): order = [0] * input.ndim order[axis] = 2 - return gaussian_filter(input, sigma, order, output, mode, cval, - algorithm=algorithm, **kwargs) + return gaussian_filter( + input, + sigma, + order, + output, + mode, + cval, + algorithm=algorithm, + **kwargs, + ) + return generic_laplace(input, derivative2, output, mode, cval) -def generic_gradient_magnitude(input, derivative, output=None, - mode="reflect", cval=0.0, - extra_arguments=(), extra_keywords=None): +def generic_gradient_magnitude( + input, + derivative, + output=None, + mode="reflect", + cval=0.0, + extra_arguments=(), + extra_keywords=None, +): """Multi-dimensional gradient magnitude filter using a provided derivative function. @@ -709,27 +860,42 @@ def generic_gradient_magnitude(input, derivative, output=None, if extra_keywords is None: extra_keywords = {} ndim = input.ndim - modes = _util._fix_sequence_arg(mode, ndim, 'mode', - _util._check_mode) + modes = _util._fix_sequence_arg(mode, ndim, "mode", _util._check_mode) output = _util._get_output(output, input) if ndim == 0: output[:] = input return output - derivative(input, 0, output, modes[0], cval, - *extra_arguments, **extra_keywords) + derivative( + input, 0, output, modes[0], cval, *extra_arguments, **extra_keywords + ) output *= output if ndim > 1: tmp = _util._get_output(output.dtype, input) for i in range(1, ndim): - derivative(input, i, tmp, modes[i], cval, - *extra_arguments, **extra_keywords) + derivative( + input, + i, + tmp, + modes[i], + cval, + *extra_arguments, + **extra_keywords, + ) tmp *= tmp output += tmp - return cupy.sqrt(output, output, casting='unsafe') - - -def gaussian_gradient_magnitude(input, sigma, output=None, mode="reflect", - cval=0.0, *, algorithm=None, **kwargs): + return cupy.sqrt(output, output, casting="unsafe") + + +def gaussian_gradient_magnitude( + input, + sigma, + output=None, + mode="reflect", + cval=0.0, + *, + algorithm=None, + **kwargs, +): """Multi-dimensional gradient magnitude using Gaussian derivatives. Args: @@ -756,16 +922,33 @@ def gaussian_gradient_magnitude(input, sigma, output=None, mode="reflect", and input is integral) the results may not perfectly match the results from SciPy due to floating-point rounding of intermediate results. """ + def derivative(input, axis, output, mode, cval): order = [0] * input.ndim order[axis] = 1 - return gaussian_filter(input, sigma, order, output, mode, cval, - algorithm=algorithm, **kwargs) + return gaussian_filter( + input, + sigma, + order, + output, + mode, + cval, + algorithm=algorithm, + **kwargs, + ) + return generic_gradient_magnitude(input, derivative, output, mode, cval) -def minimum_filter(input, size=None, footprint=None, output=None, - mode="reflect", cval=0.0, origin=0): +def minimum_filter( + input, + size=None, + footprint=None, + output=None, + mode="reflect", + cval=0.0, + origin=0, +): """Multi-dimensional minimum filter. Args: @@ -793,12 +976,20 @@ def minimum_filter(input, size=None, footprint=None, output=None, .. seealso:: :func:`scipy.ndimage.minimum_filter` """ - return _min_or_max_filter(input, size, footprint, None, output, mode, - cval, origin, 'min') + return _min_or_max_filter( + input, size, footprint, None, output, mode, cval, origin, "min" + ) -def maximum_filter(input, size=None, footprint=None, output=None, - mode="reflect", cval=0.0, origin=0): +def maximum_filter( + input, + size=None, + footprint=None, + output=None, + mode="reflect", + cval=0.0, + origin=0, +): """Multi-dimensional maximum filter. 
Args: @@ -826,12 +1017,14 @@ def maximum_filter(input, size=None, footprint=None, output=None, .. seealso:: :func:`scipy.ndimage.maximum_filter` """ - return _min_or_max_filter(input, size, footprint, None, output, mode, - cval, origin, 'max') + return _min_or_max_filter( + input, size, footprint, None, output, mode, cval, origin, "max" + ) -def _min_or_max_filter(input, size, ftprnt, structure, output, mode, cval, - origin, func): +def _min_or_max_filter( + input, size, ftprnt, structure, output, mode, cval, origin, func +): # structure is used by morphology.grey_erosion() and grey_dilation() # and not by the regular min/max filters @@ -840,36 +1033,51 @@ def _min_or_max_filter(input, size, ftprnt, structure, output, mode, cval, ftprnt = None sizes, ftprnt, structure = _filters_core._check_size_footprint_structure( - input.ndim, size, ftprnt, structure) + input.ndim, size, ftprnt, structure + ) if cval is cupy.nan: raise NotImplementedError("NaN cval is unsupported") if sizes is not None: - # Seperable filter, run as a series of 1D filters - fltr = minimum_filter1d if func == 'min' else maximum_filter1d + # Separable filter, run as a series of 1D filters + fltr = minimum_filter1d if func == "min" else maximum_filter1d return _filters_core._run_1d_filters( [fltr if size > 1 else None for size in sizes], - input, sizes, output, mode, cval, origin) + input, + sizes, + output, + mode, + cval, + origin, + ) - origins, int_type = _filters_core._check_nd_args(input, ftprnt, - mode, origin, 'footprint', - sizes=sizes) + origins, int_type = _filters_core._check_nd_args( + input, ftprnt, mode, origin, "footprint", sizes=sizes + ) if structure is not None and structure.ndim != input.ndim: - raise RuntimeError('structure array has incorrect shape') + raise RuntimeError("structure array has incorrect shape") if ftprnt.size == 0: return cupy.zeros_like(input) offsets = _filters_core._origins_to_offsets(origins, ftprnt.shape) - kernel = _get_min_or_max_kernel(mode, ftprnt.shape, func, - offsets, float(cval), int_type, - has_structure=structure is not None, - has_central_value=bool(ftprnt[offsets])) - return _filters_core._call_kernel(kernel, input, ftprnt, output, - structure, weights_dtype=bool) + kernel = _get_min_or_max_kernel( + mode, + ftprnt.shape, + func, + offsets, + float(cval), + int_type, + has_structure=structure is not None, + has_central_value=bool(ftprnt[offsets]), + ) + return _filters_core._call_kernel( + kernel, input, ftprnt, output, structure, weights_dtype=bool + ) -def minimum_filter1d(input, size, axis=-1, output=None, mode="reflect", - cval=0.0, origin=0): +def minimum_filter1d( + input, size, axis=-1, output=None, mode="reflect", cval=0.0, origin=0 +): """Compute the minimum filter along a single axis. Args: @@ -892,11 +1100,12 @@ def minimum_filter1d(input, size, axis=-1, output=None, mode="reflect", .. seealso:: :func:`scipy.ndimage.minimum_filter1d` """ - return _min_or_max_1d(input, size, axis, output, mode, cval, origin, 'min') + return _min_or_max_1d(input, size, axis, output, mode, cval, origin, "min") -def maximum_filter1d(input, size, axis=-1, output=None, mode="reflect", - cval=0.0, origin=0): +def maximum_filter1d( + input, size, axis=-1, output=None, mode="reflect", cval=0.0, origin=0 +): """Compute the maximum filter along a single axis. Args: @@ -919,58 +1128,101 @@ def maximum_filter1d(input, size, axis=-1, output=None, mode="reflect", .. 
seealso:: :func:`scipy.ndimage.maximum_filter1d`
     """
-    return _min_or_max_1d(input, size, axis, output, mode, cval, origin, 'max')
-
-
-def _min_or_max_1d(input, size, axis=-1, output=None, mode="reflect", cval=0.0,
-                   origin=0, func='min'):
+    return _min_or_max_1d(input, size, axis, output, mode, cval, origin, "max")
+
+
+def _min_or_max_1d(
+    input,
+    size,
+    axis=-1,
+    output=None,
+    mode="reflect",
+    cval=0.0,
+    origin=0,
+    func="min",
+):
     ftprnt = cupy.ones(size, dtype=bool)
-    ftprnt, origin = _filters_core._convert_1d_args(input.ndim, ftprnt,
-                                                    origin, axis)
-    origins, int_type = _filters_core._check_nd_args(input, ftprnt,
-                                                     mode, origin, 'footprint')
+    ftprnt, origin = _filters_core._convert_1d_args(
+        input.ndim, ftprnt, origin, axis
+    )
+    origins, int_type = _filters_core._check_nd_args(
+        input, ftprnt, mode, origin, "footprint"
+    )
     offsets = _filters_core._origins_to_offsets(origins, ftprnt.shape)
-    kernel = _get_min_or_max_kernel(mode, ftprnt.shape, func, offsets,
-                                    float(cval), int_type, has_weights=False)
-    return _filters_core._call_kernel(kernel, input, None, output,
-                                      weights_dtype=bool)
+    kernel = _get_min_or_max_kernel(
+        mode,
+        ftprnt.shape,
+        func,
+        offsets,
+        float(cval),
+        int_type,
+        has_weights=False,
+    )
+    return _filters_core._call_kernel(
+        kernel, input, None, output, weights_dtype=bool
+    )


 @cupy._util.memoize(for_each_device=True)
-def _get_min_or_max_kernel(mode, w_shape, func, offsets, cval, int_type,
-                           has_weights=True, has_structure=False,
-                           has_central_value=True):
+def _get_min_or_max_kernel(
+    mode,
+    w_shape,
+    func,
+    offsets,
+    cval,
+    int_type,
+    has_weights=True,
+    has_structure=False,
+    has_central_value=True,
+):
     # When there are no 'weights' (the footprint, for the 1D variants) then
     # we need to make sure intermediate results are stored as doubles for
     # consistent results with scipy.
-    ctype = 'X' if has_weights else 'double'
-    value = '{value}'
+    ctype = "X" if has_weights else "double"
+    value = "{value}"
     if not has_weights:
-        value = 'cast<double>({})'.format(value)
+        value = "cast<double>({})".format(value)
     # Having a non-flat structure biases the values
     if has_structure:
-        value += ('-' if func == 'min' else '+') + 'cast<X>(sval)'
+        value += ("-" if func == "min" else "+") + "cast<X>(sval)"
     if has_central_value:
-        pre = '{} value = x[i];'
-        found = 'value = {func}({value}, value);'
+        pre = "{} value = x[i];"
+        found = "value = {func}({value}, value);"
     else:
         # If the central pixel is not included in the footprint we cannot
         # assume `x[i]` is not below the min or above the max and thus cannot
         # seed with that value. Instead we keep track of having set `value`.
-        pre = '{} value; bool set = false;'
-        found = 'value = set ? {func}({value}, value) : {value}; set=true;'
+        pre = "{} value; bool set = false;"
+        found = "value = set ?
{func}({value}, value) : {value}; set=true;"
     return _filters_core._generate_nd_kernel(
-        func, pre.format(ctype),
-        found.format(func=func, value=value), 'y = cast<Y>(value);',
-        mode, w_shape, int_type, offsets, cval, ctype=ctype,
-        has_weights=has_weights, has_structure=has_structure)
+        func,
+        pre.format(ctype),
+        found.format(func=func, value=value),
+        "y = cast<Y>(value);",
+        mode,
+        w_shape,
+        int_type,
+        offsets,
+        cval,
+        ctype=ctype,
+        has_weights=has_weights,
+        has_structure=has_structure,
+    )


-def rank_filter(input, rank, size=None, footprint=None, output=None,
-                mode="reflect", cval=0.0, origin=0):
+def rank_filter(
+    input,
+    rank,
+    size=None,
+    footprint=None,
+    output=None,
+    mode="reflect",
+    cval=0.0,
+    origin=0,
+):
     """Multi-dimensional rank filter.

     Args:
@@ -1001,12 +1253,27 @@ def rank_filter(input, rank, size=None, footprint=None, output=None,
     .. seealso:: :func:`scipy.ndimage.rank_filter`
     """
     rank = int(rank)
-    return _rank_filter(input, lambda fs: rank + fs if rank < 0 else rank,
-                        size, footprint, output, mode, cval, origin)
+    return _rank_filter(
+        input,
+        lambda fs: rank + fs if rank < 0 else rank,
+        size,
+        footprint,
+        output,
+        mode,
+        cval,
+        origin,
+    )


-def median_filter(input, size=None, footprint=None, output=None,
-                  mode="reflect", cval=0.0, origin=0):
+def median_filter(
+    input,
+    size=None,
+    footprint=None,
+    output=None,
+    mode="reflect",
+    cval=0.0,
+    origin=0,
+):
     """Multi-dimensional median filter.

     Args:
@@ -1034,12 +1301,21 @@ def median_filter(input, size=None, footprint=None, output=None,
     .. seealso:: :func:`scipy.ndimage.median_filter`
     """
-    return _rank_filter(input, lambda fs: fs // 2,
-                        size, footprint, output, mode, cval, origin)
+    return _rank_filter(
+        input, lambda fs: fs // 2, size, footprint, output, mode, cval, origin
+    )


-def percentile_filter(input, percentile, size=None, footprint=None,
-                      output=None, mode="reflect", cval=0.0, origin=0):
+def percentile_filter(
+    input,
+    percentile,
+    size=None,
+    footprint=None,
+    output=None,
+    mode="reflect",
+    cval=0.0,
+    origin=0,
+):
     """Multi-dimensional percentile filter.
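(Aside: as the `lambda fs: fs // 2` above shows, a median filter is just a rank filter at the middle rank. A CPU illustration with SciPy, which these vendored functions mirror:)

```python
import numpy as np
from scipy import ndimage as ndi

rng = np.random.default_rng(0)
img = rng.random((32, 32))

# size=3 gives a 3x3 footprint of 9 pixels, so the median is rank 9 // 2 == 4.
med = ndi.median_filter(img, size=3)
rank4 = ndi.rank_filter(img, rank=4, size=3)
np.testing.assert_array_equal(med, rank4)
```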
Args: @@ -1073,26 +1349,40 @@ def percentile_filter(input, percentile, size=None, footprint=None, if percentile < 0.0: percentile += 100.0 if percentile < 0.0 or percentile > 100.0: - raise RuntimeError('invalid percentile') + raise RuntimeError("invalid percentile") if percentile == 100.0: + def get_rank(fs): return fs - 1 + else: + def get_rank(fs): return int(float(fs) * percentile / 100.0) - return _rank_filter(input, get_rank, - size, footprint, output, mode, cval, origin) + + return _rank_filter( + input, get_rank, size, footprint, output, mode, cval, origin + ) -def _rank_filter(input, get_rank, size=None, footprint=None, output=None, - mode="reflect", cval=0.0, origin=0): +def _rank_filter( + input, + get_rank, + size=None, + footprint=None, + output=None, + mode="reflect", + cval=0.0, + origin=0, +): sizes, footprint, _ = _filters_core._check_size_footprint_structure( - input.ndim, size, footprint, None, force_footprint=False) + input.ndim, size, footprint, None, force_footprint=False + ) if cval is cupy.nan: raise NotImplementedError("NaN cval is unsupported") - origins, int_type = _filters_core._check_nd_args(input, footprint, - mode, origin, 'footprint', - sizes=sizes) + origins, int_type = _filters_core._check_nd_args( + input, footprint, mode, origin, "footprint", sizes=sizes + ) has_weights = True if sizes is not None: has_weights = False @@ -1115,29 +1405,55 @@ def _rank_filter(input, get_rank, size=None, footprint=None, output=None, rank = get_rank(filter_size) if rank < 0 or rank >= filter_size: - raise RuntimeError('rank not within filter footprint size') + raise RuntimeError("rank not within filter footprint size") if rank == 0: - min_max_op = 'min' + min_max_op = "min" elif rank == filter_size - 1: - min_max_op = 'max' + min_max_op = "max" else: min_max_op = None if min_max_op is not None: if sizes is not None: - return _min_or_max_filter(input, sizes[0], None, None, output, - mode, cval, origins, min_max_op) + return _min_or_max_filter( + input, + sizes[0], + None, + None, + output, + mode, + cval, + origins, + min_max_op, + ) else: - return _min_or_max_filter(input, None, footprint, None, output, - mode, cval, origins, min_max_op) + return _min_or_max_filter( + input, + None, + footprint, + None, + output, + mode, + cval, + origins, + min_max_op, + ) offsets = _filters_core._origins_to_offsets(origins, footprint_shape) - kernel = _get_rank_kernel(filter_size, rank, mode, footprint_shape, - offsets, float(cval), int_type, - has_weights=has_weights) - return _filters_core._call_kernel(kernel, input, footprint, output, - weights_dtype=bool) + kernel = _get_rank_kernel( + filter_size, + rank, + mode, + footprint_shape, + offsets, + float(cval), + int_type, + has_weights=has_weights, + ) + return _filters_core._call_kernel( + kernel, input, footprint, output, weights_dtype=bool + ) -__SHELL_SORT = ''' +__SHELL_SORT = """ __device__ void sort(X *array, int size) {{ int gap = {gap}; while (gap > 1) {{ @@ -1152,7 +1468,7 @@ def _rank_filter(input, get_rank, size=None, footprint=None, output=None, array[j + gap] = value; }} }} -}}''' +}}""" @cupy._util.memoize() @@ -1164,8 +1480,9 @@ def _get_shell_gap(filter_size): @cupy._util.memoize(for_each_device=True) -def _get_rank_kernel(filter_size, rank, mode, w_shape, offsets, cval, - int_type, has_weights): +def _get_rank_kernel( + filter_size, rank, mode, w_shape, offsets, cval, int_type, has_weights +): s_rank = min(rank, filter_size - rank - 1) # The threshold was set based on the measurements on a V100 # TODO(leofang, 
anaruse): Use Optuna to automatically tune the threshold,
@@ -1176,11 +1493,11 @@ def _get_rank_kernel(filter_size, rank, mode, w_shape, offsets, cval,
         # selection sort approach is faster than general sorting approach
         # using shell sort.
         if s_rank == rank:
-            comp_op = '<'
+            comp_op = "<"
         else:
-            comp_op = '>'
+            comp_op = ">"
         array_size = s_rank + 2
-        found_post = '''
+        found_post = """
             if (iv > {rank} + 1) {{{{
                 int target_iv = 0;
                 X target_val = values[0];
                 for (int jv = 1; jv <= {rank} + 1; jv++) {{{{
                     if (target_val {comp_op} values[jv]) {{{{
                         target_iv = jv;
                         target_val = values[jv];
                     }}}}
                 }}}}
                 if (target_iv <= {rank}) {{{{
                     values[target_iv] = values[{rank} + 1];
                 }}}}
                 iv = {rank} + 1;
-            }}}}'''.format(rank=s_rank, comp_op=comp_op)
-        post = '''
+            }}}}""".format(
+            rank=s_rank, comp_op=comp_op
+        )
+        post = """
             X target_val = values[0];
             for (int jv = 1; jv <= {rank}; jv++) {{
                 if (target_val {comp_op} values[jv]) {{
                     target_val = values[jv];
                 }}
             }}
-            y=cast<Y>(target_val);'''.format(rank=s_rank, comp_op=comp_op)
-        sorter = ''
+            y=cast<Y>(target_val);""".format(
+            rank=s_rank, comp_op=comp_op
+        )
+        sorter = ""
     else:
         array_size = filter_size
-        found_post = ''
-        post = 'sort(values,{});\ny=cast<Y>(values[{}]);'.format(
-            filter_size, rank)
+        found_post = ""
+        post = "sort(values,{});\ny=cast<Y>(values[{}]);".format(
+            filter_size, rank
+        )
         sorter = __SHELL_SORT.format(gap=_get_shell_gap(filter_size))

     return _filters_core._generate_nd_kernel(
-        'rank_{}_{}'.format(filter_size, rank),
-        'int iv = 0;\nX values[{}];'.format(array_size),
-        'values[iv++] = {value};' + found_post, post,
-        mode, w_shape, int_type, offsets, cval, has_weights=has_weights,
-        preamble=sorter)
+        "rank_{}_{}".format(filter_size, rank),
+        "int iv = 0;\nX values[{}];".format(array_size),
+        "values[iv++] = {value};" + found_post,
+        post,
+        mode,
+        w_shape,
+        int_type,
+        offsets,
+        cval,
+        has_weights=has_weights,
+        preamble=sorter,
+    )
diff --git a/python/cucim/src/cucim/skimage/_vendored/_ndimage_filters_core.py b/python/cucim/src/cucim/skimage/_vendored/_ndimage_filters_core.py
index a09e22306..e1c8ade57 100644
--- a/python/cucim/src/cucim/skimage/_vendored/_ndimage_filters_core.py
+++ b/python/cucim/src/cucim/skimage/_vendored/_ndimage_filters_core.py
@@ -4,30 +4,37 @@
 import cupy
 import numpy

-from cucim.skimage._vendored import _internal as internal
-from cucim.skimage._vendored import _ndimage_util as _util
+from cucim.skimage._vendored import (
+    _internal as internal,
+    _ndimage_util as _util,
+)


 def _origins_to_offsets(origins, w_shape):
     return tuple(x // 2 + o for x, o in zip(w_shape, origins))


-def _check_size_footprint_structure(ndim, size, footprint, structure,
-                                    stacklevel=3, force_footprint=False):
+def _check_size_footprint_structure(
+    ndim, size, footprint, structure, stacklevel=3, force_footprint=False
+):
     if structure is None and footprint is None:
         if size is None:
             raise RuntimeError("no footprint or filter size provided")
-        sizes = _util._fix_sequence_arg(size, ndim, 'size', int)
+        sizes = _util._fix_sequence_arg(size, ndim, "size", int)
         if force_footprint:
             return None, cupy.ones(sizes, bool), None
         return sizes, None, None
     if size is not None:
-        warnings.warn("ignoring size because {} is set".format(
-            'structure' if footprint is None else 'footprint'),
-            UserWarning, stacklevel=stacklevel + 1)
+        warnings.warn(
+            "ignoring size because {} is set".format(
+                "structure" if footprint is None else "footprint"
+            ),
+            UserWarning,
+            stacklevel=stacklevel + 1,
+        )

     if footprint is not None:
-        footprint = cupy.array(footprint, bool, True, 'C')
+        footprint = cupy.array(footprint, bool, True, "C")
         if not
footprint.any(): raise ValueError("all-zero footprint is not supported") @@ -46,7 +53,7 @@ def _check_size_footprint_structure(ndim, size, footprint, structure, def _convert_1d_args(ndim, weights, origin, axis): if weights.ndim != 1 or weights.size < 1: - raise RuntimeError('incorrect filter size') + raise RuntimeError("incorrect filter size") axis = internal._normalize_axis_index(axis, ndim) w_shape = [1] * ndim w_shape[axis] = weights.size @@ -56,8 +63,9 @@ def _convert_1d_args(ndim, weights, origin, axis): return weights, tuple(origins) -def _check_nd_args(input, weights, mode, origin, wghts_name='filter weights', - sizes=None): +def _check_nd_args( + input, weights, mode, origin, wghts_name="filter weights", sizes=None +): _util._check_mode(mode) if weights is not None: # Weights must always be less than 2 GiB @@ -74,14 +82,15 @@ def _check_nd_args(input, weights, mode, origin, wghts_name='filter weights', raise ValueError("must specify either weights array or sizes") else: weight_dims = sizes - origins = _util._fix_sequence_arg(origin, len(weight_dims), 'origin', int) + origins = _util._fix_sequence_arg(origin, len(weight_dims), "origin", int) for origin, width in zip(origins, weight_dims): _util._check_origin(origin, width) return tuple(origins), _util._get_inttype(input) -def _run_1d_filters(filters, input, args, output, mode, cval, origin=0, - **filter_kwargs): +def _run_1d_filters( + filters, input, args, output, mode, cval, origin=0, **filter_kwargs +): """ Runs a series of 1D filters forming an nd filter. The filters must be a list of callables that take input, arg, axis, output, mode, cval, origin. @@ -89,17 +98,18 @@ def _run_1d_filters(filters, input, args, output, mode, cval, origin=0, filter. Individual filters can be None causing that axis to be skipped. """ output = _util._get_output(output, input) - modes = _util._fix_sequence_arg(mode, input.ndim, 'mode', - _util._check_mode) + modes = _util._fix_sequence_arg(mode, input.ndim, "mode", _util._check_mode) # for filters, "wrap" is a synonym for "grid-wrap". - modes = ['grid-wrap' if m == 'wrap' else m for m in modes] - origins = _util._fix_sequence_arg(origin, input.ndim, 'origin', int) + modes = ["grid-wrap" if m == "wrap" else m for m in modes] + origins = _util._fix_sequence_arg(origin, input.ndim, "origin", int) n_filters = sum(filter is not None for filter in filters) if n_filters == 0: output[:] = input return output # We can't operate in-place efficiently, so use a 2-buffer system - temp = _util._get_output(output.dtype, input) if n_filters > 1 else None # noqa + temp = ( + _util._get_output(output.dtype, input) if n_filters > 1 else None + ) # noqa iterator = zip(filters, args, modes, origins) for axis, (fltr, arg, mode, origin) in enumerate(iterator): if fltr is None: @@ -122,8 +132,15 @@ def _run_1d_filters(filters, input, args, output, mode, cval, origin=0, return input -def _call_kernel(kernel, input, weights, output, structure=None, - weights_dtype=numpy.float64, structure_dtype=numpy.float64): +def _call_kernel( + kernel, + input, + weights, + output, + structure=None, + weights_dtype=numpy.float64, + structure_dtype=numpy.float64, +): """ Calls a constructed ElementwiseKernel. The kernel must take an input image, an optional array of weights, an optional array for the structure, and an @@ -144,18 +161,21 @@ def _call_kernel(kernel, input, weights, output, structure=None, dtype conversion will occur. The input and output are never converted. 
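(Aside: the docstring above describes how `_call_kernel` guards against the output aliasing the input. A minimal sketch of that guard, where `kernel` stands in for a compiled ElementwiseKernel and is hypothetical:)

```python
import cupy


def call_with_alias_guard(kernel, x, out):
    # If `out` may share memory with `x`, compute into a fresh buffer and
    # copy back, so the kernel never reads values it has already written.
    if cupy.shares_memory(out, x, "MAY_SHARE_BOUNDS"):
        final = out
        out = cupy.empty_like(x)
        kernel(x, out)
        final[...] = out
        return final
    kernel(x, out)
    return out
```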
""" args = [input] - complex_output = input.dtype.kind == 'c' + complex_output = input.dtype.kind == "c" if weights is not None: weights = cupy.ascontiguousarray(weights, weights_dtype) - complex_output = complex_output or weights.dtype.kind == 'c' + complex_output = complex_output or weights.dtype.kind == "c" args.append(weights) if structure is not None: structure = cupy.ascontiguousarray(structure, structure_dtype) args.append(structure) output = _util._get_output(output, input, None, complex_output) # noqa - needs_temp = cupy.shares_memory(output, input, 'MAY_SHARE_BOUNDS') + needs_temp = cupy.shares_memory(output, input, "MAY_SHARE_BOUNDS") if needs_temp: - output, temp = _util._get_output(output.dtype, input, None, complex_output), output # noqa + output, temp = ( + _util._get_output(output.dtype, input, None, complex_output), + output, + ) # noqa args.append(output) kernel(*args) if needs_temp: @@ -164,14 +184,14 @@ def _call_kernel(kernel, input, weights, output, structure=None, return output -_ndimage_includes = r''' +_ndimage_includes = r""" #include // let Jitify handle this #include template<> struct std::is_floating_point : std::true_type {}; template<> struct std::is_signed : std::true_type {}; template struct std::is_signed> : std::is_signed {}; -''' +""" _ndimage_CAST_FUNCTION = """ @@ -194,89 +214,115 @@ def _call_kernel(kernel, input, weights, output, structure=None, """ -def _generate_nd_kernel(name, pre, found, post, mode, w_shape, int_type, - offsets, cval, ctype='X', preamble='', options=(), - has_weights=True, has_structure=False, has_mask=False, - binary_morphology=False, all_weights_nonzero=False): +def _generate_nd_kernel( + name, + pre, + found, + post, + mode, + w_shape, + int_type, + offsets, + cval, + ctype="X", + preamble="", + options=(), + has_weights=True, + has_structure=False, + has_mask=False, + binary_morphology=False, + all_weights_nonzero=False, +): # Currently this code uses CArray for weights but avoids using CArray for # the input data and instead does the indexing itself since it is faster. # If CArray becomes faster than follow the comments that start with # CArray: to switch over to using CArray for the input data as well. ndim = len(w_shape) - in_params = 'raw X x' + in_params = "raw X x" if has_weights: - in_params += ', raw W w' + in_params += ", raw W w" if has_structure: - in_params += ', raw S s' + in_params += ", raw S s" if has_mask: - in_params += ', raw M mask' - out_params = 'Y y' + in_params += ", raw M mask" + out_params = "Y y" # for filters, "wrap" is a synonym for "grid-wrap" - mode = 'grid-wrap' if mode == 'wrap' else mode + mode = "grid-wrap" if mode == "wrap" else mode # CArray: remove xstride_{j}=... 
from string - size = ('%s xsize_{j}=x.shape()[{j}], ysize_{j} = _raw_y.shape()[{j}]' - ', xstride_{j}=x.strides()[{j}];' % int_type) + size = ( + "%s xsize_{j}=x.shape()[{j}], ysize_{j} = _raw_y.shape()[{j}]" + ", xstride_{j}=x.strides()[{j}];" % int_type + ) sizes = [size.format(j=j) for j in range(ndim)] inds = _util._generate_indices_ops(ndim, int_type, offsets) # CArray: remove expr entirely - expr = ' + '.join(['ix_{}'.format(j) for j in range(ndim)]) + expr = " + ".join(["ix_{}".format(j) for j in range(ndim)]) - ws_init = ws_pre = ws_post = '' + ws_init = ws_pre = ws_post = "" if has_weights or has_structure: - ws_init = 'int iws = 0;' + ws_init = "int iws = 0;" if has_structure: - ws_pre = 'S sval = s[iws];\n' + ws_pre = "S sval = s[iws];\n" if has_weights: - ws_pre += 'W wval = w[iws];\n' + ws_pre += "W wval = w[iws];\n" if not all_weights_nonzero: - ws_pre += 'if (nonzero(wval))' - ws_post = 'iws++;' + ws_pre += "if (nonzero(wval))" + ws_post = "iws++;" loops = [] for j in range(ndim): if w_shape[j] == 1: # CArray: string becomes 'inds[{j}] = ind_{j};', remove (int_)type - loops.append('{{ {type} ix_{j} = ind_{j} * xstride_{j};'. - format(j=j, type=int_type)) + loops.append( + "{{ {type} ix_{j} = ind_{j} * xstride_{j};".format( + j=j, type=int_type + ) + ) else: boundary = _util._generate_boundary_condition_ops( - mode, 'ix_{}'.format(j), 'xsize_{}'.format(j), int_type) + mode, "ix_{}".format(j), "xsize_{}".format(j), int_type + ) # CArray: last line of string becomes inds[{j}] = ix_{j}; - loops.append(''' + loops.append( + """ for (int iw_{j} = 0; iw_{j} < {wsize}; iw_{j}++) {{ {type} ix_{j} = ind_{j} + iw_{j}; {boundary} ix_{j} *= xstride_{j}; - '''.format(j=j, wsize=w_shape[j], boundary=boundary, type=int_type)) + """.format( + j=j, wsize=w_shape[j], boundary=boundary, type=int_type + ) + ) # CArray: string becomes 'x[inds]', no format call needed - value = '(*(X*)&data[{expr}])'.format(expr=expr) - if mode == 'constant': - cond = ' || '.join(['(ix_{} < 0)'.format(j) for j in range(ndim)]) + value = "(*(X*)&data[{expr}])".format(expr=expr) + if mode == "constant": + cond = " || ".join(["(ix_{} < 0)".format(j) for j in range(ndim)]) if cval is numpy.nan: - cval = 'CUDART_NAN' + cval = "CUDART_NAN" elif cval == numpy.inf: - cval = 'CUDART_INF' + cval = "CUDART_INF" elif cval == -numpy.inf: - cval = '-CUDART_INF' + cval = "-CUDART_INF" if binary_morphology: found = found.format(cond=cond, value=value) else: - if mode == 'constant': - value = '(({cond}) ? cast<{ctype}>({cval}) : {value})'.format( - cond=cond, ctype=ctype, cval=cval, value=value) + if mode == "constant": + value = "(({cond}) ? 
cast<{ctype}>({cval}) : {value})".format( + cond=cond, ctype=ctype, cval=cval, value=value + ) found = found.format(value=value) # CArray: replace comment and next line in string with # {type} inds[{ndim}] = {{0}}; # and add ndim=ndim, type=int_type to format call - operation = ''' + operation = """ {sizes} {inds} // don't use a CArray for indexing (faster to deal with indexing ourselves) @@ -291,23 +337,39 @@ def _generate_nd_kernel(name, pre, found, post, mode, w_shape, int_type, {ws_post} {end_loops} {post} - '''.format(sizes='\n'.join(sizes), inds=inds, pre=pre, post=post, - ws_init=ws_init, ws_pre=ws_pre, ws_post=ws_post, - loops='\n'.join(loops), found=found, end_loops='}' * ndim) - - mode_str = mode.replace('-', '_') # avoid potential hyphen in kernel name - name = 'cupyx_scipy_ndimage_{}_{}d_{}_w{}'.format( - name, ndim, mode_str, '_'.join(['{}'.format(x) for x in w_shape])) + """.format( + sizes="\n".join(sizes), + inds=inds, + pre=pre, + post=post, + ws_init=ws_init, + ws_pre=ws_pre, + ws_post=ws_post, + loops="\n".join(loops), + found=found, + end_loops="}" * ndim, + ) + + mode_str = mode.replace("-", "_") # avoid potential hyphen in kernel name + name = "cupyx_scipy_ndimage_{}_{}d_{}_w{}".format( + name, ndim, mode_str, "_".join(["{}".format(x) for x in w_shape]) + ) if all_weights_nonzero: - name += '_all_nonzero' - if int_type == 'ptrdiff_t': - name += '_i64' + name += "_all_nonzero" + if int_type == "ptrdiff_t": + name += "_i64" if has_structure: - name += '_with_structure' + name += "_with_structure" if has_mask: - name += '_with_mask' + name += "_with_mask" preamble = _ndimage_includes + _ndimage_CAST_FUNCTION + preamble - options += ('--std=c++11', '-DCUPY_USE_JITIFY') - return cupy.ElementwiseKernel(in_params, out_params, operation, name, - reduce_dims=False, preamble=preamble, - options=options) + options += ("--std=c++11", "-DCUPY_USE_JITIFY") + return cupy.ElementwiseKernel( + in_params, + out_params, + operation, + name, + reduce_dims=False, + preamble=preamble, + options=options, + ) diff --git a/python/cucim/src/cucim/skimage/_vendored/_ndimage_interp_kernels.py b/python/cucim/src/cucim/skimage/_vendored/_ndimage_interp_kernels.py index fa65eed36..216a566a1 100644 --- a/python/cucim/src/cucim/skimage/_vendored/_ndimage_interp_kernels.py +++ b/python/cucim/src/cucim/skimage/_vendored/_ndimage_interp_kernels.py @@ -1,16 +1,16 @@ import cupy import numpy -from cucim.skimage._vendored import \ - _ndimage_spline_kernel_weights as _spline_kernel_weights -from cucim.skimage._vendored import \ - _ndimage_spline_prefilter_core as _spline_prefilter_core -from cucim.skimage._vendored import _ndimage_util as _util +from cucim.skimage._vendored import ( + _ndimage_spline_kernel_weights as _spline_kernel_weights, + _ndimage_spline_prefilter_core as _spline_prefilter_core, + _ndimage_util as _util, +) -math_constants_preamble = r''' +math_constants_preamble = r""" // workaround for HIP: line begins with #include #include -''' +""" spline_weights_inline = _spline_kernel_weights.spline_weights_inline @@ -36,11 +36,13 @@ def _get_coord_map(ndim, nprepad=0): """ ops = [] - ops.append('ptrdiff_t ncoords = _ind.size();') - pre = f" + (W){nprepad}" if nprepad > 0 else '' + ops.append("ptrdiff_t ncoords = _ind.size();") + pre = f" + (W){nprepad}" if nprepad > 0 else "" for j in range(ndim): - ops.append(f''' - W c_{j} = coords[i + {j} * ncoords]{pre};''') + ops.append( + f""" + W c_{j} = coords[i + {j} * ncoords]{pre};""" + ) return ops @@ -63,10 +65,12 @@ def 
_get_coord_zoom_and_shift(ndim, nprepad=0): """ ops = [] - pre = f" + (W){nprepad}" if nprepad > 0 else '' + pre = f" + (W){nprepad}" if nprepad > 0 else "" for j in range(ndim): - ops.append(f''' - W c_{j} = zoom[{j}] * ((W)in_coord[{j}] - shift[{j}]){pre};''') + ops.append( + f""" + W c_{j} = zoom[{j}] * ((W)in_coord[{j}] - shift[{j}]){pre};""" + ) return ops @@ -89,10 +93,12 @@ def _get_coord_zoom_and_shift_grid(ndim, nprepad=0): """ ops = [] - pre = f" + (W){nprepad}" if nprepad > 0 else '' + pre = f" + (W){nprepad}" if nprepad > 0 else "" for j in range(ndim): - ops.append(f''' - W c_{j} = zoom[{j}] * ((W)in_coord[{j}] - shift[j] + 0.5) - 0.5{pre};''') + ops.append( + f""" + W c_{j} = zoom[{j}] * ((W)in_coord[{j}] - shift[j] + 0.5) - 0.5{pre};""" + ) return ops @@ -114,10 +120,12 @@ def _get_coord_zoom(ndim, nprepad=0): """ ops = [] - pre = f" + (W){nprepad}" if nprepad > 0 else '' + pre = f" + (W){nprepad}" if nprepad > 0 else "" for j in range(ndim): - ops.append(f''' - W c_{j} = zoom[{j}] * (W)in_coord[{j}]{pre};''') + ops.append( + f""" + W c_{j} = zoom[{j}] * (W)in_coord[{j}]{pre};""" + ) return ops @@ -139,10 +147,12 @@ def _get_coord_zoom_grid(ndim, nprepad=0): """ ops = [] - pre = f" + (W){nprepad}" if nprepad > 0 else '' + pre = f" + (W){nprepad}" if nprepad > 0 else "" for j in range(ndim): - ops.append(f''' - W c_{j} = zoom[{j}] * ((W)in_coord[{j}] + 0.5) - 0.5{pre};''') + ops.append( + f""" + W c_{j} = zoom[{j}] * ((W)in_coord[{j}] + 0.5) - 0.5{pre};""" + ) return ops @@ -162,10 +172,12 @@ def _get_coord_shift(ndim, nprepad=0): """ ops = [] - pre = f" + (W){nprepad}" if nprepad > 0 else '' + pre = f" + (W){nprepad}" if nprepad > 0 else "" for j in range(ndim): - ops.append(f''' - W c_{j} = (W)in_coord[{j}] - shift[{j}]{pre};''') + ops.append( + f""" + W c_{j} = (W)in_coord[{j}] - shift[{j}]{pre};""" + ) return ops @@ -190,42 +202,64 @@ def _get_coord_affine(ndim, nprepad=0): """ ops = [] - pre = f" + (W){nprepad}" if nprepad > 0 else '' + pre = f" + (W){nprepad}" if nprepad > 0 else "" ncol = ndim + 1 for j in range(ndim): - ops.append(f''' - W c_{j} = (W)0.0;''') + ops.append( + f""" + W c_{j} = (W)0.0;""" + ) for k in range(ndim): - ops.append(f''' - c_{j} += mat[{ncol * j + k}] * (W)in_coord[{k}];''') - ops.append(f''' - c_{j} += mat[{ncol * j + ndim}]{pre};''') + ops.append( + f""" + c_{j} += mat[{ncol * j + k}] * (W)in_coord[{k}];""" + ) + ops.append( + f""" + c_{j} += mat[{ncol * j + ndim}]{pre};""" + ) return ops -def _unravel_loop_index(shape, uint_t='unsigned int'): +def _unravel_loop_index(shape, uint_t="unsigned int"): """ declare a multi-index array in_coord and unravel the 1D index, i into it. This code assumes that the array is a C-ordered array. 
""" ndim = len(shape) - code = [f''' + code = [ + f""" {uint_t} in_coord[{ndim}]; - {uint_t} s, t, idx = i;'''] + {uint_t} s, t, idx = i;""" + ] for j in range(ndim - 1, 0, -1): - code.append(f''' + code.append( + f""" s = {shape[j]}; t = idx / s; in_coord[{j}] = idx - t * s; - idx = t;''') - code.append(''' - in_coord[0] = idx;''') - return '\n'.join(code) - - -def _generate_interp_custom(coord_func, ndim, large_int, yshape, mode, cval, - order, name='', integer_output=False, nprepad=0, - omit_in_coord=False): + idx = t;""" + ) + code.append( + """ + in_coord[0] = idx;""" + ) + return "\n".join(code) + + +def _generate_interp_custom( + coord_func, + ndim, + large_int, + yshape, + mode, + cval, + order, + name="", + integer_output=False, + nprepad=0, + omit_in_coord=False, +): """ Args: coord_func (function): generates code to do the coordinate @@ -247,22 +281,22 @@ def _generate_interp_custom(coord_func, ndim, large_int, yshape, mode, cval, """ ops = [] - internal_dtype = 'double' if integer_output else 'Y' - ops.append(f'{internal_dtype} out = 0.0;') + internal_dtype = "double" if integer_output else "Y" + ops.append(f"{internal_dtype} out = 0.0;") if large_int: - uint_t = 'size_t' - int_t = 'ptrdiff_t' + uint_t = "size_t" + int_t = "ptrdiff_t" else: - uint_t = 'unsigned int' - int_t = 'int' + uint_t = "unsigned int" + int_t = "int" # determine strides for x along each axis for j in range(ndim): - ops.append(f'const {int_t} xsize_{j} = x.shape()[{j}];') - ops.append(f'const {uint_t} sx_{ndim - 1} = 1;') + ops.append(f"const {int_t} xsize_{j} = x.shape()[{j}];") + ops.append(f"const {uint_t} sx_{ndim - 1} = 1;") for j in range(ndim - 1, 0, -1): - ops.append(f'const {uint_t} sx_{j - 1} = sx_{j} * xsize_{j};') + ops.append(f"const {uint_t} sx_{j - 1} = sx_{j} * xsize_{j};") if not omit_in_coord: # create in_coords array to store the unraveled indices @@ -272,112 +306,140 @@ def _generate_interp_custom(coord_func, ndim, large_int, yshape, mode, cval, ops = ops + coord_func(ndim, nprepad) if cval is numpy.nan: - cval = '(Y)CUDART_NAN' + cval = "(Y)CUDART_NAN" elif cval == numpy.inf: - cval = '(Y)CUDART_INF' + cval = "(Y)CUDART_INF" elif cval == -numpy.inf: - cval = '(Y)(-CUDART_INF)' + cval = "(Y)(-CUDART_INF)" else: - cval = f'({internal_dtype}){cval}' + cval = f"({internal_dtype}){cval}" - if mode == 'constant': + if mode == "constant": # use cval if coordinate is outside the bounds of x - _cond = ' || '.join( - [f'(c_{j} < 0) || (c_{j} > xsize_{j} - 1)' for j in range(ndim)]) - ops.append(f''' + _cond = " || ".join( + [f"(c_{j} < 0) || (c_{j} > xsize_{j} - 1)" for j in range(ndim)] + ) + ops.append( + f""" if ({_cond}) {{ out = {cval}; }} else - {{''') + {{""" + ) if order == 0: - if mode == 'wrap': - ops.append('double dcoord;') # mode 'wrap' requires this to work + if mode == "wrap": + ops.append("double dcoord;") # mode 'wrap' requires this to work for j in range(ndim): # determine nearest neighbor - if mode == 'wrap': - ops.append(f''' - dcoord = c_{j};''') + if mode == "wrap": + ops.append( + f""" + dcoord = c_{j};""" + ) else: - ops.append(f''' - {int_t} cf_{j} = ({int_t})floor((double)c_{j} + 0.5);''') + ops.append( + f""" + {int_t} cf_{j} = ({int_t})floor((double)c_{j} + 0.5);""" + ) # handle boundary - if mode != 'constant': - if mode == 'wrap': - ixvar = 'dcoord' + if mode != "constant": + if mode == "wrap": + ixvar = "dcoord" float_ix = True else: - ixvar = f'cf_{j}' + ixvar = f"cf_{j}" float_ix = False ops.append( _util._generate_boundary_condition_ops( - mode, ixvar, 
f'xsize_{j}', int_t, float_ix)) - if mode == 'wrap': - ops.append(f''' - {int_t} cf_{j} = ({int_t})floor(dcoord + 0.5);''') + mode, ixvar, f"xsize_{j}", int_t, float_ix + ) + ) + if mode == "wrap": + ops.append( + f""" + {int_t} cf_{j} = ({int_t})floor(dcoord + 0.5);""" + ) # sum over ic_j will give the raveled coordinate in the input - ops.append(f''' - {int_t} ic_{j} = cf_{j} * sx_{j};''') - _coord_idx = ' + '.join([f'ic_{j}' for j in range(ndim)]) - if mode == 'grid-constant': - _cond = ' || '.join([f'(ic_{j} < 0)' for j in range(ndim)]) - ops.append(f''' + ops.append( + f""" + {int_t} ic_{j} = cf_{j} * sx_{j};""" + ) + _coord_idx = " + ".join([f"ic_{j}" for j in range(ndim)]) + if mode == "grid-constant": + _cond = " || ".join([f"(ic_{j} < 0)" for j in range(ndim)]) + ops.append( + f""" if ({_cond}) {{ out = {cval}; }} else {{ out = ({internal_dtype})x[{_coord_idx}]; - }}''') + }}""" + ) else: - ops.append(f''' - out = ({internal_dtype})x[{_coord_idx}];''') + ops.append( + f""" + out = ({internal_dtype})x[{_coord_idx}];""" + ) elif order == 1: for j in range(ndim): # get coordinates for linear interpolation along axis j - ops.append(f''' + ops.append( + f""" {int_t} cf_{j} = ({int_t})floor((double)c_{j}); {int_t} cc_{j} = cf_{j} + 1; {int_t} n_{j} = (c_{j} == cf_{j}) ? 1 : 2; // points needed - ''') + """ + ) - if mode == 'wrap': - ops.append(f''' + if mode == "wrap": + ops.append( + f""" double dcoordf = c_{j}; - double dcoordc = c_{j} + 1;''') + double dcoordc = c_{j} + 1;""" + ) else: # handle boundaries for extension modes. - ops.append(f''' + ops.append( + f""" {int_t} cf_bounded_{j} = cf_{j}; - {int_t} cc_bounded_{j} = cc_{j};''') + {int_t} cc_bounded_{j} = cc_{j};""" + ) - if mode != 'constant': - if mode == 'wrap': - ixvar = 'dcoordf' + if mode != "constant": + if mode == "wrap": + ixvar = "dcoordf" float_ix = True else: - ixvar = f'cf_bounded_{j}' + ixvar = f"cf_bounded_{j}" float_ix = False ops.append( _util._generate_boundary_condition_ops( - mode, ixvar, f'xsize_{j}', int_t, float_ix)) + mode, ixvar, f"xsize_{j}", int_t, float_ix + ) + ) - ixvar = 'dcoordc' if mode == 'wrap' else f'cc_bounded_{j}' + ixvar = "dcoordc" if mode == "wrap" else f"cc_bounded_{j}" ops.append( _util._generate_boundary_condition_ops( - mode, ixvar, f'xsize_{j}', int_t, float_ix)) - if mode == 'wrap': + mode, ixvar, f"xsize_{j}", int_t, float_ix + ) + ) + if mode == "wrap": ops.append( - f''' + f""" {int_t} cf_bounded_{j} = ({int_t})floor(dcoordf);; {int_t} cc_bounded_{j} = ({int_t})floor(dcoordf + 1);; - ''' + """ ) - ops.append(f''' + ops.append( + f""" for (int s_{j} = 0; s_{j} < n_{j}; s_{j}++) {{ W w_{j}; @@ -390,109 +452,138 @@ def _generate_interp_custom(coord_func, ndim, large_int, yshape, mode, cval, {{ w_{j} = c_{j} - (W)cf_{j}; ic_{j} = cc_bounded_{j} * sx_{j}; - }}''') + }}""" + ) elif order > 1: - if mode == 'grid-constant': - spline_mode = 'constant' - elif mode == 'nearest': - spline_mode = 'nearest' + if mode == "grid-constant": + spline_mode = "constant" + elif mode == "nearest": + spline_mode = "nearest" else: spline_mode = _spline_prefilter_core._get_spline_mode(mode) # wx, wy are temporary variables used during spline weight computation - ops.append(f''' + ops.append( + f""" W wx, wy; - {int_t} start;''') + {int_t} start;""" + ) for j in range(ndim): # determine weights along the current axis - ops.append(f''' - W weights_{j}[{order + 1}];''') + ops.append( + f""" + W weights_{j}[{order + 1}];""" + ) ops.append(spline_weights_inline[order].format(j=j, order=order)) # get starting 
coordinate for spline interpolation along axis j - if mode in ['wrap']: - ops.append(f'double dcoord = c_{j};') - coord_var = 'dcoord' + if mode in ["wrap"]: + ops.append(f"double dcoord = c_{j};") + coord_var = "dcoord" ops.append( _util._generate_boundary_condition_ops( - mode, coord_var, f'xsize_{j}', int_t, True)) + mode, coord_var, f"xsize_{j}", int_t, True + ) + ) else: - coord_var = f'(double)c_{j}' + coord_var = f"(double)c_{j}" if order & 1: - op_str = ''' - start = ({int_t})floor({coord_var}) - {order_2};''' + op_str = """ + start = ({int_t})floor({coord_var}) - {order_2};""" else: - op_str = ''' - start = ({int_t})floor({coord_var} + 0.5) - {order_2};''' + op_str = """ + start = ({int_t})floor({coord_var} + 0.5) - {order_2};""" ops.append( op_str.format( int_t=int_t, coord_var=coord_var, order_2=order // 2 - )) + ) + ) # set of coordinate values within spline footprint along axis j - ops.append(f'''{int_t} ci_{j}[{order + 1}];''') + ops.append(f"""{int_t} ci_{j}[{order + 1}];""") for k in range(order + 1): - ixvar = f'ci_{j}[{k}]' - ops.append(f''' - {ixvar} = start + {k};''') + ixvar = f"ci_{j}[{k}]" + ops.append( + f""" + {ixvar} = start + {k};""" + ) ops.append( _util._generate_boundary_condition_ops( - spline_mode, ixvar, f'xsize_{j}', int_t)) + spline_mode, ixvar, f"xsize_{j}", int_t + ) + ) # loop over the order + 1 values in the spline filter - ops.append(f''' + ops.append( + f""" W w_{j}; {int_t} ic_{j}; for (int k_{j} = 0; k_{j} <= {order}; k_{j}++) {{ w_{j} = weights_{j}[k_{j}]; ic_{j} = ci_{j}[k_{j}] * sx_{j}; - ''') + """ + ) if order > 0: - - _weight = ' * '.join([f'w_{j}' for j in range(ndim)]) - _coord_idx = ' + '.join([f'ic_{j}' for j in range(ndim)]) - if mode == 'grid-constant' or (order > 1 and mode == 'constant'): - _cond = ' || '.join([f'(ic_{j} < 0)' for j in range(ndim)]) - ops.append(f''' + _weight = " * ".join([f"w_{j}" for j in range(ndim)]) + _coord_idx = " + ".join([f"ic_{j}" for j in range(ndim)]) + if mode == "grid-constant" or (order > 1 and mode == "constant"): + _cond = " || ".join([f"(ic_{j} < 0)" for j in range(ndim)]) + ops.append( + f""" if ({_cond}) {{ out += {cval} * ({internal_dtype})({_weight}); }} else {{ {internal_dtype} val = ({internal_dtype})x[{_coord_idx}]; out += val * ({internal_dtype})({_weight}); - }}''') + }}""" + ) else: - ops.append(f''' + ops.append( + f""" {internal_dtype} val = ({internal_dtype})x[{_coord_idx}]; - out += val * ({internal_dtype})({_weight});''') + out += val * ({internal_dtype})({_weight});""" + ) - ops.append('}' * ndim) + ops.append("}" * ndim) - if mode == 'constant': - ops.append('}') + if mode == "constant": + ops.append("}") if integer_output: - ops.append('y = (Y)rint((double)out);') + ops.append("y = (Y)rint((double)out);") else: - ops.append('y = (Y)out;') - operation = '\n'.join(ops) - - mode_str = mode.replace('-', '_') # avoid hyphen in kernel name - name = 'cupyx_scipy_ndimage_interpolate_{}_order{}_{}_{}d_y{}'.format( - name, order, mode_str, ndim, '_'.join([f'{j}' for j in yshape]), + ops.append("y = (Y)out;") + operation = "\n".join(ops) + + mode_str = mode.replace("-", "_") # avoid hyphen in kernel name + name = "cupyx_scipy_ndimage_interpolate_{}_order{}_{}_{}d_y{}".format( + name, + order, + mode_str, + ndim, + "_".join([f"{j}" for j in yshape]), ) - if uint_t == 'size_t': - name += '_i64' + if uint_t == "size_t": + name += "_i64" return operation, name @cupy._util.memoize(for_each_device=True) -def _get_map_kernel(ndim, large_int, yshape, mode, cval=0.0, order=1, - integer_output=False, 
nprepad=0): - in_params = 'raw X x, raw W coords' - out_params = 'Y y' +def _get_map_kernel( + ndim, + large_int, + yshape, + mode, + cval=0.0, + order=1, + integer_output=False, + nprepad=0, +): + in_params = "raw X x, raw W coords" + out_params = "Y y" operation, name = _generate_interp_custom( coord_func=_get_coord_map, ndim=ndim, @@ -501,20 +592,29 @@ def _get_map_kernel(ndim, large_int, yshape, mode, cval=0.0, order=1, mode=mode, cval=cval, order=order, - name='map', + name="map", integer_output=integer_output, nprepad=nprepad, omit_in_coord=True, # input image coordinates are not needed ) - return cupy.ElementwiseKernel(in_params, out_params, operation, name, - preamble=math_constants_preamble) + return cupy.ElementwiseKernel( + in_params, out_params, operation, name, preamble=math_constants_preamble + ) @cupy._util.memoize(for_each_device=True) -def _get_shift_kernel(ndim, large_int, yshape, mode, cval=0.0, order=1, - integer_output=False, nprepad=0): - in_params = 'raw X x, raw W shift' - out_params = 'Y y' +def _get_shift_kernel( + ndim, + large_int, + yshape, + mode, + cval=0.0, + order=1, + integer_output=False, + nprepad=0, +): + in_params = "raw X x, raw W shift" + out_params = "Y y" operation, name = _generate_interp_custom( coord_func=_get_coord_shift, ndim=ndim, @@ -523,19 +623,29 @@ def _get_shift_kernel(ndim, large_int, yshape, mode, cval=0.0, order=1, mode=mode, cval=cval, order=order, - name='shift', + name="shift", integer_output=integer_output, nprepad=nprepad, ) - return cupy.ElementwiseKernel(in_params, out_params, operation, name, - preamble=math_constants_preamble) + return cupy.ElementwiseKernel( + in_params, out_params, operation, name, preamble=math_constants_preamble + ) @cupy._util.memoize(for_each_device=True) -def _get_zoom_shift_kernel(ndim, large_int, yshape, mode, cval=0.0, order=1, - integer_output=False, grid_mode=False, nprepad=0): - in_params = 'raw X x, raw W shift, raw W zoom' - out_params = 'Y y' +def _get_zoom_shift_kernel( + ndim, + large_int, + yshape, + mode, + cval=0.0, + order=1, + integer_output=False, + grid_mode=False, + nprepad=0, +): + in_params = "raw X x, raw W shift, raw W zoom" + out_params = "Y y" if grid_mode: zoom_shift_func = _get_coord_zoom_and_shift_grid else: @@ -552,15 +662,25 @@ def _get_zoom_shift_kernel(ndim, large_int, yshape, mode, cval=0.0, order=1, integer_output=integer_output, nprepad=nprepad, ) - return cupy.ElementwiseKernel(in_params, out_params, operation, name, - preamble=math_constants_preamble) + return cupy.ElementwiseKernel( + in_params, out_params, operation, name, preamble=math_constants_preamble + ) @cupy._util.memoize(for_each_device=True) -def _get_zoom_kernel(ndim, large_int, yshape, mode, cval=0.0, order=1, - integer_output=False, grid_mode=False, nprepad=0): - in_params = 'raw X x, raw W zoom' - out_params = 'Y y' +def _get_zoom_kernel( + ndim, + large_int, + yshape, + mode, + cval=0.0, + order=1, + integer_output=False, + grid_mode=False, + nprepad=0, +): + in_params = "raw X x, raw W zoom" + out_params = "Y y" operation, name = _generate_interp_custom( coord_func=_get_coord_zoom_grid if grid_mode else _get_coord_zoom, ndim=ndim, @@ -573,15 +693,24 @@ def _get_zoom_kernel(ndim, large_int, yshape, mode, cval=0.0, order=1, integer_output=integer_output, nprepad=nprepad, ) - return cupy.ElementwiseKernel(in_params, out_params, operation, name, - preamble=math_constants_preamble) + return cupy.ElementwiseKernel( + in_params, out_params, operation, name, preamble=math_constants_preamble + ) 
@cupy._util.memoize(for_each_device=True) -def _get_affine_kernel(ndim, large_int, yshape, mode, cval=0.0, order=1, - integer_output=False, nprepad=0): - in_params = 'raw X x, raw W mat' - out_params = 'Y y' +def _get_affine_kernel( + ndim, + large_int, + yshape, + mode, + cval=0.0, + order=1, + integer_output=False, + nprepad=0, +): + in_params = "raw X x, raw W mat" + out_params = "Y y" operation, name = _generate_interp_custom( coord_func=_get_coord_affine, ndim=ndim, @@ -590,9 +719,10 @@ def _get_affine_kernel(ndim, large_int, yshape, mode, cval=0.0, order=1, mode=mode, cval=cval, order=order, - name='affine', + name="affine", integer_output=integer_output, nprepad=nprepad, ) - return cupy.ElementwiseKernel(in_params, out_params, operation, name, - preamble=math_constants_preamble) + return cupy.ElementwiseKernel( + in_params, out_params, operation, name, preamble=math_constants_preamble + ) diff --git a/python/cucim/src/cucim/skimage/_vendored/_ndimage_interpolation.py b/python/cucim/src/cucim/skimage/_vendored/_ndimage_interpolation.py index ab396a0e5..d77052918 100644 --- a/python/cucim/src/cucim/skimage/_vendored/_ndimage_interpolation.py +++ b/python/cucim/src/cucim/skimage/_vendored/_ndimage_interpolation.py @@ -7,26 +7,38 @@ from cupy import _core from cupy.cuda import runtime -from cucim.skimage._vendored import _ndimage_interp_kernels as _interp_kernels -from cucim.skimage._vendored import \ - _ndimage_spline_prefilter_core as _spline_prefilter_core -from cucim.skimage._vendored import _ndimage_util as _util -from cucim.skimage._vendored import pad +from cucim.skimage._vendored import ( + _ndimage_interp_kernels as _interp_kernels, + _ndimage_spline_prefilter_core as _spline_prefilter_core, + _ndimage_util as _util, + pad, +) from cucim.skimage._vendored._internal import _normalize_axis_index, prod def _check_parameter(func_name, order, mode): if order is None: - warnings.warn(f'Currently the default order of {func_name} is 1. In a ' - 'future release this may change to 3 to match ' - 'scipy.ndimage ') + warnings.warn( + f"Currently the default order of {func_name} is 1. In a " + "future release this may change to 3 to match " + "scipy.ndimage " + ) elif order < 0 or 5 < order: - raise ValueError('spline order is not supported') - - if mode not in ('constant', 'grid-constant', 'nearest', 'mirror', - 'reflect', 'grid-mirror', 'wrap', 'grid-wrap', 'opencv', - '_opencv_edge'): - raise ValueError('boundary mode ({}) is not supported'.format(mode)) + raise ValueError("spline order is not supported") + + if mode not in ( + "constant", + "grid-constant", + "nearest", + "mirror", + "reflect", + "grid-mirror", + "wrap", + "grid-wrap", + "opencv", + "_opencv_edge", + ): + raise ValueError("boundary mode ({}) is not supported".format(mode)) def _get_spline_output(input, output): @@ -35,15 +47,15 @@ def _get_spline_output(input, output): Differs from SciPy by not always forcing the internal floating point dtype to be double precision. 
""" - complex_data = input.dtype.kind == 'c' + complex_data = input.dtype.kind == "c" if complex_data: min_float_dtype = cupy.complex64 else: min_float_dtype = cupy.float32 if isinstance(output, cupy.ndarray): - if complex_data and output.dtype.kind != 'c': + if complex_data and output.dtype.kind != "c": raise ValueError( - 'output must have complex dtype for complex inputs' + "output must have complex dtype for complex inputs" ) float_dtype = cupy.promote_types(output.dtype, min_float_dtype) output_dtype = output.dtype @@ -54,22 +66,25 @@ def _get_spline_output(input, output): output_dtype = cupy.dtype(output) float_dtype = cupy.promote_types(output, min_float_dtype) - if (isinstance(output, cupy.ndarray) - and output.dtype == float_dtype == output_dtype - and output.flags.c_contiguous): + if ( + isinstance(output, cupy.ndarray) + and output.dtype == float_dtype == output_dtype + and output.flags.c_contiguous + ): if output is not input: _core.elementwise_copy(input, output) temp = output else: temp = input.astype(float_dtype, copy=False) temp = cupy.ascontiguousarray(temp) - if cupy.shares_memory(temp, input, 'MAY_SHARE_BOUNDS'): + if cupy.shares_memory(temp, input, "MAY_SHARE_BOUNDS"): temp = temp.copy() return temp, float_dtype, output_dtype -def spline_filter1d(input, order=3, axis=-1, output=cupy.float64, - mode='mirror'): +def spline_filter1d( + input, order=3, axis=-1, output=cupy.float64, mode="mirror" +): """ Calculate a 1-D spline filter along the given axis. @@ -96,7 +111,7 @@ def spline_filter1d(input, order=3, axis=-1, output=cupy.float64, .. seealso:: :func:`scipy.spline_filter1d` """ if order < 0 or order > 5: - raise RuntimeError('spline order not supported') + raise RuntimeError("spline order not supported") x = input ndim = x.ndim axis = _normalize_axis_index(axis, ndim) @@ -114,7 +129,7 @@ def spline_filter1d(input, order=3, axis=-1, output=cupy.float64, pole_type = cupy._core._scalar.get_typename(temp.real.dtype) index_type = _util._get_inttype(input) - index_dtype = cupy.int32 if index_type == 'int' else cupy.int64 + index_dtype = cupy.int32 if index_type == "int" else cupy.int64 n_samples = x.shape[axis] n_signals = x.size // n_samples @@ -152,7 +167,7 @@ def spline_filter1d(input, order=3, axis=-1, output=cupy.float64, return temp.astype(output_dtype, copy=False) -def spline_filter(input, order=3, output=cupy.float64, mode='mirror'): +def spline_filter(input, order=3, output=cupy.float64, mode="mirror"): """Multidimensional spline filter. Args: @@ -173,7 +188,7 @@ def spline_filter(input, order=3, output=cupy.float64, mode='mirror'): .. 
seealso:: :func:`scipy.spline_filter1d` """ if order < 2 or order > 5: - raise RuntimeError('spline order not supported') + raise RuntimeError("spline order not supported") x = input temp, data_dtype, output_dtype = _get_spline_output(x, output) @@ -191,13 +206,13 @@ def spline_filter(input, order=3, output=cupy.float64, mode='mirror'): def _check_coordinates(coordinates, order, allow_float32=True): - if coordinates.dtype.kind == 'f': + if coordinates.dtype.kind == "f": if allow_float32: coord_dtype = cupy.promote_types(coordinates.dtype, cupy.float32) else: coord_dtype = cupy.promote_types(coordinates.dtype, cupy.float64) coordinates = coordinates.astype(coord_dtype, copy=False) - elif coordinates.dtype.kind in 'iu': + elif coordinates.dtype.kind in "iu": if order > 1: # order > 1 (spline) kernels require floating-point coordinates if allow_float32: @@ -210,20 +225,20 @@ def _check_coordinates(coordinates, order, allow_float32=True): ) coordinates = coordinates.astype(coord_dtype) else: - raise ValueError('coordinates should have floating point dtype') + raise ValueError("coordinates should have floating point dtype") if not coordinates.flags.c_contiguous: coordinates = cupy.ascontiguousarray(coordinates) return coordinates def _prepad_for_spline_filter(input, mode, cval): - if mode in ['nearest', 'grid-constant']: + if mode in ["nearest", "grid-constant"]: # these modes need padding to get accurate boundary values npad = 12 # empirical factor chosen by SciPy - if mode == 'grid-constant': - kwargs = dict(mode='constant', constant_values=cval) + if mode == "grid-constant": + kwargs = dict(mode="constant", constant_values=cval) else: - kwargs = dict(mode='edge') + kwargs = dict(mode="edge") padded = pad(input, npad, **kwargs) else: npad = 0 @@ -249,8 +264,15 @@ def _filter_input(image, prefilter, mode, cval, order): return cupy.ascontiguousarray(filtered), npad -def map_coordinates(input, coordinates, output=None, order=3, - mode='constant', cval=0.0, prefilter=True): +def map_coordinates( + input, + coordinates, + output=None, + order=3, + mode="constant", + cval=0.0, + prefilter=True, +): """Map the input array to new coordinates by interpolation. The array of coordinates is used to find, for each point in the output, the @@ -287,33 +309,51 @@ def map_coordinates(input, coordinates, output=None, order=3, .. 
seealso:: :func:`scipy.ndimage.map_coordinates` """ - _check_parameter('map_coordinates', order, mode) + _check_parameter("map_coordinates", order, mode) - if mode == 'opencv' or mode == '_opencv_edge': - input = pad(input, [(1, 1)] * input.ndim, 'constant', - constant_values=cval) + if mode == "opencv" or mode == "_opencv_edge": + input = pad( + input, [(1, 1)] * input.ndim, "constant", constant_values=cval + ) coordinates = cupy.add(coordinates, 1) - mode = 'constant' + mode = "constant" ret = _util._get_output(output, input, coordinates.shape[1:]) - integer_output = ret.dtype.kind in 'iu' + integer_output = ret.dtype.kind in "iu" _util._check_cval(mode, cval, integer_output) - if input.dtype.kind in 'iu': + if input.dtype.kind in "iu": input = input.astype(cupy.float32) coordinates = _check_coordinates(coordinates, order) filtered, nprepad = _filter_input(input, prefilter, mode, cval, order) large_int = max(prod(input.shape), coordinates.shape[0]) > 1 << 31 kern = _interp_kernels._get_map_kernel( - input.ndim, large_int, yshape=coordinates.shape, mode=mode, cval=cval, - order=order, integer_output=integer_output, nprepad=nprepad) + input.ndim, + large_int, + yshape=coordinates.shape, + mode=mode, + cval=cval, + order=order, + integer_output=integer_output, + nprepad=nprepad, + ) kern(filtered, coordinates, ret) return ret -def affine_transform(input, matrix, offset=0.0, output_shape=None, output=None, - order=3, mode='constant', cval=0.0, prefilter=True, *, - texture_memory=False): +def affine_transform( + input, + matrix, + offset=0.0, + output_shape=None, + output=None, + order=3, + mode="constant", + cval=0.0, + prefilter=True, + *, + texture_memory=False, +): """Apply an affine transformation. Given an output image pixel index vector ``o``, the pixel value is @@ -381,22 +421,25 @@ def affine_transform(input, matrix, offset=0.0, output_shape=None, output=None, if runtime.is_hip: raise RuntimeError( - 'HIP currently does not support texture acceleration') - tm_interp = 'linear' if order > 0 else 'nearest' - return _texture.affine_transformation(data=input, - transformation_matrix=matrix, - output_shape=output_shape, - output=output, - interpolation=tm_interp, - mode=mode, - border_value=cval) + "HIP currently does not support texture acceleration" + ) + tm_interp = "linear" if order > 0 else "nearest" + return _texture.affine_transformation( + data=input, + transformation_matrix=matrix, + output_shape=output_shape, + output=output, + interpolation=tm_interp, + mode=mode, + border_value=cval, + ) - _check_parameter('affine_transform', order, mode) + _check_parameter("affine_transform", order, mode) - offset = _util._fix_sequence_arg(offset, input.ndim, 'offset', float) + offset = _util._fix_sequence_arg(offset, input.ndim, "offset", float) if matrix.ndim not in [1, 2] or matrix.shape[0] < 1: - raise RuntimeError('no proper affine matrix provided') + raise RuntimeError("no proper affine matrix provided") if matrix.ndim == 2: if matrix.shape[0] == matrix.shape[1] - 1: offset = matrix[:, -1] @@ -405,9 +448,9 @@ def affine_transform(input, matrix, offset=0.0, output_shape=None, output=None, offset = matrix[:-1, -1] matrix = matrix[:-1, :-1] if matrix.shape != (input.ndim, input.ndim): - raise RuntimeError('improper affine shape') + raise RuntimeError("improper affine shape") - if mode == 'opencv': + if mode == "opencv": m = cupy.zeros((input.ndim + 1, input.ndim + 1)) m[:-1, :-1] = matrix m[:-1, -1] = offset @@ -421,38 +464,53 @@ def affine_transform(input, matrix, offset=0.0, 
output_shape=None, output=None, if output_shape is None: output_shape = input.shape - if mode == 'opencv' or mode == '_opencv_edge': + if mode == "opencv" or mode == "_opencv_edge": if matrix.ndim == 1: matrix = cupy.diag(matrix) coordinates = cupy.indices(output_shape, dtype=cupy.float64) coordinates = cupy.dot(matrix, coordinates.reshape((input.ndim, -1))) coordinates += cupy.expand_dims(cupy.asarray(offset), -1) ret = _util._get_output(output, input, shape=output_shape) - ret[:] = map_coordinates(input, coordinates, ret.dtype, order, mode, - cval, prefilter).reshape(output_shape) + ret[:] = map_coordinates( + input, coordinates, ret.dtype, order, mode, cval, prefilter + ).reshape(output_shape) return ret matrix = matrix.astype(cupy.float64, copy=False) ndim = input.ndim output = _util._get_output(output, input, shape=output_shape) - if input.dtype.kind in 'iu': + if input.dtype.kind in "iu": input = input.astype(cupy.float32) filtered, nprepad = _filter_input(input, prefilter, mode, cval, order) - integer_output = output.dtype.kind in 'iu' + integer_output = output.dtype.kind in "iu" _util._check_cval(mode, cval, integer_output) large_int = max(prod(input.shape), prod(output_shape)) > 1 << 31 if matrix.ndim == 1: offset = cupy.asarray(offset, dtype=cupy.float64) offset = -offset / matrix kern = _interp_kernels._get_zoom_shift_kernel( - ndim, large_int, output_shape, mode, cval=cval, order=order, - integer_output=integer_output, nprepad=nprepad) + ndim, + large_int, + output_shape, + mode, + cval=cval, + order=order, + integer_output=integer_output, + nprepad=nprepad, + ) kern(filtered, offset, matrix, output) else: kern = _interp_kernels._get_affine_kernel( - ndim, large_int, output_shape, mode, cval=cval, order=order, - integer_output=integer_output, nprepad=nprepad) + ndim, + large_int, + output_shape, + mode, + cval=cval, + order=order, + integer_output=integer_output, + nprepad=nprepad, + ) m = cupy.zeros((ndim, ndim + 1), dtype=cupy.float64) m[:, :-1] = matrix m[:, -1] = cupy.asarray(offset, dtype=cupy.float64) @@ -472,8 +530,17 @@ def _minmax(coor, minc, maxc): return minc, maxc -def rotate(input, angle, axes=(1, 0), reshape=True, output=None, order=3, - mode='constant', cval=0.0, prefilter=True): +def rotate( + input, + angle, + axes=(1, 0), + reshape=True, + output=None, + order=3, + mode="constant", + cval=0.0, + prefilter=True, +): """Rotate an array. The array is rotated in the plane defined by the two axes given by the @@ -508,10 +575,10 @@ def rotate(input, angle, axes=(1, 0), reshape=True, output=None, order=3, .. 
seealso:: :func:`scipy.ndimage.rotate` """ - _check_parameter('rotate', order, mode) + _check_parameter("rotate", order, mode) - if mode == 'opencv': - mode = '_opencv_edge' + if mode == "opencv": + mode = "_opencv_edge" input_arr = input axes = list(axes) @@ -522,7 +589,7 @@ def rotate(input, angle, axes=(1, 0), reshape=True, output=None, order=3, if axes[0] > axes[1]: axes = [axes[1], axes[0]] if axes[0] < 0 or input_arr.ndim <= axes[1]: - raise ValueError('invalid rotation plane specified') + raise ValueError("invalid rotation plane specified") ndim = input_arr.ndim rad = math.radians(angle) @@ -530,16 +597,14 @@ def rotate(input, angle, axes=(1, 0), reshape=True, output=None, order=3, cos, sin = sincos.real, sincos.imag # determine offsets and output shape as in scipy.ndimage.rotate - rot_matrix = numpy.array([[cos, sin], - [-sin, cos]]) + rot_matrix = numpy.array([[cos, sin], [-sin, cos]]) img_shape = numpy.asarray(input_arr.shape) in_plane_shape = img_shape[axes] if reshape: # Compute transformed input bounds iy, ix = in_plane_shape - out_bounds = rot_matrix @ [[0, 0, iy, iy], - [0, ix, 0, ix]] + out_bounds = rot_matrix @ [[0, 0, iy, iy], [0, ix, 0, ix]] # Compute the shape of the transformed input plane out_plane_shape = (out_bounds.ptp(axis=1) + 0.5).astype(cupy.int64) else: @@ -564,12 +629,28 @@ def rotate(input, angle, axes=(1, 0), reshape=True, output=None, order=3, matrix = cupy.asarray(matrix) offset = cupy.asarray(offset) - return affine_transform(input, matrix, offset, output_shape, output, order, - mode, cval, prefilter) + return affine_transform( + input, + matrix, + offset, + output_shape, + output, + order, + mode, + cval, + prefilter, + ) -def shift(input, shift, output=None, order=3, mode='constant', cval=0.0, - prefilter=True): +def shift( + input, + shift, + output=None, + order=3, + mode="constant", + cval=0.0, + prefilter=True, +): """Shift an array. The array is shifted using spline interpolation of the requested order. @@ -602,12 +683,12 @@ def shift(input, shift, output=None, order=3, mode='constant', cval=0.0, .. 
seealso:: :func:`scipy.ndimage.shift` """ - _check_parameter('shift', order, mode) + _check_parameter("shift", order, mode) - shift = _util._fix_sequence_arg(shift, input.ndim, 'shift', float) + shift = _util._fix_sequence_arg(shift, input.ndim, "shift", float) - if mode == 'opencv': - mode = '_opencv_edge' + if mode == "opencv": + mode = "_opencv_edge" output = affine_transform( input, @@ -622,26 +703,42 @@ def shift(input, shift, output=None, order=3, mode='constant', cval=0.0, ) else: output = _util._get_output(output, input) - if input.dtype.kind in 'iu': + if input.dtype.kind in "iu": input = input.astype(cupy.float32) filtered, nprepad = _filter_input(input, prefilter, mode, cval, order) - integer_output = output.dtype.kind in 'iu' + integer_output = output.dtype.kind in "iu" _util._check_cval(mode, cval, integer_output) large_int = prod(input.shape) > 1 << 31 kern = _interp_kernels._get_shift_kernel( - input.ndim, large_int, input.shape, mode, cval=cval, order=order, - integer_output=integer_output, nprepad=nprepad) - shift = cupy.asarray(shift, dtype=cupy.float64, order='C') + input.ndim, + large_int, + input.shape, + mode, + cval=cval, + order=order, + integer_output=integer_output, + nprepad=nprepad, + ) + shift = cupy.asarray(shift, dtype=cupy.float64, order="C") if shift.ndim != 1: - raise ValueError('shift must be 1d') + raise ValueError("shift must be 1d") if shift.size != filtered.ndim: - raise ValueError('len(shift) must equal input.ndim') + raise ValueError("len(shift) must equal input.ndim") kern(filtered, shift, output) return output -def zoom(input, zoom, output=None, order=3, mode='constant', cval=0.0, - prefilter=True, *, grid_mode=False): +def zoom( + input, + zoom, + output=None, + order=3, + mode="constant", + cval=0.0, + prefilter=True, + *, + grid_mode=False, +): """Zoom an array. The array is zoomed using spline interpolation of the requested order. @@ -687,16 +784,16 @@ def zoom(input, zoom, output=None, order=3, mode='constant', cval=0.0, .. seealso:: :func:`scipy.ndimage.zoom` """ - _check_parameter('zoom', order, mode) + _check_parameter("zoom", order, mode) - zoom = _util._fix_sequence_arg(zoom, input.ndim, 'zoom', float) + zoom = _util._fix_sequence_arg(zoom, input.ndim, "zoom", float) output_shape = [] for s, z in zip(input.shape, zoom): output_shape.append(int(round(s * z))) output_shape = tuple(output_shape) - if mode == 'opencv': + if mode == "opencv": zoom = [] offset = [] for in_size, out_size in zip(input.shape, output_shape): @@ -706,7 +803,7 @@ def zoom(input, zoom, output=None, order=3, mode='constant', cval=0.0, else: zoom.append(0) offset.append(0) - mode = 'nearest' + mode = "nearest" output = affine_transform( input, @@ -721,17 +818,17 @@ def zoom(input, zoom, output=None, order=3, mode='constant', cval=0.0, ) else: if grid_mode: - # warn about modes that may have surprising behavior suggest_mode = None - if mode == 'constant': - suggest_mode = 'grid-constant' - elif mode == 'wrap': - suggest_mode = 'grid-wrap' + if mode == "constant": + suggest_mode = "grid-constant" + elif mode == "wrap": + suggest_mode = "grid-wrap" if suggest_mode is not None: warnings.warn( - f'It is recommended to use mode = {suggest_mode} instead ' - f'of {mode} when grid_mode is True.') + f"It is recommended to use mode = {suggest_mode} instead " + f"of {mode} when grid_mode is True." 
+ ) zoom = [] for in_size, out_size in zip(input.shape, output_shape): @@ -743,16 +840,22 @@ def zoom(input, zoom, output=None, order=3, mode='constant', cval=0.0, zoom.append(0) output = _util._get_output(output, input, shape=output_shape) - if input.dtype.kind in 'iu': + if input.dtype.kind in "iu": input = input.astype(cupy.float32) filtered, nprepad = _filter_input(input, prefilter, mode, cval, order) - integer_output = output.dtype.kind in 'iu' + integer_output = output.dtype.kind in "iu" _util._check_cval(mode, cval, integer_output) large_int = max(prod(input.shape), prod(output_shape)) > 1 << 31 kern = _interp_kernels._get_zoom_kernel( - input.ndim, large_int, output_shape, mode, order=order, - integer_output=integer_output, grid_mode=grid_mode, - nprepad=nprepad) + input.ndim, + large_int, + output_shape, + mode, + order=order, + integer_output=integer_output, + grid_mode=grid_mode, + nprepad=nprepad, + ) zoom = cupy.asarray(zoom, dtype=cupy.float64) kern(filtered, zoom, output) return output diff --git a/python/cucim/src/cucim/skimage/_vendored/_ndimage_morphology.py b/python/cucim/src/cucim/skimage/_vendored/_ndimage_morphology.py index 61b957715..c39f9c605 100644 --- a/python/cucim/src/cucim/skimage/_vendored/_ndimage_morphology.py +++ b/python/cucim/src/cucim/skimage/_vendored/_ndimage_morphology.py @@ -5,16 +5,24 @@ import numpy from cupy import _core -from cucim.skimage._vendored import _internal as internal -from cucim.skimage._vendored import _ndimage_filters as _filters -from cucim.skimage._vendored import _ndimage_filters_core as _filters_core -from cucim.skimage._vendored import _ndimage_util as _util +from cucim.skimage._vendored import ( + _internal as internal, + _ndimage_filters as _filters, + _ndimage_filters_core as _filters_core, + _ndimage_util as _util, +) @cupy.memoize(for_each_device=True) def _get_binary_erosion_kernel( - w_shape, int_type, offsets, center_is_true, border_value, invert, masked, - all_weights_nonzero + w_shape, + int_type, + offsets, + center_is_true, + border_value, + invert, + masked, + all_weights_nonzero, ): if invert: border_value = int(not border_value) @@ -34,18 +42,25 @@ def _get_binary_erosion_kernel( }} else if ({center_is_true} && _in == {false_val}) {{ y = cast(_in); return; - }}""".format(center_is_true=int(center_is_true), - false_val=false_val) + }}""".format( + center_is_true=int(center_is_true), false_val=false_val + ) else: pre = """ bool _in = (bool)x[i]; if ({center_is_true} && _in == {false_val}) {{ y = cast(_in); return; - }}""".format(center_is_true=int(center_is_true), - false_val=false_val) - pre = pre + """ - y = cast({true_val});""".format(true_val=true_val) + }}""".format( + center_is_true=int(center_is_true), false_val=false_val + ) + pre = ( + pre + + """ + y = cast({true_val});""".format( + true_val=true_val + ) + ) # {{{{ required because format is called again within _generate_nd_kernel found = """ @@ -60,23 +75,33 @@ def _get_binary_erosion_kernel( y = cast({false_val}); return; }}}} - }}}}""".format(true_val=int(true_val), - false_val=int(false_val), - border_value=int(border_value),) + }}}}""".format( + true_val=int(true_val), + false_val=int(false_val), + border_value=int(border_value), + ) - name = 'binary_erosion' + name = "binary_erosion" if false_val: - name += '_invert' + name += "_invert" has_weights = not all_weights_nonzero return _filters_core._generate_nd_kernel( name, pre, found, - '', - 'constant', w_shape, int_type, offsets, 0, ctype='Y', - has_weights=has_weights, has_structure=False, 
has_mask=masked, - binary_morphology=True) + "", + "constant", + w_shape, + int_type, + offsets, + 0, + ctype="Y", + has_weights=has_weights, + has_structure=False, + has_mask=masked, + binary_morphology=True, + ) def _center_is_true(structure, origin): @@ -117,7 +142,7 @@ def iterate_structure(structure, iterations, origin=None): if origin is None: return out else: - origin = _util._fix_sequence_arg(origin, structure.ndim, 'origin', int) + origin = _util._fix_sequence_arg(origin, structure.ndim, "origin", int) origin = [iterations * o for o in origin] return out, origin @@ -152,15 +177,24 @@ def generate_binary_structure(rank, connectivity): return cupy.asarray(output) -def _binary_erosion(input, structure, iterations, mask, output, border_value, - origin, invert, brute_force=True): +def _binary_erosion( + input, + structure, + iterations, + mask, + output, + border_value, + origin, + invert, + brute_force=True, +): try: iterations = operator.index(iterations) except TypeError: - raise TypeError('iterations parameter should be an integer') + raise TypeError("iterations parameter should be an integer") - if input.dtype.kind == 'c': - raise TypeError('Complex type not supported') + if input.dtype.kind == "c": + raise TypeError("Complex type not supported") default_structure = False if structure is None: structure = generate_binary_structure(input.ndim, 1) @@ -171,7 +205,7 @@ def _binary_erosion(input, structure, iterations, mask, output, border_value, # For a structure that is true everywhere, can just provide the shape structure_shape = structure if len(structure_shape) == 0: - raise RuntimeError('structure must not be empty') + raise RuntimeError("structure must not be empty") else: structure = structure.astype(dtype=bool, copy=False) structure_shape = structure.shape @@ -179,30 +213,30 @@ def _binary_erosion(input, structure, iterations, mask, output, border_value, # structure_cpu = cupy.asnumpy(structure) if structure.ndim != input.ndim: raise RuntimeError( - 'structure and input must have same dimensionality' + "structure and input must have same dimensionality" ) if not structure.flags.c_contiguous: structure = cupy.ascontiguousarray(structure) if structure.size < 1: - raise RuntimeError('structure must not be empty') + raise RuntimeError("structure must not be empty") if mask is not None: if mask.shape != input.shape: - raise RuntimeError('mask and input must have equal sizes') + raise RuntimeError("mask and input must have equal sizes") if not mask.flags.c_contiguous: mask = cupy.ascontiguousarray(mask) masked = True else: masked = False - origin = _util._fix_sequence_arg(origin, input.ndim, 'origin', int) + origin = _util._fix_sequence_arg(origin, input.ndim, "origin", int) if isinstance(output, cupy.ndarray): - if output.dtype.kind == 'c': - raise TypeError('Complex output type not supported') + if output.dtype.kind == "c": + raise TypeError("Complex output type not supported") else: output = bool output = _util._get_output(output, input) - temp_needed = cupy.shares_memory(output, input, 'MAY_SHARE_BOUNDS') + temp_needed = cupy.shares_memory(output, input, "MAY_SHARE_BOUNDS") if temp_needed: # input and output arrays cannot share memory temp = output @@ -232,8 +266,14 @@ def _binary_erosion(input, structure, iterations, mask, output, border_value, center_is_true = _center_is_true(structure, origin) erode_kernel = _get_binary_erosion_kernel( - structure_shape, int_type, offsets, center_is_true, border_value, - invert, masked, all_weights_nonzero, + structure_shape, + int_type, + 
offsets, + center_is_true, + border_value, + invert, + masked, + all_weights_nonzero, ) if all_weights_nonzero: if masked: @@ -250,11 +290,11 @@ def _binary_erosion(input, structure, iterations, mask, output, border_value, output = erode_kernel(*in_args, output) elif center_is_true and not brute_force: raise NotImplementedError( - 'only brute_force iteration has been implemented' + "only brute_force iteration has been implemented" ) else: - if cupy.shares_memory(output, input, 'MAY_SHARE_BOUNDS'): - raise ValueError('output and input may not overlap in memory') + if cupy.shares_memory(output, input, "MAY_SHARE_BOUNDS"): + raise ValueError("output and input may not overlap in memory") tmp_in = cupy.empty_like(input, dtype=output.dtype) tmp_out = output if iterations >= 1 and not iterations & 1: @@ -307,8 +347,16 @@ def _prep_structure(structure, ndim): return structure, structure_shape, symmetric_structure -def binary_erosion(input, structure=None, iterations=1, mask=None, output=None, - border_value=0, origin=0, brute_force=False): +def binary_erosion( + input, + structure=None, + iterations=1, + mask=None, + output=None, + border_value=0, + origin=0, + brute_force=False, +): """Multidimensional binary erosion with a given structuring element. Binary erosion is a mathematical morphology operation used for image @@ -354,12 +402,29 @@ def binary_erosion(input, structure=None, iterations=1, mask=None, output=None, .. seealso:: :func:`scipy.ndimage.binary_erosion` """ structure, _, _ = _prep_structure(structure, input.ndim) - return _binary_erosion(input, structure, iterations, mask, output, - border_value, origin, 0, brute_force) + return _binary_erosion( + input, + structure, + iterations, + mask, + output, + border_value, + origin, + 0, + brute_force, + ) -def binary_dilation(input, structure=None, iterations=1, mask=None, - output=None, border_value=0, origin=0, brute_force=False): +def binary_dilation( + input, + structure=None, + iterations=1, + mask=None, + output=None, + border_value=0, + origin=0, + brute_force=False, +): """Multidimensional binary dilation with the given structuring element. Args: @@ -401,21 +466,39 @@ def binary_dilation(input, structure=None, iterations=1, mask=None, .. seealso:: :func:`scipy.ndimage.binary_dilation` """ - structure, structure_shape, symmetric = _prep_structure(structure, - input.ndim) - origin = _util._fix_sequence_arg(origin, input.ndim, 'origin', int) + structure, structure_shape, symmetric = _prep_structure( + structure, input.ndim + ) + origin = _util._fix_sequence_arg(origin, input.ndim, "origin", int) if not symmetric: structure = structure[tuple([slice(None, None, -1)] * structure.ndim)] for ii in range(len(origin)): origin[ii] = -origin[ii] if not structure_shape[ii] & 1: origin[ii] -= 1 - return _binary_erosion(input, structure, iterations, mask, output, - border_value, origin, 1, brute_force) + return _binary_erosion( + input, + structure, + iterations, + mask, + output, + border_value, + origin, + 1, + brute_force, + ) -def binary_opening(input, structure=None, iterations=1, output=None, origin=0, - mask=None, border_value=0, brute_force=False): +def binary_opening( + input, + structure=None, + iterations=1, + output=None, + origin=0, + mask=None, + border_value=0, + brute_force=False, +): """ Multidimensional binary opening with the given structuring element. @@ -462,14 +545,38 @@ def binary_opening(input, structure=None, iterations=1, output=None, origin=0, .. 
seealso:: :func:`scipy.ndimage.binary_opening` """ structure, _, _ = _prep_structure(structure, input.ndim) - tmp = binary_erosion(input, structure, iterations, mask, None, - border_value, origin, brute_force) - return binary_dilation(tmp, structure, iterations, mask, output, - border_value, origin, brute_force) + tmp = binary_erosion( + input, + structure, + iterations, + mask, + None, + border_value, + origin, + brute_force, + ) + return binary_dilation( + tmp, + structure, + iterations, + mask, + output, + border_value, + origin, + brute_force, + ) -def binary_closing(input, structure=None, iterations=1, output=None, origin=0, - mask=None, border_value=0, brute_force=False): +def binary_closing( + input, + structure=None, + iterations=1, + output=None, + origin=0, + mask=None, + border_value=0, + brute_force=False, +): """ Multidimensional binary closing with the given structuring element. @@ -516,14 +623,36 @@ def binary_closing(input, structure=None, iterations=1, output=None, origin=0, .. seealso:: :func:`scipy.ndimage.binary_closing` """ structure, _, _ = _prep_structure(structure, input.ndim) - tmp = binary_dilation(input, structure, iterations, mask, None, - border_value, origin, brute_force) - return binary_erosion(tmp, structure, iterations, mask, output, - border_value, origin, brute_force) + tmp = binary_dilation( + input, + structure, + iterations, + mask, + None, + border_value, + origin, + brute_force, + ) + return binary_erosion( + tmp, + structure, + iterations, + mask, + output, + border_value, + origin, + brute_force, + ) -def binary_hit_or_miss(input, structure1=None, structure2=None, output=None, - origin1=0, origin2=None): +def binary_hit_or_miss( + input, + structure1=None, + structure2=None, + output=None, + origin1=0, + origin2=None, +): """ Multidimensional binary hit-or-miss transform. @@ -563,17 +692,19 @@ def binary_hit_or_miss(input, structure1=None, structure2=None, output=None, structure1 = generate_binary_structure(input.ndim, 1) if structure2 is None: structure2 = cupy.logical_not(structure1) - origin1 = _util._fix_sequence_arg(origin1, input.ndim, 'origin1', int) + origin1 = _util._fix_sequence_arg(origin1, input.ndim, "origin1", int) if origin2 is None: origin2 = origin1 else: - origin2 = _util._fix_sequence_arg(origin2, input.ndim, 'origin2', int) + origin2 = _util._fix_sequence_arg(origin2, input.ndim, "origin2", int) - tmp1 = _binary_erosion(input, structure1, 1, None, None, 0, origin1, 0, - False) + tmp1 = _binary_erosion( + input, structure1, 1, None, None, 0, origin1, 0, False + ) inplace = isinstance(output, cupy.ndarray) - result = _binary_erosion(input, structure2, 1, None, output, 0, origin2, 1, - False) + result = _binary_erosion( + input, structure2, 1, None, output, 0, origin2, 1, False + ) if inplace: cupy.logical_not(output, output) cupy.logical_and(tmp1, output, output) @@ -582,8 +713,9 @@ def binary_hit_or_miss(input, structure1=None, structure2=None, output=None, return cupy.logical_and(tmp1, result) -def binary_propagation(input, structure=None, mask=None, output=None, - border_value=0, origin=0): +def binary_propagation( + input, structure=None, mask=None, output=None, border_value=0, origin=0 +): """ Multidimensional binary propagation with the given structuring element. @@ -611,8 +743,16 @@ def binary_propagation(input, structure=None, mask=None, output=None, .. 
seealso:: :func:`scipy.ndimage.binary_propagation` """ - return binary_dilation(input, structure, -1, mask, output, border_value, - origin, brute_force=True) + return binary_dilation( + input, + structure, + -1, + mask, + output, + border_value, + origin, + brute_force=True, + ) def binary_fill_holes(input, structure=None, output=None, origin=0): @@ -646,18 +786,28 @@ def binary_fill_holes(input, structure=None, output=None, origin=0): inplace = isinstance(output, cupy.ndarray) # TODO (grlee77): set brute_force=False below once implemented if inplace: - binary_dilation(tmp, structure, -1, mask, output, 1, origin, - brute_force=True) + binary_dilation( + tmp, structure, -1, mask, output, 1, origin, brute_force=True + ) cupy.logical_not(output, output) else: - output = binary_dilation(tmp, structure, -1, mask, None, 1, origin, - brute_force=True) + output = binary_dilation( + tmp, structure, -1, mask, None, 1, origin, brute_force=True + ) cupy.logical_not(output, output) return output -def grey_erosion(input, size=None, footprint=None, structure=None, output=None, - mode='reflect', cval=0.0, origin=0): +def grey_erosion( + input, + size=None, + footprint=None, + structure=None, + output=None, + mode="reflect", + cval=0.0, + origin=0, +): """Calculates a greyscale erosion. Args: @@ -689,14 +839,23 @@ def grey_erosion(input, size=None, footprint=None, structure=None, output=None, .. seealso:: :func:`scipy.ndimage.grey_erosion` """ if size is None and footprint is None and structure is None: - raise ValueError('size, footprint or structure must be specified') + raise ValueError("size, footprint or structure must be specified") - return _filters._min_or_max_filter(input, size, footprint, structure, - output, mode, cval, origin, 'min') + return _filters._min_or_max_filter( + input, size, footprint, structure, output, mode, cval, origin, "min" + ) -def grey_dilation(input, size=None, footprint=None, structure=None, - output=None, mode='reflect', cval=0.0, origin=0): +def grey_dilation( + input, + size=None, + footprint=None, + structure=None, + output=None, + mode="reflect", + cval=0.0, + origin=0, +): """Calculates a greyscale dilation. 
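
The grey-scale morphology entry points reformatted here keep their `scipy.ndimage`-compatible signatures. A minimal usage sketch, assuming the `cucim.skimage._vendored.ndimage` namespace re-exports them as the import hunks later in this patch show:

```python
import cupy as cp
from cucim.skimage._vendored import ndimage as ndi

img = cp.random.rand(64, 64).astype(cp.float32)

# A flat 3x3 structuring element via `size`; `footprint`/`structure` also work.
eroded = ndi.grey_erosion(img, size=(3, 3))
dilated = ndi.grey_dilation(img, size=(3, 3))

# Dilation of an erosion is an opening, matching ndi.grey_opening.
opened = ndi.grey_dilation(eroded, size=(3, 3))
```
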
Args: @@ -729,7 +888,7 @@ def grey_dilation(input, size=None, footprint=None, structure=None, """ if size is None and footprint is None and structure is None: - raise ValueError('size, footprint or structure must be specified') + raise ValueError("size, footprint or structure must be specified") if structure is not None: structure = cupy.array(structure) structure = structure[tuple([slice(None, None, -1)] * structure.ndim)] @@ -737,7 +896,7 @@ def grey_dilation(input, size=None, footprint=None, structure=None, footprint = cupy.array(footprint) footprint = footprint[tuple([slice(None, None, -1)] * footprint.ndim)] - origin = _util._fix_sequence_arg(origin, input.ndim, 'origin', int) + origin = _util._fix_sequence_arg(origin, input.ndim, "origin", int) for i in range(len(origin)): origin[i] = -origin[i] if footprint is not None: @@ -751,12 +910,21 @@ def grey_dilation(input, size=None, footprint=None, structure=None, if sz % 2 == 0: origin[i] -= 1 - return _filters._min_or_max_filter(input, size, footprint, structure, - output, mode, cval, origin, 'max') + return _filters._min_or_max_filter( + input, size, footprint, structure, output, mode, cval, origin, "max" + ) -def grey_closing(input, size=None, footprint=None, structure=None, - output=None, mode='reflect', cval=0.0, origin=0): +def grey_closing( + input, + size=None, + footprint=None, + structure=None, + output=None, + mode="reflect", + cval=0.0, + origin=0, +): """Calculates a multi-dimensional greyscale closing. Args: @@ -788,16 +956,27 @@ def grey_closing(input, size=None, footprint=None, structure=None, .. seealso:: :func:`scipy.ndimage.grey_closing` """ if (size is not None) and (footprint is not None): - warnings.warn('ignoring size because footprint is set', UserWarning, - stacklevel=2) - tmp = grey_dilation(input, size, footprint, structure, None, mode, cval, - origin) - return grey_erosion(tmp, size, footprint, structure, output, mode, cval, - origin) + warnings.warn( + "ignoring size because footprint is set", UserWarning, stacklevel=2 + ) + tmp = grey_dilation( + input, size, footprint, structure, None, mode, cval, origin + ) + return grey_erosion( + tmp, size, footprint, structure, output, mode, cval, origin + ) -def grey_opening(input, size=None, footprint=None, structure=None, - output=None, mode='reflect', cval=0.0, origin=0): +def grey_opening( + input, + size=None, + footprint=None, + structure=None, + output=None, + mode="reflect", + cval=0.0, + origin=0, +): """Calculates a multi-dimensional greyscale opening. Args: @@ -829,12 +1008,15 @@ def grey_opening(input, size=None, footprint=None, structure=None, .. 
seealso:: :func:`scipy.ndimage.grey_opening` """ if (size is not None) and (footprint is not None): - warnings.warn('ignoring size because footprint is set', UserWarning, - stacklevel=2) - tmp = grey_erosion(input, size, footprint, structure, None, mode, cval, - origin) - return grey_dilation(tmp, size, footprint, structure, output, mode, cval, - origin) + warnings.warn( + "ignoring size because footprint is set", UserWarning, stacklevel=2 + ) + tmp = grey_erosion( + input, size, footprint, structure, None, mode, cval, origin + ) + return grey_dilation( + tmp, size, footprint, structure, output, mode, cval, origin + ) def morphological_gradient( @@ -843,7 +1025,7 @@ def morphological_gradient( footprint=None, structure=None, output=None, - mode='reflect', + mode="reflect", cval=0.0, origin=0, ): @@ -902,7 +1084,7 @@ def morphological_laplace( footprint=None, structure=None, output=None, - mode='reflect', + mode="reflect", cval=0.0, origin=0, ): @@ -964,7 +1146,7 @@ def white_tophat( footprint=None, structure=None, output=None, - mode='reflect', + mode="reflect", cval=0.0, origin=0, ): @@ -1001,7 +1183,7 @@ def white_tophat( """ if (size is not None) and (footprint is not None): warnings.warn( - 'ignoring size because footprint is set', UserWarning, stacklevel=2 + "ignoring size because footprint is set", UserWarning, stacklevel=2 ) tmp = grey_erosion( input, size, footprint, structure, None, mode, cval, origin @@ -1022,7 +1204,7 @@ def black_tophat( footprint=None, structure=None, output=None, - mode='reflect', + mode="reflect", cval=0.0, origin=0, ): @@ -1059,7 +1241,7 @@ def black_tophat( """ if (size is not None) and (footprint is not None): warnings.warn( - 'ignoring size because footprint is set', UserWarning, stacklevel=2 + "ignoring size because footprint is set", UserWarning, stacklevel=2 ) tmp = grey_dilation( input, size, footprint, structure, None, mode, cval, origin diff --git a/python/cucim/src/cucim/skimage/_vendored/_ndimage_spline_kernel_weights.py b/python/cucim/src/cucim/skimage/_vendored/_ndimage_spline_kernel_weights.py index b2fc84449..33786ee5b 100644 --- a/python/cucim/src/cucim/skimage/_vendored/_ndimage_spline_kernel_weights.py +++ b/python/cucim/src/cucim/skimage/_vendored/_ndimage_spline_kernel_weights.py @@ -1,41 +1,49 @@ """Determination of spline kernel weights (adapted from SciPy) See more verbose comments for each case there: -https://github.com/scipy/scipy/blob/eba29d69846ab1299976ff4af71c106188397ccc/scipy/ndimage/src/ni_splines.c#L7 # NOQA +https://github.com/scipy/scipy/blob/eba29d69846ab1299976ff4af71c106188397ccc/scipy/ndimage/src/ni_splines.c#L7 ``spline_weights_inline`` is a dict where the key is the spline order and the value is the spline weight initialization code. -""" +""" # noqa: E501 spline_weights_inline = {} # Note: This order = 1 case is currently unused (order = 1 has a different code # path in _interp_kernels.py). I think that existing code is a bit more # efficient. -spline_weights_inline[1] = ''' +spline_weights_inline[ + 1 +] = """ wx = c_{j} - floor({order} & 1 ? c_{j} : c_{j} + 0.5); weights_{j}[0] = 1.0 - wx; weights_{j}[1] = wx; -''' +""" -spline_weights_inline[2] = ''' +spline_weights_inline[ + 2 +] = """ wx = c_{j} - floor({order} & 1 ? c_{j} : c_{j} + 0.5); weights_{j}[1] = 0.75 - wx * wx; wy = 0.5 - wx; weights_{j}[0] = 0.5 * wy * wy; weights_{j}[2] = 1.0 - weights_{j}[0] - weights_{j}[1]; -''' +""" -spline_weights_inline[3] = ''' +spline_weights_inline[ + 3 +] = """ wx = c_{j} - floor({order} & 1 ? 
c_{j} : c_{j} + 0.5); wy = 1.0 - wx; weights_{j}[1] = (wx * wx * (wx - 2.0) * 3.0 + 4.0) / 6.0; weights_{j}[2] = (wy * wy * (wy - 2.0) * 3.0 + 4.0) / 6.0; weights_{j}[0] = wy * wy * wy / 6.0; weights_{j}[3] = 1.0 - weights_{j}[0] - weights_{j}[1] - weights_{j}[2]; -''' +""" -spline_weights_inline[4] = ''' +spline_weights_inline[ + 4 +] = """ wx = c_{j} - floor({order} & 1 ? c_{j} : c_{j} + 0.5); wy = wx * wx; weights_{j}[2] = wy * (wy * 0.25 - 0.625) + 115.0 / 192.0; @@ -50,9 +58,11 @@ weights_{j}[0] = wy * wy / 24.0; weights_{j}[4] = 1.0 - weights_{j}[0] - weights_{j}[1] - weights_{j}[2] - weights_{j}[3]; -''' +""" -spline_weights_inline[5] = ''' +spline_weights_inline[ + 5 +] = """ wx = c_{j} - floor({order} & 1 ? c_{j} : c_{j} + 0.5); wy = wx * wx; weights_{j}[2] = wy * (wy * (0.25 - wx / 12.0) - 0.5) + 0.55; @@ -70,4 +80,4 @@ weights_{j}[0] = (1.0 - wx) * wy * wy / 120.0; weights_{j}[5] = 1.0 - weights_{j}[0] - weights_{j}[1] - weights_{j}[2] - weights_{j}[3] - weights_{j}[4]; -''' +""" diff --git a/python/cucim/src/cucim/skimage/_vendored/_ndimage_spline_prefilter_core.py b/python/cucim/src/cucim/skimage/_vendored/_ndimage_spline_prefilter_core.py index c44df836e..5e9a7dd24 100644 --- a/python/cucim/src/cucim/skimage/_vendored/_ndimage_spline_prefilter_core.py +++ b/python/cucim/src/cucim/skimage/_vendored/_ndimage_spline_prefilter_core.py @@ -1,8 +1,8 @@ """ Spline poles and boundary handling implemented as in SciPy -https://github.com/scipy/scipy/blob/ee6ae72f83a0995aeb34929aed881d3f36fccfda/scipy/ndimage/src/ni_splines.c # noqa -""" +https://github.com/scipy/scipy/blob/ee6ae72f83a0995aeb34929aed881d3f36fccfda/scipy/ndimage/src/ni_splines.c +""" # noqa: E501 import functools import math import operator @@ -21,20 +21,25 @@ def get_poles(order): elif order == 4: # sqrt(664.0 - sqrt(438976.0)) + sqrt(304.0) - 19.0 # sqrt(664.0 + sqrt(438976.0)) - sqrt(304.0) - 19.0 - return (-0.361341225900220177092212841325675255, - -0.013725429297339121360331226939128204) + return ( + -0.361341225900220177092212841325675255, + -0.013725429297339121360331226939128204, + ) elif order == 5: # sqrt(67.5 - sqrt(4436.25)) + sqrt(26.25) - 6.5 # sqrt(67.5 + sqrt(4436.25)) - sqrt(26.25) - 6.5 - return (-0.430575347099973791851434783493520110, - -0.043096288203264653822712376822550182) + return ( + -0.430575347099973791851434783493520110, + -0.043096288203264653822712376822550182, + ) else: - raise ValueError('only order 2-5 supported') + raise ValueError("only order 2-5 supported") def get_gain(poles): - return functools.reduce(operator.mul, - [(1.0 - z) * (1.0 - 1.0 / z) for z in poles]) + return functools.reduce( + operator.mul, [(1.0 - z) * (1.0 - 1.0 / z) for z in poles] + ) def _causal_init_code(mode): @@ -42,10 +47,10 @@ def _causal_init_code(mode): c is a 1d array of length n and z is a filter pole """ - code = f''' - // causal init for mode={mode}''' - if mode == 'mirror': - code += ''' + code = f""" + // causal init for mode={mode}""" + if mode == "mirror": + code += """ z_i = z; z_n_1 = pow(z, (P)(n - 1)); @@ -55,18 +60,18 @@ def _causal_init_code(mode): z_n_1 * c[(n - 1 - i) * element_stride]); z_i *= z; }} - c[0] /= 1 - z_n_1 * z_n_1;''' - elif mode == 'grid-wrap': - code += ''' + c[0] /= 1 - z_n_1 * z_n_1;""" + elif mode == "grid-wrap": + code += """ z_i = z; for (i = 1; i < min(n, static_cast({n_boundary})); ++i) {{ c[0] += z_i * c[(n - i) * element_stride]; z_i *= z; }} - c[0] /= 1 - z_i; /* z_i = pow(z, n) */''' - elif mode == 'reflect': - code += ''' + c[0] /= 1 - z_i; /* z_i = pow(z, n) 
*/""" + elif mode == "reflect": + code += """ z_i = z; z_n = pow(z, (P)n); c0 = c[0]; @@ -78,9 +83,9 @@ def _causal_init_code(mode): z_i *= z; }} c[0] *= z / (1 - z_n * z_n); - c[0] += c0;''' + c[0] += c0;""" else: - raise ValueError('invalid mode: {}'.format(mode)) + raise ValueError("invalid mode: {}".format(mode)) return code @@ -89,42 +94,42 @@ def _anticausal_init_code(mode): c is a 1d array of length n and z is a filter pole """ - code = f''' - // anti-causal init for mode={mode}''' - if mode == 'mirror': - code += ''' + code = f""" + // anti-causal init for mode={mode}""" + if mode == "mirror": + code += """ c[(n - 1) * element_stride] = ( z * c[(n - 2) * element_stride] + - c[(n - 1) * element_stride]) * z / (z * z - 1);''' - elif mode == 'grid-wrap': - code += ''' + c[(n - 1) * element_stride]) * z / (z * z - 1);""" + elif mode == "grid-wrap": + code += """ z_i = z; for (i = 0; i < min(n - 1, static_cast({n_boundary})); ++i) {{ c[(n - 1) * element_stride] += z_i * c[i * element_stride]; z_i *= z; }} - c[(n - 1) * element_stride] *= z / (z_i - 1); /* z_i = pow(z, n) */''' - elif mode == 'reflect': - code += ''' - c[(n - 1) * element_stride] *= z / (z - 1);''' + c[(n - 1) * element_stride] *= z / (z_i - 1); /* z_i = pow(z, n) */""" + elif mode == "reflect": + code += """ + c[(n - 1) * element_stride] *= z / (z - 1);""" else: - raise ValueError('invalid mode: {}'.format(mode)) + raise ValueError("invalid mode: {}".format(mode)) return code def _get_spline_mode(mode): """spline boundary mode for interpolation with order >= 2.""" - if mode in ['mirror', 'reflect', 'grid-wrap']: + if mode in ["mirror", "reflect", "grid-wrap"]: # exact analytic boundary conditions exist for these modes. return mode - elif mode == 'grid-mirror': + elif mode == "grid-mirror": # grid-mirror is a synonym for 'reflect' - return 'reflect' + return "reflect" # No exact analytical spline boundary condition implemented. Reflect gives # lower error than using mirror or wrap for mode 'nearest'. Otherwise, a # mirror spline boundary condition is used. - return 'reflect' if mode == 'nearest' else 'mirror' + return "reflect" if mode == "nearest" else "mirror" def _get_spline1d_code(mode, poles, n_boundary): @@ -133,57 +138,72 @@ def _get_spline1d_code(mode, poles, n_boundary): Prefiltering is done by causal filtering followed by anti-causal filtering. Multiple boundary conditions have been implemented. 
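
As a sanity check on `get_poles`/`get_gain` above: for the quadratic (order-2) spline, whose single pole z = sqrt(8) - 3 is a standard result not shown in this hunk, the gain (1 - z)(1 - 1/z) works out to exactly 8. A small sketch verifying this numerically:

```python
import functools
import math
import operator


def get_gain(poles):
    # same reduction as _ndimage_spline_prefilter_core.get_gain performs
    return functools.reduce(
        operator.mul, [(1.0 - z) * (1.0 - 1.0 / z) for z in poles]
    )


z = math.sqrt(8.0) - 3.0  # quadratic B-spline pole (assumed; not in this hunk)
# (4 - 2*sqrt(2)) * (4 + 2*sqrt(2)) == 16 - 8 == 8 exactly
assert abs(get_gain([z]) - 8.0) < 1e-12
```
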
""" - code = [''' + code = [ + """ __device__ void spline_prefilter1d( T* __restrict__ c, idx_t signal_length, idx_t element_stride) - {{'''] + {{""" + ] # variables common to all boundary modes - code.append(''' + code.append( + """ idx_t i, n = signal_length; - P z, z_i;''') + P z, z_i;""" + ) # retrieve the spline boundary extension mode to use mode = _get_spline_mode(mode) - if mode == 'mirror': + if mode == "mirror": # variables specific to mirror boundary mode - code.append(''' - P z_n_1;''') - elif mode == 'reflect': + code.append( + """ + P z_n_1;""" + ) + elif mode == "reflect": # variables specific to reflect boundary mode - code.append(''' + code.append( + """ P z_n; - T c0;''') + T c0;""" + ) for pole in poles: - - code.append(f''' + code.append( + f""" // select the current pole - z = {pole};''') + z = {pole};""" + ) # initialize and apply the causal filter code.append(_causal_init_code(mode)) - code.append(''' + code.append( + """ // apply the causal filter for the current pole for (i = 1; i < n; ++i) {{ c[i * element_stride] += z * c[(i - 1) * element_stride]; - }}''') + }}""" + ) # initialize and apply the anti-causal filter code.append(_anticausal_init_code(mode)) - code.append(''' + code.append( + """ // apply the anti-causal filter for the current pole for (i = n - 2; i >= 0; --i) {{ c[i * element_stride] = z * (c[(i + 1) * element_stride] - c[i * element_stride]); - }}''') + }}""" + ) - code += [''' - }}'''] - return textwrap.dedent('\n'.join(code)).format(n_boundary=n_boundary) + code += [ + """ + }}""" + ] + return textwrap.dedent("\n".join(code)).format(n_boundary=n_boundary) -_FILTER_GENERAL = ''' +_FILTER_GENERAL = """ #include "cupy/carray.cuh" #include "cupy/complex.cuh" typedef {data_type} T; @@ -202,7 +222,7 @@ def _get_spline1d_code(mode, poles, n_boundary): }} return ptr + index + stride * i; }} -''' +""" _batch_spline1d_strided_template = """ @@ -225,9 +245,16 @@ def _get_spline1d_code(mode, poles, n_boundary): @cupy.memoize(for_each_device=True) -def get_raw_spline1d_kernel(axis, ndim, mode, order, index_type='int', - data_type='double', pole_type='double', - block_size=128): +def get_raw_spline1d_kernel( + axis, + ndim, + mode, + order, + index_type="int", + data_type="double", + pole_type="double", + block_size=128, +): """Generate a kernel for applying a spline prefilter along a given axis.""" poles = get_poles(order) @@ -235,22 +262,24 @@ def get_raw_spline1d_kernel(axis, ndim, mode, order, index_type='int', # (SciPy uses n_boundary = n_samples but this is excessive) largest_pole = max([abs(p) for p in poles]) # tol < 1e-7 fails test cases comparing to SciPy at atol = rtol = 1e-5 - tol = 1e-10 if pole_type == 'float' else 1e-18 + tol = 1e-10 if pole_type == "float" else 1e-18 n_boundary = math.ceil(math.log(tol, largest_pole)) # headers and general utility function for extracting rows of data - code = _FILTER_GENERAL.format(index_type=index_type, - data_type=data_type, - pole_type=pole_type) + code = _FILTER_GENERAL.format( + index_type=index_type, data_type=data_type, pole_type=pole_type + ) # generate source for a 1d function for a given boundary mode and poles code += _get_spline1d_code(mode, poles, n_boundary) # generate code handling batch operation of the 1d filter - mode_str = mode.replace('-', '_') # cannot have '-' in kernel name - kernel_name = (f'cupyx_scipy_ndimage_spline_filter_{ndim}d_ord{order}_' - f'axis{axis}_{mode_str}') - code += _batch_spline1d_strided_template.format(ndim=ndim, axis=axis, - block_size=block_size, - 
kernel_name=kernel_name) + mode_str = mode.replace("-", "_") # cannot have '-' in kernel name + kernel_name = ( + f"cupyx_scipy_ndimage_spline_filter_{ndim}d_ord{order}_" + f"axis{axis}_{mode_str}" + ) + code += _batch_spline1d_strided_template.format( + ndim=ndim, axis=axis, block_size=block_size, kernel_name=kernel_name + ) return cupy.RawKernel(code, kernel_name) diff --git a/python/cucim/src/cucim/skimage/_vendored/_ndimage_util.py b/python/cucim/src/cucim/skimage/_vendored/_ndimage_util.py index 0b0b0be6d..08b5a68a0 100644 --- a/python/cucim/src/cucim/skimage/_vendored/_ndimage_util.py +++ b/python/cucim/src/cucim/skimage/_vendored/_ndimage_util.py @@ -7,24 +7,27 @@ def _is_integer_output(output, input): if output is None: - return input.dtype.kind in 'iu' + return input.dtype.kind in "iu" elif isinstance(output, cupy.ndarray): - return output.dtype.kind in 'iu' - return cupy.dtype(output).kind in 'iu' + return output.dtype.kind in "iu" + return cupy.dtype(output).kind in "iu" def _check_cval(mode, cval, integer_output): - if mode == 'constant' and integer_output and not cupy.isfinite(cval): - raise NotImplementedError("Non-finite cval is not supported for " - "outputs with integer dtype.") + if mode == "constant" and integer_output and not cupy.isfinite(cval): + raise NotImplementedError( + "Non-finite cval is not supported for " + "outputs with integer dtype." + ) def _get_weights_dtype(input, weights, use_cucim_casting=False): if weights.dtype.kind == "c" or input.dtype.kind == "c": return cupy.promote_types(input.real.dtype, cupy.complex64) - elif weights.dtype.kind in 'iub': + elif weights.dtype.kind in "iub": if use_cucim_casting: from cucim.skimage._shared.utils import _supported_float_type + return _supported_float_type(weights.dtype) else: # convert integer dtype weights to double as in SciPy @@ -41,18 +44,18 @@ def _get_output(output, input, shape=None, complex_output=False): _dtype = input.dtype output = cupy.empty(shape, dtype=_dtype) elif isinstance(output, (type, cupy.dtype)): - if complex_output and cupy.dtype(output).kind != 'c': + if complex_output and cupy.dtype(output).kind != "c": warnings.warn("promoting specified output dtype to complex") output = cupy.promote_types(output, cupy.complex64) output = cupy.empty(shape, dtype=output) elif isinstance(output, str): output = numpy.sctypeDict[output] - if complex_output and cupy.dtype(output).kind != 'c': + if complex_output and cupy.dtype(output).kind != "c": raise RuntimeError("output must have complex dtype") output = cupy.empty(shape, dtype=output) elif output.shape != shape: raise RuntimeError("output shape not correct") - elif complex_output and output.dtype.kind != 'c': + elif complex_output and output.dtype.kind != "c": raise RuntimeError("output must have complex dtype") return output @@ -74,14 +77,22 @@ def _fix_sequence_arg(arg, ndim, name, conv=lambda x: x): def _check_origin(origin, width): origin = int(origin) if (width // 2 + origin < 0) or (width // 2 + origin >= width): - raise ValueError('invalid origin') + raise ValueError("invalid origin") return origin def _check_mode(mode): - if mode not in ('reflect', 'constant', 'nearest', 'mirror', 'wrap', - 'grid-mirror', 'grid-wrap', 'grid-reflect'): - msg = f'boundary mode not supported (actual: {mode})' + if mode not in ( + "reflect", + "constant", + "nearest", + "mirror", + "wrap", + "grid-mirror", + "grid-wrap", + "grid-reflect", + ): + msg = f"boundary mode not supported (actual: {mode})" raise RuntimeError(msg) return mode @@ -91,13 +102,19 @@ def 
_get_inttype(input): # The indices actually use byte positions and we can't just use # input.nbytes since that won't tell us the number of bytes between the # first and last elements when the array is non-contiguous - nbytes = sum((x - 1) * abs(stride) for x, stride in - zip(input.shape, input.strides)) + input.dtype.itemsize - return 'int' if nbytes < (1 << 31) else 'ptrdiff_t' - - -def _generate_boundary_condition_ops(mode, ix, xsize, int_t="int", - float_ix=False, separate=False): + nbytes = ( + sum( + (x - 1) * abs(stride) + for x, stride in zip(input.shape, input.strides) + ) + + input.dtype.itemsize + ) + return "int" if nbytes < (1 << 31) else "ptrdiff_t" + + +def _generate_boundary_condition_ops( + mode, ix, xsize, int_t="int", float_ix=False, separate=False +): """Generate boundary conditions If separate = True, a pair of conditions for the (lower, upper) boundary @@ -105,46 +122,49 @@ def _generate_boundary_condition_ops(mode, ix, xsize, int_t="int", """ min_func = "fmin" if float_ix else "min" max_func = "fmax" if float_ix else "max" - if mode in ['reflect', 'grid-mirror']: + if mode in ["reflect", "grid-mirror"]: if separate: - ops_upper = f''' + ops_upper = f""" {ix} %= {xsize} * 2; {ix} = {min_func}({ix}, 2 * {xsize} - 1 - {ix}); - ''' - ops_lower = f''' + """ + ops_lower = ( + f""" if ({ix} < 0) {{ {ix} = - 1 -{ix}; }} - ''' + ops_upper + """ + + ops_upper + ) ops = (ops_lower, ops_upper) else: - ops = f''' + ops = f""" if ({ix} < 0) {{ {ix} = - 1 -{ix}; }} {ix} %= {xsize} * 2; - {ix} = {min_func}({ix}, 2 * {xsize} - 1 - {ix});''' - elif mode == 'mirror': + {ix} = {min_func}({ix}, 2 * {xsize} - 1 - {ix});""" + elif mode == "mirror": if separate: - temp1 = f''' + temp1 = f""" if ({xsize} == 1) {{ {ix} = 0; }} else {{ - ''' - temp2 = f''' + """ + temp2 = f""" if ({ix} < 0) {{ {ix} = -{ix}; }} - ''' - temp3 = f''' + """ + temp3 = f""" {ix} = 1 + ({ix} - 1) % (({xsize} - 1) * 2); {ix} = {min_func}({ix}, 2 * {xsize} - 2 - {ix}); - }}''' + }}""" ops_lower = temp1 + temp2 + temp3 ops_upper = temp1 + temp3 ops = (ops_lower, ops_upper) else: - ops = f''' + ops = f""" if ({xsize} == 1) {{ {ix} = 0; }} else {{ @@ -153,68 +173,76 @@ def _generate_boundary_condition_ops(mode, ix, xsize, int_t="int", }} {ix} = 1 + ({ix} - 1) % (({xsize} - 1) * 2); {ix} = {min_func}({ix}, 2 * {xsize} - 2 - {ix}); - }}''' - elif mode == 'nearest': - T = 'int' if int_t == 'int' else 'long long' + }}""" + elif mode == "nearest": + T = "int" if int_t == "int" else "long long" if separate: - ops_lower = f'''{ix} = {max_func}(({T}){ix}, ({T})0);''' - ops_upper = f'''{ix} = {min_func}(({T}){ix}, ({T})({xsize} - 1));''' # noqa + ops_lower = f"""{ix} = {max_func}(({T}){ix}, ({T})0);""" + ops_upper = ( + f"""{ix} = {min_func}(({T}){ix}, ({T})({xsize} - 1));""" # noqa + ) ops = (ops_lower, ops_upper) else: - ops = f'''{ix} = {min_func}({max_func}(({T}){ix}, ({T})0), ({T})({xsize} - 1));''' # noqa - elif mode == 'grid-wrap': + ops = f"""{ix} = {min_func}({max_func}(({T}){ix}, ({T})0), ({T})({xsize} - 1));""" # noqa + elif mode == "grid-wrap": if separate: - ops_upper = f''' + ops_upper = f""" {ix} %= {xsize}; - ''' - ops_lower = ops_upper + f''' + """ + ops_lower = ( + ops_upper + + f""" while ({ix} < 0) {{ {ix} += {xsize}; - }}''' + }}""" + ) ops = (ops_lower, ops_upper) else: - ops = f''' + ops = f""" {ix} %= {xsize}; if ({ix} < 0) {{ {ix} += {xsize}; - }}''' + }}""" - elif mode == 'wrap': + elif mode == "wrap": if separate: - ops_lower = f'''{ix} += ({xsize} - 1) * (({int_t})(-{ix} / ({xsize} - 1)) + 1);''' # noqa 
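
The generated 'reflect' boundary ops are easier to audit as a host-side mirror; the `reflect_index` helper below is hypothetical, reimplementing the same three statements in Python and checking a few indices by hand:

```python
# Python mirror of the generated CUDA 'reflect' ops (hypothetical helper):
#   if (ix < 0) { ix = -1 - ix; }
#   ix %= 2 * xsize;
#   ix = min(ix, 2 * xsize - 1 - ix);
def reflect_index(ix, xsize):
    if ix < 0:
        ix = -1 - ix
    ix %= 2 * xsize
    return min(ix, 2 * xsize - 1 - ix)


# For xsize = 5 the extension is (... b a | a b c d e | e d ...):
assert reflect_index(-1, 5) == 0  # just left of the edge repeats index 0
assert reflect_index(-2, 5) == 1
assert reflect_index(5, 5) == 4   # just right of the edge repeats index 4
assert reflect_index(6, 5) == 3
```
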
- ops_upper = f'''{ix} -= ({xsize} - 1) * ({int_t})({ix} / ({xsize} - 1));''' # noqa + ops_lower = f"""{ix} += ({xsize} - 1) * (({int_t})(-{ix} / ({xsize} - 1)) + 1);""" # noqa + ops_upper = f"""{ix} -= ({xsize} - 1) * ({int_t})({ix} / ({xsize} - 1));""" # noqa ops = (ops_lower, ops_upper) else: - ops = f''' + ops = f""" if ({ix} < 0) {{ {ix} += ({xsize} - 1) * (({int_t})(-{ix} / ({xsize} - 1)) + 1); }} else if ({ix} > ({xsize} - 1)) {{ {ix} -= ({xsize} - 1) * ({int_t})({ix} / ({xsize} - 1)); - }};''' - elif mode in ['constant', 'grid-constant']: + }};""" + elif mode in ["constant", "grid-constant"]: if separate: - ops_lower = f''' + ops_lower = f""" if ({ix} < 0) {{ {ix} = -1; - }}''' - ops_upper = f''' + }}""" + ops_upper = f""" if ({ix} >= {xsize}) {{ {ix} = -1; - }}''' + }}""" ops = (ops_lower, ops_upper) else: - ops = f''' + ops = f""" if (({ix} < 0) || {ix} >= {xsize}) {{ {ix} = -1; - }}''' + }}""" if separate: ops = (ops, ops) return ops def _generate_indices_ops(ndim, int_type, offsets): - code = '{type} ind_{j} = _i % ysize_{j} - {offset}; _i /= ysize_{j};' - body = [code.format(type=int_type, j=j, offset=offsets[j]) - for j in range(ndim - 1, 0, -1)] - return '{type} _i = i;\n{body}\n{type} ind_0 = _i - {offset};'.format( - type=int_type, body='\n'.join(body), offset=offsets[0]) + code = "{type} ind_{j} = _i % ysize_{j} - {offset}; _i /= ysize_{j};" + body = [ + code.format(type=int_type, j=j, offset=offsets[j]) + for j in range(ndim - 1, 0, -1) + ] + return "{type} _i = i;\n{body}\n{type} ind_0 = _i - {offset};".format( + type=int_type, body="\n".join(body), offset=offsets[0] + ) diff --git a/python/cucim/src/cucim/skimage/_vendored/_pearsonr.py b/python/cucim/src/cucim/skimage/_vendored/_pearsonr.py index 0da8541f7..d3385da94 100644 --- a/python/cucim/src/cucim/skimage/_vendored/_pearsonr.py +++ b/python/cucim/src/cucim/skimage/_vendored/_pearsonr.py @@ -20,8 +20,10 @@ class PearsonRConstantInputWarning(RuntimeWarning): def __init__(self, msg=None): if msg is None: - msg = ("An input array is constant; the correlation coefficient " - "is not defined.") + msg = ( + "An input array is constant; the correlation coefficient " + "is not defined." + ) self.args = (msg,) @@ -30,8 +32,10 @@ class PearsonRNearConstantInputWarning(RuntimeWarning): def __init__(self, msg=None): if msg is None: - msg = ("An input array is nearly constant; the computed " - "correlation coefficient may be inaccurate.") + msg = ( + "An input array is nearly constant; the computed " + "correlation coefficient may be inaccurate." + ) self.args = (msg,) @@ -195,10 +199,10 @@ def pearsonr(x, y, *, disable_checks=False): # inputs must be 1D n = len(x) if n != len(y): - raise ValueError('x and y must have the same length.') + raise ValueError("x and y must have the same length.") if n < 2: - raise ValueError('x and y must have length at least 2.') + raise ValueError("x and y must have length at least 2.") if not disable_checks: # If an input is constant, the correlation coefficient is not defined. diff --git a/python/cucim/src/cucim/skimage/_vendored/_signaltools_core.py b/python/cucim/src/cucim/skimage/_vendored/_signaltools_core.py index 493f7b25c..69b9e8ac4 100644 --- a/python/cucim/src/cucim/skimage/_vendored/_signaltools_core.py +++ b/python/cucim/src/cucim/skimage/_vendored/_signaltools_core.py @@ -5,49 +5,60 @@ from cucim.skimage._vendored._ndimage_filters import _get_correlate_kernel -from . import _internal as internal -from . import _ndimage_util as _util +from . 
import _internal as internal, _ndimage_util as _util def _check_conv_inputs(in1, in2, mode, convolution=True): if in1.ndim == in2.ndim == 0: return in1 * (in2 if convolution else in2.conj()) if in1.ndim != in2.ndim: - raise ValueError('in1 and in2 should have the same dimensionality') + raise ValueError("in1 and in2 should have the same dimensionality") if in1.size == 0 or in2.size == 0: return cupy.array([], dtype=in1.dtype) - if mode not in ('full', 'same', 'valid'): + if mode not in ("full", "same", "valid"): raise ValueError('acceptable modes are "valid", "same", or "full"') return None -def _direct_correlate(in1, in2, mode='full', output=float, convolution=False, - boundary='constant', fillvalue=0.0, shift=False): - if in1.ndim != 1 and (in1.dtype.kind == 'b' or - (in1.dtype.kind == 'f' and in1.dtype.itemsize < 4)): - raise ValueError('unsupported type in SciPy') +def _direct_correlate( + in1, + in2, + mode="full", + output=float, + convolution=False, + boundary="constant", + fillvalue=0.0, + shift=False, +): + if in1.ndim != 1 and ( + in1.dtype.kind == "b" + or (in1.dtype.kind == "f" and in1.dtype.itemsize < 4) + ): + raise ValueError("unsupported type in SciPy") # Swaps inputs so smaller one is in2: # NOTE: when mode != 'valid' we can only swap with a constant-0 boundary swapped_inputs = False orig_in1_shape = in1.shape if _inputs_swap_needed(mode, in1.shape, in2.shape) or ( - in2.size > in1.size and boundary == 'constant' and fillvalue == 0): + in2.size > in1.size and boundary == "constant" and fillvalue == 0 + ): in1, in2 = in2, in1 swapped_inputs = not convolution # Due to several optimizations, the second array can only be 2 GiB if in2.nbytes >= (1 << 31): - raise RuntimeError('smaller array must be 2 GiB or less, ' - 'use method="fft" instead') + raise RuntimeError( + "smaller array must be 2 GiB or less, " 'use method="fft" instead' + ) # At this point, in1.size > in2.size # (except some cases when boundary != 'constant' or fillvalue != 0) # Figure out the output shape and the origin of the kernel - if mode == 'full': + if mode == "full": out_shape = tuple(x1 + x2 - 1 for x1, x2 in zip(in1.shape, in2.shape)) offsets = tuple(x - 1 for x in in2.shape) - elif mode == 'valid': + elif mode == "valid": out_shape = tuple(x1 - x2 + 1 for x1, x2 in zip(in1.shape, in2.shape)) offsets = (0,) * in1.ndim else: # mode == 'same': @@ -59,23 +70,26 @@ def _direct_correlate(in1, in2, mode='full', output=float, convolution=False, if orig_in1_shape == in1.shape: offsets = tuple((x - shift) // 2 for x in in2.shape) else: - offsets = tuple((2 * x2 - x1 - (not convolution) + shift) // 2 - for x1, x2 in zip(in1.shape, in2.shape)) + offsets = tuple( + (2 * x2 - x1 - (not convolution) + shift) // 2 + for x1, x2 in zip(in1.shape, in2.shape) + ) # Check the output if not isinstance(output, cupy.ndarray): output = cupy.empty(out_shape, output) elif output.shape != out_shape: - raise ValueError('out has wrong shape') + raise ValueError("out has wrong shape") # Get and run the CuPy kernel int_type = _util._get_inttype(in1) kernel = _get_correlate_kernel( - boundary, in2.shape, int_type, offsets, fillvalue) + boundary, in2.shape, int_type, offsets, fillvalue + ) in2 = _reverse_and_conj(in2) if convolution else in2 if not swapped_inputs: kernel(in1, in2, output) - elif output.dtype.kind != 'c': + elif output.dtype.kind != "c": # Avoids one array copy kernel(in1, in2, _reverse_and_conj(output)) else: @@ -91,15 +105,17 @@ def _reverse_and_conj(x): def _inputs_swap_needed(mode, shape1, shape2, axes=None): # See 
scipy's documentation in scipy.signal.signaltools - if mode != 'valid' or not shape1: + if mode != "valid" or not shape1: return False if axes is None: axes = range(len(shape1)) not_ok1 = any(shape1[i] < shape2[i] for i in axes) not_ok2 = any(shape1[i] > shape2[i] for i in axes) if not_ok1 and not_ok2: - raise ValueError('For "valid" mode, one must be at least ' - 'as large as the other in every dimension') + raise ValueError( + 'For "valid" mode, one must be at least ' + "as large as the other in every dimension" + ) return not_ok1 @@ -115,8 +131,10 @@ def _init_freq_conv_axes(in1, in2, mode, axes, sorted_axes=False): # Check that unused axes are either 1 (broadcast) or the same length for ax, (dim1, dim2) in enumerate(zip(s1, s2)): if ax not in axes and dim1 != dim2 and dim1 != 1 and dim2 != 1: - raise ValueError('incompatible shapes for in1 and in2:' - ' {} and {}'.format(s1, s2)) + raise ValueError( + "incompatible shapes for in1 and in2:" + " {} and {}".format(s1, s2) + ) # Check that input sizes are compatible with 'valid' mode. if _inputs_swap_needed(mode, s1, s2, axes=axes): @@ -134,17 +152,20 @@ def _init_nd_and_axes(x, axes): except TypeError: axes = internal._normalize_axis_indices(axes, x.ndim) if not len(axes): - raise ValueError('when provided, axes cannot be empty') + raise ValueError("when provided, axes cannot be empty") if any(x.shape[ax] < 1 for ax in axes): - raise ValueError('invalid number of data points specified') + raise ValueError("invalid number of data points specified") return axes def _freq_domain_conv(in1, in2, axes, shape, calc_fast_len=False): # See scipy's documentation in scipy.signal.signaltools - real = (in1.dtype.kind != 'c' and in2.dtype.kind != 'c') - fshape = ([fft.next_fast_len(shape[a], real) for a in axes] - if calc_fast_len else shape) + real = in1.dtype.kind != "c" and in2.dtype.kind != "c" + fshape = ( + [fft.next_fast_len(shape[a], real) for a in axes] + if calc_fast_len + else shape + ) fftn, ifftn = (fft.rfftn, fft.irfftn) if real else (fft.fftn, fft.ifftn) # Perform the convolution @@ -157,12 +178,15 @@ def _freq_domain_conv(in1, in2, axes, shape, calc_fast_len=False): def _apply_conv_mode(full, s1, s2, mode, axes): # See scipy's documentation in scipy.signal.signaltools - if mode == 'full': + if mode == "full": return cupy.ascontiguousarray(full) - if mode == 'valid': - s1 = [full.shape[a] if a not in axes else s1[a] - s2[a] + 1 - for a in range(full.ndim)] + if mode == "valid": + s1 = [ + full.shape[a] if a not in axes else s1[a] - s2[a] + 1 + for a in range(full.ndim) + ] starts = [(cur - new) // 2 for cur, new in zip(full.shape, s1)] - slices = tuple(slice(start, start + length) - for start, length in zip(starts, s1)) + slices = tuple( + slice(start, start + length) for start, length in zip(starts, s1) + ) return cupy.ascontiguousarray(full[slices]) diff --git a/python/cucim/src/cucim/skimage/_vendored/_texture.py b/python/cucim/src/cucim/skimage/_vendored/_texture.py index 5c18bce71..eb0040133 100644 --- a/python/cucim/src/cucim/skimage/_vendored/_texture.py +++ b/python/cucim/src/cucim/skimage/_vendored/_texture.py @@ -3,8 +3,9 @@ from cupy.cuda import runtime, texture _affine_transform_2d_array_kernel = _core.ElementwiseKernel( - 'U texObj, raw float32 m, uint64 width', 'T transformed_image', - ''' + "U texObj, raw float32 m, uint64 width", + "T transformed_image", + """ float3 pixel = make_float3( (float)(i / width), (float)(i % width), @@ -13,20 +14,21 @@ float x = dot(pixel, make_float3(m[0], m[1], m[2])) + .5f; float y = 
dot(pixel, make_float3(m[3], m[4], m[5])) + .5f; transformed_image = tex2D(texObj, y, x); - ''', - 'cupyx_texture_affine_transformation_2d_array', - preamble=''' + """, + "cupyx_texture_affine_transformation_2d_array", + preamble=""" inline __host__ __device__ float dot(float3 a, float3 b) { return a.x * b.x + a.y * b.y + a.z * b.z; } - ''') + """, +) _affine_transform_3d_array_kernel = _core.ElementwiseKernel( - 'U texObj, raw float32 m, uint64 height, uint64 width', - 'T transformed_volume', - ''' + "U texObj, raw float32 m, uint64 height, uint64 width", + "T transformed_volume", + """ float4 voxel = make_float4( (float)(i / (width * height)), (float)((i % (width * height)) / width), @@ -37,22 +39,20 @@ float y = dot(voxel, make_float4(m[4], m[5], m[6], m[7])) + .5f; float z = dot(voxel, make_float4(m[8], m[9], m[10], m[11])) + .5f; transformed_volume = tex3D(texObj, z, y, x); - ''', - 'cupyx_texture_affine_transformation_3d_array', - preamble=''' + """, + "cupyx_texture_affine_transformation_3d_array", + preamble=""" inline __host__ __device__ float dot(float4 a, float4 b) { return a.x * b.x + a.y * b.y + a.z * b.z + a.w * b.w; } - ''') + """, +) -def _create_texture_object(data, - address_mode: str, - filter_mode: str, - read_mode: str, - border_color=0): - +def _create_texture_object( + data, address_mode: str, filter_mode: str, read_mode: str, border_color=0 +): if cupy.issubdtype(data.dtype, cupy.unsignedinteger): fmt_kind = runtime.cudaChannelFormatKindUnsigned elif cupy.issubdtype(data.dtype, cupy.integer): @@ -60,59 +60,69 @@ def _create_texture_object(data, elif cupy.issubdtype(data.dtype, cupy.floating): fmt_kind = runtime.cudaChannelFormatKindFloat else: - raise ValueError(f'Unsupported data type {data.dtype}') + raise ValueError(f"Unsupported data type {data.dtype}") - if address_mode == 'nearest': + if address_mode == "nearest": address_mode = runtime.cudaAddressModeClamp - elif address_mode == 'constant': + elif address_mode == "constant": address_mode = runtime.cudaAddressModeBorder else: raise ValueError( - f'Unsupported address mode {address_mode} ' - '(supported: constant, nearest)') + f"Unsupported address mode {address_mode} " + "(supported: constant, nearest)" + ) - if filter_mode == 'nearest': + if filter_mode == "nearest": filter_mode = runtime.cudaFilterModePoint - elif filter_mode == 'linear': + elif filter_mode == "linear": filter_mode = runtime.cudaFilterModeLinear else: raise ValueError( - f'Unsupported filter mode {filter_mode} ' - f'(supported: nearest, linear)') + f"Unsupported filter mode {filter_mode} " + f"(supported: nearest, linear)" + ) - if read_mode == 'element_type': + if read_mode == "element_type": read_mode = runtime.cudaReadModeElementType - elif read_mode == 'normalized_float': + elif read_mode == "normalized_float": read_mode = runtime.cudaReadModeNormalizedFloat else: raise ValueError( - f'Unsupported read mode {read_mode} ' - '(supported: element_type, normalized_float)') + f"Unsupported read mode {read_mode} " + "(supported: element_type, normalized_float)" + ) texture_fmt = texture.ChannelFormatDescriptor( - data.itemsize * 8, 0, 0, 0, fmt_kind) + data.itemsize * 8, 0, 0, 0, fmt_kind + ) # CUDAArray: last dimension is the fastest changing dimension array = texture.CUDAarray(texture_fmt, *data.shape[::-1]) res_desc = texture.ResourceDescriptor( - runtime.cudaResourceTypeArray, cuArr=array) + runtime.cudaResourceTypeArray, cuArr=array + ) # TODO(the-lay): each dimension can have a different addressing mode # TODO(the-lay): border 
color/value can be defined for up to 4 channels tex_desc = texture.TextureDescriptor( - (address_mode, ) * data.ndim, filter_mode, read_mode, - borderColors=(border_color, )) + (address_mode,) * data.ndim, + filter_mode, + read_mode, + borderColors=(border_color,), + ) tex_obj = texture.TextureObject(res_desc, tex_desc) array.copy_from(data) return tex_obj -def affine_transformation(data, - transformation_matrix, - output_shape=None, - output=None, - interpolation: str = 'linear', - mode: str = 'constant', - border_value=0): +def affine_transformation( + data, + transformation_matrix, + output_shape=None, + output=None, + interpolation: str = "linear", + mode: str = "constant", + border_value=0, +): """ Apply an affine transformation. @@ -146,27 +156,33 @@ def affine_transformation(data, ndim = data.ndim if (ndim < 2) or (ndim > 3): raise ValueError( - 'Texture memory affine transformation is defined only for ' - '2D and 3D arrays without channel dimension.') + "Texture memory affine transformation is defined only for " + "2D and 3D arrays without channel dimension." + ) dtype = data.dtype if dtype != cupy.float32: - raise ValueError(f'Texture memory affine transformation is available ' - f'only for float32 data type (not {dtype})') + raise ValueError( + f"Texture memory affine transformation is available " + f"only for float32 data type (not {dtype})" + ) - if interpolation not in ['linear', 'nearest']: + if interpolation not in ["linear", "nearest"]: raise ValueError( - f'Unsupported interpolation {interpolation} ' - f'(supported: linear, nearest)') + f"Unsupported interpolation {interpolation} " + f"(supported: linear, nearest)" + ) if transformation_matrix.shape != (ndim + 1, ndim + 1): - raise ValueError('Matrix must be have shape (ndim + 1, ndim + 1)') + raise ValueError("Matrix must have shape (ndim + 1, ndim + 1)") - texture_object = _create_texture_object(data, - address_mode=mode, - filter_mode=interpolation, - read_mode='element_type', - border_color=border_value) + texture_object = _create_texture_object( + data, + address_mode=mode, + filter_mode=interpolation, + read_mode="element_type", + border_color=border_value, + ) if ndim == 2: kernel = _affine_transform_2d_array_kernel @@ -180,15 +196,17 @@ def affine_transformation(data, output = cupy.zeros(output_shape, dtype=dtype) elif isinstance(output, (type, cupy.dtype)): if output != cupy.float32: - raise ValueError(f'Texture memory affine transformation is ' - f'available only for float32 data type (not ' - f'{output})') + raise ValueError( + f"Texture memory affine transformation is " + f"available only for float32 data type (not " + f"{output})" + ) output = cupy.zeros(output_shape, dtype=output) elif isinstance(output, cupy.ndarray): if output.shape != output_shape: - raise ValueError('Output shapes do not match') + raise ValueError("Output shapes do not match") else: - raise ValueError('Output must be None, cupy.ndarray or cupy.dtype') + raise ValueError("Output must be None, cupy.ndarray or cupy.dtype") kernel(texture_object, transformation_matrix, *output_shape[1:], output) return output diff --git a/python/cucim/src/cucim/skimage/_vendored/ndimage.py b/python/cucim/src/cucim/skimage/_vendored/ndimage.py index 31d8b20d0..5acc068e0 100644 --- a/python/cucim/src/cucim/skimage/_vendored/ndimage.py +++ b/python/cucim/src/cucim/skimage/_vendored/ndimage.py @@ -16,11 +16,7 @@ from cucim.skimage._vendored._ndimage_filters import correlate1d # NOQA from cucim.skimage._vendored._ndimage_filters import gaussian_filter # NOQA
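
The regrouped imports in `ndimage.py` below only change formatting; the public vendored namespace is unchanged. A quick sketch, assuming the vendored filters keep their `scipy.ndimage` signatures:

```python
import cupy as cp
from cucim.skimage._vendored import ndimage as ndi
from cucim.skimage._vendored.ndimage import gaussian_gradient_magnitude

# Grouped and single-name imports resolve to the same vendored objects.
assert ndi.gaussian_gradient_magnitude is gaussian_gradient_magnitude

x = cp.random.rand(32, 32)
g = ndi.gaussian_gradient_magnitude(x, sigma=2.0)
```
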
from cucim.skimage._vendored._ndimage_filters import gaussian_filter1d # NOQA -from cucim.skimage._vendored._ndimage_filters import \ - gaussian_gradient_magnitude # NOQA from cucim.skimage._vendored._ndimage_filters import gaussian_laplace # NOQA -from cucim.skimage._vendored._ndimage_filters import \ - generic_gradient_magnitude # NOQA from cucim.skimage._vendored._ndimage_filters import generic_laplace # NOQA from cucim.skimage._vendored._ndimage_filters import laplace # NOQA from cucim.skimage._vendored._ndimage_filters import maximum_filter # NOQA @@ -34,42 +30,42 @@ from cucim.skimage._vendored._ndimage_filters import sobel # NOQA from cucim.skimage._vendored._ndimage_filters import uniform_filter # NOQA from cucim.skimage._vendored._ndimage_filters import uniform_filter1d # NOQA +from cucim.skimage._vendored._ndimage_filters import ( # NOQA + gaussian_gradient_magnitude, + generic_gradient_magnitude, +) + # interpolation -from cucim.skimage._vendored._ndimage_interpolation import \ - affine_transform # NOQA -from cucim.skimage._vendored._ndimage_interpolation import \ - map_coordinates # NOQA from cucim.skimage._vendored._ndimage_interpolation import rotate # NOQA from cucim.skimage._vendored._ndimage_interpolation import shift # NOQA from cucim.skimage._vendored._ndimage_interpolation import spline_filter # NOQA -from cucim.skimage._vendored._ndimage_interpolation import \ - spline_filter1d # NOQA from cucim.skimage._vendored._ndimage_interpolation import zoom # NOQA +from cucim.skimage._vendored._ndimage_interpolation import ( # NOQA + affine_transform, + map_coordinates, + spline_filter1d, +) + # morphology from cucim.skimage._vendored._ndimage_morphology import binary_closing # NOQA from cucim.skimage._vendored._ndimage_morphology import binary_dilation # NOQA from cucim.skimage._vendored._ndimage_morphology import binary_erosion # NOQA -from cucim.skimage._vendored._ndimage_morphology import \ - binary_fill_holes # NOQA -from cucim.skimage._vendored._ndimage_morphology import \ - binary_hit_or_miss # NOQA from cucim.skimage._vendored._ndimage_morphology import binary_opening # NOQA -from cucim.skimage._vendored._ndimage_morphology import \ - binary_propagation # NOQA from cucim.skimage._vendored._ndimage_morphology import black_tophat # NOQA -from cucim.skimage._vendored._ndimage_morphology import \ - generate_binary_structure # NOQA from cucim.skimage._vendored._ndimage_morphology import grey_closing # NOQA from cucim.skimage._vendored._ndimage_morphology import grey_dilation # NOQA from cucim.skimage._vendored._ndimage_morphology import grey_erosion # NOQA from cucim.skimage._vendored._ndimage_morphology import grey_opening # NOQA -from cucim.skimage._vendored._ndimage_morphology import \ - iterate_structure # NOQA -from cucim.skimage._vendored._ndimage_morphology import \ - morphological_gradient # NOQA -from cucim.skimage._vendored._ndimage_morphology import \ - morphological_laplace # NOQA from cucim.skimage._vendored._ndimage_morphology import white_tophat # NOQA +from cucim.skimage._vendored._ndimage_morphology import ( # NOQA + binary_fill_holes, + binary_hit_or_miss, + binary_propagation, + generate_binary_structure, + iterate_structure, + morphological_gradient, + morphological_laplace, +) # Import the rest of the cupyx.scipy.ndimage API here diff --git a/python/cucim/src/cucim/skimage/_vendored/pad.py b/python/cucim/src/cucim/skimage/_vendored/pad.py index 3d744b431..1f6c81217 100644 --- a/python/cucim/src/cucim/skimage/_vendored/pad.py +++ 
b/python/cucim/src/cucim/skimage/_vendored/pad.py @@ -15,8 +15,7 @@ def _round_if_needed(arr, dtype): - """Rounds arr inplace if the destination dtype is an integer. - """ + """Rounds arr inplace if the destination dtype is an integer.""" if cupy.issubdtype(dtype, cupy.integer): arr.round(out=arr) # bug in round so use rint (cupy/cupy#2330) @@ -72,7 +71,7 @@ def _pad_simple(array, pad_width, fill_value=None): left + size + right for size, (left, right) in zip(array.shape, pad_width) ) - order = 'F' if array.flags.fnc else 'C' # Fortran and not also C-order + order = "F" if array.flags.fnc else "C" # Fortran and not also C-order padded = cupy.empty(new_shape, dtype=array.dtype, order=order) if fill_value is not None: @@ -89,8 +88,7 @@ def _pad_simple(array, pad_width, fill_value=None): def _set_pad_area(padded, axis, width_pair, value_pair): - """Set an empty-padded area in given dimension. - """ + """Set an empty-padded area in given dimension.""" left_slice = _slice_at_axis(slice(None, width_pair[0]), axis) padded[left_slice] = value_pair[0] @@ -243,7 +241,7 @@ def _set_reflect_both(padded, axis, width_pair, method, include_edge=False): left_slice = _slice_at_axis(slice(start, stop, -1), axis) left_chunk = padded[left_slice] - if method == 'odd': + if method == "odd": # Negate chunk and align with edge edge_slice = _slice_at_axis(slice(left_pad, left_pad + 1), axis) left_chunk = 2 * padded[edge_slice] - left_chunk @@ -266,11 +264,9 @@ def _set_reflect_both(padded, axis, width_pair, method, include_edge=False): right_slice = _slice_at_axis(slice(start, stop, -1), axis) right_chunk = padded[right_slice] - if method == 'odd': + if method == "odd": # Negate chunk and align with edge - edge_slice = _slice_at_axis( - slice(-right_pad - 1, -right_pad), axis - ) + edge_slice = _slice_at_axis(slice(-right_pad - 1, -right_pad), axis) right_chunk = 2 * padded[edge_slice] - right_chunk # Insert chunk into padded area @@ -417,17 +413,17 @@ def _use_elementwise_kernel(arr, mode, kwargs): use_elementwise = False if arr.ndim == 0 or arr.size == 0: return False - if mode in ('edge', 'wrap'): + if mode in ("edge", "wrap"): use_elementwise = True # elif mode == 'constant': # # Only a uniform constant is supported in the Elementwise kernel. # # A per-axis constant is not currently supported. # return isinstance(kwargs.get('constant_values', 0), numbers.Number) - elif mode in ('symmetric', 'reflect'): + elif mode in ("symmetric", "reflect"): # only the default 'even' reflect type is supported - use_elementwise = kwargs.get('reflect_type', 'even') == 'even' + use_elementwise = kwargs.get("reflect_type", "even") == "even" if use_elementwise: - if (arr.ndim > 2 and (arr.flags.fnc and arr.nbytes > 5_000_000)): + if arr.ndim > 2 and (arr.flags.fnc and arr.nbytes > 5_000_000): # Empirically found slower performance for large Fortran-ordered # arrays with ndim > 2. return False @@ -440,7 +436,7 @@ def _use_elementwise_kernel(arr, mode, kwargs): # @array_function_dispatch(_pad_dispatcher, module='numpy') -def pad(array, pad_width, mode='constant', **kwargs): +def pad(array, pad_width, mode="constant", **kwargs): """Pads an array with specified widths and values. 
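
Since the vendored `pad` mirrors `numpy.pad` on CuPy arrays, here is a short sketch of a few modes touched in this hunk; the commented outputs are what `numpy.pad` produces for the same calls:

```python
import cupy as cp
from cucim.skimage._vendored.pad import pad

x = cp.arange(4)  # [0 1 2 3]
print(pad(x, 2, mode="reflect"))                      # [2 1 0 1 2 3 2 1]
print(pad(x, (1, 2), mode="edge"))                    # [0 0 1 2 3 3 3]
print(pad(x, 1, mode="constant", constant_values=9))  # [9 0 1 2 3 9]
```
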
Args: @@ -621,8 +617,8 @@ def pad(array, pad_width, mode='constant', **kwargs): else: pad_width = numpy.asarray(pad_width) - if not pad_width.dtype.kind == 'i': - raise TypeError('`pad_width` must be of integral type.') + if not pad_width.dtype.kind == "i": + raise TypeError("`pad_width` must be of integral type.") # Broadcast to shape (array.ndim, 2) pad_width = _as_pairs(pad_width, array.ndim, as_index=True) @@ -652,17 +648,17 @@ def pad(array, pad_width, mode='constant', **kwargs): # Make sure that no unsupported keywords were passed for the current mode allowed_kwargs = { - 'empty': [], - 'edge': [], - 'wrap': [], - 'constant': ['constant_values'], - 'linear_ramp': ['end_values'], - 'maximum': ['stat_length'], - 'mean': ['stat_length'], + "empty": [], + "edge": [], + "wrap": [], + "constant": ["constant_values"], + "linear_ramp": ["end_values"], + "maximum": ["stat_length"], + "mean": ["stat_length"], # 'median': ['stat_length'], - 'minimum': ['stat_length'], - 'reflect': ['reflect_type'], - 'symmetric': ['reflect_type'], + "minimum": ["stat_length"], + "reflect": ["reflect_type"], + "symmetric": ["reflect_type"], } try: unsupported_kwargs = set(kwargs) - set(allowed_kwargs[mode]) @@ -679,8 +675,8 @@ def pad(array, pad_width, mode='constant', **kwargs): # import here to avoid circular import from cucim.skimage._vendored.pad_elementwise import _get_pad_kernel - if mode == 'reflect' and min(array.shape) > 1: - mode = 'reflect_no_singleton_dim' + if mode == "reflect" and min(array.shape) > 1: + mode = "reflect_no_singleton_dim" if not array.flags.forc: # make non-contiguous input C-contiguous @@ -691,11 +687,14 @@ def pad(array, pad_width, mode='constant', **kwargs): left + size + right for size, (left, right) in zip(array.shape, pad_width) ) - order = 'F' if array.flags.fnc else 'C' # Fortran and not also C-order + order = "F" if array.flags.fnc else "C" # Fortran and not also C-order padded = cupy.empty(new_shape, dtype=array.dtype, order=order) - (int_type, np_type) = (('int', cupy.int32) if padded.size < (1 << 31) - else ('ptrdiff_t', cupy.intp)) + (int_type, np_type) = ( + ("int", cupy.int32) + if padded.size < (1 << 31) + else ("ptrdiff_t", cupy.intp) + ) kern = _get_pad_kernel( pad_starts=tuple(p[0] for p in pad_width), mode=mode, @@ -703,25 +702,28 @@ def pad(array, pad_width, mode='constant', **kwargs): order=order, ) # pad_width must be C-contiguous - if mode == 'constant': + if mode == "constant": # `_use_elementwise_kernel` excludes cases with non-scalar cval - cval = float(kwargs.get('constant_values', 0)) + cval = float(kwargs.get("constant_values", 0)) kern(array, cval, padded, size=padded.size) else: kern(array, padded, size=padded.size) return padded - if mode == 'constant': - values = kwargs.get('constant_values', 0) - if isinstance(values, numbers.Number) and values == 0 and ( - array.ndim == 1 or array.size < 4e6): + if mode == "constant": + values = kwargs.get("constant_values", 0) + if ( + isinstance(values, numbers.Number) + and values == 0 + and (array.ndim == 1 or array.size < 4e6) + ): # faster path for 1d arrays or small n-dimensional arrays return _pad_simple(array, pad_width, 0)[0] stat_functions = { - 'maximum': cupy.max, - 'minimum': cupy.min, - 'mean': cupy.mean, + "maximum": cupy.max, + "minimum": cupy.min, + "mean": cupy.mean, # 'median': cupy.median, } @@ -732,13 +734,13 @@ def pad(array, pad_width, mode='constant', **kwargs): # (zipping may be more readable than using enumerate) axes = range(padded.ndim) - if mode == 'constant': + if mode == "constant": 
values = _as_pairs(values, padded.ndim) for axis, width_pair, value_pair in zip(axes, pad_width, values): roi = _view_roi(padded, original_area_slice, axis) _set_pad_area(roi, axis, width_pair, value_pair) - elif mode == 'empty': + elif mode == "empty": pass # Do nothing as _pad_simple already returned the correct result elif array.size == 0: @@ -754,14 +756,14 @@ def pad(array, pad_width, mode='constant', **kwargs): # passed, don't need to do anything more as _pad_simple already # returned the correct result - elif mode == 'edge': + elif mode == "edge": for axis, width_pair in zip(axes, pad_width): roi = _view_roi(padded, original_area_slice, axis) edge_pair = _get_edges(roi, axis, width_pair) _set_pad_area(roi, axis, width_pair, edge_pair) - elif mode == 'linear_ramp': - end_values = kwargs.get('end_values', 0) + elif mode == "linear_ramp": + end_values = kwargs.get("end_values", 0) end_values = _as_pairs(end_values, padded.ndim) for axis, width_pair, value_pair in zip(axes, pad_width, end_values): roi = _view_roi(padded, original_area_slice, axis) @@ -770,16 +772,16 @@ def pad(array, pad_width, mode='constant', **kwargs): elif mode in stat_functions: func = stat_functions[mode] - length = kwargs.get('stat_length', None) + length = kwargs.get("stat_length", None) length = _as_pairs(length, padded.ndim, as_index=True) for axis, width_pair, length_pair in zip(axes, pad_width, length): roi = _view_roi(padded, original_area_slice, axis) stat_pair = _get_stats(roi, axis, width_pair, length_pair, func) _set_pad_area(roi, axis, width_pair, stat_pair) - elif mode in {'reflect', 'symmetric'}: - method = kwargs.get('reflect_type', 'even') - include_edge = True if mode == 'symmetric' else False + elif mode in {"reflect", "symmetric"}: + method = kwargs.get("reflect_type", "even") + include_edge = True if mode == "symmetric" else False for axis, (left_index, right_index) in zip(axes, pad_width): if array.shape[axis] == 1 and (left_index > 0 or right_index > 0): # Extending singleton dimension for 'reflect' is legacy @@ -799,7 +801,7 @@ def pad(array, pad_width, mode='constant', **kwargs): roi, axis, (left_index, right_index), method, include_edge ) - elif mode == 'wrap': + elif mode == "wrap": for axis, (left_index, right_index) in zip(axes, pad_width): roi = _view_roi(padded, original_area_slice, axis) while left_index > 0 or right_index > 0: diff --git a/python/cucim/src/cucim/skimage/_vendored/pad_elementwise.py b/python/cucim/src/cucim/skimage/_vendored/pad_elementwise.py index 9b58c2fe0..62f3ba11b 100644 --- a/python/cucim/src/cucim/skimage/_vendored/pad_elementwise.py +++ b/python/cucim/src/cucim/skimage/_vendored/pad_elementwise.py @@ -2,24 +2,24 @@ def _pad_boundary_ops(mode, var_name, size, int_t="int", no_singleton=False): - T = 'int' if int_t == 'int' else 'long long' - min_func = 'min' - max_func = 'max' - if mode == 'constant': - ops = f''' + T = "int" if int_t == "int" else "long long" + min_func = "min" + max_func = "max" + if mode == "constant": + ops = f""" if (({var_name} < 0) || {var_name} >= {size}) {{ {var_name} = -1; - }}''' - elif mode == 'symmetric': - ops = f''' + }}""" + elif mode == "symmetric": + ops = f""" if ({var_name} < 0) {{ {var_name} = - 1 -{var_name}; }} {var_name} %= {size} * 2; {var_name} = {min_func}({var_name}, 2 * {size} - 1 - {var_name}); - ''' - elif mode == 'reflect': - ops = f''' + """ + elif mode == "reflect": + ops = f""" if ({size} == 1) {{ {var_name} = 0; }} else {{ @@ -31,10 +31,10 @@ def _pad_boundary_ops(mode, var_name, size, int_t="int", 
no_singleton=False): {var_name} = {min_func}({var_name}, 2 * {size} - 2 - {var_name}); }} - }}''' # noqa - elif mode == 'reflect_no_singleton_dim': + }}""" # noqa + elif mode == "reflect_no_singleton_dim": # the same as reflect, but without the extra `{size} == 1` check - ops = f''' + ops = f""" if ({var_name} < 0) {{ {var_name} = -{var_name}; }} @@ -42,25 +42,25 @@ def _pad_boundary_ops(mode, var_name, size, int_t="int", no_singleton=False): {var_name} = 1 + ({var_name} - 1) % (({size} - 1) * 2); {var_name} = {min_func}({var_name}, 2 * {size} - 2 - {var_name}); }} - ''' - elif mode == 'edge': - ops = f''' + """ + elif mode == "edge": + ops = f""" {var_name} = {min_func}( {max_func}(static_cast<{T}>({var_name}), static_cast<{T}>(0)), static_cast<{T}>({size} - 1)); - ''' - elif mode == 'wrap': - ops = f''' + """ + elif mode == "wrap": + ops = f""" {var_name} %= {size}; if ({var_name} < 0) {{ {var_name} += {size}; }} - ''' + """ return ops + "\n" def _generate_size_vars( - ndim, arr_name='arr', size_prefix='size', int_type='int' + ndim, arr_name="arr", size_prefix="size", int_type="int" ): """Store shape of a raw array into individual variables. @@ -71,13 +71,15 @@ def _generate_size_vars( int size_1 = arr.shape()[1]; int size_2 = arr.shape()[2]; """ - set_size_vars = [f'{int_type} {size_prefix}_{i} = {arr_name}.shape()[{i}];' - for i in range(ndim)] - return '\n'.join(set_size_vars) + '\n' + set_size_vars = [ + f"{int_type} {size_prefix}_{i} = {arr_name}.shape()[{i}];" + for i in range(ndim) + ] + return "\n".join(set_size_vars) + "\n" def _generate_stride_vars( - ndim, arr_name='arr', size_prefix='stride', int_type='int' + ndim, arr_name="arr", size_prefix="stride", int_type="int" ): """Store stride (in bytes) of a raw array into individual variables. @@ -89,14 +91,18 @@ def _generate_stride_vars( int stride_2 = arr.strides()[2]; """ set_size_vars = [ - f'{int_type} {size_prefix}_{i} = {arr_name}.strides()[{i}];' + f"{int_type} {size_prefix}_{i} = {arr_name}.strides()[{i}];" for i in range(ndim) ] - return '\n'.join(set_size_vars) + '\n' + return "\n".join(set_size_vars) + "\n" def _generate_indices_ops( - ndim, size_prefix='size', int_type='int', index_prefix='ind', order='C', + ndim, + size_prefix="size", + int_type="int", + index_prefix="ind", + order="C", ): """Generate indices based existing variables. @@ -110,70 +116,77 @@ def _generate_indices_ops( int ind_1 = _i % size_1; _i /= size_1; int ind_0 = _i; """ - if order == 'C': + if order == "C": _range = range(ndim - 1, 0, -1) idx_largest_stride = 0 - elif order == 'F': + elif order == "F": _range = range(ndim - 1) idx_largest_stride = ndim - 1 else: raise ValueError(f"Unknown order: {order}. Must be one of {'C', 'F'}.") - body = [f'{int_type} {index_prefix}_{j} = _i % {size_prefix}_{j}; _i /= {size_prefix}_{j};' # noqa - for j in _range] - body = '\n'.join(body) - code = f'{int_type} _i = i;\n' - code += body + '\n' - code += f'{int_type} {index_prefix}_{idx_largest_stride} = _i;\n' + body = [ + f"{int_type} {index_prefix}_{j} = _i % {size_prefix}_{j}; _i /= {size_prefix}_{j};" # noqa + for j in _range + ] + body = "\n".join(body) + code = f"{int_type} _i = i;\n" + code += body + "\n" + code += f"{int_type} {index_prefix}_{idx_largest_stride} = _i;\n" return code -def _gen_raveled(ndim, stride_prefix='stride', index_prefix='i', order=None): +def _gen_raveled(ndim, stride_prefix="stride", index_prefix="i", order=None): """Generate raveled index for c-ordered memory layout For index_prefix='i', the indices are (i_0, i_1, ....) 
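
The `_generate_indices_ops` snippet shown above emits a device-side unravel of the flat index; a hypothetical pure-Python mirror makes the arithmetic concrete:

```python
# Hypothetical mirror of the emitted C-order code for shape (2, 3, 4):
#   int _i = i;
#   int ind_2 = _i % size_2; _i /= size_2;
#   int ind_1 = _i % size_1; _i /= size_1;
#   int ind_0 = _i;
def unravel_c(i, shape):
    idx = []
    for size in reversed(shape[1:]):
        idx.append(i % size)
        i //= size
    idx.append(i)
    return tuple(reversed(idx))


assert unravel_c(17, (2, 3, 4)) == (1, 1, 1)  # 17 == 1*12 + 1*4 + 1
```
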
For stride_prefix='stride', the stride is (stride_0, stride_1, ....) """ - return ' + '.join( - f'{stride_prefix}_{j} * {index_prefix}_{j}' for j in range(ndim) + return " + ".join( + f"{stride_prefix}_{j} * {index_prefix}_{j}" for j in range(ndim) ) -def _get_pad_kernel_code(pad_starts, int_type='int', mode='edge', order='C'): +def _get_pad_kernel_code(pad_starts, int_type="int", mode="edge", order="C"): # variables storing shape of the output array ndim = len(pad_starts) - out_size_prefix = 'shape' + out_size_prefix = "shape" operation = _generate_size_vars( - ndim, arr_name='out', size_prefix=out_size_prefix, int_type=int_type + ndim, arr_name="out", size_prefix=out_size_prefix, int_type=int_type ) # variables storing shape of the input array - in_size_prefix = 'ishape' - in_stride_prefix = 'istride' + in_size_prefix = "ishape" + in_stride_prefix = "istride" operation += _generate_size_vars( - ndim, arr_name='arr', size_prefix=in_size_prefix, int_type=int_type + ndim, arr_name="arr", size_prefix=in_size_prefix, int_type=int_type ) operation += _generate_stride_vars( - ndim, arr_name='arr', size_prefix=in_stride_prefix, int_type=int_type + ndim, arr_name="arr", size_prefix=in_stride_prefix, int_type=int_type ) # unraveled indices into the output array - out_index_prefix = 'oi' + out_index_prefix = "oi" # Note: Regardless of actual memory layout, need order='C' here to match # the behavior of the index raveling used by ElementwiseKernel. operation += _generate_indices_ops( - ndim, size_prefix=out_size_prefix, int_type=int_type, - index_prefix=out_index_prefix, order='C' + ndim, + size_prefix=out_size_prefix, + int_type=int_type, + index_prefix=out_index_prefix, + order="C", ) # compute unraveled indices into the input array # (i_0, i_1, ...) - in_index_prefix = 'i' - operation += '\n'.join( - [f'{int_type} {in_index_prefix}_{j} = {out_index_prefix}_{j} - {pad_starts[j]};' # noqa - for j in range(ndim)] + in_index_prefix = "i" + operation += "\n".join( + [ + f"{int_type} {in_index_prefix}_{j} = {out_index_prefix}_{j} - {pad_starts[j]};" # noqa + for j in range(ndim) + ] ) - operation += '\n' - input_indices = tuple(f'{in_index_prefix}_{j}' for j in range(ndim)) + operation += "\n" + input_indices = tuple(f"{in_index_prefix}_{j}" for j in range(ndim)) # impose boundary condition @@ -202,7 +215,9 @@ def _get_pad_kernel_code(pad_starts, int_type='int', mode='edge', order='C'): operation += "}\n" raveled_idx = _gen_raveled( - ndim, stride_prefix=in_stride_prefix, index_prefix=in_index_prefix, + ndim, + stride_prefix=in_stride_prefix, + index_prefix=in_index_prefix, order=order, ) operation += f""" @@ -214,9 +229,9 @@ def _get_pad_kernel_code(pad_starts, int_type='int', mode='edge', order='C'): @cupy._util.memoize(for_each_device=True) -def _get_pad_kernel(pad_starts, int_type='int', mode='edge', order='C'): +def _get_pad_kernel(pad_starts, int_type="int", mode="edge", order="C"): in_params = "raw F arr" - if mode == 'constant': + if mode == "constant": in_params += ", float64 cval" kernel_name = f"pad_{len(pad_starts)}d_order{order}_{mode}" @@ -227,4 +242,5 @@ def _get_pad_kernel(pad_starts, int_type='int', mode='edge', order='C'): in_params=in_params, out_params="raw F out", operation=_get_pad_kernel_code(pad_starts, int_type, mode, order), - name=kernel_name) + name=kernel_name, + ) diff --git a/python/cucim/src/cucim/skimage/_vendored/signaltools.py b/python/cucim/src/cucim/skimage/_vendored/signaltools.py index 3184055fa..4ea5e85cb 100644 --- 
a/python/cucim/src/cucim/skimage/_vendored/signaltools.py +++ b/python/cucim/src/cucim/skimage/_vendored/signaltools.py @@ -20,7 +20,7 @@ _prod = _misc.prod -def convolve(in1, in2, mode='full', method='auto'): +def convolve(in1, in2, mode="full", method="auto"): """Convolve two N-dimensional arrays. Convolve ``in1`` and ``in2``, with the output size determined by the @@ -67,7 +67,7 @@ def convolve(in1, in2, mode='full', method='auto'): return _correlate(in1, in2, mode, method, True) -def correlate(in1, in2, mode='full', method='auto'): +def correlate(in1, in2, mode="full", method="auto"): """Cross-correlate two N-dimensional arrays. Cross-correlate ``in1`` and ``in2``, with the output size determined by the @@ -114,19 +114,20 @@ def correlate(in1, in2, mode='full', method='auto'): return _correlate(in1, in2, mode, method, False) -def _correlate(in1, in2, mode='full', method='auto', convolution=False): +def _correlate(in1, in2, mode="full", method="auto", convolution=False): quick_out = _st_core._check_conv_inputs(in1, in2, mode, convolution) if quick_out is not None: return quick_out - if method not in ('auto', 'direct', 'fft'): + if method not in ("auto", "direct", "fft"): raise ValueError('acceptable methods are "auto", "direct", or "fft"') - if method == 'auto': + if method == "auto": method = choose_conv_method(in1, in2, mode=mode) - if method == 'direct': - return _st_core._direct_correlate(in1, in2, mode, in1.dtype, - convolution) + if method == "direct": + return _st_core._direct_correlate( + in1, in2, mode, in1.dtype, convolution + ) # if method == 'fft': inputs_swapped = _st_core._inputs_swap_needed(mode, in1.shape, in2.shape) @@ -136,7 +137,7 @@ def _correlate(in1, in2, mode='full', method='auto', convolution=False): in2 = _st_core._reverse_and_conj(in2) out = fftconvolve(in1, in2, mode) result_type = cupy.result_type(in1, in2) - if result_type.kind in 'ui': + if result_type.kind in "ui": out = out.round() out = out.astype(result_type, copy=False) if not convolution and inputs_swapped: @@ -144,7 +145,7 @@ def _correlate(in1, in2, mode='full', method='auto', convolution=False): return out -def fftconvolve(in1, in2, mode='full', axes=None): +def fftconvolve(in1, in2, mode="full", axes=None): """Convolve two N-dimensional arrays using FFT. Convolve ``in1`` and ``in2`` using the fast Fourier transform method, with @@ -185,8 +186,10 @@ def fftconvolve(in1, in2, mode='full', axes=None): if out is not None: return out in1, in2, axes = _st_core._init_freq_conv_axes(in1, in2, mode, axes, False) - shape = [max(x1, x2) if a not in axes else x1 + x2 - 1 - for a, (x1, x2) in enumerate(zip(in1.shape, in2.shape))] + shape = [ + max(x1, x2) if a not in axes else x1 + x2 - 1 + for a, (x1, x2) in enumerate(zip(in1.shape, in2.shape)) + ] out = _st_core._freq_domain_conv(in1, in2, axes, shape, calc_fast_len=True) return _st_core._apply_conv_mode(out, in1.shape, in2.shape, mode, axes) @@ -323,7 +326,7 @@ def _timeit_fast(stmt="pass", setup="pass", repeat=3): # determine number of calls per rep so total time for 1 rep >= 5 ms x = 0 for p in range(0, 10): - number = 10 ** p + number = 10**p x = timer.timeit(number) # seconds if x >= 5e-3 / 10: # 5 ms for final test, 1/10th that for this one break @@ -342,6 +345,7 @@ def _timeit_fast(stmt="pass", setup="pass", repeat=3): # TODO: grlee77: tune this for CUDA when measure=False rather than falling # back to the choices made by SciPy + def choose_conv_method(in1, in2, mode="full", measure=False): """ Find the fastest convolution/correlation method. 
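The `fft` path in `_correlate` above depends on the identity that cross-correlation equals convolution with the reversed, conjugated second input (hence `_reverse_and_conj`). A CPU check of that identity with SciPy, which the vendored CuPy version mirrors:

```python
import numpy as np
from scipy import signal

rng = np.random.default_rng(0)
in1 = rng.standard_normal(32)
in2 = rng.standard_normal(7)

# correlate(in1, in2) == convolve(in1, reversed(conj(in2)))
direct = signal.correlate(in1, in2, mode="full", method="direct")
via_fft = signal.convolve(in1, in2[::-1].conj(), mode="full", method="fft")
assert np.allclose(direct, via_fft)
```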
@@ -480,7 +484,7 @@ def choose_conv_method(in1, in2, mode="full", measure=False): return "direct" -def convolve2d(in1, in2, mode='full', boundary='fill', fillvalue=0): +def convolve2d(in1, in2, mode="full", boundary="fill", fillvalue=0): """Convolve two 2-dimensional arrays. Convolve ``in1`` and ``in2`` with output size determined by ``mode``, and @@ -522,7 +526,7 @@ def convolve2d(in1, in2, mode='full', boundary='fill', fillvalue=0): return _correlate2d(in1, in2, mode, boundary, fillvalue, True) -def correlate2d(in1, in2, mode='full', boundary='fill', fillvalue=0): +def correlate2d(in1, in2, mode="full", boundary="fill", fillvalue=0): """Cross-correlate two 2-dimensional arrays. Cross correlate ``in1`` and ``in2`` with output size determined by @@ -570,23 +574,39 @@ def correlate2d(in1, in2, mode='full', boundary='fill', fillvalue=0): def _correlate2d(in1, in2, mode, boundary, fillvalue, convolution=False): if not (in1.ndim == in2.ndim == 2): - raise ValueError('{} inputs must both be 2-D arrays'.format( - 'convolve2d' if convolution else 'correlate2d')) + raise ValueError( + "{} inputs must both be 2-D arrays".format( + "convolve2d" if convolution else "correlate2d" + ) + ) _boundaries = { - 'fill': 'constant', 'pad': 'constant', - 'wrap': 'wrap', 'circular': 'wrap', - 'symm': 'reflect', 'symmetric': 'reflect', + "fill": "constant", + "pad": "constant", + "wrap": "wrap", + "circular": "wrap", + "symm": "reflect", + "symmetric": "reflect", } boundary = _boundaries.get(boundary) if boundary is None: - raise ValueError('Acceptable boundary flags are "fill" (or "pad"), ' - '"circular" (or "wrap"), and ' - '"symmetric" (or "symm").') + raise ValueError( + 'Acceptable boundary flags are "fill" (or "pad"), ' + '"circular" (or "wrap"), and ' + '"symmetric" (or "symm").' + ) quick_out = _st_core._check_conv_inputs(in1, in2, mode, convolution) if quick_out is not None: return quick_out - return _st_core._direct_correlate(in1, in2, mode, in1.dtype, convolution, - boundary, fillvalue, not convolution) + return _st_core._direct_correlate( + in1, + in2, + mode, + in1.dtype, + convolution, + boundary, + fillvalue, + not convolution, + ) def wiener(im, mysize=None, noise=None): @@ -608,7 +628,7 @@ def wiener(im, mysize=None, noise=None): .. seealso:: :func:`scipy.signal.wiener` """ - if im.dtype.kind == 'c': + if im.dtype.kind == "c": # TODO: adding support for complex types requires ndimage filters # to support complex types (which they could easily if not for the # scipy compatibility requirement of forbidding complex and using @@ -616,14 +636,14 @@ def wiener(im, mysize=None, noise=None): raise TypeError("complex types not currently supported") if mysize is None: mysize = 3 - mysize = _fix_sequence_arg(mysize, im.ndim, 'mysize', int) + mysize = _fix_sequence_arg(mysize, im.ndim, "mysize", int) im = im.astype(float, copy=False) # Estimate the local mean - local_mean = uniform_filter(im, mysize, mode='constant') + local_mean = uniform_filter(im, mysize, mode="constant") # Estimate the local variance - local_var = uniform_filter(im * im, mysize, mode='constant') + local_var = uniform_filter(im * im, mysize, mode="constant") local_var -= local_mean * local_mean # Estimate the noise power if needed. @@ -660,13 +680,15 @@ def order_filter(a, domain, rank): .. seealso:: :func:`cupyx.scipy.ndimage.rank_filter` .. 
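Only the local-statistics half of `wiener` is visible in the hunk above. A NumPy sketch of the whole pipeline, assuming the port keeps `scipy.signal.wiener`'s update rule (`wiener_sketch` is an illustrative name, not library API):

```python
import numpy as np
from scipy.ndimage import uniform_filter

def wiener_sketch(im, size=3, noise=None):
    # Local mean and variance via constant-mode box filters, as above.
    local_mean = uniform_filter(im, size, mode="constant")
    local_var = uniform_filter(im * im, size, mode="constant")
    local_var -= local_mean * local_mean
    if noise is None:
        # Assumed noise estimate: mean of the local variances (SciPy's choice).
        noise = local_var.mean()
    # Shrink toward the local mean where variance is low relative to noise;
    # low-variance regions fall back to the local mean via the where().
    res = local_mean + (im - local_mean) * (1 - noise / local_var)
    return np.where(local_var < noise, local_mean, res)

out = wiener_sketch(np.random.default_rng(0).normal(size=(64, 64)))
```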
seealso:: :func:`scipy.signal.order_filter`
     """
-    if a.dtype.kind in 'bc' or a.dtype == cupy.float16:
+    if a.dtype.kind in "bc" or a.dtype == cupy.float16:
         # scipy doesn't support these types
         raise ValueError("data type not supported")
     if any(x % 2 != 1 for x in domain.shape):
-        raise ValueError("Each dimension of domain argument "
-                         " should have an odd number of elements.")
-    return rank_filter(a, rank, footprint=domain, mode='constant')
+        raise ValueError(
+            "Each dimension of domain argument "
+            "should have an odd number of elements."
+        )
+    return rank_filter(a, rank, footprint=domain, mode="constant")
 
 
 def medfilt(volume, kernel_size=None):
@@ -689,19 +711,21 @@ def medfilt(volume, kernel_size=None):
     .. seealso:: :func:`cupyx.scipy.ndimage.median_filter`
     .. seealso:: :func:`scipy.signal.medfilt`
     """
-    if volume.dtype.kind == 'c':
+    if volume.dtype.kind == "c":
         # scipy doesn't support complex
         # (and rank_filter raise TypeError)
         raise ValueError("complex types not supported")
     # output is forced to float64 to match scipy
     kernel_size = _get_kernel_size(kernel_size, volume.ndim)
     if any(k > s for k, s in zip(kernel_size, volume.shape)):
-        warnings.warn('kernel_size exceeds volume extent: '
-                      'volume will be zero-padded')
+        warnings.warn(
+            "kernel_size exceeds volume extent: volume will be zero-padded"
+        )
 
     size = np.prod(kernel_size)
-    return rank_filter(volume, size // 2, size=kernel_size,
-                       output=float, mode='constant')
+    return rank_filter(
+        volume, size // 2, size=kernel_size, output=float, mode="constant"
+    )
 
 
 def medfilt2d(input, kernel_size=3):
@@ -731,16 +755,16 @@ def medfilt2d(input, kernel_size=3):
         # Scipy's version only supports uint8, float32, and float64
         raise ValueError("only supports uint8, float32, and float64")
     if input.ndim != 2:
-        raise ValueError('input must be 2d')
+        raise ValueError("input must be 2d")
     kernel_size = _get_kernel_size(kernel_size, input.ndim)
     order = kernel_size[0] * kernel_size[1] // 2
-    return rank_filter(input, order, size=kernel_size, mode='constant')
+    return rank_filter(input, order, size=kernel_size, mode="constant")
 
 
 def _get_kernel_size(kernel_size, ndim):
     if kernel_size is None:
         kernel_size = (3,) * ndim
-    kernel_size = _fix_sequence_arg(kernel_size, ndim, 'kernel_size', int)
+    kernel_size = _fix_sequence_arg(kernel_size, ndim, "kernel_size", int)
     if any((k % 2) != 1 for k in kernel_size):
         raise ValueError("Each element of kernel_size should be odd")
     return kernel_size
diff --git a/python/cucim/src/cucim/skimage/_vendored/time.py b/python/cucim/src/cucim/skimage/_vendored/time.py
index 15d7f74b5..a4b7bd57c 100644
--- a/python/cucim/src/cucim/skimage/_vendored/time.py
+++ b/python/cucim/src/cucim/skimage/_vendored/time.py
@@ -97,7 +97,6 @@ def repeat(
 def _repeat(
     func, args, kwargs, n_repeat, name, n_warmup, max_duration, devices
 ):
-
     events_1 = []
     events_2 = []
 
diff --git a/python/cucim/src/cucim/skimage/color/__init__.py b/python/cucim/src/cucim/skimage/color/__init__.py
index ef9049f34..b3d396559 100644
--- a/python/cucim/src/cucim/skimage/color/__init__.py
+++ b/python/cucim/src/cucim/skimage/color/__init__.py
@@ -1,82 +1,129 @@
-from .colorconv import (ahx_from_rgb, bex_from_rgb, bpx_from_rgb, bro_from_rgb,
-                        combine_stains, convert_colorspace, fgx_from_rgb,
-                        gdx_from_rgb, gray2rgb, gray2rgba, hax_from_rgb,
-                        hdx_from_rgb, hed2rgb, hed_from_rgb, hpx_from_rgb,
-                        hsv2rgb, lab2lch, lab2rgb, lab2xyz, lch2lab, luv2rgb,
-                        luv2xyz, rbd_from_rgb, rgb2gray, rgb2hed, rgb2hsv,
-                        rgb2lab, rgb2luv, rgb2rgbcie, rgb2xyz, rgb2ycbcr,
-                        rgb2ydbdr,
rgb2yiq, rgb2ypbpr, rgb2yuv, rgb_from_ahx, - rgb_from_bex, rgb_from_bpx, rgb_from_bro, rgb_from_fgx, - rgb_from_gdx, rgb_from_hax, rgb_from_hdx, rgb_from_hed, - rgb_from_hpx, rgb_from_rbd, rgba2rgb, rgbcie2rgb, - separate_stains, xyz2lab, xyz2luv, xyz2rgb, - xyz_tristimulus_values, ycbcr2rgb, ydbdr2rgb, yiq2rgb, - ypbpr2rgb, yuv2rgb) +from .colorconv import ( + ahx_from_rgb, + bex_from_rgb, + bpx_from_rgb, + bro_from_rgb, + combine_stains, + convert_colorspace, + fgx_from_rgb, + gdx_from_rgb, + gray2rgb, + gray2rgba, + hax_from_rgb, + hdx_from_rgb, + hed2rgb, + hed_from_rgb, + hpx_from_rgb, + hsv2rgb, + lab2lch, + lab2rgb, + lab2xyz, + lch2lab, + luv2rgb, + luv2xyz, + rbd_from_rgb, + rgb2gray, + rgb2hed, + rgb2hsv, + rgb2lab, + rgb2luv, + rgb2rgbcie, + rgb2xyz, + rgb2ycbcr, + rgb2ydbdr, + rgb2yiq, + rgb2ypbpr, + rgb2yuv, + rgb_from_ahx, + rgb_from_bex, + rgb_from_bpx, + rgb_from_bro, + rgb_from_fgx, + rgb_from_gdx, + rgb_from_hax, + rgb_from_hdx, + rgb_from_hed, + rgb_from_hpx, + rgb_from_rbd, + rgba2rgb, + rgbcie2rgb, + separate_stains, + xyz2lab, + xyz2luv, + xyz2rgb, + xyz_tristimulus_values, + ycbcr2rgb, + ydbdr2rgb, + yiq2rgb, + ypbpr2rgb, + yuv2rgb, +) from .colorlabel import color_dict, label2rgb from .delta_e import deltaE_cie76, deltaE_ciede94, deltaE_ciede2000, deltaE_cmc -__all__ = ['convert_colorspace', - 'xyz_tristimulus_values', - 'rgba2rgb', - 'rgb2hsv', - 'hsv2rgb', - 'rgb2xyz', - 'xyz2rgb', - 'rgb2rgbcie', - 'rgbcie2rgb', - 'rgb2gray', - 'gray2rgb', - 'gray2rgba', - 'xyz2lab', - 'lab2xyz', - 'lab2rgb', - 'rgb2lab', - 'xyz2luv', - 'luv2xyz', - 'luv2rgb', - 'rgb2luv', - 'rgb2hed', - 'hed2rgb', - 'lab2lch', - 'lch2lab', - 'rgb2yuv', - 'yuv2rgb', - 'rgb2yiq', - 'yiq2rgb', - 'rgb2ypbpr', - 'ypbpr2rgb', - 'rgb2ycbcr', - 'ycbcr2rgb', - 'rgb2ydbdr', - 'ydbdr2rgb', - 'separate_stains', - 'combine_stains', - 'rgb_from_hed', - 'hed_from_rgb', - 'rgb_from_hdx', - 'hdx_from_rgb', - 'rgb_from_fgx', - 'fgx_from_rgb', - 'rgb_from_bex', - 'bex_from_rgb', - 'rgb_from_rbd', - 'rbd_from_rgb', - 'rgb_from_gdx', - 'gdx_from_rgb', - 'rgb_from_hax', - 'hax_from_rgb', - 'rgb_from_bro', - 'bro_from_rgb', - 'rgb_from_bpx', - 'bpx_from_rgb', - 'rgb_from_ahx', - 'ahx_from_rgb', - 'rgb_from_hpx', - 'hpx_from_rgb', - 'color_dict', - 'label2rgb', - 'deltaE_cie76', - 'deltaE_ciede94', - 'deltaE_ciede2000', # TODO: fix accuracy - 'deltaE_cmc', - ] +__all__ = [ + "convert_colorspace", + "xyz_tristimulus_values", + "rgba2rgb", + "rgb2hsv", + "hsv2rgb", + "rgb2xyz", + "xyz2rgb", + "rgb2rgbcie", + "rgbcie2rgb", + "rgb2gray", + "gray2rgb", + "gray2rgba", + "xyz2lab", + "lab2xyz", + "lab2rgb", + "rgb2lab", + "xyz2luv", + "luv2xyz", + "luv2rgb", + "rgb2luv", + "rgb2hed", + "hed2rgb", + "lab2lch", + "lch2lab", + "rgb2yuv", + "yuv2rgb", + "rgb2yiq", + "yiq2rgb", + "rgb2ypbpr", + "ypbpr2rgb", + "rgb2ycbcr", + "ycbcr2rgb", + "rgb2ydbdr", + "ydbdr2rgb", + "separate_stains", + "combine_stains", + "rgb_from_hed", + "hed_from_rgb", + "rgb_from_hdx", + "hdx_from_rgb", + "rgb_from_fgx", + "fgx_from_rgb", + "rgb_from_bex", + "bex_from_rgb", + "rgb_from_rbd", + "rbd_from_rgb", + "rgb_from_gdx", + "gdx_from_rgb", + "rgb_from_hax", + "hax_from_rgb", + "rgb_from_bro", + "bro_from_rgb", + "rgb_from_bpx", + "bpx_from_rgb", + "rgb_from_ahx", + "ahx_from_rgb", + "rgb_from_hpx", + "hpx_from_rgb", + "color_dict", + "label2rgb", + "deltaE_cie76", + "deltaE_ciede94", + "deltaE_ciede2000", # TODO: fix accuracy + "deltaE_cmc", +] diff --git a/python/cucim/src/cucim/skimage/color/adapt_rgb.py 
b/python/cucim/src/cucim/skimage/color/adapt_rgb.py index 95e33aeba..03a15a7d6 100644 --- a/python/cucim/src/cucim/skimage/color/adapt_rgb.py +++ b/python/cucim/src/cucim/skimage/color/adapt_rgb.py @@ -5,7 +5,7 @@ from .. import color from ..util.dtype import _convert -__all__ = ['adapt_rgb', 'hsv_value', 'each_channel'] +__all__ = ["adapt_rgb", "hsv_value", "each_channel"] def is_rgb_like(image, channel_axis=-1): @@ -30,6 +30,7 @@ def adapt_rgb(apply_to_rgb): Function that returns a filtered image from an image-filter and RGB image. This will only be called if the image is RGB-like. """ + def decorator(image_filter): @functools.wraps(image_filter) def image_filter_adapted(image, *args, **kwargs): @@ -37,7 +38,9 @@ def image_filter_adapted(image, *args, **kwargs): return apply_to_rgb(image_filter, image, *args, **kwargs) else: return image_filter(image, *args, **kwargs) + return image_filter_adapted + return decorator @@ -73,6 +76,7 @@ def each_channel(image_filter, image, *args, **kwargs): image : array Input image. """ - c_new = [image_filter(c, *args, **kwargs) - for c in cp.moveaxis(image, -1, 0)] + c_new = [ + image_filter(c, *args, **kwargs) for c in cp.moveaxis(image, -1, 0) + ] return cp.stack(c_new, axis=-1) diff --git a/python/cucim/src/cucim/skimage/color/colorconv.py b/python/cucim/src/cucim/skimage/color/colorconv.py index f7a1e3678..0b18aa910 100644 --- a/python/cucim/src/cucim/skimage/color/colorconv.py +++ b/python/cucim/src/cucim/skimage/color/colorconv.py @@ -55,8 +55,12 @@ import numpy as np from scipy import linalg -from .._shared.utils import (_supported_float_type, channel_as_last_axis, - deprecate_func, identity) +from .._shared.utils import ( + _supported_float_type, + channel_as_last_axis, + deprecate_func, + identity, +) from ..util import dtype, dtype_limits @@ -104,36 +108,55 @@ def convert_colorspace(arr, fromspace, tospace, *, channel_axis=-1): >>> img = cp.array(data.astronaut()) >>> img_hsv = convert_colorspace(img, 'RGB', 'HSV') """ - fromdict = {'rgb': identity, 'hsv': hsv2rgb, 'rgb cie': rgbcie2rgb, - 'xyz': xyz2rgb, 'yuv': yuv2rgb, 'yiq': yiq2rgb, - 'ypbpr': ypbpr2rgb, 'ycbcr': ycbcr2rgb, 'ydbdr': ydbdr2rgb} - todict = {'rgb': identity, 'hsv': rgb2hsv, 'rgb cie': rgb2rgbcie, - 'xyz': rgb2xyz, 'yuv': rgb2yuv, 'yiq': rgb2yiq, - 'ypbpr': rgb2ypbpr, 'ycbcr': rgb2ycbcr, 'ydbdr': rgb2ydbdr} + fromdict = { + "rgb": identity, + "hsv": hsv2rgb, + "rgb cie": rgbcie2rgb, + "xyz": xyz2rgb, + "yuv": yuv2rgb, + "yiq": yiq2rgb, + "ypbpr": ypbpr2rgb, + "ycbcr": ycbcr2rgb, + "ydbdr": ydbdr2rgb, + } + todict = { + "rgb": identity, + "hsv": rgb2hsv, + "rgb cie": rgb2rgbcie, + "xyz": rgb2xyz, + "yuv": rgb2yuv, + "yiq": rgb2yiq, + "ypbpr": rgb2ypbpr, + "ycbcr": rgb2ycbcr, + "ydbdr": rgb2ydbdr, + } fromspace = fromspace.lower() tospace = tospace.lower() if fromspace not in fromdict: - msg = f'`fromspace` has to be one of {fromdict.keys()}' + msg = f"`fromspace` has to be one of {fromdict.keys()}" raise ValueError(msg) if tospace not in todict: - msg = f'`tospace` has to be one of {todict.keys()}' + msg = f"`tospace` has to be one of {todict.keys()}" raise ValueError(msg) return todict[tospace]( fromdict[fromspace](arr, channel_axis=channel_axis), - channel_axis=channel_axis + channel_axis=channel_axis, ) -def _prepare_colorarray(arr, force_copy=False, force_c_contiguous=True, - channel_axis=-1): +def _prepare_colorarray( + arr, force_copy=False, force_c_contiguous=True, channel_axis=-1 +): """Check the shape of the array and convert it to floating point representation. 
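Usage sketch for the `adapt_rgb` decorator above: wrap a grayscale filter so RGB(-like) input is processed once per channel and re-stacked, while other input falls through to the plain filter. `cucim.skimage.filters.sobel` serves here only as an example wrapped filter:

```python
import cupy as cp
from cucim.skimage import filters
from cucim.skimage.color.adapt_rgb import adapt_rgb, each_channel

@adapt_rgb(each_channel)
def sobel_each(image):
    return filters.sobel(image)

# RGB-like input is split along the last axis, filtered per channel,
# and re-stacked; grayscale input runs through filters.sobel directly.
edges = sobel_each(cp.random.rand(16, 16, 3))
print(edges.shape)  # (16, 16, 3)
```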
""" if arr.shape[channel_axis] != 3: - msg = (f'the input array must have size 3 along `channel_axis`, ' - f'got {arr.shape}') + msg = ( + f"the input array must have size 3 along `channel_axis`, " + f"got {arr.shape}" + ) raise ValueError(msg) float_dtype = _supported_float_type(arr.dtype) if float_dtype == cp.float32: @@ -154,7 +177,7 @@ def _validate_channel_axis(channel_axis, ndim): @cp.memoize(for_each_device=True) -def _rgba2rgb_kernel(background, name='rgba2rgb'): +def _rgba2rgb_kernel(background, name="rgba2rgb"): code = """ X alpha = rgba[4*i + 3]; X val; @@ -165,10 +188,8 @@ def _rgba2rgb_kernel(background, name='rgba2rgb'): rgb[3*i + {ch}] = min(max(val, (X)0.0), (X)1.0); """ return cp.ElementwiseKernel( - 'raw X rgba', - 'raw X rgb', - code, - name='cucim_skimage_color_' + name) + "raw X rgba", "raw X rgb", code, name="cucim_skimage_color_" + name + ) @channel_as_last_axis() # current CUDA kernel assumes channel_axis is last @@ -213,8 +234,10 @@ def rgba2rgb(rgba, background=(1, 1, 1), *, channel_axis=-1): channel_axis = channel_axis % rgba.ndim if rgba.shape[channel_axis] != 4: - msg = (f'the input array must have size 4 along `channel_axis`, ' - f'got {rgba.shape}') + msg = ( + f"the input array must have size 4 along `channel_axis`, " + f"got {rgba.shape}" + ) raise ValueError(msg) float_dtype = _supported_float_type(rgba.dtype) @@ -229,13 +252,16 @@ def rgba2rgb(rgba, background=(1, 1, 1), *, channel_axis=-1): background = cp.asnumpy(background) # synchronize background = tuple(float(b) for b in background) if len(background) != 3: - raise ValueError('background must be an array-like containing 3 RGB ' - f'values. Got {len(background)} items') + raise ValueError( + "background must be an array-like containing 3 RGB " + f"values. Got {len(background)} items" + ) if any((b < 0 or b > 1) for b in background): - raise ValueError('background RGB values must be floats between ' - '0 and 1.') + raise ValueError( + "background RGB values must be floats between " "0 and 1." + ) - name = f'rgba2rgb_{rgba.dtype.char}' + name = f"rgba2rgb_{rgba.dtype.char}" kern = _rgba2rgb_kernel(background, name) rgb = cp.empty(rgba.shape[:-1] + (3,), dtype=rgba.dtype) kern(rgba, rgb, size=rgb.size // 3) @@ -243,7 +269,7 @@ def rgba2rgb(rgba, background=(1, 1, 1), *, channel_axis=-1): @cp.memoize(for_each_device=True) -def _rgb_to_hsv_kernel(name='rgb2hsv'): +def _rgb_to_hsv_kernel(name="rgb2hsv"): code = """ X minv = rgb[3*i]; X maxv = rgb[3*i]; @@ -282,10 +308,8 @@ def _rgb_to_hsv_kernel(name='rgb2hsv'): hsv[3*i + 2] = maxv; """ return cp.ElementwiseKernel( - 'raw X rgb', - 'raw X hsv', - code, - name='cucim_skimage_color_' + name) + "raw X rgb", "raw X hsv", code, name="cucim_skimage_color_" + name + ) @channel_as_last_axis() @@ -332,11 +356,12 @@ def rgb2hsv(rgb, *, channel_axis=-1): if input_is_one_pixel: rgb = rgb[np.newaxis, ...] 
- rgb = _prepare_colorarray(rgb, force_c_contiguous=True, - channel_axis=channel_axis) + rgb = _prepare_colorarray( + rgb, force_c_contiguous=True, channel_axis=channel_axis + ) hsv = cp.empty_like(rgb) - name = f'rgb2hsv_{rgb.dtype.char}' + name = f"rgb2hsv_{rgb.dtype.char}" kern = _rgb_to_hsv_kernel(name=name) kern(rgb, hsv, size=rgb.size // 3) @@ -347,7 +372,7 @@ def rgb2hsv(rgb, *, channel_axis=-1): @cp.memoize(for_each_device=True) -def _hsv_to_rgb_kernel(name='hsv2rgb'): +def _hsv_to_rgb_kernel(name="hsv2rgb"): code = """ int hi = (int)floor(hsv[3*i] * 6.0); @@ -391,10 +416,8 @@ def _hsv_to_rgb_kernel(name='hsv2rgb'): } """ return cp.ElementwiseKernel( - 'raw X hsv', - 'raw X rgb', - code, - name='cucim_skimage_color_' + name) + "raw X hsv", "raw X rgb", code, name="cucim_skimage_color_" + name + ) @channel_as_last_axis() @@ -437,12 +460,13 @@ def hsv2rgb(hsv, *, channel_axis=-1): >>> img_hsv = rgb2hsv(img) >>> img_rgb = hsv2rgb(img_hsv) """ - hsv = _prepare_colorarray(hsv, force_c_contiguous=True, - channel_axis=channel_axis) + hsv = _prepare_colorarray( + hsv, force_c_contiguous=True, channel_axis=channel_axis + ) rgb = cp.empty_like(hsv) - name = f'hsv2rgb_{hsv.dtype.char}' + name = f"hsv2rgb_{hsv.dtype.char}" kern = _hsv_to_rgb_kernel(name=name) kern(hsv, rgb, size=hsv.size // 3) return rgb @@ -452,7 +476,7 @@ def hsv2rgb(hsv, *, channel_axis=-1): # Primaries for the coordinate systems # --------------------------------------------------------------- cie_primaries = np.array([700, 546.1, 435.8]) -sb_primaries = np.array([1. / 155, 1. / 190, 1. / 225]) * 1e5 +sb_primaries = np.array([1.0 / 155, 1.0 / 190, 1.0 / 225]) * 1e5 # --------------------------------------------------------------- # Matrices that define conversion between different color spaces @@ -503,22 +527,30 @@ def hsv2rgb(hsv, *, channel_axis=-1): rgb_from_ypbpr = linalg.inv(ypbpr_from_rgb) -ycbcr_from_rgb = np.array([[ 65.481, 128.553, 24.966], # noqa - [-37.797, -74.203, 112.0 ], # noqa - [ 112.0 , -93.786, -18.214]]) # noqa +ycbcr_from_rgb = np.array( + [ + [65.481, 128.553, 24.966], # noqa + [-37.797, -74.203, 112.0], # noqa + [112.0, -93.786, -18.214], + ] +) # noqa rgb_from_ycbcr = linalg.inv(ycbcr_from_rgb) -ydbdr_from_rgb = np.array([[ 0.299, 0.587, 0.114], # noqa - [-0.45 , -0.883, 1.333], # noqa - [-1.333, 1.116, 0.217]]) # noqa +ydbdr_from_rgb = np.array( + [ + [0.299, 0.587, 0.114], # noqa + [-0.45, -0.883, 1.333], # noqa + [-1.333, 1.116, 0.217], + ] +) # noqa rgb_from_ydbdr = linalg.inv(ydbdr_from_rgb) # CIE LAB constants for Observer=2A, Illuminant=D65 # NOTE: this is actually the XYZ values for the illuminant above. -lab_ref_white = np.array([0.95047, 1., 1.08883]) +lab_ref_white = np.array([0.95047, 1.0, 1.08883]) # XYZ coordinates of the illuminants, scaled to [0, 1]. For each illuminant I # we have: @@ -546,31 +578,44 @@ def hsv2rgb(hsv, *, channel_axis=-1): # ---------- # .. 
[1] https://en.wikipedia.org/wiki/Standard_illuminant -_illuminants = \ - {"A": {'2': (1.098466069456375, 1, 0.3558228003436005), - '10': (1.111420406956693, 1, 0.3519978321919493), - 'R': (1.098466069456375, 1, 0.3558228003436005)}, - "B": {'2': (0.9909274480248003, 1, 0.8531327322886154), - '10': (0.9917777147717607, 1, 0.8434930535866175), - 'R': (0.9909274480248003, 1, 0.8531327322886154)}, - "C": {'2': (0.980705971659919, 1, 1.1822494939271255), - '10': (0.9728569189782166, 1, 1.1614480488951577), - 'R': (0.980705971659919, 1, 1.1822494939271255)}, - "D50": {'2': (0.9642119944211994, 1, 0.8251882845188288), - '10': (0.9672062750333777, 1, 0.8142801513128616), - 'R': (0.9639501491621826, 1, 0.8241280285499208)}, - "D55": {'2': (0.956797052643698, 1, 0.9214805860173273), - '10': (0.9579665682254781, 1, 0.9092525159847462), - 'R': (0.9565317453467969, 1, 0.9202554587037198)}, - "D65": {'2': (0.95047, 1., 1.08883), # This was: `lab_ref_white` - '10': (0.94809667673716, 1, 1.0730513595166162), - 'R': (0.9532057125493769, 1, 1.0853843816469158)}, - "D75": {'2': (0.9497220898840717, 1, 1.226393520724154), - '10': (0.9441713925645873, 1, 1.2064272211720228), - 'R': (0.9497220898840717, 1, 1.226393520724154)}, - "E": {'2': (1.0, 1.0, 1.0), - '10': (1.0, 1.0, 1.0), - 'R': (1.0, 1.0, 1.0)}} +_illuminants = { + "A": { + "2": (1.098466069456375, 1, 0.3558228003436005), + "10": (1.111420406956693, 1, 0.3519978321919493), + "R": (1.098466069456375, 1, 0.3558228003436005), + }, + "B": { + "2": (0.9909274480248003, 1, 0.8531327322886154), + "10": (0.9917777147717607, 1, 0.8434930535866175), + "R": (0.9909274480248003, 1, 0.8531327322886154), + }, + "C": { + "2": (0.980705971659919, 1, 1.1822494939271255), + "10": (0.9728569189782166, 1, 1.1614480488951577), + "R": (0.980705971659919, 1, 1.1822494939271255), + }, + "D50": { + "2": (0.9642119944211994, 1, 0.8251882845188288), + "10": (0.9672062750333777, 1, 0.8142801513128616), + "R": (0.9639501491621826, 1, 0.8241280285499208), + }, + "D55": { + "2": (0.956797052643698, 1, 0.9214805860173273), + "10": (0.9579665682254781, 1, 0.9092525159847462), + "R": (0.9565317453467969, 1, 0.9202554587037198), + }, + "D65": { + "2": (0.95047, 1.0, 1.08883), # This was: `lab_ref_white` + "10": (0.94809667673716, 1, 1.0730513595166162), + "R": (0.9532057125493769, 1, 1.0853843816469158), + }, + "D75": { + "2": (0.9497220898840717, 1, 1.226393520724154), + "10": (0.9441713925645873, 1, 1.2064272211720228), + "R": (0.9497220898840717, 1, 1.226393520724154), + }, + "E": {"2": (1.0, 1.0, 1.0), "10": (1.0, 1.0, 1.0), "R": (1.0, 1.0, 1.0)}, +} def xyz_tristimulus_values(*, illuminant, observer, dtype=None): @@ -640,8 +685,10 @@ def xyz_tristimulus_values(*, illuminant, observer, dtype=None): try: return _illuminants[illuminant][observer] except KeyError: - raise ValueError(f'Unknown illuminant/observer combination ' - f'(`{illuminant}`, `{observer}`)') + raise ValueError( + f"Unknown illuminant/observer combination " + f"(`{illuminant}`, `{observer}`)" + ) @deprecate_func( diff --git a/python/cucim/src/cucim/skimage/color/colorlabel.py b/python/cucim/src/cucim/skimage/color/colorlabel.py index f93da5c9b..2ac4cf85a 100644 --- a/python/cucim/src/cucim/skimage/color/colorlabel.py +++ b/python/cucim/src/cucim/skimage/color/colorlabel.py @@ -8,15 +8,26 @@ from . 
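An example lookup against the `_illuminants` table, using the keyword-only arguments per the signature above; the value shown is the stored D65/2° entry, i.e. what `lab_ref_white` used to hold:

```python
from cucim.skimage.color import xyz_tristimulus_values

white = xyz_tristimulus_values(illuminant="D65", observer="2")
print(white)  # (0.95047, 1.0, 1.08883)
```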
import rgb_colors from .colorconv import gray2rgb, hsv2rgb, rgb2hsv -__all__ = ['color_dict', 'label2rgb', 'DEFAULT_COLORS'] +__all__ = ["color_dict", "label2rgb", "DEFAULT_COLORS"] -DEFAULT_COLORS = ('red', 'blue', 'yellow', 'magenta', 'green', - 'indigo', 'darkorange', 'cyan', 'pink', 'yellowgreen') +DEFAULT_COLORS = ( + "red", + "blue", + "yellow", + "magenta", + "green", + "indigo", + "darkorange", + "cyan", + "pink", + "yellowgreen", +) -color_dict = {k: v for k, v in rgb_colors.__dict__.items() - if isinstance(v, tuple)} +color_dict = { + k: v for k, v in rgb_colors.__dict__.items() if isinstance(v, tuple) +} def _rgb_vector(color): @@ -70,9 +81,19 @@ def _match_label_with_color(label, colors, bg_label, bg_color): return mapped_labels, color_cycle -def label2rgb(label, image=None, colors=None, alpha=0.3, - bg_label=0, bg_color=(0, 0, 0), image_alpha=1, kind='overlay', - *, saturation=0, channel_axis=-1): +def label2rgb( + label, + image=None, + colors=None, + alpha=0.3, + bg_label=0, + bg_color=(0, 0, 0), + image_alpha=1, + kind="overlay", + *, + saturation=0, + channel_axis=-1, +): """Return an RGB image where color-coded labels are painted over the image. Parameters @@ -120,19 +141,34 @@ def label2rgb(label, image=None, colors=None, alpha=0.3, """ if image is not None: image = np.moveaxis(image, source=channel_axis, destination=-1) - if kind == 'overlay': - rgb = _label2rgb_overlay(label, image, colors, alpha, bg_label, - bg_color, image_alpha, saturation) - elif kind == 'avg': + if kind == "overlay": + rgb = _label2rgb_overlay( + label, + image, + colors, + alpha, + bg_label, + bg_color, + image_alpha, + saturation, + ) + elif kind == "avg": rgb = _label2rgb_avg(label, image, bg_label, bg_color) else: raise ValueError("`kind` must be either 'overlay' or 'avg'.") return np.moveaxis(rgb, source=-1, destination=channel_axis) -def _label2rgb_overlay(label, image=None, colors=None, alpha=0.3, - bg_label=-1, bg_color=None, image_alpha=1, - saturation=0): +def _label2rgb_overlay( + label, + image=None, + colors=None, + alpha=0.3, + bg_label=-1, + bg_color=None, + image_alpha=1, + saturation=0, +): """Return an RGB image where color-coded labels are painted over the image. Parameters @@ -168,7 +204,7 @@ def _label2rgb_overlay(label, image=None, colors=None, alpha=0.3, value in `label` with the image, at a certain alpha value. """ if not 0 <= saturation <= 1: - warn(f'saturation must be in range [0, 1], got {saturation}') + warn(f"saturation must be in range [0, 1], got {saturation}") if colors is None: colors = DEFAULT_COLORS @@ -179,14 +215,14 @@ def _label2rgb_overlay(label, image=None, colors=None, alpha=0.3, # Opacity doesn't make sense if no image exists. alpha = 1 else: - if (image.shape[:label.ndim] != label.shape - or image.ndim > label.ndim + 1): + if ( + image.shape[: label.ndim] != label.shape + or image.ndim > label.ndim + 1 + ): raise ValueError("`image` and `label` must be the same shape") if image.ndim == label.ndim + 1 and image.shape[-1] != 3: - raise ValueError( - "`image` must be RGB (image.shape[-1] must be 3)." 
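A minimal `label2rgb` call grounded in the defaults above; with no `image`, labels get a flat color fill, and under the `DEFAULT_COLORS` cycle label 1 should come out 'red' and label 2 'blue', with the background black:

```python
import cupy as cp
from cucim.skimage.color import label2rgb

label = cp.asarray([[0, 1, 1], [0, 2, 2]])
rgb = label2rgb(label, bg_label=0)  # pure color fill, shape label.shape + (3,)
print(rgb.shape)  # (2, 3, 3)
```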
- ) + raise ValueError("`image` must be RGB (image.shape[-1] must be 3).") if image.min() < 0: warn("Negative intensities in `image` are not supported") @@ -214,7 +250,8 @@ def _label2rgb_overlay(label, image=None, colors=None, alpha=0.3, label = label.astype(new_type) mapped_labels_flat, color_cycle = _match_label_with_color( - label, colors, bg_label, bg_color) + label, colors, bg_label, bg_color + ) if len(mapped_labels_flat) == 0: return image @@ -259,7 +296,7 @@ def _label2rgb_avg(label_field, image, bg_label=0, bg_color=(0, 0, 0)): """ out = cp.zeros(label_field.shape + (3,), dtype=image.dtype) labels = cp.unique(label_field) - bg = (labels == bg_label) + bg = labels == bg_label if bg.any(): labels = labels[labels != bg_label] mask = (label_field == bg_label).nonzero() diff --git a/python/cucim/src/cucim/skimage/color/delta_e.py b/python/cucim/src/cucim/skimage/color/delta_e.py index bf07162a8..f5ad592a9 100644 --- a/python/cucim/src/cucim/skimage/color/delta_e.py +++ b/python/cucim/src/cucim/skimage/color/delta_e.py @@ -70,8 +70,9 @@ def deltaE_cie76(lab1, lab2, channel_axis=-1): return cp.sqrt(out, out=out) -def deltaE_ciede94(lab1, lab2, kH=1, kC=1, kL=1, k1=0.045, k2=0.015, *, - channel_axis=-1): +def deltaE_ciede94( + lab1, lab2, kH=1, kC=1, kL=1, k1=0.045, k2=0.015, *, channel_axis=-1 +): """Color difference according to CIEDE 94 standard Accommodates perceptual non-uniformities through the use of application @@ -214,8 +215,8 @@ def deltaE_ciede2000(lab1, lab2, kL=1, kC=1, kH=1, *, channel_axis=-1): # all subsequence calculations are in the new coordinates # (often denoted "prime" in the literature) Cbar = 0.5 * (cp.hypot(a1, b1) + cp.hypot(a2, b2)) - c7 = Cbar ** 7 - G = 0.5 * (1 - cp.sqrt(c7 / (c7 + 25 ** 7))) + c7 = Cbar**7 + G = 0.5 * (1 - cp.sqrt(c7 / (c7 + 25**7))) scale = 1 + G C1, h1 = _cart2polar_2pi(a1 * scale, b1) C2, h2 = _cart2polar_2pi(a2 * scale, b2) @@ -247,29 +248,30 @@ def deltaE_ciede2000(lab1, lab2, kL=1, kC=1, kH=1, *, channel_axis=-1): dH = h_diff.copy() dH[h_diff > np.pi] -= 2 * np.pi dH[h_diff < -np.pi] += 2 * np.pi - dH[CC == 0.] = 0. # if r == 0, dtheta == 0 + dH[CC == 0.0] = 0.0 # if r == 0, dtheta == 0 dH_term = 2 * cp.sqrt(CC) * cp.sin(dH / 2) Hbar = h_sum.copy() - mask = cp.logical_and(CC != 0., cp.abs(h_diff) > np.pi) + mask = cp.logical_and(CC != 0.0, cp.abs(h_diff) > np.pi) Hbar[mask * (h_sum < 2 * np.pi)] += 2 * np.pi Hbar[mask * (h_sum >= 2 * np.pi)] -= 2 * np.pi - Hbar[CC == 0.] 
*= 2 + Hbar[CC == 0.0] *= 2 Hbar *= 0.5 - T = (1 - - 0.17 * cp.cos(Hbar - np.deg2rad(30)) + - 0.24 * cp.cos(2 * Hbar) + - 0.32 * cp.cos(3 * Hbar + np.deg2rad(6)) - - 0.20 * cp.cos(4 * Hbar - np.deg2rad(63)) - ) + T = ( + 1 + - 0.17 * cp.cos(Hbar - np.deg2rad(30)) + + 0.24 * cp.cos(2 * Hbar) + + 0.32 * cp.cos(3 * Hbar + np.deg2rad(6)) + - 0.20 * cp.cos(4 * Hbar - np.deg2rad(63)) + ) SH = 1 + 0.015 * Cbar * T H_term = dH_term / (kH * SH) # hue rotation - c7 = Cbar ** 7 - Rc = 2 * cp.sqrt(c7 / (c7 + 25 ** 7)) + c7 = Cbar**7 + Rc = 2 * cp.sqrt(c7 / (c7 + 25**7)) tmp = (cp.rad2deg(Hbar) - 275) / 25 tmp *= tmp dtheta = np.deg2rad(30) * cp.exp(-tmp) @@ -338,20 +340,21 @@ def deltaE_cmc(lab1, lab2, kL=1, kC=1, *, channel_axis=-1): dL = L1 - L2 dH2 = get_dH2(lab1, lab2, channel_axis=0) - T = cp.where(cp.logical_and(cp.rad2deg(h1) >= 164, cp.rad2deg(h1) <= 345), - 0.56 + 0.2 * cp.abs(np.cos(h1 + cp.deg2rad(168))), - 0.36 + 0.4 * cp.abs(np.cos(h1 + cp.deg2rad(35))) - ) - c1_4 = C1 ** 4 + T = cp.where( + cp.logical_and(cp.rad2deg(h1) >= 164, cp.rad2deg(h1) <= 345), + 0.56 + 0.2 * cp.abs(np.cos(h1 + cp.deg2rad(168))), + 0.36 + 0.4 * cp.abs(np.cos(h1 + cp.deg2rad(35))), + ) + c1_4 = C1**4 F = cp.sqrt(c1_4 / (c1_4 + 1900)) - SL = cp.where(L1 < 16, 0.511, 0.040975 * L1 / (1. + 0.01765 * L1)) - SC = 0.638 + 0.0638 * C1 / (1. + 0.0131 * C1) + SL = cp.where(L1 < 16, 0.511, 0.040975 * L1 / (1.0 + 0.01765 * L1)) + SC = 0.638 + 0.0638 * C1 / (1.0 + 0.0131 * C1) SH = SC * (F * T + 1 - F) dE2 = (dL / (kL * SL)) ** 2 dE2 += (dC / (kC * SC)) ** 2 - dE2 += dH2 / (SH ** 2) + dE2 += dH2 / (SH**2) return cp.sqrt(cp.maximum(dE2, 0, out=dE2), out=dE2) @@ -374,9 +377,9 @@ def get_dH2(lab1, lab2, *, channel_axis=-1): 2*|ab1|*|ab2| - 2*dot(ab1, ab2) """ # This function needs double precision internally for accuracy - input_is_float_32 = _supported_float_type( - (lab1.dtype, lab2.dtype) - ) == cp.float32 + input_is_float_32 = ( + _supported_float_type((lab1.dtype, lab2.dtype)) == cp.float32 + ) lab1, lab2 = _float_inputs(lab1, lab2, allow_float32=False) a1, b1 = cp.moveaxis(lab1, source=channel_axis, destination=0)[1:3] a2, b2 = cp.moveaxis(lab2, source=channel_axis, destination=0)[1:3] diff --git a/python/cucim/src/cucim/skimage/color/tests/test_colorconv.py b/python/cucim/src/cucim/skimage/color/tests/test_colorconv.py index f3bd45a1e..7dfc80564 100644 --- a/python/cucim/src/cucim/skimage/color/tests/test_colorconv.py +++ b/python/cucim/src/cucim/skimage/color/tests/test_colorconv.py @@ -14,32 +14,62 @@ import cupy as cp import numpy as np import pytest -from cupy.testing import (assert_allclose, assert_array_almost_equal, - assert_array_equal) +from cupy.testing import ( + assert_allclose, + assert_array_almost_equal, + assert_array_equal, +) from numpy.testing import assert_equal from skimage import data from cucim.skimage._shared._warnings import expected_warnings from cucim.skimage._shared.utils import _supported_float_type, slice_at_axis -from cucim.skimage.color import (combine_stains, convert_colorspace, gray2rgb, - gray2rgba, hed2rgb, hsv2rgb, lab2lch, lab2rgb, - lab2xyz, lch2lab, luv2rgb, luv2xyz, rgb2gray, - rgb2hed, rgb2hsv, rgb2lab, rgb2luv, rgb2rgbcie, - rgb2xyz, rgb2ycbcr, rgb2ydbdr, rgb2yiq, - rgb2ypbpr, rgb2yuv, rgba2rgb, rgbcie2rgb, - separate_stains, xyz2lab, xyz2luv, xyz2rgb, - ycbcr2rgb, ydbdr2rgb, yiq2rgb, ypbpr2rgb, - yuv2rgb) +from cucim.skimage.color import ( + combine_stains, + convert_colorspace, + gray2rgb, + gray2rgba, + hed2rgb, + hsv2rgb, + lab2lch, + lab2rgb, + lab2xyz, + lch2lab, + luv2rgb, 
+ luv2xyz, + rgb2gray, + rgb2hed, + rgb2hsv, + rgb2lab, + rgb2luv, + rgb2rgbcie, + rgb2xyz, + rgb2ycbcr, + rgb2ydbdr, + rgb2yiq, + rgb2ypbpr, + rgb2yuv, + rgba2rgb, + rgbcie2rgb, + separate_stains, + xyz2lab, + xyz2luv, + xyz2rgb, + ycbcr2rgb, + ydbdr2rgb, + yiq2rgb, + ypbpr2rgb, + yuv2rgb, +) from cucim.skimage.util import img_as_float, img_as_float32, img_as_ubyte -data_dir = os.path.join(os.path.dirname(__file__), 'data') +data_dir = os.path.join(os.path.dirname(__file__), "data") -class TestColorconv(): - +class TestColorconv: img_rgb = cp.asarray(data.colorwheel()) img_grayscale = cp.asarray(data.camera()) - # ftm: off + # fmt: off img_rgba = cp.array([[[0, 0.5, 1, 0], [0, 0.5, 1, 1], [0, 0.5, 1, 0.5]]]).astype(float) @@ -72,7 +102,7 @@ class TestColorconv(): [[32.303, -9.400, -130.358]], # blue [[46.228, -43.774, 56.589]], # green ]) - # ftm: on + # fmt: on # RGBA to RGB @pytest.mark.parametrize("channel_axis", [0, 1, 2, -1, -2, -3]) @@ -83,12 +113,12 @@ def test_rgba2rgb_conversion(self, channel_axis): rgb = rgba2rgb(rgba, channel_axis=channel_axis) rgb = cp.moveaxis(rgb, source=channel_axis, destination=-1) - # ftm: off + # fmt: off expected = cp.asarray([[[1, 1, 1], [0, 0.5, 1], [0.5, 0.75, 1]]]).astype(float) - # ftm: on + # fmt: on assert_equal(rgb.shape, expected.shape) assert_array_almost_equal(rgb, expected) @@ -128,8 +158,12 @@ def test_rgb2hsv_conversion(self, channel_axis): hsv = hsv.reshape(-1, 3) # ground truth from colorsys - gt = np.asarray([colorsys.rgb_to_hsv(pt[0], pt[1], pt[2]) - for pt in cp.asnumpy(rgb).reshape(-1, 3)]) + gt = np.asarray( + [ + colorsys.rgb_to_hsv(pt[0], pt[1], pt[2]) + for pt in cp.asnumpy(rgb).reshape(-1, 3) + ] + ) assert_array_almost_equal(hsv, gt) def test_rgb2hsv_error_grayscale(self): @@ -184,7 +218,7 @@ def test_hsv2rgb_dtype(self): # RGB to XYZ @pytest.mark.parametrize("channel_axis", [0, 1, -1, -2]) def test_rgb2xyz_conversion(self, channel_axis): - # ftm: off + # fmt: off gt = cp.asarray([[[0.950456, 1. , 1.088754], # noqa [0.538003, 0.787329, 1.06942 ], # noqa [0.592876, 0.28484 , 0.969561], # noqa @@ -193,7 +227,7 @@ def test_rgb2xyz_conversion(self, channel_axis): [0.35758 , 0.71516 , 0.119193], # noqa [0.412453, 0.212671, 0.019334], # noqa [0. , 0. , 0. 
]]]) # noqa - # ftm: on + # fmt: on img = cp.moveaxis( self.colbars_array, source=-1, destination=channel_axis @@ -211,19 +245,20 @@ def test_rgb2xyz_error_grayscale(self): def test_rgb2xyz_dtype(self): img = self.colbars_array - img32 = img.astype('float32') + img32 = img.astype("float32") assert rgb2xyz(img).dtype == img.dtype assert rgb2xyz(img32).dtype == img32.dtype # XYZ to RGB def test_xyz2rgb_conversion(self): - assert_array_almost_equal(xyz2rgb(rgb2xyz(self.colbars_array)), - self.colbars_array) + assert_array_almost_equal( + xyz2rgb(rgb2xyz(self.colbars_array)), self.colbars_array + ) def test_xyz2rgb_dtype(self): img = rgb2xyz(self.colbars_array) - img32 = img.astype('float32') + img32 = img.astype("float32") assert xyz2rgb(img).dtype == img.dtype assert xyz2rgb(img32).dtype == img32.dtype @@ -234,8 +269,10 @@ def test_xyz_rgb_roundtrip(self, channel_axis): img_rgb = img_as_float(self.img_rgb) img_rgb = cp.moveaxis(img_rgb, source=-1, destination=channel_axis) - round_trip = xyz2rgb(rgb2xyz(img_rgb, channel_axis=channel_axis), - channel_axis=channel_axis) + round_trip = xyz2rgb( + rgb2xyz(img_rgb, channel_axis=channel_axis), + channel_axis=channel_axis, + ) assert_allclose(round_trip, img_rgb, rtol=1e-5, atol=1e-5) @@ -252,13 +289,14 @@ def test_hed_rgb_float_roundtrip(self, channel_axis): img_in = cp.moveaxis(img_in, source=-1, destination=channel_axis) img_out = rgb2hed( hed2rgb(img_in, channel_axis=channel_axis), - channel_axis=channel_axis + channel_axis=channel_axis, ) assert_array_almost_equal(img_out, img_in) # RGB<->BRO roundtrip with ubyte image def test_bro_rgb_roundtrip(self): from cucim.skimage.color.colorconv import bro_from_rgb, rgb_from_bro + img_in = img_as_ubyte(self.img_stains) img_out = combine_stains(img_in, rgb_from_bro) img_out = separate_stains(img_out, bro_from_rgb) @@ -268,6 +306,7 @@ def test_bro_rgb_roundtrip(self): @pytest.mark.parametrize("channel_axis", [0, 1, -1]) def test_bro_rgb_roundtrip_float(self, channel_axis): from skimage.color.colorconv import bro_from_rgb, rgb_from_bro + img_in = self.img_stains img_in = cp.moveaxis(img_in, source=-1, destination=channel_axis) img_out = combine_stains( @@ -281,7 +320,7 @@ def test_bro_rgb_roundtrip_float(self, channel_axis): # RGB to RGB CIE @pytest.mark.parametrize("channel_axis", [0, 1, -1, -2]) def test_rgb2rgbcie_conversion(self, channel_axis): - # ftm: off + # fmt: off gt = cp.asarray([[[ 0.1488856 , 0.18288098, 0.19277574], # noqa [ 0.01163224, 0.16649536, 0.18948516], # noqa [ 0.12259182, 0.03308008, 0.17298223], # noqa @@ -290,7 +329,7 @@ def test_rgb2rgbcie_conversion(self, channel_axis): [ 0.02629378, 0.1498009 , 0.01979351], # noqa [ 0.13725336, 0.01638562, 0.00329059], # noqa [ 0. , 0. , 0. 
]]]) # noqa - # ftm: on + # fmt: on img = np.moveaxis( self.colbars_array, source=-1, destination=channel_axis @@ -301,8 +340,8 @@ def test_rgb2rgbcie_conversion(self, channel_axis): assert_array_almost_equal(out, gt) def test_rgb2rgbcie_dtype(self): - img = self.colbars_array.astype('float64') - img32 = img.astype('float32') + img = self.colbars_array.astype("float64") + img32 = img.astype("float32") assert rgb2rgbcie(img).dtype == img.dtype assert rgb2rgbcie(img32).dtype == img32.dtype @@ -313,28 +352,38 @@ def test_rgbcie2rgb_conversion(self, channel_axis): rgb = cp.moveaxis( self.colbars_array, source=-1, destination=channel_axis ) - round_trip = rgbcie2rgb(rgb2rgbcie(rgb, channel_axis=channel_axis), - channel_axis=channel_axis) + round_trip = rgbcie2rgb( + rgb2rgbcie(rgb, channel_axis=channel_axis), + channel_axis=channel_axis, + ) # only roundtrip test, we checked rgb2rgbcie above already assert_array_almost_equal(round_trip, rgb) def test_rgbcie2rgb_dtype(self): - img = rgb2rgbcie(self.colbars_array).astype('float64') - img32 = img.astype('float32') + img = rgb2rgbcie(self.colbars_array).astype("float64") + img32 = img.astype("float32") assert rgbcie2rgb(img).dtype == img.dtype assert rgbcie2rgb(img32).dtype == img32.dtype @pytest.mark.parametrize("channel_axis", [0, -1]) def test_convert_colorspace(self, channel_axis): - colspaces = ['HSV', 'RGB CIE', 'XYZ', 'YCbCr', 'YPbPr', 'YDbDr'] + colspaces = ["HSV", "RGB CIE", "XYZ", "YCbCr", "YPbPr", "YDbDr"] colfuncs_from = [ - hsv2rgb, rgbcie2rgb, xyz2rgb, - ycbcr2rgb, ypbpr2rgb, ydbdr2rgb + hsv2rgb, + rgbcie2rgb, + xyz2rgb, + ycbcr2rgb, + ypbpr2rgb, + ydbdr2rgb, ] colfuncs_to = [ - rgb2hsv, rgb2rgbcie, rgb2xyz, - rgb2ycbcr, rgb2ypbpr, rgb2ydbdr + rgb2hsv, + rgb2rgbcie, + rgb2xyz, + rgb2ycbcr, + rgb2ypbpr, + rgb2ydbdr, ] colbars_array = cp.moveaxis( @@ -344,22 +393,24 @@ def test_convert_colorspace(self, channel_axis): kw = dict(channel_axis=channel_axis) assert_array_almost_equal( - convert_colorspace(colbars_array, 'RGB', 'RGB', **kw), - colbars_array) + convert_colorspace(colbars_array, "RGB", "RGB", **kw), colbars_array + ) for i, space in enumerate(colspaces): # print(f"space={space}") gt = colfuncs_from[i](colbars_array, **kw) assert_array_almost_equal( - convert_colorspace(colbars_array, space, 'RGB', **kw), gt) + convert_colorspace(colbars_array, space, "RGB", **kw), gt + ) gt = colfuncs_to[i](colbars_array, **kw) assert_array_almost_equal( - convert_colorspace(colbars_array, 'RGB', space, **kw), gt) + convert_colorspace(colbars_array, "RGB", space, **kw), gt + ) with pytest.raises(ValueError): - convert_colorspace(colbars_array, 'nokey', 'XYZ', **kw) + convert_colorspace(colbars_array, "nokey", "XYZ", **kw) with pytest.raises(ValueError): - convert_colorspace(colbars_array, 'RGB', 'nokey', **kw) + convert_colorspace(colbars_array, "RGB", "nokey", **kw) @pytest.mark.parametrize("channel_axis", [0, 1, -1, -2]) def test_rgb2gray(self, channel_axis): @@ -385,8 +436,8 @@ def test_rgb2gray_on_gray(self): rgb2gray(np.empty((5, 5))) def test_rgb2gray_dtype(self): - img = cp.random.rand(10, 10, 3).astype('float64') - img32 = img.astype('float32') + img = cp.random.rand(10, 10, 3).astype("float64") + img32 = img.astype("float32") assert rgb2gray(img).dtype == img.dtype assert rgb2gray(img32).dtype == img32.dtype @@ -395,25 +446,26 @@ def test_rgb2gray_dtype(self): # http://www.easyrgb.com/index.php?X=CALC # Note: easyrgb website displays xyz*100 def test_xyz2lab(self): - assert_array_almost_equal(xyz2lab(self.xyz_array), - self.lab_array, 
decimal=3) + assert_array_almost_equal( + xyz2lab(self.xyz_array), self.lab_array, decimal=3 + ) # Test the conversion with the rest of the illuminants. for i in ["A", "B", "C", "d50", "d55", "d65"]: i = i.lower() for obs in ["2", "10", "R"]: obs = obs.lower() - fname = os.path.join(data_dir, f'lab_array_{i}_{obs}.npy') + fname = os.path.join(data_dir, f"lab_array_{i}_{obs}.npy") lab_array_i_obs = np.load(fname) - assert_array_almost_equal(lab_array_i_obs, - xyz2lab(self.xyz_array, i, obs), - decimal=2) + assert_array_almost_equal( + lab_array_i_obs, xyz2lab(self.xyz_array, i, obs), decimal=2 + ) for i in ["d75", "e"]: - fname = os.path.join(data_dir, f'lab_array_{i}_2.npy') + fname = os.path.join(data_dir, f"lab_array_{i}_2.npy") lab_array_i_obs = np.load(fname) - assert_array_almost_equal(lab_array_i_obs, - xyz2lab(self.xyz_array, i, "2"), - decimal=2) + assert_array_almost_equal( + lab_array_i_obs, xyz2lab(self.xyz_array, i, "2"), decimal=2 + ) @pytest.mark.parametrize("channel_axis", [0, 1, -1, -2]) def test_xyz2lab_channel_axis(self, channel_axis): @@ -424,37 +476,40 @@ def test_xyz2lab_channel_axis(self, channel_axis): assert_array_almost_equal(lab, self.lab_array, decimal=3) def test_xyz2lab_dtype(self): - img = self.xyz_array.astype('float64') - img32 = img.astype('float32') + img = self.xyz_array.astype("float64") + img32 = img.astype("float32") assert xyz2lab(img).dtype == img.dtype assert xyz2lab(img32).dtype == img32.dtype def test_lab2xyz(self): - assert_array_almost_equal(lab2xyz(self.lab_array), - self.xyz_array, decimal=3) + assert_array_almost_equal( + lab2xyz(self.lab_array), self.xyz_array, decimal=3 + ) # Test the conversion with the rest of the illuminants. for i in ["A", "B", "C", "d50", "d55", "d65"]: i = i.lower() for obs in ["2", "10", "R"]: obs = obs.lower() - fname = os.path.join(data_dir, f'lab_array_{i}_{obs}.npy') + fname = os.path.join(data_dir, f"lab_array_{i}_{obs}.npy") lab_array_i_obs = cp.array(np.load(fname)) - assert_array_almost_equal(lab2xyz(lab_array_i_obs, i, obs), - self.xyz_array, decimal=3) + assert_array_almost_equal( + lab2xyz(lab_array_i_obs, i, obs), self.xyz_array, decimal=3 + ) for i in ["d75", "e"]: - fname = os.path.join(data_dir, f'lab_array_{i}_2.npy') + fname = os.path.join(data_dir, f"lab_array_{i}_2.npy") lab_array_i_obs = cp.array(np.load(fname)) - assert_array_almost_equal(lab2xyz(lab_array_i_obs, i, "2"), - self.xyz_array, decimal=3) + assert_array_almost_equal( + lab2xyz(lab_array_i_obs, i, "2"), self.xyz_array, decimal=3 + ) # And we include a call to test the exception handling in the code. with pytest.raises(ValueError): - lab2xyz(lab_array_i_obs, "NaI", "2") # Not an illuminant + lab2xyz(lab_array_i_obs, "NaI", "2") # Not an illuminant with pytest.raises(ValueError): - lab2xyz(lab_array_i_obs, "d50", "42") # Not a degree + lab2xyz(lab_array_i_obs, "d50", "42") # Not a degree @pytest.mark.parametrize("channel_axis", [0, 1, -1, -2]) def test_lab2xyz_channel_axis(self, channel_axis): @@ -465,8 +520,8 @@ def test_lab2xyz_channel_axis(self, channel_axis): assert_array_almost_equal(xyz, self.xyz_array, decimal=3) def test_lab2xyz_dtype(self): - img = self.lab_array.astype('float64') - img32 = img.astype('float32') + img = self.lab_array.astype("float64") + img32 = img.astype("float32") assert lab2xyz(img).dtype == img.dtype assert lab2xyz(img32).dtype == img32.dtype @@ -478,7 +533,7 @@ def test_rgb2lab_brucelindbloom(self): [website](http://brucelindbloom.com/index.html?ColorCalculator.html). 
""" # Obtained with D65 white point, sRGB model and gamma - # ftm: off + # fmt: off gt_for_colbars = cp.asarray([ [100, 0, 0], [97.1393, -21.5537, 94.4780], @@ -489,10 +544,11 @@ def test_rgb2lab_brucelindbloom(self): [32.2970, 79.1875, -107.8602], [0, 0, 0]]).T - # ftm: on + # fmt: on gt_array = cp.swapaxes(gt_for_colbars.reshape(3, 4, 2), 0, 2) - assert_array_almost_equal(rgb2lab(self.colbars_array), gt_array, - decimal=2) + assert_array_almost_equal( + rgb2lab(self.colbars_array), gt_array, decimal=2 + ) @pytest.mark.parametrize("channel_axis", [0, 1, -1, -2]) def test_lab_rgb_roundtrip(self, channel_axis): @@ -502,7 +558,7 @@ def test_lab_rgb_roundtrip(self, channel_axis): assert_allclose( lab2rgb( rgb2lab(img_rgb, channel_axis=channel_axis), - channel_axis=channel_axis + channel_axis=channel_axis, ), img_rgb, rtol=1e-5, @@ -510,15 +566,15 @@ def test_lab_rgb_roundtrip(self, channel_axis): ) def test_rgb2lab_dtype(self): - img = self.colbars_array.astype('float64') - img32 = img.astype('float32') + img = self.colbars_array.astype("float64") + img32 = img.astype("float32") assert rgb2lab(img).dtype == img.dtype assert rgb2lab(img32).dtype == img32.dtype def test_lab2rgb_dtype(self): - img = self.lab_array.astype('float64') - img32 = img.astype('float32') + img = self.lab_array.astype("float64") + img32 = img.astype("float32") assert lab2rgb(img).dtype == img.dtype assert lab2rgb(img32).dtype == img32.dtype @@ -527,25 +583,26 @@ def test_lab2rgb_dtype(self): # http://www.easyrgb.com/index.php?X=CALC # Note: easyrgb website displays xyz*100 def test_xyz2luv(self): - assert_array_almost_equal(xyz2luv(self.xyz_array), - self.luv_array, decimal=3) + assert_array_almost_equal( + xyz2luv(self.xyz_array), self.luv_array, decimal=3 + ) # Test the conversion with the rest of the illuminants. for i in ["A", "B", "C", "d50", "d55", "d65"]: i = i.lower() for obs in ["2", "10", "R"]: obs = obs.lower() - fname = os.path.join(data_dir, f'luv_array_{i}_{obs}.npy') + fname = os.path.join(data_dir, f"luv_array_{i}_{obs}.npy") luv_array_i_obs = np.load(fname) - assert_array_almost_equal(luv_array_i_obs, - xyz2luv(self.xyz_array, i, obs), - decimal=2) + assert_array_almost_equal( + luv_array_i_obs, xyz2luv(self.xyz_array, i, obs), decimal=2 + ) for i in ["d75", "e"]: - fname = os.path.join(data_dir, f'luv_array_{i}_2.npy') + fname = os.path.join(data_dir, f"luv_array_{i}_2.npy") luv_array_i_obs = np.load(fname) - assert_array_almost_equal(luv_array_i_obs, - xyz2luv(self.xyz_array, i, "2"), - decimal=2) + assert_array_almost_equal( + luv_array_i_obs, xyz2luv(self.xyz_array, i, "2"), decimal=2 + ) @pytest.mark.parametrize("channel_axis", [0, 1, -1, -2]) def test_xyz2luv_channel_axis(self, channel_axis): @@ -556,30 +613,33 @@ def test_xyz2luv_channel_axis(self, channel_axis): assert_array_almost_equal(luv, self.luv_array, decimal=3) def test_xyz2luv_dtype(self): - img = self.xyz_array.astype('float64') - img32 = img.astype('float32') + img = self.xyz_array.astype("float64") + img32 = img.astype("float32") assert xyz2luv(img).dtype == img.dtype assert xyz2luv(img32).dtype == img32.dtype def test_luv2xyz(self): - assert_array_almost_equal(luv2xyz(self.luv_array), - self.xyz_array, decimal=3) + assert_array_almost_equal( + luv2xyz(self.luv_array), self.xyz_array, decimal=3 + ) # Test the conversion with the rest of the illuminants. 
for i in ["A", "B", "C", "d50", "d55", "d65"]: i = i.lower() for obs in ["2", "10", "R"]: obs = obs.lower() - fname = os.path.join(data_dir, f'luv_array_{i}_{obs}.npy') + fname = os.path.join(data_dir, f"luv_array_{i}_{obs}.npy") luv_array_i_obs = cp.array(np.load(fname)) - assert_array_almost_equal(luv2xyz(luv_array_i_obs, i, obs), - self.xyz_array, decimal=3) + assert_array_almost_equal( + luv2xyz(luv_array_i_obs, i, obs), self.xyz_array, decimal=3 + ) for i in ["d75", "e"]: - fname = os.path.join(data_dir, f'luv_array_{i}_2.npy') + fname = os.path.join(data_dir, f"luv_array_{i}_2.npy") luv_array_i_obs = cp.array(np.load(fname)) - assert_array_almost_equal(luv2xyz(luv_array_i_obs, i, "2"), - self.xyz_array, decimal=3) + assert_array_almost_equal( + luv2xyz(luv_array_i_obs, i, "2"), self.xyz_array, decimal=3 + ) @pytest.mark.parametrize("channel_axis", [0, 1, -1, -2]) def test_luv2xyz_channel_axis(self, channel_axis): @@ -590,8 +650,8 @@ def test_luv2xyz_channel_axis(self, channel_axis): assert_array_almost_equal(xyz, self.xyz_array, decimal=3) def test_luv2xyz_dtype(self): - img = self.luv_array.astype('float64') - img32 = img.astype('float32') + img = self.luv_array.astype("float64") + img32 = img.astype("float32") assert luv2xyz(img).dtype == img.dtype assert luv2xyz(img32).dtype == img32.dtype @@ -603,7 +663,7 @@ def test_rgb2luv_brucelindbloom(self): [website](http://brucelindbloom.com/index.html?ColorCalculator.html). """ # Obtained with D65 white point, sRGB model and gamma - # ftm: off + # fmt: off gt_for_colbars = cp.asarray([ [100, 0, 0], [97.1393, 7.7056, 106.7866], @@ -614,21 +674,22 @@ def test_rgb2luv_brucelindbloom(self): [32.2970, -9.4054, -130.3423], [0, 0, 0]]).T - # ftm: on + # fmt: on gt_array = cp.swapaxes(gt_for_colbars.reshape(3, 4, 2), 0, 2) - assert_array_almost_equal(rgb2luv(self.colbars_array), - gt_array, decimal=2) + assert_array_almost_equal( + rgb2luv(self.colbars_array), gt_array, decimal=2 + ) def test_rgb2luv_dtype(self): - img = self.colbars_array.astype('float64') - img32 = img.astype('float32') + img = self.colbars_array.astype("float64") + img32 = img.astype("float32") assert rgb2luv(img).dtype == img.dtype assert rgb2luv(img32).dtype == img32.dtype def test_luv2rgb_dtype(self): - img = self.luv_array.astype('float64') - img32 = img.astype('float32') + img = self.luv_array.astype("float64") + img32 = img.astype("float32") assert luv2rgb(img).dtype == img.dtype assert luv2rgb(img32).dtype == img32.dtype @@ -640,7 +701,7 @@ def test_luv_rgb_roundtrip(self, channel_axis): assert_allclose( luv2rgb( rgb2luv(img_rgb, channel_axis=channel_axis), - channel_axis=channel_axis + channel_axis=channel_axis, ), img_rgb, rtol=1e-4, @@ -653,13 +714,13 @@ def test_lab_rgb_outlier(self): lab_array[1] = [50, 12, -85] lab_array[2] = [90, -4, -47] lab_array = cp.asarray(lab_array) - # ftm: off + # fmt: off rgb_array = cp.asarray([[[0.501, 0.481, 0]], [[0, 0.482, 1.]], [[0.578, 0.914, 1.]], ]) - # ftm: on + # fmt: on assert_array_almost_equal(lab2rgb(lab_array), rgb_array, decimal=3) def test_lab_full_gamut(self): @@ -747,51 +808,61 @@ def test_yuv_roundtrip(self, channel_axis): img_rgb = img_as_float(self.img_rgb)[::16, ::16] img_rgb = cp.moveaxis(img_rgb, source=-1, destination=channel_axis) assert_allclose( - yuv2rgb(rgb2yuv(img_rgb, channel_axis=channel_axis), - channel_axis=channel_axis), + yuv2rgb( + rgb2yuv(img_rgb, channel_axis=channel_axis), + channel_axis=channel_axis, + ), img_rgb, rtol=1e-5, atol=1e-5, ) assert_allclose( - yiq2rgb(rgb2yiq(img_rgb, 
channel_axis=channel_axis), - channel_axis=channel_axis), + yiq2rgb( + rgb2yiq(img_rgb, channel_axis=channel_axis), + channel_axis=channel_axis, + ), img_rgb, rtol=1e-5, atol=1e-5, ) assert_allclose( - ypbpr2rgb(rgb2ypbpr(img_rgb, channel_axis=channel_axis), - channel_axis=channel_axis), + ypbpr2rgb( + rgb2ypbpr(img_rgb, channel_axis=channel_axis), + channel_axis=channel_axis, + ), img_rgb, rtol=1e-5, atol=1e-5, ) assert_allclose( - ycbcr2rgb(rgb2ycbcr(img_rgb, channel_axis=channel_axis), - channel_axis=channel_axis), + ycbcr2rgb( + rgb2ycbcr(img_rgb, channel_axis=channel_axis), + channel_axis=channel_axis, + ), img_rgb, rtol=1e-5, atol=1e-5, ) assert_allclose( - ydbdr2rgb(rgb2ydbdr(img_rgb, channel_axis=channel_axis), - channel_axis=channel_axis), + ydbdr2rgb( + rgb2ydbdr(img_rgb, channel_axis=channel_axis), + channel_axis=channel_axis, + ), img_rgb, rtol=1e-5, atol=1e-5, ) def test_rgb2yuv_dtype(self): - img = self.colbars_array.astype('float64') - img32 = img.astype('float32') + img = self.colbars_array.astype("float64") + img32 = img.astype("float32") assert rgb2yuv(img).dtype == img.dtype assert rgb2yuv(img32).dtype == img32.dtype def test_yuv2rgb_dtype(self): - img = rgb2yuv(self.colbars_array).astype('float64') - img32 = img.astype('float32') + img = rgb2yuv(self.colbars_array).astype("float64") + img32 = img.astype("float32") assert yuv2rgb(img).dtype == img.dtype assert yuv2rgb(img32).dtype == img32.dtype @@ -799,9 +870,12 @@ def test_yuv2rgb_dtype(self): def test_rgb2yiq_conversion(self): rgb = img_as_float(self.img_rgb)[::16, ::16] yiq = rgb2yiq(rgb).reshape(-1, 3) - gt = np.asarray([colorsys.rgb_to_yiq(pt[0], pt[1], pt[2]) - for pt in cp.asnumpy(rgb).reshape(-1, 3)] - ) + gt = np.asarray( + [ + colorsys.rgb_to_yiq(pt[0], pt[1], pt[2]) + for pt in cp.asnumpy(rgb).reshape(-1, 3) + ] + ) assert_array_almost_equal(yiq, gt, decimal=2) @pytest.mark.parametrize("func", [lab2rgb, lab2xyz]) @@ -811,7 +885,7 @@ def test_warning_stacklevel(self, func): "1 negative Z values that have been clipped to zero" ) with pytest.warns(UserWarning, match=regex) as messages: - func(lab=cp.array([[[0, 0, 300.]]])) + func(lab=cp.array([[[0, 0, 300.0]]])) assert len(messages) == 1 assert messages[0].filename == __file__, "warning points at wrong file" @@ -820,9 +894,7 @@ def test_gray2rgb(): x = cp.asarray([0, 0.5, 1]) w = gray2rgb(x) # fmt off - expected_output = cp.asarray([[0, 0, 0], - [0.5, 0.5, 0.5], - [1, 1, 1]]) + expected_output = cp.asarray([[0, 0, 0], [0.5, 0.5, 0.5], [1, 1, 1]]) # fmt on assert_array_equal(w, expected_output) @@ -860,8 +932,7 @@ def test_gray2rgba(shape, channel_axis): # Shape check new_axis_loc = channel_axis % rgba.ndim - assert_equal(rgba.shape, - shape[:new_axis_loc] + (4, ) + shape[new_axis_loc:]) + assert_equal(rgba.shape, shape[:new_axis_loc] + (4,) + shape[new_axis_loc:]) # dtype check assert rgba.dtype == img.dtype @@ -884,8 +955,7 @@ def test_gray2rgb_channel_axis(shape, channel_axis): # Shape check new_axis_loc = channel_axis % rgb.ndim - assert_equal(rgb.shape, - shape[:new_axis_loc] + (3, ) + shape[new_axis_loc:]) + assert_equal(rgb.shape, shape[:new_axis_loc] + (3,) + shape[new_axis_loc:]) # dtype check assert rgb.dtype == img.dtype @@ -893,7 +963,7 @@ def test_gray2rgb_channel_axis(shape, channel_axis): def test_gray2rgba_dtype(): img_f64 = cp.random.random((5, 5)) - img_f32 = img_f64.astype('float32') + img_f32 = img_f64.astype("float32") img_u8 = img_as_ubyte(img_f64) img_int = img_u8.astype(int) @@ -934,7 +1004,7 @@ def test_gray2rgba_alpha(): # Invalid shape 
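The shape assertion above pins down what `gray2rgba` does: a new channel axis of length 4 is inserted at `channel_axis`, and an array-valued `alpha` must match `image.shape` exactly. A small sketch of both behaviours (array values are illustrative):

```python
import cupy as cp
from cucim.skimage.color import gray2rgba

img = cp.random.random((5, 5))

# scalar alpha is broadcast; the channel axis is appended last by default
rgba = gray2rgba(img, alpha=0.5)
assert rgba.shape == (5, 5, 4)

# with channel_axis=0 the new axis of length 4 leads the shape
rgba0 = gray2rgba(img, alpha=cp.ones((5, 5)), channel_axis=0)
assert rgba0.shape == (4, 5, 5)
```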
alpha = cp.random.random((5, 5, 1)) - expected_err_msg = ("alpha.shape must match image.shape") + expected_err_msg = "alpha.shape must match image.shape" with pytest.raises(ValueError) as err: rgba = gray2rgba(img, alpha) @@ -942,8 +1012,9 @@ def test_gray2rgba_alpha(): @pytest.mark.parametrize("func", [rgb2gray, gray2rgb, gray2rgba]) -@pytest.mark.parametrize("shape", ([(3, ), (2, 3), (4, 5, 3), (5, 4, 5, 3), - (4, 5, 4, 5, 3)])) +@pytest.mark.parametrize( + "shape", ([(3,), (2, 3), (4, 5, 3), (5, 4, 5, 3), (4, 5, 4, 5, 3)]) +) def test_nD_gray_conversion(func, shape): img = cp.random.rand(*shape) out = func(img) @@ -951,22 +1022,42 @@ def test_nD_gray_conversion(func, shape): assert out.shape[:common_ndim] == shape[:common_ndim] -@pytest.mark.parametrize("func", [rgb2hsv, hsv2rgb, - rgb2xyz, xyz2rgb, - rgb2hed, hed2rgb, - rgb2rgbcie, rgbcie2rgb, - xyz2lab, lab2xyz, - lab2rgb, rgb2lab, - xyz2luv, luv2xyz, - luv2rgb, rgb2luv, - lab2lch, lch2lab, - rgb2yuv, yuv2rgb, - rgb2yiq, yiq2rgb, - rgb2ypbpr, ypbpr2rgb, - rgb2ycbcr, ycbcr2rgb, - rgb2ydbdr, ydbdr2rgb]) -@pytest.mark.parametrize("shape", ([(3, ), (2, 3), (4, 5, 3), (5, 4, 5, 3), - (4, 5, 4, 5, 3)])) +@pytest.mark.parametrize( + "func", + [ + rgb2hsv, + hsv2rgb, + rgb2xyz, + xyz2rgb, + rgb2hed, + hed2rgb, + rgb2rgbcie, + rgbcie2rgb, + xyz2lab, + lab2xyz, + lab2rgb, + rgb2lab, + xyz2luv, + luv2xyz, + luv2rgb, + rgb2luv, + lab2lch, + lch2lab, + rgb2yuv, + yuv2rgb, + rgb2yiq, + yiq2rgb, + rgb2ypbpr, + ypbpr2rgb, + rgb2ycbcr, + ycbcr2rgb, + rgb2ydbdr, + ydbdr2rgb, + ], +) +@pytest.mark.parametrize( + "shape", ([(3,), (2, 3), (4, 5, 3), (5, 4, 5, 3), (4, 5, 4, 5, 3)]) +) def test_nD_color_conversion(func, shape): img = cp.random.rand(*shape) out = func(img) @@ -974,8 +1065,9 @@ def test_nD_color_conversion(func, shape): assert out.shape == img.shape -@pytest.mark.parametrize("shape", ([(4, ), (2, 4), (4, 5, 4), (5, 4, 5, 4), - (4, 5, 4, 5, 4)])) +@pytest.mark.parametrize( + "shape", ([(4,), (2, 4), (4, 5, 4), (5, 4, 5, 4), (4, 5, 4, 5, 4)]) +) def test_rgba2rgb_nD(shape): img = cp.random.rand(*shape) out = rgba2rgb(img) @@ -985,22 +1077,22 @@ def test_rgba2rgb_nD(shape): assert out.shape == expected_shape -@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64]) def test_rgba2rgb_dtypes(dtype): - rgba = cp.array([[[0, 0.5, 1, 0], - [0, 0.5, 1, 1], - [0, 0.5, 1, 0.5]]]).astype(dtype=dtype) + rgba = cp.array( + [[[0, 0.5, 1, 0], [0, 0.5, 1, 1], [0, 0.5, 1, 0.5]]] + ).astype(dtype=dtype) rgb = rgba2rgb(rgba) float_dtype = _supported_float_type(rgba.dtype) assert rgb.dtype == float_dtype - expected = cp.array([[[1, 1, 1], - [0, 0.5, 1], - [0.5, 0.75, 1]]]).astype(float) + expected = cp.array([[[1, 1, 1], [0, 0.5, 1], [0.5, 0.75, 1]]]).astype( + float + ) assert rgb.shape == expected.shape assert_array_almost_equal(rgb, expected) -@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64]) def test_lab_lch_roundtrip_dtypes(dtype): rgb = cp.asarray(data.colorwheel()) rgb = img_as_float(rgb).astype(dtype=dtype, copy=False) @@ -1012,7 +1104,7 @@ def test_lab_lch_roundtrip_dtypes(dtype): assert_array_almost_equal(lab2, lab, decimal=decimal) -@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64]) def test_rgb2hsv_dtypes(dtype): rgb = cp.asarray(data.colorwheel()) rgb = img_as_float(rgb)[::16, 
::16] @@ -1022,8 +1114,10 @@ def test_rgb2hsv_dtypes(dtype): assert hsv.dtype == float_dtype # ground truth from colorsys gt = cp.asarray( - [colorsys.rgb_to_hsv(pt[0], pt[1], pt[2]) - for pt in cp.asnumpy(rgb).reshape(-1, 3)] + [ + colorsys.rgb_to_hsv(pt[0], pt[1], pt[2]) + for pt in cp.asnumpy(rgb).reshape(-1, 3) + ] ) decimal = 3 if float_dtype == cp.float32 else 7 assert_array_almost_equal(hsv, gt, decimal=decimal) diff --git a/python/cucim/src/cucim/skimage/color/tests/test_colorlabel.py b/python/cucim/src/cucim/skimage/color/tests/test_colorlabel.py index adc5fd00b..281c1b1e6 100644 --- a/python/cucim/src/cucim/skimage/color/tests/test_colorlabel.py +++ b/python/cucim/src/cucim/skimage/color/tests/test_colorlabel.py @@ -32,8 +32,7 @@ def test_uint_image(channel_axis): labels = cp.zeros((10, 10), dtype=cp.int64) labels[1:3, 1:3] = 1 labels[6:9, 6:9] = 2 - output = label2rgb(labels, image=img, bg_label=0, - channel_axis=channel_axis) + output = label2rgb(labels, image=img, bg_label=0, channel_axis=channel_axis) # Make sure that the output is made of floats and in the correct range assert cp.issubdtype(output.dtype, cp.floating) assert output.max() <= 1 @@ -48,8 +47,9 @@ def test_rgb(): label = cp.arange(3).reshape(1, -1) colors = [(1, 0, 0), (0, 1, 0), (0, 0, 1)] # Set alphas just in case the defaults change - rgb = label2rgb(label, image=image, colors=colors, alpha=1, - image_alpha=1, bg_label=-1) + rgb = label2rgb( + label, image=image, colors=colors, alpha=1, image_alpha=1, bg_label=-1 + ) assert_array_almost_equal(rgb, [colors]) @@ -57,8 +57,7 @@ def test_alpha(): image = cp.random.uniform(size=(3, 3)) label = cp.random.randint(0, 9, size=(3, 3)) # If we set `alpha = 0`, then rgb should match image exactly. - rgb = label2rgb(label, image=image, alpha=0, image_alpha=1, - bg_label=-1) + rgb = label2rgb(label, image=image, alpha=0, image_alpha=1, bg_label=-1) assert_array_almost_equal(rgb[..., 0], image) assert_array_almost_equal(rgb[..., 1], image) assert_array_almost_equal(rgb[..., 2], image) @@ -76,19 +75,21 @@ def test_image_alpha(): label = cp.arange(3).reshape(1, -1) colors = [(1, 0, 0), (0, 1, 0), (0, 0, 1)] # If we set `image_alpha = 0`, then rgb should match label colors exactly. 
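These tests isolate the blending parameters of `label2rgb`: `alpha` weights the label colors, `image_alpha` weights the underlying image, and pixels equal to `bg_label` are left as background. A minimal usage sketch (array values are illustrative):

```python
import cupy as cp
from cucim.skimage.color import label2rgb

image = cp.random.uniform(size=(8, 8))
labels = cp.zeros((8, 8), dtype=cp.int64)
labels[2:4, 2:4] = 1
labels[5:7, 5:7] = 2

# alpha=0 would reproduce the grayscale image; alpha=1 pure label colors
overlay = label2rgb(
    labels,
    image=image,
    colors=[(1, 0, 0), (0, 1, 0)],
    alpha=0.3,
    bg_label=0,
)
assert overlay.shape == (8, 8, 3)
```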
- rgb = label2rgb(label, image=image, colors=colors, alpha=1, - image_alpha=0, bg_label=-1) + rgb = label2rgb( + label, image=image, colors=colors, alpha=1, image_alpha=0, bg_label=-1 + ) assert_array_almost_equal(rgb, [colors]) def test_color_names(): image = cp.ones((1, 3)) label = cp.arange(3).reshape(1, -1) - cnames = ['red', 'lime', 'blue'] + cnames = ["red", "lime", "blue"] colors = [(1, 0, 0), (0, 1, 0), (0, 0, 1)] # Set alphas just in case the defaults change - rgb = label2rgb(label, image=image, colors=cnames, alpha=1, - image_alpha=1, bg_label=-1) + rgb = label2rgb( + label, image=image, colors=cnames, alpha=1, image_alpha=1, bg_label=-1 + ) assert_array_almost_equal(rgb, [colors]) @@ -97,8 +98,14 @@ def test_bg_and_color_cycle(): label = cp.arange(10).reshape(1, -1) colors = [(1, 0, 0), (0, 0, 1)] bg_color = (0, 0, 0) - rgb = label2rgb(label, image=image, bg_label=0, bg_color=bg_color, - colors=colors, alpha=1) + rgb = label2rgb( + label, + image=image, + bg_label=0, + bg_color=bg_color, + colors=colors, + alpha=1, + ) assert_array_almost_equal(rgb[0, 0], bg_color) for pixel, color in zip(rgb[0, 1:], itertools.cycle(colors)): assert_array_almost_equal(pixel, color) @@ -106,18 +113,24 @@ def test_bg_and_color_cycle(): def test_negative_labels(): labels = cp.array([0, -1, -2, 0]) - rout = cp.array([(0., 0., 0.), (0., 0., 1.), (1., 0., 0.), (0., 0., 0.)]) + rout = cp.array( + [(0.0, 0.0, 0.0), (0.0, 0.0, 1.0), (1.0, 0.0, 0.0), (0.0, 0.0, 0.0)] + ) assert_array_almost_equal( - rout, label2rgb(labels, bg_label=0, alpha=1, image_alpha=1)) + rout, label2rgb(labels, bg_label=0, alpha=1, image_alpha=1) + ) def test_nonconsecutive(): labels = cp.array([0, 2, 4, 0]) colors = [(1, 0, 0), (0, 0, 1)] - rout = cp.array([(1., 0., 0.), (0., 0., 1.), (1., 0., 0.), (1., 0., 0.)]) + rout = cp.array( + [(1.0, 0.0, 0.0), (0.0, 0.0, 1.0), (1.0, 0.0, 0.0), (1.0, 0.0, 0.0)] + ) assert_array_almost_equal( - rout, label2rgb(labels, colors=colors, alpha=1, - image_alpha=1, bg_label=-1)) + rout, + label2rgb(labels, colors=colors, alpha=1, image_alpha=1, bg_label=-1), + ) def test_label_consistency(): @@ -129,8 +142,9 @@ def test_label_consistency(): rgb_1 = label2rgb(label_1, colors=colors, bg_label=-1) rgb_2 = label2rgb(label_2, colors=colors, bg_label=-1) for label_id in label_2.ravel(): - assert_array_almost_equal(rgb_1[label_1 == label_id], - rgb_2[label_2 == label_id]) + assert_array_almost_equal( + rgb_1[label_1 == label_id], rgb_2[label_2 == label_id] + ) def test_leave_labels_alone(): @@ -211,7 +225,7 @@ def test_bg_color_rgb_string(): labels[6:9, 6:9] = 2 img = cp.asarray(img) labels = cp.asarray(labels) - output = label2rgb(labels, image=img, alpha=0.9, bg_label=0, bg_color='red') + output = label2rgb(labels, image=img, alpha=0.9, bg_label=0, bg_color="red") assert output[0, 0, 0] > 0.9 # red channel @@ -222,16 +236,16 @@ def test_avg_with_2d_image(): labels[6:9, 6:9] = 2 img = cp.asarray(img) labels = cp.asarray(labels) - assert_no_warnings(label2rgb, labels, image=img, bg_label=0, kind='avg') + assert_no_warnings(label2rgb, labels, image=img, bg_label=0, kind="avg") -@pytest.mark.parametrize('image_type', ['rgb', 'gray', None]) +@pytest.mark.parametrize("image_type", ["rgb", "gray", None]) def test_label2rgb_nd(image_type): # validate 1D and 3D cases by testing their output relative to the 2D case shape = (10, 10) - if image_type == 'rgb': + if image_type == "rgb": img = cp.random.randint(0, 255, shape + (3,), dtype=np.uint8) - elif image_type == 'gray': + elif image_type == "gray": img = 
cp.random.randint(0, 255, shape, dtype=np.uint8) else: img = None @@ -253,7 +267,7 @@ def test_label2rgb_nd(image_type): assert_array_equal(labeled_1d, expected) # Labeling a 3D stack of duplicates gives the same result in each plane - image_3d = cp.stack((img, ) * 4) if image_type is not None else None + image_3d = cp.stack((img,) * 4) if image_type is not None else None labels_3d = cp.stack((labels,) * 4) labeled_3d = label2rgb(labels_3d, image=image_3d, bg_label=0) for labeled_plane in labeled_3d: @@ -284,8 +298,9 @@ def test_overlay_full_saturation(): labels[5:, 5:] = 2 labels[:3, :3] = 0 alpha = 0.3 - rgb = label2rgb(labels, image=rgb_img, alpha=alpha, - bg_label=0, saturation=1) + rgb = label2rgb( + labels, image=rgb_img, alpha=alpha, bg_label=0, saturation=1 + ) # check that rgb part of input image is preserved, where labels=0 assert_array_almost_equal(rgb_img[:3, :3] * (1 - alpha), rgb[:3, :3]) @@ -297,8 +312,9 @@ def test_overlay_custom_saturation(): labels[:3, :3] = 0 alpha = 0.3 saturation = 0.3 - rgb = label2rgb(labels, image=rgb_img, alpha=alpha, - bg_label=0, saturation=saturation) + rgb = label2rgb( + labels, image=rgb_img, alpha=alpha, bg_label=0, saturation=saturation + ) hsv = rgb2hsv(rgb_img) hsv[..., 1] *= saturation @@ -312,8 +328,6 @@ def test_saturation_warning(): rgb_img = cp.random.uniform(size=(10, 10, 3)) labels = cp.ones((10, 10), dtype=np.int64) with expected_warnings(["saturation must be in range"]): - label2rgb(labels, image=rgb_img, - bg_label=0, saturation=2) + label2rgb(labels, image=rgb_img, bg_label=0, saturation=2) with expected_warnings(["saturation must be in range"]): - label2rgb(labels, image=rgb_img, - bg_label=0, saturation=-1) + label2rgb(labels, image=rgb_img, bg_label=0, saturation=-1) diff --git a/python/cucim/src/cucim/skimage/color/tests/test_delta_e.py b/python/cucim/src/cucim/skimage/color/tests/test_delta_e.py index 605098e6e..95af3a506 100644 --- a/python/cucim/src/cucim/skimage/color/tests/test_delta_e.py +++ b/python/cucim/src/cucim/skimage/color/tests/test_delta_e.py @@ -3,29 +3,36 @@ import cupy as cp import numpy as np import pytest -from cupy.testing import (assert_allclose, assert_array_almost_equal, - assert_array_equal) +from cupy.testing import ( + assert_allclose, + assert_array_almost_equal, + assert_array_equal, +) from cucim.skimage._shared.testing import expected_warnings, fetch from cucim.skimage._shared.utils import _supported_float_type -from cucim.skimage.color.delta_e import (deltaE_cie76, deltaE_ciede94, - deltaE_ciede2000, deltaE_cmc) +from cucim.skimage.color.delta_e import ( + deltaE_cie76, + deltaE_ciede94, + deltaE_ciede2000, + deltaE_cmc, +) @pytest.mark.parametrize("channel_axis", [0, 1, -1]) -@pytest.mark.parametrize('dtype', [cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype", [cp.float32, cp.float64]) def test_ciede2000_dE(dtype, channel_axis): data = load_ciede2000_data() N = len(data) lab1 = np.zeros((N, 3), dtype=dtype) - lab1[:, 0] = data['L1'] - lab1[:, 1] = data['a1'] - lab1[:, 2] = data['b1'] + lab1[:, 0] = data["L1"] + lab1[:, 1] = data["a1"] + lab1[:, 2] = data["b1"] lab2 = np.zeros((N, 3), dtype=dtype) - lab2[:, 0] = data['L2'] - lab2[:, 1] = data['a2'] - lab2[:, 2] = data['b2'] + lab2[:, 0] = data["L2"] + lab2[:, 1] = data["a2"] + lab2[:, 2] = data["b2"] lab1 = cp.moveaxis(cp.asarray(lab1), source=-1, destination=channel_axis) lab2 = cp.moveaxis(cp.asarray(lab2), source=-1, destination=channel_axis) @@ -35,54 +42,55 @@ def test_ciede2000_dE(dtype, channel_axis): # Note: lower float64 
accuracy than scikit-image # rtol = 1e-2 if dtype == cp.float32 else 1e-4 rtol = 1e-2 - assert_allclose(dE2, data['dE'], rtol=rtol) + assert_allclose(dE2, data["dE"], rtol=rtol) def load_ciede2000_data(): - dtype = [('pair', int), - ('1', int), - ('L1', float), - ('a1', float), - ('b1', float), - ('a1_prime', float), - ('C1_prime', float), - ('h1_prime', float), - ('hbar_prime', float), - ('G', float), - ('T', float), - ('SL', float), - ('SC', float), - ('SH', float), - ('RT', float), - ('dE', float), - ('2', int), - ('L2', float), - ('a2', float), - ('b2', float), - ('a2_prime', float), - ('C2_prime', float), - ('h2_prime', float), - ] + dtype = [ + ("pair", int), + ("1", int), + ("L1", float), + ("a1", float), + ("b1", float), + ("a1_prime", float), + ("C1_prime", float), + ("h1_prime", float), + ("hbar_prime", float), + ("G", float), + ("T", float), + ("SL", float), + ("SC", float), + ("SH", float), + ("RT", float), + ("dE", float), + ("2", int), + ("L2", float), + ("a2", float), + ("b2", float), + ("a2_prime", float), + ("C2_prime", float), + ("h2_prime", float), + ] # note: ciede_test_data.txt contains several intermediate quantities - path = fetch('color/tests/ciede2000_test_data.txt') + path = fetch("color/tests/ciede2000_test_data.txt") return np.loadtxt(path, dtype=dtype) @pytest.mark.parametrize("channel_axis", [0, 1, -1]) -@pytest.mark.parametrize('dtype', [cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype", [cp.float32, cp.float64]) def test_cie76(dtype, channel_axis): data = load_ciede2000_data() N = len(data) lab1 = np.zeros((N, 3), dtype=dtype) - lab1[:, 0] = data['L1'] - lab1[:, 1] = data['a1'] - lab1[:, 2] = data['b1'] + lab1[:, 0] = data["L1"] + lab1[:, 1] = data["a1"] + lab1[:, 2] = data["b1"] lab2 = np.zeros((N, 3), dtype=dtype) - lab2[:, 0] = data['L2'] - lab2[:, 1] = data['a2'] - lab2[:, 2] = data['b2'] + lab2[:, 0] = data["L2"] + lab2[:, 1] = data["a2"] + lab2[:, 2] = data["b2"] lab1 = cp.moveaxis(cp.asarray(lab1), source=-1, destination=channel_axis) lab2 = cp.moveaxis(cp.asarray(lab2), source=-1, destination=channel_axis) @@ -105,19 +113,19 @@ def test_cie76(dtype, channel_axis): @pytest.mark.parametrize("channel_axis", [0, 1, -1]) -@pytest.mark.parametrize('dtype', [cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype", [cp.float32, cp.float64]) def test_ciede94(dtype, channel_axis): data = load_ciede2000_data() N = len(data) lab1 = np.zeros((N, 3), dtype=dtype) - lab1[:, 0] = data['L1'] - lab1[:, 1] = data['a1'] - lab1[:, 2] = data['b1'] + lab1[:, 0] = data["L1"] + lab1[:, 1] = data["a1"] + lab1[:, 2] = data["b1"] lab2 = np.zeros((N, 3), dtype=dtype) - lab2[:, 0] = data['L2'] - lab2[:, 1] = data['a2'] - lab2[:, 2] = data['b2'] + lab2[:, 0] = data["L2"] + lab2[:, 1] = data["a2"] + lab2[:, 2] = data["b2"] lab1 = cp.moveaxis(cp.asarray(lab1), source=-1, destination=channel_axis) lab2 = cp.moveaxis(cp.asarray(lab2), source=-1, destination=channel_axis) @@ -140,19 +148,19 @@ def test_ciede94(dtype, channel_axis): @pytest.mark.parametrize("channel_axis", [0, 1, -1]) -@pytest.mark.parametrize('dtype', [cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype", [cp.float32, cp.float64]) def test_cmc(dtype, channel_axis): data = load_ciede2000_data() N = len(data) lab1 = np.zeros((N, 3), dtype=dtype) - lab1[:, 0] = data['L1'] - lab1[:, 1] = data['a1'] - lab1[:, 2] = data['b1'] + lab1[:, 0] = data["L1"] + lab1[:, 1] = data["a1"] + lab1[:, 2] = data["b1"] lab2 = np.zeros((N, 3), dtype=dtype) - lab2[:, 0] = data['L2'] - lab2[:, 1] = data['a2'] - lab2[:, 2] = 
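Each of these distance tests builds Lab arrays from the CIEDE2000 test dataset loaded by `load_ciede2000_data` and compares one metric against its published columns. For orientation, a single-pair sketch of the four metrics exercised here (the Lab values are illustrative, not taken from the dataset):

```python
import cupy as cp
from cucim.skimage.color.delta_e import (
    deltaE_cie76,
    deltaE_ciede94,
    deltaE_ciede2000,
    deltaE_cmc,
)

lab1 = cp.asarray([50.0, 2.7, -79.8])
lab2 = cp.asarray([50.0, 0.0, -82.7])

# CIE76 is plain Euclidean distance in Lab; the later metrics add
# lightness/chroma/hue weighting for better perceptual uniformity
for metric in (deltaE_cie76, deltaE_ciede94, deltaE_ciede2000, deltaE_cmc):
    print(metric.__name__, float(metric(lab1, lab2)))
```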
data['b2'] + lab2[:, 0] = data["L2"] + lab2[:, 1] = data["a2"] + lab2[:, 2] = data["b2"] lab1 = cp.moveaxis(cp.asarray(lab1), source=-1, destination=channel_axis) lab2 = cp.moveaxis(cp.asarray(lab2), source=-1, destination=channel_axis) @@ -190,7 +198,7 @@ def test_cmc(dtype, channel_axis): def test_cmc_single_item(): # Single item case: - lab1 = lab2 = cp.array([0., 1.59607713, 0.87755709]) + lab1 = lab2 = cp.array([0.0, 1.59607713, 0.87755709]) assert_array_equal(deltaE_cmc(lab1, lab2), 0) lab2[0] += cp.finfo(float).eps diff --git a/python/cucim/src/cucim/skimage/data/__init__.pyi b/python/cucim/src/cucim/skimage/data/__init__.pyi index 88deb1e76..990729a46 100644 --- a/python/cucim/src/cucim/skimage/data/__init__.pyi +++ b/python/cucim/src/cucim/skimage/data/__init__.pyi @@ -1,5 +1,5 @@ __all__ = [ - 'binary_blobs', + "binary_blobs", ] from ._binary_blobs import binary_blobs diff --git a/python/cucim/src/cucim/skimage/data/_binary_blobs.py b/python/cucim/src/cucim/skimage/data/_binary_blobs.py index 007622f07..2a1e1114f 100644 --- a/python/cucim/src/cucim/skimage/data/_binary_blobs.py +++ b/python/cucim/src/cucim/skimage/data/_binary_blobs.py @@ -1,8 +1,9 @@ import cupy as cp -def binary_blobs(length=512, blob_size_fraction=0.1, n_dim=2, - volume_fraction=0.5, seed=None): +def binary_blobs( + length=512, blob_size_fraction=0.1, n_dim=2, volume_fraction=0.5, seed=None +): """ Generate synthetic binary image with several rounded blob-like objects. @@ -59,7 +60,7 @@ def binary_blobs(length=512, blob_size_fraction=0.1, n_dim=2, rs = cp.random.default_rng(seed) shape = tuple([length] * n_dim) mask = cp.zeros(shape) - n_pts = max(int(1. / blob_size_fraction) ** n_dim, 1) + n_pts = max(int(1.0 / blob_size_fraction) ** n_dim, 1) points = (length * rs.random((n_dim, n_pts))).astype(int) mask[tuple(indices for indices in points)] = 1 mask = gaussian(mask, sigma=0.25 * length * blob_size_fraction) diff --git a/python/cucim/src/cucim/skimage/data/tests/test_data.py b/python/cucim/src/cucim/skimage/data/tests/test_data.py index c56133cc1..eb92b2294 100644 --- a/python/cucim/src/cucim/skimage/data/tests/test_data.py +++ b/python/cucim/src/cucim/skimage/data/tests/test_data.py @@ -11,6 +11,7 @@ def test_binary_blobs(): assert_almost_equal(blobs.mean(), 0.25, decimal=1) blobs = data.binary_blobs(length=32, volume_fraction=0.25, n_dim=3) assert_almost_equal(blobs.mean(), 0.25, decimal=1) - other_realization = data.binary_blobs(length=32, volume_fraction=0.25, - n_dim=3) + other_realization = data.binary_blobs( + length=32, volume_fraction=0.25, n_dim=3 + ) assert not cp.all(blobs == other_realization) diff --git a/python/cucim/src/cucim/skimage/exposure/__init__.py b/python/cucim/src/cucim/skimage/exposure/__init__.py index 6d74f539a..5af690875 100644 --- a/python/cucim/src/cucim/skimage/exposure/__init__.py +++ b/python/cucim/src/cucim/skimage/exposure/__init__.py @@ -1,16 +1,25 @@ from ._adapthist import equalize_adapthist -from .exposure import (adjust_gamma, adjust_log, adjust_sigmoid, - cumulative_distribution, equalize_hist, histogram, - is_low_contrast, rescale_intensity) +from .exposure import ( + adjust_gamma, + adjust_log, + adjust_sigmoid, + cumulative_distribution, + equalize_hist, + histogram, + is_low_contrast, + rescale_intensity, +) from .histogram_matching import match_histograms -__all__ = ['histogram', - 'equalize_hist', - 'equalize_adapthist', - 'rescale_intensity', - 'cumulative_distribution', - 'adjust_gamma', - 'adjust_sigmoid', - 'adjust_log', - 'is_low_contrast', - 
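`binary_blobs` (shown above) seeds roughly `(1 / blob_size_fraction) ** n_dim` random points, smooths them with a Gaussian whose width scales with `length * blob_size_fraction`, and thresholds so that about `volume_fraction` of the pixels end up True. A minimal usage sketch:

```python
import cupy as cp
from cucim.skimage.data import binary_blobs

blobs = binary_blobs(
    length=128, blob_size_fraction=0.1, volume_fraction=0.3, seed=42
)
assert blobs.dtype == bool
assert blobs.shape == (128, 128)

# the realized foreground fraction lands near the requested 0.3
print(float(blobs.mean()))
```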
'match_histograms'] +__all__ = [ + "histogram", + "equalize_hist", + "equalize_adapthist", + "rescale_intensity", + "cumulative_distribution", + "adjust_gamma", + "adjust_sigmoid", + "adjust_log", + "is_low_contrast", + "match_histograms", +] diff --git a/python/cucim/src/cucim/skimage/exposure/_adapthist.py b/python/cucim/src/cucim/skimage/exposure/_adapthist.py index 939601973..bb449517a 100644 --- a/python/cucim/src/cucim/skimage/exposure/_adapthist.py +++ b/python/cucim/src/cucim/skimage/exposure/_adapthist.py @@ -24,12 +24,11 @@ from ..color.adapt_rgb import adapt_rgb, hsv_value from ..util import img_as_uint -NR_OF_GRAY = 2 ** 14 # number of grayscale levels to use in CLAHE algorithm +NR_OF_GRAY = 2**14 # number of grayscale levels to use in CLAHE algorithm @adapt_rgb(hsv_value) -def equalize_adapthist(image, kernel_size=None, - clip_limit=0.01, nbins=256): +def equalize_adapthist(image, kernel_size=None, clip_limit=0.01, nbins=256): """Contrast Limited Adaptive Histogram Equalization (CLAHE). An algorithm for local contrast enhancement, that uses histograms computed @@ -90,7 +89,7 @@ def equalize_adapthist(image, kernel_size=None, elif isinstance(kernel_size, numbers.Number): kernel_size = (kernel_size,) * image.ndim elif len(kernel_size) != image.ndim: - raise ValueError(f'Incorrect value of `kernel_size`: {kernel_size}') + raise ValueError(f"Incorrect value of `kernel_size`: {kernel_size}") kernel_size = [int(k) for k in kernel_size] @@ -132,13 +131,15 @@ def _clahe(image, kernel_size, clip_limit, nbins): # - is preceded by half a kernel size pad_start_per_dim = [k // 2 for k in kernel_size] - pad_end_per_dim = [(k - s % k) % k + math.ceil(k / 2.) - for k, s in zip(kernel_size, image.shape)] + pad_end_per_dim = [ + (k - s % k) % k + math.ceil(k / 2.0) + for k, s in zip(kernel_size, image.shape) + ] image = pad( image, [[p_i, p_f] for p_i, p_f in zip(pad_start_per_dim, pad_end_per_dim)], - mode='reflect', + mode="reflect", ) # determine gray value bins @@ -154,8 +155,9 @@ def _clahe(image, kernel_size, clip_limit, nbins): hist_blocks_shape = functools.reduce( operator.add, [(s, k) for s, k in zip(ns_hist, kernel_size)] ) - hist_blocks_axis_order = (tuple(range(0, ndim * 2, 2)) + - tuple(range(1, ndim * 2, 2))) + hist_blocks_axis_order = tuple(range(0, ndim * 2, 2)) + tuple( + range(1, ndim * 2, 2) + ) hist_slices = [ slice(k // 2, k // 2 + n * k) for k, n in zip(kernel_size, ns_hist) ] @@ -181,18 +183,18 @@ def _clahe(image, kernel_size, clip_limit, nbins): # faster to loop over the arrays on the host # (hist is small and clip_histogram has too much overhead) # TODO: implement clip_histogram kernel to avoid synchronization? - hist = cp.asarray(np.apply_along_axis( # synchronize! - clip_histogram, -1, cp.asnumpy(hist), clip_limit=clim - )) + hist = cp.asarray( + np.apply_along_axis( # synchronize! 
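Per the validation above, `equalize_adapthist` accepts a scalar or per-axis `kernel_size` (when omitted, scikit-image's convention of 1/8 of each image dimension applies), pads the image by half a kernel so every tile is complete, and clips each tile histogram at `clip_limit` before equalizing. A minimal usage sketch:

```python
import cupy as cp
from cucim.skimage import exposure

img = cp.random.random((256, 256)).astype(cp.float32)

# scalar kernel_size is broadcast to every axis; output is float in [0, 1]
out = exposure.equalize_adapthist(img, kernel_size=64, clip_limit=0.02)
assert out.shape == img.shape
assert 0.0 <= float(out.min()) and float(out.max()) <= 1.0
```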
+ clip_histogram, -1, cp.asnumpy(hist), clip_limit=clim + ) + ) else: hist = cp.apply_along_axis(clip_histogram, -1, hist, clip_limit=clim) hist = map_histogram(hist, 0, NR_OF_GRAY - 1, kernel_elements) hist = hist.reshape(hist_block_assembled_shape[:ndim] + (-1,)) # duplicate leading mappings in each dim - map_array = pad( - hist, [(1, 1) for _ in range(ndim)] + [(0, 0)], mode='edge' - ) + map_array = pad(hist, [(1, 1) for _ in range(ndim)] + [(0, 0)], mode="edge") # Perform multilinear interpolation of graylevel mappings # using the convention described here: @@ -209,12 +211,14 @@ def _clahe(image, kernel_size, clip_limit, nbins): blocks = image.reshape(blocks_shape) blocks = blocks.transpose(blocks_axis_order) blocks_flattened_shape = blocks.shape - blocks = blocks.reshape((_misc.prod(ns_proc), - _misc.prod(blocks.shape[ndim:]))) + blocks = blocks.reshape( + (_misc.prod(ns_proc), _misc.prod(blocks.shape[ndim:])) + ) # calculate interpolation coefficients - coeffs = cp.meshgrid(*tuple([cp.arange(k) / k - for k in kernel_size[::-1]]), indexing='ij') + coeffs = cp.meshgrid( + *tuple([cp.arange(k) / k for k in kernel_size[::-1]]), indexing="ij" + ) coeffs = [cp.transpose(c).flatten() for c in coeffs] inv_coeffs = [1 - c for c in coeffs] @@ -222,9 +226,9 @@ def _clahe(image, kernel_size, clip_limit, nbins): # regions in each direction result = cp.zeros(blocks.shape, dtype=cp.float32) for iedge, edge in enumerate(itertools.product(*((range(2),) * ndim))): - - edge_maps = map_array[tuple(slice(e, e + n) - for e, n in zip(edge, ns_proc))] + edge_maps = map_array[ + tuple(slice(e, e + n) for e, n in zip(edge, ns_proc)) + ] edge_maps = edge_maps.reshape((_misc.prod(ns_proc), -1)) # apply map @@ -250,9 +254,14 @@ def _clahe(image, kernel_size, clip_limit, nbins): result = result.reshape(image.shape) # undo padding - unpad_slices = tuple([slice(p_i, s - p_f) for p_i, p_f, s in - zip(pad_start_per_dim, pad_end_per_dim, - image.shape)]) + unpad_slices = tuple( + [ + slice(p_i, s - p_f) + for p_i, p_f, s in zip( + pad_start_per_dim, pad_end_per_dim, image.shape + ) + ] + ) result = result[unpad_slices] return result diff --git a/python/cucim/src/cucim/skimage/exposure/exposure.py b/python/cucim/src/cucim/skimage/exposure/exposure.py index c6038cae4..0d521cf66 100644 --- a/python/cucim/src/cucim/skimage/exposure/exposure.py +++ b/python/cucim/src/cucim/skimage/exposure/exposure.py @@ -4,17 +4,28 @@ from .._shared import utils from ..util.dtype import dtype_limits, dtype_range -__all__ = ['histogram', 'cumulative_distribution', 'equalize_hist', - 'rescale_intensity', 'adjust_gamma', 'adjust_log', 'adjust_sigmoid'] +__all__ = [ + "histogram", + "cumulative_distribution", + "equalize_hist", + "rescale_intensity", + "adjust_gamma", + "adjust_log", + "adjust_sigmoid", +] DTYPE_RANGE = dtype_range.copy() DTYPE_RANGE.update((d.__name__, limits) for d, limits in dtype_range.items()) -DTYPE_RANGE.update({'uint10': (0, 2 ** 10 - 1), - 'uint12': (0, 2 ** 12 - 1), - 'uint14': (0, 2 ** 14 - 1), - 'bool': dtype_range[bool], - 'float': dtype_range[np.float64]}) +DTYPE_RANGE.update( + { + "uint10": (0, 2**10 - 1), + "uint12": (0, 2**12 - 1), + "uint14": (0, 2**14 - 1), + "bool": dtype_range[bool], + "float": dtype_range[np.float64], + } +) def _offset_array(arr, low_boundary, high_boundary): @@ -23,8 +34,9 @@ def _offset_array(arr, low_boundary, high_boundary): offset = low_boundary dyn_range = high_boundary - low_boundary # get smallest dtype that can hold both minimum and offset maximum - offset_dtype = 
np.promote_types(np.min_scalar_type(dyn_range), - np.min_scalar_type(low_boundary)) + offset_dtype = np.promote_types( + np.min_scalar_type(dyn_range), np.min_scalar_type(low_boundary) + ) if arr.dtype != offset_dtype: # prevent overflow errors when offsetting arr = arr.astype(offset_dtype) @@ -34,14 +46,14 @@ def _offset_array(arr, low_boundary, high_boundary): def _bincount_histogram_centers(image, source_range): """Compute bin centers for bincount-based histogram.""" - if source_range not in ['image', 'dtype']: + if source_range not in ["image", "dtype"]: raise ValueError( - f'Incorrect value for `source_range` argument: {source_range}' + f"Incorrect value for `source_range` argument: {source_range}" ) - if source_range == 'image': + if source_range == "image": image_min = int(image.min().astype(np.int64)) # synchronize image_max = int(image.max().astype(np.int64)) # synchronize - elif source_range == 'dtype': + elif source_range == "dtype": image_min, image_max = dtype_limits(image, clip_negative=False) bin_centers = cp.arange(image_min, image_max + 1) return bin_centers @@ -83,7 +95,7 @@ def _bincount_histogram(image, source_range, bin_centers=None): hist = cp.bincount( image.ravel(), minlength=image_max - min(image_min, 0) + 1 ) - if source_range == 'image': + if source_range == "image": idx = max(image_min, 0) hist = hist[idx:] return hist, bin_centers @@ -119,18 +131,20 @@ def _get_outer_edges(image, hist_range): ) if not (np.isfinite(first_edge) and np.isfinite(last_edge)): raise ValueError( - f'supplied hist_range of [{first_edge}, {last_edge}] is ' - f'not finite' + f"supplied hist_range of [{first_edge}, {last_edge}] is " + f"not finite" ) elif image.size == 0: # handle empty arrays. Can't determine hist_range, so use 0-1. first_edge, last_edge = 0, 1 else: - first_edge, last_edge = float(image.min()), float(image.max()) # synchronize # noqa + first_edge, last_edge = float(image.min()), float( + image.max() + ) # synchronize # noqa if not (np.isfinite(first_edge) and np.isfinite(last_edge)): raise ValueError( - f'autodetected hist_range of [{first_edge}, {last_edge}] is ' - f'not finite' + f"autodetected hist_range of [{first_edge}, {last_edge}] is " + f"not finite" ) # expand empty hist_range to avoid divide by zero @@ -179,20 +193,26 @@ def _get_bin_edges(image, nbins, hist_range): def _get_numpy_hist_range(image, source_range): - if source_range == 'image': + if source_range == "image": hist_range = None - elif source_range == 'dtype': + elif source_range == "dtype": hist_range = dtype_limits(image, clip_negative=False) else: raise ValueError( - f'Incorrect value for `source_range` argument: {source_range}' + f"Incorrect value for `source_range` argument: {source_range}" ) return hist_range @utils.channel_as_last_axis(multichannel_output=False) -def histogram(image, nbins=256, source_range='image', normalize=False, *, - channel_axis=None): +def histogram( + image, + nbins=256, + source_range="image", + normalize=False, + *, + channel_axis=None, +): """Return histogram of image. Unlike `numpy.histogram`, this function returns the centers of bins and @@ -248,10 +268,12 @@ def histogram(image, nbins=256, source_range='image', normalize=False, *, """ sh = image.shape if len(sh) == 3 and sh[-1] < 4 and channel_axis is None: - utils.warn('This might be a color image. The histogram will be ' - 'computed on the flattened image. You can instead ' - 'apply this function to each color channel, or set ' - 'channel_axis.') + utils.warn( + "This might be a color image. 
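Unlike `cupy.histogram`, `exposure.histogram` returns bin *centers*, and for integer input with `source_range='image'` it takes the exact `bincount` path shown above rather than fixed-width bins. A short sketch of the two `source_range` modes:

```python
import cupy as cp
from cucim.skimage import exposure

img = cp.array([0, 1, 1, 2, 255], dtype=cp.uint8)

# 'image': one bin per integer value between the observed min and max
freq, centers = exposure.histogram(img, source_range="image")

# 'dtype': bins span the full uint8 range; normalize makes them sum to 1
freq_d, centers_d = exposure.histogram(
    img, source_range="dtype", normalize=True
)
assert bool(cp.isclose(freq_d.sum(), 1.0))
```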
The histogram will be " + "computed on the flattened image. You can instead " + "apply this function to each color channel, or set " + "channel_axis." + ) if channel_axis is not None: channels = sh[-1] @@ -308,7 +330,7 @@ def _histogram(image, bins, source_range, normalize): else: hist_range = _get_numpy_hist_range(image, source_range) hist, bin_edges = cp.histogram(image, bins=bins, range=hist_range) - bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2. + bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2.0 if normalize: hist = hist / cp.sum(hist) @@ -433,10 +455,10 @@ def intensity_range(image, range_values="image", clip_negative=False): A 2-tuple where the first element is the minimum and the second is the maximum. """ - if range_values == 'dtype': + if range_values == "dtype": range_values = image.dtype.type - if range_values == 'image': + if range_values == "image": i_min = image.min().item() i_max = image.max().item() elif range_values in DTYPE_RANGE: @@ -490,8 +512,8 @@ def _output_dtype(dtype_or_range, image_dtype): return np.uint16 else: raise ValueError( - 'Incorrect value for out_range, should be a valid image data ' - f'type or a pair of values, got {dtype_or_range}.' + "Incorrect value for out_range, should be a valid image data " + f"type or a pair of values, got {dtype_or_range}." ) @@ -587,21 +609,22 @@ def rescale_intensity(image, in_range="image", out_range="dtype"): >>> rescale_intensity(image, out_range=(0, 127)).astype(np.int32) array([127, 127, 127], dtype=int32) """ - if out_range in ['dtype', 'image']: + if out_range in ["dtype", "image"]: out_dtype = _output_dtype(image.dtype.type, image.dtype) else: out_dtype = _output_dtype(out_range, image.dtype) imin, imax = map(float, intensity_range(image, in_range)) - omin, omax = map(float, intensity_range(image, out_range, - clip_negative=(imin >= 0))) + omin, omax = map( + float, intensity_range(image, out_range, clip_negative=(imin >= 0)) + ) if np.any(np.isnan([imin, imax, omin, omax])): utils.warn( "One or more intensity levels are NaN. Rescaling will broadcast " "NaN to the full image. Provide intensity levels yourself to " "avoid this. E.g. with np.nanmin(image), np.nanmax(image).", - stacklevel=2 + stacklevel=2, ) image = cp.clip(image, imin, imax) @@ -614,17 +637,18 @@ def rescale_intensity(image, in_range="image", out_range="dtype"): def _assert_non_negative(image): - if cp.any(image < 0): # synchronize! - raise ValueError('Image Correction methods work correctly only on ' - 'images with non-negative values. Use ' - 'skimage.exposure.rescale_intensity.') + raise ValueError( + "Image Correction methods work correctly only on " + "images with non-negative values. Use " + "skimage.exposure.rescale_intensity." 
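`rescale_intensity` linearly maps `in_range` onto `out_range`; either can be a `(min, max)` pair, `'image'`, `'dtype'`, or one of the named ranges registered in `DTYPE_RANGE` above (e.g. `'uint10'`). A short sketch:

```python
import cupy as cp
from cucim.skimage import exposure

image = cp.array([51, 102, 153], dtype=cp.uint8)

# the default in_range='image' stretches the observed range to the dtype
out = exposure.rescale_intensity(image)
assert int(out.max()) == 255

# named ranges map onto reduced bit depths
out10 = exposure.rescale_intensity(image.astype(cp.uint16), out_range="uint10")
assert int(out10.max()) == 2**10 - 1
```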
+ ) def _adjust_gamma_u8(image, gamma, gain): - """LUT based implmentation of gamma adjustement.""" + """LUT based implementation of gamma adjustment.""" lut = 255 * gain * (np.linspace(0, 1, 256) ** gamma) - lut = np.minimum(np.rint(lut), 255).astype('uint8') + lut = np.minimum(np.rint(lut), 255).astype("uint8") lut = cp.asarray(lut) return lut[image] @@ -686,8 +710,9 @@ def adjust_gamma(image, gamma=1, gain=1): else: _assert_non_negative(image) - scale = float(dtype_limits(image, True)[1] - - dtype_limits(image, True)[0]) + scale = float( + dtype_limits(image, True)[1] - dtype_limits(image, True)[0] + ) out = (((image / scale) ** gamma) * scale * gain).astype(dtype) @@ -789,8 +814,13 @@ def adjust_sigmoid(image, cutoff=0.5, gain=10, inv=False): return out.astype(dtype, copy=False) -def is_low_contrast(image, fraction_threshold=0.05, lower_percentile=1, - upper_percentile=99, method='linear'): +def is_low_contrast( + image, + fraction_threshold=0.05, + lower_percentile=1, + upper_percentile=99, + method="linear", +): """Determine if an image is low contrast. Parameters diff --git a/python/cucim/src/cucim/skimage/exposure/histogram_matching.py b/python/cucim/src/cucim/skimage/exposure/histogram_matching.py index 84192d39d..90bd8e606 100644 --- a/python/cucim/src/cucim/skimage/exposure/histogram_matching.py +++ b/python/cucim/src/cucim/skimage/exposure/histogram_matching.py @@ -8,7 +8,7 @@ def _match_cumulative_cdf(source, template): Return modified source array so that the cumulative density function of its values matches the cumulative density function of the template. """ - if source.dtype.kind == 'u': + if source.dtype.kind == "u": src_lookup = source.reshape(-1) src_counts = cp.bincount(src_lookup) tmpl_counts = cp.bincount(template.reshape(-1)) @@ -17,11 +17,12 @@ def _match_cumulative_cdf(source, template): tmpl_values = cp.nonzero(tmpl_counts)[0] tmpl_counts = tmpl_counts[tmpl_values] else: - src_values, src_lookup, src_counts = cp.unique(source.reshape(-1), - return_inverse=True, - return_counts=True) - tmpl_values, tmpl_counts = cp.unique(template.reshape(-1), - return_counts=True) + src_values, src_lookup, src_counts = cp.unique( + source.reshape(-1), return_inverse=True, return_counts=True + ) + tmpl_values, tmpl_counts = cp.unique( + template.reshape(-1), return_counts=True + ) # calculate normalized quantiles for each array src_quantiles = cp.cumsum(src_counts) / source.size @@ -66,23 +67,27 @@ def match_histograms(image, reference, *, channel_axis=None): """ if image.ndim != reference.ndim: - raise ValueError('Image and reference must have the same number ' - 'of channels.') + raise ValueError( + "Image and reference must have the same number " "of channels." + ) if channel_axis is not None: if image.shape[channel_axis] != reference.shape[channel_axis]: - raise ValueError('Number of channels in the input image and ' - 'reference image must match!') + raise ValueError( + "Number of channels in the input image and " + "reference image must match!" 
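For uint8 input, `adjust_gamma` is exactly the 256-entry lookup table that `_adjust_gamma_u8` builds above. A sketch that rebuilds the LUT on the host to confirm the equivalence:

```python
import cupy as cp
import numpy as np
from cucim.skimage import exposure

img = cp.arange(256, dtype=cp.uint8).reshape(16, 16)
gamma, gain = 2.2, 1.0

out = exposure.adjust_gamma(img, gamma=gamma, gain=gain)

# the same mapping, computed directly from the LUT formula above
lut = np.minimum(np.rint(255 * gain * np.linspace(0, 1, 256) ** gamma), 255)
expected = cp.asarray(lut.astype("uint8"))[img]
cp.testing.assert_array_equal(out, expected)
```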
+ ) matched = cp.empty(image.shape, dtype=image.dtype) for channel in range(image.shape[-1]): - matched_channel = _match_cumulative_cdf(image[..., channel], - reference[..., channel]) + matched_channel = _match_cumulative_cdf( + image[..., channel], reference[..., channel] + ) matched[..., channel] = matched_channel else: matched = _match_cumulative_cdf(image, reference) - if matched.dtype.kind == 'f': + if matched.dtype.kind == "f": # output a float32 result when the input is float16 or float32 out_dtype = utils._supported_float_type(image.dtype) matched = matched.astype(out_dtype, copy=False) diff --git a/python/cucim/src/cucim/skimage/exposure/tests/test_exposure.py b/python/cucim/src/cucim/skimage/exposure/tests/test_exposure.py index 11f6fb673..493eb7c73 100644 --- a/python/cucim/src/cucim/skimage/exposure/tests/test_exposure.py +++ b/python/cucim/src/cucim/skimage/exposure/tests/test_exposure.py @@ -27,9 +27,7 @@ def test_wrong_source_range(): im = cp.array([-1, 100], dtype=cp.int8) match = "Incorrect value for `source_range` argument" with pytest.raises(ValueError, match=match): - frequencies, bin_centers = exposure.histogram( - im, source_range="foobar" - ) + frequencies, bin_centers = exposure.histogram(im, source_range="foobar") @pytest.mark.xfail(ON_AARCH64, reason=ON_AARCH64_REASON) @@ -102,7 +100,7 @@ def test_flat_int_range_dtype(): assert frequencies.shape == (256,) -@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64]) def test_peak_float_out_of_range_image(dtype): im = cp.array([10, 100], dtype=dtype) frequencies, bin_centers = exposure.histogram(im, nbins=90) @@ -110,12 +108,12 @@ def test_peak_float_out_of_range_image(dtype): assert_array_equal(bin_centers, cp.arange(10, 100) + 0.5) -@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64]) def test_peak_float_out_of_range_dtype(dtype): im = cp.array([10, 100], dtype=dtype) nbins = 10 frequencies, bin_centers = exposure.histogram( - im, nbins=nbins, source_range='dtype' + im, nbins=nbins, source_range="dtype" ) assert bin_centers.dtype == dtype assert_almost_equal(cp.min(bin_centers).get(), -0.9, 3) @@ -125,15 +123,17 @@ def test_peak_float_out_of_range_dtype(dtype): def test_normalize(): im = cp.array([0, 255, 255], dtype=cp.uint8) - frequencies, bin_centers = exposure.histogram(im, source_range='dtype', - normalize=False) + frequencies, bin_centers = exposure.histogram( + im, source_range="dtype", normalize=False + ) expected = cp.zeros(256) expected[0] = 1 expected[-1] = 2 assert_array_equal(frequencies, expected) - frequencies, bin_centers = exposure.histogram(im, source_range='dtype', - normalize=True) + frequencies, bin_centers = exposure.histogram( + im, source_range="dtype", normalize=True + ) expected /= 3.0 assert_array_equal(frequencies, expected) @@ -142,11 +142,11 @@ def test_normalize(): # Test multichannel histograms # ============================ -@pytest.mark.parametrize('source_range', ['dtype', 'image']) -@pytest.mark.parametrize('dtype', [cp.uint8, cp.int16, cp.float64]) -@pytest.mark.parametrize('channel_axis', [0, 1, -1]) -def test_multichannel_hist_common_bins_uint8(dtype, source_range, - channel_axis): + +@pytest.mark.parametrize("source_range", ["dtype", "image"]) +@pytest.mark.parametrize("dtype", [cp.uint8, cp.int16, cp.float64]) +@pytest.mark.parametrize("channel_axis", [0, 1, -1]) +def 
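`_match_cumulative_cdf` above interpolates each source quantile onto the template value at the same quantile; `match_histograms` applies it per channel when `channel_axis` is given, raising the ValueError above if the channel counts differ. A minimal usage sketch (random data stands in for real images):

```python
import cupy as cp
from cucim.skimage import exposure

rng = cp.random.default_rng(0)
image = rng.random((64, 64, 3), dtype=cp.float32)
reference = rng.random((32, 32, 3), dtype=cp.float32) ** 2  # different pdf

matched = exposure.match_histograms(image, reference, channel_axis=-1)
assert matched.shape == image.shape
assert matched.dtype == cp.float32  # float16/float32 inputs give float32
```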
test_multichannel_hist_common_bins_uint8(dtype, source_range, channel_axis): """Check that all channels use the same binning.""" # Construct multichannel image with uniform values within each channel, # but the full range of values across channels. @@ -158,7 +158,7 @@ def test_multichannel_hist_common_bins_uint8(dtype, source_range, np.full(shape, imin, dtype=dtype), np.full(shape, imax, dtype=dtype), ), - axis=channel_axis + axis=channel_axis, ) im = cp.asarray(im) frequencies, bin_centers = exposure.histogram( @@ -199,7 +199,7 @@ def test_equalize_ubyte(): check_cdf_slope(cdf) -@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64]) def test_equalize_float(dtype): img = util.img_as_float(test_img).astype(dtype, copy=False) img_eq = exposure.equalize_hist(img) @@ -234,22 +234,20 @@ def check_cdf_slope(cdf): # ==================== -@pytest.mark.parametrize("test_input,expected", [ - ('image', [0, 1]), - ('dtype', [0, 255]), - ((10, 20), [10, 20]) -]) +@pytest.mark.parametrize( + "test_input,expected", + [("image", [0, 1]), ("dtype", [0, 255]), ((10, 20), [10, 20])], +) def test_intensity_range_uint8(test_input, expected): image = cp.array([0, 1], dtype=cp.uint8) out = intensity_range(image, range_values=test_input) assert_array_equal(out, cp.array(expected)) -@pytest.mark.parametrize("test_input,expected", [ - ('image', [0.1, 0.2]), - ('dtype', [-1, 1]), - ((0.3, 0.4), [0.3, 0.4]) -]) +@pytest.mark.parametrize( + "test_input,expected", + [("image", [0.1, 0.2]), ("dtype", [-1, 1]), ((0.3, 0.4), [0.3, 0.4])], +) def test_intensity_range_float(test_input, expected): image = cp.array([0.1, 0.2], dtype=cp.float64) out = intensity_range(image, range_values=test_input) @@ -284,9 +282,9 @@ def test_rescale_shrink(): assert_array_almost_equal(out, [0, 0.5, 1]) -@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64]) def test_rescale_in_range(dtype): - image = cp.array([51., 102., 153.], dtype=dtype) + image = cp.array([51.0, 102.0, 153.0], dtype=dtype) out = exposure.rescale_intensity(image, in_range=(0, 255)) assert_array_almost_equal(out, [0.2, 0.4, 0.6], decimal=4) # with out_range='dtype', the output has the same dtype @@ -299,8 +297,9 @@ def test_rescale_in_range_clip(): assert_array_almost_equal(out, [0.5, 1, 1]) -@pytest.mark.parametrize('dtype', [cp.int8, cp.int32, cp.float16, cp.float32, - cp.float64]) +@pytest.mark.parametrize( + "dtype", [cp.int8, cp.int32, cp.float16, cp.float32, cp.float64] +) @pytest.mark.xfail(ON_AARCH64, reason=ON_AARCH64_REASON) def test_rescale_out_range(dtype): """Check that output range is correct. 
@@ -317,25 +316,25 @@ def test_rescale_out_range(dtype): def test_rescale_named_in_range(): image = cp.array([0, uint10_max, uint10_max + 100], dtype=cp.uint16) - out = exposure.rescale_intensity(image, in_range='uint10') + out = exposure.rescale_intensity(image, in_range="uint10") assert_array_almost_equal(out, [0, uint16_max, uint16_max]) def test_rescale_named_out_range(): image = cp.array([0, uint16_max], dtype=cp.uint16) - out = exposure.rescale_intensity(image, out_range='uint10') + out = exposure.rescale_intensity(image, out_range="uint10") assert_array_almost_equal(out, [0, uint10_max]) def test_rescale_uint12_limits(): image = cp.array([0, uint16_max], dtype=cp.uint16) - out = exposure.rescale_intensity(image, out_range='uint12') + out = exposure.rescale_intensity(image, out_range="uint12") assert_array_almost_equal(out, [0, uint12_max]) def test_rescale_uint14_limits(): image = cp.array([0, uint16_max], dtype=cp.uint16) - out = exposure.rescale_intensity(image, out_range='uint14') + out = exposure.rescale_intensity(image, out_range="uint14") assert_array_almost_equal(out, [0, uint14_max]) @@ -360,8 +359,7 @@ def test_rescale_same_values(): @pytest.mark.parametrize( - "in_range,out_range", [("image", "dtype"), - ("dtype", "image")] + "in_range,out_range", [("image", "dtype"), ("dtype", "image")] ) def test_rescale_nan_warning(in_range, out_range): image = cp.arange(12, dtype=float).reshape(3, 4) @@ -377,13 +375,14 @@ def test_rescale_nan_warning(in_range, out_range): @pytest.mark.parametrize( - "out_range, out_dtype", [ - ('uint8', cp.uint8), - ('uint10', cp.uint16), - ('uint12', cp.uint16), - ('uint16', cp.uint16), - ('float', float), - ] + "out_range, out_dtype", + [ + ("uint8", cp.uint8), + ("uint10", cp.uint16), + ("uint12", cp.uint16), + ("uint16", cp.uint16), + ("float", float), + ], ) def test_rescale_output_dtype(out_range, out_dtype): image = cp.array([-128, 0, 127], dtype=cp.int8) @@ -417,15 +416,16 @@ def test_rescale_raises_on_incorrect_out_range(): # ==================================== -@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64]) def test_adapthist_grayscale(dtype): """Test a grayscale float image""" img = cp.array(data.astronaut()) img = util.img_as_float(img).astype(dtype, copy=False) img = rgb2gray(img) img = cp.dstack((img, img, img)) - adapted = exposure.equalize_adapthist(img, kernel_size=(57, 51), - clip_limit=0.01, nbins=128) + adapted = exposure.equalize_adapthist( + img, kernel_size=(57, 51), clip_limit=0.01, nbins=128 + ) assert img.shape == adapted.shape assert adapted.dtype == _supported_float_type(dtype) snr_decimal = 3 if dtype != cp.float16 else 2 @@ -434,11 +434,10 @@ def test_adapthist_grayscale(dtype): def test_adapthist_color(): - """Test an RGB color uint16 image - """ + """Test an RGB color uint16 image""" img = util.img_as_uint(cp.array(data.astronaut())) with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') + warnings.simplefilter("always") hist, bin_centers = exposure.histogram(img) assert len(w) > 0 adapted = exposure.equalize_adapthist(img, clip_limit=0.01) @@ -449,7 +448,8 @@ def test_adapthist_color(): full_scale = exposure.rescale_intensity(img) assert_almost_equal(float(peak_snr(full_scale, adapted)), 109.393, 1) assert_almost_equal( - float(norm_brightness_err(full_scale, adapted)), 0.02, 2) + float(norm_brightness_err(full_scale, adapted)), 0.02, 2 + ) def test_adapthist_alpha(): @@ -482,12 +482,12 @@ def 
test_adapthist_grayscale_Nd(): img3d = cp.stack([img2d] * (img.shape[0] // a), axis=0) # apply CLAHE - adapted2d = exposure.equalize_adapthist(img2d, - kernel_size=5, - clip_limit=0.05) - adapted3d = exposure.equalize_adapthist(img3d, - kernel_size=5, - clip_limit=0.05) + adapted2d = exposure.equalize_adapthist( + img2d, kernel_size=5, clip_limit=0.05 + ) + adapted3d = exposure.equalize_adapthist( + img3d, kernel_size=5, clip_limit=0.05 + ) # check that dimensions of input and output match assert img2d.shape == adapted2d.shape @@ -495,13 +495,13 @@ def test_adapthist_grayscale_Nd(): # check that the result from the stack of 2d images is similar # to the underlying 2d image - assert cp.mean(cp.abs(adapted2d - - adapted3d[adapted3d.shape[0] // 2])) < 0.02 + assert ( + cp.mean(cp.abs(adapted2d - adapted3d[adapted3d.shape[0] // 2])) < 0.02 + ) def test_adapthist_constant(): - """Test constant image, float and uint - """ + """Test constant image, float and uint""" img = cp.zeros((8, 8)) img += 2 img = img.astype(cp.uint16) @@ -516,8 +516,7 @@ def test_adapthist_constant(): def test_adapthist_borders(): - """Test border processing - """ + """Test border processing""" img = rgb2gray(util.img_as_float(cp.array(data.astronaut()))) # maximize difference between orig and processed img @@ -529,11 +528,15 @@ def test_adapthist_borders(): for kernel_size in range(51, 71, 2): adapted = exposure.equalize_adapthist(img, kernel_size, clip_limit=0.5) # Check last columns are processed - assert norm_brightness_err(adapted[:, border_index], - img[:, border_index]) > 0.1 + assert ( + norm_brightness_err(adapted[:, border_index], img[:, border_index]) + > 0.1 + ) # Check last rows are processed - assert norm_brightness_err(adapted[border_index, :], - img[border_index, :]) > 0.1 + assert ( + norm_brightness_err(adapted[border_index, :], img[border_index, :]) + > 0.1 + ) def test_adapthist_clip_limit(): @@ -617,7 +620,7 @@ def test_adjust_gamma_one(): assert_array_almost_equal(result, image) -@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64]) def test_adjust_gamma_zero(dtype): """White image should be returned for gamma equal to zero""" image = cp.random.uniform(0, 255, (8, 8)).astype(dtype, copy=False) @@ -719,7 +722,8 @@ def test_adjust_gamma_u8_overflow(): # Test Logarithmic Correction # =========================== -@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64]) + +@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64]) def test_adjust_log_1x1_shape(dtype): """Check that the shape is maintained""" img = cp.ones([1, 1], dtype=dtype) @@ -771,7 +775,8 @@ def test_adjust_inv_log(): # Test Sigmoid Correction # ======================= -@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64]) + +@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64]) def test_adjust_sigmoid_1x1_shape(dtype): """Check that the shape is maintained""" img = cp.ones([1, 1], dtype=dtype) @@ -870,7 +875,7 @@ def test_is_low_contrast(): assert exposure.is_low_contrast(image) assert not exposure.is_low_contrast(image, upper_percentile=100) - image = (image.astype(cp.uint16)) * 2 ** 8 + image = (image.astype(cp.uint16)) * 2**8 assert exposure.is_low_contrast(image) assert not exposure.is_low_contrast(image, upper_percentile=100) @@ -886,9 +891,11 @@ def test_is_low_contrast_boolean(): # Test negative input ##################### -@pytest.mark.parametrize("exposure_func", 
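`is_low_contrast` compares the intensity spread between `lower_percentile` and `upper_percentile` against `fraction_threshold` of the dtype's range, which is why widening `upper_percentile` to 100 (as the test above does) can flip the verdict. The same behaviour as a standalone sketch:

```python
import cupy as cp
from cucim.skimage import exposure

image = cp.linspace(0, 0.04, 100)  # values span ~2% of the float range
assert exposure.is_low_contrast(image)

image[-1] = 1.0  # one outlier sits beyond the default 99th percentile
assert exposure.is_low_contrast(image)
assert not exposure.is_low_contrast(image, upper_percentile=100)
```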
[exposure.adjust_gamma, - exposure.adjust_log, - exposure.adjust_sigmoid]) + +@pytest.mark.parametrize( + "exposure_func", + [exposure.adjust_gamma, exposure.adjust_log, exposure.adjust_sigmoid], +) def test_negative_input(exposure_func): image = cp.arange(-10, 245, 4).reshape((8, 8)).astype(cp.float64) with pytest.raises(ValueError): @@ -903,7 +910,7 @@ def test_negative_input(exposure_func): # @pytest.mark.xfail(True, reason="dask case not currently supported") @pytest.mark.skip("dask case not currently supported") def test_dask_histogram(): - pytest.importorskip('dask', reason="dask python library is not installed") + pytest.importorskip("dask", reason="dask python library is not installed") import dask.array as da dask_array = da.from_array(cp.array([[0, 1], [1, 2]]), chunks=(1, 2)) diff --git a/python/cucim/src/cucim/skimage/exposure/tests/test_histogram_matching.py b/python/cucim/src/cucim/skimage/exposure/tests/test_histogram_matching.py index cd7675752..ec357c8df 100644 --- a/python/cucim/src/cucim/skimage/exposure/tests/test_histogram_matching.py +++ b/python/cucim/src/cucim/skimage/exposure/tests/test_histogram_matching.py @@ -9,10 +9,13 @@ from cucim.skimage.exposure import histogram_matching -@pytest.mark.parametrize('array, template, expected_array', [ - (cp.arange(10), cp.arange(100), cp.arange(9, 100, 10)), - (cp.random.rand(4), cp.ones(3), cp.ones(4)) -]) +@pytest.mark.parametrize( + "array, template, expected_array", + [ + (cp.arange(10), cp.arange(100), cp.arange(9, 100, 10)), + (cp.random.rand(4), cp.ones(3), cp.ones(4)), + ], +) def test_match_array_values(array, template, expected_array): # when matched = histogram_matching._match_cumulative_cdf(array, template) @@ -22,19 +25,19 @@ def test_match_array_values(array, template, expected_array): class TestMatchHistogram: - image_rgb = cp.asarray(data.chelsea()) template_rgb = cp.asarray(data.astronaut()) - @pytest.mark.parametrize('channel_axis', (0, 1, -1)) + @pytest.mark.parametrize("channel_axis", (0, 1, -1)) def test_match_histograms_channel_axis(self, channel_axis): """Assert that pdf of matched image is close to the reference's pdf for all channels and all values of matched""" image = cp.moveaxis(self.image_rgb, -1, channel_axis) reference = cp.moveaxis(self.template_rgb, -1, channel_axis) - matched = exposure.match_histograms(image, reference, - channel_axis=channel_axis) + matched = exposure.match_histograms( + image, reference, channel_axis=channel_axis + ) assert matched.dtype == image.dtype matched = cp.moveaxis(matched, channel_axis, -1) reference = cp.moveaxis(reference, channel_axis, -1) @@ -48,14 +51,14 @@ def test_match_histograms_channel_axis(self, channel_axis): matched_values, matched_quantiles = matched_pdf[channel] for i, matched_value in enumerate(matched_values): - closest_id = ( - np.abs(reference_values - matched_value) - ).argmin() - assert_array_almost_equal(matched_quantiles[i], - reference_quantiles[closest_id], - decimal=1) - - @pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64]) + closest_id = (np.abs(reference_values - matched_value)).argmin() + assert_array_almost_equal( + matched_quantiles[i], + reference_quantiles[closest_id], + decimal=1, + ) + + @pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64]) def test_match_histograms_float_dtype(self, dtype): """float16 or float32 inputs give float32 output""" image = self.image_rgb.astype(dtype, copy=False) @@ -63,10 +66,13 @@ def test_match_histograms_float_dtype(self, dtype): matched = 
exposure.match_histograms(image, reference) assert matched.dtype == _supported_float_type(dtype) - @pytest.mark.parametrize('image, reference', [ - (image_rgb, template_rgb[:, :, 0]), - (image_rgb[:, :, 0], template_rgb) - ]) + @pytest.mark.parametrize( + "image, reference", + [ + (image_rgb, template_rgb[:, :, 0]), + (image_rgb[:, :, 0], template_rgb), + ], + ) def test_raises_value_error_on_channels_mismatch(self, image, reference): with pytest.raises(ValueError): exposure.match_histograms(image, reference) diff --git a/python/cucim/src/cucim/skimage/feature/__init__.py b/python/cucim/src/cucim/skimage/feature/__init__.py index fcfa62c03..04b53fbfc 100644 --- a/python/cucim/src/cucim/skimage/feature/__init__.py +++ b/python/cucim/src/cucim/skimage/feature/__init__.py @@ -2,36 +2,47 @@ from ._canny import canny from ._daisy import daisy from .blob import blob_dog, blob_doh, blob_log -from .corner import (corner_foerstner, corner_harris, corner_kitchen_rosenfeld, - corner_peaks, corner_shi_tomasi, hessian_matrix, - hessian_matrix_det, hessian_matrix_eigvals, shape_index, - structure_tensor, structure_tensor_eigenvalues) +from .corner import ( + corner_foerstner, + corner_harris, + corner_kitchen_rosenfeld, + corner_peaks, + corner_shi_tomasi, + hessian_matrix, + hessian_matrix_det, + hessian_matrix_eigvals, + shape_index, + structure_tensor, + structure_tensor_eigenvalues, +) from .match import match_descriptors from .peak import peak_local_max from .template import match_template -__all__ = ['canny', - 'daisy', - 'multiscale_basic_features', - 'peak_local_max', - 'structure_tensor', - 'structure_tensor_eigenvalues', - 'structure_tensor_eigvals', - 'hessian_matrix', - 'hessian_matrix_det', - 'hessian_matrix_eigvals', - 'shape_index', - 'corner_kitchen_rosenfeld', - 'corner_harris', - 'corner_shi_tomasi', - 'corner_foerstner', - # 'corner_subpix', - 'corner_peaks', - # 'corner_moravec', - # 'corner_fast', - # 'corner_orientations', - 'match_template', - 'match_descriptors', - 'blob_dog', - 'blob_log', - 'blob_doh'] +__all__ = [ + "canny", + "daisy", + "multiscale_basic_features", + "peak_local_max", + "structure_tensor", + "structure_tensor_eigenvalues", + "structure_tensor_eigvals", + "hessian_matrix", + "hessian_matrix_det", + "hessian_matrix_eigvals", + "shape_index", + "corner_kitchen_rosenfeld", + "corner_harris", + "corner_shi_tomasi", + "corner_foerstner", + # 'corner_subpix', + "corner_peaks", + # 'corner_moravec', + # 'corner_fast', + # 'corner_orientations', + "match_template", + "match_descriptors", + "blob_dog", + "blob_log", + "blob_doh", +] diff --git a/python/cucim/src/cucim/skimage/feature/_basic_features.py b/python/cucim/src/cucim/skimage/feature/_basic_features.py index 531e3d56a..c6e45b966 100644 --- a/python/cucim/src/cucim/skimage/feature/_basic_features.py +++ b/python/cucim/src/cucim/skimage/feature/_basic_features.py @@ -92,7 +92,9 @@ def _mutiscale_basic_features_singlechannel( ) singlescale_func = functools.partial( _singlescale_basic_features_singlechannel, - intensity=intensity, edges=edges, texture=texture + intensity=intensity, + edges=edges, + texture=texture, ) out_sigmas = [singlescale_func(img, s) for s in sigmas] features = itertools.chain.from_iterable(out_sigmas) diff --git a/python/cucim/src/cucim/skimage/feature/_canny.py b/python/cucim/src/cucim/skimage/feature/_canny.py index 1d60dbf86..9f25c99b2 100644 --- a/python/cucim/src/cucim/skimage/feature/_canny.py +++ b/python/cucim/src/cucim/skimage/feature/_canny.py @@ -62,9 +62,10 @@ def 
_preprocess(image, mask, sigma, mode, cval): pixels. """ - gaussian_kwargs = dict(sigma=sigma, mode=mode, cval=cval, - preserve_range=False) - compute_bleedover = (mode == 'constant' or mask is not None) + gaussian_kwargs = dict( + sigma=sigma, mode=mode, cval=cval, preserve_range=False + ) + compute_bleedover = mode == "constant" or mask is not None float_type = _supported_float_type(image.dtype) if mask is None: if compute_bleedover: @@ -91,8 +92,9 @@ def _preprocess(image, mask, sigma, mode, cval): # Compute the fractional contribution of masked pixels by applying # the function to the mask (which gets you the fraction of the # pixel data that's due to significant points) - bleed_over = gaussian(mask.astype(float_type, copy=False), - **gaussian_kwargs) + bleed_over = gaussian( + mask.astype(float_type, copy=False), **gaussian_kwargs + ) bleed_over += cp.finfo(float_type).eps # Smooth the masked image @@ -123,9 +125,9 @@ def _generate_nonmaximum_suppression_bilinear_op(large_int=False): """ if large_int: - uint_t = 'size_t' + uint_t = "size_t" else: - uint_t = 'unsigned int' + uint_t = "unsigned int" ops = f""" // determine strides (in number of elements) along each axis @@ -201,12 +203,14 @@ def _generate_nonmaximum_suppression_bilinear_op(large_int=False): @cp.memoize(for_each_device=True) def _get_nonmax_kernel(large_int=False): - in_params = ('raw T isobel, raw T jsobel, raw T magnitude, ' - 'raw uint8 eroded_mask, float64 low_threshold') - out_params = 'T out' - name = 'cupyx_skimage_canny_nonmaximum_suppression_bilinear' + in_params = ( + "raw T isobel, raw T jsobel, raw T magnitude, " + "raw uint8 eroded_mask, float64 low_threshold" + ) + out_params = "T out" + name = "cupyx_skimage_canny_nonmaximum_suppression_bilinear" if large_int: - name += '_large' + name += "_large" return cp.ElementwiseKernel( in_params, out_params, @@ -218,7 +222,6 @@ def _get_nonmax_kernel(large_int=False): def _nonmaximum_suppression_bilinear( isobel, jsobel, magnitude, eroded_mask, low_threshold ): - # make sure inputs are C-contiguous (stride calculations assume this) isobel = cp.ascontiguousarray(isobel) jsobel = cp.ascontiguousarray(jsobel) @@ -246,8 +249,17 @@ def _nonmaximum_suppression_bilinear( return out -def canny(image, sigma=1., low_threshold=None, high_threshold=None, mask=None, - use_quantiles=False, *, mode='constant', cval=0.0): +def canny( + image, + sigma=1.0, + low_threshold=None, + high_threshold=None, + mask=None, + use_quantiles=False, + *, + mode="constant", + cval=0.0, +): """Edge filter an image using the Canny algorithm. Parameters @@ -358,7 +370,7 @@ def canny(image, sigma=1., low_threshold=None, high_threshold=None, mask=None, # mask by one and then mask the output. We also mask out the border points # because who knows what lies beyond the edge of the image? 
- if (image.dtype.kind in 'iu' and image.dtype.itemsize >= 8): + if image.dtype.kind in "iu" and image.dtype.itemsize >= 8: raise ValueError("64-bit or larger integer images are not supported") check_nD(image, 2) diff --git a/python/cucim/src/cucim/skimage/feature/_daisy.py b/python/cucim/src/cucim/skimage/feature/_daisy.py index d547fe864..e7f938355 100644 --- a/python/cucim/src/cucim/skimage/feature/_daisy.py +++ b/python/cucim/src/cucim/skimage/feature/_daisy.py @@ -8,8 +8,18 @@ from .._shared.utils import check_nD -def daisy(image, step=4, radius=15, rings=3, histograms=8, orientations=8, - normalization='l1', sigmas=None, ring_radii=None, visualize=False): +def daisy( + image, + step=4, + radius=15, + rings=3, + histograms=8, + orientations=8, + normalization="l1", + sigmas=None, + ring_radii=None, + visualize=False, +): """Extract DAISY feature descriptors densely for the given image. DAISY is a feature descriptor similar to SIFT formulated in a way that @@ -102,9 +112,12 @@ def daisy(image, step=4, radius=15, rings=3, histograms=8, orientations=8, float_dtype = image.dtype # Validate parameters. - if sigmas is not None and ring_radii is not None \ - and len(sigmas) - 1 != len(ring_radii): - raise ValueError('`len(sigmas)-1 != len(ring_radii)`') + if ( + sigmas is not None + and ring_radii is not None + and len(sigmas) - 1 != len(ring_radii) + ): + raise ValueError("`len(sigmas)-1 != len(ring_radii)`") if ring_radii is not None: rings = len(ring_radii) radius = ring_radii[-1] @@ -114,8 +127,8 @@ def daisy(image, step=4, radius=15, rings=3, histograms=8, orientations=8, sigmas = [radius * (i + 1) / float(2 * rings) for i in range(rings)] if ring_radii is None: ring_radii = [radius * (i + 1) / float(rings) for i in range(rings)] - if normalization not in ['l1', 'l2', 'daisy', 'off']: - raise ValueError('Invalid normalization method.') + if normalization not in ["l1", "l2", "daisy", "off"]: + raise ValueError("Invalid normalization method.") # Compute image derivatives. dx = cp.zeros(image.shape, dtype=float_dtype) @@ -131,8 +144,9 @@ def daisy(image, step=4, radius=15, rings=3, histograms=8, orientations=8, grad_ori = cp.arctan2(dy, dx) pi = cp.pi orientation_kappa = orientations / pi - orientation_angles = [2 * o * pi / orientations - pi - for o in range(orientations)] + orientation_angles = [ + 2 * o * pi / orientations - pi for o in range(orientations) + ] hist = cp.empty((orientations,) + image.shape, dtype=float_dtype) for i, o in enumerate(orientation_angles): # Weigh bin contribution by the circular normal distribution @@ -145,17 +159,20 @@ def daisy(image, step=4, radius=15, rings=3, histograms=8, orientations=8, hist_smooth = cp.empty((rings + 1,) + hist.shape, dtype=float_dtype) for i in range(rings + 1): for j in range(orientations): - hist_smooth[i, j, :, :] = gaussian(hist[j, :, :], sigma=sigmas[i], - mode='reflect') + hist_smooth[i, j, :, :] = gaussian( + hist[j, :, :], sigma=sigmas[i], mode="reflect" + ) # Assemble descriptor grid. 
theta = [2 * pi * j / histograms for j in range(histograms)] desc_dims = (rings * histograms + 1) * orientations - descs = cp.empty((desc_dims, image.shape[0] - 2 * radius, - image.shape[1] - 2 * radius), - dtype=float_dtype) - descs[:orientations, :, :] = hist_smooth[0, :, radius:-radius, - radius:-radius] + descs = cp.empty( + (desc_dims, image.shape[0] - 2 * radius, image.shape[1] - 2 * radius), + dtype=float_dtype, + ) + descs[:orientations, :, :] = hist_smooth[ + 0, :, radius:-radius, radius:-radius + ] idx = orientations for i in range(rings): for j in range(histograms): @@ -163,27 +180,27 @@ def daisy(image, step=4, radius=15, rings=3, histograms=8, orientations=8, y_max = descs.shape[1] + y_min x_min = radius + int(round(ring_radii[i] * math.cos(theta[j]))) x_max = descs.shape[2] + x_min - descs[idx:idx + orientations, :, :] = hist_smooth[i + 1, :, - y_min:y_max, - x_min:x_max] + descs[idx : idx + orientations, :, :] = hist_smooth[ + i + 1, :, y_min:y_max, x_min:x_max + ] idx += orientations descs = descs[:, ::step, ::step] descs = descs.swapaxes(0, 1).swapaxes(1, 2) # Normalize descriptors. - if normalization != 'off': + if normalization != "off": descs += 1e-10 - if normalization == 'l1': + if normalization == "l1": descs /= cp.sum(descs, axis=2)[:, :, cp.newaxis] - elif normalization == 'l2': + elif normalization == "l2": descs /= cp.sqrt(cp.sum(descs * descs, axis=2))[:, :, cp.newaxis] - elif normalization == 'daisy': + elif normalization == "daisy": for i in range(0, desc_dims, orientations): - norms = descs[:, :, i:i + orientations] + norms = descs[:, :, i : i + orientations] norms = norms * norms norms = norms.sum(axis=2) cp.sqrt(norms, out=norms) - descs[:, :, i:i + orientations] /= norms[:, :, cp.newaxis] + descs[:, :, i : i + orientations] /= norms[:, :, cp.newaxis] if visualize: from skimage import draw @@ -198,7 +215,8 @@ def daisy(image, step=4, radius=15, rings=3, histograms=8, orientations=8, desc_y = i * step + radius desc_x = j * step + radius rows, cols, val = draw.circle_perimeter_aa( - desc_y, desc_x, int(sigmas[0])) + desc_y, desc_x, int(sigmas[0]) + ) draw.set_color(descs_img, (rows, cols), color, alpha=val) max_bin = float(cp.max(descs[i, j, :])) for o_num, o in enumerate(orientation_angles): @@ -207,7 +225,8 @@ def daisy(image, step=4, radius=15, rings=3, histograms=8, orientations=8, dy = sigmas[0] * bin_size * math.sin(o) dx = sigmas[0] * bin_size * math.cos(o) rows, cols, val = draw.line_aa( - desc_y, desc_x, int(desc_y + dy), int(desc_x + dx)) + desc_y, desc_x, int(desc_y + dy), int(desc_x + dx) + ) draw.set_color(descs_img, (rows, cols), color, alpha=val) for r_num, r in enumerate(ring_radii): color_offset = float(1 + r_num) / rings @@ -217,22 +236,33 @@ def daisy(image, step=4, radius=15, rings=3, histograms=8, orientations=8, hist_y = desc_y + int(round(r * math.sin(t))) hist_x = desc_x + int(round(r * math.cos(t))) rows, cols, val = draw.circle_perimeter_aa( - hist_y, hist_x, int(sigmas[r_num + 1])) + hist_y, hist_x, int(sigmas[r_num + 1]) + ) draw.set_color( - descs_img, (rows, cols), color, alpha=val) + descs_img, (rows, cols), color, alpha=val + ) for o_num, o in enumerate(orientation_angles): # Draw histogram bins - bin_size = descs[i, j, orientations + r_num * - histograms * orientations + - t_num * orientations + o_num] + bin_size = descs[ + i, + j, + orientations + + r_num * histograms * orientations + + t_num * orientations + + o_num, + ] bin_size /= max_bin dy = sigmas[r_num + 1] * bin_size * math.sin(o) dx = sigmas[r_num + 1] * bin_size * 
math.cos(o) - rows, cols, val = draw.line_aa(hist_y, hist_x, - int(hist_y + dy), - int(hist_x + dx)) + rows, cols, val = draw.line_aa( + hist_y, + hist_x, + int(hist_y + dy), + int(hist_x + dx), + ) draw.set_color( - descs_img, (rows, cols), color, alpha=val) + descs_img, (rows, cols), color, alpha=val + ) return descs, descs_img else: return descs diff --git a/python/cucim/src/cucim/skimage/feature/_hessian_det_appx.py b/python/cucim/src/cucim/skimage/feature/_hessian_det_appx.py index 4624410fe..b93d61fc2 100644 --- a/python/cucim/src/cucim/skimage/feature/_hessian_det_appx.py +++ b/python/cucim/src/cucim/skimage/feature/_hessian_det_appx.py @@ -20,8 +20,8 @@ def _dtype_to_cuda_float_type(dtype): Supported cuda data type """ cpp_float_types = { - cp.float32: 'float', - cp.float64: 'double', + cp.float32: "float", + cp.float64: "double", } dtype = cp.dtype(dtype) if dtype.type not in cpp_float_types: @@ -47,7 +47,7 @@ def _get_hessian_det_appx_kernel(dtype, large_int) -> cp.RawModule: """ image_t = _dtype_to_cuda_float_type(dtype) - int_t = 'long long' if large_int else 'int' + int_t = "long long" if large_int else "int" _preamble = f""" #define IMAGE_T {image_t} @@ -55,14 +55,17 @@ def _get_hessian_det_appx_kernel(dtype, large_int) -> cp.RawModule: """ kernel_directory = os.path.join( - os.path.normpath(os.path.dirname(__file__)), 'cuda') + os.path.normpath(os.path.dirname(__file__)), "cuda" + ) cu_file = os.path.join(kernel_directory, "_hessian_det_appx.cu") - with open(cu_file, 'rt') as f: + with open(cu_file, "rt") as f: _code = f.read() - return cp.RawModule(code=_preamble + _code, - options=('--std=c++11',), - name_expressions=["_hessian_matrix_det"]) + return cp.RawModule( + code=_preamble + _code, + options=("--std=c++11",), + name_expressions=["_hessian_matrix_det"], + ) def _hessian_matrix_det(img: cp.ndarray, sigma) -> cp.ndarray: @@ -98,7 +101,9 @@ def _hessian_matrix_det(img: cp.ndarray, sigma) -> cp.ndarray: the result obtained if someone computed the Hessian and took its determinant. """ - rawmodule = _get_hessian_det_appx_kernel(img.dtype, max(img.shape) > 2**31) + rawmodule = _get_hessian_det_appx_kernel( + img.dtype, max(img.shape) > 2**31 + ) _hessian_det_appx_kernel = rawmodule.get_function("_hessian_matrix_det") out = cp.empty_like(img, dtype=img.dtype) @@ -108,6 +113,6 @@ def _hessian_matrix_det(img: cp.ndarray, sigma) -> cp.ndarray: _hessian_det_appx_kernel( (grid_size,), (block_size,), - (img.ravel(), img.shape[0], img.shape[1], float(sigma), out) + (img.ravel(), img.shape[0], img.shape[1], float(sigma), out), ) return out diff --git a/python/cucim/src/cucim/skimage/feature/blob.py b/python/cucim/src/cucim/skimage/feature/blob.py index ed9228178..6b47f8777 100644 --- a/python/cucim/src/cucim/skimage/feature/blob.py +++ b/python/cucim/src/cucim/skimage/feature/blob.py @@ -33,8 +33,8 @@ def _dtype_to_cuda_float_type(dtype): Supported cuda data type """ cpp_float_types = { - cp.float32: 'float', - cp.float64: 'double', + cp.float32: "float", + cp.float64: "double", } dtype = cp.dtype(dtype) if dtype.type not in cpp_float_types: @@ -58,7 +58,7 @@ def _get_prune_blob_rawmodule(dtype, large_int) -> cp.RawModule: A cupy RawModule containing the __global__ functions `_prune_blobs`. 
""" blob_t = _dtype_to_cuda_float_type(dtype) - int_t = 'long long' if large_int else 'int' + int_t = "long long" if large_int else "int" _preamble = f""" #define BLOB_T {blob_t} @@ -66,15 +66,15 @@ def _get_prune_blob_rawmodule(dtype, large_int) -> cp.RawModule: """ kernel_directory = os.path.join( - os.path.normpath(os.path.dirname(__file__)), 'cuda' + os.path.normpath(os.path.dirname(__file__)), "cuda" ) - with open(os.path.join(kernel_directory, "blob.cu"), 'rt') as f: + with open(os.path.join(kernel_directory, "blob.cu"), "rt") as f: _code = f.read() return cp.RawModule( code=_preamble + _code, - options=('--std=c++11',), - name_expressions=["_prune_blobs"] + options=("--std=c++11",), + name_expressions=["_prune_blobs"], ) @@ -104,19 +104,24 @@ def _prune_blobs(blobs_array, overlap, *, sigma_dim=1): """ # from here, the kernel does the calculation - blobs_module = _get_prune_blob_rawmodule(blobs_array.dtype, - max(blobs_array.shape) > 2**31) + blobs_module = _get_prune_blob_rawmodule( + blobs_array.dtype, max(blobs_array.shape) > 2**31 + ) _prune_blobs_kernel = blobs_module.get_function("_prune_blobs") block_size = 64 grid_size = int(math.ceil(blobs_array.shape[0] / block_size)) - _prune_blobs_kernel((grid_size,), (block_size,), - (blobs_array.ravel(), - int(blobs_array.shape[0]), - int(blobs_array.shape[1]), - float(overlap), - int(sigma_dim)) - ) + _prune_blobs_kernel( + (grid_size,), + (block_size,), + ( + blobs_array.ravel(), + int(blobs_array.shape[0]), + int(blobs_array.shape[1]), + float(overlap), + int(sigma_dim), + ), + ) return blobs_array[blobs_array[:, -1] > 0, :] @@ -128,12 +133,14 @@ def _format_exclude_border(img_ndim, exclude_border): if len(exclude_border) != img_ndim: raise ValueError( "`exclude_border` should have the same length as the " - "dimensionality of the image.") + "dimensionality of the image." + ) for exclude in exclude_border: if not isinstance(exclude, int): raise ValueError( "exclude border, when expressed as a tuple, must only " - "contain ints.") + "contain ints." + ) return exclude_border elif isinstance(exclude_border, int): return (exclude_border,) * img_ndim + (0,) @@ -143,7 +150,7 @@ def _format_exclude_border(img_ndim, exclude_border): return (0,) * (img_ndim + 1) else: raise ValueError( - f'Unsupported value ({exclude_border}) for exclude_border' + f"Unsupported value ({exclude_border}) for exclude_border" ) @@ -162,8 +169,17 @@ def _prep_sigmas(ndim, min_sigma, max_sigma): return scalar_sigma, min_sigma, max_sigma -def blob_dog(image, min_sigma=1, max_sigma=50, sigma_ratio=1.6, threshold=0.5, - overlap=.5, *, threshold_rel=None, exclude_border=False): +def blob_dog( + image, + min_sigma=1, + max_sigma=50, + sigma_ratio=1.6, + threshold=0.5, + overlap=0.5, + *, + threshold_rel=None, + exclude_border=False, +): r"""Finds blobs in the given grayscale image. Blobs are found using the Difference of Gaussian (DoG) method [1]_, [2]_. For each blob found, the method returns its coordinates and the standard @@ -228,7 +244,7 @@ def blob_dog(image, min_sigma=1, max_sigma=50, sigma_ratio=1.6, threshold=0.5, References ---------- - .. [1] https://en.wikipedia.org/wiki/Blob_detection#The_difference_of_Gaussians_approach # noqa + .. [1] https://en.wikipedia.org/wiki/Blob_detection#The_difference_of_Gaussians_approach .. [2] Lowe, D. G. "Distinctive Image Features from Scale-Invariant Keypoints." International Journal of Computer Vision 60, 91–110 (2004). 
https://www.cs.ubc.ca/~lowe/papers/ijcv04.pdf @@ -270,37 +286,39 @@ ----- The radius of each blob is approximately :math:`\sqrt{2}\sigma` for a 2-D image and :math:`\sqrt{3}\sigma` for a 3-D image. - """ + """ # noqa: E501 image = img_as_float(image) float_dtype = _supported_float_type(image.dtype) image = image.astype(float_dtype, copy=False) # Determine if provided sigmas are scalar and broadcast to image.ndim - scalar_sigma, min_sigma, max_sigma = _prep_sigmas(image.ndim, min_sigma, - max_sigma) + scalar_sigma, min_sigma, max_sigma = _prep_sigmas( + image.ndim, min_sigma, max_sigma + ) if sigma_ratio <= 1.0: - raise ValueError('sigma_ratio must be > 1.0') + raise ValueError("sigma_ratio must be > 1.0") # k such that min_sigma*(sigma_ratio**k) > max_sigma log_ratio = math.log(sigma_ratio) - k = sum(math.log(max_s / min_s) / log_ratio + 1 - for max_s, min_s in zip(max_sigma, min_sigma)) + k = sum( + math.log(max_s / min_s) / log_ratio + 1 + for max_s, min_s in zip(max_sigma, min_sigma) + ) k /= len(min_sigma) k = int(k) # a geometric progression of standard deviations for gaussian kernels - ratio_powers = tuple(sigma_ratio ** i for i in range(k + 1)) - sigma_list = tuple(tuple(s * p for s in min_sigma) - for p in ratio_powers) + ratio_powers = tuple(sigma_ratio**i for i in range(k + 1)) + sigma_list = tuple(tuple(s * p for s in min_sigma) for p in ratio_powers) # computing difference between two successive Gaussian blurred images # to obtain an approximation of the scale invariant Laplacian of the # Gaussian operator dog_image_cube = cp.empty(image.shape + (k,), dtype=float_dtype) - gaussian_previous = gaussian(image, sigma_list[0], mode='reflect') + gaussian_previous = gaussian(image, sigma_list[0], mode="reflect") for i, s in enumerate(sigma_list[1:]): - gaussian_current = gaussian(image, s, mode='reflect') + gaussian_current = gaussian(image, s, mode="reflect") dog_image_cube[..., i] = gaussian_previous - gaussian_current gaussian_previous = gaussian_current @@ -341,9 +359,18 @@ def blob_dog(image, min_sigma=1, max_sigma=50, sigma_ratio=1.6, threshold=0.5, return _prune_blobs(lm, overlap, sigma_dim=sigma_dim) -def blob_log(image, min_sigma=1, max_sigma=50, num_sigma=10, threshold=.2, - overlap=.5, log_scale=False, *, threshold_rel=None, - exclude_border=False): +def blob_log( + image, + min_sigma=1, + max_sigma=50, + num_sigma=10, + threshold=0.2, + overlap=0.5, + log_scale=False, + *, + threshold_rel=None, + exclude_border=False, +): r"""Finds blobs in the given grayscale image. Blobs are found using the Laplacian of Gaussian (LoG) method [1]_. For each blob found, the method returns its coordinates and the standard @@ -408,7 +435,7 @@ def blob_log(image, min_sigma=1, max_sigma=50, num_sigma=10, threshold=.2, References ---------- - .. [1] https://en.wikipedia.org/wiki/Blob_detection#The_Laplacian_of_Gaussian # noqa + .. [1] https://en.wikipedia.org/wiki/Blob_detection#The_Laplacian_of_Gaussian Examples -------- @@ -439,14 +466,15 @@ def blob_log(image, min_sigma=1, max_sigma=50, num_sigma=10, threshold=.2, ----- The radius of each blob is approximately :math:`\sqrt{2}\sigma` for a 2-D image and :math:`\sqrt{3}\sigma` for a 3-D image. 
- """ + """ # noqa: E501 image = img_as_float(image) float_dtype = _supported_float_type(image.dtype) image = image.astype(float_dtype, copy=False) # Determine if provixed sigmas are scalar and broadcast to image.ndim - scalar_sigma, min_sigma, max_sigma = _prep_sigmas(image.ndim, min_sigma, - max_sigma) + scalar_sigma, min_sigma, max_sigma = _prep_sigmas( + image.ndim, min_sigma, max_sigma + ) if log_scale: start = tuple(math.log10(s) for s in min_sigma) @@ -472,7 +500,7 @@ def _mean_sq(s): threshold_abs=threshold, threshold_rel=threshold_rel, exclude_border=exclude_border, - footprint=cp.ones((3,) * (image.ndim + 1)) + footprint=cp.ones((3,) * (image.ndim + 1)), ) # Catch no peaks @@ -499,8 +527,17 @@ def _mean_sq(s): return _prune_blobs(lm, overlap, sigma_dim=sigma_dim) -def blob_doh(image, min_sigma=1, max_sigma=30, num_sigma=10, threshold=0.01, - overlap=.5, log_scale=False, *, threshold_rel=None): +def blob_doh( + image, + min_sigma=1, + max_sigma=30, + num_sigma=10, + threshold=0.01, + overlap=0.5, + log_scale=False, + *, + threshold_rel=None, +): """Finds blobs in the given grayscale image. Blobs are found using the Determinant of Hessian method [1]_. For each blob @@ -550,7 +587,7 @@ def blob_doh(image, min_sigma=1, max_sigma=30, num_sigma=10, threshold=0.01, References ---------- - .. [1] https://en.wikipedia.org/wiki/Blob_detection#The_determinant_of_the_Hessian # noqa + .. [1] https://en.wikipedia.org/wiki/Blob_detection#The_determinant_of_the_Hessian .. [2] Herbert Bay, Andreas Ess, Tinne Tuytelaars, Luc Van Gool, "SURF: Speeded Up Robust Features" ftp://ftp.vision.ee.ethz.ch/publications/articles/eth_biwi_00517.pdf @@ -590,7 +627,7 @@ def blob_doh(image, min_sigma=1, max_sigma=30, num_sigma=10, threshold=0.01, of Gaussians for larger `sigma` takes more time. The downside is that this method can't be used for detecting blobs of radius less than `3px` due to the box filters used in the approximation of Hessian Determinant. - """ + """ # noqa: E501 check_nD(image, 2) image = img_as_float(image) @@ -609,11 +646,13 @@ def blob_doh(image, min_sigma=1, max_sigma=30, num_sigma=10, threshold=0.01, for i, s in enumerate(sigma_list): image_cube[..., i] = _hessian_matrix_det(image, s) - local_maxima = peak_local_max(image_cube, - threshold_abs=threshold, - threshold_rel=threshold_rel, - exclude_border=False, - footprint=cp.ones((3,) * image_cube.ndim)) + local_maxima = peak_local_max( + image_cube, + threshold_abs=threshold, + threshold_rel=threshold_rel, + exclude_border=False, + footprint=cp.ones((3,) * image_cube.ndim), + ) # Catch no peaks if local_maxima.size == 0: diff --git a/python/cucim/src/cucim/skimage/feature/corner.py b/python/cucim/src/cucim/skimage/feature/corner.py index c7f66dd89..a58120bde 100644 --- a/python/cucim/src/cucim/skimage/feature/corner.py +++ b/python/cucim/src/cucim/skimage/feature/corner.py @@ -45,7 +45,7 @@ def _compute_derivatives(image, mode="constant", cval=0): return derivatives -def structure_tensor(image, sigma=1, mode="constant", cval=0, order='rc'): +def structure_tensor(image, sigma=1, mode="constant", cval=0, order="rc"): """Compute structure tensor using sum of squared differences. 
The (2-dimensional) structure tensor A is defined as:: @@ -121,8 +121,9 @@ def structure_tensor(image, sigma=1, mode="constant", cval=0, order='rc'): if not np.isscalar(sigma): sigma = tuple(sigma) if len(sigma) != image.ndim: - raise ValueError("sigma must have as many elements as image " - "has axes") + raise ValueError( + "sigma must have as many elements as image " "has axes" + ) image = _prepare_grayscale_input_nD(image) @@ -136,15 +137,19 @@ def structure_tensor(image, sigma=1, mode="constant", cval=0, order='rc'): channel_axis = -1 if (image.ndim == 3 and image.shape[-1] == 3) else None # structure tensor - A_elems = [gaussian(der0 * der1, sigma, mode=mode, cval=cval, - channel_axis=channel_axis) - for der0, der1 in combinations_with_replacement(derivatives, 2)] + A_elems = [ + gaussian( + der0 * der1, sigma, mode=mode, cval=cval, channel_axis=channel_axis + ) + for der0, der1 in combinations_with_replacement(derivatives, 2) + ] return A_elems -def _hessian_matrix_with_gaussian(image, sigma=1, mode='reflect', cval=0, - order='rc'): +def _hessian_matrix_with_gaussian( + image, sigma=1, mode="reflect", cval=0, order="rc" +): """Compute the Hessian via convolutions with Gaussian derivatives. In 2D, the Hessian matrix is defined as: @@ -209,8 +214,9 @@ def _hessian_matrix_with_gaussian(image, sigma=1, mode='reflect', cval=0, truncate = 8 if all(s > 1 for s in sigma) else 100 sq1_2 = 1 / math.sqrt(2) sigma_scaled = tuple(sq1_2 * s for s in sigma) - common_kwargs = dict(sigma=sigma_scaled, mode=mode, cval=cval, - truncate=truncate) + common_kwargs = dict( + sigma=sigma_scaled, mode=mode, cval=cval, truncate=truncate + ) gaussian_ = functools.partial(ndi.gaussian_filter, **common_kwargs) # Apply two successive first order Gaussian derivative operations, as @@ -228,15 +234,23 @@ def _hessian_matrix_with_gaussian(image, sigma=1, mode='reflect', cval=0, # 2.) apply the derivative along another axis as well axes = range(ndim) - if order == 'xy': + if order == "xy": axes = reversed(axes) - H_elems = [gaussian_(gradients[ax0], order=orders[ax1]) - for ax0, ax1 in combinations_with_replacement(axes, 2)] + H_elems = [ + gaussian_(gradients[ax0], order=orders[ax1]) + for ax0, ax1 in combinations_with_replacement(axes, 2) + ] return H_elems -def hessian_matrix(image, sigma=1, mode='constant', cval=0, order='rc', - use_gaussian_derivatives=None): +def hessian_matrix( + image, + sigma=1, + mode="constant", + cval=0, + order="rc", + use_gaussian_derivatives=None, +): r"""Compute the Hessian matrix. In 2D, the Hessian matrix is defined as:: @@ -323,22 +337,27 @@ def hessian_matrix(image, sigma=1, mode='constant', cval=0, order='rc', if use_gaussian_derivatives is None: use_gaussian_derivatives = False - warn("use_gaussian_derivatives currently defaults to False, but will " - "change to True in a future version. Please specify this " - "argument explicitly to maintain the current behavior", - category=FutureWarning, stacklevel=2) + warn( + "use_gaussian_derivatives currently defaults to False, but will " + "change to True in a future version. Please specify this " + "argument explicitly to maintain the current behavior", + category=FutureWarning, + stacklevel=2, + ) if use_gaussian_derivatives: - return _hessian_matrix_with_gaussian(image, sigma=sigma, mode=mode, - cval=cval, order=order) + return _hessian_matrix_with_gaussian( + image, sigma=sigma, mode=mode, cval=cval, order=order + ) # Autodetection as done internally to Gaussian, but set it here to silence # a warning. 
# TODO: eventually remove this as this behavior of gaussian is deprecated channel_axis = -1 if (image.ndim == 3 and image.shape[-1] == 3) else None - gaussian_filtered = gaussian(image, sigma=sigma, mode=mode, cval=cval, - channel_axis=channel_axis) + gaussian_filtered = gaussian( + image, sigma=sigma, mode=mode, cval=cval, channel_axis=channel_axis + ) gradients = gradient(gaussian_filtered) axes = range(image.ndim) @@ -360,7 +379,8 @@ def _get_real_symmetric_2x2_det_kernel(): in_params="F M00, F M01, F M11", out_params="F det", operation="det = M00 * M11 - M01 * M01;", - name="cucim_skimage_symmetric_det22_kernel") + name="cucim_skimage_symmetric_det22_kernel", + ) @cp.memoize(for_each_device=True) @@ -374,7 +394,8 @@ def _get_real_symmetric_3x3_det_kernel(): in_params="F M00, F M01, F M02, F M11, F M12, F M22", out_params="F det", operation=operation, - name="cucim_skimage_symmetric_det33_kernel") + name="cucim_skimage_symmetric_det33_kernel", + ) def hessian_matrix_det(image, sigma=1, approximate=True): @@ -439,8 +460,7 @@ def hessian_matrix_det(image, sigma=1, approximate=True): @cp.memoize(for_each_device=True) -def _get_real_symmetric_2x2_eigvals_kernel(sort='ascending', abs_sort=False): - +def _get_real_symmetric_2x2_eigvals_kernel(sort="ascending", abs_sort=False): operation = """ F tmp1, tmp2; double m00 = static_cast(M00); @@ -458,7 +478,7 @@ def _get_real_symmetric_2x2_eigvals_kernel(sort='ascending', abs_sort=False): tmp1 = m00 + m11; tmp1 /= 2; """ - if sort == 'ascending': + if sort == "ascending": operation += """ lam1 = tmp1 - tmp2; lam2 = tmp1 + tmp2; @@ -472,7 +492,7 @@ def _get_real_symmetric_2x2_eigvals_kernel(sort='ascending', abs_sort=False): lam2 = stmp; } """ - elif sort == 'descending': + elif sort == "descending": operation += """ lam1 = tmp1 + tmp2; lam2 = tmp1 - tmp2; @@ -492,11 +512,12 @@ def _get_real_symmetric_2x2_eigvals_kernel(sort='ascending', abs_sort=False): in_params="F M00, F M01, F M11", out_params="F lam1, F lam2", operation=operation, - name="cucim_skimage_symmetric_eig22_kernel") + name="cucim_skimage_symmetric_eig22_kernel", + ) def _image_orthogonal_matrix22_eigvals( - M00, M01, M11, sort='descending', abs_sort=False + M00, M01, M11, sort="descending", abs_sort=False ): r"""Analytical expressions of the eigenvalues of a symmetric 2 x 2 matrix. It corresponds to:: @@ -532,8 +553,7 @@ def _image_orthogonal_matrix22_eigvals( @cp.memoize(for_each_device=True) -def _get_real_symmetric_3x3_eigvals_kernel(sort='ascending', abs_sort=False): - +def _get_real_symmetric_3x3_eigvals_kernel(sort="ascending", abs_sort=False): operation = """ double x1, x2, phi; double a = static_cast(aa); @@ -599,10 +619,10 @@ def _get_real_symmetric_3x3_eigvals_kernel(sort='ascending', abs_sort=False): prefix = "abs_" else: prefix = "" - if sort == 'ascending': + if sort == "ascending": var1 = "lam1" var3 = "lam3" - elif sort == 'descending': + elif sort == "descending": var1 = "lam3" var3 = "lam1" operation += sort_template.format( @@ -612,11 +632,12 @@ def _get_real_symmetric_3x3_eigvals_kernel(sort='ascending', abs_sort=False): in_params="F aa, F bb, F cc, F dd, F ee, F ff", out_params="F lam1, F lam2, F lam3", operation=operation, - name="cucim_skimage_symmetric_eig33_kernel") + name="cucim_skimage_symmetric_eig33_kernel", + ) def _image_orthogonal_matrix33_eigvals( - a, d, f, b, e, c, sort='descending', abs_sort=False + a, d, f, b, e, c, sort="descending", abs_sort=False ): r"""Analytical expressions of the eigenvalues of a symmetric 3 x 3 matrix. 
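The 2x2 eigenvalue kernel above evaluates the closed-form eigenvalues of a symmetric matrix ``[[M00, M01], [M01, M11]]``, namely ``(M00 + M11)/2 ± sqrt(((M00 - M11)/2)**2 + M01**2)``. A minimal NumPy sketch of the same per-pixel arithmetic (the helper name is illustrative only, not part of the cuCIM API):

```python
import numpy as np

def symmetric_2x2_eigvals(m00, m01, m11):
    # Closed-form eigenvalues of [[m00, m01], [m01, m11]],
    # returned largest-first, mirroring sort="descending".
    mean = (m00 + m11) / 2.0  # trace / 2
    disc = np.sqrt(((m00 - m11) / 2.0) ** 2 + m01 * m01)
    return mean + disc, mean - disc

# [[2, 1], [1, 2]] has eigenvalues 3 and 1
lam1, lam2 = symmetric_2x2_eigvals(2.0, 1.0, 2.0)
assert np.isclose(lam1, 3.0) and np.isclose(lam2, 1.0)
```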
@@ -627,7 +648,7 @@ def _image_orthogonal_matrix33_eigvals( ``d``, ``f``, ``b``, ``e``, and ``c`` will be equal in shape to the 3D volume. - Invidual arguments correspond to the following moment matrix entries + Individual arguments correspond to the following moment matrix entries .. math:: @@ -664,7 +685,7 @@ def _image_orthogonal_matrix33_eigvals( return eigs -def _symmetric_compute_eigenvalues(S_elems, sort='descending', abs_sort=False): +def _symmetric_compute_eigenvalues(S_elems, sort="descending", abs_sort=False): """Compute eigenvalues from the upper-diagonal entries of a symmetric matrix. @@ -718,7 +739,7 @@ def _symmetric_compute_eigenvalues(S_elems, sort='descending', abs_sort=False): if abs_sort: # (sort by magnitude) eigs = cp.take_along_axis(eigs, cp.abs(eigs).argsort(0), 0) - if sort == 'descending': + if sort == "descending": eigs = eigs[::-1, ...] return eigs @@ -740,8 +761,9 @@ def _symmetric_image(S_elems): containing the matrix corresponding to each coordinate. """ image = S_elems[0] - symmetric_image = cp.zeros(image.shape + (image.ndim, image.ndim), - dtype=image.dtype) + symmetric_image = cp.zeros( + image.shape + (image.ndim, image.ndim), dtype=image.dtype + ) for idx, (row, col) in enumerate( combinations_with_replacement(range(image.ndim), 2) ): @@ -889,20 +911,26 @@ def shape_index(image, sigma=1, mode="constant", cval=0): [ nan, nan, -0.5, nan, nan]]) """ - H = hessian_matrix(image, sigma=sigma, mode=mode, cval=cval, order="rc", - use_gaussian_derivatives=False) + H = hessian_matrix( + image, + sigma=sigma, + mode=mode, + cval=cval, + order="rc", + use_gaussian_derivatives=False, + ) l1, l2 = hessian_matrix_eigvals(H) # don't warn on divide by 0 as occurs in the docstring example - with np.errstate(divide='ignore', invalid='ignore'): + with np.errstate(divide="ignore", invalid="ignore"): return (2.0 / np.pi) * np.arctan((l2 + l1) / (l2 - l1)) @cp.memoize(for_each_device=True) def _get_kitchen_rosenfeld_kernel(): return cp.ElementwiseKernel( - in_params='F imx, F imy, F imxx, F imxy, F imyy', - out_params='F response', + in_params="F imx, F imy, F imxx, F imxy, F imyy", + out_params="F response", operation=""" F numerator, denominator, imx_sq, imy_sq; imx_sq = imx * imx; @@ -916,7 +944,7 @@ def _get_kitchen_rosenfeld_kernel(): response = numerator / denominator; } """, # noqa - name='cucim_feature_kitchen_rosenfeld' + name="cucim_feature_kitchen_rosenfeld", ) @@ -961,15 +989,15 @@ def corner_kitchen_rosenfeld(image, mode="constant", cval=0): imyy, _ = _compute_derivatives(imy, mode=mode, cval=cval) kernel = _get_kitchen_rosenfeld_kernel() - response = cp.empty_like(image, order='C') + response = cp.empty_like(image, order="C") return kernel(imx, imy, imxx, imxy, imyy, response) @cp.memoize(for_each_device=True) def _get_corner_harris_k_kernel(): return cp.ElementwiseKernel( - in_params='F Arr, F Acc, F Arc, float64 k', - out_params='F response', + in_params="F Arr, F Acc, F Arc, float64 k", + out_params="F response", operation=""" F detA, traceA; // determinant @@ -978,15 +1006,15 @@ def _get_corner_harris_k_kernel(): traceA = Arr + Acc; response = detA - k * traceA * traceA; """, - name='cucim_skimage_feature_corner_harris_k' + name="cucim_skimage_feature_corner_harris_k", ) @cp.memoize(for_each_device=True) def _get_corner_harris_kernel(): return cp.ElementwiseKernel( - in_params='F Arr, F Acc, F Arc, float64 eps', - out_params='F response', + in_params="F Arr, F Acc, F Arc, float64 eps", + out_params="F response", operation=""" F detA, traceA; // determinant 
@@ -995,7 +1023,7 @@ def _get_corner_harris_kernel(): traceA = Arr + Acc; response = 2 * detA / (traceA + eps); """, - name='cucim_skimage_feature_corner_harris_k' + name="cucim_skimage_feature_corner_harris_k", ) @@ -1077,15 +1105,15 @@ def corner_harris(image, method="k", k=0.05, eps=1e-6, sigma=1): @cp.memoize(for_each_device=True) def _get_shi_tomasi_kernel(): return cp.ElementwiseKernel( - in_params='F Arr, F Acc, F Arc', - out_params='F response', + in_params="F Arr, F Acc, F Arc", + out_params="F response", operation=""" F tmp; tmp = (Arr - Acc); tmp *= tmp; response = (Arr + Acc - sqrt(tmp + 4 * Arc * Arc)) / 2.0; """, - name='cucim_skimage_feature_shi_tomasi' + name="cucim_skimage_feature_shi_tomasi", ) @@ -1153,8 +1181,8 @@ def corner_shi_tomasi(image, sigma=1): @cp.memoize(for_each_device=True) def _get_foerstner_kernel(): return cp.ElementwiseKernel( - in_params='F Arr, F Acc, F Arc', - out_params='F w, F q', + in_params="F Arr, F Acc, F Arc", + out_params="F w, F q", operation=""" F detA, traceA; @@ -1170,7 +1198,7 @@ def _get_foerstner_kernel(): q = 4 * detA / (traceA * traceA); } """, - name='cucim_skimage_feature_forstner' + name="cucim_skimage_feature_forstner", ) diff --git a/python/cucim/src/cucim/skimage/feature/match.py b/python/cucim/src/cucim/skimage/feature/match.py index 0187e369c..5f1e8f56d 100644 --- a/python/cucim/src/cucim/skimage/feature/match.py +++ b/python/cucim/src/cucim/skimage/feature/match.py @@ -6,14 +6,23 @@ # CuPy's cdist will only work if pylibraft is available import pylibraft # noqa from cupyx.scipy.spatial.distance import cdist + have_gpu_cdist = True except ImportError: from scipy.spatial.distance import cdist + have_gpu_cdist = False -def match_descriptors(descriptors1, descriptors2, metric=None, p=2, - max_distance=cp.inf, cross_check=True, max_ratio=1.0): +def match_descriptors( + descriptors1, + descriptors2, + metric=None, + p=2, + max_distance=cp.inf, + cross_check=True, + max_ratio=1.0, +): """Brute-force matching of descriptors. For each descriptor in the first set this matcher finds the closest @@ -65,25 +74,27 @@ def match_descriptors(descriptors1, descriptors2, metric=None, p=2, if metric is None: if cp.issubdtype(descriptors1.dtype, bool): - metric = 'hamming' + metric = "hamming" else: - metric = 'euclidean' + metric = "euclidean" kwargs = {} # Scipy raises an error if p is passed as an extra argument when it isn't # necessary for the chosen metric. 
- if metric == 'minkowski': - kwargs['p'] = p + if metric == "minkowski": + kwargs["p"] = p if not have_gpu_cdist: - warnings.warn("pylibraft not found, falling back to SciPy " - "implementation of cdist on the CPU") + warnings.warn( + "pylibraft not found, falling back to SciPy " + "implementation of cdist on the CPU" + ) distances = cp.array( cdist( cp.asnumpy(descriptors1), cp.asnumpy(descriptors2), metric=metric, - **kwargs + **kwargs, ) ) else: @@ -108,8 +119,9 @@ def match_descriptors(descriptors1, descriptors2, metric=None, p=2, distances[indices1, indices2] = cp.inf second_best_indices2 = cp.argmin(distances[indices1], axis=1) second_best_distances = distances[indices1, second_best_indices2] - second_best_distances[second_best_distances == 0] \ - = cp.finfo(cp.float64).eps + second_best_distances[second_best_distances == 0] = cp.finfo( + cp.float64 + ).eps ratio = best_distances / second_best_distances mask = ratio < max_ratio indices1 = indices1[mask] diff --git a/python/cucim/src/cucim/skimage/feature/peak.py b/python/cucim/src/cucim/skimage/feature/peak.py index 389285692..84965b328 100644 --- a/python/cucim/src/cucim/skimage/feature/peak.py +++ b/python/cucim/src/cucim/skimage/feature/peak.py @@ -5,6 +5,7 @@ from scipy.ndimage import find_objects as cpu_find_objects import cucim.skimage._vendored.ndimage as ndi + # from ..filters import rank_order from cucim.skimage import measure @@ -30,8 +31,9 @@ def _get_high_intensity_peaks(image, mask, num_peaks, min_distance, p_norm): else: max_out = None - coord = ensure_spacing(coord, spacing=min_distance, p_norm=p_norm, - max_out=max_out) + coord = ensure_spacing( + coord, spacing=min_distance, p_norm=p_norm, max_out=max_out + ) if len(coord) > num_peaks: coord = coord[:num_peaks] @@ -46,8 +48,7 @@ def _get_peak_mask(image, footprint, threshold, mask=None): if footprint.size == 1 or image.size == 1: return image > threshold - image_max = ndi.maximum_filter(image, footprint=footprint, - mode='nearest') + image_max = ndi.maximum_filter(image, footprint=footprint, mode="nearest") out = image == image_max @@ -65,8 +66,7 @@ def _get_peak_mask(image, footprint, threshold, mask=None): def _exclude_border(label, border_width): - """Set label border values to 0. - """ + """Set label border values to 0.""" # zero out label borders for i, width in enumerate(border_width): if width == 0: @@ -90,9 +90,7 @@ def _get_threshold(image, threshold_abs, threshold_rel): def _get_excluded_border_width(image, min_distance, exclude_border): - """Return border_width values relative to a min_distance if requested. - - """ + """Return border_width values relative to a min_distance if requested.""" if isinstance(exclude_border, bool): border_width = (min_distance if exclude_border else 0,) * image.ndim @@ -104,7 +102,8 @@ def _get_excluded_border_width(image, min_distance, exclude_border): if len(exclude_border) != image.ndim: raise ValueError( "`exclude_border` should have the same length as the " - "dimensionality of the image.") + "dimensionality of the image." + ) for exclude in exclude_border: if not isinstance(exclude, int): raise ValueError( @@ -112,21 +111,29 @@ def _get_excluded_border_width(image, min_distance, exclude_border): "contain ints." 
) if exclude < 0: - raise ValueError( - "`exclude_border` can not be a negative value") + raise ValueError("`exclude_border` can not be a negative value") border_width = exclude_border else: raise TypeError( "`exclude_border` must be bool, int, or tuple with the same " - "length as the dimensionality of the image.") + "length as the dimensionality of the image." + ) return border_width -def peak_local_max(image, min_distance=1, threshold_abs=None, - threshold_rel=None, exclude_border=True, - num_peaks=cp.inf, footprint=None, labels=None, - num_peaks_per_label=cp.inf, p_norm=cp.inf): +def peak_local_max( + image, + min_distance=1, + threshold_abs=None, + threshold_rel=None, + exclude_border=True, + num_peaks=cp.inf, + footprint=None, + labels=None, + num_peaks_per_label=cp.inf, + p_norm=cp.inf, +): """Find peaks in an image as coordinate list. Peaks are the local maxima in a region of `2 * min_distance + 1` @@ -235,12 +242,16 @@ def peak_local_max(image, min_distance=1, threshold_abs=None, """ if (footprint is None or footprint.size == 1) and min_distance < 1: - warn("When min_distance < 1, peak_local_max acts as finding " - "image > max(threshold_abs, threshold_rel * max(image)).", - RuntimeWarning, stacklevel=2) + warn( + "When min_distance < 1, peak_local_max acts as finding " + "image > max(threshold_abs, threshold_rel * max(image)).", + RuntimeWarning, + stacklevel=2, + ) - border_width = _get_excluded_border_width(image, min_distance, - exclude_border) + border_width = _get_excluded_border_width( + image, min_distance, exclude_border + ) threshold = _get_threshold(image, threshold_abs, threshold_rel) @@ -255,9 +266,9 @@ def peak_local_max(image, min_distance=1, threshold_abs=None, mask = _exclude_border(mask, border_width) # Select highest intensities (num_peaks) - coordinates = _get_high_intensity_peaks(image, mask, - num_peaks, - min_distance, p_norm) + coordinates = _get_high_intensity_peaks( + image, mask, num_peaks, min_distance, p_norm + ) else: # Backend: casting="safe" not implemented in CuPy @@ -294,10 +305,9 @@ def peak_local_max(image, min_distance=1, threshold_abs=None, mask = _get_peak_mask(img_object, footprint, threshold, label_mask) - coordinates = _get_high_intensity_peaks(img_object, mask, - num_peaks_per_label, - min_distance, - p_norm) + coordinates = _get_high_intensity_peaks( + img_object, mask, num_peaks_per_label, min_distance, p_norm + ) # transform coordinates in global image indices space for idx, s in enumerate(roi): @@ -313,16 +323,16 @@ def peak_local_max(image, min_distance=1, threshold_abs=None, if len(coordinates) > num_peaks: out = cp.zeros_like(image, dtype=bool) out[tuple(coordinates.T)] = True - coordinates = _get_high_intensity_peaks(image, out, - num_peaks, - min_distance, - p_norm) + coordinates = _get_high_intensity_peaks( + image, out, num_peaks, min_distance, p_norm + ) return coordinates -def _prominent_peaks(image, min_xdistance=1, min_ydistance=1, - threshold=None, num_peaks=np.inf): +def _prominent_peaks( + image, min_xdistance=1, min_ydistance=1, threshold=None, num_peaks=np.inf +): """Return peaks with non-maximum suppression. Identifies most prominent features separated by certain distances. 
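`peak_local_max` returns peak coordinates as an `(n_peaks, ndim)` array, highest peak first, after thresholding, border exclusion, and minimum-spacing enforcement. A short usage sketch (illustrative values; assumes a working CuPy install):

```python
import cupy as cp
from cucim.skimage.feature import peak_local_max

img = cp.zeros((20, 20), dtype=cp.float32)
img[7, 7] = 1.0     # brighter peak, reported first
img[13, 13] = 0.5   # dimmer peak

# min_distance=5 also excludes a 5-pixel border by default
# (exclude_border=True), so both peaks must sit well inside.
coords = peak_local_max(img, min_distance=5, threshold_abs=0.1)
print(coords)  # expected: [[ 7  7]
               #            [13 13]]
```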
@@ -357,11 +367,13 @@ def _prominent_peaks(image, min_xdistance=1, min_ydistance=1, ycoords_size = 2 * min_ydistance + 1 xcoords_size = 2 * min_xdistance + 1 - img_max = ndi.maximum_filter1d(img, size=ycoords_size, axis=0, - mode='constant', cval=0) - img_max = ndi.maximum_filter1d(img_max, size=xcoords_size, axis=1, - mode='constant', cval=0) - mask = (img == img_max) + img_max = ndi.maximum_filter1d( + img, size=ycoords_size, axis=0, mode="constant", cval=0 + ) + img_max = ndi.maximum_filter1d( + img_max, size=xcoords_size, axis=1, mode="constant", cval=0 + ) + mask = img == img_max img *= mask img_t = img > threshold @@ -378,8 +390,9 @@ def _prominent_peaks(image, min_xdistance=1, min_ydistance=1, xcoords_peaks = [] # relative coordinate grid for local neighborhood suppression - ycoords_ext, xcoords_ext = cp.mgrid[-min_ydistance:min_ydistance + 1, - -min_xdistance:min_xdistance + 1] + ycoords_ext, xcoords_ext = cp.mgrid[ + -min_ydistance : min_ydistance + 1, -min_xdistance : min_xdistance + 1 + ] for ycoords_idx, xcoords_idx in coords: accum = img_max[ycoords_idx, xcoords_idx] diff --git a/python/cucim/src/cucim/skimage/feature/template.py b/python/cucim/src/cucim/skimage/feature/template.py index 63f696759..3cad08c90 100644 --- a/python/cucim/src/cucim/skimage/feature/template.py +++ b/python/cucim/src/cucim/skimage/feature/template.py @@ -9,33 +9,37 @@ def _window_sum_2d(image, window_shape): - # TODO: remove copy in line below once the following issue is resolved # https://github.com/cupy/cupy/issues/4456 window_sum = cp.cumsum(image, axis=0) - window_sum = (window_sum[window_shape[0]:-1] - - window_sum[:-window_shape[0] - 1]) + window_sum = ( + window_sum[window_shape[0] : -1] - window_sum[: -window_shape[0] - 1] + ) window_sum = cp.cumsum(window_sum, axis=1) - window_sum = (window_sum[:, window_shape[1]:-1] - - window_sum[:, :-window_shape[1] - 1]) + window_sum = ( + window_sum[:, window_shape[1] : -1] + - window_sum[:, : -window_shape[1] - 1] + ) return window_sum def _window_sum_3d(image, window_shape): - window_sum = _window_sum_2d(image, window_shape) window_sum = cp.cumsum(window_sum, axis=2) - window_sum = (window_sum[:, :, window_shape[2]:-1] - - window_sum[:, :, :-window_shape[2] - 1]) + window_sum = ( + window_sum[:, :, window_shape[2] : -1] + - window_sum[:, :, : -window_shape[2] - 1] + ) return window_sum -def match_template(image, template, pad_input=False, mode='constant', - constant_values=0): +def match_template( + image, template, pad_input=False, mode="constant", constant_values=0 +): """Match a template to a 2-D or 3-D image using normalized correlation. The output is an array with values between -1.0 and 1.0. The value at a @@ -127,8 +131,10 @@ def match_template(image, template, pad_input=False, mode='constant', check_nD(image, (2, 3)) if image.ndim < template.ndim: - raise ValueError("Dimensionality of template must be less than or " - "equal to the dimensionality of image.") + raise ValueError( + "Dimensionality of template must be less than or " + "equal to the dimensionality of image." 
+ ) if any(si < st for si, st in zip(image.shape, template.shape)): raise ValueError("Image must be larger than template.") @@ -141,9 +147,13 @@ def match_template(image, template, pad_input=False, mode='constant', template = template.astype(float_dtype, copy=False) pad_width = tuple((width, width) for width in template.shape) - if mode == 'constant': - image = pad(image, pad_width=pad_width, mode=mode, - constant_values=constant_values) + if mode == "constant": + image = pad( + image, + pad_width=pad_width, + mode=mode, + constant_values=constant_values, + ) else: image = pad(image, pad_width=pad_width, mode=mode) @@ -164,11 +174,13 @@ def match_template(image, template, pad_input=False, mode='constant', template_ssd = cp.sum(template_ssd, dtype=cp.float64) if image.ndim == 2: - xcorr = signal.fftconvolve(image, template[::-1, ::-1], - mode="valid")[1:-1, 1:-1] + xcorr = signal.fftconvolve(image, template[::-1, ::-1], mode="valid")[ + 1:-1, 1:-1 + ] elif image.ndim == 3: - xcorr = signal.fftconvolve(image, template[::-1, ::-1, ::-1], - mode="valid")[1:-1, 1:-1, 1:-1] + xcorr = signal.fftconvolve( + image, template[::-1, ::-1, ::-1], mode="valid" + )[1:-1, 1:-1, 1:-1] numerator = xcorr - image_window_sum * template_mean diff --git a/python/cucim/src/cucim/skimage/feature/tests/test_basic_features.py b/python/cucim/src/cucim/skimage/feature/tests/test_basic_features.py index 9f3efb878..1911e95e0 100644 --- a/python/cucim/src/cucim/skimage/feature/tests/test_basic_features.py +++ b/python/cucim/src/cucim/skimage/feature/tests/test_basic_features.py @@ -5,8 +5,8 @@ from cucim.skimage.feature import multiscale_basic_features -@pytest.mark.parametrize('edges', (False, True)) -@pytest.mark.parametrize('texture', (False, True)) +@pytest.mark.parametrize("edges", (False, True)) +@pytest.mark.parametrize("texture", (False, True)) def test_multiscale_basic_features_gray(edges, texture): img = np.zeros((20, 20)) img[:10] = 1 @@ -22,7 +22,7 @@ def test_multiscale_basic_features_gray(edges, texture): assert features.shape[:-1] == img.shape[:] -@pytest.mark.parametrize('channel_axis', [0, 1, 2, -1, -2]) +@pytest.mark.parametrize("channel_axis", [0, 1, 2, -1, -2]) def test_multiscale_basic_features_channel_axis(channel_axis): num_channels = 5 shape_spatial = (10, 10) @@ -37,8 +37,9 @@ def test_multiscale_basic_features_channel_axis(channel_axis): n_sigmas = 2 # features for all channels are concatenated along the last axis - features = multiscale_basic_features(img, sigma_min=1, sigma_max=2, - channel_axis=channel_axis) + features = multiscale_basic_features( + img, sigma_min=1, sigma_max=2, channel_axis=channel_axis + ) assert features.shape[-1] == 5 * n_sigmas * 4 assert features.shape[:-1] == cp.moveaxis(img, channel_axis, -1).shape[:-1] diff --git a/python/cucim/src/cucim/skimage/feature/tests/test_blob.py b/python/cucim/src/cucim/skimage/feature/tests/test_blob.py index 91683111f..8136775f9 100644 --- a/python/cucim/src/cucim/skimage/feature/tests/test_blob.py +++ b/python/cucim/src/cucim/skimage/feature/tests/test_blob.py @@ -11,9 +11,9 @@ @pytest.mark.parametrize( - 'dtype', [cp.uint8, cp.float16, cp.float32, cp.float64] + "dtype", [cp.uint8, cp.float16, cp.float32, cp.float64] ) -@pytest.mark.parametrize('threshold_type', ['absolute', 'relative']) +@pytest.mark.parametrize("threshold_type", ["absolute", "relative"]) def test_blob_dog(dtype, threshold_type): img = cp.ones((512, 512), dtype=dtype) @@ -26,13 +26,13 @@ def test_blob_dog(dtype, threshold_type): xs, ys = cp.asarray(disk((200, 350), 
45)) img[xs, ys] = 255 - if threshold_type == 'absolute': + if threshold_type == "absolute": threshold = 2.0 - if img.dtype.kind != 'f': + if img.dtype.kind != "f": # account for internal scaling to [0, 1] by img_as_float threshold /= img.ptp() threshold_rel = None - elif threshold_type == 'relative': + elif threshold_type == "relative": threshold = None threshold_rel = 0.5 @@ -72,20 +72,20 @@ def radius(x): @pytest.mark.parametrize( - 'dtype', [cp.uint8, cp.float16, cp.float32, cp.float64] + "dtype", [cp.uint8, cp.float16, cp.float32, cp.float64] ) -@pytest.mark.parametrize('threshold_type', ['absolute', 'relative']) +@pytest.mark.parametrize("threshold_type", ["absolute", "relative"]) def test_blob_dog_3d(dtype, threshold_type): # Testing 3D r = 10 pad_width = 10 im3 = cp.asarray(ellipsoid(r, r, r)) - im3 = pad(im3, pad_width, mode='constant') + im3 = pad(im3, pad_width, mode="constant") - if threshold_type == 'absolute': + if threshold_type == "absolute": threshold = 0.001 threshold_rel = 0 - elif threshold_type == 'relative': + elif threshold_type == "relative": threshold = 0 threshold_rel = 0.5 @@ -107,20 +107,20 @@ def test_blob_dog_3d(dtype, threshold_type): @pytest.mark.parametrize( - 'dtype', [cp.uint8, cp.float16, cp.float32, cp.float64] + "dtype", [cp.uint8, cp.float16, cp.float32, cp.float64] ) -@pytest.mark.parametrize('threshold_type', ['absolute', 'relative']) +@pytest.mark.parametrize("threshold_type", ["absolute", "relative"]) def test_blob_dog_3d_anisotropic(dtype, threshold_type): # Testing 3D anisotropic r = 10 pad_width = 10 im3 = cp.asarray(ellipsoid(r / 2, r, r)) - im3 = pad(im3, pad_width, mode='constant') + im3 = pad(im3, pad_width, mode="constant") - if threshold_type == 'absolute': + if threshold_type == "absolute": threshold = 0.001 threshold_rel = None - elif threshold_type == 'relative': + elif threshold_type == "relative": threshold = None threshold_rel = 0.5 @@ -171,9 +171,9 @@ def test_blob_dog_excl_border(): assert blobs.shape[0] == 0, msg -@pytest.mark.parametrize('anisotropic', [False, True]) -@pytest.mark.parametrize('ndim', [1, 2, 3, 4]) -@pytest.mark.parametrize('function_name', ['blob_dog', 'blob_log']) +@pytest.mark.parametrize("anisotropic", [False, True]) +@pytest.mark.parametrize("ndim", [1, 2, 3, 4]) +@pytest.mark.parametrize("function_name", ["blob_dog", "blob_log"]) def test_nd_blob_no_peaks_shape(function_name, ndim, anisotropic): # uniform image so no blobs will be found z = cp.zeros((16,) * ndim, dtype=cp.float32) @@ -189,9 +189,9 @@ def test_nd_blob_no_peaks_shape(function_name, ndim, anisotropic): @pytest.mark.parametrize( - 'dtype', [cp.uint8, cp.float16, cp.float32, cp.float64] + "dtype", [cp.uint8, cp.float16, cp.float32, cp.float64] ) -@pytest.mark.parametrize('threshold_type', ['absolute', 'relative']) +@pytest.mark.parametrize("threshold_type", ["absolute", "relative"]) def test_blob_log(dtype, threshold_type): img = cp.ones((256, 256), dtype=dtype) @@ -207,18 +207,23 @@ def test_blob_log(dtype, threshold_type): xs, ys = cp.asarray(disk((100, 175), 30)) img[xs, ys] = 255 - if threshold_type == 'absolute': + if threshold_type == "absolute": threshold = 1 - if img.dtype.kind != 'f': + if img.dtype.kind != "f": # account for internal scaling to [0, 1] by img_as_float threshold /= img.ptp() threshold_rel = None - elif threshold_type == 'relative': + elif threshold_type == "relative": threshold = None threshold_rel = 0.5 - blobs = blob_log(img, min_sigma=5, max_sigma=20, threshold=threshold, - threshold_rel=threshold_rel) + blobs = 
blob_log( + img, + min_sigma=5, + max_sigma=20, + threshold=threshold, + threshold_rel=threshold_rel, + ) def radius(x): return math.sqrt(2) * x[2] @@ -253,7 +258,8 @@ def radius(x): max_sigma=20, threshold=threshold, threshold_rel=threshold_rel, - log_scale=True) + log_scale=True, + ) b = s[0] assert abs(b[0] - 200) <= thresh @@ -289,7 +295,7 @@ def test_blob_log_no_warnings(): xs, ys = cp.asarray(disk((7, 6), 2)) img[xs, ys] = 255 - blob_log(img, max_sigma=20, num_sigma=10, threshold=.1) + blob_log(img, max_sigma=20, num_sigma=10, threshold=0.1) def test_blob_log_3d(): @@ -297,7 +303,7 @@ def test_blob_log_3d(): r = 6 pad_width = 10 im3 = cp.asarray(ellipsoid(r, r, r)) - im3 = pad(im3, pad_width, mode='constant') + im3 = pad(im3, pad_width, mode="constant") blobs = blob_log(im3, min_sigma=3, max_sigma=10) b = blobs[0] @@ -314,7 +320,7 @@ def test_blob_log_3d_anisotropic(): r = 6 pad_width = 10 im3 = cp.asarray(ellipsoid(r / 2, r, r)) - im3 = pad(im3, pad_width, mode='constant') + im3 = pad(im3, pad_width, mode="constant") blobs = blob_log( im3, @@ -358,7 +364,7 @@ def test_blob_log_exclude_border(): @pytest.mark.parametrize("dtype", [cp.uint8, cp.float16, cp.float32]) -@pytest.mark.parametrize('threshold_type', ['absolute', 'relative']) +@pytest.mark.parametrize("threshold_type", ["absolute", "relative"]) def test_blob_doh(dtype, threshold_type): img = cp.ones((512, 512), dtype=dtype) @@ -374,16 +380,16 @@ def test_blob_doh(dtype, threshold_type): xs, ys = cp.asarray(disk((200, 350), 50)) img[xs, ys] = 255 - if threshold_type == 'absolute': + if threshold_type == "absolute": # Note: have to either scale up threshold or rescale the image to the # range [0, 1] internally. threshold = 0.05 - if img.dtype.kind == 'f': + if img.dtype.kind == "f": # account for lack of internal scaling to [0, 1] by img_as_float ptp = img.ptp() - threshold *= ptp ** 2 + threshold *= ptp**2 threshold_rel = None - elif threshold_type == 'relative': + elif threshold_type == "relative": threshold = None threshold_rel = 0.5 @@ -393,7 +399,8 @@ def test_blob_doh(dtype, threshold_type): max_sigma=60, num_sigma=10, threshold=threshold, - threshold_rel=threshold_rel) + threshold_rel=threshold_rel, + ) def radius(x): return x[2] @@ -443,7 +450,8 @@ def test_blob_doh_log_scale(): max_sigma=60, num_sigma=10, log_scale=True, - threshold=.05) + threshold=0.05, + ) def radius(x): return x[2] @@ -488,11 +496,7 @@ def test_blob_doh_overlap(): img[xs, ys] = 255 blobs = blob_doh( - img, - min_sigma=1, - max_sigma=60, - num_sigma=10, - threshold=.05 + img, min_sigma=1, max_sigma=60, num_sigma=10, threshold=0.05 ) assert len(blobs) == 1 @@ -502,10 +506,13 @@ def test_blob_log_overlap_3d(): r1, r2 = 7, 6 pad1, pad2 = 11, 12 blob1 = cp.asarray(ellipsoid(r1, r1, r1)) - blob1 = pad(blob1, pad1, mode='constant') + blob1 = pad(blob1, pad1, mode="constant") blob2 = cp.asarray(ellipsoid(r2, r2, r2)) - blob2 = pad(blob2, [(pad2, pad2), (pad2 - 9, pad2 + 9), (pad2, pad2)], - mode='constant') + blob2 = pad( + blob2, + [(pad2, pad2), (pad2 - 9, pad2 + 9), (pad2, pad2)], + mode="constant", + ) im3 = cp.logical_or(blob1, blob2) blobs = blob_log(im3, min_sigma=2, max_sigma=10, overlap=0.1) @@ -517,8 +524,9 @@ def test_blob_log_anisotropic(): image[20, 10:20] = 1 isotropic_blobs = blob_log(image, min_sigma=0.5, max_sigma=2, num_sigma=3) assert len(isotropic_blobs) > 1 # many small blobs found in line - ani_blobs = blob_log(image, min_sigma=[0.5, 5], max_sigma=[2, 20], - num_sigma=3) # 10x anisotropy, line is 1x10 + ani_blobs = blob_log( + image, 
min_sigma=[0.5, 5], max_sigma=[2, 20], num_sigma=3 + ) # 10x anisotropy, line is 1x10 assert len(ani_blobs) == 1 # single anisotropic blob found diff --git a/python/cucim/src/cucim/skimage/feature/tests/test_canny.py b/python/cucim/src/cucim/skimage/feature/tests/test_canny.py index c7880bb60..80beb4fc8 100644 --- a/python/cucim/src/cucim/skimage/feature/tests/test_canny.py +++ b/python/cucim/src/cucim/skimage/feature/tests/test_canny.py @@ -8,17 +8,19 @@ from cucim.skimage.util import img_as_float -class TestCanny(): +class TestCanny: def test_00_00_zeros(self): """Test that the Canny filter finds no points for a blank field""" - result = feature.canny(cp.zeros((20, 20)), 4, 0, 0, cp.ones((20, 20), - bool)) + result = feature.canny( + cp.zeros((20, 20)), 4, 0, 0, cp.ones((20, 20), bool) + ) assert not cp.any(result) def test_00_01_zeros_mask(self): """Test that the Canny filter finds no points in a masked image""" - result = (feature.canny(cp.random.uniform(size=(20, 20)), 4, 0, 0, - cp.zeros((20, 20), bool))) + result = feature.canny( + cp.random.uniform(size=(20, 20)), 4, 0, 0, cp.zeros((20, 20), bool) + ) assert not cp.any(result) def test_01_01_circle(self): @@ -71,32 +73,42 @@ def test_image_shape(self): feature.canny(cp.zeros((20, 20, 20)), 4, 0, 0) def test_mask_none(self): - result1 = feature.canny(cp.zeros((20, 20)), 4, 0, 0, cp.ones((20, 20), - bool)) + result1 = feature.canny( + cp.zeros((20, 20)), 4, 0, 0, cp.ones((20, 20), bool) + ) result2 = feature.canny(cp.zeros((20, 20)), 4, 0, 0) assert cp.all(result1 == result2) - @pytest.mark.parametrize('image_dtype', [cp.uint8, cp.int32, cp.float32, - cp.float64]) + @pytest.mark.parametrize( + "image_dtype", [cp.uint8, cp.int32, cp.float32, cp.float64] + ) def test_use_quantiles(self, image_dtype): dtype = cp.dtype(image_dtype) image = cp.asarray(data.camera()[::100, ::100]) - if dtype.kind == 'f': + if dtype.kind == "f": image = img_as_float(image) image = image.astype(dtype) # Correct output produced manually with quantiles # of 0.8 and 0.6 for high and low respectively correct_output = cp.asarray( - [[False, False, False, False, False, False], - [False, True, True, True, False, False], # noqa - [False, False, False, True, False, False], # noqa - [False, False, False, True, False, False], # noqa - [False, False, True, True, False, False], # noqa - [False, False, False, False, False, False]]) + [ + [False, False, False, False, False, False], + [False, True, True, True, False, False], # noqa + [False, False, False, True, False, False], # noqa + [False, False, False, True, False, False], # noqa + [False, False, True, True, False, False], # noqa + [False, False, False, False, False, False], + ] + ) - result = feature.canny(image, low_threshold=0.6, high_threshold=0.8, - use_quantiles=True, mode='nearest') + result = feature.canny( + image, + low_threshold=0.6, + high_threshold=0.8, + use_quantiles=True, + mode="nearest", + ) assert_array_equal(result, correct_output) @@ -108,26 +120,34 @@ def test_invalid_use_quantiles(self): image = img_as_float(cp.array(data.camera()[::50, ::50])) with pytest.raises(ValueError): - feature.canny(image, use_quantiles=True, - low_threshold=0.5, high_threshold=3.6) + feature.canny( + image, use_quantiles=True, low_threshold=0.5, high_threshold=3.6 + ) with pytest.raises(ValueError): - feature.canny(image, use_quantiles=True, - low_threshold=-5, high_threshold=0.5) + feature.canny( + image, use_quantiles=True, low_threshold=-5, high_threshold=0.5 + ) with pytest.raises(ValueError): - feature.canny(image, 
use_quantiles=True, - low_threshold=99, high_threshold=0.9) + feature.canny( + image, use_quantiles=True, low_threshold=99, high_threshold=0.9 + ) with pytest.raises(ValueError): - feature.canny(image, use_quantiles=True, - low_threshold=0.5, high_threshold=-100) + feature.canny( + image, + use_quantiles=True, + low_threshold=0.5, + high_threshold=-100, + ) # Example from issue #4282 image = data.camera() with pytest.raises(ValueError): - feature.canny(image, use_quantiles=True, - low_threshold=50, high_threshold=150) + feature.canny( + image, use_quantiles=True, low_threshold=50, high_threshold=150 + ) def test_dtype(self): """Check that the same output is produced regardless of image dtype.""" @@ -143,23 +163,22 @@ def test_dtype(self): assert_array_equal( feature.canny(image_float, 1.0, low, high), - feature.canny(image_uint8, 1.0, 255 * low, 255 * high) + feature.canny(image_uint8, 1.0, 255 * low, 255 * high), ) def test_full_mask_matches_no_mask(self): - """The masked and unmasked algorithms should return the same result. - - """ + """The masked and unmasked algorithms should return the same result.""" image = cp.array(data.camera()) - for mode in ('constant', 'nearest', 'reflect'): + for mode in ("constant", "nearest", "reflect"): cp.testing.assert_array_equal( feature.canny(image, mode=mode), - feature.canny(image, mode=mode, - mask=cp.ones_like(image, dtype=bool)) + feature.canny( + image, mode=mode, mask=cp.ones_like(image, dtype=bool) + ), ) - @pytest.mark.parametrize('dtype', (cp.int64, cp.uint64)) + @pytest.mark.parametrize("dtype", (cp.int64, cp.uint64)) def test_unsupported_int64(self, dtype): image = cp.zeros((10, 10), dtype=dtype) image[3, 3] = cp.iinfo(dtype).max diff --git a/python/cucim/src/cucim/skimage/feature/tests/test_corner.py b/python/cucim/src/cucim/skimage/feature/tests/test_corner.py index 649af9ad1..3554aadb8 100644 --- a/python/cucim/src/cucim/skimage/feature/tests/test_corner.py +++ b/python/cucim/src/cucim/skimage/feature/tests/test_corner.py @@ -11,13 +11,20 @@ from cucim.skimage._shared.utils import _supported_float_type from cucim.skimage._vendored import pad from cucim.skimage.color import rgb2gray -from cucim.skimage.feature import (corner_foerstner, corner_harris, - corner_kitchen_rosenfeld, corner_peaks, - corner_shi_tomasi, hessian_matrix, - hessian_matrix_det, hessian_matrix_eigvals, - peak_local_max, shape_index, - structure_tensor, - structure_tensor_eigenvalues) +from cucim.skimage.feature import ( + corner_foerstner, + corner_harris, + corner_kitchen_rosenfeld, + corner_peaks, + corner_shi_tomasi, + hessian_matrix, + hessian_matrix_det, + hessian_matrix_eigvals, + peak_local_max, + shape_index, + structure_tensor, + structure_tensor_eigenvalues, +) from cucim.skimage.feature.corner import _symmetric_image @@ -26,15 +33,15 @@ def im3d(): r = 10 pad_width = 10 im3 = draw.ellipsoid(r, r, r) - im3 = np.pad(im3, pad_width, mode='constant').astype(np.uint8) + im3 = np.pad(im3, pad_width, mode="constant").astype(np.uint8) return cp.asarray(im3) -@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64]) def test_structure_tensor(dtype): square = cp.zeros((5, 5), dtype=dtype) square[2, 2] = 1 - Arr, Arc, Acc = structure_tensor(square, sigma=0.1, order='rc') + Arr, Arc, Acc = structure_tensor(square, sigma=0.1, order="rc") out_dtype = _supported_float_type(dtype) assert all(a.dtype == out_dtype for a in (Arr, Arc, Acc)) # fmt: off @@ -56,7 +63,7 @@ def 
test_structure_tensor(dtype): # fmt: on -@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64]) def test_structure_tensor_3d(dtype): cube = cp.zeros((5, 5, 5), dtype=dtype) cube[2, 2, 2] = 1 @@ -85,8 +92,8 @@ def test_structure_tensor_3d(dtype): def test_structure_tensor_3d_rc_only(): cube = cp.zeros((5, 5, 5)) with pytest.raises(ValueError): - structure_tensor(cube, sigma=0.1, order='xy') - A_elems_rc = structure_tensor(cube, sigma=0.1, order='rc') + structure_tensor(cube, sigma=0.1, order="xy") + A_elems_rc = structure_tensor(cube, sigma=0.1, order="rc") A_elems_none = structure_tensor(cube, sigma=0.1) for a_rc, a_none in zip(A_elems_rc, A_elems_none): assert_array_equal(a_rc, a_none) @@ -96,37 +103,38 @@ def test_structure_tensor_orders(): square = cp.zeros((5, 5)) square[2, 2] = 1 A_elems_default = structure_tensor(square, sigma=0.1) - A_elems_xy = structure_tensor(square, sigma=0.1, order='xy') - A_elems_rc = structure_tensor(square, sigma=0.1, order='rc') + A_elems_xy = structure_tensor(square, sigma=0.1, order="xy") + A_elems_rc = structure_tensor(square, sigma=0.1, order="rc") for elem_rc, elem_def in zip(A_elems_rc, A_elems_default): assert_array_equal(elem_rc, elem_def) for elem_xy, elem_def in zip(A_elems_xy, A_elems_default[::-1]): assert_array_equal(elem_xy, elem_def) -@pytest.mark.parametrize('ndim', [2, 3]) +@pytest.mark.parametrize("ndim", [2, 3]) def test_structure_tensor_sigma(ndim): img = cp.zeros((5,) * ndim) img[[2] * ndim] = 1 - A_default = structure_tensor(img, sigma=0.1, order='rc') - A_tuple = structure_tensor(img, sigma=(0.1,) * ndim, order='rc') - A_list = structure_tensor(img, sigma=[0.1] * ndim, order='rc') + A_default = structure_tensor(img, sigma=0.1, order="rc") + A_tuple = structure_tensor(img, sigma=(0.1,) * ndim, order="rc") + A_list = structure_tensor(img, sigma=[0.1] * ndim, order="rc") for elem_tup, elem_def in zip(A_tuple, A_default): assert_array_equal(elem_tup, elem_def) for elem_list, elem_def in zip(A_list, A_default): assert_array_equal(elem_list, elem_def) with pytest.raises(ValueError): - structure_tensor(img, sigma=(0.1,) * (ndim - 1), order='rc') + structure_tensor(img, sigma=(0.1,) * (ndim - 1), order="rc") with pytest.raises(ValueError): - structure_tensor(img, sigma=[0.1] * (ndim + 1), order='rc') + structure_tensor(img, sigma=[0.1] * (ndim + 1), order="rc") -@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64]) def test_hessian_matrix(dtype): square = cp.zeros((5, 5), dtype=dtype) square[2, 2] = 4 - Hrr, Hrc, Hcc = hessian_matrix(square, sigma=0.1, order="rc", - use_gaussian_derivatives=False) + Hrr, Hrc, Hcc = hessian_matrix( + square, sigma=0.1, order="rc", use_gaussian_derivatives=False + ) out_dtype = _supported_float_type(dtype) assert all(a.dtype == out_dtype for a in (Hrr, Hrc, Hcc)) # fmt: off @@ -155,18 +163,24 @@ def test_hessian_matrix(dtype): hessian_matrix(square, sigma=0.1, order="rc") -@pytest.mark.parametrize('use_gaussian_derivatives', [False, True]) +@pytest.mark.parametrize("use_gaussian_derivatives", [False, True]) def test_hessian_matrix_order(use_gaussian_derivatives): square = cp.zeros((5, 5), dtype=float) square[2, 2] = 4 Hxx, Hxy, Hyy = hessian_matrix( - square, sigma=0.1, order="xy", - use_gaussian_derivatives=use_gaussian_derivatives) + square, + sigma=0.1, + order="xy", + use_gaussian_derivatives=use_gaussian_derivatives, + ) Hrr, 
Hrc, Hcc = hessian_matrix( - square, sigma=0.1, order="rc", - use_gaussian_derivatives=use_gaussian_derivatives) + square, + sigma=0.1, + order="rc", + use_gaussian_derivatives=use_gaussian_derivatives, + ) # verify results are equivalent, just reversed in order cp.testing.assert_allclose(Hxx, Hcc, atol=1e-30) @@ -177,9 +191,12 @@ def test_hessian_matrix_order(use_gaussian_derivatives): def test_hessian_matrix_3d(): cube = cp.zeros((5, 5, 5)) cube[2, 2, 2] = 4 - Hs = hessian_matrix(cube, sigma=0.1, order='rc', - use_gaussian_derivatives=False) - assert len(Hs) == 6, (f"incorrect number of Hessian images ({len(Hs)}) for 3D") # noqa + Hs = hessian_matrix( + cube, sigma=0.1, order="rc", use_gaussian_derivatives=False + ) + assert ( + len(Hs) == 6 + ), f"incorrect number of Hessian images ({len(Hs)}) for 3D" # noqa # fmt: off assert_array_almost_equal( Hs[2][:, 2, :], cp.asarray([[0, 0, 0, 0, 0], # noqa @@ -190,59 +207,82 @@ def test_hessian_matrix_3d(): # fmt: on -@pytest.mark.parametrize('use_gaussian_derivatives', [False, True]) +@pytest.mark.parametrize("use_gaussian_derivatives", [False, True]) def test_hessian_matrix_3d_xy(use_gaussian_derivatives): - img = cp.ones((5, 5, 5)) # order="xy" is only permitted for 2D with pytest.raises(ValueError): - hessian_matrix(img, sigma=0.1, order="xy", - use_gaussian_derivatives=use_gaussian_derivatives) + hessian_matrix( + img, + sigma=0.1, + order="xy", + use_gaussian_derivatives=use_gaussian_derivatives, + ) with pytest.raises(ValueError): - hessian_matrix(img, sigma=0.1, order='nonexistant', - use_gaussian_derivatives=use_gaussian_derivatives) + hessian_matrix( + img, + sigma=0.1, + order="nonexistent", + use_gaussian_derivatives=use_gaussian_derivatives, + ) -@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64]) def test_structure_tensor_eigenvalues(dtype): square = cp.zeros((5, 5), dtype=dtype) square[2, 2] = 1 - A_elems = structure_tensor(square, sigma=0.1, order='rc') + A_elems = structure_tensor(square, sigma=0.1, order="rc") l1, l2 = structure_tensor_eigenvalues(A_elems) out_dtype = _supported_float_type(dtype) assert all(a.dtype == out_dtype for a in (l1, l2)) - assert_array_equal(l1, cp.asarray([[0, 0, 0, 0, 0], - [0, 2, 4, 2, 0], - [0, 4, 0, 4, 0], - [0, 2, 4, 2, 0], - [0, 0, 0, 0, 0]])) - assert_array_equal(l2, cp.asarray([[0, 0, 0, 0, 0], - [0, 0, 0, 0, 0], - [0, 0, 0, 0, 0], - [0, 0, 0, 0, 0], - [0, 0, 0, 0, 0]])) + assert_array_equal( + l1, + cp.asarray( + [ + [0, 0, 0, 0, 0], + [0, 2, 4, 2, 0], + [0, 4, 0, 4, 0], + [0, 2, 4, 2, 0], + [0, 0, 0, 0, 0], + ] + ), + ) + assert_array_equal( + l2, + cp.asarray( + [ + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + ] + ), + ) def test_structure_tensor_eigenvalues_3d(): cube9 = cp.ones((9,) * 3, dtype=cp.uint8) cube7 = cp.ones((7,) * 3, dtype=cp.uint8) - image = pad(cube9, 5, mode='constant') * 1000 - boundary = (pad(cube9, 5, mode='constant') - - pad(cube7, 6, mode='constant')).astype(bool) + image = pad(cube9, 5, mode="constant") * 1000 + boundary = ( + pad(cube9, 5, mode="constant") - pad(cube7, 6, mode="constant") + ).astype(bool) A_elems = structure_tensor(image, sigma=0.1) e0, e1, e2 = structure_tensor_eigenvalues(A_elems) # e0 should detect facets assert np.all(e0[boundary] != 0) -@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64]) def 
test_hessian_matrix_eigvals(dtype): square = cp.zeros((5, 5), dtype=dtype) square[2, 2] = 4 - H = hessian_matrix(square, sigma=0.1, order='rc', - use_gaussian_derivatives=False) + H = hessian_matrix( + square, sigma=0.1, order="rc", use_gaussian_derivatives=False + ) l1, l2 = hessian_matrix_eigvals(H) out_dtype = _supported_float_type(dtype) assert all(a.dtype == out_dtype for a in (l1, l2)) @@ -260,7 +300,7 @@ def test_hessian_matrix_eigvals(dtype): # fmt: on -@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64]) def test_hessian_matrix_eigvals_3d(im3d, dtype): im3d = im3d.astype(dtype, copy=False) H = hessian_matrix(im3d, use_gaussian_derivatives=False) @@ -297,10 +337,8 @@ def _reference_eigvals_computation(S_elems): return eigs -@pytest.mark.parametrize( - 'shape', [(64, 64), (512, 1024), (8, 16, 24)] -) -@pytest.mark.parametrize('dtype', [np.float32, np.float64]) +@pytest.mark.parametrize("shape", [(64, 64), (512, 1024), (8, 16, 24)]) +@pytest.mark.parametrize("dtype", [np.float32, np.float64]) def test_custom_eigvals_kernels_vs_linalg_eigvalsh(shape, dtype): rng = cp.random.default_rng(seed=5) img = rng.integers(0, 256, shape) @@ -312,7 +350,7 @@ def test_custom_eigvals_kernels_vs_linalg_eigvalsh(shape, dtype): cp.testing.assert_allclose(evs1, evs2, atol=atol) -@pytest.mark.parametrize('approximate', [False, True]) +@pytest.mark.parametrize("approximate", [False, True]) def test_hessian_matrix_det(approximate): image = cp.zeros((5, 5)) image[2, 2] = 1 @@ -320,10 +358,10 @@ def test_hessian_matrix_det(approximate): assert_array_almost_equal(det, 0, decimal=3) -@pytest.mark.parametrize('approximate', [False, True]) -@pytest.mark.parametrize('ndim', [2, 3]) +@pytest.mark.parametrize("approximate", [False, True]) +@pytest.mark.parametrize("ndim", [2, 3]) @pytest.mark.parametrize( - 'dtype', [cp.uint8, cp.float16, cp.float32, cp.float64] + "dtype", [cp.uint8, cp.float16, cp.float32, cp.float64] ) def test_hessian_matrix_det_vs_skimage(approximate, ndim, dtype): if approximate and ndim != 2: @@ -333,7 +371,7 @@ def test_hessian_matrix_det_vs_skimage(approximate, ndim, dtype): else: sigma = 1.5 rng = cp.random.default_rng(5) - if np.dtype(dtype).kind in 'iu': + if np.dtype(dtype).kind in "iu": image = rng.integers(0, 256, (16,) * ndim, dtype=dtype) else: image = rng.standard_normal((16,) * ndim).astype(dtype=dtype) @@ -347,7 +385,7 @@ def test_hessian_matrix_det_vs_skimage(approximate, ndim, dtype): cp.testing.assert_allclose(det, expected, rtol=tol, atol=tol) -@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64]) def test_hessian_matrix_det_3d(im3d, dtype): im3d = im3d.astype(dtype, copy=False) D = hessian_matrix_det(im3d) @@ -405,45 +443,48 @@ def test_square_image(): # assert len(results) == 57 # Harris - results = peak_local_max(corner_harris(im, method='k'), - min_distance=10, threshold_rel=0) + results = peak_local_max( + corner_harris(im, method="k"), min_distance=10, threshold_rel=0 + ) # interest at corner assert len(results) == 1 - results = peak_local_max(corner_harris(im, method='eps'), - min_distance=10, threshold_rel=0) + results = peak_local_max( + corner_harris(im, method="eps"), min_distance=10, threshold_rel=0 + ) # interest at corner assert len(results) == 1 # Shi-Tomasi - results = peak_local_max(corner_shi_tomasi(im), - min_distance=10, threshold_rel=0) + results = peak_local_max( + 
corner_shi_tomasi(im), min_distance=10, threshold_rel=0 + ) # interest at corner assert len(results) == 1 -@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64]) @pytest.mark.parametrize( - 'func', + "func", [ # corner_moravec, corner_harris, corner_shi_tomasi, corner_kitchen_rosenfeld, - ] + ], ) def test_corner_dtype(dtype, func): im = cp.zeros((50, 50), dtype=dtype) - im[:25, :25] = 1. + im[:25, :25] = 1.0 out_dtype = _supported_float_type(dtype) corners = func(im) assert corners.dtype == out_dtype -@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64]) def test_corner_foerstner_dtype(dtype): im = cp.zeros((50, 50), dtype=dtype) - im[:25, :25] = 1. + im[:25, :25] = 1.0 out_dtype = _supported_float_type(dtype) assert all(arr.dtype == out_dtype for arr in corner_foerstner(im)) @@ -451,7 +492,7 @@ def test_corner_foerstner_dtype(dtype): def test_noisy_square_image(): im = cp.zeros((50, 50)).astype(float) im[:25, :25] = 1.0 - np.random.seed(seed=1234) # result is specic to this NumPy seed + np.random.seed(seed=1234) # result is specific to this NumPy seed im = im + cp.asarray(np.random.uniform(size=im.shape)) * 0.2 # # Moravec @@ -461,16 +502,19 @@ def test_noisy_square_image(): # assert results.any() # Harris - results = peak_local_max(corner_harris(im, method='k'), - min_distance=10, threshold_rel=0) + results = peak_local_max( + corner_harris(im, method="k"), min_distance=10, threshold_rel=0 + ) assert len(results) == 1 - results = peak_local_max(corner_harris(im, method='eps'), - min_distance=10, threshold_rel=0) + results = peak_local_max( + corner_harris(im, method="eps"), min_distance=10, threshold_rel=0 + ) assert len(results) == 1 # Shi-Tomasi - results = peak_local_max(corner_shi_tomasi(im, sigma=1.5), - min_distance=10, threshold_rel=0) + results = peak_local_max( + corner_shi_tomasi(im, sigma=1.5), min_distance=10, threshold_rel=0 + ) assert len(results) == 1 @@ -482,14 +526,16 @@ def test_squared_dot(): # Moravec fails # Harris - results = peak_local_max(corner_harris(im), - min_distance=10, threshold_rel=0) + results = peak_local_max( + corner_harris(im), min_distance=10, threshold_rel=0 + ) assert (results == cp.asarray([[6, 6]])).all() # Shi-Tomasi - results = peak_local_max(corner_shi_tomasi(im), - min_distance=10, threshold_rel=0) + results = peak_local_max( + corner_shi_tomasi(im), min_distance=10, threshold_rel=0 + ) assert (results == cp.asarray([[6, 6]])).all() @@ -584,8 +630,9 @@ def test_num_peaks(): for i in range(20): n = cp.random.randint(1, 21) - results = peak_local_max(img_corners, - min_distance=10, threshold_rel=0, num_peaks=n) + results = peak_local_max( + img_corners, min_distance=10, threshold_rel=0, num_peaks=n + ) assert results.shape[0] == n @@ -594,19 +641,22 @@ def test_corner_peaks(): response[2:5, 2:5] = 1 response[8:10, 0:2] = 1 - corners = corner_peaks(response, exclude_border=False, min_distance=10, - threshold_rel=0) + corners = corner_peaks( + response, exclude_border=False, min_distance=10, threshold_rel=0 + ) assert corners.shape == (1, 2) - corners = corner_peaks(response, exclude_border=False, min_distance=5, - threshold_rel=0) + corners = corner_peaks( + response, exclude_border=False, min_distance=5, threshold_rel=0 + ) assert corners.shape == (2, 2) corners = corner_peaks(response, exclude_border=False, min_distance=1) assert corners.shape == (5, 2) - corners = 
corner_peaks(response, exclude_border=False, min_distance=1, - indices=False) + corners = corner_peaks( + response, exclude_border=False, min_distance=1, indices=False + ) assert cp.sum(corners) == 5 diff --git a/python/cucim/src/cucim/skimage/feature/tests/test_daisy.py b/python/cucim/src/cucim/skimage/feature/tests/test_daisy.py index 9b4aa7335..d28e07a0a 100644 --- a/python/cucim/src/cucim/skimage/feature/tests/test_daisy.py +++ b/python/cucim/src/cucim/skimage/feature/tests/test_daisy.py @@ -95,7 +95,7 @@ def test_daisy_normalization(): for i in range(descs.shape[0]): for j in range(descs.shape[1]): for k in range(0, desc_dims, orientations): - dtmp = descs[i, j, k:k + orientations] + dtmp = descs[i, j, k : k + orientations] assert_array_almost_equal(cp.sqrt(cp.sum(dtmp * dtmp)), 1) img = cp.zeros((50, 50)) diff --git a/python/cucim/src/cucim/skimage/feature/tests/test_match.py b/python/cucim/src/cucim/skimage/feature/tests/test_match.py index 8bee76b53..2ce8b21ec 100644 --- a/python/cucim/src/cucim/skimage/feature/tests/test_match.py +++ b/python/cucim/src/cucim/skimage/feature/tests/test_match.py @@ -3,6 +3,7 @@ import cupy as cp from cupy.testing import assert_array_equal from skimage import data + # TODO: change to cucim.skimage.feature.BRIEF once implemented from skimage.feature import BRIEF @@ -14,19 +15,23 @@ def test_binary_descriptors_unequal_descriptor_sizes_error(): """Sizes of descriptors of keypoints to be matched should be equal.""" + # fmt: off descs1 = cp.array([[True, True, False, True], [False, True, False, True]]) descs2 = cp.array([[True, False, False, True, False], [False, True, True, True, False]]) + # fmt: on with testing.raises(ValueError): match_descriptors(descs1, descs2) def test_binary_descriptors(): + # fmt: off descs1 = cp.array([[True, True, False, True, True], [False, True, False, True, True]]) descs2 = cp.array([[True, False, False, True, False], [False, False, True, True, True]]) + # fmt: on matches = match_descriptors(descs1, descs2) assert_array_equal(matches, [[0, 0], [1, 1]]) @@ -44,37 +49,46 @@ def test_binary_descriptors_rotation_crosscheck_false(): extractor = BRIEF(descriptor_size=512) - keypoints1 = corner_peaks(corner_harris(img), min_distance=5, - threshold_abs=0, threshold_rel=0.1) + keypoints1 = corner_peaks( + corner_harris(img), min_distance=5, threshold_abs=0, threshold_rel=0.1 + ) extractor.extract(cp.asnumpy(img), cp.asnumpy(keypoints1)) descriptors1 = cp.array(extractor.descriptors) - keypoints2 = corner_peaks(corner_harris(rotated_img), min_distance=5, - threshold_abs=0, threshold_rel=0.1) + keypoints2 = corner_peaks( + corner_harris(rotated_img), + min_distance=5, + threshold_abs=0, + threshold_rel=0.1, + ) extractor.extract(cp.asnumpy(rotated_img), cp.asnumpy(keypoints2)) descriptors2 = cp.array(extractor.descriptors) matches = match_descriptors(descriptors1, descriptors2, cross_check=False) exp_matches1 = cp.arange(47) + # fmt: off exp_matches2 = cp.array([0, 2, 1, 3, 4, 5, 7, 8, 14, 9, 11, 13, 23, 15, 16, 22, 17, 19, 37, 18, 24, 27, 30, 25, 26, 32, 28, 35, 37, 42, 29, 38, 33, 40, 36, 39, 10, 36, 43, 15, 35, 41, 6, 37, 32, 24, 8]) + # fmt: on assert_array_equal(matches[:, 0], exp_matches1) assert_array_equal(matches[:, 1], exp_matches2) # minkowski takes a different code path, therefore we test it explicitly - matches = match_descriptors(descriptors1, descriptors2, - metric='minkowski', cross_check=False) + matches = match_descriptors( + descriptors1, descriptors2, metric="minkowski", cross_check=False + ) 
assert_array_equal(matches[:, 0], exp_matches1) assert_array_equal(matches[:, 1], exp_matches2) # it also has an extra parameter - matches = match_descriptors(descriptors1, descriptors2, - metric='minkowski', p=4, cross_check=False) + matches = match_descriptors( + descriptors1, descriptors2, metric="minkowski", p=4, cross_check=False + ) assert_array_equal(matches[:, 0], exp_matches1) assert_array_equal(matches[:, 1], exp_matches2) @@ -92,18 +106,24 @@ def test_binary_descriptors_rotation_crosscheck_true(): extractor = BRIEF(descriptor_size=512) - keypoints1 = corner_peaks(corner_harris(img), min_distance=5, - threshold_abs=0, threshold_rel=0.1) + keypoints1 = corner_peaks( + corner_harris(img), min_distance=5, threshold_abs=0, threshold_rel=0.1 + ) extractor.extract(cp.asnumpy(img), cp.asnumpy(keypoints1)) descriptors1 = cp.array(extractor.descriptors) - keypoints2 = corner_peaks(corner_harris(rotated_img), min_distance=5, - threshold_abs=0, threshold_rel=0.1) + keypoints2 = corner_peaks( + corner_harris(rotated_img), + min_distance=5, + threshold_abs=0, + threshold_rel=0.1, + ) extractor.extract(cp.asnumpy(rotated_img), cp.asnumpy(keypoints2)) descriptors2 = cp.array(extractor.descriptors) matches = match_descriptors(descriptors1, descriptors2, cross_check=True) + # fmt: off exp_matches1 = cp.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 19, 20, 21, 22, 23, 24, 26, 27, 28, 29, 30, 31, 32, 33, @@ -112,6 +132,7 @@ def test_binary_descriptors_rotation_crosscheck_true(): 23, 15, 16, 22, 17, 19, 18, 24, 27, 30, 25, 26, 28, 35, 37, 42, 29, 38, 33, 40, 36, 43, 41, 6]) + # fmt: on assert_array_equal(matches[:, 0], exp_matches1) assert_array_equal(matches[:, 1], exp_matches2) @@ -122,23 +143,32 @@ def test_max_distance(): descs1[0, :] = 1 - matches = match_descriptors(descs1, descs2, metric='euclidean', - max_distance=0.1, cross_check=False) + matches = match_descriptors( + descs1, descs2, metric="euclidean", max_distance=0.1, cross_check=False + ) assert len(matches) == 9 - matches = match_descriptors(descs1, descs2, metric='euclidean', - max_distance=math.sqrt(128.1), - cross_check=False) + matches = match_descriptors( + descs1, + descs2, + metric="euclidean", + max_distance=math.sqrt(128.1), + cross_check=False, + ) assert len(matches) == 10 - matches = match_descriptors(descs1, descs2, metric='euclidean', - max_distance=0.1, - cross_check=True) + matches = match_descriptors( + descs1, descs2, metric="euclidean", max_distance=0.1, cross_check=True + ) assert_array_equal(matches, [[1, 0]]) - matches = match_descriptors(descs1, descs2, metric='euclidean', - max_distance=math.sqrt(128.1), - cross_check=True) + matches = match_descriptors( + descs1, + descs2, + metric="euclidean", + max_distance=math.sqrt(128.1), + cross_check=True, + ) assert_array_equal(matches, [[1, 0]]) @@ -148,40 +178,48 @@ def test_max_ratio(): descs2[0] = 5.0 - matches = match_descriptors(descs1, descs2, metric='euclidean', - max_ratio=1.0, cross_check=False) + matches = match_descriptors( + descs1, descs2, metric="euclidean", max_ratio=1.0, cross_check=False + ) assert_array_equal(len(matches), 10) - matches = match_descriptors(descs1, descs2, metric='euclidean', - max_ratio=0.6, cross_check=False) + matches = match_descriptors( + descs1, descs2, metric="euclidean", max_ratio=0.6, cross_check=False + ) assert_array_equal(len(matches), 10) - matches = match_descriptors(descs1, descs2, metric='euclidean', - max_ratio=0.5, cross_check=False) + matches = match_descriptors( + descs1, descs2, 
metric="euclidean", max_ratio=0.5, cross_check=False + ) assert_array_equal(len(matches), 9) descs1[0] = 7.5 - matches = match_descriptors(descs1, descs2, metric='euclidean', - max_ratio=0.5, cross_check=False) + matches = match_descriptors( + descs1, descs2, metric="euclidean", max_ratio=0.5, cross_check=False + ) assert_array_equal(len(matches), 9) descs2 = 10 * cp.arange(1)[:, None].astype(cp.float32) - matches = match_descriptors(descs1, descs2, metric='euclidean', - max_ratio=1.0, cross_check=False) + matches = match_descriptors( + descs1, descs2, metric="euclidean", max_ratio=1.0, cross_check=False + ) assert_array_equal(len(matches), 10) - matches = match_descriptors(descs1, descs2, metric='euclidean', - max_ratio=0.5, cross_check=False) + matches = match_descriptors( + descs1, descs2, metric="euclidean", max_ratio=0.5, cross_check=False + ) assert_array_equal(len(matches), 10) descs1 = 10 * cp.arange(1)[:, None].astype(cp.float32) - matches = match_descriptors(descs1, descs2, metric='euclidean', - max_ratio=1.0, cross_check=False) + matches = match_descriptors( + descs1, descs2, metric="euclidean", max_ratio=1.0, cross_check=False + ) assert_array_equal(len(matches), 1) - matches = match_descriptors(descs1, descs2, metric='euclidean', - max_ratio=0.5, cross_check=False) + matches = match_descriptors( + descs1, descs2, metric="euclidean", max_ratio=0.5, cross_check=False + ) assert_array_equal(len(matches), 1) diff --git a/python/cucim/src/cucim/skimage/feature/tests/test_peak.py b/python/cucim/src/cucim/skimage/feature/tests/test_peak.py index cda636b39..58b5e490d 100644 --- a/python/cucim/src/cucim/skimage/feature/tests/test_peak.py +++ b/python/cucim/src/cucim/skimage/feature/tests/test_peak.py @@ -130,20 +130,32 @@ def test_num_peaks_tot_vs_labels_4quadrants(self): image = cp.asarray(np.random.uniform(size=(20, 30))) i, j = cp.mgrid[0:20, 0:30] labels = 1 + (i >= 10) + (j >= 15) * 2 - result = peak.peak_local_max(image, labels=labels, - min_distance=1, threshold_rel=0, - num_peaks=cp.inf, - num_peaks_per_label=2) + result = peak.peak_local_max( + image, + labels=labels, + min_distance=1, + threshold_rel=0, + num_peaks=cp.inf, + num_peaks_per_label=2, + ) assert len(result) == 8 - result = peak.peak_local_max(image, labels=labels, - min_distance=1, threshold_rel=0, - num_peaks=cp.inf, - num_peaks_per_label=1) + result = peak.peak_local_max( + image, + labels=labels, + min_distance=1, + threshold_rel=0, + num_peaks=cp.inf, + num_peaks_per_label=1, + ) assert len(result) == 4 - result = peak.peak_local_max(image, labels=labels, - min_distance=1, threshold_rel=0, - num_peaks=2, - num_peaks_per_label=2) + result = peak.peak_local_max( + image, + labels=labels, + min_distance=1, + threshold_rel=0, + num_peaks=2, + num_peaks_per_label=2, + ) assert len(result) == 2 def test_num_peaks3D(self): @@ -168,9 +180,14 @@ def test_reorder_labels(self): image[imin:imax, jmin:jmax], footprint=footprint ) expected = expected == image - peak_idx = peak.peak_local_max(image, labels=labels, min_distance=1, - threshold_rel=0, footprint=footprint, - exclude_border=False) + peak_idx = peak.peak_local_max( + image, + labels=labels, + min_distance=1, + threshold_rel=0, + footprint=footprint, + exclude_border=False, + ) result = cp.zeros_like(expected, dtype=bool) result[tuple(peak_idx.T)] = True assert (result == expected).all() @@ -189,9 +206,14 @@ def test_indices_with_labels(self): ) expected = cp.stack(cp.nonzero(expected == image), axis=-1) expected = 
expected[cp.argsort(image[tuple(expected.T)])[::-1]] - result = peak.peak_local_max(image, labels=labels, min_distance=1, - threshold_rel=0, footprint=footprint, - exclude_border=False) + result = peak.peak_local_max( + image, + labels=labels, + min_distance=1, + threshold_rel=0, + footprint=footprint, + exclude_border=False, + ) result = result[cp.argsort(image[tuple(result.T)])[::-1]] assert (result == expected).all() @@ -201,24 +223,23 @@ def test_ndarray_exclude_border(self): nd_image[3, 0, 0] = 1 nd_image[2, 2, 2] = 1 expected = cp.array([[2, 2, 2]], dtype=int) - expectedNoBorder = cp.array([[0, 0, 1], [2, 2, 2], [3, 0, 0]], - dtype=int) - result = peak.peak_local_max(nd_image, min_distance=2, - exclude_border=2) + expectedNoBorder = cp.array( + [[0, 0, 1], [2, 2, 2], [3, 0, 0]], dtype=int + ) + result = peak.peak_local_max(nd_image, min_distance=2, exclude_border=2) assert_array_equal(result, expected) # Check that bools work as expected assert_array_equal( peak.peak_local_max(nd_image, min_distance=2, exclude_border=2), - peak.peak_local_max(nd_image, min_distance=2, exclude_border=True) + peak.peak_local_max(nd_image, min_distance=2, exclude_border=True), ) assert_array_equal( peak.peak_local_max(nd_image, min_distance=2, exclude_border=0), - peak.peak_local_max(nd_image, min_distance=2, exclude_border=False) + peak.peak_local_max(nd_image, min_distance=2, exclude_border=False), ) # Check both versions with no border - result = peak.peak_local_max(nd_image, min_distance=2, - exclude_border=0) + result = peak.peak_local_max(nd_image, min_distance=2, exclude_border=0) assert_array_equal(result, expectedNoBorder) peak_idx = peak.peak_local_max(nd_image, exclude_border=False) result = cp.zeros_like(nd_image, dtype=bool) @@ -228,18 +249,25 @@ def test_ndarray_exclude_border(self): def test_empty(self): image = cp.zeros((10, 20)) labels = cp.zeros((10, 20), int) - result = peak.peak_local_max(image, labels=labels, - footprint=cp.ones((3, 3), bool), - min_distance=1, threshold_rel=0, - exclude_border=False) + result = peak.peak_local_max( + image, + labels=labels, + footprint=cp.ones((3, 3), bool), + min_distance=1, + threshold_rel=0, + exclude_border=False, + ) assert result.shape == (0, image.ndim) def test_empty_non2d_indices(self): image = cp.zeros((10, 10, 10)) - result = peak.peak_local_max(image, - footprint=cp.ones((3, 3, 3), bool), - min_distance=1, threshold_rel=0, - exclude_border=False) + result = peak.peak_local_max( + image, + footprint=cp.ones((3, 3, 3), bool), + min_distance=1, + threshold_rel=0, + exclude_border=False, + ) assert result.shape == (0, image.ndim) def test_one_point(self): @@ -247,10 +275,14 @@ def test_one_point(self): labels = cp.zeros((10, 20), int) image[5, 5] = 1 labels[5, 5] = 1 - peak_idx = peak.peak_local_max(image, labels=labels, - footprint=cp.ones((3, 3), bool), - min_distance=1, threshold_rel=0, - exclude_border=False) + peak_idx = peak.peak_local_max( + image, + labels=labels, + footprint=cp.ones((3, 3), bool), + min_distance=1, + threshold_rel=0, + exclude_border=False, + ) result = np.zeros_like(image, dtype=bool) result[tuple(peak_idx.T)] = True assert cp.all(result == (labels == 1)) @@ -261,10 +293,14 @@ def test_adjacent_and_same(self): image[5, 5:6] = 1 labels[5, 5:6] = 1 expected = np.stack(np.where(labels == 1), axis=-1) - result = peak.peak_local_max(image, labels=labels, - footprint=cp.ones((3, 3), bool), - min_distance=1, threshold_rel=0, - exclude_border=False) + result = peak.peak_local_max( + image, + labels=labels, + 
footprint=cp.ones((3, 3), bool), + min_distance=1, + threshold_rel=0, + exclude_border=False, + ) assert_array_equal(result, expected) def test_adjacent_and_different(self): @@ -275,14 +311,22 @@ def test_adjacent_and_different(self): labels[5, 5:6] = 1 expected = image == 1 expected = cp.stack(cp.where(image == 1), axis=-1) - result = peak.peak_local_max(image, labels=labels, - footprint=cp.ones((3, 3), bool), - min_distance=1, threshold_rel=0, - exclude_border=False) + result = peak.peak_local_max( + image, + labels=labels, + footprint=cp.ones((3, 3), bool), + min_distance=1, + threshold_rel=0, + exclude_border=False, + ) assert_array_equal(result, expected) - result = peak.peak_local_max(image, labels=labels, - min_distance=1, threshold_rel=0, - exclude_border=False) + result = peak.peak_local_max( + image, + labels=labels, + min_distance=1, + threshold_rel=0, + exclude_border=False, + ) assert_array_equal(result, expected) def test_not_adjacent_and_different(self): @@ -292,10 +336,14 @@ def test_not_adjacent_and_different(self): image[5, 8] = 0.5 labels[image > 0] = 1 expected = cp.stack(cp.where(labels == 1), axis=-1) - result = peak.peak_local_max(image, labels=labels, - footprint=cp.ones((3, 3), bool), - min_distance=1, threshold_rel=0, - exclude_border=False) + result = peak.peak_local_max( + image, + labels=labels, + footprint=cp.ones((3, 3), bool), + min_distance=1, + threshold_rel=0, + exclude_border=False, + ) assert_array_equal(result, expected) def test_two_objects(self): @@ -306,10 +354,14 @@ def test_two_objects(self): labels[5, 5] = 1 labels[5, 15] = 2 expected = cp.stack(cp.where(labels > 0), axis=-1) - result = peak.peak_local_max(image, labels=labels, - footprint=cp.ones((3, 3), bool), - min_distance=1, threshold_rel=0, - exclude_border=False) + result = peak.peak_local_max( + image, + labels=labels, + footprint=cp.ones((3, 3), bool), + min_distance=1, + threshold_rel=0, + exclude_border=False, + ) assert_array_equal(result, expected) def test_adjacent_different_objects(self): @@ -320,10 +372,14 @@ def test_adjacent_different_objects(self): labels[5, 5] = 1 labels[5, 6] = 2 expected = cp.stack(cp.where(labels > 0), axis=-1) - result = peak.peak_local_max(image, labels=labels, - footprint=cp.ones((3, 3), bool), - min_distance=1, threshold_rel=0, - exclude_border=False) + result = peak.peak_local_max( + image, + labels=labels, + footprint=cp.ones((3, 3), bool), + min_distance=1, + threshold_rel=0, + exclude_border=False, + ) assert_array_equal(result, expected) def test_four_quadrants(self): @@ -339,11 +395,14 @@ def test_four_quadrants(self): image[imin:imax, jmin:jmax], footprint=footprint ) expected = expected == image - peak_idx = peak.peak_local_max(image, labels=labels, - footprint=footprint, - min_distance=1, - threshold_rel=0, - exclude_border=False) + peak_idx = peak.peak_local_max( + image, + labels=labels, + footprint=footprint, + min_distance=1, + threshold_rel=0, + exclude_border=False, + ) result = cp.zeros_like(image, dtype=bool) result[tuple(peak_idx.T)] = True assert cp.all(result == expected) @@ -354,17 +413,21 @@ def test_disk(self): """ image = cp.asarray(np.random.uniform(size=(10, 20))) footprint = cp.asarray([[1]]) - peak_idx = peak.peak_local_max(image, labels=cp.ones((10, 20), int), - footprint=footprint, - min_distance=1, threshold_rel=0, - threshold_abs=-1, - exclude_border=False) + peak_idx = peak.peak_local_max( + image, + labels=cp.ones((10, 20), int), + footprint=footprint, + min_distance=1, + threshold_rel=0, + threshold_abs=-1, + 
exclude_border=False, + ) result = cp.zeros_like(image, dtype=bool) result[tuple(peak_idx.T)] = True assert cp.all(result) - peak_idx = peak.peak_local_max(image, footprint=footprint, - threshold_abs=-1, - exclude_border=False) + peak_idx = peak.peak_local_max( + image, footprint=footprint, threshold_abs=-1, exclude_border=False + ) result = cp.zeros_like(image, dtype=bool) result[tuple(peak_idx.T)] = True assert cp.all(result) @@ -381,13 +444,14 @@ def test_3D(self): peak.peak_local_max(image, min_distance=6, threshold_rel=0), [[15, 15, 15]], ) - assert sorted(peak.peak_local_max(image, min_distance=10, - threshold_rel=0, - exclude_border=False).tolist()) == \ - [[5, 5, 5], [15, 15, 15]] - assert sorted(peak.peak_local_max(image, min_distance=5, - threshold_rel=0).tolist()) == \ - [[5, 5, 5], [15, 15, 15]] + assert sorted( + peak.peak_local_max( + image, min_distance=10, threshold_rel=0, exclude_border=False + ).tolist() + ) == [[5, 5, 5], [15, 15, 15]] + assert sorted( + peak.peak_local_max(image, min_distance=5, threshold_rel=0).tolist() + ) == [[5, 5, 5], [15, 15, 15]] def test_4D(self): image = cp.zeros((30, 30, 30, 30)) @@ -421,8 +485,10 @@ def test_threshold_rel_default(self): image[2, 2] = 0 with expected_warnings(["When min_distance < 1"]): - assert len(peak.peak_local_max(image, - min_distance=0)) == image.size - 1 + assert ( + len(peak.peak_local_max(image, min_distance=0)) + == image.size - 1 + ) def test_peak_at_border(self): image = cp.full((10, 10), -2) @@ -459,8 +525,10 @@ def test_exclude_border(indices): expected_peaks = 0 else: expected_peaks = 1 - assert len(peak.peak_local_max( - image, min_distance=1, exclude_border=True)) == expected_peaks + assert ( + len(peak.peak_local_max(image, min_distance=1, exclude_border=True)) + == expected_peaks + ) # exclude_border = (1, 0) means it will be found unless it's on the edge of # the first dimension. @@ -468,8 +536,9 @@ def test_exclude_border(indices): expected_peaks = 0 else: expected_peaks = 1 - assert len(peak.peak_local_max( - image, exclude_border=(1, 0))) == expected_peaks + assert ( + len(peak.peak_local_max(image, exclude_border=(1, 0))) == expected_peaks + ) # exclude_border = (0, 1) means it will be found unless it's on the edge of # the second dimension. @@ -477,8 +546,9 @@ def test_exclude_border(indices): expected_peaks = 0 else: expected_peaks = 1 - assert len(peak.peak_local_max( - image, exclude_border=(0, 1))) == expected_peaks + assert ( + len(peak.peak_local_max(image, exclude_border=(0, 1))) == expected_peaks + ) def test_exclude_border_errors(): @@ -495,7 +565,7 @@ def test_exclude_border_errors(): # exclude_border is a tuple of the right cardinality but contains # non-integer values. with pytest.raises(ValueError): - assert peak.peak_local_max(image, exclude_border=(1, 'a')) + assert peak.peak_local_max(image, exclude_border=(1, "a")) # exclude_border is a tuple of the right cardinality but contains a # negative value. 
@@ -523,7 +593,7 @@ def test_input_values_with_labels(): assert_array_equal(img, img_before) -class TestProminentPeaks(): +class TestProminentPeaks: def test_isolated_peaks(self): image = cp.zeros((15, 15)) x0, y0, i0 = (12, 8, 1) @@ -578,10 +648,14 @@ def test_input_labels_unmodified(self): image[5, 5] = 1 labels[5, 5] = 3 labelsin = labels.copy() - peak.peak_local_max(image, labels=labels, - footprint=cp.ones((3, 3), bool), - min_distance=1, threshold_rel=0, - exclude_border=False) + peak.peak_local_max( + image, + labels=labels, + footprint=cp.ones((3, 3), bool), + min_distance=1, + threshold_rel=0, + exclude_border=False, + ) assert cp.all(labels == labelsin) def test_many_objects(self): @@ -589,12 +663,13 @@ def test_many_objects(self): x, y = np.indices((500, 500)) x_c = x // 20 * 20 + 10 y_c = y // 20 * 20 + 10 - mask[(x - x_c) ** 2 + (y - y_c) ** 2 < 8 ** 2] = True + mask[(x - x_c) ** 2 + (y - y_c) ** 2 < 8**2] = True labels, num_objs = ndimage_cpu.label(mask) dist = ndimage_cpu.distance_transform_edt(mask) dist = cp.asarray(dist) labels = cp.asarray(labels) - local_max = peak.peak_local_max(dist, min_distance=20, - exclude_border=False, labels=labels) + local_max = peak.peak_local_max( + dist, min_distance=20, exclude_border=False, labels=labels + ) assert len(local_max) == 625 diff --git a/python/cucim/src/cucim/skimage/feature/tests/test_template.py b/python/cucim/src/cucim/skimage/feature/tests/test_template.py index 4b9050927..186b829e3 100644 --- a/python/cucim/src/cucim/skimage/feature/tests/test_template.py +++ b/python/cucim/src/cucim/skimage/feature/tests/test_template.py @@ -10,7 +10,7 @@ from cucim.skimage.morphology import diamond -@pytest.mark.parametrize('dtype', [cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype", [cp.float32, cp.float64]) def test_template(dtype): size = 100 # Float prefactors ensure that image range is between 0 and 1 @@ -19,7 +19,7 @@ def test_template(dtype): target = target.astype(dtype, copy=False) target_positions = [(50, 50), (200, 200)] for x, y in target_positions: - image[x:x + size, y:y + size] = target + image[x : x + size, y : y + size] = target np.random.seed(1) image += 0.1 * np.random.uniform(size=(400, 400)).astype(dtype, copy=False) image = cp.asarray(image) @@ -56,12 +56,12 @@ def test_normalization(): ipos, jpos = (2, 3) ineg, jneg = (12, 11) image = cp.full((N, N), 0.5) - image[ipos:ipos + n, jpos:jpos + n] = 1 - image[ineg:ineg + n, jneg:jneg + n] = 0 + image[ipos : ipos + n, jpos : jpos + n] = 1 + image[ineg : ineg + n, jneg : jneg + n] = 0 # white square with a black border template = cp.zeros((n + 2, n + 2)) - template[1:1 + n, 1:1 + n] = 1 + template[1 : 1 + n, 1 : 1 + n] = 1 result = match_template(image, template) @@ -117,12 +117,13 @@ def test_pad_input(): image = 0.5 * cp.ones((9, 19)) mid = slice(2, 7) image[mid, :3] -= template[:, -3:] # half min template centered at 0 - image[mid, 4:9] += template # full max template centered at 6 - image[mid, -9:-4] -= template # full min template centered at 12 + image[mid, 4:9] += template # full max template centered at 6 + image[mid, -9:-4] -= template # full min template centered at 12 image[mid, -3:] += template[:, :3] # half max template centered at 18 - result = match_template(image, template, pad_input=True, - constant_values=float(image.mean())) + result = match_template( + image, template, pad_input=True, constant_values=float(image.mean()) + ) # get the max and min results. 
sorted_result = cp.argsort(result.ravel()) diff --git a/python/cucim/src/cucim/skimage/filters/__init__.pyi b/python/cucim/src/cucim/skimage/filters/__init__.pyi index c7703e933..185f24eb3 100644 --- a/python/cucim/src/cucim/skimage/filters/__init__.pyi +++ b/python/cucim/src/cucim/skimage/filters/__init__.pyi @@ -1,51 +1,51 @@ __all__ = [ -'LPIFilter2D', -'apply_hysteresis_threshold', -'butterworth', -'correlate_sparse', -'difference_of_gaussians', -'farid', -'farid_h', -'farid_v', -'filter_forward', -'filter_inverse', -'frangi', -'gabor', -'gabor_kernel', -'gaussian', -'hessian', -'laplace', -'median', -'meijering', -'prewitt', -'prewitt_h', -'prewitt_v', -'rank_order', -'roberts', -'roberts_neg_diag', -'roberts_pos_diag', -'sato', -'scharr', -'scharr_h', -'scharr_v', -'sobel', -'sobel_h', -'sobel_v', -'threshold_isodata', -'threshold_li', -'threshold_local', -'threshold_mean', -'threshold_minimum', -'threshold_multiotsu', -'threshold_niblack', -'threshold_otsu', -'threshold_sauvola', -'threshold_triangle', -'threshold_yen', -'try_all_threshold', -'unsharp_mask', -'wiener', -'window' + "LPIFilter2D", + "apply_hysteresis_threshold", + "butterworth", + "correlate_sparse", + "difference_of_gaussians", + "farid", + "farid_h", + "farid_v", + "filter_forward", + "filter_inverse", + "frangi", + "gabor", + "gabor_kernel", + "gaussian", + "hessian", + "laplace", + "median", + "meijering", + "prewitt", + "prewitt_h", + "prewitt_v", + "rank_order", + "roberts", + "roberts_neg_diag", + "roberts_pos_diag", + "sato", + "scharr", + "scharr_h", + "scharr_v", + "sobel", + "sobel_h", + "sobel_v", + "threshold_isodata", + "threshold_li", + "threshold_local", + "threshold_mean", + "threshold_minimum", + "threshold_multiotsu", + "threshold_niblack", + "threshold_otsu", + "threshold_sauvola", + "threshold_triangle", + "threshold_yen", + "try_all_threshold", + "unsharp_mask", + "wiener", + "window", ] from ._fft_based import butterworth @@ -56,13 +56,38 @@ from ._rank_order import rank_order from ._sparse import correlate_sparse from ._unsharp_mask import unsharp_mask from ._window import window -from .edges import (farid, farid_h, farid_v, laplace, prewitt, prewitt_h, - prewitt_v, roberts, roberts_neg_diag, roberts_pos_diag, - scharr, scharr_h, scharr_v, sobel, sobel_h, sobel_v) +from .edges import ( + farid, + farid_h, + farid_v, + laplace, + prewitt, + prewitt_h, + prewitt_v, + roberts, + roberts_neg_diag, + roberts_pos_diag, + scharr, + scharr_h, + scharr_v, + sobel, + sobel_h, + sobel_v, +) from .lpi_filter import LPIFilter2D, filter_forward, filter_inverse, wiener from .ridges import frangi, hessian, meijering, sato -from .thresholding import (apply_hysteresis_threshold, threshold_isodata, - threshold_li, threshold_local, threshold_mean, - threshold_minimum, threshold_multiotsu, - threshold_niblack, threshold_otsu, threshold_sauvola, - threshold_triangle, threshold_yen, try_all_threshold) +from .thresholding import ( + apply_hysteresis_threshold, + threshold_isodata, + threshold_li, + threshold_local, + threshold_mean, + threshold_minimum, + threshold_multiotsu, + threshold_niblack, + threshold_otsu, + threshold_sauvola, + threshold_triangle, + threshold_yen, + try_all_threshold, +) diff --git a/python/cucim/src/cucim/skimage/filters/_fft_based.py b/python/cucim/src/cucim/skimage/filters/_fft_based.py index 9a68ea7b1..8b3d0897e 100644 --- a/python/cucim/src/cucim/skimage/filters/_fft_based.py +++ b/python/cucim/src/cucim/skimage/filters/_fft_based.py @@ -8,8 +8,15 @@ from .._vendored import pad -def 
_get_nd_butterworth_filter(shape, factor, order, high_pass, real, - dtype=cp.float64, squared_butterworth=True): +def _get_nd_butterworth_filter( + shape, + factor, + order, + high_pass, + real, + dtype=cp.float64, + squared_butterworth=True, +): """Create a N-dimensional Butterworth mask for an FFT Parameters @@ -38,7 +45,7 @@ def _get_nd_butterworth_filter(shape, factor, order, high_pass, real, for i, d in enumerate(shape): # start and stop ensures center of mask aligns with center of FFT axis = cp.arange(-(d - 1) // 2, (d - 1) // 2 + 1) / (d * factor) - ranges.append(fft.ifftshift(axis ** 2)) + ranges.append(fft.ifftshift(axis**2)) # for real image FFT, halve the last axis if real: limit = d // 2 + 1 @@ -158,9 +165,12 @@ def butterworth( raise ValueError("npad must be >= 0") elif npad > 0: center_slice = tuple(slice(npad, s + npad) for s in image.shape) - image = pad(image, npad, mode='edge') - fft_shape = tuple(image.shape if channel_axis is None - else np.delete(image.shape, channel_axis)) + image = pad(image, npad, mode="edge") + fft_shape = tuple( + image.shape + if channel_axis is None + else np.delete(image.shape, channel_axis) + ) is_real = cp.isrealobj(image) float_dtype = _supported_float_type(image.dtype, allow_complex=True) image = image.astype(float_dtype, copy=False) @@ -169,24 +179,32 @@ def butterworth( "cutoff_frequency_ratio should be in the range [0, 0.5]" ) wfilt = _get_nd_butterworth_filter( - fft_shape, cutoff_frequency_ratio, order, high_pass, is_real, - float_dtype, squared_butterworth + fft_shape, + cutoff_frequency_ratio, + order, + high_pass, + is_real, + float_dtype, + squared_butterworth, ) axes = np.arange(image.ndim) if channel_axis is not None: axes = np.delete(axes, channel_axis) abs_channel = channel_axis % image.ndim post = image.ndim - abs_channel - 1 - sl = ((slice(None),) * abs_channel + (np.newaxis,) + - (slice(None),) * post) + sl = ( + (slice(None),) * abs_channel + (np.newaxis,) + (slice(None),) * post + ) wfilt = wfilt[sl] axes = tuple(axes) if is_real: - butterfilt = fft.irfftn(wfilt * fft.rfftn(image, axes=axes), - s=fft_shape, axes=axes) + butterfilt = fft.irfftn( + wfilt * fft.rfftn(image, axes=axes), s=fft_shape, axes=axes + ) else: - butterfilt = fft.ifftn(wfilt * fft.fftn(image, axes=axes), - s=fft_shape, axes=axes) + butterfilt = fft.ifftn( + wfilt * fft.fftn(image, axes=axes), s=fft_shape, axes=axes + ) if npad > 0: butterfilt = butterfilt[center_slice] return butterfilt diff --git a/python/cucim/src/cucim/skimage/filters/_gabor.py b/python/cucim/src/cucim/skimage/filters/_gabor.py index 0e811039e..209e67eeb 100644 --- a/python/cucim/src/cucim/skimage/filters/_gabor.py +++ b/python/cucim/src/cucim/skimage/filters/_gabor.py @@ -12,12 +12,27 @@ def _sigma_prefactor(bandwidth): b = bandwidth # See http://www.cs.rug.nl/~imaging/simplecell.html - return 1.0 / math.pi * math.sqrt(math.log(2) / 2.0) * \ - (2.0 ** b + 1) / (2.0 ** b - 1) + return ( + 1.0 + / math.pi + * math.sqrt(math.log(2) / 2.0) + * (2.0**b + 1) + / (2.0**b - 1) + ) -def gabor_kernel(frequency, theta=0, bandwidth=1, sigma_x=None, sigma_y=None, - n_stds=3, offset=0, dtype=None, *, float_dtype=None): +def gabor_kernel( + frequency, + theta=0, + bandwidth=1, + sigma_x=None, + sigma_y=None, + n_stds=3, + offset=0, + dtype=None, + *, + float_dtype=None, +): """Return complex 2D Gabor filter kernel. Gabor kernel is a Gaussian kernel modulated by a complex harmonic function. 
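For orientation alongside the reformatted `gabor_kernel` signature above, here is a minimal usage sketch; the `frequency` and `theta` values are illustrative assumptions, not taken from this diff:

```python
import math

from cucim.skimage.filters import gabor_kernel

# A Gabor kernel is a Gaussian envelope modulated by a complex
# harmonic; sigma_x/sigma_y are derived from `bandwidth` when they
# are not passed explicitly.
kernel = gabor_kernel(frequency=0.2, theta=math.pi / 4, bandwidth=1)

print(kernel.shape)  # odd-sized, centered 2D kernel
print(kernel.dtype)  # complex128 when dtype is left as None (see the hunk below)
```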
@@ -96,7 +111,7 @@ def gabor_kernel(frequency, theta=0, bandwidth=1, sigma_x=None, sigma_y=None, elif dtype is None: dtype = cp.complex128 - if cp.dtype(dtype).kind != 'c': + if cp.dtype(dtype).kind != "c": raise ValueError("dtype must be complex") ct = math.cos(theta) @@ -107,16 +122,18 @@ def gabor_kernel(frequency, theta=0, bandwidth=1, sigma_x=None, sigma_y=None, y0 = math.ceil( max(abs(n_stds * sigma_y * ct), abs(n_stds * sigma_x * st), 1) ) - y, x = cp.meshgrid(cp.arange(-y0, y0 + 1), - cp.arange(-x0, x0 + 1), - indexing='ij', - sparse=True) + y, x = cp.meshgrid( + cp.arange(-y0, y0 + 1), + cp.arange(-x0, x0 + 1), + indexing="ij", + sparse=True, + ) rotx = x * ct + y * st roty = -x * st + y * ct g = cp.empty(roty.shape, dtype=dtype) cp.exp( - -0.5 * ((rotx * rotx) / sigma_x ** 2 + (roty * roty) / sigma_y ** 2), + -0.5 * ((rotx * rotx) / sigma_x**2 + (roty * roty) / sigma_y**2), out=g, ) g /= 2 * math.pi * sigma_x * sigma_y @@ -125,8 +142,18 @@ def gabor_kernel(frequency, theta=0, bandwidth=1, sigma_x=None, sigma_y=None, return g -def gabor(image, frequency, theta=0, bandwidth=1, sigma_x=None, - sigma_y=None, n_stds=3, offset=0, mode='reflect', cval=0): +def gabor( + image, + frequency, + theta=0, + bandwidth=1, + sigma_x=None, + sigma_y=None, + n_stds=3, + offset=0, + mode="reflect", + cval=0, +): """Return real and imaginary responses to Gabor filter. The real and imaginary parts of the Gabor filter kernel are applied to the @@ -200,15 +227,23 @@ def gabor(image, frequency, theta=0, bandwidth=1, sigma_x=None, """ # noqa check_nD(image, 2) # do not cast integer types to float! - if image.dtype.kind == 'f': + if image.dtype.kind == "f": float_dtype = _supported_float_type(image.dtype) image = image.astype(float_dtype, copy=False) kernel_dtype = cp.promote_types(image.dtype, cp.complex64) else: kernel_dtype = cp.complex128 - g = gabor_kernel(frequency, theta, bandwidth, sigma_x, sigma_y, n_stds, - offset, dtype=kernel_dtype) + g = gabor_kernel( + frequency, + theta, + bandwidth, + sigma_x, + sigma_y, + n_stds, + offset, + dtype=kernel_dtype, + ) # separate real and imaginary calls in order to preserve integer dtypes filtered_real = ndi.convolve(image, g.real, mode=mode, cval=cval) diff --git a/python/cucim/src/cucim/skimage/filters/_gaussian.py b/python/cucim/src/cucim/skimage/filters/_gaussian.py index 14428f8cd..0b23cce91 100644 --- a/python/cucim/src/cucim/skimage/filters/_gaussian.py +++ b/python/cucim/src/cucim/skimage/filters/_gaussian.py @@ -3,12 +3,19 @@ from .._shared.filters import gaussian from ..util import img_as_float -__all__ = ['gaussian', 'difference_of_gaussians'] - - -def difference_of_gaussians(image, low_sigma, high_sigma=None, *, - mode='nearest', cval=0, channel_axis=None, - truncate=4.0): +__all__ = ["gaussian", "difference_of_gaussians"] + + +def difference_of_gaussians( + image, + low_sigma, + high_sigma=None, + *, + mode="nearest", + cval=0, + channel_axis=None, + truncate=4.0, +): """Find features between ``low_sigma`` and ``high_sigma`` in size. 
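As a quick sketch of the band-pass behavior described here (the image and sigma values are illustrative; per the scikit-image API that cuCIM mirrors, omitting `high_sigma` defaults it to `1.6 * low_sigma`):

```python
import cupy as cp

from cucim.skimage.filters import difference_of_gaussians

image = cp.random.random((64, 64)).astype(cp.float32)

# Keep structures between roughly 1 and 4 pixels in scale.
filtered = difference_of_gaussians(image, low_sigma=1, high_sigma=4)

# Per-axis sigmas must match the number of spatial dimensions, and
# each high_sigma entry must be >= its low_sigma counterpart; the
# validation reformatted just below raises ValueError otherwise.
filtered_aniso = difference_of_gaussians(image, (1.0, 2.0), (4.0, 8.0))
```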
This function uses the Difference of Gaussians method for applying @@ -125,22 +132,40 @@ def difference_of_gaussians(image, low_sigma, high_sigma=None, *, high_sigma = tuple(map(float, high_sigma)) if len(low_sigma) != 1 and len(low_sigma) != spatial_dims: - raise ValueError('low_sigma must have length equal to number of' - ' spatial dimensions of input') + raise ValueError( + "low_sigma must have length equal to number of" + " spatial dimensions of input" + ) if len(high_sigma) != 1 and len(high_sigma) != spatial_dims: - raise ValueError('high_sigma must have length equal to number of' - ' spatial dimensions of input') - - if any(h < l for h, l in zip(high_sigma, low_sigma)): - raise ValueError('high_sigma must be equal to or larger than' - 'low_sigma for all axes') - - out = gaussian(image, low_sigma, mode=mode, cval=cval, - channel_axis=channel_axis, truncate=truncate, - preserve_range=False) - - out -= gaussian(image, high_sigma, mode=mode, cval=cval, - channel_axis=channel_axis, truncate=truncate, - preserve_range=False) + raise ValueError( + "high_sigma must have length equal to number of" + " spatial dimensions of input" + ) + + if any(s_hi < s_low for s_hi, s_low in zip(high_sigma, low_sigma)): + raise ValueError( + "high_sigma must be equal to or larger than " + "low_sigma for all axes" + ) + + out = gaussian( + image, + low_sigma, + mode=mode, + cval=cval, + channel_axis=channel_axis, + truncate=truncate, + preserve_range=False, + ) + + out -= gaussian( + image, + high_sigma, + mode=mode, + cval=cval, + channel_axis=channel_axis, + truncate=truncate, + preserve_range=False, + ) return out diff --git a/python/cucim/src/cucim/skimage/filters/_median.py b/python/cucim/src/cucim/skimage/filters/_median.py index e5d89d2c2..20948dc0b 100644 --- a/python/cucim/src/cucim/skimage/filters/_median.py +++ b/python/cucim/src/cucim/skimage/filters/_median.py @@ -18,11 +18,22 @@ def prod(x): return reduce(mul, x) -@deprecate_kwarg(kwarg_mapping={'selem': 'footprint'}, - removed_version="23.02.00", - deprecated_version="22.02.00") -def median(image, footprint=None, out=None, mode='nearest', cval=0.0, - behavior='ndimage', *, algorithm='auto', algorithm_kwargs={}): +@deprecate_kwarg( + kwarg_mapping={"selem": "footprint"}, + removed_version="23.02.00", + deprecated_version="22.02.00", +) +def median( + image, + footprint=None, + out=None, + mode="nearest", + cval=0.0, + behavior="ndimage", + *, + algorithm="auto", + algorithm_kwargs={}, +): """Return local median of an image. Parameters @@ -70,12 +81,12 @@ def median(image, footprint=None, out=None, mode='nearest', cval=0.0, Determines which algorithm is used to compute the median. The default of 'auto' will attempt to use a histogram-based algorithm for 2D images with 8 or 16-bit integer data types. Otherwise a sorting-based - algorithm will be used. Note: this paramter is cuCIM-specific and does + algorithm will be used. Note: this parameter is cuCIM-specific and does not exist in upstream scikit-image. algorithm_kwargs : dict Any additional algorithm-specific keywords. Currently can only be used to set the number of parallel partitions for the 'histogram' algorithm. - (e.g. ``algorithm_kwargs={'partitions': 256}``). Note: this paramter is + (e.g. ``algorithm_kwargs={'partitions': 256}``). Note: this parameter is cuCIM-specific and does not exist in upstream scikit-image.
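Because `algorithm` and `algorithm_kwargs` exist only in cuCIM, a short usage sketch may help; the footprint size and partition count below are illustrative assumptions:

```python
import cupy as cp

from cucim.skimage.filters import median
from cucim.skimage.morphology import disk

image = (cp.random.random((512, 512)) * 255).astype(cp.uint8)

# The default algorithm='auto' picks the histogram-based kernel for
# 2D 8- and 16-bit integer images with large footprints, and the
# sorting-based one otherwise.
smoothed = median(image, disk(7))

# Force the histogram algorithm and tune its partition count.
smoothed_hist = median(
    image,
    disk(7),
    algorithm="histogram",
    algorithm_kwargs={"partitions": 256},
)
```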
Returns @@ -115,11 +126,13 @@ def median(image, footprint=None, out=None, mode='nearest', cval=0.0, >>> med = median(img, disk(5)) """ - if behavior == 'rank': - if mode != 'nearest' or not np.isclose(cval, 0.0): - warn("Change 'behavior' to 'ndimage' if you want to use the " - "parameters 'mode' or 'cval'. They will be discarded " - "otherwise.") + if behavior == "rank": + if mode != "nearest" or not np.isclose(cval, 0.0): + warn( + "Change 'behavior' to 'ndimage' if you want to use the " + "parameters 'mode' or 'cval'. They will be discarded " + "otherwise." + ) raise NotImplementedError("rank behavior not currently implemented") # TODO: implement median rank filter # return generic.median(image, selem=selem, out=out) @@ -134,16 +147,16 @@ def median(image, footprint=None, out=None, mode='nearest', cval=0.0, else: footprint_shape = footprint.shape - if algorithm == 'sorting': + if algorithm == "sorting": can_use_histogram = False - elif algorithm in ['auto', 'histogram']: + elif algorithm in ["auto", "histogram"]: can_use_histogram, reason = _can_use_histogram( image, footprint, footprint_shape ) else: raise ValueError(f"unknown algorithm: {algorithm}") - if algorithm == 'histogram' and not can_use_histogram: + if algorithm == "histogram" and not can_use_histogram: raise ValueError( "The histogram-based algorithm was requested, but it cannot " f"be used for this image and footprint (reason: {reason})." @@ -153,7 +166,7 @@ def median(image, footprint=None, out=None, mode='nearest', cval=0.0, # Empirically, shapes above (13, 13) and above on RTX A6000 have faster # execution for the histogram-based approach. use_histogram = can_use_histogram - if algorithm == 'auto': + if algorithm == "auto": # prefer sorting-based algorithm if footprint shape is small use_histogram = use_histogram and prod(footprint_shape) > 150 @@ -183,7 +196,7 @@ def median(image, footprint=None, out=None, mode='nearest', cval=0.0, footprint_shape if footprint is None else footprint, mode=mode, cval=cval, - **algorithm_kwargs + **algorithm_kwargs, ) if output_array_provided: out[:] = temp @@ -195,14 +208,19 @@ def median(image, footprint=None, out=None, mode='nearest', cval=0.0, except KernelResourceError as e: # Fall back to sorting-based implementation if we encounter a # resource limit (e.g. insufficient shared memory per block). - warn("Kernel resource error encountered in histogram-based " - f"median kernel: {e}\n" - "Falling back to sorting-based median instead.") + warn( + "Kernel resource error encountered in histogram-based " + f"median kernel: {e}\n" + "Falling back to sorting-based median instead." 
+ ) if algorithm_kwargs: - warn(f"algorithm_kwargs={algorithm_kwargs} ignored for sorting-based " - f"algorithm") + warn( + f"algorithm_kwargs={algorithm_kwargs} ignored for sorting-based " + f"algorithm" + ) size = footprint_shape if footprint is None else None - return ndi.median_filter(image, size=size, footprint=footprint, output=out, - mode=mode, cval=cval) + return ndi.median_filter( + image, size=size, footprint=footprint, output=out, mode=mode, cval=cval + ) diff --git a/python/cucim/src/cucim/skimage/filters/_median_hist.py b/python/cucim/src/cucim/skimage/filters/_median_hist.py index 1dfbd8d09..e0fe08e81 100644 --- a/python/cucim/src/cucim/skimage/filters/_median_hist.py +++ b/python/cucim/src/cucim/skimage/filters/_median_hist.py @@ -9,7 +9,7 @@ from .._shared.utils import _to_np_mode from .._vendored import pad -if hasattr(math, 'prod'): +if hasattr(math, "prod"): prod = math.prod else: prod = np.prod @@ -17,14 +17,14 @@ def _dtype_to_CUDA_int_type(dtype): cpp_int_types = { - cp.uint8: 'unsigned char', - cp.uint16: 'unsigned short', - cp.uint32: 'unsigned int', - cp.uint64: 'unsigned long long', - cp.int8: 'signed char', - cp.int16: 'short', - cp.int32: 'int', - cp.int64: 'long long', + cp.uint8: "unsigned char", + cp.uint16: "unsigned short", + cp.uint32: "unsigned int", + cp.uint64: "unsigned long long", + cp.int8: "signed char", + cp.int16: "short", + cp.int32: "int", + cp.int64: "long long", } dtype = cp.dtype(dtype) if dtype.type not in cpp_int_types: @@ -46,11 +46,11 @@ def _get_hist_dtype(footprint_shape): def _gen_global_definitions( - image_t='unsigned char', + image_t="unsigned char", hist_offset=0, - hist_int_t='int', + hist_int_t="int", hist_size=256, - hist_size_coarse=8 + hist_size_coarse=8, ): """Generate C++ #define statements needed for the CUDA kernels. 
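The `_median.py` hunks above document the cuCIM-specific `algorithm` and `algorithm_kwargs` parameters, including the automatic fallback from the histogram kernel to the sorting-based path on `KernelResourceError`. A minimal usage sketch, assuming a CUDA-capable device (the `disk` footprint mirrors the docstring example above):

```python
import cupy as cp
from cucim.skimage.filters import median
from cucim.skimage.morphology import disk

img = cp.random.randint(0, 256, (512, 512), dtype=cp.uint8)

# 'auto' (the default) prefers the histogram kernel for 8/16-bit 2D
# images with large footprints and otherwise uses the sorting path.
med_auto = median(img, disk(9), behavior="ndimage", algorithm="auto")

# Force the histogram kernel and tune its parallel partitions; this
# raises ValueError when the image/footprint combination cannot use it.
med_hist = median(
    img,
    disk(9),
    algorithm="histogram",
    algorithm_kwargs={"partitions": 256},
)
```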
@@ -59,9 +59,7 @@ def _gen_global_definitions( """ if hist_size % hist_size_coarse != 0: - raise ValueError( - "`hist_size` must be a multiple of `hist_size_coarse`" - ) + raise ValueError("`hist_size` must be a multiple of `hist_size_coarse`") hist_size_fine = hist_size // hist_size_coarse log2_coarse = math.log2(hist_size_coarse) log2_fine = math.log2(hist_size_fine) @@ -195,9 +193,9 @@ def _get_median_rawkernel( hist_size_coarse=hist_size_coarse, ) - kernel_directory = os.path.join(os.path.dirname(__file__), 'cuda') - with open(os.path.join(kernel_directory, 'histogram_median.cu'), 'rt') as f: - rank_filter_kernel = '\n'.join(f.readlines()) + kernel_directory = os.path.join(os.path.dirname(__file__), "cuda") + with open(os.path.join(kernel_directory, "histogram_median.cu"), "rt") as f: + rank_filter_kernel = "\n".join(f.readlines()) return cp.RawKernel( code=preamble + rank_filter_kernel, @@ -210,11 +208,11 @@ def _check_shared_memory_requirement_bytes( ): """computes amount of shared memory required by cuRankFilterMultiBlock""" s = np.dtype(hist_dtype).itemsize - shared_size = hist_size_coarse * s # for HCoarse - shared_size += hist_size_fine * s # for HCoarseScane - shared_size += hist_size_coarse * hist_size_fine * s # for HFine - shared_size += hist_size_coarse * 4 # for luc - shared_size += 12 # three more ints + shared_size = hist_size_coarse * s  # for HCoarse + shared_size += hist_size_fine * s  # for HCoarseScan + shared_size += hist_size_coarse * hist_size_fine * s  # for HFine + shared_size += hist_size_coarse * 4  # for luc + shared_size += 12  # three more ints return shared_size @@ -258,9 +256,7 @@ def _can_use_histogram(image, footprint, footprint_shape=None): if footprint is None: if footprint_shape is None: - raise ValueError( - "must provide either footprint or footprint_shape" - ) + raise ValueError("must provide either footprint or footprint_shape") else: footprint_shape = footprint.shape @@ -290,8 +286,13 @@ class KernelResourceError(RuntimeError): pass -def _get_kernel_params(image, footprint_shape, value_range='auto', - partitions=None, hist_size_coarse=None): +def _get_kernel_params( + image, + footprint_shape, + value_range="auto", + partitions=None, + hist_size_coarse=None, +): """Determine kernel launch parameters and #define values for its code. Parameters @@ -328,22 +329,22 @@ def _get_kernel_params(image, footprint_shape, value_range='auto', See comments next to the KernelParams declaration below for details. """ - if value_range == 'auto': + if value_range == "auto": if image.dtype.itemsize < 2: - value_range = 'dtype' + value_range = "dtype" else: # to save memory, try using actual value range for >8-bit images # (e.g. DICOM images often have 12-bit range) - value_range = 'image' + value_range = "image" - if value_range == 'dtype': + if value_range == "dtype": if image.dtype.itemsize > 2: raise ValueError( "dtype range only supported for 8 and 16-bit integer dtypes."
) iinfo = cp.iinfo(image.dtype) minv, maxv = iinfo.min, iinfo.max - elif value_range == 'image': + elif value_range == "image": minv = int(image.min()) maxv = int(image.max()) else: @@ -354,14 +355,14 @@ def _get_kernel_params(image, footprint_shape, value_range='auto', ) minv, maxv = value_range - if image.dtype.kind == 'u': + if image.dtype.kind == "u": # cannot subtract a positive offset in the unsigned case minv = min(minv, 0) hist_offset = 0 if minv == 0 else -minv hist_size = maxv - minv + 1 hist_size = max(hist_size, 256) # use at least 256 bins # round hist_size up to the nearest power of 2 - hist_size = round(2**math.ceil(math.log2(hist_size))) + hist_size = round(2 ** math.ceil(math.log2(hist_size))) hist_size = max(hist_size, 32) if hist_size_coarse is None: @@ -410,24 +411,24 @@ def _get_kernel_params(image, footprint_shape, value_range='auto', hist_dtype, hist_size_coarse, hist_size_fine ) d = cp.cuda.Device() - smem_available = d.attributes['MaxSharedMemoryPerBlock'] + smem_available = d.attributes["MaxSharedMemoryPerBlock"] if smem_size > smem_available: raise KernelResourceError( f"Shared memory requirement of {smem_size} bytes per block" f"exceeds the device limit of {smem_available}." ) CUDAParams = namedtuple( - 'HistogramMedianKernelParams', + "HistogramMedianKernelParams", [ - 'grid', - 'block', - 'hist_size', # total number of histogram bins - 'hist_size_coarse', # number of coarse-level histogram bins - 'hist_dtype', # cupy.dtype of the histogram - 'hist_int_t', # C++ type of the histogram - 'hist_offset', # offset from 0 for the first bin - 'partitions' # number of parallel bands to use - ] + "grid", + "block", + "hist_size", # total number of histogram bins + "hist_size_coarse", # number of coarse-level histogram bins + "hist_dtype", # cupy.dtype of the histogram + "hist_int_t", # C++ type of the histogram + "hist_offset", # offset from 0 for the first bin + "partitions", # number of parallel bands to use + ], ) return CUDAParams( grid, @@ -441,9 +442,15 @@ def _get_kernel_params(image, footprint_shape, value_range='auto', ) -def _median_hist(image, footprint, output=None, mode='mirror', cval=0, - value_range='auto', partitions=None): - +def _median_hist( + image, + footprint, + output=None, + mode="mirror", + cval=0, + value_range="auto", + partitions=None, +): if output is not None: raise NotImplementedError( "Use of a user-defined output array has not been implemented" @@ -468,31 +475,29 @@ def _median_hist(image, footprint, output=None, mode='mirror', cval=0, # kernel pointer offset calculations assume C-contiguous image data image = cp.ascontiguousarray(image) n_rows, n_cols = image.shape[:2] - if image.dtype.kind == 'b': + if image.dtype.kind == "b": image = image.view(cp.uint8) - if image.dtype.kind not in 'iu': + if image.dtype.kind not in "iu": raise ValueError("only integer-type images are accepted") radii = tuple(s // 2 for s in footprint_shape) # med_pos is the index corresponding to the median # (calculation here assumes all elements of the footprint are True) - params = _get_kernel_params( - image, footprint_shape, value_range, partitions - ) + params = _get_kernel_params(image, footprint_shape, value_range, partitions) # pad as necessary to avoid boundary artifacts # Don't have to pad along axis 0 if mode is already 'nearest' because the # kernel already assumes 'nearest' mode internally. 
autopad = True - pad_both_axes = mode != 'nearest' + pad_both_axes = mode != "nearest" if autopad: if pad_both_axes: npad = tuple((r, r) for r in radii) else: npad = ((0, 0),) * (image.ndim - 1) + ((radii[-1], radii[-1]),) mode = _to_np_mode(mode) - if mode == 'constant': + if mode == "constant": pad_kwargs = dict(mode=mode, constant_values=cval) else: pad_kwargs = dict(mode=mode) @@ -532,5 +537,5 @@ def _median_hist(image, footprint, output=None, mode='mirror', cval=0, out_sl = tuple(slice(r, -r) for r in radii) out = out[out_sl] else: - out = out[..., radii[-1]:-radii[-1]] + out = out[..., radii[-1] : -radii[-1]] return out diff --git a/python/cucim/src/cucim/skimage/filters/_separable_filtering.py b/python/cucim/src/cucim/skimage/filters/_separable_filtering.py index e4e0fffa7..107d303e6 100644 --- a/python/cucim/src/cucim/skimage/filters/_separable_filtering.py +++ b/python/cucim/src/cucim/skimage/filters/_separable_filtering.py @@ -5,7 +5,9 @@ from cucim.skimage._vendored import _ndimage_util as util from cucim.skimage._vendored._internal import _normalize_axis_index, prod from cucim.skimage._vendored._ndimage_filters_core import ( - _ndimage_CAST_FUNCTION, _ndimage_includes) + _ndimage_CAST_FUNCTION, + _ndimage_includes, +) def _get_constants(ndim, axis, kernel_size, anchor, patch_per_block=None): @@ -63,8 +65,9 @@ def _get_constants(ndim, axis, kernel_size, anchor, patch_per_block=None): return block, patch_per_block, halo_size -def _get_smem_shape(ndim, axis, block, patch_per_block, halo_size, - image_dtype=cp.float32): +def _get_smem_shape( + ndim, axis, block, patch_per_block, halo_size, image_dtype=cp.float32 +): bx, by, bz = block if ndim == 2: if axis == 0: @@ -88,31 +91,31 @@ def _get_warp_size(device_id=None): if device_id is None: device_id = cp.cuda.runtime.getDevice() device_props = cp.cuda.runtime.getDeviceProperties(device_id) - return device_props['warpSize'] + return device_props["warpSize"] def _get_shmem_limits(device_id=None): if device_id is None: device_id = cp.cuda.runtime.getDevice() device_props = cp.cuda.runtime.getDeviceProperties(device_id) - shared_mp = device_props.get('sharedMemPerMultiprocessor', None) - shared_block = device_props.get('sharedMemPerBlock', None) - shared_block_optin = device_props.get('sharedMemPerBlockOptin', None) - global_l1_cache_supported = device_props.get('globalL1CacheSupported', None) - local_l1_cache_supported = device_props.get('localL1CacheSupported', None) - l2_size = device_props.get('l2CacheSize', None) - warp_size = device_props.get('warpSize', None) - regs_per_block = device_props.get('regsPerBlock', None) + shared_mp = device_props.get("sharedMemPerMultiprocessor", None) + shared_block = device_props.get("sharedMemPerBlock", None) + shared_block_optin = device_props.get("sharedMemPerBlockOptin", None) + global_l1_cache_supported = device_props.get("globalL1CacheSupported", None) + local_l1_cache_supported = device_props.get("localL1CacheSupported", None) + l2_size = device_props.get("l2CacheSize", None) + warp_size = device_props.get("warpSize", None) + regs_per_block = device_props.get("regsPerBlock", None) return { - 'device_id': device_id, - 'shared_mp': shared_mp, - 'shared_block': shared_block, - 'shared_block_optin': shared_block_optin, - 'global_l1_cache_supported': global_l1_cache_supported, - 'local_l1_cache_supported': local_l1_cache_supported, - 'l2_size': l2_size, - 'warp_size': warp_size, - 'regs_per_block': regs_per_block, + "device_id": device_id, + "shared_mp": shared_mp, + "shared_block": shared_block, 
+ "shared_block_optin": shared_block_optin, + "global_l1_cache_supported": global_l1_cache_supported, + "local_l1_cache_supported": local_l1_cache_supported, + "l2_size": l2_size, + "warp_size": warp_size, + "regs_per_block": regs_per_block, } @@ -121,9 +124,15 @@ class ResourceLimitError(RuntimeError): @cp.memoize(for_each_device=True) -def _check_smem_availability(ndim, axis, kernel_size, anchor=None, - patch_per_block=None, image_dtype=cp.float32, - device_id=None): +def _check_smem_availability( + ndim, + axis, + kernel_size, + anchor=None, + patch_per_block=None, + image_dtype=cp.float32, + device_id=None, +): block, patch_per_block, halo_size = _get_constants( ndim, axis, kernel_size, anchor=anchor, patch_per_block=patch_per_block ) @@ -133,28 +142,28 @@ def _check_smem_availability(ndim, axis, kernel_size, anchor=None, block=block, patch_per_block=patch_per_block, halo_size=halo_size, - image_dtype=image_dtype + image_dtype=image_dtype, ) props = _get_shmem_limits(device_id=device_id) - if nbytes > props['shared_block']: + if nbytes > props["shared_block"]: raise ResourceLimitError("inadequate shared memory available") _dtype_char_to_c_types = { - 'e': 'float16', - 'f': 'float', - 'd': 'double', - 'F': 'complex', - 'D': 'complex', - '?': 'char', - 'b': 'char', - 'h': 'short', - 'i': 'int', - 'l': 'long long', - 'B': 'unsigned char', - 'H': 'unsigned short', - 'I': 'unsigned int', - 'L': 'unsigned long long', + "e": "float16", + "f": "float", + "d": "double", + "F": "complex", + "D": "complex", + "?": "char", + "b": "char", + "h": "short", + "i": "int", + "l": "long long", + "B": "unsigned char", + "H": "unsigned short", + "I": "unsigned int", + "L": "unsigned long long", } @@ -167,8 +176,13 @@ def _get_code_stage1_shared_memory_load_2d(ndim, axis, mode, cval): """ if ndim == 2 and axis == 0: - if mode not in ['constant', 'grid-constant']: - boundary_code_lower, boundary_code_upper = util._generate_boundary_condition_ops(mode, 'row', 'n_rows', separate=True) # noqa + if mode not in ["constant", "grid-constant"]: + ( + boundary_code_lower, + boundary_code_upper, + ) = util._generate_boundary_condition_ops( + mode, "row", "n_rows", separate=True + ) # noqa # as in OpenCV's column_filter.hpp code = """ @@ -197,7 +211,7 @@ def _get_code_stage1_shared_memory_load_2d(ndim, axis, mode, cval): for (int j = 0; j < HALO_SIZE; ++j) { row = yStart - (HALO_SIZE - j) * BLOCK_DIM_Y; """ # noqa - if mode == 'constant': + if mode == "constant": code += f""" if (row < 0) smem[threadIdx.y + j * BLOCK_DIM_Y][threadIdx.x] = static_cast({cval}); @@ -229,7 +243,7 @@ def _get_code_stage1_shared_memory_load_2d(ndim, axis, mode, cval): for (int j = 0; j < PATCH_PER_BLOCK; ++j) { row = yStart + j * BLOCK_DIM_Y; """ # noqa - if mode == 'constant': + if mode == "constant": code += f""" if (row >= n_rows) smem[threadIdx.y + (HALO_SIZE + j) * BLOCK_DIM_Y][threadIdx.x] = static_cast({cval}); @@ -247,7 +261,7 @@ def _get_code_stage1_shared_memory_load_2d(ndim, axis, mode, cval): { row = yStart + (PATCH_PER_BLOCK + j) * BLOCK_DIM_Y; """ # noqa - if mode == 'constant': + if mode == "constant": code += f""" if (row >= n_rows) smem[threadIdx.y + (PATCH_PER_BLOCK + HALO_SIZE + j) * BLOCK_DIM_Y][threadIdx.x] = static_cast({cval}); @@ -261,8 +275,13 @@ def _get_code_stage1_shared_memory_load_2d(ndim, axis, mode, cval): } """ # noqa elif ndim == 2 and axis == 1: - if mode not in ['constant', 'grid-constant']: - boundary_code_lower, boundary_code_upper = util._generate_boundary_condition_ops(mode, 'col', 'n_cols', 
separate=True) # noqa + if mode not in ["constant", "grid-constant"]: + ( + boundary_code_lower, + boundary_code_upper, + ) = util._generate_boundary_condition_ops( + mode, "col", "n_cols", separate=True + ) # noqa # as in OpenCV's row_filter.hpp code = """ @@ -290,7 +309,7 @@ def _get_code_stage1_shared_memory_load_2d(ndim, axis, mode, cval): for (int j = 0; j < HALO_SIZE; ++j){ col = xStart - (HALO_SIZE - j) * BLOCK_DIM_X; """ # noqa - if mode == 'constant': + if mode == "constant": code += f""" if (col < 0) smem[threadIdx.y][threadIdx.x + j * BLOCK_DIM_X] = static_cast({cval}); @@ -321,7 +340,7 @@ def _get_code_stage1_shared_memory_load_2d(ndim, axis, mode, cval): for (int j = 0; j < PATCH_PER_BLOCK; ++j) { col = xStart + j * BLOCK_DIM_X; """ # noqa - if mode == 'constant': + if mode == "constant": code += f""" if (col >= n_cols) smem[threadIdx.y][threadIdx.x + (HALO_SIZE + j) * BLOCK_DIM_X] = static_cast({cval}); @@ -338,7 +357,7 @@ def _get_code_stage1_shared_memory_load_2d(ndim, axis, mode, cval): for (int j = 0; j < HALO_SIZE; ++j){ col = xStart + (PATCH_PER_BLOCK + j) * BLOCK_DIM_X; """ # noqa - if mode == 'constant': + if mode == "constant": code += f""" if (col >= n_cols) smem[threadIdx.y][threadIdx.x + (PATCH_PER_BLOCK + HALO_SIZE + j) * BLOCK_DIM_X] = static_cast({cval}); @@ -367,8 +386,13 @@ def _get_code_stage1_shared_memory_load_3d(ndim, axis, mode, cval): """ if ndim == 3 and axis == 0: - if mode not in ['constant', 'grid-constant']: - boundary_code_lower, boundary_code_upper = util._generate_boundary_condition_ops(mode, 'row', 's_0', separate=True) # noqa + if mode not in ["constant", "grid-constant"]: + ( + boundary_code_lower, + boundary_code_upper, + ) = util._generate_boundary_condition_ops( + mode, "row", "s_0", separate=True + ) # noqa # as in OpenCV's column_filter.hpp code = """ @@ -400,7 +424,7 @@ def _get_code_stage1_shared_memory_load_3d(ndim, axis, mode, cval): for (int j = 0; j < HALO_SIZE; ++j) { row = zStart - (HALO_SIZE - j) * BLOCK_DIM_Z; """ # noqa - if mode == 'constant': + if mode == "constant": code += f""" if (row < 0) smem[threadIdx.z + j * BLOCK_DIM_Z][threadIdx.y][threadIdx.x] = static_cast({cval}); @@ -432,7 +456,7 @@ def _get_code_stage1_shared_memory_load_3d(ndim, axis, mode, cval): for (int j = 0; j < PATCH_PER_BLOCK; ++j) { row = zStart + j * BLOCK_DIM_Z; """ # noqa - if mode == 'constant': + if mode == "constant": code += f""" if (row >= s_0) smem[threadIdx.z + (HALO_SIZE + j) * BLOCK_DIM_Z][threadIdx.y][threadIdx.x] = static_cast({cval}); @@ -450,7 +474,7 @@ def _get_code_stage1_shared_memory_load_3d(ndim, axis, mode, cval): { row = zStart + (PATCH_PER_BLOCK + j) * BLOCK_DIM_Z; """ # noqa - if mode == 'constant': + if mode == "constant": code += f""" if (row >= s_0) smem[threadIdx.z + (PATCH_PER_BLOCK + HALO_SIZE + j) * BLOCK_DIM_Z][threadIdx.y][threadIdx.x] = static_cast({cval}); @@ -464,8 +488,13 @@ def _get_code_stage1_shared_memory_load_3d(ndim, axis, mode, cval): } """ # noqa elif ndim == 3 and axis == 1: - if mode not in ['constant', 'grid-constant']: - boundary_code_lower, boundary_code_upper = util._generate_boundary_condition_ops(mode, 'row', 's_1', separate=True) # noqa + if mode not in ["constant", "grid-constant"]: + ( + boundary_code_lower, + boundary_code_upper, + ) = util._generate_boundary_condition_ops( + mode, "row", "s_1", separate=True + ) # noqa # as in OpenCV's column_filter.hpp code = """ @@ -497,7 +526,7 @@ def _get_code_stage1_shared_memory_load_3d(ndim, axis, mode, cval): for (int j = 0; j < HALO_SIZE; ++j) { row = 
yStart - (HALO_SIZE - j) * BLOCK_DIM_Y; """ # noqa - if mode == 'constant': + if mode == "constant": code += f""" if (row < 0) smem[threadIdx.z][threadIdx.y + j * BLOCK_DIM_Y][threadIdx.x] = static_cast({cval}); @@ -529,7 +558,7 @@ def _get_code_stage1_shared_memory_load_3d(ndim, axis, mode, cval): for (int j = 0; j < PATCH_PER_BLOCK; ++j) { row = yStart + j * BLOCK_DIM_Y; """ # noqa - if mode == 'constant': + if mode == "constant": code += f""" if (row >= s_1) smem[threadIdx.z][threadIdx.y + (HALO_SIZE + j) * BLOCK_DIM_Y][threadIdx.x] = static_cast({cval}); @@ -547,7 +576,7 @@ def _get_code_stage1_shared_memory_load_3d(ndim, axis, mode, cval): { row = yStart + (PATCH_PER_BLOCK + j) * BLOCK_DIM_Y; """ # noqa - if mode == 'constant': + if mode == "constant": code += f""" if (row >= s_1) smem[threadIdx.z][threadIdx.y + (PATCH_PER_BLOCK + HALO_SIZE + j) * BLOCK_DIM_Y][threadIdx.x] = static_cast({cval}); @@ -561,8 +590,13 @@ def _get_code_stage1_shared_memory_load_3d(ndim, axis, mode, cval): } """ # noqa elif ndim == 3 and axis == 2: - if mode not in ['constant', 'grid-constant']: - boundary_code_lower, boundary_code_upper = util._generate_boundary_condition_ops(mode, 'col', 's_2', separate=True) # noqa + if mode not in ["constant", "grid-constant"]: + ( + boundary_code_lower, + boundary_code_upper, + ) = util._generate_boundary_condition_ops( + mode, "col", "s_2", separate=True + ) # noqa # as in OpenCV's row_filter.hpp code = """ @@ -592,7 +626,7 @@ def _get_code_stage1_shared_memory_load_3d(ndim, axis, mode, cval): for (int j = 0; j < HALO_SIZE; ++j){ col = xStart - (HALO_SIZE - j) * BLOCK_DIM_X; """ # noqa - if mode == 'constant': + if mode == "constant": code += f""" if (col < 0) smem[threadIdx.z][threadIdx.y][threadIdx.x + j * BLOCK_DIM_X] = static_cast({cval}); @@ -623,7 +657,7 @@ def _get_code_stage1_shared_memory_load_3d(ndim, axis, mode, cval): for (int j = 0; j < PATCH_PER_BLOCK; ++j) { col = xStart + j * BLOCK_DIM_X; """ # noqa - if mode == 'constant': + if mode == "constant": code += f""" if (col >= s_2) smem[threadIdx.z][threadIdx.y][threadIdx.x + (HALO_SIZE + j) * BLOCK_DIM_X] = static_cast({cval}); @@ -640,7 +674,7 @@ def _get_code_stage1_shared_memory_load_3d(ndim, axis, mode, cval): for (int j = 0; j < HALO_SIZE; ++j){ col = xStart + (PATCH_PER_BLOCK + j) * BLOCK_DIM_X; """ # noqa - if mode == 'constant': + if mode == "constant": code += f""" if (col >= s_2) smem[threadIdx.z][threadIdx.y][threadIdx.x + (PATCH_PER_BLOCK + HALO_SIZE + j) * BLOCK_DIM_X] = static_cast({cval}); @@ -778,8 +812,17 @@ def _get_code_stage2_convolve(ndim, axis, flip_kernel): @cp.memoize(for_each_device=True) def _get_separable_conv_kernel_src( - kernel_size, axis, ndim, anchor, image_c_type, kernel_c_type, - output_c_type, mode, cval, patch_per_block=None, flip_kernel=False + kernel_size, + axis, + ndim, + anchor, + image_c_type, + kernel_c_type, + output_c_type, + mode, + cval, + patch_per_block=None, + flip_kernel=False, ): blocks, patch_per_block, halo_size = _get_constants( ndim, axis, kernel_size, anchor, patch_per_block @@ -787,14 +830,21 @@ def _get_separable_conv_kernel_src( block_x, block_y, block_z = blocks mode_str = mode - if 'constant' in mode_str: - mode_str += f'_{cval:0.2f}'.replace('.', '_') - mode_str = mode_str.replace('-', '_') + if "constant" in mode_str: + mode_str += f"_{cval:0.2f}".replace(".", "_") + mode_str = mode_str.replace("-", "_") if flip_kernel: - func_name = f'convolve_s{kernel_size}_{ndim}d_ax{axis}_{mode_str}' + func_name = 
f"convolve_s{kernel_size}_{ndim}d_ax{axis}_{mode_str}" else: - func_name = f'correlate_s{kernel_size}_{ndim}d_ax{axis}_{mode_str}' - func_name += f"_T{image_c_type}_W{kernel_c_type}_D{output_c_type}".replace('complex<', 'c').replace('>', '').replace('long ', 'l').replace('unsigned ', 'u') # noqa + func_name = f"correlate_s{kernel_size}_{ndim}d_ax{axis}_{mode_str}" + func_name += ( + f"_T{image_c_type}_W{kernel_c_type}_D{output_c_type}".replace( + "complex<", "c" + ) + .replace(">", "") + .replace("long ", "l") + .replace("unsigned ", "u") + ) # noqa func_name += f"_patch{patch_per_block}_halo{halo_size}" # func_name += f"_bx{block_x}_by{block_y}" // these are fixed per axis @@ -841,10 +891,19 @@ def _get_separable_conv_kernel_src( @cp.memoize(for_each_device=True) -def _get_separable_conv_kernel(kernel_size, axis, ndim, image_c_type, - kernel_c_type, output_c_type, anchor=None, - mode='nearest', cval=0, - patch_per_block=None, flip_kernel=False): +def _get_separable_conv_kernel( + kernel_size, + axis, + ndim, + image_c_type, + kernel_c_type, + output_c_type, + anchor=None, + mode="nearest", + cval=0, + patch_per_block=None, + flip_kernel=False, +): func_name, block, patch_per_block, code = _get_separable_conv_kernel_src( kernel_size=kernel_size, axis=axis, @@ -858,7 +917,7 @@ def _get_separable_conv_kernel(kernel_size, axis, ndim, image_c_type, patch_per_block=patch_per_block, flip_kernel=flip_kernel, ) - options = ('--std=c++11', '-DCUPY_USE_JITIFY') + options = ("--std=c++11", "-DCUPY_USE_JITIFY") m = cp.RawModule(code=code, options=options) return m.get_function(func_name), block, patch_per_block @@ -912,9 +971,16 @@ def _get_grid(shape, block, axis, patch_per_block): return grid -def _shmem_convolve1d(image, weights, axis=-1, output=None, mode="reflect", - cval=0.0, origin=0, convolution=False): - +def _shmem_convolve1d( + image, + weights, + axis=-1, + output=None, + mode="reflect", + cval=0.0, + origin=0, + convolution=False, +): ndim = image.ndim if weights.ndim != 1: raise ValueError("expected 1d weight array") @@ -937,15 +1003,21 @@ def _shmem_convolve1d(image, weights, axis=-1, output=None, mode="reflect", if weights.size > 32: # For large kernels, make sure we have adequate shared memory - _check_smem_availability(ndim, axis, weights.size, anchor=anchor, - patch_per_block=None, image_dtype=image.dtype, - device_id=None) + _check_smem_availability( + ndim, + axis, + weights.size, + anchor=anchor, + patch_per_block=None, + image_dtype=image.dtype, + device_id=None, + ) # CUDA kernels assume C-contiguous memory layout if not image.flags.c_contiguous: image = cp.ascontiguousarray(image) - complex_output = image.dtype.kind == 'c' + complex_output = image.dtype.kind == "c" # Note: important to set use_cucim_casting=True for performance with # 8 and 16-bit integer types. This causes the weights to get cast to # float32 rather than float64. 
@@ -956,11 +1028,11 @@ def _shmem_convolve1d(image, weights, axis=-1, output=None, mode="reflect", weights = cp.ascontiguousarray(weights, weights_dtype) # promote output to nearest complex dtype if necessary - complex_output = complex_output or weights.dtype.kind == 'c' + complex_output = complex_output or weights.dtype.kind == "c" output = util._get_output(output, image, None, complex_output) # handle potential overlap between input and output arrays - needs_temp = cp.shares_memory(output, image, 'MAY_SHARE_BOUNDS') + needs_temp = cp.shares_memory(output, image, "MAY_SHARE_BOUNDS") if needs_temp: output, temp = util._get_output(output.dtype, input), output diff --git a/python/cucim/src/cucim/skimage/filters/_sparse.py b/python/cucim/src/cucim/skimage/filters/_sparse.py index d202ce3e9..a1d674629 100644 --- a/python/cucim/src/cucim/skimage/filters/_sparse.py +++ b/python/cucim/src/cucim/skimage/filters/_sparse.py @@ -19,9 +19,11 @@ def _validate_window_size(axis_sizes): """ for axis_size in axis_sizes: if axis_size % 2 == 0: - msg = (f'Window size for `threshold_sauvola` or ' - f'`threshold_niblack` must not be even on any dimension. ' - f'Got {axis_sizes}') + msg = ( + f"Window size for `threshold_sauvola` or " + f"`threshold_niblack` must not be even on any dimension. " + f"Got {axis_sizes}" + ) raise ValueError(msg) @@ -32,8 +34,12 @@ def _get_view(padded, kernel_shape, idx, val): in correlate_sparse, then the view created here will match the size of the original image. """ - sl_shift = tuple([slice(c, s - (w_ - 1 - c)) - for c, w_, s in zip(idx, kernel_shape, padded.shape)]) + sl_shift = tuple( + [ + slice(c, s - (w_ - 1 - c)) + for c, w_, s in zip(idx, kernel_shape, padded.shape) + ] + ) v = padded[sl_shift] if val == 1: return v @@ -81,7 +87,7 @@ def _correlate_sparse(image, kernel_shape, kernel_indices, kernel_values): return out -def correlate_sparse(image, kernel, mode='reflect'): +def correlate_sparse(image, kernel, mode="reflect"): """Compute valid cross-correlation of `padded_array` and `kernel`. This function is *fast* when `kernel` is large with many zeros. @@ -114,7 +120,7 @@ def correlate_sparse(image, kernel, mode='reflect'): float_dtype = _supported_float_type(image.dtype) image = image.astype(float_dtype, copy=False) - if mode == 'valid': + if mode == "valid": padded_image = image else: np_mode = _to_np_mode(mode) diff --git a/python/cucim/src/cucim/skimage/filters/_unsharp_mask.py b/python/cucim/src/cucim/skimage/filters/_unsharp_mask.py index 83fd69615..1be8e833d 100644 --- a/python/cucim/src/cucim/skimage/filters/_unsharp_mask.py +++ b/python/cucim/src/cucim/skimage/filters/_unsharp_mask.py @@ -8,7 +8,7 @@ def _unsharp_mask_single_channel(image, radius, amount, vrange): """Single channel implementation of the unsharp masking filter.""" - blurred = gaussian(image, sigma=radius, mode='reflect') + blurred = gaussian(image, sigma=radius, mode="reflect") result = image + (image - blurred) * amount if vrange is not None: @@ -16,8 +16,9 @@ def _unsharp_mask_single_channel(image, radius, amount, vrange): return result -def unsharp_mask(image, radius=1.0, amount=1.0, preserve_range=False, *, - channel_axis=None): +def unsharp_mask( + image, radius=1.0, amount=1.0, preserve_range=False, *, channel_axis=None +): """Unsharp masking filter. 
The sharp details are identified as the difference between the original @@ -134,7 +135,8 @@ def unsharp_mask(image, radius=1.0, amount=1.0, preserve_range=False, *, for channel in range(image.shape[channel_axis]): sl = utils.slice_at_axis(channel, channel_axis) result[sl] = _unsharp_mask_single_channel( - fimg[sl], radius, amount, vrange) + fimg[sl], radius, amount, vrange + ) return result else: return _unsharp_mask_single_channel(fimg, radius, amount, vrange) diff --git a/python/cucim/src/cucim/skimage/filters/_window.py b/python/cucim/src/cucim/skimage/filters/_window.py index b5cb409bd..1eed578c3 100644 --- a/python/cucim/src/cucim/skimage/filters/_window.py +++ b/python/cucim/src/cucim/skimage/filters/_window.py @@ -129,4 +129,4 @@ def window(window_type, shape, warp_kwargs=None): if warp_kwargs is None: warp_kwargs = {} - return warp(w, coords, mode='constant', cval=0.0, **warp_kwargs) + return warp(w, coords, mode="constant", cval=0.0, **warp_kwargs) diff --git a/python/cucim/src/cucim/skimage/filters/edges.py b/python/cucim/src/cucim/skimage/filters/edges.py index 45a8c99a9..8966f6cd8 100644 --- a/python/cucim/src/cucim/skimage/filters/edges.py +++ b/python/cucim/src/cucim/skimage/filters/edges.py @@ -33,8 +33,7 @@ PREWITT_EDGE = np.array([1, 0, -1]) PREWITT_SMOOTH = np.full((3,), 1 / 3) -HPREWITT_WEIGHTS = (PREWITT_EDGE.reshape((3, 1)) - * PREWITT_SMOOTH.reshape((1, 3))) +HPREWITT_WEIGHTS = PREWITT_EDGE.reshape((3, 1)) * PREWITT_SMOOTH.reshape((1, 3)) VPREWITT_WEIGHTS = HPREWITT_WEIGHTS.T # 2D-only filter weights @@ -137,8 +136,16 @@ def _reshape_nd(arr, ndim, dim): return cp.reshape(arr, kernel_shape) -def _generic_edge_filter(image, *, smooth_weights, edge_weights=[1, 0, -1], - axis=None, mode='reflect', cval=0.0, mask=None): +def _generic_edge_filter( + image, + *, + smooth_weights, + edge_weights=[1, 0, -1], + axis=None, + mode="reflect", + cval=0.0, + mask=None, +): """Apply a generic, n-dimensional edge filter. The filter is computed by applying the edge weights along one dimension @@ -181,7 +188,7 @@ def _generic_edge_filter(image, *, smooth_weights, edge_weights=[1, 0, -1], axes = axis return_magnitude = len(axes) > 1 - if image.dtype.kind == 'f': + if image.dtype.kind == "f": float_dtype = _supported_float_type(image.dtype) image = image.astype(float_dtype, copy=False) else: @@ -200,14 +207,19 @@ def _generic_edge_filter(image, *, smooth_weights, edge_weights=[1, 0, -1], # moderately faster for large 2D images and substantially # faster in 3D and higher dimensions. for i, edge_dim in enumerate(axes): - ax_output = ndi.convolve1d(image, edge_weights, axis=edge_dim, - mode=mode, output=float_dtype) + ax_output = ndi.convolve1d( + image, edge_weights, axis=edge_dim, mode=mode, output=float_dtype + ) smooth_axes = list(set(range(ndim)) - {edge_dim}) for smooth_dim in smooth_axes: # TODO: why did this benchmark slower if output=ax_output was used? - ax_output = ndi.convolve1d(ax_output, smooth_weights, - axis=smooth_dim, mode=mode, - output=float_dtype) + ax_output = ndi.convolve1d( + ax_output, + smooth_weights, + axis=smooth_dim, + mode=mode, + output=float_dtype, + ) if return_magnitude: ax_output *= ax_output if i == 0: @@ -220,7 +232,7 @@ def _generic_edge_filter(image, *, smooth_weights, edge_weights=[1, 0, -1], return output -def sobel(image, mask=None, *, axis=None, mode='reflect', cval=0.0): +def sobel(image, mask=None, *, axis=None, mode="reflect", cval=0.0): """Find edges in an image using the Sobel filter. 
Parameters @@ -271,8 +283,9 @@ def sobel(image, mask=None, *, axis=None, mode='reflect', cval=0.0): >>> camera = cp.array(data.camera()) >>> edges = filters.sobel(camera) """ - output = _generic_edge_filter(image, smooth_weights=SOBEL_SMOOTH, - axis=axis, mode=mode, cval=cval) + output = _generic_edge_filter( + image, smooth_weights=SOBEL_SMOOTH, axis=axis, mode=mode, cval=cval + ) output = _mask_filter_result(output, mask) return output @@ -337,7 +350,7 @@ def sobel_v(image, mask=None): return sobel(image, mask=mask, axis=1) -def scharr(image, mask=None, *, axis=None, mode='reflect', cval=0.0): +def scharr(image, mask=None, *, axis=None, mode="reflect", cval=0.0): """Find the edge magnitude using the Scharr transform. Parameters @@ -393,8 +406,9 @@ def scharr(image, mask=None, *, axis=None, mode='reflect', cval=0.0): >>> camera = cp.array(data.camera()) >>> edges = filters.scharr(camera) """ - output = _generic_edge_filter(image, smooth_weights=SCHARR_SMOOTH, - axis=axis, mode=mode, cval=cval) + output = _generic_edge_filter( + image, smooth_weights=SCHARR_SMOOTH, axis=axis, mode=mode, cval=cval + ) output = _mask_filter_result(output, mask) return output @@ -468,7 +482,7 @@ def scharr_v(image, mask=None): return scharr(image, mask=mask, axis=1) -def prewitt(image, mask=None, *, axis=None, mode='reflect', cval=0.0): +def prewitt(image, mask=None, *, axis=None, mode="reflect", cval=0.0): """Find the edge magnitude using the Prewitt transform. Parameters @@ -521,8 +535,9 @@ def prewitt(image, mask=None, *, axis=None, mode='reflect', cval=0.0): >>> camera = cp.array(data.camera()) >>> edges = filters.prewitt(camera) """ - output = _generic_edge_filter(image, smooth_weights=PREWITT_SMOOTH, - axis=axis, mode=mode, cval=cval) + output = _generic_edge_filter( + image, smooth_weights=PREWITT_SMOOTH, axis=axis, mode=mode, cval=cval + ) output = _mask_filter_result(output, mask) return output @@ -661,7 +676,7 @@ def roberts_pos_diag(image, mask=None): """ check_nD(image, 2) - if image.dtype.kind == 'f': + if image.dtype.kind == "f": float_dtype = _supported_float_type(image.dtype) image = image.astype(float_dtype, copy=False) else: @@ -701,7 +716,7 @@ def roberts_neg_diag(image, mask=None): """ check_nD(image, 2) - if image.dtype.kind == 'f': + if image.dtype.kind == "f": float_dtype = _supported_float_type(image.dtype) image = image.astype(float_dtype, copy=False) else: @@ -738,7 +753,7 @@ def laplace(image, ksize=3, mask=None): skimage.restoration.uft.laplacian(). """ - if image.dtype.kind == 'f': + if image.dtype.kind == "f": float_dtype = _supported_float_type(image.dtype) image = image.astype(float_dtype, copy=False) else: @@ -756,7 +771,7 @@ def laplace(image, ksize=3, mask=None): return _mask_filter_result(result, mask) -def farid(image, mask=None, *, axis=None, mode='reflect', cval=0.0): +def farid(image, mask=None, *, axis=None, mode="reflect", cval=0.0): """Find the edge magnitude using the Farid transform. 
Parameters @@ -814,9 +829,14 @@ def farid(image, mask=None, *, axis=None, mode='reflect', cval=0.0): >>> from cucim.skimage import filters >>> edges = filters.farid(camera) """ - output = _generic_edge_filter(image, smooth_weights=farid_smooth, - edge_weights=farid_edge, axis=axis, - mode=mode, cval=cval) + output = _generic_edge_filter( + image, + smooth_weights=farid_smooth, + edge_weights=farid_edge, + axis=axis, + mode=mode, + cval=cval, + ) output = _mask_filter_result(output, mask) return output @@ -852,7 +872,7 @@ def farid_h(image, *, mask=None): Computer Analysis of Images and Patterns, Kiel, Germany. Sep, 1997. """ check_nD(image, 2) - if image.dtype.kind == 'f': + if image.dtype.kind == "f": float_dtype = _supported_float_type(image.dtype) image = image.astype(float_dtype, copy=False) else: @@ -889,7 +909,7 @@ def farid_v(image, *, mask=None): 13(4): 496-508, 2004. :DOI:`10.1109/TIP.2004.823819` """ check_nD(image, 2) - if image.dtype.kind == 'f': + if image.dtype.kind == "f": float_dtype = _supported_float_type(image.dtype) image = image.astype(float_dtype, copy=False) else: diff --git a/python/cucim/src/cucim/skimage/filters/lpi_filter.py b/python/cucim/src/cucim/skimage/filters/lpi_filter.py index e443a0723..498373e73 100644 --- a/python/cucim/src/cucim/skimage/filters/lpi_filter.py +++ b/python/cucim/src/cucim/skimage/filters/lpi_filter.py @@ -84,9 +84,7 @@ def _prepare(self, data): dshape = np.array(data.shape) # all filter dimensions must be uneven even_offset = tuple(int(s % 2 == 0) for s in data.shape) - dshape = tuple( - s + offset for s, offset in zip(data.shape, even_offset) - ) + dshape = tuple(s + offset for s, offset in zip(data.shape, even_offset)) oshape = tuple(s * 2 - 1 for s in data.shape) @@ -95,8 +93,10 @@ def _prepare(self, data): if self._cache is None or np.any(self._cache.shape != oshape): coords = cp.mgrid[ - [slice(0 + offset, float(n + offset)) - for (n, offset) in zip(dshape, even_offset)] + [ + slice(0 + offset, float(n + offset)) + for (n, offset) in zip(dshape, even_offset) + ] ] # this steps over two sets of coordinates, # not over the coordinates individually @@ -105,8 +105,9 @@ def _prepare(self, data): coords = coords.reshape(2, -1).T # coordinate pairs (r,c) coords = coords.astype(float_dtype, copy=False) - f = self.impulse_response(coords[:, 0], coords[:, 1], - **self.filter_params).reshape(dshape) + f = self.impulse_response( + coords[:, 0], coords[:, 1], **self.filter_params + ).reshape(dshape) f = _pad(f, oshape) F = fft.fftn(f) @@ -127,15 +128,16 @@ def __call__(self, data): data : (M, N) ndarray """ - check_nD(data, 2, 'data') + check_nD(data, 2, "data") F, G = self._prepare(data) out = fft.ifftn(F * G) out = cp.abs(_center(out, data.shape)) return out -def filter_forward(data, impulse_response=None, filter_params=None, - predefined_filter=None): +def filter_forward( + data, impulse_response=None, filter_params=None, predefined_filter=None +): """Apply the given filter to data. 
Parameters @@ -166,7 +168,7 @@ def filter_forward(data, impulse_response=None, filter_params=None, """ if filter_params is None: filter_params = {} - check_nD(data, 2, 'data') + check_nD(data, 2, "data") if predefined_filter is None: predefined_filter = LPIFilter2D(impulse_response, **filter_params) return predefined_filter(data) @@ -177,14 +179,25 @@ def filter_forward(data, impulse_response=None, filter_params=None, deprecated_version="", removed_version="2023.12.00", ) -def inverse(data, impulse_response=None, filter_params=None, max_gain=2, - predefined_filter=None): - return filter_inverse(data, impulse_response, filter_params, - max_gain, predefined_filter) - - -def filter_inverse(data, impulse_response=None, filter_params=None, max_gain=2, - predefined_filter=None): +def inverse( + data, + impulse_response=None, + filter_params=None, + max_gain=2, + predefined_filter=None, +): + return filter_inverse( + data, impulse_response, filter_params, max_gain, predefined_filter + ) + + +def filter_inverse( + data, + impulse_response=None, + filter_params=None, + max_gain=2, + predefined_filter=None, +): """Apply the filter in reverse to the given data. Parameters @@ -210,7 +223,7 @@ def filter_inverse(data, impulse_response=None, filter_params=None, max_gain=2, """ if filter_params is None: filter_params = {} - check_nD(data, 2, 'data') + check_nD(data, 2, "data") if predefined_filter is None: filt = LPIFilter2D(impulse_response, **filter_params) else: @@ -226,8 +239,13 @@ def filter_inverse(data, impulse_response=None, filter_params=None, max_gain=2, return _center(cp.abs(fft.ifftshift(fft.ifftn(G * F))), data.shape) -def wiener(data, impulse_response=None, filter_params=None, K=0.25, - predefined_filter=None): +def wiener( + data, + impulse_response=None, + filter_params=None, + K=0.25, + predefined_filter=None, +): """Minimum Mean Square Error (Wiener) inverse filter. Parameters @@ -251,10 +269,10 @@ def wiener(data, impulse_response=None, filter_params=None, K=0.25, """ if filter_params is None: filter_params = {} - check_nD(data, 2, 'data') + check_nD(data, 2, "data") if not isinstance(K, float): - check_nD(K, 2, 'K') + check_nD(K, 2, "K") if predefined_filter is None: filt = LPIFilter2D(impulse_response, **filter_params) diff --git a/python/cucim/src/cucim/skimage/filters/ridges.py b/python/cucim/src/cucim/skimage/filters/ridges.py index 3615278bc..0a979d7f2 100644 --- a/python/cucim/src/cucim/skimage/filters/ridges.py +++ b/python/cucim/src/cucim/skimage/filters/ridges.py @@ -15,15 +15,23 @@ class of ridge filters relies on the eigenvalues of the Hessian matrix of import numpy as np from .._shared.utils import _supported_float_type, check_nD, deprecate_func -from ..feature.corner import (_symmetric_compute_eigenvalues, hessian_matrix, - hessian_matrix_eigvals) +from ..feature.corner import ( + _symmetric_compute_eigenvalues, + hessian_matrix, + hessian_matrix_eigvals, +) from ..util import img_as_float @deprecate_func(deprecated_version="", removed_version="2023.06.01") -def compute_hessian_eigenvalues(image, sigma, sorting='none', - mode='constant', cval=0, - use_gaussian_derivatives=False): +def compute_hessian_eigenvalues( + image, + sigma, + sorting="none", + mode="constant", + cval=0, + use_gaussian_derivatives=False, +): """ Compute Hessian eigenvalues of nD images. 
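The ridge filters reformatted in the hunks below (`meijering`, `sato`, `frangi`, `hessian`) share a common calling convention: a `sigmas` scale sweep plus `black_ridges`, `mode`, and `cval`. A brief sketch, with random data standing in for a real vessel image:

```python
import cupy as cp
from cucim.skimage.filters import frangi, meijering, sato

image = cp.random.random((256, 256)).astype(cp.float32)  # stand-in data

# Each filter computes Hessian eigenvalues at every scale in `sigmas`
# and returns the pixel-wise maximum response across scales.
vessels = frangi(image, sigmas=range(1, 10, 2), black_ridges=True)
neurites = meijering(image, sigmas=range(1, 10, 2))
tubes = sato(image, sigmas=range(1, 10, 2), mode="reflect")
```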
@@ -65,8 +73,11 @@ def compute_hessian_eigenvalues(image, sigma, sorting='none', # Make nD hessian hessian_matrix_kwargs = dict( - sigma=sigma, order='rc', mode=mode, cval=cval, - use_gaussian_derivatives=use_gaussian_derivatives + sigma=sigma, + order="rc", + mode=mode, + cval=cval, + use_gaussian_derivatives=use_gaussian_derivatives, ) hessian_elements = hessian_matrix(image, **hessian_matrix_kwargs) @@ -78,14 +89,13 @@ def compute_hessian_eigenvalues(image, sigma, sorting='none', # Compute Hessian eigenvalues hessian_eigenvalues = hessian_matrix_eigvals(hessian_elements) - if sorting == 'abs': - + if sorting == "abs": # Sort eigenvalues by absolute values in ascending order hessian_eigenvalues = cp.take_along_axis( - hessian_eigenvalues, abs(hessian_eigenvalues).argsort(0), 0) - - elif sorting == 'val': + hessian_eigenvalues, abs(hessian_eigenvalues).argsort(0), 0 + ) + elif sorting == "val": # Sort eigenvalues by values in ascending order hessian_eigenvalues = cp.sort(hessian_eigenvalues, axis=0) @@ -110,11 +120,18 @@ def _get_circulant_init_kernel(ndim, alpha): "", "raw F out", operation=operation, - name=f"cucim_circulant_init_{ndim}d_alpha{int(1000*alpha)}") + name=f"cucim_circulant_init_{ndim}d_alpha{int(1000*alpha)}", + ) -def meijering(image, sigmas=range(1, 10, 2), alpha=None, - black_ridges=True, mode='reflect', cval=0): +def meijering( + image, + sigmas=range(1, 10, 2), + alpha=None, + black_ridges=True, + mode="reflect", + cval=0, +): """ Filter an image with the Meijering neuriteness filter. @@ -179,8 +196,9 @@ def meijering(image, sigmas=range(1, 10, 2), alpha=None, # from different (sigma) scales filtered_max = cp.zeros_like(image) for sigma in sigmas: # Filter for all sigmas. - H = hessian_matrix(image, sigma, mode=mode, cval=cval, - use_gaussian_derivatives=True) + H = hessian_matrix( + image, sigma, mode=mode, cval=cval, use_gaussian_derivatives=True + ) eigvals = hessian_matrix_eigvals(H) # cucim's hessian_matrix differs numerically from the one in skimage. @@ -192,8 +210,7 @@ def meijering(image, sigmas=range(1, 10, 2), alpha=None, # Compute normalized eigenvalues l_i = e_i + sum_{j!=i} alpha * e_j. vals = cp.tensordot(mtx, eigvals, 1) # Get largest normalized eigenvalue (by magnitude) at each pixel. - vals = cp.take_along_axis( - vals, abs(vals).argmax(0)[None], 0).squeeze(0) + vals = cp.take_along_axis(vals, abs(vals).argmax(0)[None], 0).squeeze(0) # Remove negative values. vals = cp.maximum(vals, 0) # Normalize to max = 1 (unless everything is already zero). @@ -205,8 +222,9 @@ def meijering(image, sigmas=range(1, 10, 2), alpha=None, return filtered_max # Return pixel-wise max over all sigmas. -def sato(image, sigmas=range(1, 10, 2), black_ridges=True, - mode='reflect', cval=0): +def sato( + image, sigmas=range(1, 10, 2), black_ridges=True, mode="reflect", cval=0 +): """ Filter an image with the Sato tubeness filter. @@ -262,8 +280,9 @@ def sato(image, sigmas=range(1, 10, 2), black_ridges=True, # from different (sigma) scales filtered_max = cp.zeros_like(image) for sigma in sigmas: # Filter for all sigmas. - H = hessian_matrix(image, sigma, mode=mode, cval=cval, - use_gaussian_derivatives=True) + H = hessian_matrix( + image, sigma, mode=mode, cval=cval, use_gaussian_derivatives=True + ) eigvals = hessian_matrix_eigvals(H) # cucim's hessian_matrix differs numerically from the one in skimage. @@ -278,7 +297,7 @@ def sato(image, sigmas=range(1, 10, 2), black_ridges=True, # to 0, multiplied by sigma^2. 
eigvals = eigvals[:-1] vals = cp.prod(cp.maximum(eigvals, 0), 0) ** (1 / len(eigvals)) - vals *= sigma ** 2 + vals *= sigma**2 filtered_max = cp.maximum(filtered_max, vals) return filtered_max # Return pixel-wise max over all sigmas. @@ -286,23 +305,23 @@ def sato(image, sigmas=range(1, 10, 2), black_ridges=True, @cp.memoize(for_each_device=True) def _get_frangi2d_sum_kernel(): return cp.ElementwiseKernel( - in_params='F lambda1, F lambda2', # noqa - out_params='F r_g', + in_params="F lambda1, F lambda2", # noqa + out_params="F r_g", operation=""" // Compute sensitivity to areas of high variance/texture/structure, // see equation (12)in reference [1]_ r_g = lambda1 * lambda1; r_g += lambda2 * lambda2; """, - name='cucim_skimage_filters_frangi3d_inner' + name="cucim_skimage_filters_frangi3d_inner", ) @cp.memoize(for_each_device=True) def _get_frangi2d_inner_kernel(): return cp.ElementwiseKernel( - in_params='F lambda1, F lambda2, F r_g, float64 beta_sq, float64 gamma_sq', # noqa - out_params='F result', + in_params="F lambda1, F lambda2, F r_g, float64 beta_sq, float64 gamma_sq", # noqa + out_params="F result", operation=""" F r_b; @@ -321,15 +340,15 @@ def _get_frangi2d_inner_kernel(): result = exp(-r_b / beta_sq); result *= 1.0 - exp(-r_g / gamma_sq); """, - name='cucim_skimage_filters_frangi2d_inner' + name="cucim_skimage_filters_frangi2d_inner", ) @cp.memoize(for_each_device=True) def _get_frangi3d_sum_kernel(): return cp.ElementwiseKernel( - in_params='F lambda1, F lambda2, F lambda3', # noqa - out_params='F r_g', + in_params="F lambda1, F lambda2, F lambda3", # noqa + out_params="F r_g", operation=""" // Compute sensitivity to areas of high variance/texture/structure, // see equation (12)in reference [1]_ @@ -337,15 +356,15 @@ def _get_frangi3d_sum_kernel(): r_g += lambda2 * lambda2; r_g += lambda3 * lambda3; """, - name='cucim_skimage_filters_frangi3d_inner' + name="cucim_skimage_filters_frangi3d_inner", ) @cp.memoize(for_each_device=True) def _get_frangi3d_inner_kernel(): return cp.ElementwiseKernel( - in_params='F lambda1, F lambda2, F lambda3, F r_g, float64 alpha_sq, float64 beta_sq, float64 gamma_sq', # noqa - out_params='F result', + in_params="F lambda1, F lambda2, F lambda3, F r_g, float64 alpha_sq, float64 beta_sq, float64 gamma_sq", # noqa + out_params="F result", operation=""" F r_a, r_b; @@ -373,13 +392,22 @@ def _get_frangi3d_inner_kernel(): result *= 1.0 - exp(-r_g / gamma_sq); """, - name='cucim_skimage_filters_frangi3d_inner' + name="cucim_skimage_filters_frangi3d_inner", ) -def frangi(image, sigmas=range(1, 10, 2), scale_range=None, - scale_step=None, alpha=0.5, beta=0.5, gamma=None, - black_ridges=True, mode='reflect', cval=0): +def frangi( + image, + sigmas=range(1, 10, 2), + scale_range=None, + scale_step=None, + alpha=0.5, + beta=0.5, + gamma=None, + black_ridges=True, + mode="reflect", + cval=0, +): """ Filter an image with the Frangi vesselness filter. @@ -449,9 +477,11 @@ def frangi(image, sigmas=range(1, 10, 2), scale_range=None, .. [3] Ellis, D. 
G.: https://github.com/ellisdg/frangi3d/tree/master/frangi """ if scale_range is not None and scale_step is not None: - warn('Use keyword parameter `sigmas` instead of `scale_range` and ' - '`scale_range` which will be removed in version 0.17.', - stacklevel=2) + warn( + "Use keyword parameter `sigmas` instead of `scale_range` and " + "`scale_step` which will be removed in version 0.17.", + stacklevel=2, + ) sigmas = np.arange(scale_range[0], scale_range[1], scale_step) check_nD(image, [2, 3]) # Check image dimensions. @@ -473,13 +503,14 @@ def frangi(image, sigmas=range(1, 10, 2), scale_range=None, vals = cp.empty(image.shape, dtype=image.dtype) ev_sq_sum = cp.empty_like(vals) for i, sigma in enumerate(sigmas): # Filter for all sigmas. - H = hessian_matrix(image, sigma, mode=mode, cval=cval, - use_gaussian_derivatives=True) + H = hessian_matrix( + image, sigma, mode=mode, cval=cval, use_gaussian_derivatives=True + ) # Use _symmetric_compute_eigenvalues rather than # hessian_matrix_eigvals so we can directly sort by ascending magnitude eigvals = _symmetric_compute_eigenvalues( - H, sort='ascending', abs_sort=True + H, sort="ascending", abs_sort=True ) # compute squared sum of the eigenvalues @@ -504,8 +535,14 @@ def frangi(image, sigmas=range(1, 10, 2), scale_range=None, ) else: inner_kernel( - eigvals[0], eigvals[1], eigvals[2], ev_sq_sum, alpha_sq, - beta_sq, gamma_sq, vals + eigvals[0], + eigvals[1], + eigvals[2], + ev_sq_sum, + alpha_sq, + beta_sq, + gamma_sq, + vals, ) # Store maximum value from different (sigma) scales @@ -516,9 +553,18 @@ def frangi(image, sigmas=range(1, 10, 2), scale_range=None, return filtered_max # Return pixel-wise max over all sigmas. -def hessian(image, sigmas=range(1, 10, 2), scale_range=None, scale_step=None, - alpha=0.5, beta=0.5, gamma=15, black_ridges=True, mode='reflect', - cval=0): +def hessian( + image, + sigmas=range(1, 10, 2), + scale_range=None, + scale_step=None, + alpha=0.5, + beta=0.5, + gamma=15, + black_ridges=True, + mode="reflect", + cval=0, +): """Filter an image with the Hybrid Hessian filter. This filter can be used to detect continuous edges, e.g. vessels, @@ -579,10 +625,18 @@ def hessian(image, sigmas=range(1, 10, 2), scale_range=None, scale_step=None, :DOI:`10.1007/978-3-319-16811-1_40` .. [2] Kroon, D. J.: Hessian based Frangi vesselness filter.
""" - filtered = frangi(image, sigmas=sigmas, scale_range=scale_range, - scale_step=scale_step, alpha=alpha, beta=beta, - gamma=gamma, black_ridges=black_ridges, mode=mode, - cval=cval) + filtered = frangi( + image, + sigmas=sigmas, + scale_range=scale_range, + scale_step=scale_step, + alpha=alpha, + beta=beta, + gamma=gamma, + black_ridges=black_ridges, + mode=mode, + cval=cval, + ) filtered[filtered <= 0] = 1 return filtered diff --git a/python/cucim/src/cucim/skimage/filters/tests/test_correlate.py b/python/cucim/src/cucim/skimage/filters/tests/test_correlate.py index de247b0e0..0f354a577 100644 --- a/python/cucim/src/cucim/skimage/filters/tests/test_correlate.py +++ b/python/cucim/src/cucim/skimage/filters/tests/test_correlate.py @@ -8,16 +8,21 @@ def test_correlate_sparse_valid_mode(): - image = cp.array([[0, 0, 1, 3, 5], - [0, 1, 4, 3, 4], - [1, 2, 5, 4, 1], - [2, 4, 5, 2, 1], - [4, 5, 1, 0, 0]], dtype=float) + image = cp.array( + [ + [0, 0, 1, 3, 5], + [0, 1, 4, 3, 4], + [1, 2, 5, 4, 1], + [2, 4, 5, 2, 1], + [4, 5, 1, 0, 0], + ], + dtype=float, + ) kernel = cp.array([0, 1, 2, 4, 8, 16, 32, 64, 128]).reshape((3, 3)) cs_output = correlate_sparse(image, kernel, mode="valid") - ndi_output = ndi.correlate(image, kernel, mode='wrap') + ndi_output = ndi.correlate(image, kernel, mode="wrap") ndi_output = ndi_output[1:4, 1:4] assert_array_equal(cs_output, ndi_output) @@ -28,11 +33,16 @@ def test_correlate_sparse_valid_mode(): "dtype", [cp.uint16, cp.int32, cp.float16, cp.float32, cp.float64] ) def test_correlate_sparse(mode, dtype): - image = cp.array([[0, 0, 1, 3, 5], - [0, 1, 4, 3, 4], - [1, 2, 5, 4, 1], - [2, 4, 5, 2, 1], - [4, 5, 1, 0, 0]], dtype=dtype) + image = cp.array( + [ + [0, 0, 1, 3, 5], + [0, 1, 4, 3, 4], + [1, 2, 5, 4, 1], + [2, 4, 5, 2, 1], + [4, 5, 1, 0, 0], + ], + dtype=dtype, + ) kernel = cp.array([0, 1, 2, 4, 8, 16, 32, 64, 128]).reshape((3, 3)) @@ -46,11 +56,16 @@ def test_correlate_sparse(mode, dtype): @pytest.mark.parametrize("mode", ["nearest", "reflect", "mirror"]) def test_correlate_sparse_invalid_kernel(mode): - image = cp.array([[0, 0, 1, 3, 5], - [0, 1, 4, 3, 4], - [1, 2, 5, 4, 1], - [2, 4, 5, 2, 1], - [4, 5, 1, 0, 0]], dtype=float) + image = cp.array( + [ + [0, 0, 1, 3, 5], + [0, 1, 4, 3, 4], + [1, 2, 5, 4, 1], + [2, 4, 5, 2, 1], + [4, 5, 1, 0, 0], + ], + dtype=float, + ) # invalid kernel size invalid_kernel = cp.array([0, 1, 2, 4]).reshape((2, 2)) with pytest.raises(ValueError): diff --git a/python/cucim/src/cucim/skimage/filters/tests/test_edges.py b/python/cucim/src/cucim/skimage/filters/tests/test_edges.py index ca1c0c5d3..033161286 100644 --- a/python/cucim/src/cucim/skimage/filters/tests/test_edges.py +++ b/python/cucim/src/cucim/skimage/filters/tests/test_edges.py @@ -12,21 +12,24 @@ from cucim.skimage.filters.edges import _mask_filter_result -@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64]) def test_roberts_zeros(dtype): """Roberts' filter on an array of all zeros.""" - result = filters.roberts(cp.zeros((10, 10), dtype=dtype), - cp.ones((10, 10), bool)) + result = filters.roberts( + cp.zeros((10, 10), dtype=dtype), cp.ones((10, 10), bool) + ) assert result.dtype == _supported_float_type(dtype) assert cp.all(result == 0) -@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64]) def test_roberts_diagonal1(dtype): """Roberts' filter on a diagonal edge should be a diagonal 
line.""" image = cp.tri(10, 10, 0, dtype=dtype) - expected = ~(cp.tri(10, 10, -1).astype(bool) | - cp.tri(10, 10, -2).astype(bool).transpose()) + expected = ~( + cp.tri(10, 10, -1).astype(bool) + | cp.tri(10, 10, -2).astype(bool).transpose() + ) expected[-1, -1] = 0 # due to 'reflect' & image shape, last pixel not edge result = filters.roberts(image) assert result.dtype == _supported_float_type(dtype) @@ -34,8 +37,8 @@ def test_roberts_diagonal1(dtype): @pytest.mark.parametrize( - 'function_name', - ['farid', 'laplace', 'prewitt', 'roberts', 'scharr', 'sobel'] + "function_name", + ["farid", "laplace", "prewitt", "roberts", "scharr", "sobel"], ) def test_int_rescaling(function_name): """Basic test that uint8 inputs get rescaled from [0, 255] to [0, 1.] @@ -53,8 +56,10 @@ def test_int_rescaling(function_name): def test_roberts_diagonal2(): """Roberts' filter on a diagonal edge should be a diagonal line.""" image = cp.rot90(cp.tri(10, 10, 0), 3) - expected = ~cp.rot90(cp.tri(10, 10, -1).astype(bool) | - cp.tri(10, 10, -2).astype(bool).transpose()) + expected = ~cp.rot90( + cp.tri(10, 10, -1).astype(bool) + | cp.tri(10, 10, -2).astype(bool).transpose() + ) expected = _mask_filter_result(expected, None) result = filters.roberts(image).astype(bool) assert_array_almost_equal(result, expected) @@ -66,12 +71,12 @@ def test_sobel_zeros(): assert cp.all(result == 0) -@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64]) def test_sobel_mask(dtype): """Sobel on a masked array should be zero.""" result = filters.sobel( cp.random.uniform(size=(10, 10)).astype(dtype, copy=False), - cp.zeros((10, 10), dtype=bool) + cp.zeros((10, 10), dtype=bool), ) assert result.dtype == _supported_float_type(dtype) assert cp.all(result == 0) @@ -105,8 +110,9 @@ def test_sobel_h_zeros(): def test_sobel_h_mask(): """Horizontal Sobel on a masked array should be zero.""" - result = filters.sobel_h(cp.random.uniform(size=(10, 10)), - cp.zeros((10, 10), dtype=bool)) + result = filters.sobel_h( + cp.random.uniform(size=(10, 10)), cp.zeros((10, 10), dtype=bool) + ) assert cp.all(result == 0) @@ -136,8 +142,9 @@ def test_sobel_v_zeros(): def test_sobel_v_mask(): """Vertical Sobel on a masked array should be zero.""" - result = filters.sobel_v(cp.random.uniform(size=(10, 10)), - cp.zeros((10, 10), dtype=bool)) + result = filters.sobel_v( + cp.random.uniform(size=(10, 10)), cp.zeros((10, 10), dtype=bool) + ) assert_allclose(result, 0) @@ -165,11 +172,13 @@ def test_scharr_zeros(): assert cp.all(result < 1e-16) -@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64]) def test_scharr_mask(dtype): """Scharr on a masked array should be zero.""" - result = filters.scharr(cp.random.uniform(size=(10, 10)).astype(dtype), - cp.zeros((10, 10), dtype=bool)) + result = filters.scharr( + cp.random.uniform(size=(10, 10)).astype(dtype), + cp.zeros((10, 10), dtype=bool), + ) assert result.dtype == _supported_float_type(dtype) assert_allclose(result, 0) @@ -195,15 +204,15 @@ def test_scharr_vertical(): def test_scharr_h_zeros(): """Horizontal Scharr on an array of all zeros.""" - result = filters.scharr_h(cp.zeros((10, 10)), - cp.ones((10, 10), dtype=bool)) + result = filters.scharr_h(cp.zeros((10, 10)), cp.ones((10, 10), dtype=bool)) assert_allclose(result, 0) def test_scharr_h_mask(): """Horizontal Scharr on a masked array should be zero.""" - result = 
filters.scharr_h(cp.random.uniform(size=(10, 10)), - cp.zeros((10, 10), dtype=bool)) + result = filters.scharr_h( + cp.random.uniform(size=(10, 10)), cp.zeros((10, 10), dtype=bool) + ) assert_allclose(result, 0) @@ -227,15 +236,15 @@ def test_scharr_h_vertical(): def test_scharr_v_zeros(): """Vertical Scharr on an array of all zeros.""" - result = filters.scharr_v(cp.zeros((10, 10)), - cp.ones((10, 10), dtype=bool)) + result = filters.scharr_v(cp.zeros((10, 10)), cp.ones((10, 10), dtype=bool)) assert_allclose(result, 0) def test_scharr_v_mask(): """Vertical Scharr on a masked array should be zero.""" - result = filters.scharr_v(cp.random.uniform(size=(10, 10)), - cp.zeros((10, 10), dtype=bool)) + result = filters.scharr_v( + cp.random.uniform(size=(10, 10)), cp.zeros((10, 10), dtype=bool) + ) assert_allclose(result, 0) @@ -259,16 +268,17 @@ def test_scharr_v_horizontal(): def test_prewitt_zeros(): """Prewitt on an array of all zeros.""" - result = filters.prewitt(cp.zeros((10, 10)), - cp.ones((10, 10), dtype=bool)) + result = filters.prewitt(cp.zeros((10, 10)), cp.ones((10, 10), dtype=bool)) assert_allclose(result, 0) -@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64]) def test_prewitt_mask(dtype): """Prewitt on a masked array should be zero.""" - result = filters.prewitt(cp.random.uniform(size=(10, 10)).astype(dtype), - cp.zeros((10, 10), dtype=bool)) + result = filters.prewitt( + cp.random.uniform(size=(10, 10)).astype(dtype), + cp.zeros((10, 10), dtype=bool), + ) assert result.dtype == _supported_float_type(dtype) assert_allclose(cp.abs(result), 0) @@ -294,15 +304,17 @@ def test_prewitt_vertical(): def test_prewitt_h_zeros(): """Horizontal prewitt on an array of all zeros.""" - result = filters.prewitt_h(cp.zeros((10, 10)), - cp.ones((10, 10), dtype=bool)) + result = filters.prewitt_h( + cp.zeros((10, 10)), cp.ones((10, 10), dtype=bool) + ) assert_allclose(result, 0) def test_prewitt_h_mask(): """Horizontal prewitt on a masked array should be zero.""" - result = filters.prewitt_h(cp.random.uniform(size=(10, 10)), - cp.zeros((10, 10), dtype=bool)) + result = filters.prewitt_h( + cp.random.uniform(size=(10, 10)), cp.zeros((10, 10), dtype=bool) + ) assert_allclose(result, 0) @@ -326,15 +338,17 @@ def test_prewitt_h_vertical(): def test_prewitt_v_zeros(): """Vertical prewitt on an array of all zeros.""" - result = filters.prewitt_v(cp.zeros((10, 10)), - cp.ones((10, 10), dtype=bool)) + result = filters.prewitt_v( + cp.zeros((10, 10)), cp.ones((10, 10), dtype=bool) + ) assert_allclose(result, 0) def test_prewitt_v_mask(): """Vertical prewitt on a masked array should be zero.""" - result = filters.prewitt_v(cp.random.uniform(size=(10, 10)), - cp.zeros((10, 10), dtype=bool)) + result = filters.prewitt_v( + cp.random.uniform(size=(10, 10)), cp.zeros((10, 10), dtype=bool) + ) assert_allclose(result, 0) @@ -376,7 +390,7 @@ def test_laplace_zeros(): assert_allclose(result, check_result) -@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64]) def test_laplace_mask(dtype): """Laplace on a masked array should be zero.""" # Create a synthetic 2D image @@ -390,18 +404,21 @@ def test_laplace_mask(dtype): def test_farid_zeros(): """Farid on an array of all zeros.""" - result = filters.farid(cp.zeros((10, 10)), - mask=cp.ones((10, 10), dtype=bool)) + result = filters.farid( + cp.zeros((10, 10)), mask=cp.ones((10, 10), 
dtype=bool) + ) assert cp.all(result == 0) -@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64]) def test_farid_mask(dtype): """Farid on a masked array should be zero.""" - result = filters.farid(cp.random.uniform(size=(10, 10)).astype(dtype), - mask=cp.zeros((10, 10), dtype=bool)) + result = filters.farid( + cp.random.uniform(size=(10, 10)).astype(dtype), + mask=cp.zeros((10, 10), dtype=bool), + ) assert result.dtype == _supported_float_type(dtype) - assert (cp.all(result == 0)) + assert cp.all(result == 0) def test_farid_horizontal(): @@ -425,15 +442,17 @@ def test_farid_vertical(): def test_farid_h_zeros(): """Horizontal Farid on an array of all zeros.""" - result = filters.farid_h(cp.zeros((10, 10)), - mask=cp.ones((10, 10), dtype=bool)) - assert (cp.all(result == 0)) + result = filters.farid_h( + cp.zeros((10, 10)), mask=cp.ones((10, 10), dtype=bool) + ) + assert cp.all(result == 0) def test_farid_h_mask(): """Horizontal Farid on a masked array should be zero.""" - result = filters.farid_h(cp.random.uniform(size=(10, 10)), - mask=cp.zeros((10, 10), dtype=bool)) + result = filters.farid_h( + cp.random.uniform(size=(10, 10)), mask=cp.zeros((10, 10), dtype=bool) + ) assert cp.all(result == 0) @@ -457,15 +476,17 @@ def test_farid_h_vertical(): def test_farid_v_zeros(): """Vertical Farid on an array of all zeros.""" - result = filters.farid_v(cp.zeros((10, 10)), - mask=cp.ones((10, 10), dtype=bool)) + result = filters.farid_v( + cp.zeros((10, 10)), mask=cp.ones((10, 10), dtype=bool) + ) assert_allclose(result, 0, atol=1e-10) def test_farid_v_mask(): """Vertical Farid on a masked array should be zero.""" - result = filters.farid_v(cp.random.uniform(size=(10, 10)), - mask=cp.zeros((10, 10), dtype=bool)) + result = filters.farid_v( + cp.random.uniform(size=(10, 10)), mask=cp.zeros((10, 10), dtype=bool) + ) assert_allclose(result, 0) @@ -610,11 +631,13 @@ def test_vertical_mask_line(grad_func): @pytest.mark.parametrize( - ('func', 'max_edge'), - [(filters.prewitt, MAX_SOBEL_ND), - (filters.sobel, MAX_SOBEL_ND), - (filters.scharr, MAX_SCHARR_ND), - (filters.farid, MAX_FARID_ND)] + ("func", "max_edge"), + [ + (filters.prewitt, MAX_SOBEL_ND), + (filters.sobel, MAX_SOBEL_ND), + (filters.scharr, MAX_SCHARR_ND), + (filters.farid, MAX_FARID_ND), + ], ) def test_3d_edge_filters(func, max_edge): blobs = binary_blobs(length=128, n_dim=3, seed=5) @@ -625,17 +648,19 @@ def test_3d_edge_filters(func, max_edge): rtol = 1e-3 else: rtol = 1e-7 - assert_allclose(cp.max(edges), - func(max_edge)[center, center, center], - rtol=rtol) + assert_allclose( + cp.max(edges), func(max_edge)[center, center, center], rtol=rtol + ) @pytest.mark.parametrize( - ('func', 'max_edge'), - [(filters.prewitt, MAX_SOBEL_0), - (filters.sobel, MAX_SOBEL_0), - (filters.scharr, MAX_SOBEL_0), - (filters.farid, MAX_FARID_0)] + ("func", "max_edge"), + [ + (filters.prewitt, MAX_SOBEL_0), + (filters.sobel, MAX_SOBEL_0), + (filters.scharr, MAX_SOBEL_0), + (filters.farid, MAX_FARID_0), + ], ) def test_3d_edge_filters_single_axis(func, max_edge): blobs = binary_blobs(length=128, n_dim=3, seed=5) @@ -646,23 +671,30 @@ def test_3d_edge_filters_single_axis(func, max_edge): rtol = 1e-3 else: rtol = 1e-7 - assert_allclose(cp.max(edges0), - func(max_edge, axis=0)[center, center, center], - rtol=rtol) + assert_allclose( + cp.max(edges0), + func(max_edge, axis=0)[center, center, center], + rtol=rtol, + ) @pytest.mark.parametrize( - 'detector', - [filters.sobel, 
filters.scharr, filters.prewitt, - filters.roberts, filters.farid] + "detector", + [ + filters.sobel, + filters.scharr, + filters.prewitt, + filters.roberts, + filters.farid, + ], ) def test_range(detector): """Output of edge detection should be in [0, 1]""" image = cp.random.random((100, 100)) out = detector(image) assert_( - out.min() >= 0, f'Minimum of `{detector.__name__}` is smaller than 0.' + out.min() >= 0, f"Minimum of `{detector.__name__}` is smaller than 0." ) assert_( - out.max() <= 1, f'Maximum of `{detector.__name__}` is larger than 1.' + out.max() <= 1, f"Maximum of `{detector.__name__}` is larger than 1." ) diff --git a/python/cucim/src/cucim/skimage/filters/tests/test_fft_based.py b/python/cucim/src/cucim/skimage/filters/tests/test_fft_based.py index 47070fefd..402397310 100644 --- a/python/cucim/src/cucim/skimage/filters/tests/test_fft_based.py +++ b/python/cucim/src/cucim/skimage/filters/tests/test_fft_based.py @@ -15,9 +15,10 @@ def _fft_centered(x): return fftmodule.fftshift(fftmodule.fftn(fftmodule.fftshift(x))) -@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64, - cp.uint8, cp.int32]) -@pytest.mark.parametrize('squared_butterworth', [False, True]) +@pytest.mark.parametrize( + "dtype", [cp.float16, cp.float32, cp.float64, cp.uint8, cp.int32] +) +@pytest.mark.parametrize("squared_butterworth", [False, True]) def test_butterworth_2D_zeros_dtypes(dtype, squared_butterworth): im = cp.zeros((4, 4), dtype=dtype) filtered = butterworth(im, squared_butterworth=squared_butterworth) @@ -26,19 +27,21 @@ def test_butterworth_2D_zeros_dtypes(dtype, squared_butterworth): assert_array_equal(im, filtered) -@pytest.mark.parametrize('squared_butterworth', [False, True]) -@pytest.mark.parametrize('high_pass', [False, True]) +@pytest.mark.parametrize("squared_butterworth", [False, True]) +@pytest.mark.parametrize("high_pass", [False, True]) # order chosen large enough that lowpass stopband always approaches 0 -@pytest.mark.parametrize('order', [6, 10]) -@pytest.mark.parametrize('cutoff', [0.2, 0.3]) +@pytest.mark.parametrize("order", [6, 10]) +@pytest.mark.parametrize("cutoff", [0.2, 0.3]) def test_butterworth_cutoff(cutoff, order, high_pass, squared_butterworth): - wfilt = _get_nd_butterworth_filter( - shape=(512, 512), factor=cutoff, order=order, - high_pass=high_pass, real=False, + shape=(512, 512), + factor=cutoff, + order=order, + high_pass=high_pass, + real=False, squared_butterworth=squared_butterworth, ) - # select DC frequence on first axis to get profile along a single axis + # select DC frequency on first axis to get profile along a single axis wfilt_profile = cp.abs(wfilt[0]) wfilt_profile = cp.asnumpy(wfilt_profile) @@ -62,14 +65,14 @@ def test_butterworth_cutoff(cutoff, order, high_pass, squared_butterworth): assert abs(wfilt_profile[f_cutoff] - 1 / math.sqrt(2)) < tol -@pytest.mark.parametrize('cutoff', [-0.01, 0.51]) +@pytest.mark.parametrize("cutoff", [-0.01, 0.51]) def test_butterworth_invalid_cutoff(cutoff): with pytest.raises(ValueError): butterworth(cp.ones((4, 4)), cutoff_frequency_ratio=cutoff) @pytest.mark.parametrize("high_pass", [True, False]) -@pytest.mark.parametrize('squared_butterworth', [False, True]) +@pytest.mark.parametrize("squared_butterworth", [False, True]) def test_butterworth_2D(high_pass, squared_butterworth): # rough check of high-pass vs. 
low-pass behavior via relative energy @@ -110,11 +113,11 @@ def test_butterworth_2D(high_pass, squared_butterworth): @pytest.mark.parametrize("high_pass", [True, False]) -@pytest.mark.parametrize('dtype', [cp.float32, cp.float64]) -@pytest.mark.parametrize('squared_butterworth', [False, True]) +@pytest.mark.parametrize("dtype", [cp.float32, cp.float64]) +@pytest.mark.parametrize("squared_butterworth", [False, True]) def test_butterworth_2D_realfft(high_pass, dtype, squared_butterworth): """Filtering a real-valued array is equivalent to filtering a - complex-valued array where the imaginary part is zero. + complex-valued array where the imaginary part is zero. """ im = cp.random.randn(32, 64).astype(dtype) kwargs = dict( @@ -164,8 +167,8 @@ def test_butterworth_4D_channel(chan, dtype): def test_butterworth_correctness_bw(): small = cp.array(coins()[180:190, 260:270], dtype=float) - filtered = butterworth(small, - cutoff_frequency_ratio=0.2) + filtered = butterworth(small, cutoff_frequency_ratio=0.2) + # fmt: off correct = cp.array( [ [ 28.63019362, -17.69023786, 26.95346957, 20.57423019, -15.1933463 , -28.05828136, -35.25135674, -25.70376951, -43.37121955, -16.87688457], # noqa @@ -180,15 +183,16 @@ def test_butterworth_correctness_bw(): [-50.53430811, 12.14152989, 17.69341877, 9.1858496 , 12.1470914 , 1.45865179, 61.08961357, 29.76775029, -11.04603619, 24.18621404], # noqa ] ) + # fmt: on assert_allclose(filtered, correct) def test_butterworth_correctness_rgb(): small = cp.array(astronaut()[135:145, 205:215], dtype=float) - filtered = butterworth(small, - cutoff_frequency_ratio=0.3, - high_pass=True, - channel_axis=-1) + filtered = butterworth( + small, cutoff_frequency_ratio=0.3, high_pass=True, channel_axis=-1 + ) + # fmt: off correct = cp.array([ [ [-5.30292781e-01, 2.17985072e+00, 2.86622486e+00], # noqa @@ -317,4 +321,5 @@ def test_butterworth_correctness_rgb(): [ 6.17010624e+00, 1.56199152e+01, 1.79889524e+01], # noqa ], ]) + # fmt: on assert_allclose(filtered, correct) diff --git a/python/cucim/src/cucim/skimage/filters/tests/test_gabor.py b/python/cucim/src/cucim/skimage/filters/tests/test_gabor.py index f1e048494..7f3a8c4f2 100644 --- a/python/cucim/src/cucim/skimage/filters/tests/test_gabor.py +++ b/python/cucim/src/cucim/skimage/filters/tests/test_gabor.py @@ -33,13 +33,13 @@ def test_gabor_kernel_bandwidth(): assert kernel.shape == (9, 9) -@pytest.mark.parametrize('dtype', [cp.complex64, cp.complex128]) +@pytest.mark.parametrize("dtype", [cp.complex64, cp.complex128]) def test_gabor_kernel_dtype(dtype): kernel = gabor_kernel(1, bandwidth=1, dtype=dtype) assert kernel.dtype == dtype -@pytest.mark.parametrize('dtype', [cp.uint8, cp.float32]) +@pytest.mark.parametrize("dtype", [cp.uint8, cp.float32]) def test_gabor_kernel_invalid_dtype(dtype): with pytest.raises(ValueError): kernel = gabor_kernel(1, bandwidth=1, dtype=dtype) @@ -55,8 +55,9 @@ def test_gabor_kernel_sum(): for sigma_x in range(1, 10, 2): for sigma_y in range(1, 10, 2): for frequency in range(0, 10, 2): - kernel = gabor_kernel(frequency + 0.1, theta=0, - sigma_x=sigma_x, sigma_y=sigma_y) + kernel = gabor_kernel( + frequency + 0.1, theta=0, sigma_x=sigma_x, sigma_y=sigma_y + ) # make sure gaussian distribution is covered nearly 100% assert_almost_equal(float(cp.abs(kernel).sum()), 1, 2) @@ -66,16 +67,25 @@ def test_gabor_kernel_theta(): for sigma_y in range(1, 10, 2): for frequency in range(0, 10, 2): for theta in range(0, 10, 2): - kernel0 = gabor_kernel(frequency + 0.1, theta=theta, - sigma_x=sigma_x, 
sigma_y=sigma_y) - kernel180 = gabor_kernel(frequency, theta=theta + np.pi, - sigma_x=sigma_x, sigma_y=sigma_y) - - assert_array_almost_equal(cp.abs(kernel0), - cp.abs(kernel180)) - - -@pytest.mark.parametrize('dtype', [cp.float32, cp.float64]) + kernel0 = gabor_kernel( + frequency + 0.1, + theta=theta, + sigma_x=sigma_x, + sigma_y=sigma_y, + ) + kernel180 = gabor_kernel( + frequency, + theta=theta + np.pi, + sigma_x=sigma_x, + sigma_y=sigma_y, + ) + + assert_array_almost_equal( + cp.abs(kernel0), cp.abs(kernel180) + ) + + +@pytest.mark.parametrize("dtype", [cp.float32, cp.float64]) def test_gabor(dtype): Y, X = cp.mgrid[:40, :40] frequencies = (0.1, 0.3) @@ -98,14 +108,14 @@ def match_score(image, frequency): assert responses[1, 1] > responses[1, 0] -@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64]) def test_gabor_float_dtype(dtype): image = cp.ones((16, 16), dtype=dtype) y = gabor(image, 0.3) assert all(arr.dtype == _supported_float_type(image.dtype) for arr in y) -@pytest.mark.parametrize('dtype', [cp.uint8, cp.int32, cp.intp]) +@pytest.mark.parametrize("dtype", [cp.uint8, cp.int32, cp.intp]) def test_gabor_int_dtype(dtype): image = cp.full((16, 16), 128, dtype=dtype) y = gabor(image, 0.3) diff --git a/python/cucim/src/cucim/skimage/filters/tests/test_gaussian.py b/python/cucim/src/cucim/skimage/filters/tests/test_gaussian.py index 15cc7d1c3..73ffe943d 100644 --- a/python/cucim/src/cucim/skimage/filters/tests/test_gaussian.py +++ b/python/cucim/src/cucim/skimage/filters/tests/test_gaussian.py @@ -37,38 +37,51 @@ def test_energy_decrease(): assert gaussian_a.std() < a.std() -@pytest.mark.parametrize('channel_axis', [0, 1, -1]) +@pytest.mark.parametrize("channel_axis", [0, 1, -1]) def test_multichannel(channel_axis): a = np.zeros((5, 5, 3)) a[1, 1] = np.arange(1, 4) a = np.moveaxis(a, -1, channel_axis) a = cp.asarray(a) - gaussian_rgb_a = gaussian(a, sigma=1, mode='reflect', preserve_range=True, - channel_axis=channel_axis) + gaussian_rgb_a = gaussian( + a, + sigma=1, + mode="reflect", + preserve_range=True, + channel_axis=channel_axis, + ) # Check that the mean value is conserved in each channel # (color channels are not mixed together) spatial_axes = tuple( [ax for ax in range(a.ndim) if ax != channel_axis % a.ndim] ) - assert cp.allclose(a.mean(axis=spatial_axes), - gaussian_rgb_a.mean(axis=spatial_axes)) + assert cp.allclose( + a.mean(axis=spatial_axes), gaussian_rgb_a.mean(axis=spatial_axes) + ) if channel_axis % a.ndim == 2: # Test legacy behavior equivalent to old (channel_axis = -1) - with expected_warnings(['Automatic detection of the color channel']): - gaussian_rgb_a = gaussian(a, sigma=1, mode='reflect', - preserve_range=True) + with expected_warnings(["Automatic detection of the color channel"]): + gaussian_rgb_a = gaussian( + a, sigma=1, mode="reflect", preserve_range=True + ) # Check that the mean value is conserved in each channel # (color channels are not mixed together) - assert cp.allclose(a.mean(axis=spatial_axes), - gaussian_rgb_a.mean(axis=spatial_axes)) + assert cp.allclose( + a.mean(axis=spatial_axes), gaussian_rgb_a.mean(axis=spatial_axes) + ) # Iterable sigma - gaussian_rgb_a = gaussian(a, sigma=[1, 2], mode='reflect', - channel_axis=channel_axis, - preserve_range=True) - assert cp.allclose(a.mean(axis=spatial_axes), - gaussian_rgb_a.mean(axis=spatial_axes)) + gaussian_rgb_a = gaussian( + a, + sigma=[1, 2], + mode="reflect", + channel_axis=channel_axis, + 
preserve_range=True,
+    )
+    assert cp.allclose(
+        a.mean(axis=spatial_axes), gaussian_rgb_a.mean(axis=spatial_axes)
+    )
 
 
 def test_preserve_range():
@@ -104,14 +117,13 @@ def test_4d_ok():
     assert cp.allclose(res.sum(), 1)
 
 
-@pytest.mark.parametrize(
-    "dtype", [cp.float32, cp.float64]
-)
+@pytest.mark.parametrize("dtype", [cp.float32, cp.float64])
 def test_preserve_output(dtype):
     image = cp.arange(9, dtype=dtype).reshape((3, 3))
     output = cp.zeros_like(image, dtype=dtype)
-    gaussian_image = gaussian(image, sigma=1, output=output,
-                              preserve_range=True)
+    gaussian_image = gaussian(
+        image, sigma=1, output=output, preserve_range=True
+    )
     assert gaussian_image is output
 
 
@@ -119,8 +131,7 @@ def test_output_error():
     image = cp.arange(9, dtype=cp.float32).reshape((3, 3))
     output = cp.zeros_like(image, dtype=cp.uint8)
     with pytest.raises(ValueError):
-        gaussian(image, sigma=1, output=output,
-                 preserve_range=True)
+        gaussian(image, sigma=1, output=output, preserve_range=True)
 
 
 @pytest.mark.parametrize("s", [1, (2, 3)])
@@ -169,9 +180,9 @@ def test_dog_invalid_sigma2():
 
 
 @pytest.mark.parametrize(
-    'dtype', [cp.uint8, cp.float16, cp.float32, cp.float64]
+    "dtype", [cp.uint8, cp.float16, cp.float32, cp.float64]
 )
-@pytest.mark.parametrize('sigma', range(1, 40, 5))
+@pytest.mark.parametrize("sigma", range(1, 40, 5))
 def test_shared_mem_check_fix(dtype, sigma):
     # Verify fix for gh-408 (no compilation errors occur).
     # Prior to the fix in gh-409, some float64 cases failed.
@@ -188,7 +199,7 @@ def test_deprecated_automatic_channel_detection():
     # Warning is raised if channel_axis is not set and shape is (M, N, 3)
     with pytest.warns(
         FutureWarning,
-        match="Automatic detection .* was deprecated .* Set `channel_axis=-1`"
+        match="Automatic detection .* was deprecated .* Set `channel_axis=-1`",
     ):
         filtered_rgb = gaussian(rgb, sigma=1, mode="reflect")
     # Check that the mean value is conserved in each channel
@@ -207,4 +218,5 @@
 
     # Check how the proxy value shows up in the rendered function signature
     from cucim.skimage._shared.filters import ChannelAxisNotSet
+
     assert repr(ChannelAxisNotSet) == "<ChannelAxisNotSet>"
diff --git a/python/cucim/src/cucim/skimage/filters/tests/test_lpi_filter.py b/python/cucim/src/cucim/skimage/filters/tests/test_lpi_filter.py
index 846314e44..f9a7e7d4d 100644
--- a/python/cucim/src/cucim/skimage/filters/tests/test_lpi_filter.py
+++ b/python/cucim/src/cucim/skimage/filters/tests/test_lpi_filter.py
@@ -16,14 +16,14 @@ def setup_method(self):
         self.f = LPIFilter2D(self.filt_func)
 
     @pytest.mark.parametrize(
-        'c_slice', [slice(None), slice(0, -5), slice(0, -20)]
+        "c_slice", [slice(None), slice(0, -5), slice(0, -20)]
     )
     def test_ip_shape(self, c_slice):
         x = self.img[:, c_slice]
         assert self.f(x).shape == x.shape
 
     @pytest.mark.parametrize(
-        'dtype', [cp.uint8, cp.float16, cp.float32, cp.float64]
+        "dtype", [cp.uint8, cp.float16, cp.float32, cp.float64]
     )
     def test_filter_inverse(self, dtype):
         img = self.img.astype(dtype, copy=False)
@@ -47,7 +47,7 @@ def test_filter_inverse(self, dtype):
         assert (g - g1[::-1, ::-1]).sum() < 55
 
     @pytest.mark.parametrize(
-        'dtype', [cp.uint8, cp.float16, cp.float32, cp.float64]
+        "dtype", [cp.uint8, cp.float16, cp.float32, cp.float64]
    )
     def test_wiener(self, dtype):
         img = self.img.astype(dtype, copy=False)
diff --git a/python/cucim/src/cucim/skimage/filters/tests/test_median.py b/python/cucim/src/cucim/skimage/filters/tests/test_median.py
index 324ee27c5..e104640c1 100644
--- a/python/cucim/src/cucim/skimage/filters/tests/test_median.py
+++ b/python/cucim/src/cucim/skimage/filters/tests/test_median.py @@ -20,12 +20,16 @@ def prod(x): @pytest.fixture def image(): - return cp.array([[1, 2, 3, 2, 1], - [1, 1, 2, 2, 3], - [3, 2, 1, 2, 1], - [3, 2, 1, 1, 1], - [1, 2, 1, 2, 3]], - dtype=cp.uint8) + return cp.array( + [ + [1, 2, 3, 2, 1], + [1, 1, 2, 2, 3], + [3, 2, 1, 2, 1], + [3, 2, 1, 1, 1], + [1, 2, 1, 2, 3], + ], + dtype=cp.uint8, + ) @pytest.fixture @@ -33,16 +37,17 @@ def camera(): return cp.array(data.camera()) -# TODO: mode='rank' disabled until it has been implmented +# TODO: mode='rank' disabled until it has been implemented @pytest.mark.parametrize( "mode, cval, behavior, warning_type", - [('nearest', 0.0, 'ndimage', []), - # ('constant', 0.0, 'rank', (UserWarning,)), - # ('nearest', 0.0, 'rank', []), - ('nearest', 0.0, 'ndimage', [])] + [ + ("nearest", 0.0, "ndimage", []), + # ('constant', 0.0, 'rank', (UserWarning,)), + # ('nearest', 0.0, 'rank', []), + ("nearest", 0.0, "ndimage", []), + ], ) def test_median_warning(image, mode, cval, behavior, warning_type): - if warning_type: with pytest.warns(warning_type): median(image, mode=mode, behavior=behavior) @@ -57,21 +62,30 @@ def test_selem_kwarg_deprecation(image): # TODO: update if rank.median implemented @pytest.mark.parametrize( - 'behavior, func', [('ndimage', ndimage.median_filter)], + "behavior, func", + [("ndimage", ndimage.median_filter)], # ('rank', rank.median, {'footprint': cp.ones((3, 3), dtype=cp.uint8)})] ) @pytest.mark.parametrize( - 'mode', ['reflect', 'mirror', 'nearest', 'constant', 'wrap'] + "mode", ["reflect", "mirror", "nearest", "constant", "wrap"] ) # include even shapes and singleton shape that force non-histogram code path. # include some large shapes that always take the histogram-based code path. @pytest.mark.parametrize( - 'footprint_shape', [ - (3, 3), (5, 5), (9, 15), (2, 2), (1, 1), (2, 7), (23, 23), (15, 35), - ] + "footprint_shape", + [ + (3, 3), + (5, 5), + (9, 15), + (2, 2), + (1, 1), + (2, 7), + (23, 23), + (15, 35), + ], ) -@pytest.mark.parametrize('footprint_tuple', (False, True)) -@pytest.mark.parametrize('out', [None, cp.uint8, cp.float32, 'array']) +@pytest.mark.parametrize("footprint_tuple", (False, True)) +@pytest.mark.parametrize("out", [None, cp.uint8, cp.float32, "array"]) def test_median_behavior( camera, behavior, func, mode, footprint_shape, footprint_tuple, out ): @@ -81,7 +95,7 @@ def test_median_behavior( footprint = cp.ones(footprint_shape, dtype=bool) cam2 = camera[:, :177] # use anisotropic size assert cam2.dtype == cp.uint8 - if out == 'array': + if out == "array": out = cp.zeros_like(cam2) assert_allclose( median(cam2, footprint, mode=mode, behavior=behavior, out=out), @@ -90,21 +104,19 @@ def test_median_behavior( @pytest.mark.parametrize( - 'mode', ['reflect', 'mirror', 'nearest', 'constant', 'wrap'] + "mode", ["reflect", "mirror", "nearest", "constant", "wrap"] ) # use an anisotropic footprint large enough to trigger the histogram-based path -@pytest.mark.parametrize('footprint_shape', [(3, 3), (3, 5), (15, 23)]) -@pytest.mark.parametrize( - 'int_dtype', [cp.uint8, cp.int8, cp.uint16, cp.int16] -) -@pytest.mark.parametrize( - 'algorithm', ['auto', 'histogram', 'sorting'] -) -@pytest.mark.parametrize( - 'algorithm_kwargs', [{}, {'partitions': 32}] -) +@pytest.mark.parametrize("footprint_shape", [(3, 3), (3, 5), (15, 23)]) +@pytest.mark.parametrize("int_dtype", [cp.uint8, cp.int8, cp.uint16, cp.int16]) +@pytest.mark.parametrize("algorithm", ["auto", "histogram", "sorting"]) 
+@pytest.mark.parametrize("algorithm_kwargs", [{}, {"partitions": 32}]) def test_median_hist_dtypes( - mode, footprint_shape, int_dtype, algorithm, algorithm_kwargs, + mode, + footprint_shape, + int_dtype, + algorithm, + algorithm_kwargs, ): footprint = cp.ones(footprint_shape, dtype=bool) rng = cp.random.default_rng(123) @@ -128,26 +140,36 @@ def test_median_hist_dtypes( # 150 is the value used to auto-select between sorting vs. histogram small_kernel = prod(footprint_shape) < 150 if algorithm_kwargs and ( - algorithm == 'sorting' - or (algorithm == 'auto' and small_kernel) + algorithm == "sorting" or (algorithm == "auto" and small_kernel) ): msg = ["algorithm_kwargs={'partitions': 32} ignored"] else: msg = [] with expected_warnings(msg): - out = median(img, footprint, mode=mode, behavior='ndimage', - algorithm=algorithm, algorithm_kwargs=algorithm_kwargs) + out = median( + img, + footprint, + mode=mode, + behavior="ndimage", + algorithm=algorithm, + algorithm_kwargs=algorithm_kwargs, + ) expected = ndimage.median_filter(img, size=footprint.shape, mode=mode) assert_allclose(expected, out) # TODO: Determine source of isolated remote test failures when 16-bit range # is > 1024. Could not reproduce locally. -@pytest.mark.parametrize('mode', ['reflect', ]) +@pytest.mark.parametrize( + "mode", + [ + "reflect", + ], +) # use an anisotropic footprint large enough to trigger the histogram-based path -@pytest.mark.parametrize('footprint_shape', [(7, 11)]) +@pytest.mark.parametrize("footprint_shape", [(7, 11)]) @pytest.mark.parametrize( - 'int_dtype, irange', + "int_dtype, irange", [ (cp.uint16, (0, 256)), (cp.uint16, (0, 15)), @@ -156,16 +178,32 @@ def test_median_hist_dtypes( (cp.uint16, (0, 510)), (cp.uint16, (500, 550)), (cp.uint16, (0, 1024)), - pytest.param(cp.uint16, (0, 2048), marks=pytest.mark.skip(reason="isolated failure on CI only")), # noqa - pytest.param(cp.uint16, (1024, 3185), marks=pytest.mark.skip(reason="isolated failure on CI only")), # noqa + pytest.param( + cp.uint16, + (0, 2048), + marks=pytest.mark.skip(reason="isolated failure on CI only"), + ), # noqa + pytest.param( + cp.uint16, + (1024, 3185), + marks=pytest.mark.skip(reason="isolated failure on CI only"), + ), # noqa (cp.int16, (0, 256)), (cp.int16, (-15, 15)), (cp.int16, (128, 384)), (cp.int16, (-128, 384)), (cp.int16, (-400, 400)), - pytest.param(cp.int16, (-1024, 2048), marks=pytest.mark.skip(reason="isolated failure on CI only")), # noqa - pytest.param(cp.int16, (150, 2048), marks=pytest.mark.skip(reason="isolated failure on CI only")), # noqa - ] + pytest.param( + cp.int16, + (-1024, 2048), + marks=pytest.mark.skip(reason="isolated failure on CI only"), + ), # noqa + pytest.param( + cp.int16, + (150, 2048), + marks=pytest.mark.skip(reason="isolated failure on CI only"), + ), # noqa + ], ) def test_median_hist_16bit_offsets(mode, footprint_shape, int_dtype, irange): """Make sure 16-bit cases are robust to various value ranges""" @@ -179,18 +217,19 @@ def test_median_hist_16bit_offsets(mode, footprint_shape, int_dtype, irange): # chose a limited range of values to test 512 hist_size case img = rng.integers(irange[0], irange[1], shape, dtype=int) img = img.astype(cp.int16) - out = median(img, footprint, mode=mode, behavior='ndimage', - algorithm='histogram') + out = median( + img, footprint, mode=mode, behavior="ndimage", algorithm="histogram" + ) expected = ndimage.median_filter(img, size=footprint.shape, mode=mode) assert_allclose(expected, out) -@pytest.mark.parametrize('int_dtype', [cp.uint16, cp.int16]) 
+@pytest.mark.parametrize("int_dtype", [cp.uint16, cp.int16]) def test_median_hist_kernel_resource_limit_try_except(int_dtype): # use an anisotropic footprint large enough to trigger # the histogram-based path footprint = cp.ones((15, 23), dtype=bool) - mode = 'nearest' + mode = "nearest" rng = cp.random.default_rng(123) shape = (350, 407) # use anisotropic size if int_dtype == cp.uint16: @@ -205,17 +244,16 @@ def test_median_hist_kernel_resource_limit_try_except(int_dtype): @pytest.mark.parametrize( - 'algorithm', ['auto', 'histogram', 'sorting', 'invalid'] + "algorithm", ["auto", "histogram", "sorting", "invalid"] ) def test_median_algorithm_parameter(algorithm): - """Call all algorithms for float32 input. - """ + """Call all algorithms for float32 input.""" footprint = cp.ones((15, 23), dtype=bool) - mode = 'nearest' + mode = "nearest" rng = cp.random.default_rng(123) shape = (350, 407) # use anisotropic size img = rng.standard_normal(shape, dtype=cp.float32) - if algorithm in ['invalid', 'histogram']: + if algorithm in ["invalid", "histogram"]: # histogram supports only integer-valued dtypes # 'invalid' is an uncrecognized algorithm with pytest.raises(ValueError): @@ -226,11 +264,9 @@ def test_median_algorithm_parameter(algorithm): assert_allclose(expected, out) -@pytest.mark.parametrize( - "dtype", [cp.uint8, cp.uint16, cp.float32, cp.float64] -) +@pytest.mark.parametrize("dtype", [cp.uint8, cp.uint16, cp.float32, cp.float64]) def test_median_preserve_dtype(image, dtype): - median_image = median(image.astype(dtype), behavior='ndimage') + median_image = median(image.astype(dtype), behavior="ndimage") assert median_image.dtype == dtype @@ -245,8 +281,10 @@ def test_median_preserve_dtype(image, dtype): @pytest.mark.parametrize( "img, behavior", # (cp.random.randint(0, 10, size=(3, 3), dtype=cp.uint8), 'rank'), - [(cp.random.randint(0, 10, size=(3, 3), dtype=cp.uint8), 'ndimage'), - (cp.random.randint(0, 10, size=(3, 3, 3), dtype=cp.uint8), 'ndimage')] + [ + (cp.random.randint(0, 10, size=(3, 3), dtype=cp.uint8), "ndimage"), + (cp.random.randint(0, 10, size=(3, 3, 3), dtype=cp.uint8), "ndimage"), + ], ) def test_median(img, behavior): median(img, behavior=behavior) @@ -260,7 +298,7 @@ def test_median_nonsquare(): rng = cp.random.default_rng() img = rng.integers(0, 256, (128, 128), dtype=cp.uint8) footprint = morphology.disk(5) - mode = 'nearest' - out = median(img, footprint, mode=mode, behavior='ndimage') + mode = "nearest" + out = median(img, footprint, mode=mode, behavior="ndimage") expected = ndimage.median_filter(img, footprint=footprint, mode=mode) cp.testing.assert_array_equal(out, expected) diff --git a/python/cucim/src/cucim/skimage/filters/tests/test_ridges.py b/python/cucim/src/cucim/skimage/filters/tests/test_ridges.py index aff7cd518..6feb3a12f 100644 --- a/python/cucim/src/cucim/skimage/filters/tests/test_ridges.py +++ b/python/cucim/src/cucim/skimage/filters/tests/test_ridges.py @@ -12,7 +12,6 @@ def test_2d_null_matrix(): - a_black = cp.zeros((3, 3)).astype(cp.uint8) a_white = invert(a_black) @@ -22,21 +21,20 @@ def test_2d_null_matrix(): assert_array_equal(meijering(a_black, black_ridges=True), zeros) assert_array_equal(meijering(a_white, black_ridges=False), zeros) - assert_array_equal(sato(a_black, black_ridges=True, mode='reflect'), - zeros) - assert_array_equal(sato(a_white, black_ridges=False, mode='reflect'), - zeros) + assert_array_equal(sato(a_black, black_ridges=True, mode="reflect"), zeros) + assert_array_equal(sato(a_white, black_ridges=False, mode="reflect"), 
zeros) assert_allclose(frangi(a_black, black_ridges=True), zeros, atol=1e-3) assert_allclose(frangi(a_white, black_ridges=False), zeros, atol=1e-3) - assert_array_equal(hessian(a_black, black_ridges=False, mode='reflect'), - ones) - assert_array_equal(hessian(a_white, black_ridges=True, mode='reflect'), - ones) + assert_array_equal( + hessian(a_black, black_ridges=False, mode="reflect"), ones + ) + assert_array_equal( + hessian(a_white, black_ridges=True, mode="reflect"), ones + ) def test_3d_null_matrix(): - # Note: last axis intentionally not size 3 to avoid 2D+RGB autodetection # warning from an internal call to `skimage.filters.gaussian`. a_black = cp.zeros((3, 3, 5)).astype(cp.uint8) @@ -48,178 +46,238 @@ def test_3d_null_matrix(): assert_allclose(meijering(a_black, black_ridges=True), zeros, atol=1e-1) assert_allclose(meijering(a_white, black_ridges=False), zeros, atol=1e-1) - assert_array_equal(sato(a_black, black_ridges=True, mode='reflect'), - zeros) - assert_array_equal(sato(a_white, black_ridges=False, mode='reflect'), - zeros) + assert_array_equal(sato(a_black, black_ridges=True, mode="reflect"), zeros) + assert_array_equal(sato(a_white, black_ridges=False, mode="reflect"), zeros) assert_allclose(frangi(a_black, black_ridges=True), zeros, atol=1e-3) assert_allclose(frangi(a_white, black_ridges=False), zeros, atol=1e-3) - assert_array_equal(hessian(a_black, black_ridges=False, mode='reflect'), - ones) - assert_array_equal(hessian(a_white, black_ridges=True, mode='reflect'), - ones) + assert_array_equal( + hessian(a_black, black_ridges=False, mode="reflect"), ones + ) + assert_array_equal( + hessian(a_white, black_ridges=True, mode="reflect"), ones + ) def test_2d_energy_decrease(): - a_black = cp.zeros((5, 5)).astype(np.uint8) a_black[2, 2] = 255 a_white = invert(a_black) - assert_array_less(meijering(a_black, black_ridges=True).std(), - a_black.std()) - assert_array_less(meijering(a_white, black_ridges=False).std(), - a_white.std()) + assert_array_less( + meijering(a_black, black_ridges=True).std(), a_black.std() + ) + assert_array_less( + meijering(a_white, black_ridges=False).std(), a_white.std() + ) - assert_array_less(sato(a_black, black_ridges=True, mode='reflect').std(), - a_black.std()) - assert_array_less(sato(a_white, black_ridges=False, mode='reflect').std(), - a_white.std()) + assert_array_less( + sato(a_black, black_ridges=True, mode="reflect").std(), a_black.std() + ) + assert_array_less( + sato(a_white, black_ridges=False, mode="reflect").std(), a_white.std() + ) - assert_array_less(frangi(a_black, black_ridges=True).std(), - a_black.std()) - assert_array_less(frangi(a_white, black_ridges=False).std(), - a_white.std()) + assert_array_less(frangi(a_black, black_ridges=True).std(), a_black.std()) + assert_array_less(frangi(a_white, black_ridges=False).std(), a_white.std()) - assert_array_less(hessian(a_black, black_ridges=True, - mode='reflect').std(), a_black.std()) - assert_array_less(hessian(a_white, black_ridges=False, - mode='reflect').std(), a_white.std()) + assert_array_less( + hessian(a_black, black_ridges=True, mode="reflect").std(), a_black.std() + ) + assert_array_less( + hessian(a_white, black_ridges=False, mode="reflect").std(), + a_white.std(), + ) def test_3d_energy_decrease(): - a_black = cp.zeros((5, 5, 5)).astype(np.uint8) a_black[2, 2, 2] = 255 a_white = invert(a_black) - assert_array_less(meijering(a_black, black_ridges=True).std(), - a_black.std()) - assert_array_less(meijering(a_white, black_ridges=False).std(), - a_white.std()) + 
assert_array_less( + meijering(a_black, black_ridges=True).std(), a_black.std() + ) + assert_array_less( + meijering(a_white, black_ridges=False).std(), a_white.std() + ) - assert_array_less(sato(a_black, black_ridges=True, mode='reflect').std(), - a_black.std()) - assert_array_less(sato(a_white, black_ridges=False, mode='reflect').std(), - a_white.std()) + assert_array_less( + sato(a_black, black_ridges=True, mode="reflect").std(), a_black.std() + ) + assert_array_less( + sato(a_white, black_ridges=False, mode="reflect").std(), a_white.std() + ) - assert_array_less(frangi(a_black, black_ridges=True).std(), - a_black.std()) - assert_array_less(frangi(a_white, black_ridges=False).std(), - a_white.std()) + assert_array_less(frangi(a_black, black_ridges=True).std(), a_black.std()) + assert_array_less(frangi(a_white, black_ridges=False).std(), a_white.std()) - assert_array_less(hessian(a_black, black_ridges=True, - mode='reflect').std(), a_black.std()) - assert_array_less(hessian(a_white, black_ridges=False, - mode='reflect').std(), a_white.std()) + assert_array_less( + hessian(a_black, black_ridges=True, mode="reflect").std(), a_black.std() + ) + assert_array_less( + hessian(a_white, black_ridges=False, mode="reflect").std(), + a_white.std(), + ) def test_2d_linearity(): - a_black = cp.ones((3, 3)).astype(np.uint8) a_white = invert(a_black) - assert_allclose(meijering(1 * a_black, black_ridges=True), - meijering(10 * a_black, black_ridges=True), atol=1e-3) - assert_allclose(meijering(1 * a_white, black_ridges=False), - meijering(10 * a_white, black_ridges=False), atol=1e-3) - - assert_allclose(sato(1 * a_black, black_ridges=True, mode='reflect'), - sato(10 * a_black, black_ridges=True, mode='reflect'), - atol=1e-3) - assert_allclose(sato(1 * a_white, black_ridges=False, mode='reflect'), - sato(10 * a_white, black_ridges=False, mode='reflect'), - atol=1e-3) - - assert_allclose(frangi(1 * a_black, black_ridges=True), - frangi(10 * a_black, black_ridges=True), atol=1e-3) - assert_allclose(frangi(1 * a_white, black_ridges=False), - frangi(10 * a_white, black_ridges=False), atol=1e-3) - - assert_allclose(hessian(1 * a_black, black_ridges=True, mode='reflect'), - hessian(10 * a_black, black_ridges=True, mode='reflect'), - atol=1e-3) - assert_allclose(hessian(1 * a_white, black_ridges=False, mode='reflect'), - hessian(10 * a_white, black_ridges=False, mode='reflect'), - atol=1e-3) + assert_allclose( + meijering(1 * a_black, black_ridges=True), + meijering(10 * a_black, black_ridges=True), + atol=1e-3, + ) + assert_allclose( + meijering(1 * a_white, black_ridges=False), + meijering(10 * a_white, black_ridges=False), + atol=1e-3, + ) + + assert_allclose( + sato(1 * a_black, black_ridges=True, mode="reflect"), + sato(10 * a_black, black_ridges=True, mode="reflect"), + atol=1e-3, + ) + assert_allclose( + sato(1 * a_white, black_ridges=False, mode="reflect"), + sato(10 * a_white, black_ridges=False, mode="reflect"), + atol=1e-3, + ) + + assert_allclose( + frangi(1 * a_black, black_ridges=True), + frangi(10 * a_black, black_ridges=True), + atol=1e-3, + ) + assert_allclose( + frangi(1 * a_white, black_ridges=False), + frangi(10 * a_white, black_ridges=False), + atol=1e-3, + ) + + assert_allclose( + hessian(1 * a_black, black_ridges=True, mode="reflect"), + hessian(10 * a_black, black_ridges=True, mode="reflect"), + atol=1e-3, + ) + assert_allclose( + hessian(1 * a_white, black_ridges=False, mode="reflect"), + hessian(10 * a_white, black_ridges=False, mode="reflect"), + atol=1e-3, + ) def 
test_3d_linearity(): - # Note: last axis intentionally not size 3 to avoid 2D+RGB autodetection # warning from an internal call to `skimage.filters.gaussian`. a_black = cp.ones((3, 3, 5)).astype(np.uint8) a_white = invert(a_black) - assert_allclose(meijering(1 * a_black, black_ridges=True), - meijering(10 * a_black, black_ridges=True), atol=1e-3) - assert_allclose(meijering(1 * a_white, black_ridges=False), - meijering(10 * a_white, black_ridges=False), atol=1e-3) - - assert_allclose(sato(1 * a_black, black_ridges=True, mode='reflect'), - sato(10 * a_black, black_ridges=True, mode='reflect'), - atol=1e-3) - assert_allclose(sato(1 * a_white, black_ridges=False, mode='reflect'), - sato(10 * a_white, black_ridges=False, mode='reflect'), - atol=1e-3) - - assert_allclose(frangi(1 * a_black, black_ridges=True), - frangi(10 * a_black, black_ridges=True), atol=1e-3) - assert_allclose(frangi(1 * a_white, black_ridges=False), - frangi(10 * a_white, black_ridges=False), atol=1e-3) - - assert_allclose(hessian(1 * a_black, black_ridges=True, mode='reflect'), - hessian(10 * a_black, black_ridges=True, mode='reflect'), - atol=1e-3) - assert_allclose(hessian(1 * a_white, black_ridges=False, mode='reflect'), - hessian(10 * a_white, black_ridges=False, mode='reflect'), - atol=1e-3) - - -@pytest.mark.parametrize('dtype', ['float64', 'uint8']) + assert_allclose( + meijering(1 * a_black, black_ridges=True), + meijering(10 * a_black, black_ridges=True), + atol=1e-3, + ) + assert_allclose( + meijering(1 * a_white, black_ridges=False), + meijering(10 * a_white, black_ridges=False), + atol=1e-3, + ) + + assert_allclose( + sato(1 * a_black, black_ridges=True, mode="reflect"), + sato(10 * a_black, black_ridges=True, mode="reflect"), + atol=1e-3, + ) + assert_allclose( + sato(1 * a_white, black_ridges=False, mode="reflect"), + sato(10 * a_white, black_ridges=False, mode="reflect"), + atol=1e-3, + ) + + assert_allclose( + frangi(1 * a_black, black_ridges=True), + frangi(10 * a_black, black_ridges=True), + atol=1e-3, + ) + assert_allclose( + frangi(1 * a_white, black_ridges=False), + frangi(10 * a_white, black_ridges=False), + atol=1e-3, + ) + + assert_allclose( + hessian(1 * a_black, black_ridges=True, mode="reflect"), + hessian(10 * a_black, black_ridges=True, mode="reflect"), + atol=1e-3, + ) + assert_allclose( + hessian(1 * a_white, black_ridges=False, mode="reflect"), + hessian(10 * a_white, black_ridges=False, mode="reflect"), + atol=1e-3, + ) + + +@pytest.mark.parametrize("dtype", ["float64", "uint8"]) def test_2d_cropped_camera_image(dtype): a_black = crop(cp.array(camera()), ((200, 212), (100, 312))) assert a_black.dtype == cp.uint8 - if dtype == 'float64': + if dtype == "float64": a_black = img_as_float64(a_black) a_white = invert(a_black) ones = cp.ones((100, 100)) - tol = 1e-7 if dtype == 'float64' else 1e-5 - - assert_allclose(meijering(a_black, black_ridges=True), - meijering(a_white, black_ridges=False), atol=tol, rtol=tol) - - assert_allclose(sato(a_black, black_ridges=True, mode='reflect'), - sato(a_white, black_ridges=False, mode='reflect'), - atol=tol, rtol=tol) - - assert_allclose(frangi(a_black, black_ridges=True), - frangi(a_white, black_ridges=False), atol=tol, rtol=tol) - - assert_allclose(hessian(a_black, black_ridges=True, mode='reflect'), - ones, atol=1 - 1e-7) - assert_allclose(hessian(a_white, black_ridges=False, mode='reflect'), - ones, atol=1 - 1e-7) - - -@pytest.mark.parametrize('func', [meijering, sato, frangi, hessian]) -@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, 
cp.float64]) + tol = 1e-7 if dtype == "float64" else 1e-5 + + assert_allclose( + meijering(a_black, black_ridges=True), + meijering(a_white, black_ridges=False), + atol=tol, + rtol=tol, + ) + + assert_allclose( + sato(a_black, black_ridges=True, mode="reflect"), + sato(a_white, black_ridges=False, mode="reflect"), + atol=tol, + rtol=tol, + ) + + assert_allclose( + frangi(a_black, black_ridges=True), + frangi(a_white, black_ridges=False), + atol=tol, + rtol=tol, + ) + + assert_allclose( + hessian(a_black, black_ridges=True, mode="reflect"), ones, atol=1 - 1e-7 + ) + assert_allclose( + hessian(a_white, black_ridges=False, mode="reflect"), + ones, + atol=1 - 1e-7, + ) + + +@pytest.mark.parametrize("func", [meijering, sato, frangi, hessian]) +@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64]) def test_ridge_output_dtype(func, dtype): img = img_as_float(cp.array(camera()).astype(dtype, copy=False)) assert func(img).dtype == _supported_float_type(img.dtype) -@pytest.mark.parametrize('dtype', ['float64', 'uint8']) -@pytest.mark.parametrize('uniform_stack', [False, True]) +@pytest.mark.parametrize("dtype", ["float64", "uint8"]) +@pytest.mark.parametrize("uniform_stack", [False, True]) def test_3d_cropped_camera_image(dtype, uniform_stack): - a_black = crop(cp.asarray(camera()), ((200, 212), (100, 312))) assert a_black.dtype == cp.uint8 - if dtype == 'float64': + if dtype == "float64": a_black = img_as_float64(a_black) if uniform_stack: # Hessian along last axis will be 0 due to identical image content @@ -227,46 +285,62 @@ def test_3d_cropped_camera_image(dtype, uniform_stack): else: # stack using shift to give a non-zero Hessian on the last axis a_black = cp.stack( - [cp.roll(a_black, shift=n, axis=0) for n in range(5)], - axis=-1 + [cp.roll(a_black, shift=n, axis=0) for n in range(5)], axis=-1 ) - tol = 1e-10 if dtype == 'float64' else 4e-3 + tol = 1e-10 if dtype == "float64" else 4e-3 a_white = invert(a_black) ones = cp.ones(a_black.shape) - assert_allclose(meijering(a_black, black_ridges=True), - meijering(a_white, black_ridges=False), atol=tol, rtol=tol) - - assert_allclose(sato(a_black, black_ridges=True, mode='reflect'), - sato(a_white, black_ridges=False, mode='reflect'), - atol=tol, rtol=tol) - - assert_allclose(frangi(a_black, black_ridges=True), - frangi(a_white, black_ridges=False), atol=tol, rtol=tol) - - assert_allclose(hessian(a_black, black_ridges=True, mode='reflect'), - ones, atol=1 - 1e-7) - assert_allclose(hessian(a_white, black_ridges=False, mode='reflect'), - ones, atol=1 - 1e-7) - - -@pytest.mark.parametrize('func, tol', [(frangi, 1e-2), - (meijering, 1e-2), - (sato, 2e-3), - (hessian, 1e-2)]) + assert_allclose( + meijering(a_black, black_ridges=True), + meijering(a_white, black_ridges=False), + atol=tol, + rtol=tol, + ) + + assert_allclose( + sato(a_black, black_ridges=True, mode="reflect"), + sato(a_white, black_ridges=False, mode="reflect"), + atol=tol, + rtol=tol, + ) + + assert_allclose( + frangi(a_black, black_ridges=True), + frangi(a_white, black_ridges=False), + atol=tol, + rtol=tol, + ) + + assert_allclose( + hessian(a_black, black_ridges=True, mode="reflect"), ones, atol=1 - 1e-7 + ) + assert_allclose( + hessian(a_white, black_ridges=False, mode="reflect"), + ones, + atol=1 - 1e-7, + ) + + +@pytest.mark.parametrize( + "func, tol", + [(frangi, 1e-2), (meijering, 1e-2), (sato, 2e-3), (hessian, 1e-2)], +) def test_border_management(func, tol): img = rgb2gray(cp.array(retina()[300:500, 700:900])) - out = func(img, sigmas=[1], mode='mirror') + out = 
func(img, sigmas=[1], mode="mirror") full_std = out.std() full_mean = out.mean() inside_std = out[4:-4, 4:-4].std() inside_mean = out[4:-4, 4:-4].mean() - border_std = cp.stack([out[:4, :], out[-4:, :], - out[:, :4].T, out[:, -4:].T]).std() - border_mean = cp.stack([out[:4, :], out[-4:, :], - out[:, :4].T, out[:, -4:].T]).mean() + border_std = cp.stack( + [out[:4, :], out[-4:, :], out[:, :4].T, out[:, -4:].T] + ).std() + border_mean = cp.stack( + [out[:4, :], out[-4:, :], out[:, :4].T, out[:, -4:].T] + ).mean() assert abs(full_std - inside_std) < tol assert abs(full_std - border_std) < tol diff --git a/python/cucim/src/cucim/skimage/filters/tests/test_separable_filtering.py b/python/cucim/src/cucim/skimage/filters/tests/test_separable_filtering.py index 0f2c5897f..026b8c84b 100644 --- a/python/cucim/src/cucim/skimage/filters/tests/test_separable_filtering.py +++ b/python/cucim/src/cucim/skimage/filters/tests/test_separable_filtering.py @@ -1,22 +1,29 @@ import cupy as cp import pytest -from cucim.skimage._vendored.ndimage import (convolve1d, correlate1d, - gaussian_filter, gaussian_filter1d, - gaussian_gradient_magnitude, - gaussian_laplace, laplace, prewitt, - sobel, uniform_filter, - uniform_filter1d) +from cucim.skimage._vendored.ndimage import ( + convolve1d, + correlate1d, + gaussian_filter, + gaussian_filter1d, + gaussian_gradient_magnitude, + gaussian_laplace, + laplace, + prewitt, + sobel, + uniform_filter, + uniform_filter1d, +) def _get_image(shape, dtype, seed=123): rng = cp.random.default_rng(seed) dtype = cp.dtype(dtype) - if dtype.kind == 'b': + if dtype.kind == "b": image = rng.integers(0, 1, shape, dtype=cp.uint8).astype(bool) - elif dtype.kind in 'iu': + elif dtype.kind in "iu": image = rng.integers(0, 128, shape, dtype=dtype) - elif dtype.kind in 'c': + elif dtype.kind in "c": real_dtype = cp.asarray([], dtype=dtype).real.dtype image = rng.standard_normal(shape, dtype=real_dtype) image = image + 1j * rng.standard_normal(shape, dtype=real_dtype) @@ -39,8 +46,16 @@ def _get_rtol_atol(dtype): def _compare_implementations( - shape, kernel_size, axis, dtype, mode, cval=0.0, origin=0, - output_dtype=None, kernel_dtype=None, output_preallocated=False, + shape, + kernel_size, + axis, + dtype, + mode, + cval=0.0, + origin=0, + output_dtype=None, + kernel_dtype=None, + output_preallocated=False, function=convolve1d, ): dtype = cp.dtype(dtype) @@ -58,27 +73,33 @@ def _compare_implementations( output1 = cp.empty(image.shape, dtype=output_dtype) output2 = cp.empty(image.shape, dtype=output_dtype) function( - image, kernel, output=output1, algorithm='elementwise', **kwargs + image, kernel, output=output1, algorithm="elementwise", **kwargs ) function( - image, kernel, output=output2, algorithm='shared_memory', **kwargs + image, kernel, output=output2, algorithm="shared_memory", **kwargs ) cp.testing.assert_allclose(output1, output2, rtol=rtol, atol=atol) return output1 = function( - image, kernel, output=output_dtype, algorithm='elementwise', **kwargs + image, kernel, output=output_dtype, algorithm="elementwise", **kwargs ) output2 = function( - image, kernel, output=output_dtype, algorithm='shared_memory', **kwargs + image, kernel, output=output_dtype, algorithm="shared_memory", **kwargs ) cp.testing.assert_allclose(output1, output2, rtol=rtol, atol=atol) return def _compare_implementations_other( - shape, dtype, mode, cval=0.0, - output_dtype=None, kernel_dtype=None, output_preallocated=False, - function=convolve1d, func_kwargs={}, + shape, + dtype, + mode, + cval=0.0, + 
output_dtype=None, + kernel_dtype=None, + output_preallocated=False, + function=convolve1d, + func_kwargs={}, ): dtype = cp.dtype(dtype) image = _get_image(shape, dtype) @@ -93,33 +114,33 @@ def _compare_implementations_other( output_dtype = image.dtype output1 = cp.empty(image.shape, dtype=output_dtype) output2 = cp.empty(image.shape, dtype=output_dtype) - function(image, output=output1, algorithm='elementwise', **kwargs) - function(image, output=output2, algorithm='shared_memory', **kwargs) + function(image, output=output1, algorithm="elementwise", **kwargs) + function(image, output=output2, algorithm="shared_memory", **kwargs) cp.testing.assert_allclose(output1, output2, rtol=rtol, atol=atol) return output1 = function( - image, output=output_dtype, algorithm='elementwise', **kwargs + image, output=output_dtype, algorithm="elementwise", **kwargs ) output2 = function( - image, output=output_dtype, algorithm='shared_memory', **kwargs + image, output=output_dtype, algorithm="shared_memory", **kwargs ) cp.testing.assert_allclose(output1, output2, rtol=rtol, atol=atol) return -@pytest.mark.parametrize('shape', ((64, 57), (1000, 500))) -@pytest.mark.parametrize('axis', (0, 1)) -@pytest.mark.parametrize('origin', ('min', 0, 'max')) -@pytest.mark.parametrize('kernel_size', tuple(range(1, 17))) -@pytest.mark.parametrize('function', [convolve1d, correlate1d]) +@pytest.mark.parametrize("shape", ((64, 57), (1000, 500))) +@pytest.mark.parametrize("axis", (0, 1)) +@pytest.mark.parametrize("origin", ("min", 0, "max")) +@pytest.mark.parametrize("kernel_size", tuple(range(1, 17))) +@pytest.mark.parametrize("function", [convolve1d, correlate1d]) def test_separable_kernel_sizes_and_origins( shape, axis, origin, kernel_size, function ): if kernel_size == 1: origin = 0 - elif origin == 'min': + elif origin == "min": origin = -(kernel_size // 2) - elif origin == 'max': + elif origin == "max": origin = kernel_size // 2 if kernel_size % 2 == 0: origin -= 1 @@ -128,17 +149,16 @@ def test_separable_kernel_sizes_and_origins( kernel_size=kernel_size, axis=axis, dtype=cp.float32, - mode='nearest', + mode="nearest", origin=origin, function=function, ) -@pytest.mark.parametrize('shape', ((64, 57), (1000, 500))) -@pytest.mark.parametrize('axis', (0, 1)) +@pytest.mark.parametrize("shape", ((64, 57), (1000, 500))) +@pytest.mark.parametrize("axis", (0, 1)) @pytest.mark.parametrize( - 'kernel_size', - tuple(range(17, 129, 11)) + tuple(range(145, 275, 41)) + "kernel_size", tuple(range(17, 129, 11)) + tuple(range(145, 275, 41)) ) def test_separable_kernel_larger_sizes(shape, axis, kernel_size): _compare_implementations( @@ -146,13 +166,13 @@ def test_separable_kernel_larger_sizes(shape, axis, kernel_size): kernel_size=kernel_size, axis=axis, dtype=cp.float32, - mode='reflect', + mode="reflect", origin=0, ) -@pytest.mark.parametrize('shape', ((1000, 500),)) -@pytest.mark.parametrize('axis', (0, 1)) +@pytest.mark.parametrize("shape", ((1000, 500),)) +@pytest.mark.parametrize("axis", (0, 1)) def test_separable_elementwise_very_large_size_fallback(shape, axis): """Very large kernel to make it likely shared memory will be exceeded.""" _compare_implementations( @@ -160,20 +180,19 @@ def test_separable_elementwise_very_large_size_fallback(shape, axis): kernel_size=901, axis=axis, dtype=cp.float64, - mode='nearest', + mode="nearest", origin=0, ) -@pytest.mark.parametrize('shape', ((4000, 2000), (1, 1), (5, 500), (1500, 5))) -@pytest.mark.parametrize('axis', (-1, -2)) -@pytest.mark.parametrize('kernel_size', (1, 38, 129)) 
+@pytest.mark.parametrize("shape", ((4000, 2000), (1, 1), (5, 500), (1500, 5))) +@pytest.mark.parametrize("axis", (-1, -2)) +@pytest.mark.parametrize("kernel_size", (1, 38, 129)) @pytest.mark.parametrize( - 'mode', - ('nearest', 'reflect', 'wrap', 'mirror', 'constant', ('constant', 1)), + "mode", + ("nearest", "reflect", "wrap", "mirror", "constant", ("constant", 1)), ) def test_separable_image_shapes_and_modes(shape, axis, kernel_size, mode): - if isinstance(mode, tuple): mode, cval = mode else: @@ -191,16 +210,27 @@ def test_separable_image_shapes_and_modes(shape, axis, kernel_size, mode): image_dtypes_tested = ( - cp.float16, cp.float32, cp.float64, cp.complex64, cp.complex128, bool, - cp.int8, cp.uint8, cp.int16, cp.uint16, cp.int32, cp.uint32, cp.int64, + cp.float16, + cp.float32, + cp.float64, + cp.complex64, + cp.complex128, + bool, + cp.int8, + cp.uint8, + cp.int16, + cp.uint16, + cp.int32, + cp.uint32, + cp.int64, cp.uint64, ) -@pytest.mark.parametrize('axis', (0, 1)) -@pytest.mark.parametrize('image_dtype', image_dtypes_tested) +@pytest.mark.parametrize("axis", (0, 1)) +@pytest.mark.parametrize("image_dtype", image_dtypes_tested) @pytest.mark.parametrize( - 'kernel_dtype', (None, cp.float32, cp.uint8, cp.complex64) + "kernel_dtype", (None, cp.float32, cp.uint8, cp.complex64) ) def test_separable_image_and_kernel_dtypes(axis, image_dtype, kernel_dtype): """Test many kernel and image dtype combinations""" @@ -210,31 +240,31 @@ def test_separable_image_and_kernel_dtypes(axis, image_dtype, kernel_dtype): kernel_size=3, axis=axis, dtype=image_dtype, - mode='nearest', + mode="nearest", origin=0, kernel_dtype=kernel_dtype, ) -@pytest.mark.parametrize('axis', (0, 1)) -@pytest.mark.parametrize('image_dtype', image_dtypes_tested) +@pytest.mark.parametrize("axis", (0, 1)) +@pytest.mark.parametrize("image_dtype", image_dtypes_tested) @pytest.mark.parametrize( - 'output_dtype', (None, cp.float32, cp.int32, cp.complex64) + "output_dtype", (None, cp.float32, cp.int32, cp.complex64) ) -@pytest.mark.parametrize('output_preallocated', (False, True)) +@pytest.mark.parametrize("output_preallocated", (False, True)) def test_separable_input_and_output_dtypes( axis, image_dtype, output_dtype, output_preallocated ): """Test many kernel and image dtype combinations""" - if cp.dtype(image_dtype).kind == 'c' and output_dtype is not None: - if not cp.dtype(output_dtype).kind == 'c': - pytest.skip('cannot cast complex values to real') + if cp.dtype(image_dtype).kind == "c" and output_dtype is not None: + if not cp.dtype(output_dtype).kind == "c": + pytest.skip("cannot cast complex values to real") _compare_implementations( (64, 32), kernel_size=3, axis=axis, dtype=image_dtype, - mode='nearest', + mode="nearest", origin=0, kernel_dtype=None, output_dtype=output_dtype, @@ -242,11 +272,11 @@ def test_separable_input_and_output_dtypes( ) -@pytest.mark.parametrize('shape', ((64, 57),)) -@pytest.mark.parametrize('axis', (0, 1)) -@pytest.mark.parametrize('origin', ('min', 0, 'max')) +@pytest.mark.parametrize("shape", ((64, 57),)) +@pytest.mark.parametrize("axis", (0, 1)) +@pytest.mark.parametrize("origin", ("min", 0, "max")) @pytest.mark.parametrize( - 'function, func_kwargs', + "function, func_kwargs", [ (gaussian_filter, dict(sigma=1.5)), (gaussian_filter1d, dict(sigma=1.5, axis=0)), @@ -259,11 +289,9 @@ def test_separable_input_and_output_dtypes( (uniform_filter, dict(size=7)), (uniform_filter1d, dict(size=7, axis=0)), (uniform_filter1d, dict(size=7, axis=1)), - ] + ], ) -def test_separable_internal_kernel( - 
shape, axis, origin, function, func_kwargs -): +def test_separable_internal_kernel(shape, axis, origin, function, func_kwargs): """ Test case to make sure the 'algorithm' kwarg works for all other separable ndimage filters as well. @@ -271,51 +299,49 @@ def test_separable_internal_kernel( _compare_implementations_other( shape, dtype=cp.float32, - mode='nearest', + mode="nearest", function=function, func_kwargs=func_kwargs, ) -@pytest.mark.parametrize('shape', ((16, 24, 32), (192, 128, 160))) -@pytest.mark.parametrize('axis', (0, 1, 2)) -@pytest.mark.parametrize('kernel_size', tuple(range(1, 17, 3))) -@pytest.mark.parametrize('function', [convolve1d, correlate1d]) -def test_separable_kernel_sizes_3d( - shape, axis, kernel_size, function -): +@pytest.mark.parametrize("shape", ((16, 24, 32), (192, 128, 160))) +@pytest.mark.parametrize("axis", (0, 1, 2)) +@pytest.mark.parametrize("kernel_size", tuple(range(1, 17, 3))) +@pytest.mark.parametrize("function", [convolve1d, correlate1d]) +def test_separable_kernel_sizes_3d(shape, axis, kernel_size, function): _compare_implementations( shape, kernel_size=kernel_size, axis=axis, dtype=cp.float32, - mode='nearest', + mode="nearest", origin=0, function=function, ) -@pytest.mark.parametrize('axis', (0, 1, 2)) -@pytest.mark.parametrize('kernel_size', (65, 129, 198)) +@pytest.mark.parametrize("axis", (0, 1, 2)) +@pytest.mark.parametrize("kernel_size", (65, 129, 198)) def test_separable_large_kernel_3d(axis, kernel_size): _compare_implementations( shape=(256, 128, 96), kernel_size=kernel_size, axis=axis, dtype=cp.float32, - mode='reflect', + mode="reflect", origin=0, ) @pytest.mark.parametrize( - 'shape', ((64, 5, 64), (5, 64, 64), (64, 64, 5), (32, 32, 32)) + "shape", ((64, 5, 64), (5, 64, 64), (64, 64, 5), (32, 32, 32)) ) -@pytest.mark.parametrize('axis', (-1, -2, -3)) -@pytest.mark.parametrize('kernel_size', (9,)) +@pytest.mark.parametrize("axis", (-1, -2, -3)) +@pytest.mark.parametrize("kernel_size", (9,)) @pytest.mark.parametrize( - 'mode', - ('nearest', 'reflect', 'wrap', 'mirror', 'constant', ('constant', 1)), + "mode", + ("nearest", "reflect", "wrap", "mirror", "constant", ("constant", 1)), ) def test_separable_image_shapes_and_modes_3d(shape, axis, kernel_size, mode): if isinstance(mode, tuple): diff --git a/python/cucim/src/cucim/skimage/filters/tests/test_thresholding.py b/python/cucim/src/cucim/skimage/filters/tests/test_thresholding.py index 5e393a06a..f08fe60b4 100644 --- a/python/cucim/src/cucim/skimage/filters/tests/test_thresholding.py +++ b/python/cucim/src/cucim/skimage/filters/tests/test_thresholding.py @@ -1,12 +1,17 @@ import cupy as cp import numpy as np import pytest -from cupy.testing import (assert_allclose, assert_array_almost_equal, - assert_array_equal) +from cupy.testing import ( + assert_allclose, + assert_array_almost_equal, + assert_array_equal, +) from skimage import data from skimage.draw import disk -from skimage.filters._multiotsu import (_get_multiotsu_thresh_indices, - _get_multiotsu_thresh_indices_lut) +from skimage.filters._multiotsu import ( + _get_multiotsu_thresh_indices, + _get_multiotsu_thresh_indices_lut, +) # from cupyx.scipy import ndimage as ndi from cucim.skimage import util @@ -15,17 +20,21 @@ from cucim.skimage._shared.utils import _supported_float_type from cucim.skimage.color import rgb2gray from cucim.skimage.exposure import histogram -from cucim.skimage.filters.thresholding import (_cross_entropy, - threshold_isodata, threshold_li, - threshold_local, threshold_mean, - threshold_minimum, - 
threshold_multiotsu, - threshold_niblack, - threshold_otsu, - threshold_sauvola, - threshold_triangle, - threshold_yen, - try_all_threshold) +from cucim.skimage.filters.thresholding import ( + _cross_entropy, + threshold_isodata, + threshold_li, + threshold_local, + threshold_mean, + threshold_minimum, + threshold_multiotsu, + threshold_niblack, + threshold_otsu, + threshold_sauvola, + threshold_triangle, + threshold_yen, + try_all_threshold, +) # transfer images to GPU astronautd = cp.array(data.astronaut()) @@ -54,7 +63,7 @@ def test_try_all_threshold(self): fig, ax = try_all_threshold(self.image) all_texts = [axis.texts for axis in ax if axis.texts != []] text_content = [text.get_text() for x in all_texts for text in x] - assert 'RuntimeError' in text_content + assert "RuntimeError" in text_content def test_otsu(self): assert threshold_otsu(self.image) == 2 @@ -131,10 +140,11 @@ def test_isodata_16bit(self): np.random.seed(0) imfloat = cp.array(np.random.rand(256, 256)) assert 0.49 < threshold_isodata(imfloat, nbins=1024) < 0.51 - assert all(0.49 < threshold_isodata(imfloat, nbins=1024, - return_all=True)) + assert all( + 0.49 < threshold_isodata(imfloat, nbins=1024, return_all=True) + ) - @pytest.mark.parametrize('ndim', [2, 3]) + @pytest.mark.parametrize("ndim", [2, 3]) def test_threshold_local_gaussian(self, ndim): # fmt: off ref = cp.array( @@ -162,9 +172,8 @@ def test_threshold_local_gaussian(self, ndim): param=1 / 3) assert_array_equal(ref, image > out) - @pytest.mark.parametrize('ndim', [2, 3]) + @pytest.mark.parametrize("ndim", [2, 3]) def test_threshold_local_mean(self, ndim): - # fmt: off ref = cp.array( [[False, False, False, False, True], # noqa @@ -188,32 +197,35 @@ def test_threshold_local_mean(self, ndim): mode='reflect') assert_array_equal(ref, image > out) - @pytest.mark.parametrize('block_size', [(3, ), (3, 3, 3)]) + @pytest.mark.parametrize("block_size", [(3,), (3, 3, 3)]) def test_threshold_local_invalid_block_size(self, block_size): # len(block_size) != image.ndim with pytest.raises(ValueError): - threshold_local(self.image, block_size, method='mean') + threshold_local(self.image, block_size, method="mean") - @pytest.mark.parametrize('ndim', [2, 3]) + @pytest.mark.parametrize("ndim", [2, 3]) def test_threshold_local_median(self, ndim): ref = cp.array( - [[False, False, False, False, True], # noqa - [False, False, True, False, False], # noqa - [False, False, True, False, False], # noqa - [False, False, True, True, False], # noqa - [False, True, False, False, False]] # noqa + [ + [False, False, False, False, True], # noqa + [False, False, True, False, False], # noqa + [False, False, True, False, False], # noqa + [False, False, True, True, False], # noqa + [False, True, False, False, False], + ] # noqa ) if ndim == 2: image = self.image else: - image = cp.stack((self.image, ) * 5, axis=-1) - ref = cp.stack((ref, ) * 5, axis=-1) - out = threshold_local(image, 3, method='median', mode='reflect') + image = cp.stack((self.image,) * 5, axis=-1) + ref = cp.stack((ref,) * 5, axis=-1) + out = threshold_local(image, 3, method="median", mode="reflect") assert_array_equal(ref, image > out) def test_threshold_local_median_constant_mode(self): - out = threshold_local(self.image, 3, method='median', - mode='constant', cval=20) + out = threshold_local( + self.image, 3, method="median", mode="constant", cval=20 + ) # fmt: off expected = cp.array( @@ -324,7 +336,7 @@ def test_otsu_coins_image_as_float(): def test_otsu_astro_image(): img = util.img_as_ubyte(astronautd) - with 
expected_warnings(['grayscale']): + with expected_warnings(["grayscale"]): assert 109 < threshold_otsu(img) < 111 @@ -421,7 +433,7 @@ def test_li_pathological_arrays(): e = cp.array([1, 1]) f = cp.asarray([1, 2]) arrays = [a, b, c, d, e, f] - with np.errstate(divide='ignore'): + with np.errstate(divide="ignore"): # ignoring "divide by zero encountered in log" error from np.log(0) thresholds = cp.array([float(threshold_li(arr)) for arr in arrays]) assert cp.all(cp.isfinite(thresholds)) @@ -440,7 +452,7 @@ def test_yen_camera_image_histogram(): def test_yen_camera_image_counts(): camera = util.img_as_ubyte(camerad) - counts, bin_centers = histogram(camera.ravel(), 256, source_range='image') + counts, bin_centers = histogram(camera.ravel(), 256, source_range="image") assert 145 < threshold_yen(hist=counts) < 147 @@ -464,8 +476,16 @@ def test_isodata_camera_image(): camera = util.img_as_ubyte(camerad) threshold = threshold_isodata(camera) - assert np.floor((camera[camera <= threshold].mean() + - camera[camera > threshold].mean()) / 2.0) == threshold + assert ( + np.floor( + ( + camera[camera <= threshold].mean() + + camera[camera > threshold].mean() + ) + / 2.0 + ) + == threshold + ) assert threshold == 102 assert_array_equal(threshold_isodata(camera, return_all=True), [102, 103]) @@ -473,14 +493,14 @@ def test_isodata_camera_image(): def test_isodata_camera_image_histogram(): camera = util.img_as_ubyte(camerad) - hist = histogram(camera.ravel(), 256, source_range='image') + hist = histogram(camera.ravel(), 256, source_range="image") threshold = threshold_isodata(hist=hist) assert threshold == 102 def test_isodata_camera_image_counts(): camera = util.img_as_ubyte(camerad) - counts, bin_centers = histogram(camera.ravel(), 256, source_range='image') + counts, bin_centers = histogram(camera.ravel(), 256, source_range="image") threshold = threshold_isodata(hist=counts) assert threshold == 102 @@ -489,8 +509,13 @@ def test_isodata_coins_image(): coins = util.img_as_ubyte(coinsd) threshold = threshold_isodata(coins) - assert np.floor((coins[coins <= threshold].mean() + - coins[coins > threshold].mean()) / 2.0) == threshold + assert ( + np.floor( + (coins[coins <= threshold].mean() + coins[coins > threshold].mean()) + / 2.0 + ) + == threshold + ) assert threshold == 107 assert_array_equal(threshold_isodata(coins, return_all=True), [107]) @@ -500,14 +525,24 @@ def test_isodata_moon_image(): moon = util.img_as_ubyte(moond) threshold = threshold_isodata(moon) - assert np.floor((moon[moon <= threshold].mean() + - moon[moon > threshold].mean()) / 2.0) == threshold + assert ( + np.floor( + (moon[moon <= threshold].mean() + moon[moon > threshold].mean()) + / 2.0 + ) + == threshold + ) assert threshold == 86 thresholds = threshold_isodata(moon, return_all=True) for threshold in thresholds: - assert np.floor((moon[moon <= threshold].mean() + - moon[moon > threshold].mean()) / 2.0) == threshold + assert ( + np.floor( + (moon[moon <= threshold].mean() + moon[moon > threshold].mean()) + / 2.0 + ) + == threshold + ) assert_array_equal(thresholds, [86, 87, 88, 122, 123, 124, 139, 140]) @@ -516,14 +551,24 @@ def test_isodata_moon_image_negative_int(): moon -= 100 threshold = threshold_isodata(moon) - assert np.floor((moon[moon <= threshold].mean() + - moon[moon > threshold].mean()) / 2.0) == threshold + assert ( + np.floor( + (moon[moon <= threshold].mean() + moon[moon > threshold].mean()) + / 2.0 + ) + == threshold + ) assert threshold == -14 thresholds = threshold_isodata(moon, return_all=True) for threshold in 
thresholds: - assert np.floor((moon[moon <= threshold].mean() + - moon[moon > threshold].mean()) / 2.0) == threshold + assert ( + np.floor( + (moon[moon <= threshold].mean() + moon[moon > threshold].mean()) + / 2.0 + ) + == threshold + ) assert_array_equal(thresholds, [-14, -13, -12, 22, 23, 24, 39, 40]) @@ -554,21 +599,21 @@ def test_threshold_minimum(): def test_threshold_minimum_histogram(): camera = util.img_as_ubyte(camerad) - hist = histogram(camera.ravel(), 256, source_range='image') + hist = histogram(camera.ravel(), 256, source_range="image") threshold = threshold_minimum(hist=hist) assert_array_equal(threshold, 85) def test_threshold_minimum_deprecated_max_iter_kwarg(): camera = util.img_as_ubyte(camerad) - hist = histogram(camera.ravel(), 256, source_range='image') + hist = histogram(camera.ravel(), 256, source_range="image") with expected_warnings(["`max_iter` is a deprecated argument"]): threshold_minimum(hist=hist, max_iter=5000) def test_threshold_minimum_counts(): camera = util.img_as_ubyte(camerad) - counts, bin_centers = histogram(camera.ravel(), 256, source_range='image') + counts, bin_centers = histogram(camera.ravel(), 256, source_range="image") threshold = threshold_minimum(hist=counts) assert_array_equal(threshold, 85) @@ -607,18 +652,34 @@ def test_triangle_float_images(): text = cp.array(data.text()) int_bins = int(text.max() - text.min() + 1) # Set nbins to match the uint case and threshold as float. - assert round(float(threshold_triangle( - text.astype(float), nbins=int_bins))) == 104 + assert ( + round(float(threshold_triangle(text.astype(float), nbins=int_bins))) + == 104 + ) # Check that rescaling image to floats in unit interval is equivalent. assert ( round(float(threshold_triangle(text / 255.0, nbins=int_bins) * 255)) == 104 ) # Repeat for inverted image. 
- assert round(float(threshold_triangle( - cp.invert(text).astype(float), nbins=int_bins))) == 151 - assert round(float(threshold_triangle( - cp.invert(text) / 255, nbins=int_bins) * 255)) == 151 + assert ( + round( + float( + threshold_triangle( + cp.invert(text).astype(float), nbins=int_bins + ) + ) + ) + == 151 + ) + assert ( + round( + float( + threshold_triangle(cp.invert(text) / 255, nbins=int_bins) * 255 + ) + ) + == 151 + ) def test_triangle_flip(): @@ -677,7 +738,8 @@ def test_triangle_flip(): @pytest.mark.parametrize( - "threshold_func", [threshold_local, threshold_niblack, threshold_sauvola], + "threshold_func", + [threshold_local, threshold_niblack, threshold_sauvola], ) @pytest.mark.parametrize("dtype", [cp.uint8, cp.int16, cp.float16, cp.float32]) def test_variable_dtypes(threshold_func, dtype): @@ -709,11 +771,11 @@ def test_niblack_sauvola_pathological_image(): def test_bimodal_multiotsu_hist(): - for name in ['camera', 'moon', 'coins', 'text', 'clock', 'page']: + for name in ["camera", "moon", "coins", "text", "clock", "page"]: img = cp.array(getattr(data, name)()) assert threshold_otsu(img) == threshold_multiotsu(img, 2) - for name in ['chelsea', 'coffee', 'astronaut', 'rocket']: + for name in ["chelsea", "coffee", "astronaut", "rocket"]: img = rgb2gray(cp.array(getattr(data, name)())) assert threshold_otsu(img) == threshold_multiotsu(img, 2) @@ -726,13 +788,12 @@ def test_check_multiotsu_results(): [0, 1, 2, 3, 4]]) # fmt: on for idx in range(3, 6): - thr_multi = threshold_multiotsu(image, - classes=idx) + thr_multi = threshold_multiotsu(image, classes=idx) assert len(thr_multi) == idx - 1 def test_multiotsu_output(): - image = cp.zeros((100, 100), dtype='int') + image = cp.zeros((100, 100), dtype="int") coords = [(25, 25), (50, 50), (75, 75)] values = [64, 128, 192] for coor, val in zip(coords, values): @@ -745,7 +806,7 @@ def test_multiotsu_output(): def test_multiotsu_astro_image(): img = util.img_as_ubyte(astronautd) - with expected_warnings(['grayscale']): + with expected_warnings(["grayscale"]): assert_array_almost_equal(threshold_multiotsu(img), [58, 149]) @@ -782,13 +843,12 @@ def test_multiotsu_more_classes_then_values(): @pytest.mark.skip("_get_multiotsu_thresh_indices functions not implemented yet") def test_multiotsu_lut(): for classes in [2, 3, 4]: - for name in ['camera', 'moon', 'coins', 'text', 'clock', 'page']: + for name in ["camera", "moon", "coins", "text", "clock", "page"]: img = cp.array(getattr(data, name)()) - prob, bin_centers = histogram(img.ravel(), - nbins=256, - source_range='image', - normalize=True) - prob = prob.astype('float32') + prob, bin_centers = histogram( + img.ravel(), nbins=256, source_range="image", normalize=True + ) + prob = prob.astype("float32") result_lut = _get_multiotsu_thresh_indices_lut(prob, classes - 1) result = _get_multiotsu_thresh_indices(prob, classes - 1) @@ -803,7 +863,7 @@ def test_multiotsu_missing_img_and_hist(): def test_multiotsu_hist_parameter(): for classes in [2, 3, 4]: - for name in ['camera', 'moon', 'coins', 'text', 'clock', 'page']: + for name in ["camera", "moon", "coins", "text", "clock", "page"]: img = cp.array(getattr(data, name)()) sk_hist = histogram(img, nbins=256) # diff --git a/python/cucim/src/cucim/skimage/filters/tests/test_unsharp_mask.py b/python/cucim/src/cucim/skimage/filters/tests/test_unsharp_mask.py index f5437f578..0c5157ee2 100644 --- a/python/cucim/src/cucim/skimage/filters/tests/test_unsharp_mask.py +++ b/python/cucim/src/cucim/skimage/filters/tests/test_unsharp_mask.py @@ -5,71 
+5,108 @@ from cucim.skimage.filters import unsharp_mask -@pytest.mark.parametrize("shape,multichannel", - [((29,), False), - ((40, 4), True), - ((32, 32), False), - ((29, 31, 3), True), - ((13, 17, 4, 8), False)]) -@pytest.mark.parametrize("dtype", [cp.uint8, cp.int8, - cp.uint16, cp.int16, - cp.uint32, cp.int32, - cp.uint64, cp.int64, - cp.float16, cp.float32, cp.float64]) +@pytest.mark.parametrize( + "shape,multichannel", + [ + ((29,), False), + ((40, 4), True), + ((32, 32), False), + ((29, 31, 3), True), + ((13, 17, 4, 8), False), + ], +) +@pytest.mark.parametrize( + "dtype", + [ + cp.uint8, + cp.int8, + cp.uint16, + cp.int16, + cp.uint32, + cp.int32, + cp.uint64, + cp.int64, + cp.float16, + cp.float32, + cp.float64, + ], +) @pytest.mark.parametrize("radius", [0, 0.1, 2.0]) @pytest.mark.parametrize("amount", [0.0, 0.5, 2.0, -1.0]) @pytest.mark.parametrize("offset", [-1.0, 0.0, 1.0]) @pytest.mark.parametrize("preserve", [False, True]) def test_unsharp_masking_output_type_and_shape( - radius, amount, shape, multichannel, dtype, offset, preserve): + radius, amount, shape, multichannel, dtype, offset, preserve +): array = cp.random.random(shape) array = ((array + offset) * 128).astype(dtype) if (preserve is False) and (dtype in [cp.float32, cp.float64]): array /= max(cp.abs(array).max(), 1.0) channel_axis = -1 if multichannel else None - output = unsharp_mask(array, radius, amount, preserve_range=preserve, - channel_axis=channel_axis) + output = unsharp_mask( + array, + radius, + amount, + preserve_range=preserve, + channel_axis=channel_axis, + ) assert output.dtype in [cp.float32, cp.float64] assert output.shape == shape -@pytest.mark.parametrize("shape,multichannel", - [((32, 32), False), - ((15, 15, 2), True), - ((17, 19, 3), True)]) +@pytest.mark.parametrize( + "shape,multichannel", + [((32, 32), False), ((15, 15, 2), True), ((17, 19, 3), True)], +) @pytest.mark.parametrize("radius", [(0.0, 0.0), (1.0, 1.0), (2.0, 1.5)]) @pytest.mark.parametrize("preserve", [False, True]) -def test_unsharp_masking_with_different_radii(radius, shape, - multichannel, preserve): +def test_unsharp_masking_with_different_radii( + radius, shape, multichannel, preserve +): amount = 1.0 dtype = cp.float64 array = (cp.random.random(shape) * 96).astype(dtype) if preserve is False: array /= max(cp.abs(array).max(), 1.0) channel_axis = -1 if multichannel else None - output = unsharp_mask(array, radius, amount, preserve_range=preserve, - channel_axis=channel_axis) + output = unsharp_mask( + array, + radius, + amount, + preserve_range=preserve, + channel_axis=channel_axis, + ) assert output.dtype in [cp.float32, cp.float64] assert output.shape == shape -@pytest.mark.parametrize("shape, channel_axis", - [((16, 16), None), - ((15, 15, 2), -1), - ((13, 17, 3), -1), - ((2, 15, 15), 0), - ((3, 13, 17), 0)]) +@pytest.mark.parametrize( + "shape, channel_axis", + [ + ((16, 16), None), + ((15, 15, 2), -1), + ((13, 17, 3), -1), + ((2, 15, 15), 0), + ((3, 13, 17), 0), + ], +) @pytest.mark.parametrize("offset", [-5, 0, 5]) @pytest.mark.parametrize("preserve", [False, True]) -def test_unsharp_masking_with_different_ranges(shape, offset, channel_axis, - preserve): +def test_unsharp_masking_with_different_ranges( + shape, offset, channel_axis, preserve +): radius = 2.0 amount = 1.0 dtype = cp.int16 array = (cp.random.random(shape) * 5 + offset).astype(dtype) negative = cp.any(array < 0) - output = unsharp_mask(array, radius, amount, preserve_range=preserve, - channel_axis=channel_axis) + output = unsharp_mask( + array, + radius, + 
amount, + preserve_range=preserve, + channel_axis=channel_axis, + ) if preserve is False: assert cp.any(output <= 1) assert cp.any(output >= -1) @@ -79,21 +116,27 @@ def test_unsharp_masking_with_different_ranges(shape, offset, channel_axis, assert output.shape == shape -@pytest.mark.parametrize("shape, channel_axis", - [((16, 16), None), - ((15, 15, 2), -1), - ((13, 17, 3), -1)]) +@pytest.mark.parametrize( + "shape, channel_axis", + [((16, 16), None), ((15, 15, 2), -1), ((13, 17, 3), -1)], +) @pytest.mark.parametrize("offset", [-5, 0, 5]) @pytest.mark.parametrize("preserve", [False, True]) -def test_unsharp_masking_with_different_ranges_dep(shape, offset, - channel_axis, preserve): +def test_unsharp_masking_with_different_ranges_dep( + shape, offset, channel_axis, preserve +): radius = 2.0 amount = 1.0 dtype = cp.int16 array = (cp.random.random(shape) * 5 + offset).astype(dtype) negative = cp.any(array < 0) - output = unsharp_mask(array, radius, amount, channel_axis=channel_axis, - preserve_range=preserve) + output = unsharp_mask( + array, + radius, + amount, + channel_axis=channel_axis, + preserve_range=preserve, + ) if preserve is False: assert cp.any(output <= 1) assert cp.any(output >= -1) @@ -103,20 +146,26 @@ def test_unsharp_masking_with_different_ranges_dep(shape, offset, assert output.shape == shape -@pytest.mark.parametrize("shape,channel_axis", - [((16, 16), None), - ((15, 15, 2), -1), - ((13, 17, 3), -1)]) +@pytest.mark.parametrize( + "shape,channel_axis", + [((16, 16), None), ((15, 15, 2), -1), ((13, 17, 3), -1)], +) @pytest.mark.parametrize("preserve", [False, True]) -@pytest.mark.parametrize("dtype", - [cp.uint8, cp.float16, cp.float32, cp.float64]) +@pytest.mark.parametrize( + "dtype", [cp.uint8, cp.float16, cp.float32, cp.float64] +) def test_unsharp_masking_dtypes(shape, channel_axis, preserve, dtype): radius = 2.0 amount = 1.0 array = (cp.random.random(shape) * 10).astype(dtype, copy=False) negative = cp.any(array < 0) - output = unsharp_mask(array, radius, amount, preserve_range=preserve, - channel_axis=channel_axis) + output = unsharp_mask( + array, + radius, + amount, + preserve_range=preserve, + channel_axis=channel_axis, + ) if preserve is False: assert cp.any(output <= 1) assert cp.any(output >= -1) diff --git a/python/cucim/src/cucim/skimage/filters/tests/test_window.py b/python/cucim/src/cucim/skimage/filters/tests/test_window.py index 0341c9844..4cc21d137 100644 --- a/python/cucim/src/cucim/skimage/filters/tests/test_window.py +++ b/python/cucim/src/cucim/skimage/filters/tests/test_window.py @@ -8,7 +8,7 @@ @pytest.mark.parametrize("size", [5, 6]) @pytest.mark.parametrize("ndim", [2, 3, 4]) def test_window_shape_isotropic(size, ndim): - w = window('hann', (size,) * ndim) + w = window("hann", (size,) * ndim) assert w.ndim == ndim assert w.shape[1:] == w.shape[:-1] for i in range(1, ndim - 1): @@ -17,13 +17,13 @@ def test_window_shape_isotropic(size, ndim): @pytest.mark.parametrize("shape", [(8, 16), (16, 8), (2, 3, 4)]) def test_window_shape_anisotropic(shape): - w = window('hann', shape) + w = window("hann", shape) assert w.shape == shape @pytest.mark.parametrize("shape", [[17, 33], [17, 97]]) def test_window_anisotropic_amplitude(shape): - w = window(('tukey', 0.8), shape) + w = window(("tukey", 0.8), shape) # The shape is stretched to give approximately the same range on each axis, # so the center profile should have a similar mean value. 
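That amplitude claim can be sanity-checked on the host with SciPy's reference windows; a minimal sketch, assuming SciPy is available and using the same `("tukey", 0.8)` profile and axis lengths as the test parametrization above (the tolerance is an illustrative choice, not the test's actual assertion):

```python
from scipy.signal import get_window

# Symmetric Tukey profiles at the two axis lengths from the parametrization.
# Both discretize the same continuous taper, so the longer axis behaves like a
# stretched (resampled) copy rather than a truncated one, and the profile
# means stay close.
w_short = get_window(("tukey", 0.8), 17, fftbins=False)
w_long = get_window(("tukey", 0.8), 33, fftbins=False)
assert abs(float(w_short.mean()) - float(w_long.mean())) < 0.05
```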
@@ -42,8 +42,8 @@ def test_window_type(wintype): @pytest.mark.parametrize("size", [10, 11]) def test_window_1d(size): - w = window('hann', size) - w1 = get_window('hann', size, fftbins=False) + w = window("hann", size) + w1 = get_window("hann", size, fftbins=False) assert cp.allclose(w, w1) diff --git a/python/cucim/src/cucim/skimage/filters/thresholding.py b/python/cucim/src/cucim/skimage/filters/thresholding.py index c2dedb2fa..53e8b8d55 100644 --- a/python/cucim/src/cucim/skimage/filters/thresholding.py +++ b/python/cucim/src/cucim/skimage/filters/thresholding.py @@ -6,11 +6,13 @@ import cupy as cp import numpy as np -from skimage.filters import threshold_isodata as _threshold_isodata_cpu -from skimage.filters import threshold_minimum as _threshold_minimum_cpu -from skimage.filters import threshold_multiotsu as _threshold_multiotsu_cpu -from skimage.filters import threshold_otsu as _threshold_otsu_cpu -from skimage.filters import threshold_yen as _threshold_yen_cpu +from skimage.filters import ( + threshold_isodata as _threshold_isodata_cpu, + threshold_minimum as _threshold_minimum_cpu, + threshold_multiotsu as _threshold_multiotsu_cpu, + threshold_otsu as _threshold_otsu_cpu, + threshold_yen as _threshold_yen_cpu, +) import cucim.skimage._vendored.ndimage as ndi from cucim import _misc @@ -24,19 +26,21 @@ from ..util import dtype_limits from ._sparse import _correlate_sparse, _validate_window_size -__all__ = ['try_all_threshold', - 'threshold_otsu', - 'threshold_yen', - 'threshold_isodata', - 'threshold_li', - 'threshold_local', - 'threshold_minimum', - 'threshold_mean', - 'threshold_niblack', - 'threshold_sauvola', - 'threshold_triangle', - 'apply_hysteresis_threshold', - 'threshold_multiotsu'] +__all__ = [ + "try_all_threshold", + "threshold_otsu", + "threshold_yen", + "threshold_isodata", + "threshold_li", + "threshold_local", + "threshold_minimum", + "threshold_mean", + "threshold_niblack", + "threshold_sauvola", + "threshold_triangle", + "apply_hysteresis_threshold", + "threshold_multiotsu", +] def _try_all(image, methods=None, figsize=None, num_cols=2, verbose=True): @@ -65,37 +69,44 @@ def _try_all(image, methods=None, figsize=None, num_cols=2, verbose=True): # Compute the image histogram for better performances nbins = 256 # Default in threshold functions - hist = histogram(image.ravel(), nbins, source_range='image') + hist = histogram(image.ravel(), nbins, source_range="image") # Handle default value methods = methods or {} num_rows = math.ceil((len(methods) + 1.0) / num_cols) - fig, ax = plt.subplots(num_rows, num_cols, figsize=figsize, - sharex=True, sharey=True) + fig, ax = plt.subplots( + num_rows, num_cols, figsize=figsize, sharex=True, sharey=True + ) ax = ax.ravel() ax[0].imshow(cp.asnumpy(image), cmap=plt.cm.gray) - ax[0].set_title('Original') + ax[0].set_title("Original") i = 1 for name, func in methods.items(): # Use precomputed histogram for supporting functions sig = inspect.signature(func) - _kwargs = dict(hist=hist) if 'hist' in sig.parameters else {} + _kwargs = dict(hist=hist) if "hist" in sig.parameters else {} ax[i].set_title(name) try: ax[i].imshow(cp.asnumpy(func(image, **_kwargs)), cmap=plt.cm.gray) except Exception as e: - ax[i].text(0.5, 0.5, f"{type(e).__name__}", - ha="center", va="center", transform=ax[i].transAxes) + ax[i].text( + 0.5, + 0.5, + f"{type(e).__name__}", + ha="center", + va="center", + transform=ax[i].transAxes, + ) i += 1 if verbose: print(func.__orifunc__) for a in ax: - a.axis('off') + a.axis("off") fig.tight_layout() return fig, ax 
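Note how `_try_all` builds one 256-bin histogram up front and then forwards it only to thresholding callables whose signature declares a `hist` parameter. A minimal sketch of that dispatch pattern, with hypothetical `t_plain`/`t_hist` functions standing in for the real thresholding methods:

```python
import inspect

def t_plain(image):            # hypothetical: no histogram support
    return 0.0

def t_hist(image, hist=None):  # hypothetical: can reuse a precomputed histogram
    return 0.0 if hist is None else float(len(hist[0]))

hist = ([3, 5, 2], [0.1, 0.5, 0.9])  # stand-in for (counts, bin_centers)
for func in (t_plain, t_hist):
    # Same dispatch as the loop above: pass `hist` only when the signature
    # accepts it, so functions without histogram support still work.
    kwargs = dict(hist=hist) if "hist" in inspect.signature(func).parameters else {}
    result = func(image=None, **kwargs)
```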
@@ -153,20 +164,30 @@ def wrapper(im): return wrapper # Global algorithms. - methods = OrderedDict({'Isodata': thresh(threshold_isodata), - 'Li': thresh(threshold_li), - 'Mean': thresh(threshold_mean), - 'Minimum': thresh(threshold_minimum), - 'Otsu': thresh(threshold_otsu), - 'Triangle': thresh(threshold_triangle), - 'Yen': thresh(threshold_yen)}) + methods = OrderedDict( + { + "Isodata": thresh(threshold_isodata), + "Li": thresh(threshold_li), + "Mean": thresh(threshold_mean), + "Minimum": thresh(threshold_minimum), + "Otsu": thresh(threshold_otsu), + "Triangle": thresh(threshold_triangle), + "Yen": thresh(threshold_yen), + } + ) - return _try_all(image, figsize=figsize, - methods=methods, verbose=verbose) + return _try_all(image, figsize=figsize, methods=methods, verbose=verbose) -def threshold_local(image, block_size=3, method='gaussian', offset=0, - mode='reflect', param=None, cval=0): +def threshold_local( + image, + block_size=3, + method="gaussian", + offset=0, + mode="reflect", + param=None, + cval=0, +): """Compute a threshold mask image based on local pixel neighborhood. Also known as adaptive or dynamic thresholding. The threshold value is @@ -232,32 +253,38 @@ def threshold_local(image, block_size=3, method='gaussian', offset=0, raise ValueError("len(block_size) must equal image.ndim.") block_size = tuple(block_size) if any(b % 2 == 0 for b in block_size): - raise ValueError(f'block_size must be odd! Given block_size ' - f'{block_size} contains even values.') + raise ValueError( + f"block_size must be odd! Given block_size " + f"{block_size} contains even values." + ) float_dtype = _supported_float_type(image.dtype) image = image.astype(float_dtype, copy=False) thresh_image = cp.zeros(image.shape, dtype=float_dtype) - if method == 'generic': + if method == "generic": raise NotImplementedError("TODO: implement generic_filter") ndi.generic_filter( image, param, block_size, output=thresh_image, mode=mode, cval=cval ) - elif method == 'gaussian': + elif method == "gaussian": if param is None: # automatically determine sigma which covers > 99% of distribution sigma = tuple([(b - 1) / 6.0 for b in block_size]) else: sigma = param gaussian(image, sigma, output=thresh_image, mode=mode, cval=cval) - elif method == 'mean': - ndi.uniform_filter(image, block_size, output=thresh_image, mode=mode, - cval=cval) - elif method == 'median': - ndi.median_filter(image, block_size, output=thresh_image, mode=mode, - cval=cval) + elif method == "mean": + ndi.uniform_filter( + image, block_size, output=thresh_image, mode=mode, cval=cval + ) + elif method == "median": + ndi.median_filter( + image, block_size, output=thresh_image, mode=mode, cval=cval + ) else: - raise ValueError("Invalid method specified. Please use `generic`, " - "`gaussian`, `mean`, or `median`.") + raise ValueError( + "Invalid method specified. Please use `generic`, " + "`gaussian`, `mean`, or `median`." 
+ ) return thresh_image - offset @@ -314,7 +341,7 @@ def _validate_image_histogram(image, hist, nbins=None, normalize=False): else: counts, bin_centers = histogram( - image.ravel(), nbins, source_range='image', normalize=normalize + image.ravel(), nbins, source_range="image", normalize=normalize ) return counts.astype(cp.float32, copy=False), bin_centers @@ -361,9 +388,11 @@ def threshold_otsu(image=None, nbins=256, *, hist=None): """ if image is not None and image.ndim > 2 and image.shape[-1] in (3, 4): - warn(f'threshold_otsu is expected to work correctly only for ' - f'grayscale images; image shape {image.shape} looks like ' - f'that of an RGB image.') + warn( + f"threshold_otsu is expected to work correctly only for " + f"grayscale images; image shape {image.shape} looks like " + f"that of an RGB image." + ) # Check if the image has more than one intensity value; if not, return that # value @@ -515,9 +544,7 @@ def threshold_isodata(image=None, nbins=256, return_all=False, *, hist=None): counts = counts.astype(cp.float32, copy=False) return cp.asarray( _threshold_isodata_cpu( - nbins=nbins, - return_all=return_all, - hist=(counts, bin_centers) + nbins=nbins, return_all=return_all, hist=(counts, bin_centers) ) ) @@ -566,7 +593,7 @@ def _cross_entropy(image, threshold, bins=_DEFAULT_ENTROPY_BINS): """ # noqa bins = cp.asarray(bins) # required for _DEFAULT_ENTROPY_BINS tuple histogram, bin_edges = cp.histogram(image, bins=bins, density=True) - bin_centers = cp.convolve(bin_edges, cp.array([0.5, 0.5]), mode='valid') + bin_centers = cp.convolve(bin_edges, cp.array([0.5, 0.5]), mode="valid") t = cp.flatnonzero(bin_centers > threshold)[0] m0a = cp.sum(histogram[:t]) # 0th moment, background m0b = cp.sum(histogram[t:]) @@ -578,8 +605,9 @@ def _cross_entropy(image, threshold, bins=_DEFAULT_ENTROPY_BINS): return nu -def threshold_li(image, *, tolerance=None, initial_guess=None, - iter_callback=None): +def threshold_li( + image, *, tolerance=None, initial_guess=None, iter_callback=None +): """Compute threshold value by Li's iterative Minimum Cross Entropy method. Parameters @@ -656,7 +684,7 @@ def threshold_li(image, *, tolerance=None, initial_guess=None, # Li's algorithm requires positive image (because of log(mean)) image_min = cp.min(image) image -= image_min - if image.dtype.kind in 'iu': + if image.dtype.kind in "iu": tolerance = tolerance or 0.5 else: tolerance = tolerance or float(cp.min(cp.diff(cp.unique(image))) / 2) @@ -670,14 +698,18 @@ def threshold_li(image, *, tolerance=None, initial_guess=None, t_next = initial_guess - image_min image_max = cp.max(image) + image_min if not 0 < t_next < cp.max(image): - msg = (f'The initial guess for threshold_li must be within the ' - f'range of the image. Got {initial_guess} for image min ' - f'{image_min} and max {image_max}.') + msg = ( + f"The initial guess for threshold_li must be within the " + f"range of the image. Got {initial_guess} for image min " + f"{image_min} and max {image_max}." + ) raise ValueError(msg) else: - raise TypeError('Incorrect type for `initial_guess`; should be ' - 'a floating point value, or a function mapping an ' - 'array to a floating point value.') + raise TypeError( + "Incorrect type for `initial_guess`; should be " + "a floating point value, or a function mapping an " + "array to a floating point value." + ) # initial value for t_curr must be different from t_next by at # least the tolerance. 
Since the image is positive, we ensure this @@ -691,19 +723,20 @@ def threshold_li(image, *, tolerance=None, initial_guess=None, # Stop the iterations when the difference between the # new and old threshold values is less than the tolerance - if image.dtype.kind in 'iu': - hist, bin_centers = histogram(image.reshape(-1), - source_range='image') + if image.dtype.kind in "iu": + hist, bin_centers = histogram(image.reshape(-1), source_range="image") hist = hist.astype(cp.float32, copy=False) while abs(t_next - t_curr) > tolerance: t_curr = t_next foreground = bin_centers > t_curr background = ~foreground - mean_fore = np.average(bin_centers[foreground], - weights=hist[foreground]) - mean_back = np.average(bin_centers[background], - weights=hist[background]) + mean_fore = np.average( + bin_centers[foreground], weights=hist[foreground] + ) + mean_back = np.average( + bin_centers[background], weights=hist[background] + ) if mean_back == 0: break @@ -711,16 +744,14 @@ def threshold_li(image, *, tolerance=None, initial_guess=None, eps = 100 * np.finfo(float).eps mean_back = float(mean_back) mean_fore = float(mean_fore) - t_next = ( - (mean_back - mean_fore) - / (math.log(mean_back + eps) - math.log(mean_fore + eps)) + t_next = (mean_back - mean_fore) / ( + math.log(mean_back + eps) - math.log(mean_fore + eps) ) if iter_callback is not None: iter_callback(t_next + image_min) else: - # Stop the iterations when the difference between the # new and old threshold values is less than the tolerance while abs(t_next - t_curr) > tolerance: @@ -733,8 +764,9 @@ def threshold_li(image, *, tolerance=None, initial_guess=None, if mean_back == 0: break - t_next = ((mean_back - mean_fore) / - (math.log(mean_back + eps) - math.log(mean_fore + eps))) + t_next = (mean_back - mean_fore) / ( + math.log(mean_back + eps) - math.log(mean_fore + eps) + ) if iter_callback is not None: iter_callback(t_next + image_min) @@ -743,8 +775,11 @@ def threshold_li(image, *, tolerance=None, initial_guess=None, return threshold -@deprecate_kwarg({'max_iter': 'max_num_iter'}, removed_version="23.02.00", - deprecated_version="22.02.00") +@deprecate_kwarg( + {"max_iter": "max_num_iter"}, + removed_version="23.02.00", + deprecated_version="22.02.00", +) def threshold_minimum(image=None, nbins=256, max_num_iter=10000, *, hist=None): """Return threshold value based on minimum method. @@ -803,9 +838,7 @@ def threshold_minimum(image=None, nbins=256, max_num_iter=10000, *, hist=None): bin_centers = cp.asnumpy(bin_centers) return cp.asarray( _threshold_minimum_cpu( - nbins=nbins, - max_num_iter=max_num_iter, - hist=(counts, bin_centers) + nbins=nbins, max_num_iter=max_num_iter, hist=(counts, bin_centers) ) ) @@ -876,7 +909,7 @@ def threshold_triangle(image, nbins=256): """ # nbins is ignored for integer arrays # so, we recalculate the effective nbins. - hist, bin_centers = histogram(image.ravel(), nbins, source_range='image') + hist, bin_centers = histogram(image.ravel(), nbins, source_range="image") nbins = len(hist) # Find peak, lowest and highest gray levels. @@ -901,7 +934,7 @@ def threshold_triangle(image, nbins=256): y1 = hist[x1 + arg_low_level] # Normalize. 
- norm = cp.sqrt(peak_height ** 2 + width ** 2) + norm = cp.sqrt(peak_height**2 + width**2) try: peak_height /= norm width /= norm @@ -963,8 +996,9 @@ def _mean_std(image, w): float_dtype = _supported_float_type(image.dtype) pad_width = tuple((k // 2 + 1, k // 2) for k in w) - padded = pad(image.astype(float_dtype, copy=False), pad_width, - mode='reflect') + padded = pad( + image.astype(float_dtype, copy=False), pad_width, mode="reflect" + ) # Note: keep float64 integral images for accuracy. Outputs of # _correlate_sparse can later be safely cast to float_dtype @@ -974,17 +1008,19 @@ def _mean_std(image, w): # Create lists of non-zero kernel indices and values kernel_indices = list(itertools.product(*tuple([(0, _w) for _w in w]))) - kernel_values = [(-1) ** (image.ndim % 2 != np.sum(indices) % 2) - for indices in kernel_indices] + kernel_values = [ + (-1) ** (image.ndim % 2 != np.sum(indices) % 2) + for indices in kernel_indices + ] total_window_size = _misc.prod(w) kernel_shape = tuple(_w + 1 for _w in w) - m = _correlate_sparse(integral, kernel_shape, kernel_indices, - kernel_values) + m = _correlate_sparse(integral, kernel_shape, kernel_indices, kernel_values) m = m.astype(float_dtype, copy=False) m /= total_window_size - g2 = _correlate_sparse(integral_sq, kernel_shape, kernel_indices, - kernel_values) + g2 = _correlate_sparse( + integral_sq, kernel_shape, kernel_indices, kernel_values + ) g2 = g2.astype(float_dtype, copy=False) g2 /= total_window_size # Note: we use np.clip because g2 is not guaranteed to be greater than @@ -1247,17 +1283,22 @@ def threshold_multiotsu(image=None, classes=3, nbins=256, *, hist=None): """ if image is not None and image.ndim > 2 and image.shape[-1] in (3, 4): - warn(f'threshold_multiotsu is expected to work correctly only for ' - f'grayscale images; image shape {image.shape} looks like ' - f'that of an RGB image.') + warn( + f"threshold_multiotsu is expected to work correctly only for " + f"grayscale images; image shape {image.shape} looks like " + f"that of an RGB image." + ) # calculating the histogram and the probability of each gray level. 
- prob, bin_centers = _validate_image_histogram(image, hist, nbins, - normalize=True) + prob, bin_centers = _validate_image_histogram( + image, hist, nbins, normalize=True + ) prob = cp.asnumpy(prob).astype(cp.float32, copy=False) bin_centers = cp.asnumpy(bin_centers) return cp.asarray( _threshold_multiotsu_cpu( - classes=classes, nbins=nbins, hist=(prob, bin_centers), + classes=classes, + nbins=nbins, + hist=(prob, bin_centers), ) ) diff --git a/python/cucim/src/cucim/skimage/measure/__init__.py b/python/cucim/src/cucim/skimage/measure/__init__.py index f24751e14..4f32c7980 100644 --- a/python/cucim/src/cucim/skimage/measure/__init__.py +++ b/python/cucim/src/cucim/skimage/measure/__init__.py @@ -1,13 +1,30 @@ from ._blur_effect import blur_effect -from ._colocalization import (intersection_coeff, manders_coloc_coeff, - manders_overlap_coeff, pearson_corr_coeff) +from ._colocalization import ( + intersection_coeff, + manders_coloc_coeff, + manders_overlap_coeff, + pearson_corr_coeff, +) from ._label import label -from ._moments import (centroid, inertia_tensor, inertia_tensor_eigvals, - moments, moments_central, moments_coords, - moments_coords_central, moments_hu, moments_normalized) +from ._moments import ( + centroid, + inertia_tensor, + inertia_tensor_eigvals, + moments, + moments_central, + moments_coords, + moments_coords_central, + moments_hu, + moments_normalized, +) from ._polygon import approximate_polygon, subdivide_polygon -from ._regionprops import (euler_number, perimeter, perimeter_crofton, - regionprops, regionprops_table) +from ._regionprops import ( + euler_number, + perimeter, + perimeter_crofton, + regionprops, + regionprops_table, +) from .block import block_reduce from .entropy import shannon_entropy from .profile import profile_line diff --git a/python/cucim/src/cucim/skimage/measure/_blur_effect.py b/python/cucim/src/cucim/skimage/measure/_blur_effect.py index 06461f7ad..e3ba81d1f 100644 --- a/python/cucim/src/cucim/skimage/measure/_blur_effect.py +++ b/python/cucim/src/cucim/skimage/measure/_blur_effect.py @@ -5,7 +5,7 @@ from ..color import rgb2gray from ..util import img_as_float -__all__ = ['blur_effect'] +__all__ = ["blur_effect"] def blur_effect(image, h_size=11, channel_axis=None, reduce_func=max): @@ -57,10 +57,10 @@ def blur_effect(image, h_size=11, channel_axis=None, reduce_func=max): # ensure color channels are in the final dimension image = cp.moveaxis(image, channel_axis, -1) except cp.AxisError: - print('channel_axis must be one of the image array dimensions') + print("channel_axis must be one of the image array dimensions") raise except TypeError: - print('channel_axis must be an integer') + print("channel_axis must be an integer") raise image = rgb2gray(image) n_axes = image.ndim @@ -69,6 +69,7 @@ def blur_effect(image, h_size=11, channel_axis=None, reduce_func=max): B = [] from ..filters import sobel + host_scalars = True slices = tuple([slice(2, s - 1) for s in shape]) for ax in range(n_axes): diff --git a/python/cucim/src/cucim/skimage/measure/_colocalization.py b/python/cucim/src/cucim/skimage/measure/_colocalization.py index 3587a331b..acc509a10 100644 --- a/python/cucim/src/cucim/skimage/measure/_colocalization.py +++ b/python/cucim/src/cucim/skimage/measure/_colocalization.py @@ -70,8 +70,8 @@ def pearson_corr_coeff(image0, image1, mask=None): References ---------- - .. [1] https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html # noqa - .. 
[2] https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html # noqa + .. [1] https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html + .. [2] https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html .. [3] Dunn, K. W., Kamocka, M. M., & McDonald, J. H. (2011). A practical guide to evaluating colocalization in biological microscopy. American journal of physiology. Cell physiology, 300(4), C723–C742. @@ -80,7 +80,7 @@ def pearson_corr_coeff(image0, image1, mask=None): subcellular colocalization analysis in light microscopy. Journal of Microscopy, 224: 213-232. https://doi.org/10.1111/j.1365-2818.2006.01706.x - """ + """ # noqa: E501 if mask is not None: mask = as_binary_ndarray(mask, variable_name="mask") check_shape_equality(image0, image1, mask) @@ -171,7 +171,7 @@ def manders_coloc_coeff(image0, image1_mask, mask=None): raise ValueError("image contains negative values") img_sum = cp.sum(image0) - if (img_sum == 0): + if img_sum == 0: return 0 return cp.sum(image0 * image1_mask) / img_sum diff --git a/python/cucim/src/cucim/skimage/measure/_label_kernels.py b/python/cucim/src/cucim/skimage/measure/_label_kernels.py index 68d6de95d..c93638230 100644 --- a/python/cucim/src/cucim/skimage/measure/_label_kernels.py +++ b/python/cucim/src/cucim/skimage/measure/_label_kernels.py @@ -117,7 +117,10 @@ def _kernel_connect(greyscale_mode=False, int_t="int"): ) return cupy.ElementwiseKernel( - in_params, "raw Y y", code, "cucim_skimage_measure_label_connect", + in_params, + "raw Y y", + code, + "cucim_skimage_measure_label_connect", ) diff --git a/python/cucim/src/cucim/skimage/measure/_moments.py b/python/cucim/src/cucim/skimage/measure/_moments.py index b79b7121c..cdd87ae91 100644 --- a/python/cucim/src/cucim/skimage/measure/_moments.py +++ b/python/cucim/src/cucim/skimage/measure/_moments.py @@ -214,13 +214,13 @@ def moments(image, order=3, *, spacing=None): _delta = cp.arange(max(image.shape), dtype=float_dtype)[:, cp.newaxis] if spacing is None: # when spacing is not used can compute the powers outside the loop - _powers_of_delta = _delta ** powers + _powers_of_delta = _delta**powers for dim, dim_length in enumerate(image.shape): if spacing is None: powers_of_delta = _powers_of_delta[:dim_length] else: delta = _delta[:dim_length] * spacing[dim] - powers_of_delta = delta ** powers + powers_of_delta = delta**powers calc = cp.moveaxis(calc, source=dim, destination=-1) calc = cp.dot(calc, powers_of_delta) calc = cp.moveaxis(calc, source=-1, destination=dim) @@ -291,7 +291,7 @@ def moments_central(image, center=None, order=3, *, spacing=None, **kwargs): _delta = cp.arange(max(image.shape), dtype=float_dtype)[:, cp.newaxis] for dim, dim_length in enumerate(image.shape): delta = _delta[:dim_length] * spacing[dim] - center[dim] - powers_of_delta = delta ** powers + powers_of_delta = delta**powers calc = cp.moveaxis(calc, source=dim, destination=-1) calc = cp.dot(calc, powers_of_delta) calc = cp.moveaxis(calc, source=-1, destination=dim) @@ -348,10 +348,10 @@ def _get_moments_norm_operation(ndim, order, unit_scale=True): @cp.memoize() def _get_normalize_kernel(ndim, order, unit_scale=True): return cp.ElementwiseKernel( - 'raw F mu, int32 order, float64 scale', - 'F nu', + "raw F mu, int32 order, float64 scale", + "F nu", operation=_get_moments_norm_operation(ndim, order, unit_scale), - name=f"moments_normmalize_2d_kernel" + name="moments_normmalize_2d_kernel", ) @@ -442,7 +442,7 @@ def moments_hu(nu): Notes ----- Due to the small array 
sizes, this function will be faster on the CPU. - Consider transfering ``nu`` to the host and running + Consider transferring ``nu`` to the host and running ``skimage.measure.moments_hu`` if the moments are not needed on the device. @@ -512,8 +512,11 @@ def centroid(image, *, spacing=None): mu = moments(image, order=1, spacing=spacing) ndim = image.ndim mu0 = mu[(0,) * ndim] - center = mu[tuple((0,) * dim + (1,) + (0,) * (ndim - dim - 1) - for dim in range(ndim))] + center = mu[ + tuple( + (0,) * dim + (1,) + (0,) * (ndim - dim - 1) for dim in range(ndim) + ) + ] center /= mu0 return center @@ -531,10 +534,10 @@ def _get_inertia_tensor_2x2_kernel(): result[3] = mxx / mu0; """ return cp.ElementwiseKernel( - in_params='raw F mu', - out_params='raw F result', + in_params="raw F mu", + out_params="raw F result", operation=operation, - name='cucim_skimage_measure_inertia_tensor_2x2' + name="cucim_skimage_measure_inertia_tensor_2x2", ) @@ -559,10 +562,10 @@ def _get_inertia_tensor_3x3_kernel(): result[5] = result[7] = -myz / mu0; """ return cp.ElementwiseKernel( - in_params='raw F mu', - out_params='raw F result', + in_params="raw F mu", + out_params="raw F result", operation=operation, - name='cucim_skimage_measure_inertia_tensor_3x3' + name="cucim_skimage_measure_inertia_tensor_3x3", ) @@ -670,14 +673,16 @@ def inertia_tensor_eigvals(image, mu=None, T=None, *, spacing=None): alternatively, one can provide the inertia tensor (``T``) directly. """ # avoid circular import - from ..feature.corner import (_image_orthogonal_matrix22_eigvals, - _image_orthogonal_matrix33_eigvals) + from ..feature.corner import ( + _image_orthogonal_matrix22_eigvals, + _image_orthogonal_matrix33_eigvals, + ) if T is None: T = inertia_tensor(image, mu, spacing=spacing) if image.ndim == 2: eigvals = _image_orthogonal_matrix22_eigvals( - T[0, 0], T[0, 1], T[1, 1], sort='descending', abs_sort=False + T[0, 0], T[0, 1], T[1, 1], sort="descending", abs_sort=False ) cp.maximum(eigvals, 0.0, out=eigvals) elif image.ndim == 3: diff --git a/python/cucim/src/cucim/skimage/measure/_moments_analytical.py b/python/cucim/src/cucim/skimage/measure/_moments_analytical.py index 639daeede..2d1100564 100644 --- a/python/cucim/src/cucim/skimage/measure/_moments_analytical.py +++ b/python/cucim/src/cucim/skimage/measure/_moments_analytical.py @@ -180,10 +180,10 @@ def _moments_raw_to_central_fast(moments_raw): operation = _order3_3d kernel = cp.ElementwiseKernel( - 'raw F m', - 'raw F mc', + "raw F m", + "raw F mc", operation=operation, - name=f"order{order}_{ndim}d_kernel" + name=f"order{order}_{ndim}d_kernel", ) # run a single-threaded kernel, so we can avoid device->host->device copy kernel(moments_raw, moments_central, size=1) diff --git a/python/cucim/src/cucim/skimage/measure/_polygon.py b/python/cucim/src/cucim/skimage/measure/_polygon.py index 39e99eb6d..f1bf8c9d3 100644 --- a/python/cucim/src/cucim/skimage/measure/_polygon.py +++ b/python/cucim/src/cucim/skimage/measure/_polygon.py @@ -51,8 +51,8 @@ def approximate_polygon(coords, tolerance): segment_dist = c0 * cp.sin(segment_angle) + r0 * cp.cos(segment_angle) # select points in-between line segment - segment_coords = coords[start + 1:end, :] - segment_dists = dists[start + 1:end] + segment_coords = coords[start + 1 : end, :] + segment_dists = dists[start + 1 : end] # check whether to take perpendicular or euclidean distance with # inner product of vectors @@ -65,8 +65,7 @@ def approximate_polygon(coords, tolerance): # vectors points -> start and end projected on start -> end 
vector projected_lengths0 = dr0 * dr + dc0 * dc projected_lengths1 = -dr1 * dr - dc1 * dc - perp = cp.logical_and(projected_lengths0 > 0, - projected_lengths1 > 0) + perp = cp.logical_and(projected_lengths0 > 0, projected_lengths1 > 0) eucl = cp.logical_not(perp) segment_dists[perp] = cp.abs( segment_coords[perp, 0] * cp.cos(segment_angle) @@ -135,28 +134,31 @@ def subdivide_polygon(coords, degree=2, preserve_ends=False): from cucim.skimage import _vendored as signal if degree not in _SUBDIVISION_MASKS: - raise ValueError("Invalid B-Spline degree. Only degree 1 - 7 is " - "supported.") + raise ValueError( + "Invalid B-Spline degree. Only degree 1 - 7 is " "supported." + ) circular = cp.all(coords[0, :] == coords[-1, :]) - method = 'valid' + method = "valid" if circular: # remove last coordinate because of wrapping coords = coords[:-1, :] # circular convolution by wrapping boundaries - method = 'same' + method = "same" mask_even, mask_odd = _SUBDIVISION_MASKS[degree] # divide by total weight - float_dtype = coords.dtype if coords.dtype.kind == 'f' else cp.float64 - mask_even = cp.array(mask_even, float_dtype) / (2 ** degree) - mask_odd = cp.array(mask_odd, float_dtype) / (2 ** degree) - - even = signal.convolve2d(coords.T, cp.atleast_2d(mask_even), mode=method, - boundary='wrap') - odd = signal.convolve2d(coords.T, cp.atleast_2d(mask_odd), mode=method, - boundary='wrap') + float_dtype = coords.dtype if coords.dtype.kind == "f" else cp.float64 + mask_even = cp.array(mask_even, float_dtype) / (2**degree) + mask_odd = cp.array(mask_odd, float_dtype) / (2**degree) + + even = signal.convolve2d( + coords.T, cp.atleast_2d(mask_even), mode=method, boundary="wrap" + ) + odd = signal.convolve2d( + coords.T, cp.atleast_2d(mask_odd), mode=method, boundary="wrap" + ) out = cp.empty((even.shape[1] + odd.shape[1], 2), dtype=float_dtype) out[1::2] = even.T diff --git a/python/cucim/src/cucim/skimage/measure/_regionprops.py b/python/cucim/src/cucim/skimage/measure/_regionprops.py index daf9de458..fb1da9dfa 100644 --- a/python/cucim/src/cucim/skimage/measure/_regionprops.py +++ b/python/cucim/src/cucim/skimage/measure/_regionprops.py @@ -14,7 +14,7 @@ from . import _moments from ._regionprops_utils import euler_number, perimeter, perimeter_crofton -__all__ = ['regionprops', 'euler_number', 'perimeter', 'perimeter_crofton'] +__all__ = ["regionprops", "euler_number", "perimeter", "perimeter_crofton"] # All values in this PROPS dict correspond to current scikit-image property @@ -22,114 +22,114 @@ # releases. For backwards compatibility, these older names will continue to # work, but will not be documented. 
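The `PROPS` table below maps deprecated property names (both the MATLAB-style CamelCase originals and older snake_case aliases) onto the current canonical names. A minimal sketch of how such an alias table is typically consulted (`_canonical` is an illustrative helper, not this module's actual lookup code):

```python
# Old names fall through to their canonical replacement; names that are
# already canonical resolve to themselves.
def _canonical(prop, props):
    return props.get(prop, prop)

_demo = {"FilledArea": "area_filled", "filled_area": "area_filled"}
assert _canonical("FilledArea", _demo) == "area_filled"
assert _canonical("area_filled", _demo) == "area_filled"
```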
PROPS = { - 'Area': 'area', - 'BoundingBox': 'bbox', - 'BoundingBoxArea': 'area_bbox', - 'bbox_area': 'area_bbox', - 'CentralMoments': 'moments_central', - 'Centroid': 'centroid', - 'ConvexArea': 'area_convex', - 'convex_area': 'area_convex', + "Area": "area", + "BoundingBox": "bbox", + "BoundingBoxArea": "area_bbox", + "bbox_area": "area_bbox", + "CentralMoments": "moments_central", + "Centroid": "centroid", + "ConvexArea": "area_convex", + "convex_area": "area_convex", # 'ConvexHull', - 'ConvexImage': 'image_convex', - 'convex_image': 'image_convex', - 'Coordinates': 'coords', - 'Eccentricity': 'eccentricity', - 'EquivDiameter': 'equivalent_diameter_area', - 'equivalent_diameter': 'equivalent_diameter_area', - 'EulerNumber': 'euler_number', - 'Extent': 'extent', + "ConvexImage": "image_convex", + "convex_image": "image_convex", + "Coordinates": "coords", + "Eccentricity": "eccentricity", + "EquivDiameter": "equivalent_diameter_area", + "equivalent_diameter": "equivalent_diameter_area", + "EulerNumber": "euler_number", + "Extent": "extent", # 'Extrema', - 'FeretDiameter': 'feret_diameter_max', - 'FeretDiameterMax': 'feret_diameter_max', - 'FilledArea': 'area_filled', - 'filled_area': 'area_filled', - 'FilledImage': 'image_filled', - 'filled_image': 'image_filled', - 'HuMoments': 'moments_hu', - 'Image': 'image', - 'InertiaTensor': 'inertia_tensor', - 'InertiaTensorEigvals': 'inertia_tensor_eigvals', - 'IntensityImage': 'image_intensity', - 'intensity_image': 'image_intensity', - 'Label': 'label', - 'LocalCentroid': 'centroid_local', - 'local_centroid': 'centroid_local', - 'MajorAxisLength': 'axis_major_length', - 'major_axis_length': 'axis_major_length', - 'MaxIntensity': 'intensity_max', - 'max_intensity': 'intensity_max', - 'MeanIntensity': 'intensity_mean', - 'mean_intensity': 'intensity_mean', - 'MinIntensity': 'intensity_min', - 'min_intensity': 'intensity_min', - 'MinorAxisLength': 'axis_minor_length', - 'minor_axis_length': 'axis_minor_length', - 'Moments': 'moments', - 'NormalizedMoments': 'moments_normalized', - 'Orientation': 'orientation', - 'Perimeter': 'perimeter', - 'CroftonPerimeter': 'perimeter_crofton', + "FeretDiameter": "feret_diameter_max", + "FeretDiameterMax": "feret_diameter_max", + "FilledArea": "area_filled", + "filled_area": "area_filled", + "FilledImage": "image_filled", + "filled_image": "image_filled", + "HuMoments": "moments_hu", + "Image": "image", + "InertiaTensor": "inertia_tensor", + "InertiaTensorEigvals": "inertia_tensor_eigvals", + "IntensityImage": "image_intensity", + "intensity_image": "image_intensity", + "Label": "label", + "LocalCentroid": "centroid_local", + "local_centroid": "centroid_local", + "MajorAxisLength": "axis_major_length", + "major_axis_length": "axis_major_length", + "MaxIntensity": "intensity_max", + "max_intensity": "intensity_max", + "MeanIntensity": "intensity_mean", + "mean_intensity": "intensity_mean", + "MinIntensity": "intensity_min", + "min_intensity": "intensity_min", + "MinorAxisLength": "axis_minor_length", + "minor_axis_length": "axis_minor_length", + "Moments": "moments", + "NormalizedMoments": "moments_normalized", + "Orientation": "orientation", + "Perimeter": "perimeter", + "CroftonPerimeter": "perimeter_crofton", # 'PixelIdxList', # 'PixelList', - 'Slice': 'slice', - 'Solidity': 'solidity', + "Slice": "slice", + "Solidity": "solidity", # 'SubarrayIdx' - 'WeightedCentralMoments': 'moments_weighted_central', - 'weighted_moments_central': 'moments_weighted_central', - 'WeightedCentroid': 'centroid_weighted', - 
'weighted_centroid': 'centroid_weighted', - 'WeightedHuMoments': 'moments_weighted_hu', - 'weighted_moments_hu': 'moments_weighted_hu', - 'WeightedLocalCentroid': 'centroid_weighted_local', - 'weighted_local_centroid': 'centroid_weighted_local', - 'WeightedMoments': 'moments_weighted', - 'weighted_moments': 'moments_weighted', - 'WeightedNormalizedMoments': 'moments_weighted_normalized', - 'weighted_moments_normalized': 'moments_weighted_normalized', + "WeightedCentralMoments": "moments_weighted_central", + "weighted_moments_central": "moments_weighted_central", + "WeightedCentroid": "centroid_weighted", + "weighted_centroid": "centroid_weighted", + "WeightedHuMoments": "moments_weighted_hu", + "weighted_moments_hu": "moments_weighted_hu", + "WeightedLocalCentroid": "centroid_weighted_local", + "weighted_local_centroid": "centroid_weighted_local", + "WeightedMoments": "moments_weighted", + "weighted_moments": "moments_weighted", + "WeightedNormalizedMoments": "moments_weighted_normalized", + "weighted_moments_normalized": "moments_weighted_normalized", } COL_DTYPES = { - 'area': float, - 'area_bbox': float, - 'area_convex': float, - 'area_filled': float, - 'axis_major_length': float, - 'axis_minor_length': float, - 'bbox': int, - 'centroid': float, - 'centroid_local': float, - 'centroid_weighted': float, - 'centroid_weighted_local': float, - 'coords': object, - 'eccentricity': float, - 'equivalent_diameter_area': float, - 'euler_number': int, - 'extent': float, - 'feret_diameter_max': float, - 'image': object, - 'image_convex': object, - 'image_filled': object, - 'image_intensity': object, - 'inertia_tensor': float, - 'inertia_tensor_eigvals': float, - 'intensity_max': float, - 'intensity_mean': float, - 'intensity_min': float, - 'label': int, - 'moments': float, - 'moments_central': float, - 'moments_hu': float, - 'moments_normalized': float, - 'moments_weighted': float, - 'moments_weighted_central': float, - 'moments_weighted_hu': float, - 'moments_weighted_normalized': float, - 'orientation': float, - 'perimeter': float, - 'perimeter_crofton': float, - 'slice': object, - 'solidity': float, + "area": float, + "area_bbox": float, + "area_convex": float, + "area_filled": float, + "axis_major_length": float, + "axis_minor_length": float, + "bbox": int, + "centroid": float, + "centroid_local": float, + "centroid_weighted": float, + "centroid_weighted_local": float, + "coords": object, + "eccentricity": float, + "equivalent_diameter_area": float, + "euler_number": int, + "extent": float, + "feret_diameter_max": float, + "image": object, + "image_convex": object, + "image_filled": object, + "image_intensity": object, + "inertia_tensor": float, + "inertia_tensor_eigvals": float, + "intensity_max": float, + "intensity_mean": float, + "intensity_min": float, + "label": int, + "moments": float, + "moments_central": float, + "moments_hu": float, + "moments_normalized": float, + "moments_weighted": float, + "moments_weighted_central": float, + "moments_weighted_hu": float, + "moments_weighted_normalized": float, + "orientation": float, + "perimeter": float, + "perimeter_crofton": float, + "slice": object, + "solidity": float, } OBJECT_COLUMNS = [col for col, dtype in COL_DTYPES.items() if dtype == object] @@ -137,16 +137,16 @@ PROP_VALS = set(PROPS.values()) _require_intensity_image = ( - 'image_intensity', - 'intensity_max', - 'intensity_mean', - 'intensity_min', - 'moments_weighted', - 'moments_weighted_central', - 'centroid_weighted', - 'centroid_weighted_local', - 'moments_weighted_hu', - 
'moments_weighted_normalized', + "image_intensity", + "intensity_max", + "intensity_mean", + "intensity_min", + "moments_weighted", + "moments_weighted_central", + "centroid_weighted", + "centroid_weighted_local", + "moments_weighted_hu", + "moments_weighted_normalized", ) @@ -241,6 +241,7 @@ def func2d(self, *args, **kwargs): f"Property {method.__name__} is not implemented for 3D images" ) return method(self, *args, **kwargs) + return func2d @@ -277,12 +278,14 @@ def _inertia_eigvals_to_axes_lengths_3D(inertia_tensor_eigvals): References ---------- - ..[1] https://en.wikipedia.org/wiki/List_of_moments_of_inertia#List_of_3D_inertia_tensors # noqa - """ + ..[1] https://en.wikipedia.org/wiki/List_of_moments_of_inertia#List_of_3D_inertia_tensors + """ # noqa: E501 axis_lengths = [] for ax in range(2, -1, -1): - w = sum(v * -1 if i == ax else v - for i, v in enumerate(inertia_tensor_eigvals)) + w = sum( + v * -1 if i == ax else v + for i, v in enumerate(inertia_tensor_eigvals) + ) axis_lengths.append(math.sqrt(10 * w)) return axis_lengths @@ -292,15 +295,27 @@ class RegionProperties: on the available region properties. """ - def __init__(self, slice, label, label_image, intensity_image, - cache_active, *, extra_properties=None, spacing=None): - + def __init__( + self, + slice, + label, + label_image, + intensity_image, + cache_active, + *, + extra_properties=None, + spacing=None, + ): if intensity_image is not None: ndim = label_image.ndim - if not (intensity_image.shape[:ndim] == label_image.shape - and intensity_image.ndim in [ndim, ndim + 1]): - raise ValueError('Label and intensity image shapes must match,' - ' except for channel (last) axis.') + if not ( + intensity_image.shape[:ndim] == label_image.shape + and intensity_image.ndim in [ndim, ndim + 1] + ): + raise ValueError( + "Label and intensity image shapes must match," + " except for channel (last) axis." + ) multichannel = label_image.shape < intensity_image.shape else: multichannel = False @@ -317,7 +332,7 @@ def __init__(self, slice, label, label_image, intensity_image, self._ndim = label_image.ndim self._multichannel = multichannel self._spatial_axes = tuple(range(self._ndim)) - self._spacing = spacing if spacing is not None else (1.,) * self._ndim + self._spacing = spacing if spacing is not None else (1.0,) * self._ndim if isinstance(self._spacing, cp.ndarray): self._pixel_area = cp.product(self._spacing) else: @@ -351,23 +366,23 @@ def __getattr__(self, attr): if n_args == 2: if self._intensity_image is not None: if self._multichannel: - multichannel_list = [func(self.image, - self.image_intensity[..., i]) - for i in range( - self.image_intensity.shape[-1])] + multichannel_list = [ + func(self.image, self.image_intensity[..., i]) + for i in range(self.image_intensity.shape[-1]) + ] return cp.stack(multichannel_list, axis=-1) else: return func(self.image, self.image_intensity) else: raise AttributeError( - f'intensity image required to calculate {attr}' + f"intensity image required to calculate {attr}" ) elif n_args == 1: return func(self.image) else: raise AttributeError( - f'Custom regionprop function\'s number of arguments must ' - f'be 1 or 2, but {attr} takes {n_args} arguments.' + f"Custom regionprop function's number of arguments must " + f"be 1 or 2, but {attr} takes {n_args} arguments." 
                 )
         elif attr in PROPS and attr.lower() == attr:
             if (
@@ -409,8 +424,10 @@ def bbox(self):
         A tuple of the bounding box's start coordinates for each dimension,
         followed by the end coordinates for each dimension
         """
-        return tuple([self.slice[i].start for i in range(self._ndim)] +
-                     [self.slice[i].stop for i in range(self._ndim)])
+        return tuple(
+            [self.slice[i].start for i in range(self._ndim)]
+            + [self.slice[i].stop for i in range(self._ndim)]
+        )

     @property
     def area_bbox(self):
@@ -435,19 +452,25 @@ def image_convex(self):
         # CuPy Backend: explicitly cast to uint8 to avoid the issue see in
         # reported in https://github.com/cupy/cupy/issues/4354
         return cp.asarray(convex_hull_image(cp.asnumpy(self.image))).astype(
-            cp.uint8)
+            cp.uint8
+        )

     @property
     def coords_scaled(self):
         indices = cp.nonzero(self.image)
-        return cp.vstack([(indices[i] + self.slice[i].start) * s
-                          for i, s in zip(range(self._ndim), self._spacing)]).T
+        return cp.vstack(
+            [
+                (indices[i] + self.slice[i].start) * s
+                for i, s in zip(range(self._ndim), self._spacing)
+            ]
+        ).T

     @property
     def coords(self):
         indices = cp.nonzero(self.image)
-        return cp.vstack([indices[i] + self.slice[i].start
-                          for i in range(self._ndim)]).T
+        return cp.vstack(
+            [indices[i] + self.slice[i].start for i in range(self._ndim)]
+        ).T

     @property
     @only2d
@@ -466,8 +489,9 @@ def equivalent_diameter_area(self):
     @property
     def euler_number(self):
         if self._ndim not in [2, 3]:
-            raise NotImplementedError('Euler number is implemented for '
-                                      '2D or 3D images only')
+            raise NotImplementedError(
+                "Euler number is implemented for " "2D or 3D images only"
+            )
         return euler_number(self.image, self._ndim)

     @property
@@ -486,12 +510,14 @@ def feret_diameter_max(self):
         )
         identity_convex_hull = cp.asnumpy(identity_convex_hull)
         if self._ndim == 2:
-            coordinates = np.vstack(find_contours(identity_convex_hull, 0.5,
-                                                  fully_connected='high'))
+            coordinates = np.vstack(
+                find_contours(identity_convex_hull, 0.5, fully_connected="high")
+            )
         elif self._ndim == 3:
-            coordinates, _, _, _ = marching_cubes(identity_convex_hull,
-                                                  level=0.5)
-        distances = pdist(coordinates * self._spacing, 'sqeuclidean')
+            coordinates, _, _, _ = marching_cubes(
+                identity_convex_hull, level=0.5
+            )
+        distances = pdist(coordinates * self._spacing, "sqeuclidean")
         return math.sqrt(np.max(distances))

     @property
@@ -518,14 +544,15 @@ def inertia_tensor(self):
     @property
     @_cached
     def inertia_tensor_eigvals(self):
-        return _moments.inertia_tensor_eigvals(self.image,
-                                               T=self.inertia_tensor)
+        return _moments.inertia_tensor_eigvals(
+            self.image, T=self.inertia_tensor
+        )

     @property
     @_cached
     def image_intensity(self):
         if self._intensity_image is None:
-            raise AttributeError('No intensity image specified.')
+            raise AttributeError("No intensity image specified.")
         image = (
             self.image
             if not self._multichannel
@@ -539,8 +566,9 @@ def _image_intensity_double(self):
     @property
     def centroid_local(self):
         M = self.moments
-        return tuple(M[tuple(cp.eye(self._ndim, dtype=int))] /
-                     M[(0,) * self._ndim])
+        return tuple(
+            M[tuple(cp.eye(self._ndim, dtype=int))] / M[(0,) * self._ndim]
+        )

     @property
     def intensity_max(self):
@@ -595,7 +623,7 @@ def moments_central(self):
             self.image.astype(cp.uint8),
             self.centroid_local,
             order=3,
-            spacing=self._spacing
+            spacing=self._spacing,
         )
         return mu
@@ -604,15 +632,16 @@ def moments_central(self):
     def moments_hu(self):
         if any(s != 1.0 for s in self._spacing):
             raise NotImplementedError(
-                '`moments_hu` supports spacing = (1, 1) only'
+                "`moments_hu` supports spacing = (1, 1) only"
             )
         return _moments.moments_hu(self.moments_normalized)

     @property
     @_cached
     def moments_normalized(self):
-        return _moments.moments_normalized(self.moments_central, 3,
-                                           spacing=self._spacing)
+        return _moments.moments_normalized(
+            self.moments_central, 3, spacing=self._spacing
+        )

     @property
     @only2d
@@ -631,7 +660,7 @@ def orientation(self):
     def perimeter(self):
         if len(np.unique(self._spacing)) != 1:
             raise NotImplementedError(
-                '`perimeter` supports isotropic spacings only'
+                "`perimeter` supports isotropic spacings only"
             )
         return perimeter(self.image, 4) * self._spacing[0]
@@ -640,7 +669,7 @@ def perimeter(self):
     def perimeter_crofton(self):
         if len(np.unique(self._spacing)) != 1:
             raise NotImplementedError(
-                '`perimeter` supports isotropic spacings only'
+                "`perimeter` supports isotropic spacings only"
             )
         return perimeter_crofton(self.image, 4) * self._spacing[0]
@@ -651,14 +680,12 @@ def solidity(self):
     @property
     def centroid_weighted(self):
         ctr = self.centroid_weighted_local
-        return tuple(idx + slc.start
-                     for idx, slc in zip(ctr, self.slice))
+        return tuple(idx + slc.start for idx, slc in zip(ctr, self.slice))

     @property
     def centroid_weighted_local(self):
         M = self.moments_weighted
-        return (M[tuple(cp.eye(self._ndim, dtype=int))] /
-                M[(0,) * self._ndim])
+        return M[tuple(cp.eye(self._ndim, dtype=int))] / M[(0,) * self._ndim]

     @property
     @_cached
@@ -666,9 +693,12 @@ def moments_weighted(self):
         image = self._image_intensity_double()
         if self._multichannel:
             moments = cp.stack(
-                [_moments.moments(image[..., i], order=3,
-                                  spacing=self._spacing)
-                 for i in range(image.shape[-1])],
+                [
+                    _moments.moments(
+                        image[..., i], order=3, spacing=self._spacing
+                    )
+                    for i in range(image.shape[-1])
+                ],
                 axis=-1,
             )
         else:
@@ -686,7 +716,7 @@ def moments_weighted_central(self):
                     image[..., i],
                     center=ctr[..., i],
                     order=3,
-                    spacing=self._spacing
+                    spacing=self._spacing,
                 )
                 for i in range(image.shape[-1])
             ]
@@ -702,7 +732,7 @@ def moments_weighted_central(self):
     def moments_weighted_hu(self):
         if not (np.array(self._spacing) == np.array([1, 1])).all():
             raise NotImplementedError(
-                '`moments_hu` supports spacing = (1, 1) only'
+                "`moments_hu` supports spacing = (1, 1) only"
             )
         nu = self.moments_weighted_normalized
         if self._multichannel:
@@ -721,16 +751,21 @@ def moments_weighted_normalized(self):
         if self._multichannel:
             nchannels = self._intensity_image.shape[-1]
             return cp.stack(
-                [_moments.moments_normalized(mu[..., i], order=3,
-                                             spacing=self._spacing)
-                 for i in range(nchannels)],
+                [
+                    _moments.moments_normalized(
+                        mu[..., i], order=3, spacing=self._spacing
+                    )
+                    for i in range(nchannels)
+                ],
                 axis=-1,
             )
         else:
-            return _moments.moments_normalized(mu, order=3,
-                                               spacing=self._spacing)
-        return _moments.moments_normalized(self.moments_weighted_central, 3,
-                                           spacing=self._spacing)
+            return _moments.moments_normalized(
+                mu, order=3, spacing=self._spacing
+            )
+        return _moments.moments_normalized(
+            self.moments_weighted_central, 3, spacing=self._spacing
+        )

     def __iter__(self):
         props = PROP_VALS
@@ -760,8 +795,9 @@ def __eq__(self, other):
                     np.testing.assert_equal(v1, v2)
                 else:
                     # so that NaNs are equal
-                    cp.testing.assert_array_equal(getattr(self, key, None),
-                                                  getattr(other, key, None))
+                    cp.testing.assert_array_equal(
+                        getattr(self, key, None), getattr(other, key, None)
+                    )
             except AssertionError:
                 return False
@@ -772,7 +808,7 @@ def __eq__(self, other):
 _RegionProperties = RegionProperties


-def _props_to_dict(regions, properties=('label', 'bbox'), separator='-'):
+def _props_to_dict(regions, properties=("label", "bbox"), separator="-"):
properties=("label", "bbox"), separator="-"): """Convert image region properties list into a column dictionary. Parameters @@ -927,11 +963,16 @@ def _props_to_dict(regions, properties=('label', 'bbox'), separator='-'): return out -def regionprops_table(label_image, intensity_image=None, - properties=('label', 'bbox'), - *, - cache=True, separator='-', extra_properties=None, - spacing=None): +def regionprops_table( + label_image, + intensity_image=None, + properties=("label", "bbox"), + *, + cache=True, + separator="-", + extra_properties=None, + spacing=None, +): """Compute image properties and return them as a pandas-compatible table. The table is a dictionary mapping column names to value arrays. See Notes @@ -1066,13 +1107,17 @@ def regionprops_table(label_image, intensity_image=None, 4 5 112.50 113.0 114.0 """ - regions = regionprops(label_image, intensity_image=intensity_image, - cache=cache, extra_properties=extra_properties, - spacing=spacing) + regions = regionprops( + label_image, + intensity_image=intensity_image, + cache=cache, + extra_properties=extra_properties, + spacing=spacing, + ) if extra_properties is not None: - properties = ( - list(properties) + [prop.__name__ for prop in extra_properties] - ) + properties = list(properties) + [ + prop.__name__ for prop in extra_properties + ] if len(regions) == 0: ndim = label_image.ndim label_image = np.zeros((3,) * ndim, dtype=int) @@ -1083,21 +1128,30 @@ def regionprops_table(label_image, intensity_image=None, label_image.shape + intensity_image.shape[ndim:], dtype=intensity_image.dtype, ) - regions = regionprops(label_image, intensity_image=intensity_image, - cache=cache, extra_properties=extra_properties, - spacing=spacing) + regions = regionprops( + label_image, + intensity_image=intensity_image, + cache=cache, + extra_properties=extra_properties, + spacing=spacing, + ) - out_d = _props_to_dict(regions, properties=properties, - separator=separator) + out_d = _props_to_dict( + regions, properties=properties, separator=separator + ) return {k: v[:0] for k, v in out_d.items()} - return _props_to_dict( - regions, properties=properties, separator=separator - ) + return _props_to_dict(regions, properties=properties, separator=separator) -def regionprops(label_image, intensity_image=None, cache=True, *, - extra_properties=None, spacing=None): +def regionprops( + label_image, + intensity_image=None, + cache=True, + *, + extra_properties=None, + spacing=None, +): r"""Measure properties of labeled image regions. Parameters @@ -1338,19 +1392,19 @@ def regionprops(label_image, intensity_image=None, cache=True, *, """ # noqa if label_image.ndim not in (2, 3): - raise TypeError('Only 2-D and 3-D images supported.') + raise TypeError("Only 2-D and 3-D images supported.") if not cp.issubdtype(label_image.dtype, cp.integer): if cp.issubdtype(label_image.dtype, bool): raise TypeError( - 'Non-integer image types are ambiguous: ' - 'use skimage.measure.label to label the connected' - 'components of label_image,' - 'or label_image.astype(np.uint8) to interpret' - 'the True values as a single label.') + "Non-integer image types are ambiguous: " + "use skimage.measure.label to label the connected" + "components of label_image," + "or label_image.astype(np.uint8) to interpret" + "the True values as a single label." 
+            )
         else:
-            raise TypeError(
-                'Non-integer label_image types are ambiguous')
+            raise TypeError("Non-integer label_image types are ambiguous")

     regions = []
@@ -1362,9 +1416,15 @@ def regionprops(label_image, intensity_image=None, cache=True, *,

         label = i + 1

-        props = RegionProperties(sl, label, label_image, intensity_image,
-                                 cache, spacing=spacing,
-                                 extra_properties=extra_properties)
+        props = RegionProperties(
+            sl,
+            label,
+            label_image,
+            intensity_image,
+            cache,
+            spacing=spacing,
+            extra_properties=extra_properties,
+        )
         regions.append(props)

     return regions
@@ -1374,9 +1434,10 @@ def _parse_docs():
     import re
     import textwrap

-    doc = regionprops.__doc__ or ''
-    matches = re.finditer(r'\*\*(\w+)\*\* \:.*?\n(.*?)(?=\n    [\*\S]+)',
-                          doc, flags=re.DOTALL)
+    doc = regionprops.__doc__ or ""
+    matches = re.finditer(
+        r"\*\*(\w+)\*\* \:.*?\n(.*?)(?=\n    [\*\S]+)", doc, flags=re.DOTALL
+    )
     prop_doc = {m.group(1): textwrap.dedent(m.group(2)) for m in matches}

     return prop_doc
@@ -1385,8 +1446,9 @@ def _install_properties_docs():
     prop_doc = _parse_docs()

-    for p in [member for member in dir(RegionProperties)
-              if not member.startswith('_')]:
+    for p in [
+        member for member in dir(RegionProperties) if not member.startswith("_")
+    ]:
         getattr(RegionProperties, p).__doc__ = prop_doc[p]
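The `_regionprops.py` hunks above are formatting-only (black's double quotes and exploded call signatures); the `regionprops`/`regionprops_table` behavior is unchanged. For orientation, a minimal usage sketch of the API these hunks touch, assuming a CUDA-capable environment with `cupy` and `cucim` installed (the array values here are illustrative only):

```python
import cupy as cp

from cucim.skimage.measure import regionprops_table

# label 0 is background; labels 1..N are the regions
label_image = cp.zeros((8, 8), dtype=int)
label_image[1:4, 1:4] = 1
intensity = cp.random.random((8, 8))


# Per the __getattr__ logic above, an extra property taking one argument
# receives the region mask; one taking two arguments also receives the
# matching intensity window.
def pixelcount(regionmask):
    return int(regionmask.sum())


def intensity_median(regionmask, intensity_image):
    return float(cp.median(intensity_image[regionmask]))


table = regionprops_table(
    label_image,
    intensity_image=intensity,
    properties=("label", "area", "bbox"),
    extra_properties=(pixelcount, intensity_median),
    spacing=(1.0, 1.0),
)
print(table["area"], table["pixelcount"])
```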
diff --git a/python/cucim/src/cucim/skimage/measure/_regionprops_utils.py b/python/cucim/src/cucim/skimage/measure/_regionprops_utils.py
index 54812a11b..70642aa1e 100644
--- a/python/cucim/src/cucim/skimage/measure/_regionprops_utils.py
+++ b/python/cucim/src/cucim/skimage/measure/_regionprops_utils.py
@@ -148,7 +148,7 @@ def euler_number(image, connectivity=None):

     # as image can be a label image, transform it to binary
     image = (image > 0).astype(int)
-    image = pad(image, pad_width=1, mode='constant')
+    image = pad(image, pad_width=1, mode="constant")

     # check connectivity
     if connectivity is None:
@@ -158,7 +158,6 @@
     # variable coefs is attributed to each configuration in order to get
     # the Euler characteristic.
     if image.ndim == 2:
-
         config = cp.array([[0, 0, 0], [0, 1, 4], [0, 2, 8]])
         if connectivity == 1:
             coefs = EULER_COEFS2D_4
@@ -168,8 +167,9 @@ def euler_number(image, connectivity=None):
     else:  # 3D images
         if connectivity == 2:
             raise NotImplementedError(
-                'For 3D images, Euler number is implemented '
-                'for connectivities 1 and 3 only')
+                "For 3D images, Euler number is implemented "
+                "for connectivities 1 and 3 only"
+            )

         # fmt: off
         config = cp.array([[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
@@ -185,7 +185,7 @@ def euler_number(image, connectivity=None):
     # XF has values in the 0-255 range in 3D, and in the 0-15 range in 2D,
     # with one unique value for each binary configuration of the
     # 27-voxel cube in 3D / 8-pixel square in 2D, up to symmetries
-    XF = ndi.convolve(image, config, mode='constant', cval=0)
+    XF = ndi.convolve(image, config, mode="constant", cval=0)
     h = cp.bincount(XF.ravel(), minlength=bins)

     coefs = cp.asarray(coefs)
@@ -195,9 +195,11 @@ def euler_number(image, connectivity=None):
         return int(0.125 * coefs @ h)


-@deprecate_kwarg(kwarg_mapping={'neighbourhood': 'neighborhood'},
-                 removed_version="2023.06.00",
-                 deprecated_version="2022.12.00")
+@deprecate_kwarg(
+    kwarg_mapping={"neighbourhood": "neighborhood"},
+    removed_version="2023.06.00",
+    deprecated_version="2022.12.00",
+)
 def perimeter(image, neighborhood=4):
     """Calculate total perimeter of all objects in binary image.
@@ -237,7 +239,7 @@ def perimeter(image, neighborhood=4):

     """
     if image.ndim != 2:
-        raise NotImplementedError('`perimeter` supports 2D images only')
+        raise NotImplementedError("`perimeter` supports 2D images only")

     if neighborhood == 4:
         strel = STREL_4
@@ -253,10 +255,12 @@ def perimeter(image, neighborhood=4):
     perimeter_weights[[21, 33]] = math.sqrt(2)
     perimeter_weights[[13, 23]] = (1 + math.sqrt(2)) / 2

-    perimeter_image = ndi.convolve(border_image, cp.array([[10, 2, 10],
-                                                           [2, 1, 2],
-                                                           [10, 2, 10]]),
-                                   mode='constant', cval=0)
+    perimeter_image = ndi.convolve(
+        border_image,
+        cp.array([[10, 2, 10], [2, 1, 2], [10, 2, 10]]),
+        mode="constant",
+        cval=0,
+    )

     # You can also write
     # return perimeter_weights[perimeter_image].sum()
@@ -320,14 +324,17 @@ def perimeter_crofton(image, directions=4):
     array(7837.07740694)
     """
     if image.ndim != 2:
-        raise NotImplementedError(
-            '`perimeter_crofton` supports 2D images only')
+        raise NotImplementedError("`perimeter_crofton` supports 2D images only")

     # as image could be a label image, transform it to binary image
     image = (image > 0).astype(cp.uint8)
     image = pad(image, pad_width=1, mode="constant")

-    XF = ndi.convolve(image, cp.array([[0, 0, 0], [0, 1, 4], [0, 2, 8]]),
-                      mode='constant', cval=0)
+    XF = ndi.convolve(
+        image,
+        cp.array([[0, 0, 0], [0, 1, 4], [0, 2, 8]]),
+        mode="constant",
+        cval=0,
+    )
     h = cp.bincount(XF.ravel(), minlength=16)
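All three helpers in `_regionprops_utils.py` share the pattern visible above: pad the binary image, convolve with a small kernel so every local pixel configuration maps to a unique code (`XF`), histogram the codes with `cp.bincount`, and take a weighted sum. A short sketch of the public entry points, with a filled square used purely as an illustration:

```python
import cupy as cp

from cucim.skimage.measure import euler_number, perimeter, perimeter_crofton

# one solid 10x10 component, no holes
square = cp.zeros((12, 12), dtype=cp.uint8)
square[1:11, 1:11] = 1

print(euler_number(square, connectivity=1))     # 1 component - 0 holes = 1
print(perimeter(square, neighborhood=4))        # weighted-configuration estimate
print(perimeter_crofton(square, directions=4))  # Crofton-formula estimate
```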
diff --git a/python/cucim/src/cucim/skimage/measure/block.py b/python/cucim/src/cucim/skimage/measure/block.py
index 3db2ea742..db8cbfeac 100644
--- a/python/cucim/src/cucim/skimage/measure/block.py
+++ b/python/cucim/src/cucim/skimage/measure/block.py
@@ -67,8 +67,10 @@ def block_reduce(image, block_size=2, func=cp.sum, cval=0, func_kwargs=None):
     if np.isscalar(block_size):
         block_size = (block_size,) * image.ndim
     elif len(block_size) != image.ndim:
-        raise ValueError("`block_size` must be a scalar or have "
-                         "the same length as `image.shape`")
+        raise ValueError(
+            "`block_size` must be a scalar or have "
+            "the same length as `image.shape`"
+        )

     if func_kwargs is None:
         func_kwargs = {}
@@ -76,19 +78,23 @@ def block_reduce(image, block_size=2, func=cp.sum, cval=0, func_kwargs=None):
     pad_width = []
     for i in range(len(block_size)):
         if block_size[i] < 1:
-            raise ValueError("Down-sampling factors must be >= 1. Use "
-                             "`skimage.transform.resize` to up-sample an "
-                             "image.")
+            raise ValueError(
+                "Down-sampling factors must be >= 1. Use "
+                "`skimage.transform.resize` to up-sample an "
+                "image."
+            )
         if image.shape[i] % block_size[i] != 0:
             after_width = block_size[i] - (image.shape[i] % block_size[i])
         else:
             after_width = 0
         pad_width.append((0, after_width))

-    image = pad(image, pad_width=pad_width, mode='constant',
-                constant_values=cval)
+    image = pad(
+        image, pad_width=pad_width, mode="constant", constant_values=cval
+    )

     blocked = view_as_blocks(image, block_size)
-    return func(blocked, axis=tuple(range(image.ndim, blocked.ndim)),
-                **func_kwargs)
+    return func(
+        blocked, axis=tuple(range(image.ndim, blocked.ndim)), **func_kwargs
+    )
diff --git a/python/cucim/src/cucim/skimage/measure/entropy.py b/python/cucim/src/cucim/skimage/measure/entropy.py
index 895d94239..8abcf9056 100644
--- a/python/cucim/src/cucim/skimage/measure/entropy.py
+++ b/python/cucim/src/cucim/skimage/measure/entropy.py
@@ -26,7 +26,7 @@ def shannon_entropy(image, base=2):
     References
     ----------
-    .. [1] https://en.wikipedia.org/wiki/Entropy_(information_theory) `_ # noqa
+    .. [1] https://en.wikipedia.org/wiki/Entropy_(information_theory) `_
     .. [2] https://en.wiktionary.org/wiki/Shannon_entropy

     Examples
@@ -36,7 +36,7 @@ def shannon_entropy(image, base=2):
     >>> from cucim.skimage.measure import shannon_entropy
     >>> shannon_entropy(cp.array(data.camera()))
     array(7.23169501)
-    """
+    """  # noqa: E501
     _, counts = cp.unique(image, return_counts=True)
     return scipy_entropy(counts, base=base)
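Both files above change formatting only. As a reminder of the semantics being preserved: `block_reduce` pads the image with `cval` up to a multiple of `block_size`, views it as non-overlapping blocks, and applies `func` over the block axes, while `shannon_entropy` is computed from the counts of unique values. A sketch reusing values that appear in the tests and docstring above (`skimage.data.camera` is assumed available):

```python
import cupy as cp
from skimage import data

from cucim.skimage.measure import block_reduce, shannon_entropy

image = cp.arange(4 * 6).reshape(4, 6)
# default block_size=2 with func=cp.min keeps each block's smallest value
print(block_reduce(image, func=cp.min))  # [[0, 2, 4], [12, 14, 16]]

print(shannon_entropy(cp.array(data.camera())))  # ~7.2317 (base=2)
```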
diff --git a/python/cucim/src/cucim/skimage/measure/profile.py b/python/cucim/src/cucim/skimage/measure/profile.py
index d4447c9a9..7ccc1e2ae 100644
--- a/python/cucim/src/cucim/skimage/measure/profile.py
+++ b/python/cucim/src/cucim/skimage/measure/profile.py
@@ -8,9 +8,17 @@ from .._shared.utils import _fix_ndimage_mode, _validate_interpolation_order


-def profile_line(image, src, dst, linewidth=1,
-                 order=None, mode='reflect', cval=0.0,
-                 *, reduce_func=cp.mean):
+def profile_line(
+    image,
+    src,
+    dst,
+    linewidth=1,
+    order=None,
+    mode="reflect",
+    cval=0.0,
+    *,
+    reduce_func=cp.mean,
+):
     """Return the intensity profile of an image measured along a scan line.

     Parameters
@@ -101,15 +109,27 @@ def profile_line(image, src, dst, linewidth=1,
     perp_lines = _line_profile_coordinates(src, dst, linewidth=linewidth)
     if image.ndim == 3:
-        pixels = [ndi.map_coordinates(image[..., i], perp_lines,
-                                      prefilter=order > 1,
-                                      order=order, mode=mode,
-                                      cval=cval) for i in
-                  range(image.shape[2])]
+        pixels = [
+            ndi.map_coordinates(
+                image[..., i],
+                perp_lines,
+                prefilter=order > 1,
+                order=order,
+                mode=mode,
+                cval=cval,
+            )
+            for i in range(image.shape[2])
+        ]
         pixels = cp.transpose(cp.stack(pixels, axis=0), (1, 2, 0))
     else:
-        pixels = ndi.map_coordinates(image, perp_lines, prefilter=order > 1,
-                                     order=order, mode=mode, cval=cval)
+        pixels = ndi.map_coordinates(
+            image,
+            perp_lines,
+            prefilter=order > 1,
+            order=order,
+            mode=mode,
+            cval=cval,
+        )
     # The outputted array with reduce_func=None gives an array where the
     # row values (axis=1) are flipped. Here, we make this consistent.
     pixels = np.flip(pixels, axis=1)
@@ -165,8 +185,16 @@ def _line_profile_coordinates(src, dst, linewidth=1):
     #   distance between pixel centers)
     col_width = (linewidth - 1) * cp.sin(-theta) / 2
     row_width = (linewidth - 1) * cp.cos(theta) / 2
-    perp_rows = cp.stack([cp.linspace(row_i - row_width, row_i + row_width,
-                                      linewidth) for row_i in line_row])
-    perp_cols = cp.stack([cp.linspace(col_i - col_width, col_i + col_width,
-                                      linewidth) for col_i in line_col])
+    perp_rows = cp.stack(
+        [
+            cp.linspace(row_i - row_width, row_i + row_width, linewidth)
+            for row_i in line_row
+        ]
+    )
+    perp_cols = cp.stack(
+        [
+            cp.linspace(col_i - col_width, col_i + col_width, linewidth)
+            for col_i in line_col
+        ]
+    )
     return cp.stack([perp_rows, perp_cols])
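`profile_line` samples the image with `map_coordinates` along `linewidth` parallel lines between `src` and `dst` and collapses them with `reduce_func` (`cp.mean` by default; `None` returns the raw per-line samples, flipped for consistency as the comment above notes). A minimal sketch on a constant image, with expected shapes taken from the tests below:

```python
import cupy as cp

from cucim.skimage.measure import profile_line

image = cp.ones((5, 7))

# endpoints are (row, col); both endpoints are included in the profile
prof = profile_line(
    image, (1, 1), (4, 5), linewidth=3, order=0, mode="constant"
)
print(prof)  # six ones, as in the Pythagorean-triangle tests

# reduce_func=None keeps one column per parallel line instead of averaging
raw = profile_line(
    image, (1, 2), (4, 2), linewidth=3, order=0, reduce_func=None,
    mode="constant",
)
print(raw.shape)  # (4, 3)
```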
diff --git a/python/cucim/src/cucim/skimage/measure/tests/test_block.py b/python/cucim/src/cucim/skimage/measure/tests/test_block.py
index 0c5d60a2f..c420e7a32 100644
--- a/python/cucim/src/cucim/skimage/measure/tests/test_block.py
+++ b/python/cucim/src/cucim/skimage/measure/tests/test_block.py
@@ -111,16 +111,14 @@ def test_invalid_block_size():
 def test_default_block_size():
     image = cp.arange(4 * 6).reshape(4, 6)
     out = block_reduce(image, func=cp.min)
-    expected = cp.array([[0, 2, 4],
-                         [12, 14, 16]])
+    expected = cp.array([[0, 2, 4], [12, 14, 16]])
     assert_array_equal(expected, out)


 def test_scalar_block_size():
     image = cp.arange(6 * 6).reshape(6, 6)
     out = block_reduce(image, 3, func=cp.min)
-    expected1 = cp.array([[0, 3],
-                          [18, 21]])
+    expected1 = cp.array([[0, 3], [18, 21]])
     assert_array_equal(expected1, out)
     expected2 = block_reduce(image, (3, 3), func=cp.min)
     assert_array_equal(expected2, out)
@@ -153,8 +151,9 @@ def test_func_kwargs_different_dtype():
                      dtype=cp.float64)
     # fmt: on

-    out = block_reduce(image, (2, 2), func=cp.mean,
-                       func_kwargs={'dtype': cp.float16})
+    out = block_reduce(
+        image, (2, 2), func=cp.mean, func_kwargs={"dtype": cp.float16}
+    )
     expected = cp.array([[0.6855, 0.3164], [0.4922, 0.521]], dtype=cp.float16)

     # Note: had to set decimal=3 for float16 to pass here when using CuPy
diff --git a/python/cucim/src/cucim/skimage/measure/tests/test_blur_effect.py b/python/cucim/src/cucim/skimage/measure/tests/test_blur_effect.py
index 3b0013005..1fe103b9c 100644
--- a/python/cucim/src/cucim/skimage/measure/tests/test_blur_effect.py
+++ b/python/cucim/src/cucim/skimage/measure/tests/test_blur_effect.py
@@ -12,10 +12,8 @@ def test_blur_effect():
     """Test that the blur metric increases with more blurring."""
     image = cp.array(astronaut())
     B0 = blur_effect(image, channel_axis=-1)
-    B1 = blur_effect(gaussian(image, sigma=1, channel_axis=-1),
-                     channel_axis=-1)
-    B2 = blur_effect(gaussian(image, sigma=4, channel_axis=-1),
-                     channel_axis=-1)
+    B1 = blur_effect(gaussian(image, sigma=1, channel_axis=-1), channel_axis=-1)
+    B2 = blur_effect(gaussian(image, sigma=4, channel_axis=-1), channel_axis=-1)
     assert 0 <= B0 < 1
     assert B0 < B1 < B2
@@ -48,7 +46,7 @@ def test_blur_effect_channel_axis():

 def test_blur_effect_3d():
     """Test that the blur metric works on a 3D image."""
-    cells3d = pytest.importorskip('skimage.data.cells3d')
+    cells3d = pytest.importorskip("skimage.data.cells3d")
     image_3d = cp.array(cells3d()[:, 1, :, :])  # grab just the nuclei
     B0 = blur_effect(image_3d)
     B1 = blur_effect(gaussian(image_3d, sigma=1))
diff --git a/python/cucim/src/cucim/skimage/measure/tests/test_ccomp.py b/python/cucim/src/cucim/skimage/measure/tests/test_ccomp.py
index f79714008..a7a606798 100644
--- a/python/cucim/src/cucim/skimage/measure/tests/test_ccomp.py
+++ b/python/cucim/src/cucim/skimage/measure/tests/test_ccomp.py
@@ -1,6 +1,7 @@
 # Note: These test cases originated in skimage/morphology/tests/test_ccomp.py

 import cupy as cp
+
 # import numpy as np
 from cupy.testing import assert_array_equal
@@ -283,14 +284,20 @@ def test_1D(self):
         x = cp.array((0, 1, 2, 2, 1, 1, 0, 0))
         xlen = len(x)
         y = cp.array((0, 1, 2, 2, 3, 3, 0, 0))
-        reshapes = ((xlen,),
-                    (1, xlen), (xlen, 1),
-                    (1, xlen, 1), (xlen, 1, 1), (1, 1, xlen))
+        reshapes = (
+            (xlen,),
+            (1, xlen),
+            (xlen, 1),
+            (1, xlen, 1),
+            (xlen, 1, 1),
+            (1, 1, xlen),
+        )
         for reshape in reshapes:
             x2 = x.reshape(reshape)
             labelled = label(x2)
             assert_array_equal(y, labelled.flatten())

+
 # CuPy Backend: unlike scikit-image, the CUDA implementation is nD
 # def test_nd(self):
 #     x = cp.ones((1, 2, 3, 4))
diff --git a/python/cucim/src/cucim/skimage/measure/tests/test_colocalization.py b/python/cucim/src/cucim/skimage/measure/tests/test_colocalization.py
index ab1c05949..f332858b2 100644
--- a/python/cucim/src/cucim/skimage/measure/tests/test_colocalization.py
+++ b/python/cucim/src/cucim/skimage/measure/tests/test_colocalization.py
@@ -2,8 +2,12 @@
 import numpy as np
 import pytest

-from cucim.skimage.measure import (intersection_coeff, manders_coloc_coeff,
-                                   manders_overlap_coeff, pearson_corr_coeff)
+from cucim.skimage.measure import (
+    intersection_coeff,
+    manders_coloc_coeff,
+    manders_overlap_coeff,
+    pearson_corr_coeff,
+)


 def test_invalid_input():
@@ -74,7 +78,7 @@ def test_pcc():

 def test_mcc():
     img1 = cp.array([[j for j in range(4)] for i in range(4)])
-    mask = cp.array([[i <= 1 for j in range(4)]for i in range(4)])
+    mask = cp.array([[i <= 1 for j in range(4)] for i in range(4)])
     assert manders_coloc_coeff(img1, mask) == 0.5

     # test negative values
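The colocalization hunk above only reflows the import and fixes a missing space in a list comprehension; the metric itself is unchanged. The `test_mcc` case it touches, as a standalone sketch:

```python
import cupy as cp

from cucim.skimage.measure import manders_coloc_coeff

# column-gradient image with a mask covering the top two rows
img1 = cp.array([[j for j in range(4)] for i in range(4)])
mask = cp.array([[i <= 1 for j in range(4)] for i in range(4)])
print(manders_coloc_coeff(img1, mask))  # 0.5, as asserted in test_mcc
```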
diff --git a/python/cucim/src/cucim/skimage/measure/tests/test_moments.py b/python/cucim/src/cucim/skimage/measure/tests/test_moments.py
index df20a82d9..82e9aa6ce 100644
--- a/python/cucim/src/cucim/skimage/measure/tests/test_moments.py
+++ b/python/cucim/src/cucim/skimage/measure/tests/test_moments.py
@@ -3,18 +3,27 @@
 import cupy as cp
 import numpy as np
 import pytest
-from cupy.testing import (assert_allclose, assert_array_almost_equal,
-                          assert_array_equal)
+from cupy.testing import (
+    assert_allclose,
+    assert_array_almost_equal,
+    assert_array_equal,
+)
 from cupyx.scipy import ndimage as ndi
 from numpy.testing import assert_almost_equal
 from skimage import draw

 from cucim.skimage._shared.utils import _supported_float_type
-from cucim.skimage.measure import (centroid, inertia_tensor,
-                                   inertia_tensor_eigvals, moments,
-                                   moments_central, moments_coords,
-                                   moments_coords_central, moments_hu,
-                                   moments_normalized)
+from cucim.skimage.measure import (
+    centroid,
+    inertia_tensor,
+    inertia_tensor_eigvals,
+    moments,
+    moments_central,
+    moments_coords,
+    moments_coords_central,
+    moments_hu,
+    moments_normalized,
+)


 def compare_moments(m1, m2, thresh=1e-8):
@@ -52,8 +61,8 @@ def compare_moments(m1, m2, thresh=1e-8):
     assert rel_diff < thresh


-@pytest.mark.parametrize('dtype', [cp.float32, cp.float64])
-@pytest.mark.parametrize('anisotropic', [False, True, None])
+@pytest.mark.parametrize("dtype", [cp.float32, cp.float64])
+@pytest.mark.parametrize("anisotropic", [False, True, None])
 def test_moments(anisotropic, dtype):
     image = cp.zeros((20, 20), dtype=dtype)
     image[14, 14] = 1
@@ -75,8 +84,8 @@ def test_moments(anisotropic, dtype):
     assert_almost_equal(m[0, 1] / m[0, 0], 14.5 * spacing[1], decimal=decimal)


-@pytest.mark.parametrize('dtype', [cp.float32, cp.float64])
-@pytest.mark.parametrize('anisotropic', [False, True, None])
+@pytest.mark.parametrize("dtype", [cp.float32, cp.float64])
+@pytest.mark.parametrize("anisotropic", [False, True, None])
 def test_moments_central(anisotropic, dtype):
     image = cp.zeros((20, 20), dtype=dtype)
     image[14, 14] = 1
@@ -92,8 +101,9 @@ def test_moments_central(anisotropic, dtype):
         # check for proper centroid computation
         mu_calc_centroid = moments_central(image)
     else:
-        mu = moments_central(image, (14.5 * spacing[0], 14.5 * spacing[1]),
-                             spacing=spacing)
+        mu = moments_central(
+            image, (14.5 * spacing[0], 14.5 * spacing[1]), spacing=spacing
+        )
         # check for proper centroid computation
         mu_calc_centroid = moments_central(image, spacing=spacing)
     assert mu.dtype == dtype
@@ -112,7 +122,7 @@ def test_moments_central(anisotropic, dtype):
     mu2 = moments_central(
         image2,
         ((14.5 + 2) * spacing[0], (14.5 + 2) * spacing[1]),
-        spacing=spacing
+        spacing=spacing,
     )
     assert mu2.dtype == dtype
     # central moments must be translation invariant
@@ -124,13 +134,14 @@ def test_moments_coords():
     image[13:17, 13:17] = 1
     mu_image = moments(image)

-    coords = cp.array([[r, c] for r in range(13, 17)
-                       for c in range(13, 17)], dtype=cp.float64)
+    coords = cp.array(
+        [[r, c] for r in range(13, 17) for c in range(13, 17)], dtype=cp.float64
+    )
     mu_coords = moments_coords(coords)
     assert_array_almost_equal(mu_coords, mu_image)


-@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64])
+@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64])
 def test_moments_coords_dtype(dtype):
     image = cp.zeros((20, 20), dtype=dtype)
     image[13:17, 13:17] = 1
@@ -140,8 +151,9 @@ def test_moments_coords_dtype(dtype):
     assert mu_image.dtype == expected_dtype

     coords = cp.asarray(
-        np.array([[r, c] for r in range(13, 17)
-                  for c in range(13, 17)], dtype=dtype)
+        np.array(
+            [[r, c] for r in range(13, 17) for c in range(13, 17)], dtype=dtype
+        )
     )
     mu_coords = moments_coords(coords)
     assert mu_coords.dtype == expected_dtype
@@ -173,8 +185,9 @@ def test_moments_central_coords():
     mu_image = moments_central(image, (14.5, 14.5))

     coords = cp.asarray(
-        np.array([[r, c] for r in range(16, 20)
-                  for c in range(16, 20)], dtype=float)
+        np.array(
+            [[r, c] for r in range(16, 20) for c in range(16, 20)], dtype=float
+        )
     )
     mu_coords = moments_coords_central(coords, (14.5, 14.5))
     decimal = 6
@@ -196,7 +209,7 @@ def test_moments_normalized():
     assert_array_almost_equal(nu, nu2, decimal=1)


-@pytest.mark.parametrize('anisotropic', [False, True])
+@pytest.mark.parametrize("anisotropic", [False, True])
 def test_moments_normalized_spacing(anisotropic):
     image = cp.zeros((20, 20), dtype=np.double)
     image[13:17, 13:17] = 1
@@ -230,18 +243,18 @@ def test_moments_normalized_3d():
     assert_array_almost_equal(mu_coords, mu_image)


-@pytest.mark.parametrize('dtype', [np.uint8, np.int32, np.float32, np.float64])
-@pytest.mark.parametrize('order', [1, 2, 3, 4])
-@pytest.mark.parametrize('ndim', [2, 3, 4])
+@pytest.mark.parametrize("dtype", [np.uint8, np.int32, np.float32, np.float64])
+@pytest.mark.parametrize("order", [1, 2, 3, 4])
+@pytest.mark.parametrize("ndim", [2, 3, 4])
 def test_analytical_moments_calculation(dtype, order, ndim):
     if ndim == 2:
         shape = (256, 256)
     elif ndim == 3:
         shape = (64, 64, 64)
     else:
-        shape = (16, ) * ndim
+        shape = (16,) * ndim
     rng = np.random.default_rng(1234)
-    if np.dtype(dtype).kind in 'iu':
+    if np.dtype(dtype).kind in "iu":
         x = rng.integers(0, 256, shape, dtype=dtype)
     else:
         x = rng.standard_normal(shape, dtype=dtype)
@@ -280,7 +293,7 @@ def test_moments_hu():
     assert_array_almost_equal(hu, hu2, decimal=1)


-@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64])
+@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64])
 def test_moments_dtype(dtype):
     image = cp.zeros((20, 20), dtype=dtype)
     image[13:15, 13:17] = 1
@@ -296,7 +309,7 @@ def test_moments_dtype(dtype):
     assert hu.dtype == expected_dtype


-@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64])
+@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64])
 def test_centroid(dtype):
     image = cp.zeros((20, 20), dtype=dtype)
     image[14, 14:16] = 1
@@ -311,7 +324,7 @@ def test_centroid(dtype):
     assert_allclose(image_centroid, (14.25, 14.5), rtol=rtol)


-@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64])
+@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64])
 def test_inertia_tensor_2d(dtype):
     image = cp.zeros((40, 40), dtype=dtype)
     image[15:25, 5:35] = 1  # big horizontal rectangle (aligned with axis 1)
@@ -349,8 +362,9 @@ def test_inertia_tensor_3d():
                   [ 0,           0, 1]])  # noqa
     # fmt: on
     expected_vr = R @ v0
-    assert (cp.allclose(vr, expected_vr, atol=1e-3, rtol=0.01) or
-            cp.allclose(-vr, expected_vr, atol=1e-3, rtol=0.01))
+    assert cp.allclose(vr, expected_vr, atol=1e-3, rtol=0.01) or cp.allclose(
+        -vr, expected_vr, atol=1e-3, rtol=0.01
+    )


 def test_inertia_tensor_eigvals():
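The moments tests above lean on the standard identities: `m[0, 0]` is the total mass, `m[1, 0] / m[0, 0]` and `m[0, 1] / m[0, 0]` give the centroid, and central moments taken about that centroid are translation invariant. A small sketch of those identities with the cucim API (a single-pixel image is chosen so the centroid is obvious):

```python
import cupy as cp

from cucim.skimage.measure import moments, moments_central

image = cp.zeros((20, 20), dtype=cp.float64)
image[14, 14] = 1  # a single pixel at (14, 14)

m = moments(image)
centroid = (m[1, 0] / m[0, 0], m[0, 1] / m[0, 0])
print(centroid)  # (14.0, 14.0)

# central moments are computed about the centroid, so translating the
# image leaves them unchanged; mu[0, 0] still equals the total mass
mu = moments_central(image, centroid, order=3)
print(mu[0, 0])  # 1.0
```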
diff --git a/python/cucim/src/cucim/skimage/measure/tests/test_polygon.py b/python/cucim/src/cucim/skimage/measure/tests/test_polygon.py
index ffd7c13b8..10cfbb6d5 100644
--- a/python/cucim/src/cucim/skimage/measure/tests/test_polygon.py
+++ b/python/cucim/src/cucim/skimage/measure/tests/test_polygon.py
@@ -6,15 +6,26 @@
 from cucim.skimage.measure import approximate_polygon, subdivide_polygon
 from cucim.skimage.measure._polygon import _SUBDIVISION_MASKS

-_square = cp.array([
-    [0, 0], [0, 1], [0, 2], [0, 3],
-    [1, 3], [2, 3], [3, 3],
-    [3, 2], [3, 1], [3, 0],
-    [2, 0], [1, 0], [0, 0]
-])
+_square = cp.array(
+    [
+        [0, 0],
+        [0, 1],
+        [0, 2],
+        [0, 3],
+        [1, 3],
+        [2, 3],
+        [3, 3],
+        [3, 2],
+        [3, 1],
+        [3, 0],
+        [2, 0],
+        [1, 0],
+        [0, 0],
+    ]
+)


-@pytest.mark.parametrize('dtype', [cp.float32, cp.float64])
+@pytest.mark.parametrize("dtype", [cp.float32, cp.float64])
 def test_approximate_polygon(dtype):
     square = _square.astype(dtype, copy=False)
     out = approximate_polygon(square, 0.1)
@@ -35,7 +46,7 @@ def test_approximate_polygon(dtype):
     assert_array_equal(out, square)


-@pytest.mark.parametrize('dtype', [cp.float32, cp.float64])
+@pytest.mark.parametrize("dtype", [cp.float32, cp.float64])
 def test_subdivide_polygon(dtype):
     square = _square.astype(dtype, copy=False)
     new_square1 = square
@@ -51,21 +62,22 @@ def test_subdivide_polygon(dtype):
             new_square1 = subdivide_polygon(square1, degree)
             assert new_square1.dtype == dtype
             assert_array_equal(new_square1[-1], new_square1[0])
-            assert_equal(new_square1.shape[0],
-                         2 * square1.shape[0] - 1)
+            assert_equal(new_square1.shape[0], 2 * square1.shape[0] - 1)

             # test non-circular
             new_square2 = subdivide_polygon(square2, degree)
             assert new_square3.dtype == dtype
-            assert_equal(new_square2.shape[0],
-                         2 * (square2.shape[0] - mask_len + 1))
+            assert_equal(
+                new_square2.shape[0], 2 * (square2.shape[0] - mask_len + 1)
+            )

             # test non-circular, preserve_ends
             new_square3 = subdivide_polygon(square3, degree, True)
             assert new_square3.dtype == dtype
             assert_array_equal(new_square3[0], square3[0])
             assert_array_equal(new_square3[-1], square3[-1])
-            assert_equal(new_square3.shape[0],
-                         2 * (square3.shape[0] - mask_len + 2))
+            assert_equal(
+                new_square3.shape[0], 2 * (square3.shape[0] - mask_len + 2)
+            )

     # not supported B-Spline degree
     with pytest.raises(ValueError):
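For context on the polygon tests above: `subdivide_polygon` performs one round of B-spline corner cutting (a closed, "circular" input stays closed and gains `2 * n - 1` vertices), while `approximate_polygon` drops vertices that fall within a given tolerance of the simplified outline. A sketch using the `_square` fixture defined above:

```python
import cupy as cp

from cucim.skimage.measure import approximate_polygon, subdivide_polygon

square = cp.array(
    [[0, 0], [0, 1], [0, 2], [0, 3], [1, 3], [2, 3], [3, 3],
     [3, 2], [3, 1], [3, 0], [2, 0], [1, 0], [0, 0]],
    dtype=cp.float64,
)
finer = subdivide_polygon(square, degree=2)
print(finer.shape[0])  # 2 * 13 - 1 = 25 vertices, per the circular-case assert
coarse = approximate_polygon(square, 0.1)
print(coarse.shape[0])  # collinear points within the tolerance are dropped
```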
diff --git a/python/cucim/src/cucim/skimage/measure/tests/test_profile.py b/python/cucim/src/cucim/skimage/measure/tests/test_profile.py
index 46971a059..20c18764d 100644
--- a/python/cucim/src/cucim/skimage/measure/tests/test_profile.py
+++ b/python/cucim/src/cucim/skimage/measure/tests/test_profile.py
@@ -89,127 +89,218 @@ def test_pythagorean_triangle_right_downward_interpolated():


 def test_pythagorean_triangle_right_downward_linewidth():
-    prof = profile_line(pyth_image, (1, 1), (4, 5), linewidth=3, order=0,
-                        mode='constant')
+    prof = profile_line(
+        pyth_image, (1, 1), (4, 5), linewidth=3, order=0, mode="constant"
+    )
     expected_prof = cp.ones(6)
     assert_array_almost_equal(prof, expected_prof)


 def test_pythagorean_triangle_right_upward_linewidth():
-    prof = profile_line(pyth_image[::-1, :], (4, 1), (1, 5),
-                        linewidth=3, order=0, mode='constant')
+    prof = profile_line(
+        pyth_image[::-1, :],
+        (4, 1),
+        (1, 5),
+        linewidth=3,
+        order=0,
+        mode="constant",
+    )
     expected_prof = cp.ones(6)
     assert_array_almost_equal(prof, expected_prof)


 def test_pythagorean_triangle_transpose_left_down_linewidth():
-    prof = profile_line(pyth_image.T[:, ::-1], (1, 4), (5, 1),
-                        linewidth=3, order=0, mode='constant')
+    prof = profile_line(
+        pyth_image.T[:, ::-1],
+        (1, 4),
+        (5, 1),
+        linewidth=3,
+        order=0,
+        mode="constant",
+    )
     expected_prof = np.ones(6)
     assert_array_almost_equal(prof, expected_prof)


 def test_reduce_func_mean():
-    prof = profile_line(pyth_image, (0, 1), (3, 1), linewidth=3, order=0,
-                        reduce_func=np.mean, mode='reflect')
+    prof = profile_line(
+        pyth_image,
+        (0, 1),
+        (3, 1),
+        linewidth=3,
+        order=0,
+        reduce_func=np.mean,
+        mode="reflect",
+    )
     expected_prof = pyth_image[:4, :3].mean(1)
     assert_array_almost_equal(prof, expected_prof)


 def test_reduce_func_max():
-    prof = profile_line(pyth_image, (0, 1), (3, 1), linewidth=3, order=0,
-                        reduce_func=np.max, mode='reflect')
+    prof = profile_line(
+        pyth_image,
+        (0, 1),
+        (3, 1),
+        linewidth=3,
+        order=0,
+        reduce_func=np.max,
+        mode="reflect",
+    )
     expected_prof = pyth_image[:4, :3].max(1)
     assert_array_almost_equal(prof, expected_prof)


 def test_reduce_func_sum():
-    prof = profile_line(pyth_image, (0, 1), (3, 1), linewidth=3, order=0,
-                        reduce_func=np.sum, mode='reflect')
+    prof = profile_line(
+        pyth_image,
+        (0, 1),
+        (3, 1),
+        linewidth=3,
+        order=0,
+        reduce_func=np.sum,
+        mode="reflect",
+    )
     expected_prof = pyth_image[:4, :3].sum(1)
     assert_array_almost_equal(prof, expected_prof)


 def test_reduce_func_mean_linewidth_1():
-    prof = profile_line(pyth_image, (0, 1), (3, 1), linewidth=1, order=0,
-                        reduce_func=np.mean, mode='constant')
+    prof = profile_line(
+        pyth_image,
+        (0, 1),
+        (3, 1),
+        linewidth=1,
+        order=0,
+        reduce_func=np.mean,
+        mode="constant",
+    )
     expected_prof = pyth_image[:4, 1]
     assert_array_almost_equal(prof, expected_prof)


 def test_reduce_func_None_linewidth_1():
-    prof = profile_line(pyth_image, (1, 2), (4, 2), linewidth=1,
-                        order=0, reduce_func=None, mode='constant')
+    prof = profile_line(
+        pyth_image,
+        (1, 2),
+        (4, 2),
+        linewidth=1,
+        order=0,
+        reduce_func=None,
+        mode="constant",
+    )
     expected_prof = pyth_image[1:5, 2, np.newaxis]
     assert_array_almost_equal(prof, expected_prof)


 def test_reduce_func_None_linewidth_3():
-    prof = profile_line(pyth_image, (1, 2), (4, 2), linewidth=3,
-                        order=0, reduce_func=None, mode='constant')
+    prof = profile_line(
+        pyth_image,
+        (1, 2),
+        (4, 2),
+        linewidth=3,
+        order=0,
+        reduce_func=None,
+        mode="constant",
+    )
     expected_prof = pyth_image[1:5, 1:4]
     assert_array_almost_equal(prof, expected_prof)


 def test_reduce_func_lambda_linewidth_3():
     def reduce_func(x):
-        return x + x ** 2
-    prof = profile_line(pyth_image, (1, 2), (4, 2), linewidth=3, order=0,
-                        reduce_func=reduce_func, mode='constant')
-    expected_prof = cp.apply_along_axis(reduce_func,
-                                        arr=pyth_image[1:5, 1:4], axis=1)
+        return x + x**2
+
+    prof = profile_line(
+        pyth_image,
+        (1, 2),
+        (4, 2),
+        linewidth=3,
+        order=0,
+        reduce_func=reduce_func,
+        mode="constant",
+    )
+    expected_prof = cp.apply_along_axis(
+        reduce_func, arr=pyth_image[1:5, 1:4], axis=1
+    )
     assert_array_almost_equal(prof, expected_prof)


 def test_reduce_func_sqrt_linewidth_3():
     def reduce_func(x):
-        return x ** 0.5
-    prof = profile_line(pyth_image, (1, 2), (4, 2), linewidth=3,
-                        order=0, reduce_func=reduce_func,
-                        mode='constant')
-    expected_prof = cp.apply_along_axis(reduce_func,
-                                        arr=pyth_image[1:5, 1:4], axis=1)
+        return x**0.5
+
+    prof = profile_line(
+        pyth_image,
+        (1, 2),
+        (4, 2),
+        linewidth=3,
+        order=0,
+        reduce_func=reduce_func,
+        mode="constant",
+    )
+    expected_prof = cp.apply_along_axis(
+        reduce_func, arr=pyth_image[1:5, 1:4], axis=1
+    )
     assert_array_almost_equal(prof, expected_prof)


 def test_reduce_func_sumofsqrt_linewidth_3():
     def reduce_func(x):
-        return np.sum(x ** 0.5)
-    prof = profile_line(pyth_image, (1, 2), (4, 2), linewidth=3, order=0,
-                        reduce_func=reduce_func, mode='constant')
-    expected_prof = cp.apply_along_axis(reduce_func,
-                                        arr=pyth_image[1:5, 1:4], axis=1)
+        return np.sum(x**0.5)
+
+    prof = profile_line(
+        pyth_image,
+        (1, 2),
+        (4, 2),
+        linewidth=3,
+        order=0,
+        reduce_func=reduce_func,
+        mode="constant",
+    )
+    expected_prof = cp.apply_along_axis(
+        reduce_func, arr=pyth_image[1:5, 1:4], axis=1
+    )
     assert_array_almost_equal(prof, expected_prof)


 def test_oob_coodinates():
     offset = 2
     idx = pyth_image.shape[0] + offset
-    prof = profile_line(pyth_image, (-offset, 2), (idx, 2), linewidth=1,
-                        order=0, reduce_func=None, mode='constant')
-    expected_prof = cp.vstack([cp.zeros((offset, 1)),
-                               pyth_image[:, 2, cp.newaxis],
-                               cp.zeros((offset + 1, 1))])
+    prof = profile_line(
+        pyth_image,
+        (-offset, 2),
+        (idx, 2),
+        linewidth=1,
+        order=0,
+        reduce_func=None,
+        mode="constant",
+    )
+    expected_prof = cp.vstack(
+        [
+            cp.zeros((offset, 1)),
+            pyth_image[:, 2, cp.newaxis],
+            cp.zeros((offset + 1, 1)),
+        ]
+    )
     assert_array_almost_equal(prof, expected_prof)


 def test_bool_array_input():
-
     shape = (200, 200)
     center_x, center_y = (140, 150)
     radius = 20
     x, y = cp.meshgrid(cp.arange(shape[1]), cp.arange(shape[0]))
-    mask = (y - center_y) ** 2 + (x - center_x) ** 2 < radius ** 2
+    mask = (y - center_y) ** 2 + (x - center_x) ** 2 < radius**2
     src = (center_y, center_x)
     phi = 4 * np.pi / 9.0
     dy = 31 * np.cos(phi)
     dx = 31 * np.sin(phi)
     dst = (center_y + dy, center_x + dx)

-    profile_u8 = profile_line(mask.astype(cp.uint8), src, dst, mode='reflect')
+    profile_u8 = profile_line(mask.astype(cp.uint8), src, dst, mode="reflect")
     assert int(cp.all(profile_u8[:radius] == 1))

-    profile_b = profile_line(mask, src, dst, mode='constant')
+    profile_b = profile_line(mask, src, dst, mode="constant")
     assert int(cp.all(profile_b[:radius] == 1))
     assert int(cp.all(profile_b == profile_u8))
diff --git a/python/cucim/src/cucim/skimage/measure/tests/test_regionprops.py b/python/cucim/src/cucim/skimage/measure/tests/test_regionprops.py
index 67763b8eb..1cb34f88f 100644
--- a/python/cucim/src/cucim/skimage/measure/tests/test_regionprops.py
+++ b/python/cucim/src/cucim/skimage/measure/tests/test_regionprops.py
@@ -12,14 +12,22 @@
 from cucim.skimage import transform
 from cucim.skimage._shared._warnings import expected_warnings
 from cucim.skimage._vendored import pad
-from cucim.skimage.measure import (euler_number, perimeter, perimeter_crofton,
-                                   regionprops, regionprops_table)
-from cucim.skimage.measure._regionprops import \
-    _inertia_eigvals_to_axes_lengths_3D  # noqa
-from cucim.skimage.measure._regionprops import (COL_DTYPES, OBJECT_COLUMNS,
-                                                PROPS, _parse_docs,
-                                                _props_to_dict,
-                                                _require_intensity_image)
+from cucim.skimage.measure import (
+    euler_number,
+    perimeter,
+    perimeter_crofton,
+    regionprops,
+    regionprops_table,
+)
+from cucim.skimage.measure._regionprops import (  # noqa
+    COL_DTYPES,
+    OBJECT_COLUMNS,
+    PROPS,
+    _inertia_eigvals_to_axes_lengths_3D,
+    _parse_docs,
+    _props_to_dict,
+    _require_intensity_image,
+)

 # fmt: off
 SAMPLE = cp.array(
@@ -54,9 +62,9 @@ def get_moment_function(img, spacing=(1, 1)):
     Y, X = np.meshgrid(
         cp.linspace(0, rows * spacing[0], rows, endpoint=False),
         cp.linspace(0, cols * spacing[1], cols, endpoint=False),
-        indexing='ij'
+        indexing="ij",
     )
-    return lambda p, q: cp.sum(Y ** p * X ** q * img)
+    return lambda p, q: cp.sum(Y**p * X**q * img)


 def get_moment3D_function(img, spacing=(1, 1, 1)):
@@ -65,9 +73,9 @@ def get_moment3D_function(img, spacing=(1, 1, 1)):
         cp.linspace(0, slices * spacing[0], slices, endpoint=False),
         cp.linspace(0, rows * spacing[1], rows, endpoint=False),
         cp.linspace(0, cols * spacing[2], cols, endpoint=False),
-        indexing='ij'
+        indexing="ij",
     )
-    return lambda p, q, r: cp.sum(Z ** p * Y ** q * X ** r * img)
+    return lambda p, q, r: cp.sum(Z**p * Y**q * X**r * img)


 def get_central_moment_function(img, spacing=(1, 1)):
@@ -75,7 +83,7 @@ def get_central_moment_function(img, spacing=(1, 1)):
     Y, X = np.meshgrid(
         cp.linspace(0, rows * spacing[0], rows, endpoint=False),
         cp.linspace(0, cols * spacing[1], cols, endpoint=False),
-        indexing='ij'
+        indexing="ij",
     )
     Mpq = get_moment_function(img, spacing=spacing)
     cY = Mpq(1, 0) / Mpq(0, 0)
@@ -88,15 +96,17 @@ def test_all_props():
     for prop in PROPS:
         try:
             # access legacy name via dict
-            assert_array_almost_equal(region[prop],
-                                      getattr(region, PROPS[prop]))
+            assert_array_almost_equal(
+                region[prop], getattr(region, PROPS[prop])
+            )

             # skip property access tests for old CamelCase names
             # (we intentionally do not provide properties for these)
             if prop.lower() == prop:
                 # access legacy name via attribute
-                assert_array_almost_equal(getattr(region, prop),
-                                          getattr(region, PROPS[prop]))
+                assert_array_almost_equal(
+                    getattr(region, prop), getattr(region, PROPS[prop])
+                )

         except TypeError:  # the `slice` property causes this
             pass
@@ -106,14 +116,16 @@ def test_all_props_3d():
     region = regionprops(SAMPLE_3D, INTENSITY_SAMPLE_3D)[0]
     for prop in PROPS:
         try:
-            assert_array_almost_equal(region[prop],
-                                      getattr(region, PROPS[prop]))
+            assert_array_almost_equal(
+                region[prop], getattr(region, PROPS[prop])
+            )

             # skip property access tests for old CamelCase names
             # (we intentionally do not provide properties for these)
             if prop.lower() == prop:
-                assert_array_almost_equal(getattr(region, prop),
-                                          getattr(region, PROPS[prop]))
+                assert_array_almost_equal(
+                    getattr(region, prop), getattr(region, PROPS[prop])
+                )

         except (NotImplementedError, TypeError):
             pass
@@ -148,14 +160,16 @@ def test_ndim():
         regionprops(cp.zeros((10, 10, 10, 2), dtype=int))


-@pytest.mark.skip('feret_diameter_max not implmented on the GPU')
+@pytest.mark.skip("feret_diameter_max not implemented on the GPU")
 def test_feret_diameter_max():
     # comparator result is based on SAMPLE from manually-inspected computations
     comparator_result = 18
     test_result = regionprops(SAMPLE)[0].feret_diameter_max
     assert cp.abs(test_result - comparator_result) < 1
     comparator_result_spacing = 10
-    test_result_spacing = regionprops(SAMPLE, spacing=[1, 0.1])[0].feret_diameter_max  # noqa
+    test_result_spacing = regionprops(SAMPLE, spacing=[1, 0.1])[
+        0
+    ].feret_diameter_max  # noqa
     assert cp.abs(test_result_spacing - comparator_result_spacing) < 1
     # square, test that Feret diameter is sqrt(2) * square side
     img = cp.zeros((20, 20), dtype=cp.uint8)
@@ -164,16 +178,25 @@ def test_feret_diameter_max():
     assert cp.abs(feret_diameter_max - 16 * math.sqrt(2)) < 1
     # Due to marching-squares with a level of .5 the diagonal goes
     # from (0, 0.5) to (16, 15.5).
-    assert cp.abs(feret_diameter_max - np.sqrt(16 ** 2 + (16 - 1) ** 2)) < 1e-6
+    assert cp.abs(feret_diameter_max - np.sqrt(16**2 + (16 - 1) ** 2)) < 1e-6
     spacing = (2, 1)
-    feret_diameter_max = regionprops(img, spacing=spacing)[0].feret_diameter_max  # noqa
+    feret_diameter_max = regionprops(img, spacing=spacing)[
+        0
+    ].feret_diameter_max  # noqa
     # For anisotropic spacing the shift is applied to the smaller spacing.
-    assert cp.abs(feret_diameter_max - cp.sqrt(
-        (spacing[0] * 16 - (spacing[0] <= spacing[1])) ** 2 +
-        (spacing[1] * 16 - (spacing[1] < spacing[0])) ** 2)) < 1e-6
+    assert (
+        cp.abs(
+            feret_diameter_max
+            - cp.sqrt(
+                (spacing[0] * 16 - (spacing[0] <= spacing[1])) ** 2
+                + (spacing[1] * 16 - (spacing[1] < spacing[0])) ** 2
+            )
+        )
+        < 1e-6
+    )


-@pytest.mark.skip('feret_diameter_max not implmented on the GPU')
+@pytest.mark.skip("feret_diameter_max not implemented on the GPU")
 def test_feret_diameter_max_3d():
     img = cp.zeros((20, 20), dtype=cp.uint8)
     img[2:-2, 2:-2] = 1
@@ -184,22 +207,50 @@ def test_feret_diameter_max_3d():
     # (x-1, y-1, z), (x-1, y, z-1), (x, y-1, z-1).
     # The option yielding the longest diagonal is the computed
     # max_feret_diameter.
-    assert cp.abs(feret_diameter_max - cp.sqrt((16 - 1) ** 2 + 16 ** 2 + (3 - 1) ** 2)) < 1e-6  # noqa
+    assert (
+        cp.abs(
+            feret_diameter_max - cp.sqrt((16 - 1) ** 2 + 16**2 + (3 - 1) ** 2)
+        )
+        < 1e-6
+    )  # noqa
     spacing = (1, 2, 3)
-    feret_diameter_max = regionprops(img_3d, spacing=spacing)[0].feret_diameter_max  # noqa
+    feret_diameter_max = regionprops(img_3d, spacing=spacing)[
+        0
+    ].feret_diameter_max  # noqa

     # The longest of the three options is the max_feret_diameter
-    assert cp.abs(feret_diameter_max - cp.sqrt(
-        (spacing[0] * (16 - 1)) ** 2 +
-        (spacing[1] * (16 - 0)) ** 2 +
-        (spacing[2] * (3 - 1)) ** 2)) < 1e-6
-    assert cp.abs(feret_diameter_max - cp.sqrt(
-        (spacing[0] * (16 - 1)) ** 2 +
-        (spacing[1] * (16 - 1)) ** 2 +
-        (spacing[2] * (3 - 0)) ** 2)) > 1e-6
-    assert cp.abs(feret_diameter_max - cp.sqrt(
-        (spacing[0] * (16 - 0)) ** 2 +
-        (spacing[1] * (16 - 1)) ** 2 +
-        (spacing[2] * (3 - 1)) ** 2)) > 1e-6
+    assert (
+        cp.abs(
+            feret_diameter_max
+            - cp.sqrt(
+                (spacing[0] * (16 - 1)) ** 2
+                + (spacing[1] * (16 - 0)) ** 2
+                + (spacing[2] * (3 - 1)) ** 2
+            )
+        )
+        < 1e-6
+    )
+    assert (
+        cp.abs(
+            feret_diameter_max
+            - cp.sqrt(
+                (spacing[0] * (16 - 1)) ** 2
+                + (spacing[1] * (16 - 1)) ** 2
+                + (spacing[2] * (3 - 0)) ** 2
+            )
+        )
+        > 1e-6
+    )
+    assert (
+        cp.abs(
+            feret_diameter_max
+            - cp.sqrt(
+                (spacing[0] * (16 - 0)) ** 2
+                + (spacing[1] * (16 - 1)) ** 2
+                + (spacing[2] * (3 - 1)) ** 2
+            )
+        )
+        > 1e-6
+    )


 def test_area():
@@ -240,7 +291,7 @@ def test_bbox():


 def test_area_bbox():
-    padded = pad(SAMPLE, 5, mode='constant')
+    padded = pad(SAMPLE, 5, mode="constant")
     bbox_area = regionprops(padded)[0].area_bbox
     assert_array_almost_equal(bbox_area, SAMPLE.size)
@@ -426,7 +477,7 @@ def test_equivalent_diameter_area():

     spacing = (1, 3)
     diameter = regionprops(SAMPLE, spacing=spacing)[0].equivalent_diameter_area
-    equivalent_area = cp.pi * (diameter / 2.) ** 2
+    equivalent_area = cp.pi * (diameter / 2.0) ** 2
     assert_almost_equal(equivalent_area, SAMPLE.sum() * math.prod(spacing))
@@ -541,33 +592,38 @@ def test_axis_major_length():
         assert_almost_equal(length, 2 * target_length, decimal=4)

     from skimage.draw import ellipse
+
     img = cp.zeros((20, 24), dtype=cp.uint8)
     rr, cc = ellipse(11, 11, 7, 9, rotation=np.deg2rad(45))
     img[rr, cc] = 1
     target_length = regionprops(img, spacing=(1, 1))[0].axis_major_length
     length_wo_spacing = regionprops(img[::2], spacing=(1, 1))[
-        0].axis_minor_length
+        0
+    ].axis_minor_length
     assert abs(length_wo_spacing - target_length) > 0.1
     length = regionprops(img[:, ::2], spacing=(1, 2))[0].axis_major_length
     assert_almost_equal(length, target_length, decimal=0)


 def test_intensity_max():
-    intensity = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE
-                            )[0].intensity_max
+    intensity = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE)[
+        0
+    ].intensity_max
     assert_almost_equal(intensity, 2)


 def test_intensity_mean():
-    intensity = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE
-                            )[0].intensity_mean
+    intensity = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE)[
+        0
+    ].intensity_mean
     assert_almost_equal(intensity, 1.02777777777777)


 def test_intensity_min():
-    intensity = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE
-                            )[0].intensity_min
+    intensity = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE)[
+        0
+    ].intensity_min
     assert_almost_equal(intensity, 1)
@@ -582,13 +638,15 @@ def test_axis_minor_length():
         assert_almost_equal(length, 1.5 * target_length, decimal=5)

     from skimage.draw import ellipse
+
     img = cp.zeros((10, 12), dtype=np.uint8)
     rr, cc = ellipse(5, 6, 3, 5, rotation=np.deg2rad(30))
     img[rr, cc] = 1
     target_length = regionprops(img, spacing=(1, 1))[0].axis_minor_length
     length_wo_spacing = regionprops(img[::2], spacing=(1, 1))[
-        0].axis_minor_length
+        0
+    ].axis_minor_length
     assert abs(length_wo_spacing - target_length) > 0.1
     length = regionprops(img[::2], spacing=(2, 1))[0].axis_minor_length
     assert_almost_equal(length, target_length, decimal=1)
@@ -674,21 +732,21 @@ def test_orientation():
     orient_diag = regionprops(diag)[0].orientation
     assert_almost_equal(orient_diag, -math.pi / 4)
     orient_diag = regionprops(diag, spacing=(1, 2))[0].orientation
-    assert_almost_equal(orient_diag, np.arccos(0.5 / math.sqrt(1 + 0.5 ** 2)))
+    assert_almost_equal(orient_diag, np.arccos(0.5 / math.sqrt(1 + 0.5**2)))
     orient_diag = regionprops(cp.flipud(diag))[0].orientation
     assert_almost_equal(orient_diag, math.pi / 4)
     orient_diag = regionprops(cp.flipud(diag), spacing=(1, 2))[0].orientation
-    assert_almost_equal(orient_diag, -np.arccos(0.5 / math.sqrt(1 + 0.5 ** 2)))
+    assert_almost_equal(orient_diag, -np.arccos(0.5 / math.sqrt(1 + 0.5**2)))
     orient_diag = regionprops(cp.fliplr(diag))[0].orientation
     assert_almost_equal(orient_diag, math.pi / 4)
     orient_diag = regionprops(cp.fliplr(diag), spacing=(1, 2))[0].orientation
-    assert_almost_equal(orient_diag, -np.arccos(0.5 / math.sqrt(1 + 0.5 ** 2)))
+    assert_almost_equal(orient_diag, -np.arccos(0.5 / math.sqrt(1 + 0.5**2)))
     orient_diag = regionprops(cp.fliplr(cp.flipud(diag)))[0].orientation
     assert_almost_equal(orient_diag, -math.pi / 4)
-    orient_diag = regionprops(
-        np.fliplr(np.flipud(diag)), spacing=(1, 2)
-    )[0].orientation
-    assert_almost_equal(orient_diag, np.arccos(0.5 / math.sqrt(1 + 0.5 ** 2)))
+    orient_diag = regionprops(np.fliplr(np.flipud(diag)), spacing=(1, 2))[
+        0
+    ].orientation
+    assert_almost_equal(orient_diag, np.arccos(0.5 / math.sqrt(1 + 0.5**2)))

 def test_perimeter():
@@ -713,7 +771,7 @@ def test_perimeter_crofton():
     per = regionprops(SAMPLE, spacing=(2, 2))[0].perimeter_crofton
     assert_almost_equal(per, 2 * target_per_crof)

-    per = perimeter_crofton(SAMPLE.astype('double'), directions=2)
+    per = perimeter_crofton(SAMPLE.astype("double"), directions=2)
     assert_almost_equal(per, 64.4026493985)

     with pytest.raises(NotImplementedError):
@@ -730,8 +788,9 @@ def test_solidity():


 def test_moments_weighted_central():
-    wmu = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE
-                      )[0].moments_weighted_central
+    wmu = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE)[
+        0
+    ].moments_weighted_central
     # fmt: off
     ref = cp.array(
         [[7.4000000000e+01, 3.7303493627e-14, 1.2602837838e+03,
@@ -767,8 +826,9 @@ def test_moments_weighted_central():

     # Test spacing
     spacing = (3.2, 1.2)
-    wmu = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE,
-                      spacing=spacing)[0].moments_weighted_central
+    wmu = regionprops(
+        SAMPLE, intensity_image=INTENSITY_SAMPLE, spacing=spacing
+    )[0].moments_weighted_central
     centralMpq = get_central_moment_function(INTENSITY_SAMPLE, spacing=spacing)
     assert_almost_equal(wmu[0, 0], centralMpq(0, 0))
     assert_almost_equal(wmu[0, 1], centralMpq(0, 1))
@@ -789,8 +849,9 @@ def test_moments_weighted_central():


 def test_centroid_weighted():
-    centroid = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE
-                           )[0].centroid_weighted
+    centroid = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE)[
+        0
+    ].centroid_weighted
     target_centroid = (5.540540540540, 9.445945945945)
     centroid = tuple(float(c) for c in centroid)
     assert_array_almost_equal(centroid, target_centroid)
@@ -825,8 +886,9 @@ def test_centroid_weighted():


 def test_moments_weighted_hu():
-    whu = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE
-                      )[0].moments_weighted_hu
+    whu = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE)[
+        0
+    ].moments_weighted_hu
     # fmt: off
     ref = cp.array([
         3.1750587329e-01,
@@ -845,8 +907,9 @@ def test_moments_weighted_hu():


 def test_moments_weighted():
-    wm = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE
-                     )[0].moments_weighted
+    wm = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE)[
+        0
+    ].moments_weighted
     # fmt: off
     ref = cp.array(
         [[7.4000000e+01, 6.9900000e+02, 7.8630000e+03, 9.7317000e+04],
@@ -878,8 +941,9 @@ def test_moments_weighted():

     # Test spacing
     spacing = (3.2, 1.2)
-    wmu = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE,
-                      spacing=spacing)[0].moments_weighted
+    wmu = regionprops(
+        SAMPLE, intensity_image=INTENSITY_SAMPLE, spacing=spacing
+    )[0].moments_weighted
     Mpq = get_moment_function(INTENSITY_SAMPLE, spacing=spacing)
     assert_almost_equal(wmu[0, 0], Mpq(0, 0))
     assert_almost_equal(wmu[0, 1], Mpq(0, 1))
@@ -900,8 +964,9 @@ def test_moments_weighted():


 def test_moments_weighted_normalized():
-    wnu = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE
-                      )[0].moments_weighted_normalized
+    wnu = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE)[
+        0
+    ].moments_weighted_normalized
     # fmt: off
     ref = np.array(
         [[np.nan,        np.nan, 0.2301467830, -0.0162529732],  # noqa
@@ -1009,15 +1074,15 @@ def foo():

     region = regionprops(SAMPLE)[0]
     docs = _parse_docs()
-    props = [m for m in dir(region) if not m.startswith('_')]
+    props = [m for m in dir(region) if not m.startswith("_")]

     nr_docs_parsed = len(docs)
     nr_props = len(props)
     if has_docstrings:
         assert_equal(nr_docs_parsed, nr_props)
-        ds = docs['moments_weighted_normalized']
-        assert 'iteration' not in ds
-        assert len(ds.split('\n')) > 3
+        ds = docs["moments_weighted_normalized"]
+        assert "iteration" not in ds
+        assert len(ds.split("\n")) > 3
     else:
         assert_equal(nr_docs_parsed, 0)
docs["moments_weighted_normalized"] + assert "iteration" not in ds + assert len(ds.split("\n")) > 3 else: assert_equal(nr_docs_parsed, 0) @@ -1025,72 +1090,95 @@ def foo(): def test_props_to_dict(): regions = regionprops(SAMPLE) out = _props_to_dict(regions) - assert out == {'label': cp.array([1]), - 'bbox-0': cp.array([0]), 'bbox-1': cp.array([0]), - 'bbox-2': cp.array([10]), 'bbox-3': cp.array([18])} + assert out == { + "label": cp.array([1]), + "bbox-0": cp.array([0]), + "bbox-1": cp.array([0]), + "bbox-2": cp.array([10]), + "bbox-3": cp.array([18]), + } regions = regionprops(SAMPLE) - out = _props_to_dict(regions, properties=('label', 'area', 'bbox'), - separator='+') - assert out == {'label': cp.array([1]), 'area': cp.array([72]), - 'bbox+0': cp.array([0]), 'bbox+1': cp.array([0]), - 'bbox+2': cp.array([10]), 'bbox+3': cp.array([18])} + out = _props_to_dict( + regions, properties=("label", "area", "bbox"), separator="+" + ) + assert out == { + "label": cp.array([1]), + "area": cp.array([72]), + "bbox+0": cp.array([0]), + "bbox+1": cp.array([0]), + "bbox+2": cp.array([10]), + "bbox+3": cp.array([18]), + } regions = regionprops(SAMPLE_MULTIPLE) - out = _props_to_dict(regions, properties=('coords',)) + out = _props_to_dict(regions, properties=("coords",)) coords = np.empty(2, object) coords[0] = cp.stack((cp.arange(10),) * 2, axis=-1) coords[1] = cp.array([[3, 7], [4, 7]]) - assert out['coords'].shape == coords.shape - assert_array_equal(out['coords'][0], coords[0]) - assert_array_equal(out['coords'][1], coords[1]) + assert out["coords"].shape == coords.shape + assert_array_equal(out["coords"][0], coords[0]) + assert_array_equal(out["coords"][1], coords[1]) def test_regionprops_table(): out = regionprops_table(SAMPLE) - assert out == {'label': cp.array([1]), - 'bbox-0': cp.array([0]), 'bbox-1': cp.array([0]), - 'bbox-2': cp.array([10]), 'bbox-3': cp.array([18])} - - out = regionprops_table(SAMPLE, properties=('label', 'area', 'bbox'), - separator='+') - assert out == {'label': cp.array([1]), 'area': cp.array([72]), - 'bbox+0': cp.array([0]), 'bbox+1': cp.array([0]), - 'bbox+2': cp.array([10]), 'bbox+3': cp.array([18])} - - out = regionprops_table(SAMPLE_MULTIPLE, properties=('coords',)) + assert out == { + "label": cp.array([1]), + "bbox-0": cp.array([0]), + "bbox-1": cp.array([0]), + "bbox-2": cp.array([10]), + "bbox-3": cp.array([18]), + } + + out = regionprops_table( + SAMPLE, properties=("label", "area", "bbox"), separator="+" + ) + assert out == { + "label": cp.array([1]), + "area": cp.array([72]), + "bbox+0": cp.array([0]), + "bbox+1": cp.array([0]), + "bbox+2": cp.array([10]), + "bbox+3": cp.array([18]), + } + + out = regionprops_table(SAMPLE_MULTIPLE, properties=("coords",)) coords = np.empty(2, object) coords[0] = cp.stack((cp.arange(10),) * 2, axis=-1) coords[1] = cp.array([[3, 7], [4, 7]]) - assert out['coords'].shape == coords.shape - assert_array_equal(out['coords'][0], coords[0]) - assert_array_equal(out['coords'][1], coords[1]) + assert out["coords"].shape == coords.shape + assert_array_equal(out["coords"][0], coords[0]) + assert_array_equal(out["coords"][1], coords[1]) def test_regionprops_table_deprecated_vector_property(): - out = regionprops_table(SAMPLE, properties=('local_centroid',)) + out = regionprops_table(SAMPLE, properties=("local_centroid",)) for key in out.keys(): # key reflects the deprecated name, not its new (centroid_local) value - assert key.startswith('local_centroid') + assert key.startswith("local_centroid") def 
test_regionprops_table_deprecated_scalar_property(): - out = regionprops_table(SAMPLE, properties=('bbox_area',)) - assert list(out.keys()) == ['bbox_area'] + out = regionprops_table(SAMPLE, properties=("bbox_area",)) + assert list(out.keys()) == ["bbox_area"] def test_regionprops_table_equal_to_original(): regions = regionprops(SAMPLE, INTENSITY_FLOAT_SAMPLE) - out_table = regionprops_table(SAMPLE, INTENSITY_FLOAT_SAMPLE, - properties=COL_DTYPES.keys()) + out_table = regionprops_table( + SAMPLE, INTENSITY_FLOAT_SAMPLE, properties=COL_DTYPES.keys() + ) for prop, dtype in COL_DTYPES.items(): for i, reg in enumerate(regions): rp = reg[prop] - if cp.isscalar(rp) or \ - (isinstance(rp, cp.ndarray) and rp.ndim == 0) or \ - prop in OBJECT_COLUMNS or \ - dtype is np.object_: + if ( + cp.isscalar(rp) + or (isinstance(rp, cp.ndarray) and rp.ndim == 0) + or prop in OBJECT_COLUMNS + or dtype is np.object_ + ): assert_array_equal(rp, out_table[prop][i]) else: shape = rp.shape if isinstance(rp, cp.ndarray) else (len(rp),) @@ -1101,16 +1189,18 @@ def test_regionprops_table_equal_to_original(): def test_regionprops_table_no_regions(): - out = regionprops_table(cp.zeros((2, 2), dtype=int), - properties=('label', 'area', 'bbox'), - separator='+') + out = regionprops_table( + cp.zeros((2, 2), dtype=int), + properties=("label", "area", "bbox"), + separator="+", + ) assert len(out) == 6 - assert len(out['label']) == 0 - assert len(out['area']) == 0 - assert len(out['bbox+0']) == 0 - assert len(out['bbox+1']) == 0 - assert len(out['bbox+2']) == 0 - assert len(out['bbox+3']) == 0 + assert len(out["label"]) == 0 + assert len(out["area"]) == 0 + assert len(out["bbox+0"]) == 0 + assert len(out["bbox+1"]) == 0 + assert len(out["bbox+2"]) == 0 + assert len(out["bbox+3"]) == 0 def test_column_dtypes_complete(): @@ -1118,7 +1208,7 @@ def test_column_dtypes_complete(): def test_column_dtypes_correct(): - msg = 'mismatch with expected type,' + msg = "mismatch with expected type," region = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE)[0] for col in COL_DTYPES: r = region[col] @@ -1141,17 +1231,15 @@ def test_column_dtypes_correct(): t = type(r.ravel()[0].item()) if cp.issubdtype(t, cp.floating): - assert COL_DTYPES[col] == float, ( - f'{col} dtype {t} {msg} {COL_DTYPES[col]}' - ) + assert ( + COL_DTYPES[col] == float + ), f"{col} dtype {t} {msg} {COL_DTYPES[col]}" elif cp.issubdtype(t, cp.integer): - assert COL_DTYPES[col] == int, ( - f'{col} dtype {t} {msg} {COL_DTYPES[col]}' - ) + assert ( + COL_DTYPES[col] == int + ), f"{col} dtype {t} {msg} {COL_DTYPES[col]}" else: - assert False, ( - f'{col} dtype {t} {msg} {COL_DTYPES[col]}' - ) + assert False, f"{col} dtype {t} {msg} {COL_DTYPES[col]}" def pixelcount(regionmask): @@ -1177,13 +1265,15 @@ def test_extra_properties(): def test_extra_properties_intensity(): - region = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE, - extra_properties=(intensity_median,) - )[0] + region = regionprops( + SAMPLE, + intensity_image=INTENSITY_SAMPLE, + extra_properties=(intensity_median,), + )[0] assert region.intensity_median == cp.median(INTENSITY_SAMPLE[SAMPLE == 1]) -@pytest.mark.parametrize('intensity_prop', _require_intensity_image) +@pytest.mark.parametrize("intensity_prop", _require_intensity_image) def test_intensity_image_required(intensity_prop): region = regionprops(SAMPLE)[0] with pytest.raises(AttributeError) as e: @@ -1212,21 +1302,24 @@ def test_extra_properties_nr_args(): def test_extra_properties_mixed(): # mixed properties, with and without intensity - 
region = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE, - extra_properties=(intensity_median, pixelcount) - )[0] + region = regionprops( + SAMPLE, + intensity_image=INTENSITY_SAMPLE, + extra_properties=(intensity_median, pixelcount), + )[0] assert region.intensity_median == cp.median(INTENSITY_SAMPLE[SAMPLE == 1]) assert region.pixelcount == cp.sum(SAMPLE == 1) def test_extra_properties_table(): - out = regionprops_table(SAMPLE_MULTIPLE, - intensity_image=INTENSITY_SAMPLE_MULTIPLE, - properties=('label',), - extra_properties=(intensity_median, pixelcount) - ) - assert_array_almost_equal(out['intensity_median'], np.array([2.0, 4.0])) - assert_array_equal(out['pixelcount'], np.array([10, 2])) + out = regionprops_table( + SAMPLE_MULTIPLE, + intensity_image=INTENSITY_SAMPLE_MULTIPLE, + properties=("label",), + extra_properties=(intensity_median, pixelcount), + ) + assert_array_almost_equal(out["intensity_median"], np.array([2.0, 4.0])) + assert_array_equal(out["pixelcount"], np.array([10, 2])) def test_multichannel(): @@ -1239,14 +1332,16 @@ def test_multichannel(): labels = cp.asarray(labels) segment_idx = int(cp.max(labels) // 2) - region = regionprops(labels, - astro_green, - extra_properties=[intensity_median], - )[segment_idx] - region_multi = regionprops(labels, - astro, - extra_properties=[intensity_median], - )[segment_idx] + region = regionprops( + labels, + astro_green, + extra_properties=[intensity_median], + )[segment_idx] + region_multi = regionprops( + labels, + astro, + extra_properties=[intensity_median], + )[segment_idx] for prop in list(PROPS.keys()) + ["intensity_median"]: p = region[prop] @@ -1277,12 +1372,11 @@ def test_3d_ellipsoid_axis_lengths(): # Pad by asymmetric amounts so the ellipse isn't centered. Also, pad enough # that the rotated ellipse will still be within the original volume. 
- e = np.pad(e, pad_width=[(30, 18), (30, 12), (40, 20)], mode='constant') + e = np.pad(e, pad_width=[(30, 18), (30, 12), (40, 20)], mode="constant") e = cp.array(e) # apply rotations to the ellipsoid - R = transform.EuclideanTransform(rotation=[0.2, 0.3, 0.4], - dimensionality=3) + R = transform.EuclideanTransform(rotation=[0.2, 0.3, 0.4], dimensionality=3) e = ndi.affine_transform(e, R.params) # Compute regionprops diff --git a/python/cucim/src/cucim/skimage/metrics/__init__.py b/python/cucim/src/cucim/skimage/metrics/__init__.py index fe87dd988..571d7790a 100644 --- a/python/cucim/src/cucim/skimage/metrics/__init__.py +++ b/python/cucim/src/cucim/skimage/metrics/__init__.py @@ -2,8 +2,12 @@ from ._contingency_table import contingency_table from ._structural_similarity import structural_similarity from ._variation_of_information import variation_of_information -from .simple_metrics import (mean_squared_error, normalized_mutual_information, - normalized_root_mse, peak_signal_noise_ratio) +from .simple_metrics import ( + mean_squared_error, + normalized_mutual_information, + normalized_root_mse, + peak_signal_noise_ratio, +) __all__ = [ "adapted_rand_error", diff --git a/python/cucim/src/cucim/skimage/metrics/_adapted_rand_error.py b/python/cucim/src/cucim/skimage/metrics/_adapted_rand_error.py index 9b3ef211a..2345f24cc 100644 --- a/python/cucim/src/cucim/skimage/metrics/_adapted_rand_error.py +++ b/python/cucim/src/cucim/skimage/metrics/_adapted_rand_error.py @@ -1,11 +1,17 @@ from .._shared.utils import check_shape_equality from ._contingency_table import contingency_table -__all__ = ['adapted_rand_error'] +__all__ = ["adapted_rand_error"] -def adapted_rand_error(image_true=None, image_test=None, *, table=None, - ignore_labels=(0,), alpha=0.5): +def adapted_rand_error( + image_true=None, + image_test=None, + *, + table=None, + ignore_labels=(0,), + alpha=0.5, +): r"""Compute Adapted Rand error as defined by the SNEMI3D contest. [1]_ Parameters @@ -67,13 +73,14 @@ def adapted_rand_error(image_true=None, image_test=None, *, table=None, check_shape_equality(image_true, image_test) if table is None: - p_ij = contingency_table(image_true, image_test, - ignore_labels=ignore_labels, normalize=False) + p_ij = contingency_table( + image_true, image_test, ignore_labels=ignore_labels, normalize=False + ) else: p_ij = table if alpha < 0.0 or alpha > 1.0: - raise ValueError('alpha must be between 0 and 1') + raise ValueError("alpha must be between 0 and 1") # Sum of the joint distribution squared sum_p_ij2 = p_ij.data @ p_ij.data - p_ij.sum() @@ -91,6 +98,6 @@ def adapted_rand_error(image_true=None, image_test=None, *, table=None, recall = sum_p_ij2 / sum_b2 fscore = sum_p_ij2 / (alpha * sum_a2 + (1 - alpha) * sum_b2) - are = 1. - fscore + are = 1.0 - fscore return are, precision, recall diff --git a/python/cucim/src/cucim/skimage/metrics/_contingency_table.py b/python/cucim/src/cucim/skimage/metrics/_contingency_table.py index 56c906cd3..45923b478 100644 --- a/python/cucim/src/cucim/skimage/metrics/_contingency_table.py +++ b/python/cucim/src/cucim/skimage/metrics/_contingency_table.py @@ -1,11 +1,10 @@ import cupy as cp import cupyx.scipy.sparse as sparse -__all__ = ['contingency_table'] +__all__ = ["contingency_table"] -def contingency_table(im_true, im_test, *, ignore_labels=None, - normalize=False): +def contingency_table(im_true, im_test, *, ignore_labels=None, normalize=False): """ Return the contingency table for all regions in matched segmentations. 
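For readers unfamiliar with the API touched in the hunk above, here is a minimal usage sketch of `contingency_table` (signature exactly as reformatted above); the toy label images are illustrative only, not taken from the test suite:

    import cupy as cp
    from cucim.skimage.metrics import contingency_table

    im_true = cp.asarray([[1, 1, 2], [1, 2, 2]])
    im_test = cp.asarray([[1, 1, 1], [2, 2, 2]])
    # Sparse matrix whose (i, j) entry counts the pixels labeled i in
    # im_true and j in im_test; with normalize=True the entries sum to 1.
    table = contingency_table(im_true, im_test, normalize=True)

The same table can be passed back in via the `table=` keyword of `adapted_rand_error` or `variation_of_information` to avoid recomputing it.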
diff --git a/python/cucim/src/cucim/skimage/metrics/_structural_similarity.py b/python/cucim/src/cucim/skimage/metrics/_structural_similarity.py index 0d68b6fb3..188a216d1 100644 --- a/python/cucim/src/cucim/skimage/metrics/_structural_similarity.py +++ b/python/cucim/src/cucim/skimage/metrics/_structural_similarity.py @@ -10,7 +10,7 @@ from ..util.arraycrop import crop from ..util.dtype import dtype_range -__all__ = ['structural_similarity'] +__all__ = ["structural_similarity"] _ssim_operation = """ @@ -36,32 +36,40 @@ @cp.memoize(for_each_device=True) def _get_ssim_kernel(): return cp.ElementwiseKernel( - in_params='float64 cov_norm, F ux, F uy, F uxx, F uyy, F uxy, float64 data_range, float64 K1, float64 K2', # noqa - out_params='F ssim', + in_params="float64 cov_norm, F ux, F uy, F uxx, F uyy, F uxy, float64 data_range, float64 K1, float64 K2", # noqa + out_params="F ssim", operation=_ssim_operation, - name='cucim_ssim' + name="cucim_ssim", ) @cp.memoize(for_each_device=True) def _get_ssim_grad_kernel(): return cp.ElementwiseKernel( - in_params='float64 cov_norm, F ux, F uy, F uxx, F uyy, F uxy, float64 data_range, float64 K1, float64 K2', # noqa - out_params='F ssim, F grad_temp1, F grad_temp2, F grad_temp3', - operation=_ssim_operation + """ + in_params="float64 cov_norm, F ux, F uy, F uxx, F uyy, F uxy, float64 data_range, float64 K1, float64 K2", # noqa + out_params="F ssim, F grad_temp1, F grad_temp2, F grad_temp3", + operation=_ssim_operation + + """ grad_temp1 = A1 / D; grad_temp2 = -ssim / B2; grad_temp3 = (ux * (A2 - A1) - uy * (B2 - B1) * ssim) / D; """, - name='cucim_ssim' + name="cucim_ssim", ) -def structural_similarity(im1, im2, - *, - win_size=None, gradient=False, data_range=None, - channel_axis=None, gaussian_weights=False, - full=False, **kwargs): +def structural_similarity( + im1, + im2, + *, + win_size=None, + gradient=False, + data_range=None, + channel_axis=None, + gaussian_weights=False, + full=False, + **kwargs, +): """ Compute the mean structural similarity index between two images. 
Please pay attention to the `data_range` parameter with floating-point @@ -162,12 +170,14 @@ def structural_similarity(im1, im2, if channel_axis is not None: # loop over channels - args = dict(win_size=win_size, - gradient=gradient, - data_range=data_range, - channel_axis=None, - gaussian_weights=gaussian_weights, - full=full) + args = dict( + win_size=win_size, + gradient=gradient, + data_range=data_range, + channel_axis=None, + gaussian_weights=gaussian_weights, + full=full, + ) args.update(kwargs) nch = im1.shape[channel_axis] mssim = cp.empty(nch, dtype=float_type) @@ -178,8 +188,9 @@ def structural_similarity(im1, im2, channel_axis = channel_axis % im1.ndim _at = functools.partial(utils.slice_at_axis, axis=channel_axis) for ch in range(nch): - ch_result = structural_similarity(im1[_at(ch)], - im2[_at(ch)], **args) + ch_result = structural_similarity( + im1[_at(ch)], im2[_at(ch)], **args + ) if gradient and full: mssim[ch], G[_at(ch)], S[_at(ch)] = ch_result elif gradient: @@ -198,16 +209,16 @@ def structural_similarity(im1, im2, else: return mssim - K1 = kwargs.pop('K1', 0.01) - K2 = kwargs.pop('K2', 0.03) - sigma = kwargs.pop('sigma', 1.5) + K1 = kwargs.pop("K1", 0.01) + K2 = kwargs.pop("K2", 0.03) + sigma = kwargs.pop("sigma", 1.5) if K1 < 0: raise ValueError("K1 must be positive") if K2 < 0: raise ValueError("K2 must be positive") if sigma < 0: raise ValueError("sigma must be positive") - use_sample_covariance = kwargs.pop('use_sample_covariance', True) + use_sample_covariance = kwargs.pop("use_sample_covariance", True) if gaussian_weights: # Set to give an 11-tap filter with the default sigma of 1.5 to match @@ -224,53 +235,59 @@ def structural_similarity(im1, im2, if any(s < win_size for s in im1.shape): raise ValueError( - 'win_size exceeds image extent. ' - 'Either ensure that your images are ' - 'at least 7x7; or pass win_size explicitly ' - 'in the function call, with an odd value ' - 'less than or equal to the smaller side of your ' - 'images. If your images are multichannel ' - '(with color channels), set channel_axis to ' - 'the axis number corresponding to the channels.') + "win_size exceeds image extent. " + "Either ensure that your images are " + "at least 7x7; or pass win_size explicitly " + "in the function call, with an odd value " + "less than or equal to the smaller side of your " + "images. If your images are multichannel " + "(with color channels), set channel_axis to " + "the axis number corresponding to the channels." + ) if not (win_size % 2 == 1): - raise ValueError('Window size must be odd.') + raise ValueError("Window size must be odd.") if data_range is None: - if ( - cp.issubdtype(im1.dtype, cp.floating) or - cp.issubdtype(im2.dtype, cp.floating) + if cp.issubdtype(im1.dtype, cp.floating) or cp.issubdtype( + im2.dtype, cp.floating ): raise ValueError( - 'Since image dtype is floating point, you must specify ' - 'the data_range parameter. Please read the documentation ' - 'carefully (including the note). It is recommended that ' - 'you always specify the data_range anyway.') + "Since image dtype is floating point, you must specify " + "the data_range parameter. Please read the documentation " + "carefully (including the note). It is recommended that " + "you always specify the data_range anyway." + ) if im1.dtype != im2.dtype: - warn("Inputs have mismatched dtypes. Setting data_range based on " - "im1.dtype.", stacklevel=2) + warn( + "Inputs have mismatched dtypes. 
Setting data_range based on " + "im1.dtype.", + stacklevel=2, + ) dmin, dmax = dtype_range[im1.dtype.type] data_range = float(dmax - dmin) if cp.issubdtype(im1.dtype, cp.integer) and (im1.dtype != cp.uint8): - warn("Setting data_range based on im1.dtype. " + - ("data_range = %.0f. " % data_range) + - "Please specify data_range explicitly to avoid mistakes.", - stacklevel=2) + warn( + "Setting data_range based on im1.dtype. " + + ("data_range = %.0f. " % data_range) + + "Please specify data_range explicitly to avoid mistakes.", + stacklevel=2, + ) ndim = im1.ndim if gaussian_weights: filter_func = gaussian - filter_args = {'sigma': sigma, 'truncate': truncate, 'mode': 'reflect'} + filter_args = {"sigma": sigma, "truncate": truncate, "mode": "reflect"} else: filter_func = ndi.uniform_filter - filter_args = {'size': win_size} + filter_args = {"size": win_size} # ndimage filters need floating point data im1 = im1.astype(float_type, copy=False) im2 = im2.astype(float_type, copy=False) - NP = win_size ** ndim + NP = win_size**ndim # filter has already normalized by NP if use_sample_covariance: @@ -297,8 +314,21 @@ def structural_similarity(im1, im2, grad_temp2 = cp.empty_like(ux) grad_temp3 = cp.empty_like(ux) kernel = _get_ssim_grad_kernel() - kernel(cov_norm, ux, uy, uxx, uyy, uxy, data_range, K1, K2, S, - grad_temp1, grad_temp2, grad_temp3) + kernel( + cov_norm, + ux, + uy, + uxx, + uyy, + uxy, + data_range, + K1, + K2, + S, + grad_temp1, + grad_temp2, + grad_temp3, + ) # to avoid edge effects will ignore filter radius strip around edges pad = (win_size - 1) // 2 @@ -311,7 +341,7 @@ def structural_similarity(im1, im2, grad = filter_func(grad_temp1, **filter_args) * im1 grad += filter_func(grad_temp2, **filter_args) * im2 grad += filter_func(grad_temp3, **filter_args) - grad *= (2 / im1.size) + grad *= 2 / im1.size if full: return mssim, grad, S diff --git a/python/cucim/src/cucim/skimage/metrics/_variation_of_information.py b/python/cucim/src/cucim/skimage/metrics/_variation_of_information.py index 143c702cd..eeab01b35 100644 --- a/python/cucim/src/cucim/skimage/metrics/_variation_of_information.py +++ b/python/cucim/src/cucim/skimage/metrics/_variation_of_information.py @@ -4,11 +4,12 @@ from .._shared.utils import check_shape_equality from ._contingency_table import contingency_table -__all__ = ['variation_of_information'] +__all__ = ["variation_of_information"] -def variation_of_information(image0=None, image1=None, *, table=None, - ignore_labels=()): +def variation_of_information( + image0=None, image1=None, *, table=None, ignore_labels=() +): """Return symmetric conditional entropies associated with the VI. [1]_ The variation of information is defined as VI(X,Y) = H(X|Y) + H(Y|X). @@ -41,8 +42,9 @@ def variation_of_information(image0=None, image1=None, *, table=None, distance, Journal of Multivariate Analysis, Volume 98, Issue 5, Pages 873-895, ISSN 0047-259X, :DOI:`10.1016/j.jmva.2006.11.013`. 
""" - h0g1, h1g0 = _vi_tables(image0, image1, table=table, - ignore_labels=ignore_labels) + h0g1, h1g0 = _vi_tables( + image0, image1, table=table, ignore_labels=ignore_labels + ) # false splits, false merges return cp.array([h1g0.sum(), h0g1.sum()]) @@ -95,8 +97,7 @@ def _vi_tables(im_true, im_test, table=None, ignore_labels=()): if table is None: # normalize, since it is an identity op if already done pxy = contingency_table( - im_true, im_test, - ignore_labels=ignore_labels, normalize=True + im_true, im_test, ignore_labels=ignore_labels, normalize=True ) else: diff --git a/python/cucim/src/cucim/skimage/metrics/simple_metrics.py b/python/cucim/src/cucim/skimage/metrics/simple_metrics.py index 3e1a7bdbb..b798704e3 100644 --- a/python/cucim/src/cucim/skimage/metrics/simple_metrics.py +++ b/python/cucim/src/cucim/skimage/metrics/simple_metrics.py @@ -5,11 +5,12 @@ from .._vendored import pad from ..util.dtype import dtype_range -__all__ = ['mean_squared_error', - 'normalized_root_mse', - 'peak_signal_noise_ratio', - 'normalized_mutual_information', - ] +__all__ = [ + "mean_squared_error", + "normalized_root_mse", + "peak_signal_noise_ratio", + "normalized_mutual_information", +] def _as_floats(image0, image1): @@ -49,7 +50,7 @@ def mean_squared_error(image0, image1): return cp.mean(diff * diff, dtype=cp.float64) -def normalized_root_mse(image_true, image_test, *, normalization='euclidean'): +def normalized_root_mse(image_true, image_test, *, normalization="euclidean"): """ Compute the normalized root mean-squared error (NRMSE) between two images. @@ -99,11 +100,11 @@ def normalized_root_mse(image_true, image_test, *, normalization='euclidean'): # Ensure that both 'Euclidean' and 'euclidean' match normalization = normalization.lower() - if normalization == 'euclidean': + if normalization == "euclidean": denom = cp.sqrt(cp.mean((image_true * image_true), dtype=cp.float64)) - elif normalization == 'min-max': + elif normalization == "min-max": denom = image_true.max() - image_true.min() - elif normalization == 'mean': + elif normalization == "mean": denom = image_true.mean() else: raise ValueError("Unsupported norm_type") @@ -145,14 +146,18 @@ def peak_signal_noise_ratio(image_true, image_test, *, data_range=None): if data_range is None: if image_true.dtype != image_test.dtype: - warn("Inputs have mismatched dtype. Setting data_range based on " - "im_true.", stacklevel=2) + warn( + "Inputs have mismatched dtype. Setting data_range based on " + "im_true.", + stacklevel=2, + ) dmin, dmax = dtype_range[image_true.dtype.type] true_min, true_max = cp.min(image_true), cp.max(image_true) if true_max > dmax or true_min < dmin: raise ValueError( "im_true has intensity values outside the range expected for " - "its data type. Please manually specify the data_range") + "its data type. Please manually specify the data_range" + ) if true_min >= 0: # most common case (255 for uint8, 1 for float) data_range = dmax @@ -186,10 +191,12 @@ def _pad_to(arr, shape): array([[1, 0, 0]]) """ if not all(s >= i for s, i in zip(shape, arr.shape)): - raise ValueError(f'Target shape {shape} cannot be smaller than input' - f'shape {arr.shape} along any axis.') + raise ValueError( + f"Target shape {shape} cannot be smaller than input" + f"shape {arr.shape} along any axis." 
+ ) padding = [(0, s - i) for s, i in zip(shape, arr.shape)] - return pad(arr, pad_width=padding, mode='constant', constant_values=0) + return pad(arr, pad_width=padding, mode="constant", constant_values=0) def normalized_mutual_information(image0, image1, *, bins=100): @@ -240,9 +247,11 @@ def normalized_mutual_information(image0, image1, *, bins=100): :DOI:`10.1016/S0031-3203(98)00091-0` """ if image0.ndim != image1.ndim: - raise ValueError(f'NMI requires images of same number of dimensions. ' - f'Got {image0.ndim}D for `image0` and ' - f'{image1.ndim}D for `image1`.') + raise ValueError( + f"NMI requires images of same number of dimensions. " + f"Got {image0.ndim}D for `image0` and " + f"{image1.ndim}D for `image1`." + ) if image0.shape != image1.shape: max_shape = tuple( max(s0, s1) for s0, s1 in zip(image0.shape, image1.shape) diff --git a/python/cucim/src/cucim/skimage/metrics/tests/test_segmentation_metrics.py b/python/cucim/src/cucim/skimage/metrics/tests/test_segmentation_metrics.py index 11409f2f7..704bd46ff 100644 --- a/python/cucim/src/cucim/skimage/metrics/tests/test_segmentation_metrics.py +++ b/python/cucim/src/cucim/skimage/metrics/tests/test_segmentation_metrics.py @@ -3,8 +3,11 @@ from cupy.testing import assert_array_equal from numpy.testing import assert_almost_equal, assert_equal -from cucim.skimage.metrics import (adapted_rand_error, contingency_table, - variation_of_information) +from cucim.skimage.metrics import ( + adapted_rand_error, + contingency_table, + variation_of_information, +) def test_contingency_table(): @@ -31,13 +34,12 @@ def test_vi(): def test_vi_ignore_labels(): - im1 = cp.array([[1, 0], - [2, 3]], dtype='uint8') - im2 = cp.array([[1, 1], - [1, 0]], dtype='uint8') + im1 = cp.array([[1, 0], [2, 3]], dtype="uint8") + im2 = cp.array([[1, 1], [1, 0]], dtype="uint8") - false_splits, false_merges = variation_of_information(im1, im2, - ignore_labels=[0]) + false_splits, false_merges = variation_of_information( + im1, im2, ignore_labels=[0] + ) assert (false_splits, false_merges) == (0, 2 / 3) @@ -46,15 +48,15 @@ def test_are(): im_test = cp.array([[1, 2], [3, 1]]) assert_almost_equal( tuple(map(float, adapted_rand_error(im_true, im_test))), - (0.3333333, 0.5, 1.0) + (0.3333333, 0.5, 1.0), ) assert_almost_equal( tuple(map(float, adapted_rand_error(im_true, im_test, alpha=0))), - (0, 0.5, 1.0) + (0, 0.5, 1.0), ) assert_almost_equal( tuple(map(float, adapted_rand_error(im_true, im_test, alpha=1))), - (0.5, 0.5, 1.0) + (0.5, 0.5, 1.0), ) with pytest.raises(ValueError): diff --git a/python/cucim/src/cucim/skimage/metrics/tests/test_simple_metrics.py b/python/cucim/src/cucim/skimage/metrics/tests/test_simple_metrics.py index 8c062a2dc..161f08933 100644 --- a/python/cucim/src/cucim/skimage/metrics/tests/test_simple_metrics.py +++ b/python/cucim/src/cucim/skimage/metrics/tests/test_simple_metrics.py @@ -6,9 +6,12 @@ from skimage import data from cucim.skimage._shared._warnings import expected_warnings -from cucim.skimage.metrics import (mean_squared_error, - normalized_mutual_information, - normalized_root_mse, peak_signal_noise_ratio) +from cucim.skimage.metrics import ( + mean_squared_error, + normalized_mutual_information, + normalized_root_mse, + peak_signal_noise_ratio, +) np.random.seed( 5 @@ -23,7 +26,7 @@ assert_almost_equal = cp.testing.assert_array_almost_equal -@pytest.mark.parametrize('dtype', [cp.uint8, cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype", [cp.uint8, cp.float32, cp.float64]) def test_PSNR_vs_IPOL(dtype): """Tests vs. 
imdiff result from the following IPOL article and code: https://www.ipol.im/pub/art/2011/g_lmii/. @@ -33,18 +36,19 @@ def test_PSNR_vs_IPOL(dtype): https://github.com/scikit-image/scikit-image/pull/4913#issuecomment-700653165 """ p_IPOL = 22.409353363576034 - p = peak_signal_noise_ratio(cam.astype(dtype), cam_noisy.astype(dtype), - data_range=255) + p = peak_signal_noise_ratio( + cam.astype(dtype), cam_noisy.astype(dtype), data_range=255 + ) # internally, mean_square_error always sets dtype=cp.float64 for accuracy assert p.dtype == cp.float64 assert_almost_equal(p, p_IPOL, decimal=4) -@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64]) def test_PSNR_float(dtype): p_uint8 = peak_signal_noise_ratio(cam, cam_noisy) - camf = (cam / 255.).astype(dtype, copy=False) - camf_noisy = (cam_noisy / 255.).astype(dtype, copy=False) + camf = (cam / 255.0).astype(dtype, copy=False) + camf_noisy = (cam_noisy / 255.0).astype(dtype, copy=False) p_float64 = peak_signal_noise_ratio(camf, camf_noisy, data_range=1) assert p_float64.dtype == cp.float64 decimal = 3 if dtype == cp.float16 else 5 @@ -52,15 +56,15 @@ def test_PSNR_float(dtype): # mixed precision inputs p_mixed = peak_signal_noise_ratio( - cam / 255., (cam_noisy / 255.).astype(cp.float32), data_range=1 + cam / 255.0, (cam_noisy / 255.0).astype(cp.float32), data_range=1 ) assert_almost_equal(p_mixed, p_float64, decimal=decimal) # mismatched dtype results in a warning if data_range is unspecified - with expected_warnings(['Inputs have mismatched dtype']): + with expected_warnings(["Inputs have mismatched dtype"]): p_mixed = peak_signal_noise_ratio( - cam / 255., (cam_noisy / 255.).astype(cp.float32) + cam / 255.0, (cam_noisy / 255.0).astype(cp.float32) ) assert_almost_equal(p_mixed, p_float64, decimal=decimal) @@ -71,31 +75,38 @@ def test_PSNR_errors(): peak_signal_noise_ratio(cam, cam[:-1, :]) -@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64]) def test_NRMSE(dtype): x = cp.ones(4, dtype=dtype) - y = cp.asarray([0., 2., 2., 2.], dtype=dtype) - nrmse = normalized_root_mse(y, x, normalization='mean') + y = cp.asarray([0.0, 2.0, 2.0, 2.0], dtype=dtype) + nrmse = normalized_root_mse(y, x, normalization="mean") assert nrmse.dtype == cp.float64 assert_almost_equal(nrmse, 1 / cp.mean(y, dtype=cp.float64)) - assert_almost_equal(normalized_root_mse(y, x, normalization='euclidean'), - 1 / math.sqrt(3)) - assert_almost_equal(normalized_root_mse(y, x, normalization='min-max'), - 1 / (y.max() - y.min())) + assert_almost_equal( + normalized_root_mse(y, x, normalization="euclidean"), 1 / math.sqrt(3) + ) + assert_almost_equal( + normalized_root_mse(y, x, normalization="min-max"), + 1 / (y.max() - y.min()), + ) # mixed precision inputs are allowed - assert_almost_equal(normalized_root_mse(y, x.astype(cp.float32), - normalization='min-max'), - 1 / (y.max() - y.min())) + assert_almost_equal( + normalized_root_mse(y, x.astype(cp.float32), normalization="min-max"), + 1 / (y.max() - y.min()), + ) def test_NRMSE_no_int_overflow(): camf = cam.astype(cp.float32) cam_noisyf = cam_noisy.astype(cp.float32) - assert_almost_equal(mean_squared_error(cam, cam_noisy), - mean_squared_error(camf, cam_noisyf)) - assert_almost_equal(normalized_root_mse(cam, cam_noisy), - normalized_root_mse(camf, cam_noisyf)) + assert_almost_equal( + mean_squared_error(cam, cam_noisy), mean_squared_error(camf, 
cam_noisyf)
+    )
+    assert_almost_equal(
+        normalized_root_mse(cam, cam_noisy),
+        normalized_root_mse(camf, cam_noisyf),
+    )


 def test_NRMSE_errors():
@@ -110,15 +121,16 @@ def test_NRMSE_errors():

 def test_nmi():
     assert_almost_equal(float(normalized_mutual_information(cam, cam)), 2)
-    assert (normalized_mutual_information(cam, cam_noisy)
-            < normalized_mutual_information(cam, cam))
+    assert normalized_mutual_information(
+        cam, cam_noisy
+    ) < normalized_mutual_information(cam, cam)


 def test_nmi_different_sizes():
     assert float(normalized_mutual_information(cam[:, :400], cam[:400, :])) > 1


-@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64])
+@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64])
 def test_nmi_random(dtype):
     rng = cp.random.default_rng()
     random1 = rng.random((100, 100)).astype(dtype)
diff --git a/python/cucim/src/cucim/skimage/metrics/tests/test_structural_similarity.py b/python/cucim/src/cucim/skimage/metrics/tests/test_structural_similarity.py
index c11569df9..6a64a3b7e 100644
--- a/python/cucim/src/cucim/skimage/metrics/tests/test_structural_similarity.py
+++ b/python/cucim/src/cucim/skimage/metrics/tests/test_structural_similarity.py
@@ -59,8 +59,8 @@ def test_structural_similarity_image():

 # Because we are forcing a random seed state, it is probably good to test
 # against a few seeds in case one seed gives a particularly bad example
-@pytest.mark.parametrize('seed', [1, 2, 3, 5, 8, 13])
-@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64])
+@pytest.mark.parametrize("seed", [1, 2, 3, 5, 8, 13])
+@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64])
 def test_structural_similarity_grad(seed, dtype):
     N = 60
     # NOTE: This test is known to randomly fail on some systems (Mac OS X 10.6)
@@ -92,13 +92,13 @@


 @pytest.mark.parametrize(
-    'dtype', [cp.uint8, cp.int32, cp.float16, cp.float32, cp.float64]
+    "dtype", [cp.uint8, cp.int32, cp.float16, cp.float32, cp.float64]
 )
 def test_structural_similarity_dtype(dtype):
     N = 30
     X = np.random.rand(N, N)
     Y = np.random.rand(N, N)
-    if np.dtype(dtype).kind in 'iub':
+    if np.dtype(dtype).kind in "iub":
         data_range = 255.0
         X = (X * 255).astype(dtype)
         Y = (X * 255).astype(dtype)
@@ -115,7 +115,7 @@
     assert S1 < 0.1


-@pytest.mark.parametrize('channel_axis', [0, 1, 2, -1])
+@pytest.mark.parametrize("channel_axis", [0, 1, 2, -1])
 def test_structural_similarity_multichannel(channel_axis):
     N = 100
     X = (cp.random.rand(N, N) * 255).astype(cp.uint8)
@@ -138,15 +138,15 @@
     assert_equal(S3.shape, Xc.shape)

     # gradient case
-    m, grad = structural_similarity(Xc, Yc, channel_axis=channel_axis,
-                                    gradient=True)
+    m, grad = structural_similarity(
+        Xc, Yc, channel_axis=channel_axis, gradient=True
+    )
     assert_equal(grad.shape, Xc.shape)

     # full and gradient case
-    m, grad, S3 = structural_similarity(Xc, Yc,
-                                        channel_axis=channel_axis,
-                                        full=True,
-                                        gradient=True)
+    m, grad, S3 = structural_similarity(
+        Xc, Yc, channel_axis=channel_axis, full=True, gradient=True
+    )
     assert_equal(grad.shape, Xc.shape)
     assert_equal(S3.shape, Xc.shape)

@@ -155,7 +155,7 @@
         structural_similarity(Xc, Yc, win_size=7, channel_axis=None)


-@pytest.mark.parametrize('dtype', [cp.uint8, cp.float32, cp.float64])
+@pytest.mark.parametrize("dtype", [cp.uint8, cp.float32, cp.float64])
 def
test_structural_similarity_nD(dtype): # test 1D through 4D on small random arrays N = 10 @@ -206,7 +206,7 @@ def test_gaussian_structural_similarity_vs_IPOL(): @pytest.mark.parametrize( - 'dtype', [np.uint8, np.int32, np.float16, np.float32, np.float64] + "dtype", [np.uint8, np.int32, np.float16, np.float32, np.float64] ) def test_mssim_vs_legacy(dtype): # check that ssim with default options matches skimage 0.11 result @@ -214,9 +214,9 @@ def test_mssim_vs_legacy(dtype): decimal = 4 if _supported_float_type(dtype) == cp.float32 else 7 # also check with double precision and explicit specification of data_range - mssim = structural_similarity(cam.astype(dtype), - cam_noisy.astype(dtype), - data_range=255) + mssim = structural_similarity( + cam.astype(dtype), cam_noisy.astype(dtype), data_range=255 + ) assert_almost_equal(mssim, mssim_skimage_0pt17, decimal=decimal) @@ -237,26 +237,32 @@ def test_mssim_mixed_dtype(): def test_ssim_warns_about_data_range(): mssim = structural_similarity(cam, cam_noisy) - with expected_warnings(['Setting data_range based on im1.dtype']): - mssim_uint16 = structural_similarity(cam.astype(cp.uint16), - cam_noisy.astype(cp.uint16)) + with expected_warnings(["Setting data_range based on im1.dtype"]): + mssim_uint16 = structural_similarity( + cam.astype(cp.uint16), cam_noisy.astype(cp.uint16) + ) # The value computed for mssim_uint16 is wrong, because the # dtype of im1 led to infer an erroneous data_range. The user # is getting a warning about avoiding mistakes. assert mssim_uint16 > 0.99 - with expected_warnings(['Setting data_range based on im1.dtype', - 'Inputs have mismatched dtypes']): + with expected_warnings( + [ + "Setting data_range based on im1.dtype", + "Inputs have mismatched dtypes", + ] + ): mssim_mixed = structural_similarity(cam, cam_noisy.astype(cp.int32)) # no warning when user supplies data_range mssim_mixed = structural_similarity( - cam, cam_noisy.astype(cp.float32), data_range=255) + cam, cam_noisy.astype(cp.float32), data_range=255 + ) assert_almost_equal(mssim, mssim_mixed) -@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64]) def test_structural_similarity_small_image(dtype): X = cp.zeros((5, 5), dtype=dtype) # structural_similarity can be computed for small images if win_size is @@ -269,7 +275,7 @@ def test_structural_similarity_small_image(dtype): structural_similarity(X, X) -@pytest.mark.parametrize('dtype', [np.float16, np.float32, np.float64]) +@pytest.mark.parametrize("dtype", [np.float16, np.float32, np.float64]) def test_structural_similarity_errors_on_float_without_data_range(dtype): X = cp.zeros((64, 64), dtype=dtype) with pytest.raises(ValueError): diff --git a/python/cucim/src/cucim/skimage/morphology/__init__.py b/python/cucim/src/cucim/skimage/morphology/__init__.py index b4648e0a9..2c3cbf114 100644 --- a/python/cucim/src/cucim/skimage/morphology/__init__.py +++ b/python/cucim/src/cucim/skimage/morphology/__init__.py @@ -1,13 +1,36 @@ from ._skeletonize import medial_axis, thin -from .binary import (binary_closing, binary_dilation, binary_erosion, - binary_opening) -from .footprints import (ball, cube, diamond, disk, octagon, octahedron, - rectangle, square, star) -from .gray import (black_tophat, closing, dilation, erosion, opening, - white_tophat) +from .binary import ( + binary_closing, + binary_dilation, + binary_erosion, + binary_opening, +) +from .footprints import ( + ball, + cube, + diamond, + disk, + octagon, + octahedron, + 
rectangle, + square, + star, +) +from .gray import ( + black_tophat, + closing, + dilation, + erosion, + opening, + white_tophat, +) from .grayreconstruct import reconstruction -from .isotropic import (isotropic_closing, isotropic_dilation, - isotropic_erosion, isotropic_opening) +from .isotropic import ( + isotropic_closing, + isotropic_dilation, + isotropic_erosion, + isotropic_opening, +) from .misc import remove_small_holes, remove_small_objects __all__ = [ diff --git a/python/cucim/src/cucim/skimage/morphology/_medial_axis_lookup.py b/python/cucim/src/cucim/skimage/morphology/_medial_axis_lookup.py index 37f40b13e..1507c377a 100644 --- a/python/cucim/src/cucim/skimage/morphology/_medial_axis_lookup.py +++ b/python/cucim/src/cucim/skimage/morphology/_medial_axis_lookup.py @@ -5,6 +5,7 @@ # Note: lookup table generated using scikit-image code from # https://github.com/scikit-image/scikit-image/blob/38b595d60befe3a0b4c0742995b9737200a079c6/skimage/morphology/_skeletonize.py#L449-L458 # noqa +# fmt: off lookup_table = np.array( [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, @@ -65,3 +66,4 @@ ], dtype=np.uint8, ) +# fmt: on diff --git a/python/cucim/src/cucim/skimage/morphology/_skeletonize.py b/python/cucim/src/cucim/skimage/morphology/_skeletonize.py index a58ba8681..c69e8ddc0 100644 --- a/python/cucim/src/cucim/skimage/morphology/_skeletonize.py +++ b/python/cucim/src/cucim/skimage/morphology/_skeletonize.py @@ -7,13 +7,14 @@ from cucim.core.operations.morphology import distance_transform_edt from .._shared.utils import check_nD, deprecate_kwarg -from ._medial_axis_lookup import \ - cornerness_table as _medial_axis_cornerness_table -from ._medial_axis_lookup import lookup_table as _medial_axis_lookup_table +from ._medial_axis_lookup import ( + cornerness_table as _medial_axis_cornerness_table, + lookup_table as _medial_axis_lookup_table, +) # --------- Skeletonization and thinning based on Guo and Hall 1989 --------- - +# fmt: off _G123_LUT = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, @@ -44,10 +45,14 @@ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=bool) +# fmt: on -@deprecate_kwarg({"max_iter": "max_num_iter"}, removed_version="23.02.00", - deprecated_version="22.02.00") +@deprecate_kwarg( + {"max_iter": "max_num_iter"}, + removed_version="23.02.00", + deprecated_version="22.02.00", +) def thin(image, max_num_iter=None): """ Perform morphological thinning of a binary image. @@ -121,9 +126,9 @@ def thin(image, max_num_iter=None): skel = cp.asarray(image, dtype=bool).astype(cp.uint8) # neighborhood mask - mask = cp.asarray([[ 8, 4, 2], # noqa - [16, 0, 1], # noqa - [32, 64, 128]], dtype=cp.uint8) + mask = cp.asarray( + [[8, 4, 2], [16, 0, 1], [32, 64, 128]], dtype=cp.uint8 # noqa # noqa + ) G123_LUT = cp.asarray(_G123_LUT) G123P_LUT = cp.asarray(_G123P_LUT) @@ -166,9 +171,9 @@ def _get_tiebreaker(n, seed): @deprecate_kwarg( - {'random_state': 'seed'}, - deprecated_version='23.08', - removed_version='24.06' + {"random_state": "seed"}, + deprecated_version="23.08", + removed_version="24.06", ) def medial_axis(image, mask=None, return_distance=False, *, seed=None): """Compute the medial axis transform of a binary image. 
@@ -301,13 +306,10 @@ def medial_axis(image, mask=None, return_distance=False, *, seed=None): # of skeletons tiebreaker = _get_tiebreaker(n=distance.size, seed=seed) order = cp.lexsort( - cp.stack( - (tiebreaker, corner_score[masked_image], distance), - axis=0 - ) + cp.stack((tiebreaker, corner_score[masked_image], distance), axis=0) ) - # Call _skeletonize_loop on the CPU. It requies a single pass over the + # Call _skeletonize_loop on the CPU. It requires a single pass over the # full array using a specific pixel order, so cannot be run multithreaded! order = cp.asnumpy(order.astype(cp.int32, copy=False)) table = cp.asnumpy(table.astype(cp.uint8, copy=False)) @@ -360,10 +362,7 @@ def _table_lookup(image, table): # at each point in the image # # max possible value of indexer is 512, so just use int16 dtype - kernel = cp.array( - [[256, 128, 64], [32, 16, 8], [4, 2, 1]], - dtype=cp.int16 - ) + kernel = cp.array([[256, 128, 64], [32, 16, 8], [4, 2, 1]], dtype=cp.int16) indexer = ndi.convolve(image, kernel, output=np.int16, mode="constant") image = table[indexer] return image diff --git a/python/cucim/src/cucim/skimage/morphology/binary.py b/python/cucim/src/cucim/skimage/morphology/binary.py index e7b8fa6c5..39c916ca2 100644 --- a/python/cucim/src/cucim/skimage/morphology/binary.py +++ b/python/cucim/src/cucim/skimage/morphology/binary.py @@ -24,13 +24,19 @@ def _iterate_binary_func(binary_func, image, footprint, out): # `brute_force=True`. Update here if a more efficient method for # `iterations > 1` is added. fp, num_iter = footprint[0] - binary_func(image, structure=fp, output=out, iterations=num_iter, - brute_force=True) + binary_func( + image, structure=fp, output=out, iterations=num_iter, brute_force=True + ) for fp, num_iter in footprint[1:]: # Note: out.copy() because the computation cannot be in-place! # SciPy <= 1.7 did not automatically make a copy if needed. - binary_func(out.copy(), structure=fp, output=out, iterations=num_iter, - brute_force=True) + binary_func( + out.copy(), + structure=fp, + output=out, + iterations=num_iter, + brute_force=True, + ) return out @@ -38,9 +44,11 @@ def _iterate_binary_func(binary_func, image, footprint, out): # default with the same dimension as the input image and size 3 along each # axis. @default_footprint -@deprecate_kwarg(kwarg_mapping={'selem': 'footprint'}, - removed_version="23.02.00", - deprecated_version="22.02.00") +@deprecate_kwarg( + kwarg_mapping={"selem": "footprint"}, + removed_version="23.02.00", + deprecated_version="22.02.00", +) def binary_erosion(image, footprint=None, out=None): """Return fast binary morphological erosion of an image. @@ -79,7 +87,7 @@ def binary_erosion(image, footprint=None, out=None): would apply a 9x1 footprint followed by a 1x9 footprint resulting in a net effect that is the same as ``footprint=cp.ones((9, 9))``, but with lower computational cost. Most of the builtin footprints such as - ``skimage.morphology.disk`` provide an option to automically generate a + ``skimage.morphology.disk`` provide an option to automatically generate a footprint sequence of this type. 
""" @@ -90,15 +98,18 @@ def binary_erosion(image, footprint=None, out=None): binary_func = functools.partial(ndi.binary_erosion, border_value=True) return _iterate_binary_func(binary_func, image, footprint, out) - ndi.binary_erosion(image, structure=footprint, output=out, - border_value=True) + ndi.binary_erosion( + image, structure=footprint, output=out, border_value=True + ) return out @default_footprint -@deprecate_kwarg(kwarg_mapping={'selem': 'footprint'}, - removed_version="23.02.00", - deprecated_version="22.02.00") +@deprecate_kwarg( + kwarg_mapping={"selem": "footprint"}, + removed_version="23.02.00", + deprecated_version="22.02.00", +) def binary_dilation(image, footprint=None, out=None): """Return fast binary morphological dilation of an image. @@ -137,7 +148,7 @@ def binary_dilation(image, footprint=None, out=None): would apply a 9x1 footprint followed by a 1x9 footprint resulting in a net effect that is the same as ``footprint=cp.ones((9, 9))``, but with lower computational cost. Most of the builtin footprints such as - ``skimage.morphology.disk`` provide an option to automically generate a + ``skimage.morphology.disk`` provide an option to automatically generate a footprint sequence of this type. """ @@ -152,9 +163,11 @@ def binary_dilation(image, footprint=None, out=None): @default_footprint -@deprecate_kwarg(kwarg_mapping={'selem': 'footprint'}, - removed_version="23.02.00", - deprecated_version="22.02.00") +@deprecate_kwarg( + kwarg_mapping={"selem": "footprint"}, + removed_version="23.02.00", + deprecated_version="22.02.00", +) def binary_opening(image, footprint=None, out=None): """Return fast binary morphological opening of an image. @@ -193,7 +206,7 @@ def binary_opening(image, footprint=None, out=None): would apply a 9x1 footprint followed by a 1x9 footprint resulting in a net effect that is the same as ``footprint=cp.ones((9, 9))``, but with lower computational cost. Most of the builtin footprints such as - ``skimage.morphology.disk`` provide an option to automically generate a + ``skimage.morphology.disk`` provide an option to automatically generate a footprint sequence of this type. """ @@ -203,9 +216,11 @@ def binary_opening(image, footprint=None, out=None): @default_footprint -@deprecate_kwarg(kwarg_mapping={'selem': 'footprint'}, - removed_version="23.02.00", - deprecated_version="22.02.00") +@deprecate_kwarg( + kwarg_mapping={"selem": "footprint"}, + removed_version="23.02.00", + deprecated_version="22.02.00", +) def binary_closing(image, footprint=None, out=None): """Return fast binary morphological closing of an image. @@ -244,7 +259,7 @@ def binary_closing(image, footprint=None, out=None): would apply a 9x1 footprint followed by a 1x9 footprint resulting in a net effect that is the same as ``footprint=cp.ones((9, 9))``, but with lower computational cost. Most of the builtin footprints such as - ``skimage.morphology.disk`` provide an option to automically generate a + ``skimage.morphology.disk`` provide an option to automatically generate a footprint sequence of this type. """ diff --git a/python/cucim/src/cucim/skimage/morphology/footprints.py b/python/cucim/src/cucim/skimage/morphology/footprints.py index 98a68d2dc..cae094a04 100644 --- a/python/cucim/src/cucim/skimage/morphology/footprints.py +++ b/python/cucim/src/cucim/skimage/morphology/footprints.py @@ -15,13 +15,15 @@ # See _nsphere_series_decomposition for full details. 
_nsphere_decompositions = {} _nsphere_decompositions[2] = np.load( - os.path.join(os.path.dirname(__file__), 'disk_decompositions.npy')) + os.path.join(os.path.dirname(__file__), "disk_decompositions.npy") +) _nsphere_decompositions[3] = np.load( - os.path.join(os.path.dirname(__file__), 'ball_decompositions.npy')) + os.path.join(os.path.dirname(__file__), "ball_decompositions.npy") +) def _footprint_is_sequence(footprint): - if hasattr(footprint, '__cuda_array_interface__'): + if hasattr(footprint, "__cuda_array_interface__"): return False def _validate_sequence_element(t): @@ -29,12 +31,13 @@ def _validate_sequence_element(t): isinstance(t, Sequence) and len(t) == 2 and ( - hasattr(t[0], '__cuda_array_interface__') + hasattr(t[0], "__cuda_array_interface__") # can be a shape tuple for square/rectangular footprints or isinstance(t[0], tuple) ) and isinstance(t[1], Integral) ) + if isinstance(footprint, Sequence): if all(isinstance(t, int) for t in footprint): # allow pass through of a single shape tuple @@ -75,9 +78,7 @@ def _shape_from_sequence(footprints, require_odd_size=False): def _odd_size(size, require_odd_size): if require_odd_size and size % 2 == 0: - raise ValueError( - "expected all footprint elements to have odd size" - ) + raise ValueError("expected all footprint elements to have odd size") for d in range(ndim): fp, nreps = footprints[0] @@ -175,19 +176,22 @@ def square(width, dtype=None, *, decomposition=None): else: return cp.ones((width, width), dtype=dtype) - if decomposition == 'separable' or width % 2 == 0: + if decomposition == "separable" or width % 2 == 0: if dtype is None: sequence = (((width, 1), 1), ((1, width), 1)) else: - sequence = ((cp.ones((width, 1), dtype=dtype), 1), - (cp.ones((1, width), dtype=dtype), 1)) - elif decomposition == 'sequence': + sequence = ( + (cp.ones((width, 1), dtype=dtype), 1), + (cp.ones((1, width), dtype=dtype), 1), + ) + elif decomposition == "sequence": # only handles odd widths if dtype is None: sequence = (((3, 3), _decompose_size(width, 3)),) else: - sequence = ((cp.ones((3, 3), dtype=dtype), - _decompose_size(width, 3)),) + sequence = ( + (cp.ones((3, 3), dtype=dtype), _decompose_size(width, 3)), + ) else: raise ValueError(f"Unrecognized decomposition: {decomposition}") return sequence @@ -272,7 +276,7 @@ def rectangle(nrows, ncols, dtype=None, *, decomposition=None): even_rows = nrows % 2 == 0 even_cols = ncols % 2 == 0 - if decomposition == 'separable' or even_rows or even_cols: + if decomposition == "separable" or even_rows or even_cols: if dtype is None: sequence = [((nrows, 1), 1), ((1, ncols), 1)] else: @@ -280,7 +284,7 @@ def rectangle(nrows, ncols, dtype=None, *, decomposition=None): (cp.ones((nrows, 1), dtype=dtype), 1), (cp.ones((1, ncols), dtype=dtype), 1), ] - elif decomposition == 'sequence': + elif decomposition == "sequence": # this branch only support odd nrows, ncols sq_size = 3 sq_reps = _decompose_size(min(nrows, ncols), sq_size) @@ -291,23 +295,15 @@ def rectangle(nrows, ncols, dtype=None, *, decomposition=None): if nrows > ncols: nextra = nrows - ncols if dtype is None: - sequence.append( - ((nextra + 1, 1), 1) - ) + sequence.append(((nextra + 1, 1), 1)) else: - sequence.append( - (cp.ones((nextra + 1, 1), dtype=dtype), 1) - ) + sequence.append((cp.ones((nextra + 1, 1), dtype=dtype), 1)) elif ncols > nrows: nextra = ncols - nrows if dtype is None: - sequence.append( - ((1, nextra + 1), 1) - ) + sequence.append(((1, nextra + 1), 1)) else: - sequence.append( - (cp.ones((1, nextra + 1), dtype=dtype), 1) - ) + 
sequence.append((cp.ones((1, nextra + 1), dtype=dtype), 1)) else: raise ValueError(f"Unrecognized decomposition: {decomposition}") return tuple(sequence) @@ -357,12 +353,12 @@ def diamond(radius, dtype=cp.uint8, *, decomposition=None): """ if decomposition is None: # CuPy Backend: grid is usually small -> faster to generate it in NumPy - L = np.arange(0, radius * 2 + 1) - I, J = np.meshgrid(L, L, sparse=True) + sz = np.arange(0, radius * 2 + 1) + ii, jj = np.meshgrid(sz, sz, sparse=True) return cp.asarray( - np.abs(I - radius) + np.abs(J - radius) <= radius, dtype=dtype + np.abs(ii - radius) + np.abs(jj - radius) <= radius, dtype=dtype ) - elif decomposition == 'sequence': + elif decomposition == "sequence": fp = diamond(1, dtype=dtype, decomposition=None) nreps = _decompose_size(2 * radius + 1, fp.shape[0]) footprint = ((fp, nreps),) @@ -444,9 +440,9 @@ def _nsphere_series_decomposition(radius, ndim, dtype=None): sequence.append((cp.asarray(d), num_diamond)) if num_square > 0: if dtype is None: - sequence.append(((3, ) * ndim, num_square)) + sequence.append(((3,) * ndim, num_square)) else: - sequence.append((cp.ones((3, ) * ndim, dtype=_dtype), num_square)) + sequence.append((cp.ones((3,) * ndim, dtype=_dtype), num_square)) return tuple(sequence) @@ -465,9 +461,7 @@ def _t_shaped_element_series(ndim=2, dtype=cp.uint8): if ndim == 2: # The n-dimensional case produces the same set of footprints, but # the 2D example is retained here for clarity. - t0 = np.array([[1, 1, 1], - [0, 1, 0], - [0, 1, 0]], dtype=dtype) + t0 = np.array([[1, 1, 1], [0, 1, 0], [0, 1, 0]], dtype=dtype) t90 = cp.asarray(np.rot90(t0, 1)) t180 = cp.asarray(np.rot90(t0, 2)) t270 = cp.asarray(np.rot90(t0, 3)) @@ -574,11 +568,12 @@ def disk(radius, dtype=cp.uint8, *, strict_radius=True, decomposition=None): if not strict_radius: radius += 0.5 return cp.asarray((X * X + Y * Y) <= radius * radius, dtype=dtype) - elif decomposition == 'sequence': + elif decomposition == "sequence": sequence = _nsphere_series_decomposition(radius, ndim=2, dtype=dtype) - elif decomposition == 'crosses': - fp = disk(radius, dtype, strict_radius=strict_radius, - decomposition=None) + elif decomposition == "crosses": + fp = disk( + radius, dtype, strict_radius=strict_radius, decomposition=None + ) sequence = _cross_decomposition(fp) return sequence @@ -599,7 +594,7 @@ def _cross(r0, r1, dtype=cp.uint8): def _cross_decomposition(footprint, dtype=cp.uint8): - """ Decompose a symmetric convex footprint into cross-shaped elements. + """Decompose a symmetric convex footprint into cross-shaped elements. This is a decomposition of the footprint into a sequence of (possibly asymmetric) cross-shaped elements. 
This technique was proposed in @@ -612,7 +607,7 @@ def _cross_decomposition(footprint, dtype=cp.uint8): :DOI:`10.1117/12.23608` """ footprint = cp.asnumpy(footprint) - quadrant = footprint[footprint.shape[0] // 2:, footprint.shape[1] // 2:] + quadrant = footprint[footprint.shape[0] // 2 :, footprint.shape[1] // 2 :] col_sums = quadrant.sum(0, dtype=int) col_sums = np.concatenate((col_sums, np.asarray([0], dtype=int))) i_prev = 0 @@ -706,14 +701,14 @@ def ellipse(width, height, dtype=cp.uint8, *, decomposition=None): # Note: no CUDA counterpart for draw.ellipse so compute in NumPy # CuPy Backend: grid is usually small -> faster to generate it in NumPy return cp.asarray(footprint) - elif decomposition == 'crosses': + elif decomposition == "crosses": fp = ellipse(width, height, dtype, decomposition=None) sequence = _cross_decomposition(fp) return sequence def cube(width, dtype=None, *, decomposition=None): - """ Generates a cube-shaped footprint. + """Generates a cube-shaped footprint. This is the 3D equivalent of a square. Every pixel along the perimeter has a chessboard distance @@ -767,10 +762,12 @@ def cube(width, dtype=None, *, decomposition=None): return (width, width, width) else: return cp.ones((width, width, width), dtype=dtype) - if decomposition == 'separable' or width % 2 == 0: + if decomposition == "separable" or width % 2 == 0: if dtype is None: sequence = ( - ((width, 1, 1), 1), ((1, width, 1), 1), ((1, 1, width), 1) + ((width, 1, 1), 1), + ((1, width, 1), 1), + ((1, 1, width), 1), ) else: sequence = ( @@ -778,12 +775,10 @@ def cube(width, dtype=None, *, decomposition=None): (cp.ones((1, width, 1), dtype=dtype), 1), (cp.ones((1, 1, width), dtype=dtype), 1), ) - elif decomposition == 'sequence': + elif decomposition == "sequence": # only handles odd widths if dtype is None: - sequence = ( - ((3, 3, 3), _decompose_size(width, 3)), - ) + sequence = (((3, 3, 3), _decompose_size(width, 3)),) else: sequence = ( (cp.ones((3, 3, 3), dtype=dtype), _decompose_size(width, 3)), @@ -839,13 +834,13 @@ def octahedron(radius, dtype=cp.uint8, *, decomposition=None): if decomposition is None: n = 2 * radius + 1 Z, Y, X = np.ogrid[ - -radius:radius:n * 1j, - -radius:radius:n * 1j, - -radius:radius:n * 1j, + -radius : radius : n * 1j, + -radius : radius : n * 1j, + -radius : radius : n * 1j, ] s = np.abs(X) + np.abs(Y) + np.abs(Z) footprint = cp.array(s <= radius, dtype=dtype) - elif decomposition == 'sequence': + elif decomposition == "sequence": fp = octahedron(1, dtype=dtype, decomposition=None) nreps = _decompose_size(2 * radius + 1, fp.shape[0]) footprint = ((fp, nreps),) @@ -912,15 +907,15 @@ def ball(radius, dtype=cp.uint8, *, strict_radius=True, decomposition=None): if decomposition is None: n = 2 * radius + 1 Z, Y, X = np.ogrid[ - -radius:radius:n * 1j, - -radius:radius:n * 1j, - -radius:radius:n * 1j, + -radius : radius : n * 1j, + -radius : radius : n * 1j, + -radius : radius : n * 1j, ] s = X * X + Y * Y + Z * Z if not strict_radius: radius += 0.5 return cp.array(s <= radius * radius, dtype=dtype) - elif decomposition == 'sequence': + elif decomposition == "sequence": sequence = _nsphere_series_decomposition(radius, ndim=3, dtype=dtype) else: raise ValueError(f"Unrecognized decomposition: {decomposition}") @@ -990,7 +985,7 @@ def octagon(m, n, dtype=cp.uint8, *, decomposition=None): footprint[m + n - 1, -1] = 1 footprint = convex_hull_image(footprint).astype(dtype) footprint = cp.array(footprint) - elif decomposition == 'sequence': + elif decomposition == "sequence": # special handling 
for edge cases with small m and/or n if m <= 2 and n <= 2: return ((octagon(m, n, dtype=dtype, decomposition=None), 1),) @@ -1001,7 +996,7 @@ def octagon(m, n, dtype=cp.uint8, *, decomposition=None): n -= 1 sequence = [] if m > 1: - sequence += list(square(m, dtype=dtype, decomposition='sequence')) + sequence += list(square(m, dtype=dtype, decomposition="sequence")) if n > 0: sequence += [(diamond(1, dtype=dtype, decomposition=None), n)] footprint = tuple(sequence) @@ -1047,7 +1042,7 @@ def star(a, dtype=cp.uint8): m = 2 * a + 1 n = a // 2 footprint_square = np.zeros((m + 2 * n, m + 2 * n)) - footprint_square[n:m + n, n:m + n] = 1 + footprint_square[n : m + n, n : m + n] = 1 c = (m + 2 * n - 1) // 2 footprint_rotated = np.zeros((m + 2 * n, m + 2 * n)) diff --git a/python/cucim/src/cucim/skimage/morphology/gray.py b/python/cucim/src/cucim/skimage/morphology/gray.py index 0de4bb12d..9f7eea0a3 100644 --- a/python/cucim/src/cucim/skimage/morphology/gray.py +++ b/python/cucim/src/cucim/skimage/morphology/gray.py @@ -13,8 +13,14 @@ from .footprints import _footprint_is_sequence, _shape_from_sequence from .misc import default_footprint -__all__ = ['erosion', 'dilation', 'opening', 'closing', 'white_tophat', - 'black_tophat'] +__all__ = [ + "erosion", + "dilation", + "opening", + "closing", + "white_tophat", + "black_tophat", +] def _iterate_gray_func(gray_func, image, footprints, out): @@ -117,8 +123,8 @@ def _invert_footprint(footprint): References ---------- - .. [1] https://github.com/scipy/scipy/blob/ec20ababa400e39ac3ffc9148c01ef86d5349332/scipy/ndimage/morphology.py#L1285 # noqa - """ + .. [1] https://github.com/scipy/scipy/blob/ec20ababa400e39ac3ffc9148c01ef86d5349332/scipy/ndimage/morphology.py#L1285 + """ # noqa: E501 if isinstance(footprint, tuple): # fully populated rectangle is symmetric return footprint @@ -146,6 +152,7 @@ def pad_for_eccentric_footprints(func): -------- opening, closing. """ + @functools.wraps(func) def func_out(image, footprint, out=None, *args, **kwargs): pad_widths = [] @@ -168,7 +175,7 @@ def func_out(image, footprint, out=None, *args, **kwargs): axis_pad_width = 0 pad_widths.append((axis_pad_width,) * 2) if padding: - image = pad(image, pad_widths, mode='edge') + image = pad(image, pad_widths, mode="edge") out_temp = cp.empty_like(image) else: out_temp = out @@ -178,12 +185,16 @@ def func_out(image, footprint, out=None, *args, **kwargs): else: out = out_temp return out + return func_out @default_footprint -@deprecate_kwarg(kwarg_mapping={'selem': 'footprint'}, - removed_version="23.02.00", deprecated_version="22.02.00") +@deprecate_kwarg( + kwarg_mapping={"selem": "footprint"}, + removed_version="23.02.00", + deprecated_version="22.02.00", +) def erosion(image, footprint=None, out=None, shift_x=False, shift_y=False): """Return grayscale morphological erosion of an image. @@ -226,7 +237,7 @@ def erosion(image, footprint=None, out=None, shift_x=False, shift_y=False): would apply a 9x1 footprint followed by a 1x9 footprint resulting in a net effect that is the same as ``footprint=cp.ones((9, 9))``, but with lower computational cost. Most of the builtin footprints such as - ``skimage.morphology.disk`` provide an option to automically generate a + ``skimage.morphology.disk`` provide an option to automatically generate a footprint sequence of this type. 
Examples @@ -251,8 +262,9 @@ def erosion(image, footprint=None, out=None, shift_x=False, shift_y=False): out = cp.empty_like(image) if _footprint_is_sequence(footprint): - footprints = tuple((_shift_footprint(fp, shift_x, shift_y), n) - for fp, n in footprint) + footprints = tuple( + (_shift_footprint(fp, shift_x, shift_y), n) for fp, n in footprint + ) return _iterate_gray_func(ndi.grey_erosion, image, footprints, out) if isinstance(footprint, tuple): @@ -273,8 +285,11 @@ def erosion(image, footprint=None, out=None, shift_x=False, shift_y=False): @default_footprint -@deprecate_kwarg(kwarg_mapping={'selem': 'footprint'}, - removed_version="23.02.00", deprecated_version="22.02.00") +@deprecate_kwarg( + kwarg_mapping={"selem": "footprint"}, + removed_version="23.02.00", + deprecated_version="22.02.00", +) def dilation(image, footprint=None, out=None, shift_x=False, shift_y=False): """Return grayscale morphological dilation of an image. @@ -318,7 +333,7 @@ def dilation(image, footprint=None, out=None, shift_x=False, shift_y=False): would apply a 9x1 footprint followed by a 1x9 footprint resulting in a net effect that is the same as ``footprint=cp.ones((9, 9))``, but with lower computational cost. Most of the builtin footprints such as - ``skimage.morphology.disk`` provide an option to automically generate a + ``skimage.morphology.disk`` provide an option to automatically generate a footprint sequence of this type. Examples @@ -373,8 +388,11 @@ def dilation(image, footprint=None, out=None, shift_x=False, shift_y=False): return out -@deprecate_kwarg(kwarg_mapping={'selem': 'footprint'}, - removed_version="23.02.00", deprecated_version="22.02.00") +@deprecate_kwarg( + kwarg_mapping={"selem": "footprint"}, + removed_version="23.02.00", + deprecated_version="22.02.00", +) @default_footprint @pad_for_eccentric_footprints def opening(image, footprint=None, out=None): @@ -412,7 +430,7 @@ def opening(image, footprint=None, out=None): would apply a 9x1 footprint followed by a 1x9 footprint resulting in a net effect that is the same as ``footprint=cp.ones((9, 9))``, but with lower computational cost. Most of the builtin footprints such as - ``skimage.morphology.disk`` provide an option to automically generate a + ``skimage.morphology.disk`` provide an option to automatically generate a footprint sequence of this type. Examples @@ -439,8 +457,11 @@ def opening(image, footprint=None, out=None): return out -@deprecate_kwarg(kwarg_mapping={'selem': 'footprint'}, - removed_version="23.02.00", deprecated_version="22.02.00") +@deprecate_kwarg( + kwarg_mapping={"selem": "footprint"}, + removed_version="23.02.00", + deprecated_version="22.02.00", +) @default_footprint @pad_for_eccentric_footprints def closing(image, footprint=None, out=None): @@ -478,7 +499,7 @@ def closing(image, footprint=None, out=None): would apply a 9x1 footprint followed by a 1x9 footprint resulting in a net effect that is the same as ``footprint=cp.ones((9, 9))``, but with lower computational cost. Most of the builtin footprints such as - ``skimage.morphology.disk`` provide an option to automically generate a + ``skimage.morphology.disk`` provide an option to automatically generate a footprint sequence of this type. 
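For the ``opening``/``closing`` hunks above, the defining identity is that opening is an erosion followed by a dilation with the same footprint (closing is the reverse). A sketch with a symmetric, odd-sized footprint, for which the identity holds exactly:

```python
import cupy as cp
from cucim.skimage import morphology

image = cp.random.random((48, 48)).astype(cp.float32)
fp = morphology.diamond(2)  # symmetric footprint, no padding needed

opened = morphology.opening(image, fp)
manual = morphology.dilation(morphology.erosion(image, fp), fp)
assert cp.allclose(opened, manual)
```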
Examples @@ -523,8 +544,11 @@ def _white_tophat_seqence(image, footprints, out): @default_footprint -@deprecate_kwarg(kwarg_mapping={'selem': 'footprint'}, - removed_version="23.02.00", deprecated_version="22.02.00") +@deprecate_kwarg( + kwarg_mapping={"selem": "footprint"}, + removed_version="23.02.00", + deprecated_version="22.02.00", +) def white_tophat(image, footprint=None, out=None): """Return white top hat of an image. @@ -559,7 +583,7 @@ def white_tophat(image, footprint=None, out=None): would apply a 9x1 footprint followed by a 1x9 footprint resulting in a net effect that is the same as ``footprint=cp.ones((9, 9))``, but with lower computational cost. Most of the builtin footprints such as - ``skimage.morphology.disk`` provide an option to automically generate a + ``skimage.morphology.disk`` provide an option to automatically generate a footprint sequence of this type. See Also @@ -622,8 +646,11 @@ def white_tophat(image, footprint=None, out=None): @default_footprint -@deprecate_kwarg(kwarg_mapping={'selem': 'footprint'}, - removed_version="23.02.00", deprecated_version="22.02.00") +@deprecate_kwarg( + kwarg_mapping={"selem": "footprint"}, + removed_version="23.02.00", + deprecated_version="22.02.00", +) def black_tophat(image, footprint=None, out=None): """Return black top hat of an image. @@ -659,7 +686,7 @@ def black_tophat(image, footprint=None, out=None): would apply a 9x1 footprint followed by a 1x9 footprint resulting in a net effect that is the same as ``footprint=cp.ones((9, 9))``, but with lower computational cost. Most of the builtin footprints such as - ``skimage.morphology.disk`` provide an option to automically generate a + ``skimage.morphology.disk`` provide an option to automatically generate a footprint sequence of this type. See Also diff --git a/python/cucim/src/cucim/skimage/morphology/grayreconstruct.py b/python/cucim/src/cucim/skimage/morphology/grayreconstruct.py index a04a41b8a..7e0ba77ea 100644 --- a/python/cucim/src/cucim/skimage/morphology/grayreconstruct.py +++ b/python/cucim/src/cucim/skimage/morphology/grayreconstruct.py @@ -16,12 +16,15 @@ from .._shared.utils import deprecate_kwarg -old_reconstruction_pyx = Version(skimage.__version__) < Version('0.20.0') +old_reconstruction_pyx = Version(skimage.__version__) < Version("0.20.0") -@deprecate_kwarg(kwarg_mapping={'selem': 'footprint'}, - removed_version="23.02.00", deprecated_version="22.02.00") -def reconstruction(seed, mask, method='dilation', footprint=None, offset=None): +@deprecate_kwarg( + kwarg_mapping={"selem": "footprint"}, + removed_version="23.02.00", + deprecated_version="22.02.00", +) +def reconstruction(seed, mask, method="dilation", footprint=None, offset=None): """Perform a morphological reconstruction of an image. Morphological reconstruction by dilation is similar to basic morphological @@ -133,13 +136,17 @@ def reconstruction(seed, mask, method='dilation', footprint=None, offset=None): from ..filters._rank_order import rank_order assert tuple(seed.shape) == tuple(mask.shape) - if method == 'dilation' and cp.any(seed > mask): # synchronize! - raise ValueError("Intensity of seed image must be less than that " - "of the mask image for reconstruction by dilation.") - - elif method == 'erosion' and cp.any(seed < mask): # synchronize! - raise ValueError("Intensity of seed image must be greater than that " - "of the mask image for reconstruction by erosion.") + if method == "dilation" and cp.any(seed > mask): # synchronize! 
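The validation just reflowed enforces ``seed <= mask`` for reconstruction by dilation (and the reverse for erosion). The classic use of that contract is h-dome extraction; a 1-D sketch, illustrative only:

```python
import cupy as cp
from cucim.skimage.morphology import reconstruction

# Lower the seed by h, reconstruct by dilation (seed <= mask holds by
# construction), then subtract: only peaks taller than h survive.
signal = cp.array([0.0, 1.0, 4.0, 1.0, 0.0, 3.0, 8.0, 3.0, 0.0])
h = 2.0
background = reconstruction(signal - h, signal, method="dilation")
h_dome = signal - background
```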
+ raise ValueError( + "Intensity of seed image must be less than that " + "of the mask image for reconstruction by dilation." + ) + + elif method == "erosion" and cp.any(seed < mask): # synchronize! + raise ValueError( + "Intensity of seed image must be greater than that " + "of the mask image for reconstruction by erosion." + ) try: from skimage.morphology._grayreconstruct import reconstruction_loop @@ -172,18 +179,21 @@ def reconstruction(seed, mask, method='dilation', footprint=None, offset=None): footprint[tuple(slice(d, d + 1) for d in offset)] = False # Make padding for edges of reconstructed image so we can ignore boundaries - dims = (2, ) + \ - tuple(s1 + s2 - 1 for s1, s2 in zip(seed.shape, footprint.shape)) + dims = (2,) + tuple( + s1 + s2 - 1 for s1, s2 in zip(seed.shape, footprint.shape) + ) inside_slices = tuple(slice(o, o + s) for o, s in zip(offset, seed.shape)) # Set padded region to minimum image intensity and mask along first axis so # we can interleave image and mask pixels when sorting. - if method == 'dilation': + if method == "dilation": pad_value = cp.min(seed).item() - elif method == 'erosion': + elif method == "erosion": pad_value = cp.max(seed).item() else: - raise ValueError("Reconstruction method can be one of 'erosion' " - f"or 'dilation'. Got '{method}'.") + raise ValueError( + "Reconstruction method can be one of 'erosion' " + f"or 'dilation'. Got '{method}'." + ) # CuPy Backend: modified to allow images_dtype based on input dtype # instead of float64 images_dtype = np.promote_types(seed.dtype, mask.dtype) @@ -206,19 +216,24 @@ def reconstruction(seed, mask, method='dilation', footprint=None, offset=None): # a flattened array value_stride = np.array(images.strides[1:]) // images.dtype.itemsize image_stride = images.strides[0] // images.dtype.itemsize - footprint_mgrid = np.mgrid[[slice(-o, d - o) - for d, o in zip(footprint.shape, offset)]] + footprint_mgrid = np.mgrid[ + [slice(-o, d - o) for d, o in zip(footprint.shape, offset)] + ] footprint_offsets = footprint_mgrid[:, footprint].transpose() - nb_strides = np.array([np.sum(value_stride * footprint_offset) - for footprint_offset in footprint_offsets], - signed_int_dtype) + nb_strides = np.array( + [ + np.sum(value_stride * footprint_offset) + for footprint_offset in footprint_offsets + ], + signed_int_dtype, + ) # CuPy Backend: changed flatten to ravel to avoid copy images = images.ravel() # Erosion goes smallest to largest; dilation goes largest to smallest. index_sorted = cp.argsort(images).astype(signed_int_dtype, copy=False) - if method == 'dilation': + if method == "dilation": index_sorted = index_sorted[::-1] # Make a linked list of pixels sorted by value. -1 is the list terminator. @@ -229,17 +244,16 @@ def reconstruction(seed, mask, method='dilation', footprint=None, offset=None): next[index_sorted[:-1]] = index_sorted[1:] # Cython inner-loop compares the rank of pixel values. - if method == 'dilation': + if method == "dilation": value_rank, value_map = rank_order(images) - elif method == 'erosion': + elif method == "erosion": value_rank, value_map = rank_order(-images) value_map = -value_map # TODO: implement reconstruction_loop on the GPU? For now, run it on host. start = index_sorted[0] value_rank = cp.asnumpy(value_rank.astype(unsigned_int_dtype, copy=False)) - reconstruction_loop(value_rank, prev, next, nb_strides, start, - image_stride) + reconstruction_loop(value_rank, prev, next, nb_strides, start, image_stride) # Reshape reconstructed image to original image shape and remove padding. 
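The linked-list loop refactored above is an optimized form of the textbook algorithm: repeatedly dilate the seed, clamp by the mask, and stop at a fixed point. A naive reference version, for intuition only (a hypothetical helper; boundary handling differs from the optimized implementation, which pads with the image extremum):

```python
import cupy as cp
import cupyx.scipy.ndimage as ndi

def reconstruct_by_dilation_naive(seed, mask):
    """Iterate grey dilation clamped by the mask until stable."""
    footprint = ndi.generate_binary_structure(seed.ndim, 1)
    current = seed.copy()
    while True:
        dilated = ndi.grey_dilation(current, footprint=footprint)
        clamped = cp.minimum(dilated, mask)
        if bool((clamped == current).all()):  # fixed point reached
            return clamped
        current = clamped
```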
value_rank = cp.asarray(value_rank[:image_stride]) diff --git a/python/cucim/src/cucim/skimage/morphology/grey.py b/python/cucim/src/cucim/skimage/morphology/grey.py index bd2c6ee64..bb892ec3c 100644 --- a/python/cucim/src/cucim/skimage/morphology/grey.py +++ b/python/cucim/src/cucim/skimage/morphology/grey.py @@ -1,14 +1,27 @@ import warnings -from .gray import (black_tophat, closing, dilation, erosion, opening, # noqa - white_tophat) +from .gray import ( # noqa + black_tophat, + closing, + dilation, + erosion, + opening, + white_tophat, +) -__all__ = ['erosion', 'dilation', 'opening', 'closing', 'white_tophat', - 'black_tophat'] +__all__ = [ + "erosion", + "dilation", + "opening", + "closing", + "white_tophat", + "black_tophat", +] warnings.warn( "Importing from cucim.skimage.morphology.grey is deprecated. " "Please import from cucim.skimage.morphology instead.", - FutureWarning, stacklevel=2 + FutureWarning, + stacklevel=2, ) diff --git a/python/cucim/src/cucim/skimage/morphology/greyreconstruct.py b/python/cucim/src/cucim/skimage/morphology/greyreconstruct.py index a95421ccf..1f78b4e6a 100644 --- a/python/cucim/src/cucim/skimage/morphology/greyreconstruct.py +++ b/python/cucim/src/cucim/skimage/morphology/greyreconstruct.py @@ -5,5 +5,6 @@ warnings.warn( "Importing from cucim.skimage.morphology.greyreconstruct is deprecated. " "Please import from cucim.skimage.morphology instead.", - FutureWarning, stacklevel=2 + FutureWarning, + stacklevel=2, ) diff --git a/python/cucim/src/cucim/skimage/morphology/misc.py b/python/cucim/src/cucim/skimage/morphology/misc.py index ad25cbff4..da54280ee 100644 --- a/python/cucim/src/cucim/skimage/morphology/misc.py +++ b/python/cucim/src/cucim/skimage/morphology/misc.py @@ -9,12 +9,18 @@ # Our function names don't exactly correspond to ndimages. # This dictionary translates from our names to scipy's. -funcs = ('erosion', 'dilation', 'opening', 'closing') -skimage2ndimage = {x: 'grey_' + x for x in funcs} +funcs = ("erosion", "dilation", "opening", "closing") +skimage2ndimage = {x: "grey_" + x for x in funcs} # These function names are the same in ndimage. -funcs = ('binary_erosion', 'binary_dilation', 'binary_opening', - 'binary_closing', 'black_tophat', 'white_tophat') +funcs = ( + "binary_erosion", + "binary_dilation", + "binary_opening", + "binary_closing", + "black_tophat", + "white_tophat", +) skimage2ndimage.update({x: x for x in funcs}) @@ -47,8 +53,10 @@ def func_out(image, footprint=None, *args, **kwargs): def _check_dtype_supported(ar): # Should use `issubdtype` for bool below, but there's a bug in numpy 1.7 if not (ar.dtype == bool or cp.issubdtype(ar.dtype, cp.integer)): - raise TypeError("Only bool or integer image types are supported. " - f"Got {ar.dtype}.") + raise TypeError( + "Only bool or integer image types are supported. " + f"Got {ar.dtype}." + ) def remove_small_objects(ar, min_size=64, connectivity=1, *, out=None): @@ -128,13 +136,17 @@ def remove_small_objects(ar, min_size=64, connectivity=1, *, out=None): try: component_sizes = cp.bincount(ccs.ravel()) except ValueError: - raise ValueError("Negative value labels are not supported. Try " - "relabeling the input with `scipy.ndimage.label` or " - "`skimage.morphology.label`.") + raise ValueError( + "Negative value labels are not supported. Try " + "relabeling the input with `scipy.ndimage.label` or " + "`skimage.morphology.label`." + ) if len(component_sizes) == 2 and out.dtype != bool: - warn("Only one label was provided to `remove_small_objects`. 
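The ``remove_small_objects`` warning reflowed just below fires when a labeled (non-boolean) image containing a single label is passed; the intended call pattern is on a boolean mask. A minimal sketch:

```python
import cupy as cp
from cucim.skimage.morphology import remove_small_objects

mask = cp.zeros((8, 8), dtype=bool)
mask[1:3, 1:3] = True   # 4-pixel component: below min_size, dropped
mask[4:8, 4:8] = True   # 16-pixel component: kept
cleaned = remove_small_objects(mask, min_size=6)
assert int(cleaned.sum()) == 16
```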
" - "Did you mean to use a boolean array?") + warn( + "Only one label was provided to `remove_small_objects`. " + "Did you mean to use a boolean array?" + ) too_small = component_sizes < min_size too_small_mask = too_small[ccs] @@ -207,8 +219,11 @@ def remove_small_holes(ar, area_threshold=64, connectivity=1, *, out=None): # Creates warning if image is an integer image if ar.dtype != bool: - warn("Any labeled images will be returned as a boolean array. " - "Did you mean to use a boolean array?", UserWarning) + warn( + "Any labeled images will be returned as a boolean array. " + "Did you mean to use a boolean array?", + UserWarning, + ) if out is not None: if out.dtype != bool: diff --git a/python/cucim/src/cucim/skimage/morphology/tests/test_binary.py b/python/cucim/src/cucim/skimage/morphology/tests/test_binary.py index fd075bdac..834dad5de 100755 --- a/python/cucim/src/cucim/skimage/morphology/tests/test_binary.py +++ b/python/cucim/src/cucim/skimage/morphology/tests/test_binary.py @@ -21,8 +21,8 @@ def test_non_square_image(): @pytest.mark.parametrize( - 'function', - ['binary_erosion', 'binary_dilation', 'binary_closing', 'binary_opening'] + "function", + ["binary_erosion", "binary_dilation", "binary_closing", "binary_opening"], ) def test_selem_kwarg_deprecation(function): with expected_warnings(["`selem` is a deprecated argument name"]): @@ -58,12 +58,12 @@ def test_binary_opening(): def _get_decomp_test_data(function, ndim=2): - if function == 'binary_erosion': - img = cp.ones((17, ) * ndim, dtype=cp.uint8) - img[(8, ) * ndim] = 0 - elif function == 'binary_dilation': - img = cp.zeros((17, ) * ndim, dtype=cp.uint8) - img[(8, ) * ndim] = 1 + if function == "binary_erosion": + img = cp.ones((17,) * ndim, dtype=cp.uint8) + img[(8,) * ndim] = 0 + elif function == "binary_dilation": + img = cp.zeros((17,) * ndim, dtype=cp.uint8) + img[(8,) * ndim] = 1 else: img = cp.asarray(data.binary_blobs(32, n_dim=ndim, seed=1)) return img @@ -74,7 +74,7 @@ def _get_decomp_test_data(function, ndim=2): ["binary_erosion", "binary_dilation", "binary_closing", "binary_opening"], ) @pytest.mark.parametrize("size", (3, 4, 11)) -@pytest.mark.parametrize("decomposition", ['separable', 'sequence']) +@pytest.mark.parametrize("decomposition", ["separable", "sequence"]) def test_square_decomposition(function, size, decomposition): """Validate footprint decomposition for various shapes. @@ -95,7 +95,7 @@ def test_square_decomposition(function, size, decomposition): ) @pytest.mark.parametrize("nrows", (3, 4, 11)) @pytest.mark.parametrize("ncols", (3, 4, 11)) -@pytest.mark.parametrize("decomposition", ['separable', 'sequence']) +@pytest.mark.parametrize("decomposition", ["separable", "sequence"]) def test_rectangle_decomposition(function, nrows, ncols, decomposition): """Validate footprint decomposition for various shapes. @@ -116,7 +116,7 @@ def test_rectangle_decomposition(function, nrows, ncols, decomposition): ) @pytest.mark.parametrize("m", (0, 1, 2, 3, 4, 5)) @pytest.mark.parametrize("n", (0, 1, 2, 3, 4, 5)) -@pytest.mark.parametrize("decomposition", ['sequence']) +@pytest.mark.parametrize("decomposition", ["sequence"]) def test_octagon_decomposition(function, m, n, decomposition): """Validate footprint decomposition for various shapes. 
@@ -140,7 +140,7 @@ def test_octagon_decomposition(function, m, n, decomposition): ["binary_erosion", "binary_dilation", "binary_closing", "binary_opening"], ) @pytest.mark.parametrize("radius", (1, 2, 5)) -@pytest.mark.parametrize("decomposition", ['sequence']) +@pytest.mark.parametrize("decomposition", ["sequence"]) def test_diamond_decomposition(function, radius, decomposition): """Validate footprint decomposition for various shapes. @@ -160,7 +160,7 @@ def test_diamond_decomposition(function, radius, decomposition): ["binary_erosion", "binary_dilation", "binary_closing", "binary_opening"], ) @pytest.mark.parametrize("size", (3, 4, 5)) -@pytest.mark.parametrize("decomposition", ['separable', 'sequence']) +@pytest.mark.parametrize("decomposition", ["separable", "sequence"]) def test_cube_decomposition(function, size, decomposition): """Validate footprint decomposition for various shapes. @@ -180,7 +180,7 @@ def test_cube_decomposition(function, size, decomposition): ["binary_erosion", "binary_dilation", "binary_closing", "binary_opening"], ) @pytest.mark.parametrize("radius", (1, 2, 3)) -@pytest.mark.parametrize("decomposition", ['sequence']) +@pytest.mark.parametrize("decomposition", ["sequence"]) def test_octahedron_decomposition(function, radius, decomposition): """Validate footprint decomposition for various shapes. @@ -215,8 +215,12 @@ def test_out_argument(): testing.assert_array_equal(out, func(img, footprint)) -binary_functions = [morphology.binary_erosion, morphology.binary_dilation, - morphology.binary_opening, morphology.binary_closing] +binary_functions = [ + morphology.binary_erosion, + morphology.binary_dilation, + morphology.binary_opening, + morphology.binary_closing, +] @pytest.mark.parametrize("function", binary_functions) @@ -256,7 +260,8 @@ def test_3d_fallback_default_footprint(): binary_3d_fallback_functions = [ - morphology.binary_opening, morphology.binary_closing + morphology.binary_opening, + morphology.binary_closing, ] @@ -274,9 +279,9 @@ def test_3d_fallback_cube_footprint(function): def test_2d_ndimage_equivalence(): image = cp.zeros((9, 9), cp.uint16) - image[2:-2, 2:-2] = 2 ** 14 - image[3:-3, 3:-3] = 2 ** 15 - image[4, 4] = 2 ** 16 - 1 + image[2:-2, 2:-2] = 2**14 + image[3:-3, 3:-3] = 2**15 + image[4, 4] = 2**16 - 1 bin_opened = morphology.binary_opening(image) bin_closed = morphology.binary_closing(image) @@ -291,9 +296,9 @@ def test_2d_ndimage_equivalence(): def test_binary_output_2d(): image = cp.zeros((9, 9), cp.uint16) - image[2:-2, 2:-2] = 2 ** 14 - image[3:-3, 3:-3] = 2 ** 15 - image[4, 4] = 2 ** 16 - 1 + image[2:-2, 2:-2] = 2**14 + image[3:-3, 3:-3] = 2**15 + image[4, 4] = 2**16 - 1 bin_opened = morphology.binary_opening(image) bin_closed = morphology.binary_closing(image) @@ -312,9 +317,9 @@ def test_binary_output_2d(): def test_binary_output_3d(): image = cp.zeros((9, 9, 9), cp.uint16) - image[2:-2, 2:-2, 2:-2] = 2 ** 14 - image[3:-3, 3:-3, 3:-3] = 2 ** 15 - image[4, 4, 4] = 2 ** 16 - 1 + image[2:-2, 2:-2, 2:-2] = 2**14 + image[3:-3, 3:-3, 3:-3] = 2**15 + image[4, 4, 4] = 2**16 - 1 bin_opened = morphology.binary_opening(image) bin_closed = morphology.binary_closing(image) diff --git a/python/cucim/src/cucim/skimage/morphology/tests/test_footprints.py b/python/cucim/src/cucim/skimage/morphology/tests/test_footprints.py index 160b31bf6..1c46d970b 100644 --- a/python/cucim/src/cucim/skimage/morphology/tests/test_footprints.py +++ b/python/cucim/src/cucim/skimage/morphology/tests/test_footprints.py @@ -20,7 +20,7 @@ def test_square_footprint(self): 
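The binary tests above all follow one pattern: run a binary operation with the full footprint and with its decomposed form, and require identical output. Sketched directly (the diamond "sequence" decomposition is exact):

```python
import cupy as cp
from cucim.skimage import morphology

img = cp.zeros((17, 17), dtype=bool)
img[8, 8] = True

fp_full = morphology.diamond(3)
fp_seq = morphology.diamond(3, decomposition="sequence")
assert cp.array_equal(
    morphology.binary_dilation(img, fp_full),
    morphology.binary_dilation(img, fp_seq),
)
```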
"""Test square footprints""" for k in range(0, 5): actual_mask = footprints.square(k, dtype=cp.uint8) - expected_mask = np.ones((k, k), dtype='uint8') + expected_mask = np.ones((k, k), dtype="uint8") assert_array_equal(expected_mask, actual_mask) def test_rectangle_footprint(self): @@ -28,14 +28,14 @@ def test_rectangle_footprint(self): for i in range(0, 5): for j in range(0, 5): actual_mask = footprints.rectangle(i, j, dtype=cp.uint8) - expected_mask = np.ones((i, j), dtype='uint8') + expected_mask = np.ones((i, j), dtype="uint8") assert_array_equal(expected_mask, actual_mask) def test_cube_footprint(self): """Test cube footprints""" for k in range(0, 5): actual_mask = footprints.cube(k, dtype=cp.uint8) - expected_mask = np.ones((k, k, k), dtype='uint8') + expected_mask = np.ones((k, k, k), dtype="uint8") assert_array_equal(expected_mask, actual_mask) def strel_worker(self, fn, func): @@ -80,8 +80,9 @@ def test_footprint_ball(self): def test_footprint_octahedron(self): """Test octahedron footprints""" - self.strel_worker_3d("data/diamond-matlab-output.npz", - footprints.octahedron) + self.strel_worker_3d( + "data/diamond-matlab-output.npz", footprints.octahedron + ) def test_footprint_octagon(self): """Test octagon footprints""" @@ -158,7 +159,7 @@ def test_footprint_star(self): @pytest.mark.parametrize( - 'function, args, supports_sequence_decomposition', + "function, args, supports_sequence_decomposition", [ (footprints.disk, (3,), True), (footprints.ball, (3,), True), @@ -170,28 +171,31 @@ def test_footprint_star(self): (footprints.ellipse, (3, 4), False), (footprints.octagon, (3, 4), True), (footprints.star, (3,), False), - ] + ], ) @pytest.mark.parametrize("dtype", [np.uint8, np.float64]) -def test_footprint_dtype(function, args, supports_sequence_decomposition, - dtype): +def test_footprint_dtype( + function, args, supports_sequence_decomposition, dtype +): # make sure footprint dtype matches what was requested footprint = function(*args, dtype=dtype) assert footprint.dtype == dtype if supports_sequence_decomposition: - sequence = function(*args, dtype=dtype, decomposition='sequence') + sequence = function(*args, dtype=dtype, decomposition="sequence") assert all([fp_tuple[0].dtype == dtype for fp_tuple in sequence]) @pytest.mark.parametrize("function", ["disk", "ball"]) -@pytest.mark.parametrize("radius", [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 20, 50, 75, - 100]) +@pytest.mark.parametrize( + "radius", [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 20, 50, 75, 100] +) def test_nsphere_series_approximation(function, radius): fp_func = getattr(footprints, function) expected = fp_func(radius, strict_radius=False, decomposition=None) - footprint_sequence = fp_func(radius, strict_radius=False, - decomposition="sequence") + footprint_sequence = fp_func( + radius, strict_radius=False, decomposition="sequence" + ) approximate = footprints.footprint_from_sequence(footprint_sequence) assert approximate.shape == expected.shape @@ -209,8 +213,9 @@ def test_nsphere_series_approximation(function, radius): def test_disk_crosses_approximation(radius, strict_radius): fp_func = footprints.disk expected = fp_func(radius, strict_radius=strict_radius, decomposition=None) - footprint_sequence = fp_func(radius, strict_radius=strict_radius, - decomposition="crosses") + footprint_sequence = fp_func( + radius, strict_radius=strict_radius, decomposition="crosses" + ) approximate = footprints.footprint_from_sequence(footprint_sequence) assert approximate.shape == expected.shape diff --git 
a/python/cucim/src/cucim/skimage/morphology/tests/test_gray.py b/python/cucim/src/cucim/skimage/morphology/tests/test_gray.py index e863690ab..0b2754df3 100755 --- a/python/cucim/src/cucim/skimage/morphology/tests/test_gray.py +++ b/python/cucim/src/cucim/skimage/morphology/tests/test_gray.py @@ -14,19 +14,20 @@ @pytest.fixture def cam_image(): from skimage import data + return cp.ascontiguousarray(cp.array(data.camera()[64:112, 64:96])) @pytest.fixture def cell3d_image(): from skimage import data + return cp.ascontiguousarray( cp.array(data.cells3d()[30:48, 0, 20:36, 20:32]) ) class TestMorphology: - # These expected outputs were generated with skimage v0.12.1 # using: # @@ -36,34 +37,46 @@ class TestMorphology: # np.savez_compressed('gray_morph_output.npz', **output) def _build_expected_output(self): - funcs = (morphology.erosion, morphology.dilation, morphology.opening, - morphology.closing, morphology.white_tophat, - morphology.black_tophat) - footprints_2D = (morphology.square, morphology.diamond, - morphology.disk, morphology.star) + funcs = ( + morphology.erosion, + morphology.dilation, + morphology.opening, + morphology.closing, + morphology.white_tophat, + morphology.black_tophat, + ) + footprints_2D = ( + morphology.square, + morphology.diamond, + morphology.disk, + morphology.star, + ) - image = img_as_ubyte(transform.downscale_local_mean( - color.rgb2gray(cp.array(data.coffee())), (20, 20))) + image = img_as_ubyte( + transform.downscale_local_mean( + color.rgb2gray(cp.array(data.coffee())), (20, 20) + ) + ) output = {} for n in range(1, 4): for footprint in footprints_2D: for func in funcs: - key = '{0}_{1}_{2}'.format( - footprint.__name__, n, func.__name__) + key = "{0}_{1}_{2}".format( + footprint.__name__, n, func.__name__ + ) output[key] = func(image, footprint(n)) return output def test_gray_morphology(self): - expected = dict(np.load(fetch('data/gray_morph_output.npz'))) + expected = dict(np.load(fetch("data/gray_morph_output.npz"))) calculated = self._build_expected_output() for k, v in calculated.items(): cp.testing.assert_array_equal(cp.asarray(expected[k]), v) class TestEccentricStructuringElements: - def setup_method(self): self.black_pixel = 255 * cp.ones((4, 4), dtype=cp.uint8) self.black_pixel[1, 1] = 0 @@ -120,9 +133,14 @@ def test_black_tophat_white_pixel(self): assert cp.all(tophat == 0) -gray_functions = [morphology.erosion, morphology.dilation, - morphology.opening, morphology.closing, - morphology.white_tophat, morphology.black_tophat] +gray_functions = [ + morphology.erosion, + morphology.dilation, + morphology.opening, + morphology.closing, + morphology.white_tophat, + morphology.black_tophat, +] @pytest.mark.parametrize("function", gray_functions) @@ -182,10 +200,10 @@ def test_3d_fallback_white_tophat(): image[3, 2:5, 2:5] = 1 image[4, 3:5, 3:5] = 1 - with expected_warnings([r'operator.*deprecated|\A\Z']): + with expected_warnings([r"operator.*deprecated|\A\Z"]): new_image = morphology.white_tophat(image) footprint = ndi.generate_binary_structure(3, 1) - with expected_warnings([r'operator.*deprecated|\A\Z']): + with expected_warnings([r"operator.*deprecated|\A\Z"]): image_expected = ndi.white_tophat( image.view(dtype=cp.uint8), footprint=footprint ) @@ -198,10 +216,10 @@ def test_3d_fallback_black_tophat(): image[3, 2:5, 2:5] = 0 image[4, 3:5, 3:5] = 0 - with expected_warnings([r'operator.*deprecated|\A\Z']): + with expected_warnings([r"operator.*deprecated|\A\Z"]): new_image = morphology.black_tophat(image) footprint = ndi.generate_binary_structure(3, 
1) - with expected_warnings([r'operator.*deprecated|\A\Z']): + with expected_warnings([r"operator.*deprecated|\A\Z"]): image_expected = ndi.black_tophat( image.view(dtype=cp.uint8), footprint=footprint ) @@ -316,8 +334,15 @@ def test_deprecated_import(): @pytest.mark.parametrize( - 'function', ['erosion', 'dilation', 'closing', 'opening', 'white_tophat', - 'black_tophat'], + "function", + [ + "erosion", + "dilation", + "closing", + "opening", + "white_tophat", + "black_tophat", + ], ) def test_selem_kwarg_deprecation(function): with expected_warnings(["`selem` is a deprecated argument name"]): @@ -325,11 +350,18 @@ def test_selem_kwarg_deprecation(function): @pytest.mark.parametrize( - "function", ["erosion", "dilation", "closing", "opening", "white_tophat", - "black_tophat"], + "function", + [ + "erosion", + "dilation", + "closing", + "opening", + "white_tophat", + "black_tophat", + ], ) @pytest.mark.parametrize("size", (7,)) -@pytest.mark.parametrize("decomposition", ['separable', 'sequence']) +@pytest.mark.parametrize("decomposition", ["separable", "sequence"]) def test_square_decomposition(cam_image, function, size, decomposition): """Validate footprint decomposition for various shapes. @@ -344,14 +376,22 @@ def test_square_decomposition(cam_image, function, size, decomposition): @pytest.mark.parametrize( - "function", ["erosion", "dilation", "closing", "opening", "white_tophat", - "black_tophat"], + "function", + [ + "erosion", + "dilation", + "closing", + "opening", + "white_tophat", + "black_tophat", + ], ) @pytest.mark.parametrize("nrows", (3, 11)) @pytest.mark.parametrize("ncols", (3, 11)) -@pytest.mark.parametrize("decomposition", ['separable', 'sequence']) -def test_rectangle_decomposition(cam_image, function, nrows, ncols, - decomposition): +@pytest.mark.parametrize("decomposition", ["separable", "sequence"]) +def test_rectangle_decomposition( + cam_image, function, nrows, ncols, decomposition +): """Validate footprint decomposition for various shapes. comparison is made to the case without decomposition. @@ -365,11 +405,18 @@ def test_rectangle_decomposition(cam_image, function, nrows, ncols, @pytest.mark.parametrize( - "function", ["erosion", "dilation", "closing", "opening", "white_tophat", - "black_tophat"], + "function", + [ + "erosion", + "dilation", + "closing", + "opening", + "white_tophat", + "black_tophat", + ], ) @pytest.mark.parametrize("radius", (2, 3)) -@pytest.mark.parametrize("decomposition", ['sequence']) +@pytest.mark.parametrize("decomposition", ["sequence"]) def test_diamond_decomposition(cam_image, function, radius, decomposition): """Validate footprint decomposition for various shapes. @@ -384,12 +431,19 @@ def test_diamond_decomposition(cam_image, function, radius, decomposition): @pytest.mark.parametrize( - "function", ["erosion", "dilation", "closing", "opening", "white_tophat", - "black_tophat"], + "function", + [ + "erosion", + "dilation", + "closing", + "opening", + "white_tophat", + "black_tophat", + ], ) @pytest.mark.parametrize("m", (0, 1, 3, 5)) @pytest.mark.parametrize("n", (0, 1, 2, 3)) -@pytest.mark.parametrize("decomposition", ['sequence']) +@pytest.mark.parametrize("decomposition", ["sequence"]) def test_octagon_decomposition(cam_image, function, m, n, decomposition): """Validate footprint decomposition for various shapes. 
@@ -408,11 +462,18 @@ def test_octagon_decomposition(cam_image, function, m, n, decomposition): @pytest.mark.parametrize( - "function", ["erosion", "dilation", "closing", "opening", "white_tophat", - "black_tophat"], + "function", + [ + "erosion", + "dilation", + "closing", + "opening", + "white_tophat", + "black_tophat", + ], ) @pytest.mark.parametrize("size", (5,)) -@pytest.mark.parametrize("decomposition", ['separable', 'sequence']) +@pytest.mark.parametrize("decomposition", ["separable", "sequence"]) def test_cube_decomposition(cell3d_image, function, size, decomposition): """Validate footprint decomposition for various shapes. @@ -427,13 +488,21 @@ def test_cube_decomposition(cell3d_image, function, size, decomposition): @pytest.mark.parametrize( - "function", ["erosion", "dilation", "closing", "opening", "white_tophat", - "black_tophat"], + "function", + [ + "erosion", + "dilation", + "closing", + "opening", + "white_tophat", + "black_tophat", + ], ) @pytest.mark.parametrize("radius", (3,)) -@pytest.mark.parametrize("decomposition", ['sequence']) -def test_octahedron_decomposition(cell3d_image, function, radius, - decomposition): +@pytest.mark.parametrize("decomposition", ["sequence"]) +def test_octahedron_decomposition( + cell3d_image, function, radius, decomposition +): """Validate footprint decomposition for various shapes. comparison is made to the case without decomposition. diff --git a/python/cucim/src/cucim/skimage/morphology/tests/test_isotropic.py b/python/cucim/src/cucim/skimage/morphology/tests/test_isotropic.py index d0ecdb219..cdaee5bb1 100644 --- a/python/cucim/src/cucim/skimage/morphology/tests/test_isotropic.py +++ b/python/cucim/src/cucim/skimage/morphology/tests/test_isotropic.py @@ -8,13 +8,14 @@ from cucim.skimage.util import img_as_bool img = color.rgb2gray(cp.asarray(data.astronaut())) -bw_img = img > 100 / 255. 
+bw_img = img > 100 / 255.0 def test_non_square_image(): isotropic_res = morphology.isotropic_erosion(bw_img[:100, :200], 3) - binary_res = img_as_bool(morphology.binary_erosion( - bw_img[:100, :200], morphology.disk(3))) + binary_res = img_as_bool( + morphology.binary_erosion(bw_img[:100, :200], morphology.disk(3)) + ) assert_array_equal(isotropic_res, binary_res) @@ -41,7 +42,7 @@ def _disk_with_spacing( if not strict_radius: radius += 0.5 - return cp.asarray((X ** 2 + Y ** 2) <= radius ** 2, dtype=dtype) + return cp.asarray((X**2 + Y**2) <= radius**2, dtype=dtype) def test_isotropic_erosion_spacing(): @@ -57,8 +58,8 @@ def test_isotropic_erosion_spacing(): def test_isotropic_dilation(): isotropic_res = morphology.isotropic_dilation(bw_img, 3) binary_res = img_as_bool( - morphology.binary_dilation( - bw_img, morphology.disk(3))) + morphology.binary_dilation(bw_img, morphology.disk(3)) + ) assert_array_equal(isotropic_res, binary_res) @@ -82,16 +83,18 @@ def test_footprint_overflow(): img = cp.zeros((20, 20), dtype=bool) img[2:19, 2:19] = True isotropic_res = morphology.isotropic_erosion(img, 9) - binary_res = img_as_bool( - morphology.binary_erosion(img, morphology.disk(9)) - ) + binary_res = img_as_bool(morphology.binary_erosion(img, morphology.disk(9))) assert_array_equal(isotropic_res, binary_res) -@pytest.mark.parametrize('out_dtype', [bool, cp.uint8, cp.int32]) +@pytest.mark.parametrize("out_dtype", [bool, cp.uint8, cp.int32]) def test_out_argument(out_dtype): - for func in (morphology.isotropic_erosion, morphology.isotropic_dilation, - morphology.isotropic_opening, morphology.isotropic_closing): + for func in ( + morphology.isotropic_erosion, + morphology.isotropic_dilation, + morphology.isotropic_opening, + morphology.isotropic_closing, + ): radius = 3 img = cp.ones((10, 10), dtype=bool) img[2:5, 2:5] = 0 diff --git a/python/cucim/src/cucim/skimage/morphology/tests/test_misc.py b/python/cucim/src/cucim/skimage/morphology/tests/test_misc.py index cc365f31f..11250e6f7 100755 --- a/python/cucim/src/cucim/skimage/morphology/tests/test_misc.py +++ b/python/cucim/src/cucim/skimage/morphology/tests/test_misc.py @@ -36,8 +36,11 @@ def test_two_connectivity(): def test_in_place(): image = test_image.copy() observed = remove_small_objects(image, min_size=6, out=image) - assert_equal(observed is image, True, - "remove_small_objects in_place argument failed.") + assert_equal( + observed is image, + True, + "remove_small_objects in_place argument failed.", + ) @pytest.mark.parametrize("in_dtype", [bool, int, cp.int32]) @@ -94,7 +97,7 @@ def test_single_label_warning(): [1, 1, 1, 0, 0], [1, 1, 1, 0, 0]], int) # fmt: on - with expected_warnings(['use a boolean array?']): + with expected_warnings(["use a boolean array?"]): remove_small_objects(image, min_size=6) @@ -148,16 +151,18 @@ def test_two_connectivity_holes(): [0, 0, 0, 0, 0, 0, 0, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 1, 1, 1]], bool) # fmt: on - observed = remove_small_holes(test_holes_image, area_threshold=3, - connectivity=2) + observed = remove_small_holes( + test_holes_image, area_threshold=3, connectivity=2 + ) assert_array_equal(observed, expected) def test_in_place_holes(): image = test_holes_image.copy() observed = remove_small_holes(image, area_threshold=3, out=image) - assert_equal(observed is image, True, - "remove_small_holes in_place argument failed.") + assert_equal( + observed is image, True, "remove_small_holes in_place argument failed." 
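The ``test_misc`` hunks here exercise ``remove_small_holes``, the complement of ``remove_small_objects``: background holes below ``area_threshold`` are filled. A minimal sketch:

```python
import cupy as cp
from cucim.skimage.morphology import remove_small_holes

mask = cp.ones((10, 10), dtype=bool)
mask[4:6, 4:6] = False  # a 4-pixel hole, below the threshold
filled = remove_small_holes(mask, area_threshold=5)
assert bool(filled.all())
```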
+ ) def test_out_remove_small_holes(): @@ -195,7 +200,7 @@ def test_labeled_image_holes(): [0, 0, 0, 0, 0, 0, 0, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 1, 1, 1]], dtype=bool) # fmt: on - with expected_warnings(['returned as a boolean array']): + with expected_warnings(["returned as a boolean array"]): observed = remove_small_holes(labeled_holes_image, area_threshold=3) assert_array_equal(observed, expected) @@ -220,7 +225,7 @@ def test_uint_image_holes(): [0, 0, 0, 0, 0, 0, 0, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 1, 1, 1]], dtype=bool) # fmt: on - with expected_warnings(['returned as a boolean array']): + with expected_warnings(["returned as a boolean array"]): observed = remove_small_holes(labeled_holes_image, area_threshold=3) assert_array_equal(observed, expected) @@ -237,7 +242,7 @@ def test_label_warning_holes(): [0, 0, 0, 0, 0, 0, 0, 2, 2, 2]], dtype=int) # fmt: on - with expected_warnings(['use a boolean array?']): + with expected_warnings(["use a boolean array?"]): remove_small_holes(labeled_holes_image, area_threshold=3) remove_small_holes(labeled_holes_image.astype(bool), area_threshold=3) diff --git a/python/cucim/src/cucim/skimage/morphology/tests/test_reconstruction.py b/python/cucim/src/cucim/skimage/morphology/tests/test_reconstruction.py index 97f9259a7..14bec4402 100644 --- a/python/cucim/src/cucim/skimage/morphology/tests/test_reconstruction.py +++ b/python/cucim/src/cucim/skimage/morphology/tests/test_reconstruction.py @@ -20,13 +20,15 @@ def test_zeros(): """Test reconstruction with image and mask of zeros""" assert_array_almost_equal( - reconstruction(cp.zeros((5, 7)), cp.zeros((5, 7))), 0) + reconstruction(cp.zeros((5, 7)), cp.zeros((5, 7))), 0 + ) def test_image_equals_mask(): """Test reconstruction where the image and mask are the same""" assert_array_almost_equal( - reconstruction(cp.ones((7, 5)), cp.ones((7, 5))), 1) + reconstruction(cp.ones((7, 5)), cp.ones((7, 5))), 1 + ) def test_image_less_than_mask(): @@ -82,18 +84,16 @@ def test_fill_hole(): seed = cp.array([0, 8, 8, 8, 8, 8, 8, 8, 8, 0]) mask = cp.array([0, 3, 6, 2, 1, 1, 1, 4, 2, 0]) result = reconstruction(seed, mask, method="erosion") - assert_array_almost_equal( - result, cp.array([0, 3, 6, 4, 4, 4, 4, 4, 2, 0]) - ) + assert_array_almost_equal(result, cp.array([0, 3, 6, 4, 4, 4, 4, 4, 2, 0])) def test_invalid_seed(): seed = cp.ones((5, 5)) mask = cp.ones((5, 5)) with pytest.raises(ValueError): - reconstruction(seed * 2, mask, method='dilation') + reconstruction(seed * 2, mask, method="dilation") with pytest.raises(ValueError): - reconstruction(seed * 0.5, mask, method='erosion') + reconstruction(seed * 0.5, mask, method="erosion") def test_invalid_footprint(): @@ -131,8 +131,13 @@ def test_invalid_offset_not_none(): [1, 1, 1, 1, 1, 4, 4, 4]]) # fmt: on with pytest.raises(ValueError): - reconstruction(image, mask, method='dilation', - footprint=cp.ones((3, 3)), offset=cp.array([3, 0])) + reconstruction( + image, + mask, + method="dilation", + footprint=cp.ones((3, 3)), + offset=cp.array([3, 0]), + ) def test_offset_not_none(): @@ -142,8 +147,15 @@ def test_offset_not_none(): expected = cp.array([0, 3, 6, 6, 6, 6, 6, 4, 4, 0]) assert_array_almost_equal( - reconstruction(seed, mask, method='dilation', - footprint=cp.ones(3), offset=cp.array([0])), expected) + reconstruction( + seed, + mask, + method="dilation", + footprint=cp.ones(3), + offset=cp.array([0]), + ), + expected, + ) def test_reconstruction_float_inputs(): diff --git a/python/cucim/src/cucim/skimage/registration/_masked_phase_cross_correlation.py 
b/python/cucim/src/cucim/skimage/registration/_masked_phase_cross_correlation.py index 643e7076a..eb803879c 100644 --- a/python/cucim/src/cucim/skimage/registration/_masked_phase_cross_correlation.py +++ b/python/cucim/src/cucim/skimage/registration/_masked_phase_cross_correlation.py @@ -19,9 +19,13 @@ from .._shared.utils import _supported_float_type -def _masked_phase_cross_correlation(reference_image, moving_image, - reference_mask, moving_mask=None, - overlap_ratio=0.3): +def _masked_phase_cross_correlation( + reference_image, + moving_image, + reference_mask, + moving_mask=None, + overlap_ratio=0.3, +): """Masked image translation registration by masked normalized cross-correlation. @@ -68,21 +72,29 @@ def _masked_phase_cross_correlation(reference_image, moving_image, if reference_image.shape != moving_image.shape: raise ValueError( "Input images have different shapes, moving_mask must " - "be explicitly set.") + "be explicitly set." + ) moving_mask = cp.array(reference_mask, dtype=bool, copy=True) # We need masks to be of the same size as their respective images - for (im, mask) in [(reference_image, reference_mask), - (moving_image, moving_mask)]: + for im, mask in [ + (reference_image, reference_mask), + (moving_image, moving_mask), + ]: if im.shape != mask.shape: raise ValueError( - "Image sizes must match their respective mask sizes.") - - xcorr = cross_correlate_masked(moving_image, reference_image, - moving_mask, reference_mask, - axes=tuple(range(moving_image.ndim)), - mode='full', - overlap_ratio=overlap_ratio) + "Image sizes must match their respective mask sizes." + ) + + xcorr = cross_correlate_masked( + moving_image, + reference_image, + moving_mask, + reference_mask, + axes=tuple(range(moving_image.ndim)), + mode="full", + overlap_ratio=overlap_ratio, + ) # Generalize to the average of multiple equal maxima maxima = cp.stack(cp.nonzero(xcorr == xcorr.max()), axis=1) @@ -99,8 +111,9 @@ def _masked_phase_cross_correlation(reference_image, moving_image, return -shifts + (size_mismatch / 2) -def cross_correlate_masked(arr1, arr2, m1, m2, mode='full', axes=(-2, -1), - overlap_ratio=0.3): +def cross_correlate_masked( + arr1, arr2, m1, m2, mode="full", axes=(-2, -1), overlap_ratio=0.3 +): """ Masked normalized cross-correlation between arrays. @@ -154,15 +167,13 @@ def cross_correlate_masked(arr1, arr2, m1, m2, mode='full', axes=(-2, -1), Pattern Recognition, pp. 2918-2925 (2010). :DOI:`10.1109/CVPR.2010.5540032` """ - if mode not in {'full', 'same'}: + if mode not in {"full", "same"}: raise ValueError(f"Correlation mode '{mode}' is not valid.") fixed_image = arr1 moving_image = arr2 - float_dtype = _supported_float_type( - [fixed_image.dtype, moving_image.dtype] - ) - if float_dtype.kind == 'c': + float_dtype = _supported_float_type([fixed_image.dtype, moving_image.dtype]) + if float_dtype.kind == "c": raise ValueError("complex-valued arr1, arr2 are not supported") fixed_image = fixed_image.astype(float_dtype, copy=False) @@ -173,11 +184,12 @@ def cross_correlate_masked(arr1, arr2, m1, m2, mode='full', axes=(-2, -1), # Array dimensions along non-transformation axes should be equal. 
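The masked path reformatted above is reached through the public ``phase_cross_correlation`` API whenever masks are supplied. A usage sketch: per the masked branch later in this diff, ``return_error="always"`` yields a three-element return, with NaNs in place of the error metrics the masked estimator does not compute:

```python
import cupy as cp
from cucim.skimage.registration import phase_cross_correlation

ref = cp.random.random((64, 64))
mov = cp.roll(ref, (3, -2), axis=(0, 1))
mask = cp.ones(ref.shape, dtype=bool)

shift, _, _ = phase_cross_correlation(
    ref, mov,
    reference_mask=mask, moving_mask=mask,
    return_error="always",
)
# `shift` is the translation that registers `mov` onto `ref`
```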
all_axes = set(range(fixed_image.ndim)) - for axis in (all_axes - set(axes)): + for axis in all_axes - set(axes): if fixed_image.shape[axis] != moving_image.shape[axis]: raise ValueError( - f'Array shapes along non-transformation axes should be ' - f'equal, but dimensions along axis {axis} are not.') + f"Array shapes along non-transformation axes should be " + f"equal, but dimensions along axis {axis} are not." + ) # Determine final size along transformation axes # Note that it might be faster to compute Fourier transform in a slightly @@ -185,8 +197,9 @@ def cross_correlate_masked(arr1, arr2, m1, m2, mode='full', axes=(-2, -1), # we slice back to`final_shape` using `final_slice`. final_shape = list(arr1.shape) for axis in axes: - final_shape[axis] = fixed_image.shape[axis] + \ - moving_image.shape[axis] - 1 + final_shape[axis] = ( + fixed_image.shape[axis] + moving_image.shape[axis] - 1 + ) final_shape = tuple(final_shape) final_slice = tuple([slice(0, int(sz)) for sz in final_shape]) @@ -227,23 +240,30 @@ def ifft(x): number_overlap_masked_px[:] = cp.fmax(number_overlap_masked_px, eps) masked_correlated_fixed_fft = ifft(rotated_moving_mask_fft * fixed_fft) masked_correlated_rotated_moving_fft = ifft( - fixed_mask_fft * rotated_moving_fft) + fixed_mask_fft * rotated_moving_fft + ) numerator = ifft(rotated_moving_fft * fixed_fft) - numerator -= masked_correlated_fixed_fft * \ - masked_correlated_rotated_moving_fft / number_overlap_masked_px + numerator -= ( + masked_correlated_fixed_fft + * masked_correlated_rotated_moving_fft + / number_overlap_masked_px + ) fixed_squared_fft = fft(cp.square(fixed_image)) fixed_denom = ifft(rotated_moving_mask_fft * fixed_squared_fft) - fixed_denom -= cp.square(masked_correlated_fixed_fft) / \ - number_overlap_masked_px + fixed_denom -= ( + cp.square(masked_correlated_fixed_fft) / number_overlap_masked_px + ) fixed_denom[:] = cp.fmax(fixed_denom, 0.0) rotated_moving_squared_fft = fft(cp.square(rotated_moving_image)) moving_denom = ifft(fixed_mask_fft * rotated_moving_squared_fft) - moving_denom -= cp.square(masked_correlated_rotated_moving_fft) / \ - number_overlap_masked_px + moving_denom -= ( + cp.square(masked_correlated_rotated_moving_fft) + / number_overlap_masked_px + ) moving_denom[:] = cp.fmax(moving_denom, 0.0) @@ -254,9 +274,8 @@ def ifft(x): denom = denom[final_slice] number_overlap_masked_px = number_overlap_masked_px[final_slice] - if mode == 'same': - _centering = partial(_centered, - newshape=fixed_image.shape, axes=axes) + if mode == "same": + _centering = partial(_centered, newshape=fixed_image.shape, axes=axes) denom = _centering(denom) numerator = _centering(numerator) number_overlap_masked_px = _centering(number_overlap_masked_px) @@ -272,8 +291,9 @@ def ifft(x): cp.clip(out, a_min=-1, a_max=1, out=out) # Apply overlap ratio threshold - number_px_threshold = overlap_ratio * np.max(number_overlap_masked_px, - axis=axes, keepdims=True) + number_px_threshold = overlap_ratio * np.max( + number_overlap_masked_px, axis=axes, keepdims=True + ) out[number_overlap_masked_px < number_px_threshold] = 0.0 return out diff --git a/python/cucim/src/cucim/skimage/registration/_optical_flow.py b/python/cucim/src/cucim/skimage/registration/_optical_flow.py index bc8d0bf2a..3feb28f7a 100644 --- a/python/cucim/src/cucim/skimage/registration/_optical_flow.py +++ b/python/cucim/src/cucim/skimage/registration/_optical_flow.py @@ -15,8 +15,17 @@ from ._optical_flow_utils import coarse_to_fine, get_warp_points -def _tvl1(reference_image, moving_image, flow0, 
attachment, tightness, - num_warp, num_iter, tol, prefilter): +def _tvl1( + reference_image, + moving_image, + flow0, + attachment, + tightness, + num_warp, + num_iter, + tol, + prefilter, +): """TV-L1 solver for optical flow estimation. Parameters @@ -53,9 +62,11 @@ def _tvl1(reference_image, moving_image, flow0, attachment, tightness, """ dtype = reference_image.dtype - grid = cp.meshgrid(*[cp.arange(n, dtype=dtype) - for n in reference_image.shape], - indexing='ij', sparse=True) + grid = cp.meshgrid( + *[cp.arange(n, dtype=dtype) for n in reference_image.shape], + indexing="ij", + sparse=True, + ) dt = 0.5 / reference_image.ndim reg_num_iter = 2 @@ -66,8 +77,14 @@ def _tvl1(reference_image, moving_image, flow0, attachment, tightness, flow_current = flow_previous = flow0 g = cp.zeros((reference_image.ndim,) + reference_image.shape, dtype=dtype) - proj = cp.zeros((reference_image.ndim, reference_image.ndim,) - + reference_image.shape, dtype=dtype) + proj = cp.zeros( + ( + reference_image.ndim, + reference_image.ndim, + ) + + reference_image.shape, + dtype=dtype, + ) s_g = [slice(None)] * g.ndim s_p = [slice(None)] * proj.ndim @@ -75,11 +92,13 @@ def _tvl1(reference_image, moving_image, flow0, attachment, tightness, for _ in range(num_warp): if prefilter: - flow_current = ndi.median_filter(flow_current, - [1] + reference_image.ndim * [3]) + flow_current = ndi.median_filter( + flow_current, [1] + reference_image.ndim * [3] + ) - image1_warp = warp(moving_image, get_warp_points(grid, flow_current), - mode='edge') + image1_warp = warp( + moving_image, get_warp_points(grid, flow_current), mode="edge" + ) # output_as_array=True stacks the gradients along the first axis grad = gradient(image1_warp, output_as_array=True) NI = (grad * grad).sum(0) @@ -88,7 +107,6 @@ def _tvl1(reference_image, moving_image, flow0, attachment, tightness, rho_0 = image1_warp - reference_image - (grad * flow_current).sum(0) for _ in range(num_iter): - # Data term rho = rho_0 + (grad * flow_current).sum(0) @@ -142,10 +160,18 @@ def _tvl1(reference_image, moving_image, flow0, attachment, tightness, return flow_current -def optical_flow_tvl1(reference_image, moving_image, - *, - attachment=15, tightness=0.3, num_warp=5, num_iter=10, - tol=1e-4, prefilter=False, dtype=cp.float32): +def optical_flow_tvl1( + reference_image, + moving_image, + *, + attachment=15, + tightness=0.3, + num_warp=5, + num_iter=10, + tol=1e-4, + prefilter=False, + dtype=cp.float32, +): r"""Coarse to fine optical flow estimator. The TV-L1 solver is applied at each level of the image @@ -220,9 +246,15 @@ def optical_flow_tvl1(reference_image, moving_image, """ - solver = partial(_tvl1, attachment=attachment, - tightness=tightness, num_warp=num_warp, num_iter=num_iter, - tol=tol, prefilter=prefilter) + solver = partial( + _tvl1, + attachment=attachment, + tightness=tightness, + num_warp=num_warp, + num_iter=num_iter, + tol=tol, + prefilter=prefilter, + ) if cp.dtype(dtype) != _supported_float_type(dtype): msg = f"dtype={dtype} is not supported. Try 'float32' or 'float64.'" @@ -231,8 +263,9 @@ def optical_flow_tvl1(reference_image, moving_image, return coarse_to_fine(reference_image, moving_image, solver, dtype=dtype) -def _ilk(reference_image, moving_image, flow0, radius, num_warp, gaussian, - prefilter): +def _ilk( + reference_image, moving_image, flow0, radius, num_warp, gaussian, prefilter +): """Iterative Lucas-Kanade (iLK) solver for optical flow estimation. 
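Usage of the TV-L1 estimator wrapped above, sketched on a synthetic translation (a random base image, as the tests in this diff use; the recovered flow is approximate, especially near the wrapped borders):

```python
import cupy as cp
import numpy as np
from cucim.skimage.registration import optical_flow_tvl1

rng = np.random.default_rng(0)
ref = cp.asarray(rng.normal(size=(128, 128)), dtype=cp.float32)
mov = cp.roll(ref, 2, axis=0)  # a pure 2-pixel vertical translation

flow = optical_flow_tvl1(ref, mov)
assert flow.shape == (2, 128, 128)  # one displacement field per axis
# flow[0] is roughly constant at the 2-pixel shift (sign follows the
# library's warp convention)
```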
Parameters @@ -268,10 +301,11 @@ def _ilk(reference_image, moving_image, flow0, radius, num_warp, gaussian, if gaussian: sigma = ndim * (size / 4,) - filter_func = partial(gaussian_filter, sigma=sigma, mode='mirror') + filter_func = partial(gaussian_filter, sigma=sigma, mode="mirror") else: - filter_func = partial(ndi.uniform_filter, size=ndim * (size,), - mode='mirror') + filter_func = partial( + ndi.uniform_filter, size=ndim * (size,), mode="mirror" + ) flow = flow0 # For each pixel location (i, j), the optical flow X = flow[:, i, j] @@ -280,20 +314,24 @@ def _ilk(reference_image, moving_image, flow0, radius, num_warp, gaussian, A = cp.zeros(reference_image.shape + (ndim, ndim), dtype=dtype) b = cp.zeros(reference_image.shape + (ndim,), dtype=dtype) - grid = cp.meshgrid(*[cp.arange(n, dtype=dtype) - for n in reference_image.shape], - indexing='ij', sparse=True) + grid = cp.meshgrid( + *[cp.arange(n, dtype=dtype) for n in reference_image.shape], + indexing="ij", + sparse=True, + ) for _ in range(num_warp): if prefilter: flow = ndi.median_filter(flow, (1,) + ndim * (3,)) - moving_image_warp = warp(moving_image, get_warp_points(grid, flow), - mode='edge') + moving_image_warp = warp( + moving_image, get_warp_points(grid, flow), mode="edge" + ) # output_as_array=True stacks the gradients along the first axis grad = gradient(moving_image_warp, output_as_array=True) - error_image = ((grad * flow).sum(axis=0) - + reference_image - moving_image_warp) + error_image = ( + (grad * flow).sum(axis=0) + reference_image - moving_image_warp + ) # Local linear systems creation for i, j in combinations_with_replacement(range(ndim), 2): @@ -313,9 +351,16 @@ def _ilk(reference_image, moving_image, flow0, radius, num_warp, gaussian, return flow -def optical_flow_ilk(reference_image, moving_image, *, - radius=7, num_warp=10, gaussian=False, - prefilter=False, dtype=cp.float32): +def optical_flow_ilk( + reference_image, + moving_image, + *, + radius=7, + num_warp=10, + gaussian=False, + prefilter=False, + dtype=cp.float32, +): """Coarse to fine optical flow estimator. The iterative Lucas-Kanade (iLK) solver is applied at each level @@ -381,8 +426,13 @@ def optical_flow_ilk(reference_image, moving_image, *, """ - solver = partial(_ilk, radius=radius, num_warp=num_warp, gaussian=gaussian, - prefilter=prefilter) + solver = partial( + _ilk, + radius=radius, + num_warp=num_warp, + gaussian=gaussian, + prefilter=prefilter, + ) if cp.dtype(dtype) != _supported_float_type(dtype): msg = f"dtype={dtype} is not supported. Try 'float32' or 'float64.'" diff --git a/python/cucim/src/cucim/skimage/registration/_optical_flow_utils.py b/python/cucim/src/cucim/skimage/registration/_optical_flow_utils.py index e6ffa08ef..2f21d91ea 100644 --- a/python/cucim/src/cucim/skimage/registration/_optical_flow_utils.py +++ b/python/cucim/src/cucim/skimage/registration/_optical_flow_utils.py @@ -59,8 +59,9 @@ def resize_flow(flow, shape): for _ in shape: scale_factor = scale_factor[..., cp.newaxis] - rflow = scale_factor * ndi.zoom(flow, [1] + scale, order=0, - mode='nearest', prefilter=False) + rflow = scale_factor * ndi.zoom( + flow, [1] + scale, order=0, mode="nearest", prefilter=False + ) return rflow @@ -99,8 +100,9 @@ def get_pyramid(I, downscale=2.0, nlevel=10, min_size=16): # noqa return pyramid[::-1] -def coarse_to_fine(I0, I1, solver, downscale=2, nlevel=10, min_size=16, - dtype=np.float32): +def coarse_to_fine( + I0, I1, solver, downscale=2, nlevel=10, min_size=16, dtype=np.float32 +): """Generic coarse to fine solver. 
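The iLK solver goes through the same coarse-to-fine driver; a companion sketch with the Gaussian weighting toggled on (an unsupported ``dtype`` raises ``ValueError``, per the check above):

```python
import cupy as cp
import numpy as np
from cucim.skimage.registration import optical_flow_ilk

rng = np.random.default_rng(0)
ref = cp.asarray(rng.normal(size=(128, 128)), dtype=cp.float32)
mov = cp.roll(ref, (0, 3), axis=(0, 1))

flow = optical_flow_ilk(ref, mov, radius=7, gaussian=True,
                        dtype=cp.float32)
assert flow.shape == (2,) + ref.shape
```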
Parameters @@ -130,18 +132,20 @@ def coarse_to_fine(I0, I1, solver, downscale=2, nlevel=10, min_size=16, if I0.shape != I1.shape: raise ValueError("Input images should have the same shape") - if np.dtype(dtype).char not in 'efdg': - raise ValueError("Only floating point data type are valid" - " for optical flow") + if np.dtype(dtype).char not in "efdg": + raise ValueError( + "Only floating point data type are valid" " for optical flow" + ) - pyramid = list(zip(get_pyramid(_convert(I0, dtype), - downscale, nlevel, min_size), - get_pyramid(_convert(I1, dtype), - downscale, nlevel, min_size))) + pyramid = list( + zip( + get_pyramid(_convert(I0, dtype), downscale, nlevel, min_size), + get_pyramid(_convert(I1, dtype), downscale, nlevel, min_size), + ) + ) # Initialization to 0 at coarsest level. - flow = cp.zeros((pyramid[0][0].ndim, ) + pyramid[0][0].shape, - dtype=dtype) + flow = cp.zeros((pyramid[0][0].ndim,) + pyramid[0][0].shape, dtype=dtype) flow = solver(pyramid[0][0], pyramid[0][1], flow) diff --git a/python/cucim/src/cucim/skimage/registration/_phase_cross_correlation.py b/python/cucim/src/cucim/skimage/registration/_phase_cross_correlation.py index d38126cb8..4fcc497a0 100644 --- a/python/cucim/src/cucim/skimage/registration/_phase_cross_correlation.py +++ b/python/cucim/src/cucim/skimage/registration/_phase_cross_correlation.py @@ -15,8 +15,9 @@ from ._masked_phase_cross_correlation import _masked_phase_cross_correlation -def _upsampled_dft(data, upsampled_region_size, - upsample_factor=1, axis_offsets=None): +def _upsampled_dft( + data, upsampled_region_size, upsample_factor=1, axis_offsets=None +): """ Upsampled DFT by matrix multiplication. @@ -57,23 +58,28 @@ def _upsampled_dft(data, upsampled_region_size, upsampled_region_size = [upsampled_region_size] * data.ndim else: if len(upsampled_region_size) != data.ndim: - raise ValueError("shape of upsampled region sizes must be equal " - "to input data's number of dimensions.") + raise ValueError( + "shape of upsampled region sizes must be equal " + "to input data's number of dimensions." + ) if axis_offsets is None: axis_offsets = [0] * data.ndim else: if len(axis_offsets) != data.ndim: - raise ValueError("number of axis offsets must be equal to input " - "data's number of dimensions.") + raise ValueError( + "number of axis offsets must be equal to input " + "data's number of dimensions." 
+ ) im2pi = 1j * 2 * np.pi dim_properties = list(zip(data.shape, upsampled_region_size, axis_offsets)) - for (n_items, ups_size, ax_offset) in dim_properties[::-1]: - kernel = ((cp.arange(ups_size) - ax_offset)[:, None] - * fft.fftfreq(n_items, upsample_factor)) + for n_items, ups_size, ax_offset in dim_properties[::-1]: + kernel = (cp.arange(ups_size) - ax_offset)[:, None] * fft.fftfreq( + n_items, upsample_factor + ) kernel = cp.exp(-im2pi * kernel) # CuPy Backend: use kernel of same precision as the data kernel = kernel.astype(data.dtype, copy=False) @@ -110,8 +116,9 @@ def _compute_error(cross_correlation_max, src_amp, target_amp): target_amp : float The normalized average image intensity of the target image """ - error = 1.0 - cross_correlation_max * cross_correlation_max.conj() /\ - (src_amp * target_amp) + error = 1.0 - cross_correlation_max * cross_correlation_max.conj() / ( + src_amp * target_amp + ) return cp.sqrt(cp.abs(error)) @@ -150,12 +157,11 @@ def _disambiguate_shift(reference_image, moving_image, shift): """ shape = reference_image.shape positive_shift = [shift_i % s for shift_i, s in zip(shift, shape)] - negative_shift = [shift_i - s - for shift_i, s in zip(positive_shift, shape)] + negative_shift = [shift_i - s for shift_i, s in zip(positive_shift, shape)] subpixel = any(s % 1 != 0 for s in shift) interp_order = 3 if subpixel else 0 shifted = ndi.shift( - moving_image, shift, mode='grid-wrap', order=interp_order + moving_image, shift, mode="grid-wrap", order=interp_order ) indices = tuple(round(s) for s in positive_shift) splits_per_dim = [(slice(0, i), slice(i, None)) for i in indices] @@ -182,12 +188,19 @@ def _disambiguate_shift(reference_image, moving_image, shift): return real_shift -def phase_cross_correlation(reference_image, moving_image, *, - upsample_factor=1, space="real", - disambiguate=False, - return_error=True, reference_mask=None, - moving_mask=None, overlap_ratio=0.3, - normalization="phase"): +def phase_cross_correlation( + reference_image, + moving_image, + *, + upsample_factor=1, + space="real", + disambiguate=False, + return_error=True, + reference_mask=None, + moving_mask=None, + overlap_ratio=0.3, + normalization="phase", +): """Efficient subpixel image translation registration by cross-correlation. This code gives the same precision as the FFT upsampled cross-correlation @@ -299,6 +312,7 @@ def phase_cross_correlation(reference_image, moving_image, *, Pattern Recognition, pp. 2918-2925 (2010). :DOI:`10.1109/CVPR.2010.5540032` """ + def warn_return_error(): warnings.warn( "In scikit-image 0.22, phase_cross_correlation will start " @@ -310,9 +324,13 @@ def warn_return_error(): ) if (reference_mask is not None) or (moving_mask is not None): - shift = _masked_phase_cross_correlation(reference_image, moving_image, - reference_mask, moving_mask, - overlap_ratio) + shift = _masked_phase_cross_correlation( + reference_image, + moving_image, + reference_mask, + moving_mask, + overlap_ratio, + ) if return_error == "always": return shift, np.nan, np.nan else: @@ -324,11 +342,11 @@ def warn_return_error(): raise ValueError("images must be same shape") # assume complex data is already in Fourier space - if space.lower() == 'fourier': + if space.lower() == "fourier": src_freq = reference_image target_freq = moving_image # real data needs to be fft'd. 
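The upsampled-DFT machinery reformatted above is what enables subpixel registration. A sketch with a known Fourier-domain shift; the detected shift should match the applied offset to roughly 1/``upsample_factor`` of a pixel, following the library's sign convention:

```python
import cupy as cp
from cupyx.scipy.ndimage import fourier_shift
from cucim.skimage.registration import phase_cross_correlation

ref = cp.random.random((100, 100))
# Apply a known subpixel translation in the Fourier domain.
mov = cp.fft.ifftn(fourier_shift(cp.fft.fftn(ref), (-1.25, 2.75))).real

shift, error, phasediff = phase_cross_correlation(
    ref, mov, upsample_factor=100
)
# shift ~= (-1.25, 2.75)
```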
- elif space.lower() == 'real': + elif space.lower() == "real": src_freq = fft.fftn(reference_image) target_freq = fft.fftn(moving_image) else: @@ -349,8 +367,10 @@ def warn_return_error(): int(cp.argmax(cp.abs(cross_correlation))), cross_correlation.shape ) midpoint = tuple(float(axis_size // 2) for axis_size in shape) - shift = tuple(_max - axis_size if _max > mid else _max - for _max, mid, axis_size in zip(maxima, midpoint, shape)) + shift = tuple( + _max - axis_size if _max > mid else _max + for _max, mid, axis_size in zip(maxima, midpoint, shape) + ) if upsample_factor == 1: if return_error: @@ -366,8 +386,9 @@ def warn_return_error(): # Initial shift estimate in upsampled grid # shift = cp.around(shift * upsample_factor) / upsample_factor upsample_factor = float(upsample_factor) - shift = tuple(round(s * upsample_factor) / upsample_factor - for s in shift) + shift = tuple( + round(s * upsample_factor) / upsample_factor for s in shift + ) upsampled_region_size = math.ceil(upsample_factor * 1.5) # Center of output array at dftshift + 1 dftshift = float(upsampled_region_size // 2) @@ -375,14 +396,17 @@ def warn_return_error(): sample_region_offset = tuple( dftshift - s * upsample_factor for s in shift ) - cross_correlation = _upsampled_dft(image_product.conj(), - upsampled_region_size, - upsample_factor, - sample_region_offset).conj() + cross_correlation = _upsampled_dft( + image_product.conj(), + upsampled_region_size, + upsample_factor, + sample_region_offset, + ).conj() # Locate maximum and map back to original pixel grid - maxima = np.unravel_index(int(cp.argmax(cp.abs(cross_correlation))), - cross_correlation.shape) + maxima = np.unravel_index( + int(cp.argmax(cp.abs(cross_correlation))), cross_correlation.shape + ) CCmax = cross_correlation[maxima] maxima = tuple(float(m) - dftshift for m in maxima) @@ -403,7 +427,7 @@ def warn_return_error(): ) if disambiguate: - if space.lower() != 'real': + if space.lower() != "real": reference_image = fft.ifftn(reference_image) moving_image = fft.ifftn(moving_image) shift = _disambiguate_shift(reference_image, moving_image, shift) @@ -417,9 +441,13 @@ def warn_return_error(): "keywords, eg: " "phase_cross_correlation(reference_image, moving_image, " "reference_mask=~np.isnan(reference_image), " - "moving_mask=~np.isnan(moving_image))") + "moving_mask=~np.isnan(moving_image))" + ) - return shift, _compute_error(CCmax, src_amp, target_amp),\ - _compute_phasediff(CCmax) + return ( + shift, + _compute_error(CCmax, src_amp, target_amp), + _compute_phasediff(CCmax), + ) else: return shift diff --git a/python/cucim/src/cucim/skimage/registration/tests/test_ilk.py b/python/cucim/src/cucim/skimage/registration/tests/test_ilk.py index 06550469a..d7876452c 100644 --- a/python/cucim/src/cucim/skimage/registration/tests/test_ilk.py +++ b/python/cucim/src/cucim/skimage/registration/tests/test_ilk.py @@ -7,9 +7,9 @@ from cucim.skimage.registration import optical_flow_ilk -@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64]) -@pytest.mark.parametrize('gaussian', [True, False]) -@pytest.mark.parametrize('prefilter', [True, False]) +@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64]) +@pytest.mark.parametrize("gaussian", [True, False]) +@pytest.mark.parametrize("prefilter", [True, False]) def test_2d_motion(dtype, gaussian, prefilter): # Generate synthetic data rnd = np.random.default_rng(0) @@ -19,21 +19,30 @@ def test_2d_motion(dtype, gaussian, prefilter): image1 = image1.astype(dtype, copy=False) float_dtype = 
_supported_float_type(dtype) # Estimate the flow - flow = optical_flow_ilk(image0, image1, - gaussian=gaussian, prefilter=prefilter, - dtype=float_dtype) + flow = optical_flow_ilk( + image0, + image1, + gaussian=gaussian, + prefilter=prefilter, + dtype=float_dtype, + ) assert flow.dtype == float_dtype # Assert that the average absolute error is less than half a pixel assert abs(flow - gt_flow).mean() < 0.5 if dtype != float_dtype: with pytest.raises(ValueError): - optical_flow_ilk(image0, image1, gaussian=gaussian, - prefilter=prefilter, dtype=dtype) + optical_flow_ilk( + image0, + image1, + gaussian=gaussian, + prefilter=prefilter, + dtype=dtype, + ) -@pytest.mark.parametrize('gaussian', [True, False]) -@pytest.mark.parametrize('prefilter', [True, False]) +@pytest.mark.parametrize("gaussian", [True, False]) +@pytest.mark.parametrize("prefilter", [True, False]) def test_3d_motion(gaussian, prefilter): # Generate synthetic data rnd = np.random.default_rng(123) @@ -41,8 +50,9 @@ image0 = cp.asarray(image0) gt_flow, image1 = _sin_flow_gen(image0, npics=3) # Estimate the flow - flow = optical_flow_ilk(image0, image1, radius=5, - gaussian=gaussian, prefilter=prefilter) + flow = optical_flow_ilk( + image0, image1, radius=5, gaussian=gaussian, prefilter=prefilter + ) # Assert that the average absolute error is less than half a pixel assert abs(flow - gt_flow).mean() < 0.5 @@ -75,14 +85,14 @@ def test_optical_flow_dtype(): image0 = cp.asarray(image0) gt_flow, image1 = _sin_flow_gen(image0) # Estimate the flow at double precision - flow_f64 = optical_flow_ilk(image0, image1, dtype='float64') + flow_f64 = optical_flow_ilk(image0, image1, dtype="float64") - assert flow_f64.dtype == 'float64' + assert flow_f64.dtype == "float64" # Estimate the flow at single precision - flow_f32 = optical_flow_ilk(image0, image1, dtype='float32') + flow_f32 = optical_flow_ilk(image0, image1, dtype="float32") - assert flow_f32.dtype == 'float32' + assert flow_f32.dtype == "float32" # Assert that floating point precision does not affect the quality # of the estimated flow @@ -102,4 +112,4 @@ rnd = np.random.default_rng(0) img = rnd.normal(size=(256, 256)) with pytest.raises(ValueError): - u, v = optical_flow_ilk(img, img, dtype='int') + u, v = optical_flow_ilk(img, img, dtype="int") diff --git a/python/cucim/src/cucim/skimage/registration/tests/test_masked_phase_cross_correlation.py b/python/cucim/src/cucim/skimage/registration/tests/test_masked_phase_cross_correlation.py index 404f64132..e83c638fa 100644 --- a/python/cucim/src/cucim/skimage/registration/tests/test_masked_phase_cross_correlation.py +++ b/python/cucim/src/cucim/skimage/registration/tests/test_masked_phase_cross_correlation.py @@ -1,8 +1,7 @@ import cupy as cp import numpy as np import pytest -from cupyx.scipy.ndimage import fourier_shift -from cupyx.scipy.ndimage import shift as real_shift +from cupyx.scipy.ndimage import fourier_shift, shift as real_shift from numpy.testing import assert_almost_equal from skimage.data import camera from skimage.io import imread @@ -10,12 +9,13 @@ from cucim.skimage._shared.fft import fftmodule as fft from cucim.skimage._shared.testing import fetch from cucim.skimage._shared.utils import _supported_float_type -from cucim.skimage.registration._masked_phase_cross_correlation import \ - _masked_phase_cross_correlation as masked_register_translation -from cucim.skimage.registration._masked_phase_cross_correlation import \ - cross_correlate_masked -from
cucim.skimage.registration._phase_cross_correlation import \ - phase_cross_correlation +from cucim.skimage.registration._masked_phase_cross_correlation import ( + _masked_phase_cross_correlation as masked_register_translation, + cross_correlate_masked, +) +from cucim.skimage.registration._phase_cross_correlation import ( + phase_cross_correlation, +) def test_masked_registration_vs_phase_cross_correlation(): @@ -23,15 +23,18 @@ def test_masked_registration_vs_phase_cross_correlation(): phase_cross_correlation in the case of trivial masks.""" reference_image = cp.array(camera()) shift = (-7, 12) - shifted = cp.real(fft.ifft2(fourier_shift( - fft.fft2(reference_image), shift))) + shifted = cp.real( + fft.ifft2(fourier_shift(fft.fft2(reference_image), shift)) + ) trivial_mask = cp.ones_like(reference_image) nonmasked_result, *_ = phase_cross_correlation(reference_image, shifted) - masked_result = masked_register_translation(reference_image, - shifted, - reference_mask=trivial_mask, - overlap_ratio=1 / 10) + masked_result = masked_register_translation( + reference_image, + shifted, + reference_mask=trivial_mask, + overlap_ratio=1 / 10, + ) cp.testing.assert_array_equal(nonmasked_result, masked_result) @@ -44,22 +47,27 @@ def test_masked_registration_random_masks(): reference_image = cp.array(camera()) shift = (-7, 12) - shifted = cp.real(fft.ifft2(fourier_shift( - fft.fft2(reference_image), shift))) + shifted = cp.real( + fft.ifft2(fourier_shift(fft.fft2(reference_image), shift)) + ) # Random masks with 75% of pixels being valid ref_mask = np.random.choice( - [True, False], reference_image.shape, p=[3 / 4, 1 / 4]) + [True, False], reference_image.shape, p=[3 / 4, 1 / 4] + ) shifted_mask = np.random.choice( - [True, False], shifted.shape, p=[3 / 4, 1 / 4]) + [True, False], shifted.shape, p=[3 / 4, 1 / 4] + ) ref_mask = cp.asarray(ref_mask) shifted_mask = cp.asarray(shifted_mask) - measured_shift = masked_register_translation(reference_image, - shifted, - reference_mask=ref_mask, - moving_mask=shifted_mask) + measured_shift = masked_register_translation( + reference_image, + shifted, + reference_mask=ref_mask, + moving_mask=shifted_mask, + ) cp.testing.assert_array_equal(measured_shift, -cp.asarray(shift)) @@ -67,7 +75,7 @@ def test_masked_registration_random_masks(): def test_masked_registration_3d_contiguous_mask(): """masked_register_translation should be able to register translations between volumes with contiguous masks.""" - brain = pytest.importorskip('skimage.data.brain') + brain = pytest.importorskip("skimage.data.brain") ref_vol = cp.array(brain()[:, ::2, ::2]) offset = (1, -5, 10) @@ -93,17 +101,20 @@ def test_masked_registration_random_masks_non_equal_sizes(): reference_image = cp.array(camera()) shift = (-7, 12) - shifted = cp.real(fft.ifft2(fourier_shift( - fft.fft2(reference_image), shift))) + shifted = cp.real( + fft.ifft2(fourier_shift(fft.fft2(reference_image), shift)) + ) # Crop the shifted image shifted = shifted[64:-64, 64:-64] # Random masks with 75% of pixels being valid ref_mask = np.random.choice( - [True, False], reference_image.shape, p=[3 / 4, 1 / 4]) + [True, False], reference_image.shape, p=[3 / 4, 1 / 4] + ) shifted_mask = np.random.choice( - [True, False], shifted.shape, p=[3 / 4, 1 / 4]) + [True, False], shifted.shape, p=[3 / 4, 1 / 4] + ) reference_image = cp.asarray(reference_image) shifted = cp.asarray(shifted) @@ -111,7 +122,8 @@ def test_masked_registration_random_masks_non_equal_sizes(): reference_image, shifted, reference_mask=cp.ones_like(ref_mask), - 
moving_mask=cp.ones_like(shifted_mask)) + moving_mask=cp.ones_like(shifted_mask), + ) cp.testing.assert_array_equal(measured_shift, -cp.asarray(shift)) @@ -124,29 +136,44 @@ def test_masked_registration_padfield_data(): shifts = [(75, 75), (-130, 130), (130, 130)] for xi, yi in shifts: - - fixed_image = cp.array(imread( - fetch('registration/tests/data/OriginalX{:d}Y{:d}.png' - ''.format(xi, yi)))) - moving_image = cp.array(imread( - fetch('registration/tests/data/TransformedX{:d}Y{:d}.png' - ''.format(xi, yi)))) + fixed_image = cp.array( + imread( + fetch( + "registration/tests/data/OriginalX{:d}Y{:d}.png" + "".format(xi, yi) + ) + ) + ) + moving_image = cp.array( + imread( + fetch( + "registration/tests/data/TransformedX{:d}Y{:d}.png" + "".format(xi, yi) + ) + ) + ) # Valid pixels are 1 fixed_mask = fixed_image != 0 moving_mask = moving_image != 0 # Note that shifts in x and y and shifts in cols and rows - shift_y, shift_x = cp.asnumpy(masked_register_translation( - fixed_image, moving_image, reference_mask=fixed_mask, - moving_mask=moving_mask, overlap_ratio=0.1)) + shift_y, shift_x = cp.asnumpy( + masked_register_translation( + fixed_image, + moving_image, + reference_mask=fixed_mask, + moving_mask=moving_mask, + overlap_ratio=0.1, + ) + ) # Note: by looking at the test code from Padfield's # MaskedFFTRegistrationCode repository, the # shifts were not xi and yi, but xi and -yi np.testing.assert_array_equal((shift_x, shift_y), (-xi, yi)) -@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64]) def test_cross_correlate_masked_output_shape(dtype): """Masked normalized cross-correlation should return a shape of N + M + 1 for each transform axis.""" @@ -164,13 +191,15 @@ def test_cross_correlate_masked_output_shape(dtype): float_dtype = _supported_float_type(dtype) full_xcorr = cross_correlate_masked( - arr1, arr2, m1, m2, axes=(0, 1, 2), mode='full') + arr1, arr2, m1, m2, axes=(0, 1, 2), mode="full" + ) assert full_xcorr.dtype.kind != "c" # grlee77: output should be real assert full_xcorr.shape == expected_full_shape assert full_xcorr.dtype == float_dtype same_xcorr = cross_correlate_masked( - arr1, arr2, m1, m2, axes=(0, 1, 2), mode='same') + arr1, arr2, m1, m2, axes=(0, 1, 2), mode="same" + ) assert same_xcorr.shape == expected_same_shape assert same_xcorr.dtype == float_dtype @@ -269,15 +298,18 @@ def test_cross_correlate_masked_over_axes(): # Loop over last axis with_loop = cp.empty_like(arr1, dtype=np.complex128) for index in range(arr1.shape[-1]): - with_loop[:, :, index] = cross_correlate_masked(arr1[:, :, index], - arr2[:, :, index], - m1[:, :, index], - m2[:, :, index], - axes=(0, 1), - mode='same') + with_loop[:, :, index] = cross_correlate_masked( + arr1[:, :, index], + arr2[:, :, index], + m1[:, :, index], + m2[:, :, index], + axes=(0, 1), + mode="same", + ) over_axes = cross_correlate_masked( - arr1, arr2, m1, m2, axes=(0, 1), mode='same') + arr1, arr2, m1, m2, axes=(0, 1), mode="same" + ) cp.testing.assert_array_almost_equal(with_loop, over_axes) @@ -296,8 +328,9 @@ def test_cross_correlate_masked_autocorrelation_trivial_masks(): m1 = cp.asarray(m1) m2 = cp.asarray(m2) - xcorr = cross_correlate_masked(arr1, arr1, m1, m2, axes=(0, 1), - mode='same', overlap_ratio=0).real + xcorr = cross_correlate_masked( + arr1, arr1, m1, m2, axes=(0, 1), mode="same", overlap_ratio=0 + ).real max_index = cp.unravel_index(cp.argmax(xcorr), xcorr.shape) max_index = tuple(map(int, max_index)) diff --git 
a/python/cucim/src/cucim/skimage/registration/tests/test_phase_cross_correlation.py b/python/cucim/src/cucim/skimage/registration/tests/test_phase_cross_correlation.py index 857c1d625..da089ed67 100644 --- a/python/cucim/src/cucim/skimage/registration/tests/test_phase_cross_correlation.py +++ b/python/cucim/src/cucim/skimage/registration/tests/test_phase_cross_correlation.py @@ -12,24 +12,28 @@ from cucim.skimage._shared.fft import fftmodule as fft from cucim.skimage.data import binary_blobs from cucim.skimage.registration._phase_cross_correlation import ( - _upsampled_dft, phase_cross_correlation) + _upsampled_dft, + phase_cross_correlation, +) -@pytest.mark.parametrize('normalization', [None, 'phase']) +@pytest.mark.parametrize("normalization", [None, "phase"]) def test_correlation(normalization): reference_image = fft.fftn(cp.array(camera())) shift = (-7, 12) shifted_image = fourier_shift(reference_image, shift) # pixel precision - result, _, _ = phase_cross_correlation(reference_image, - shifted_image, - space="fourier", - normalization=normalization) + result, _, _ = phase_cross_correlation( + reference_image, + shifted_image, + space="fourier", + normalization=normalization, + ) assert_allclose(result[:2], -cp.array(shift)) -@pytest.mark.parametrize('normalization', ['nonexisting']) +@pytest.mark.parametrize("normalization", ["nonexisting"]) def test_correlation_invalid_normalization(normalization): reference_image = fft.fftn(cp.array(camera())) shift = (-7, 12) @@ -37,28 +41,32 @@ def test_correlation_invalid_normalization(normalization): # pixel precision with pytest.raises(ValueError): - phase_cross_correlation(reference_image, - shifted_image, - space="fourier", - normalization=normalization) + phase_cross_correlation( + reference_image, + shifted_image, + space="fourier", + normalization=normalization, + ) -@pytest.mark.parametrize('normalization', [None, 'phase']) +@pytest.mark.parametrize("normalization", [None, "phase"]) def test_subpixel_precision(normalization): reference_image = fft.fftn(cp.array(camera())) subpixel_shift = (-2.4, 1.32) shifted_image = fourier_shift(reference_image, subpixel_shift) # subpixel precision - result, _, _ = phase_cross_correlation(reference_image, - shifted_image, - upsample_factor=100, - space="fourier", - normalization=normalization) + result, _, _ = phase_cross_correlation( + reference_image, + shifted_image, + upsample_factor=100, + space="fourier", + normalization=normalization, + ) assert_allclose(result[:2], -cp.array(subpixel_shift), atol=0.05) -@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64]) def test_real_input(dtype): reference_image = cp.array(camera()).astype(dtype, copy=False) subpixel_shift = (-2.4, 1.32) @@ -66,9 +74,9 @@ def test_real_input(dtype): shifted_image = fft.ifftn(shifted_image).real.astype(dtype, copy=False) # subpixel precision - result, error, diffphase = phase_cross_correlation(reference_image, - shifted_image, - upsample_factor=100) + result, error, diffphase = phase_cross_correlation( + reference_image, shifted_image, upsample_factor=100 + ) assert isinstance(result, tuple) assert all(isinstance(s, float) for s in result) assert_allclose(result[:2], -cp.array(subpixel_shift), atol=0.05) @@ -81,10 +89,9 @@ def test_size_one_dimension_input(): shifted_image = fourier_shift(reference_image, subpixel_shift) # subpixel precision - result, error, diffphase = phase_cross_correlation(reference_image, - shifted_image, - 
upsample_factor=20, - space="fourier") + result, error, diffphase = phase_cross_correlation( + reference_image, shifted_image, upsample_factor=20, space="fourier" + ) assert_allclose(result[:2], -cp.array((-2.4, 0)), atol=0.05) @@ -94,19 +101,18 @@ def test_3d_input(): shift = (-2.0, 1.0, 5.0) shifted_image = fourier_shift(reference_image, shift) - result, error, diffphase = phase_cross_correlation(reference_image, - shifted_image, - space="fourier") + result, error, diffphase = phase_cross_correlation( + reference_image, shifted_image, space="fourier" + ) assert_allclose(result, -cp.array(shift), atol=0.05) # subpixel precision now available for 3-D data subpixel_shift = (-2.3, 1.7, 5.4) shifted_image = fourier_shift(reference_image, subpixel_shift) - result, error, diffphase = phase_cross_correlation(reference_image, - shifted_image, - upsample_factor=100, - space="fourier") + result, error, diffphase = phase_cross_correlation( + reference_image, shifted_image, upsample_factor=100, space="fourier" + ) assert_allclose(result, -cp.array(subpixel_shift), atol=0.05) @@ -133,7 +139,7 @@ def test_wrong_input(): image = cp.ones((5, 5)) image[0][0] = cp.nan template = cp.ones((5, 5)) - with expected_warnings([r'invalid value encountered in true_divide|\A\Z']): + with expected_warnings([r"invalid value encountered in true_divide|\A\Z"]): with pytest.raises(ValueError): phase_cross_correlation(template, image, return_error=True) @@ -143,9 +149,9 @@ def test_4d_input_pixel(): reference_image = fft.fftn(phantom) shift = (-2.0, 1.0, 5.0, -3) shifted_image = fourier_shift(reference_image, shift) - result, error, diffphase = phase_cross_correlation(reference_image, - shifted_image, - space="fourier") + result, error, diffphase = phase_cross_correlation( + reference_image, shifted_image, space="fourier" + ) assert_allclose(result, -cp.array(shift), atol=0.05) @@ -154,28 +160,24 @@ def test_4d_input_subpixel(): reference_image = fft.fftn(phantom) subpixel_shift = (-2.3, 1.7, 5.4, -3.2) shifted_image = fourier_shift(reference_image, subpixel_shift) - result, error, diffphase = phase_cross_correlation(reference_image, - shifted_image, - upsample_factor=10, - space="fourier") + result, error, diffphase = phase_cross_correlation( + reference_image, shifted_image, upsample_factor=10, space="fourier" + ) assert_allclose(result, -cp.array(subpixel_shift), atol=0.05) def test_mismatch_upsampled_region_size(): with pytest.raises(ValueError): - _upsampled_dft( - cp.ones((4, 4)), - upsampled_region_size=[3, 2, 1, 4]) + _upsampled_dft(cp.ones((4, 4)), upsampled_region_size=[3, 2, 1, 4]) def test_mismatch_offsets_size(): with pytest.raises(ValueError): - _upsampled_dft(cp.ones((4, 4)), 3, - axis_offsets=[3, 2, 1, 4]) + _upsampled_dft(cp.ones((4, 4)), 3, axis_offsets=[3, 2, 1, 4]) @pytest.mark.parametrize( - ('shift0', 'shift1'), + ("shift0", "shift1"), itertools.product((100, -100, 350, -350), (100, -100, 350, -350)), ) @cp.testing.with_requires("scikit-image>=0.20") @@ -200,7 +202,7 @@ def test_disambiguate_2d(shift0, shift1): reference = image[slice0] moving = image[slice1] computed_shift, _, _ = phase_cross_correlation( - reference, moving, disambiguate=True, return_error='always' + reference, moving, disambiguate=True, return_error="always" ) np.testing.assert_equal(shift, computed_shift) @@ -213,6 +215,6 @@ def test_disambiguate_zero_shift(): """ image = cp.array(camera()) computed_shift, _, _ = phase_cross_correlation( - image, image, disambiguate=True, return_error='always' + image, image, disambiguate=True, 
return_error="always" ) assert computed_shift == (0, 0) diff --git a/python/cucim/src/cucim/skimage/registration/tests/test_tvl1.py b/python/cucim/src/cucim/skimage/registration/tests/test_tvl1.py index 75403d7b2..fa706a9d3 100644 --- a/python/cucim/src/cucim/skimage/registration/tests/test_tvl1.py +++ b/python/cucim/src/cucim/skimage/registration/tests/test_tvl1.py @@ -27,7 +27,7 @@ def _sin_flow_gen(image0, max_motion=4.5, npics=5): first component and the corresponding warped image. """ - grid = cp.meshgrid(*[cp.arange(n) for n in image0.shape], indexing='ij') + grid = cp.meshgrid(*[cp.arange(n) for n in image0.shape], indexing="ij") grid = cp.stack(grid) # TODO: make upstream scikit-image PR changing gt_flow dtype to float gt_flow = cp.zeros_like(grid, dtype=float) @@ -38,7 +38,7 @@ def _sin_flow_gen(image0, max_motion=4.5, npics=5): return gt_flow, image1 -@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64]) def test_2d_motion(dtype): # Generate synthetic data rnd = cp.random.RandomState(0) @@ -57,7 +57,7 @@ def test_2d_motion(dtype): optical_flow_tvl1(image0, image1, attachment=5, dtype=dtype) -@pytest.mark.parametrize('dtype', [cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype", [cp.float32, cp.float64]) def test_3d_motion(dtype): # Generate synthetic data rnd = np.random.RandomState(0) diff --git a/python/cucim/src/cucim/skimage/restoration/_denoise.py b/python/cucim/src/cucim/skimage/restoration/_denoise.py index bdf72c947..f09829783 100644 --- a/python/cucim/src/cucim/skimage/restoration/_denoise.py +++ b/python/cucim/src/cucim/skimage/restoration/_denoise.py @@ -91,11 +91,14 @@ def _denoise_tv_chambolle_nd(image, weight=0.1, eps=2.0e-4, max_num_iter=200): return out -@utils.deprecate_kwarg({'n_iter_max': 'max_num_iter'}, - removed_version="23.02.00", - deprecated_version="22.06.00") -def denoise_tv_chambolle(image, weight=0.1, eps=2.0e-4, max_num_iter=200, *, - channel_axis=None): +@utils.deprecate_kwarg( + {"n_iter_max": "max_num_iter"}, + removed_version="23.02.00", + deprecated_version="22.06.00", +) +def denoise_tv_chambolle( + image, weight=0.1, eps=2.0e-4, max_num_iter=200, *, channel_axis=None +): r"""Perform total variation denoising in nD. 
Given :math:`f`, a noisy image (input data), @@ -192,7 +195,7 @@ def denoise_tv_chambolle(image, weight=0.1, eps=2.0e-4, max_num_iter=200, *, """ # noqa im_type = image.dtype - if not im_type.kind == 'f': + if not im_type.kind == "f": image = img_as_float(image) # enforce float16->float32 and float128->float64 @@ -204,8 +207,9 @@ def denoise_tv_chambolle(image, weight=0.1, eps=2.0e-4, max_num_iter=200, *, _at = functools.partial(utils.slice_at_axis, axis=channel_axis) out = cp.zeros_like(image) for c in range(image.shape[channel_axis]): - out[_at(c)] = _denoise_tv_chambolle_nd(image[_at(c)], weight, eps, - max_num_iter) + out[_at(c)] = _denoise_tv_chambolle_nd( + image[_at(c)], weight, eps, max_num_iter + ) else: out = _denoise_tv_chambolle_nd(image, weight, eps, max_num_iter) return out diff --git a/python/cucim/src/cucim/skimage/restoration/deconvolution.py b/python/cucim/src/cucim/skimage/restoration/deconvolution.py index f9dda2e30..4484b3b58 100644 --- a/python/cucim/src/cucim/skimage/restoration/deconvolution.py +++ b/python/cucim/src/cucim/skimage/restoration/deconvolution.py @@ -134,8 +134,9 @@ def wiener(image, psf, balance, reg=None, is_real=True, clip=True): areg2 *= areg2 wiener_filter = cp.conj(trans_func) / (atf2 + balance * areg2) if is_real: - deconv = uft.uirfftn(wiener_filter * uft.urfftn(image), - shape=image.shape) + deconv = uft.uirfftn( + wiener_filter * uft.urfftn(image), shape=image.shape + ) else: deconv = uft.uifftn(wiener_filter * uft.ufftn(image)) @@ -146,10 +147,21 @@ def wiener(image, psf, balance, reg=None, is_real=True, clip=True): return deconv -@deprecate_kwarg({'random_state': 'seed'}, removed_version="24.06.00", - deprecated_version="23.08.00") -def unsupervised_wiener(image, psf, reg=None, user_params=None, is_real=True, - clip=True, *, seed=None): +@deprecate_kwarg( + {"random_state": "seed"}, + removed_version="24.06.00", + deprecated_version="23.08.00", +) +def unsupervised_wiener( + image, + psf, + reg=None, + user_params=None, + is_real=True, + clip=True, + *, + seed=None, +): """Unsupervised Wiener-Hunt deconvolution. Return the deconvolution with a Wiener-Hunt approach, where the @@ -251,18 +263,23 @@ def unsupervised_wiener(image, psf, reg=None, user_params=None, is_real=True, """ if user_params is not None: - for s in ('max', 'min'): - if (s + '_iter') in user_params: + for s in ("max", "min"): + if (s + "_iter") in user_params: warning_msg = ( - f'`{s}_iter` is a deprecated key for `user_params`. ' - f'It will be removed in version 1.0. ' - f'Use `{s}_num_iter` instead.' + f"`{s}_iter` is a deprecated key for `user_params`. " + f"It will be removed in version 1.0. " + f"Use `{s}_num_iter` instead." ) warnings.warn(warning_msg, FutureWarning) - user_params[s + '_num_iter'] = user_params.pop(s + '_iter') - - params = {'threshold': 1e-4, 'max_num_iter': 200, - 'min_num_iter': 30, 'burnin': 15, 'callback': None} + user_params[s + "_num_iter"] = user_params.pop(s + "_iter") + + params = { + "threshold": 1e-4, + "max_num_iter": 200, + "min_num_iter": 30, + "burnin": 15, + "callback": None, + } params.update(user_params or {}) if reg is None: @@ -337,41 +354,40 @@ def unsupervised_wiener(image, psf, reg=None, user_params=None, is_real=True, gn_chain.append( rng.gamma( image.size / 2, - 2 / uft.image_quad_norm(data_spectrum - - x_sample * - trans_fct) + 2 / uft.image_quad_norm(data_spectrum - x_sample * trans_fct), ).astype(float_type, copy=False) ) # sample of Eq.
31 p(gx | x^k, gn^k-1, y) gx_chain.append( rng.gamma( - (image.size - 1) / 2, - 2 / uft.image_quad_norm(x_sample * reg) + (image.size - 1) / 2, 2 / uft.image_quad_norm(x_sample * reg) ).astype(float_type, copy=False) ) # current empirical average - if iteration > params['burnin']: + if iteration > params["burnin"]: x_postmean = prev_x_postmean + x_sample - if iteration > (params['burnin'] + 1): - current = x_postmean / (iteration - params['burnin']) - previous = prev_x_postmean / (iteration - params['burnin'] - 1) - delta = cp.sum(cp.abs(current - previous)) / \ - cp.sum(cp.abs(x_postmean)) / (iteration - params['burnin']) + if iteration > (params["burnin"] + 1): + current = x_postmean / (iteration - params["burnin"]) + previous = prev_x_postmean / (iteration - params["burnin"] - 1) + delta = ( + cp.sum(cp.abs(current - previous)) + / cp.sum(cp.abs(x_postmean)) + / (iteration - params["burnin"]) + ) prev_x_postmean = x_postmean # stop of the algorithm - if ( - (iteration > params['min_num_iter']) - and (delta < params['threshold']) + if (iteration > params["min_num_iter"]) and ( + delta < params["threshold"] ): break # Empirical average \approx POSTMEAN Eq. 44 - x_postmean = x_postmean / (iteration - params['burnin']) + x_postmean = x_postmean / (iteration - params["burnin"]) if is_real: x_postmean = uft.uirfft2(x_postmean, shape=image.shape) else: @@ -381,7 +397,7 @@ def unsupervised_wiener(image, psf, reg=None, user_params=None, is_real=True, x_postmean[x_postmean > 1] = 1 x_postmean[x_postmean < -1] = -1 - return (x_postmean, {'noise': gn_chain, 'prior': gx_chain}) + return (x_postmean, {"noise": gn_chain, "prior": gx_chain}) def richardson_lucy(image, psf, num_iter=50, clip=True, filter_epsilon=None): @@ -438,12 +454,12 @@ def richardson_lucy(image, psf, num_iter=50, clip=True, filter_epsilon=None): eps = 1e-12 for _ in range(num_iter): - conv = signal.convolve(im_deconv, psf, mode='same') + eps + conv = signal.convolve(im_deconv, psf, mode="same") + eps if filter_epsilon: relative_blur = cp.where(conv < filter_epsilon, 0, image / conv) else: relative_blur = image / conv - im_deconv *= signal.convolve(relative_blur, psf_mirror, mode='same') + im_deconv *= signal.convolve(relative_blur, psf_mirror, mode="same") if clip: im_deconv[im_deconv > 1] = 1 diff --git a/python/cucim/src/cucim/skimage/restoration/j_invariant.py b/python/cucim/src/cucim/skimage/restoration/j_invariant.py index 1e19a92ca..ffc3cef91 100644 --- a/python/cucim/src/cucim/skimage/restoration/j_invariant.py +++ b/python/cucim/src/cucim/skimage/restoration/j_invariant.py @@ -37,7 +37,7 @@ def _interpolate_image(image, *, multichannel=False): # CuPy Backend: refactored below to avoid for loop if multichannel: conv_filter = conv_filter[..., np.newaxis] - interp = ndi.convolve(image, conv_filter, mode='mirror') + interp = ndi.convolve(image, conv_filter, mode="mirror") return interp @@ -89,8 +89,9 @@ def _generate_grid_slice(shape, *, offset, stride=3): return mask -def denoise_invariant(image, denoise_function, *, stride=4, masks=None, - denoiser_kwargs=None): +def denoise_invariant( + image, denoise_function, *, stride=4, masks=None, denoiser_kwargs=None +): """Apply a J-invariant version of `denoise_function`. 
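As a hedged sketch of how the J-invariant wrapper defined here is typically driven (the noise level and TV weight are illustrative assumptions; `denoise_tv_chambolle` is the cuCIM denoiser reformatted earlier in this diff):

```python
import cupy as cp
import numpy as np
from skimage.data import camera

from cucim.skimage.restoration import denoise_invariant, denoise_tv_chambolle

# Build a noisy float image, then denoise it with a J-invariant version
# of the TV denoiser; masked pixels are filled from their neighbours so
# the denoiser never sees the pixel it is asked to predict.
rng = np.random.default_rng(0)
image = cp.asarray(camera() / 255.0)
noisy = image + 0.1 * cp.asarray(rng.standard_normal(image.shape))
denoised = denoise_invariant(
    noisy, denoise_tv_chambolle, denoiser_kwargs={"weight": 0.1}
)
```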
Parameters @@ -150,20 +151,23 @@ def denoise_invariant(image, denoise_function, *, stride=4, masks=None, if denoiser_kwargs is None: denoiser_kwargs = {} - if 'multichannel' in denoiser_kwargs: - multichannel = denoiser_kwargs['multichannel'] + if "multichannel" in denoiser_kwargs: + multichannel = denoiser_kwargs["multichannel"] else: - multichannel = denoiser_kwargs.get('channel_axis', None) is not None + multichannel = denoiser_kwargs.get("channel_axis", None) is not None interp = _interpolate_image(image, multichannel=multichannel) output = cp.zeros_like(image) if masks is None: spatialdims = image.ndim if not multichannel else image.ndim - 1 - n_masks = stride ** spatialdims - masks = (_generate_grid_slice(image.shape[:spatialdims], - offset=idx, stride=stride) - for idx in range(n_masks)) + n_masks = stride**spatialdims + masks = ( + _generate_grid_slice( + image.shape[:spatialdims], offset=idx, stride=stride + ) + for idx in range(n_masks) + ) for mask in masks: input_image = image.copy() @@ -194,9 +198,15 @@ def _product_from_dict(dictionary): yield dict(zip(keys, element)) -def calibrate_denoiser(image, denoise_function, denoise_parameters, *, - stride=4, approximate_loss=True, - extra_output=False): +def calibrate_denoiser( + image, + denoise_function, + denoise_parameters, + *, + stride=4, + approximate_loss=True, + extra_output=False, +): """Calibrate a denoising function and return optimal J-invariant version. The returned function is partially evaluated with optimal parameter values @@ -270,10 +280,11 @@ def calibrate_denoiser(image, denoise_function, denoise_parameters, *, """ # noqa parameters_tested, losses = _calibrate_denoiser_search( - image, denoise_function, + image, + denoise_function, denoise_parameters=denoise_parameters, stride=stride, - approximate_loss=approximate_loss + approximate_loss=approximate_loss, ) idx = np.argmin(losses) @@ -292,8 +303,14 @@ def calibrate_denoiser(image, denoise_function, denoise_parameters, *, return best_denoise_function -def _calibrate_denoiser_search(image, denoise_function, denoise_parameters, *, - stride=4, approximate_loss=True): +def _calibrate_denoiser_search( + image, + denoise_function, + denoise_parameters, + *, + stride=4, + approximate_loss=True, +): """Return a parameter search history with losses for a denoise function. 
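Building on the previous sketch, `calibrate_denoiser` searches `denoise_parameters` for the setting with the lowest self-supervised loss and returns the J-invariant denoiser partially evaluated there; the weight grid below is an illustrative assumption:

```python
import cupy as cp
import numpy as np
from skimage.data import camera

from cucim.skimage.restoration import calibrate_denoiser, denoise_tv_chambolle

rng = np.random.default_rng(0)
noisy = cp.asarray(camera() / 255.0)
noisy += 0.1 * cp.asarray(rng.standard_normal(noisy.shape))

# Search a grid of TV weights; the returned callable is the calibrated
# J-invariant denoiser, ready to apply to the image.
parameter_ranges = {"weight": np.linspace(0.01, 0.3, 10)}
best_denoiser = calibrate_denoiser(
    noisy, denoise_tv_chambolle, denoise_parameters=parameter_ranges
)
denoised = best_denoiser(noisy)
```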
Parameters @@ -325,28 +342,30 @@ def _calibrate_denoiser_search(image, denoise_function, denoise_parameters, *, losses = [] for denoiser_kwargs in parameters_tested: - if 'multichannel' in denoiser_kwargs: - multichannel = denoiser_kwargs['multichannel'] + if "multichannel" in denoiser_kwargs: + multichannel = denoiser_kwargs["multichannel"] else: - multichannel = \ - denoiser_kwargs.get('channel_axis', None) is not None + multichannel = denoiser_kwargs.get("channel_axis", None) is not None if not approximate_loss: denoised = denoise_invariant( - image, denoise_function, + image, + denoise_function, stride=stride, - denoiser_kwargs=denoiser_kwargs + denoiser_kwargs=denoiser_kwargs, ) loss = mean_squared_error(image, denoised) else: spatialdims = image.ndim if not multichannel else image.ndim - 1 - n_masks = stride ** spatialdims - mask = _generate_grid_slice(image.shape[:spatialdims], - offset=n_masks // 2, stride=stride) + n_masks = stride**spatialdims + mask = _generate_grid_slice( + image.shape[:spatialdims], offset=n_masks // 2, stride=stride + ) masked_denoised = denoise_invariant( - image, denoise_function, + image, + denoise_function, masks=[mask], - denoiser_kwargs=denoiser_kwargs + denoiser_kwargs=denoiser_kwargs, ) loss = mean_squared_error(image[mask], masked_denoised[mask]) diff --git a/python/cucim/src/cucim/skimage/restoration/tests/test_denoise.py b/python/cucim/src/cucim/skimage/restoration/tests/test_denoise.py index 5f76561cd..ae281a634 100644 --- a/python/cucim/src/cucim/skimage/restoration/tests/test_denoise.py +++ b/python/cucim/src/cucim/skimage/restoration/tests/test_denoise.py @@ -36,7 +36,7 @@ pass -@pytest.mark.parametrize('dtype', float_dtypes) +@pytest.mark.parametrize("dtype", float_dtypes) def test_denoise_tv_chambolle_2d(dtype): # astronaut image img = astro_gray.astype(dtype, copy=True) @@ -58,19 +58,21 @@ def test_denoise_tv_chambolle_2d(dtype): grad = ndi.morphological_gradient(cp.asnumpy(img), size=((3, 3))) grad_denoised = ndi.morphological_gradient( - cp.asnumpy(denoised_astro), size=((3, 3))) + cp.asnumpy(denoised_astro), size=((3, 3)) + ) # test if the total variation has decreased assert grad_denoised.dtype == float_dtype - assert np.sqrt((grad_denoised ** 2).sum()) < np.sqrt((grad ** 2).sum()) + assert np.sqrt((grad_denoised**2).sum()) < np.sqrt((grad**2).sum()) -@pytest.mark.parametrize('channel_axis', [0, 1, 2, -1]) +@pytest.mark.parametrize("channel_axis", [0, 1, 2, -1]) def test_denoise_tv_chambolle_multichannel(channel_axis): denoised0 = restoration.denoise_tv_chambolle(astro[..., 0], weight=0.1) img = cp.moveaxis(astro, -1, channel_axis) - denoised = restoration.denoise_tv_chambolle(img, weight=0.1, - channel_axis=channel_axis) + denoised = restoration.denoise_tv_chambolle( + img, weight=0.1, channel_axis=channel_axis + ) _at = functools.partial(slice_at_axis, axis=channel_axis % img.ndim) assert_array_equal(denoised[_at(0)], denoised0) @@ -81,10 +83,10 @@ def test_denoise_tv_chambolle_multichannel(channel_axis): denoised0 = restoration.denoise_tv_chambolle(astro3[..., 0], weight=0.1) astro3 = cp.moveaxis(astro3, -1, channel_axis) - denoised = restoration.denoise_tv_chambolle(astro3, weight=0.1, - channel_axis=channel_axis) - _at = functools.partial(slice_at_axis, - axis=channel_axis % astro3.ndim) + denoised = restoration.denoise_tv_chambolle( + astro3, weight=0.1, channel_axis=channel_axis + ) + _at = functools.partial(slice_at_axis, axis=channel_axis % astro3.ndim) assert_array_equal(denoised[_at(0)], denoised0) @@ -93,8 +95,7 @@ def 
test_denoise_tv_chambolle_float_result_range(): img = astro_gray int_astro = cp.multiply(img, 255).astype(np.uint8) assert cp.max(int_astro) > 1 - denoised_int_astro = restoration.denoise_tv_chambolle(int_astro, - weight=0.1) + denoised_int_astro = restoration.denoise_tv_chambolle(int_astro, weight=0.1) # test if the value range of output float data is within [0.0:1.0] assert denoised_int_astro.dtype == _supported_float_type(int_astro.dtype) assert cp.max(denoised_int_astro) <= 1.0 @@ -104,7 +105,7 @@ def test_denoise_tv_chambolle_float_result_range(): def test_denoise_tv_chambolle_3d(): """Apply the TV denoising algorithm on a 3D image representing a sphere.""" x, y, z = cp.ogrid[0:40, 0:40, 0:40] - mask = (x - 22) ** 2 + (y - 20) ** 2 + (z - 17) ** 2 < 8 ** 2 + mask = (x - 22) ** 2 + (y - 20) ** 2 + (z - 17) ** 2 < 8**2 mask = 100 * mask.astype(float) mask += 60 mask += 20 * cp.random.rand(*mask.shape) @@ -128,7 +129,7 @@ def test_denoise_tv_chambolle_1d(): def test_denoise_tv_chambolle_4d(): - """ TV denoising for a 4D input.""" + """TV denoising for a 4D input.""" im = 255 * cp.random.rand(8, 8, 8, 8) im = im.astype(np.uint8) res = restoration.denoise_tv_chambolle(im, weight=0.1) @@ -150,6 +151,9 @@ def test_denoise_tv_chambolle_weighting(): w = 0.2 denoised_2d = restoration.denoise_tv_chambolle(img2d, weight=w) denoised_4d = restoration.denoise_tv_chambolle(img4d, weight=w) - assert (structural_similarity(denoised_2d, - denoised_4d[:, :, 0, 0], - data_range=1.0) > 0.98) + assert ( + structural_similarity( + denoised_2d, denoised_4d[:, :, 0, 0], data_range=1.0 + ) + > 0.98 + ) diff --git a/python/cucim/src/cucim/skimage/restoration/tests/test_j_invariant.py b/python/cucim/src/cucim/skimage/restoration/tests/test_j_invariant.py index f42bc252f..584e4a711 100644 --- a/python/cucim/src/cucim/skimage/restoration/tests/test_j_invariant.py +++ b/python/cucim/src/cucim/skimage/restoration/tests/test_j_invariant.py @@ -2,6 +2,7 @@ import numpy as np import pytest from skimage.data import camera, chelsea + # from cucim.skimage.restoration import denoise_wavelet from skimage.restoration import denoise_wavelet @@ -38,7 +39,7 @@ def test_denoise_invariant(): assert denoised_mse < original_mse -@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64]) def test_denoise_invariant_color(dtype): denoised_img_color = denoise_invariant( noisy_img_color.astype(dtype), @@ -61,24 +62,27 @@ def test_denoise_invariant_3d(): def test_calibrate_denoiser_extra_output(): - parameter_ranges = {'sigma': np.linspace(0.1, 1, 5) / 2} + parameter_ranges = {"sigma": np.linspace(0.1, 1, 5) / 2} _, (parameters_tested, losses) = calibrate_denoiser( noisy_img, _denoise_wavelet, denoise_parameters=parameter_ranges, - extra_output=True + extra_output=True, ) - all_denoised = [denoise_invariant(noisy_img, _denoise_wavelet, - denoiser_kwargs=denoiser_kwargs) - for denoiser_kwargs in parameters_tested] + all_denoised = [ + denoise_invariant( + noisy_img, _denoise_wavelet, denoiser_kwargs=denoiser_kwargs + ) + for denoiser_kwargs in parameters_tested + ] ground_truth_losses = [float(mse(img, test_img)) for img in all_denoised] assert np.argmin(losses) == np.argmin(ground_truth_losses) def test_calibrate_denoiser(): - parameter_ranges = {'sigma': np.linspace(0.1, 1, 5) / 2} + parameter_ranges = {"sigma": np.linspace(0.1, 1, 5) / 2} denoiser = calibrate_denoiser( noisy_img, _denoise_wavelet, denoise_parameters=parameter_ranges @@ -104,7 +108,7 @@ 
def test_calibrate_denoiser_tv(): def test_input_image_not_modified(): input_image = noisy_img.copy() - parameter_ranges = {'sigma': np.random.random(5) / 2} + parameter_ranges = {"sigma": np.random.random(5) / 2} calibrate_denoiser( input_image, _denoise_wavelet, denoise_parameters=parameter_ranges ) diff --git a/python/cucim/src/cucim/skimage/restoration/tests/test_restoration.py b/python/cucim/src/cucim/skimage/restoration/tests/test_restoration.py index adbf0833b..697364972 100644 --- a/python/cucim/src/cucim/skimage/restoration/tests/test_restoration.py +++ b/python/cucim/src/cucim/skimage/restoration/tests/test_restoration.py @@ -39,7 +39,7 @@ def _get_rtol_atol(dtype): return rtol, atol -@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64]) def test_wiener(dtype): psf = np.ones((5, 5), dtype=dtype) / 25 data = signal.convolve2d(cp.asnumpy(test_img), psf, "same") @@ -53,26 +53,25 @@ def test_wiener(dtype): assert deconvolved.dtype == _supported_float_type(dtype) rtol, atol = _get_rtol_atol(dtype) - path = fetch('restoration/tests/camera_wiener.npy') - cp.testing.assert_allclose( - deconvolved, np.load(path), rtol=rtol, atol=atol) + path = fetch("restoration/tests/camera_wiener.npy") + cp.testing.assert_allclose(deconvolved, np.load(path), rtol=rtol, atol=atol) _, laplacian = uft.laplacian(2, data.shape) otf = uft.ir2tf(psf, data.shape, is_real=False) assert otf.real.dtype == _supported_float_type(dtype) - deconvolved = restoration.wiener(data, otf, 0.05, - reg=laplacian, - is_real=False) + deconvolved = restoration.wiener( + data, otf, 0.05, reg=laplacian, is_real=False + ) assert deconvolved.real.dtype == _supported_float_type(dtype) - cp.testing.assert_allclose(cp.real(deconvolved), - np.load(path), - rtol=rtol, atol=atol) + cp.testing.assert_allclose( + cp.real(deconvolved), np.load(path), rtol=rtol, atol=atol + ) -@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64]) def test_unsupervised_wiener(dtype): psf = np.ones((5, 5), dtype=dtype) / 25 - data = signal.convolve2d(cp.asnumpy(test_img), psf, 'same') + data = signal.convolve2d(cp.asnumpy(test_img), psf, "same") seed = 16829302 # keep old-style RandomState here for compatibility with previously stored # reference data in camera_unsup.npy and camera_unsup2.npy @@ -104,9 +103,11 @@ def test_unsupervised_wiener(dtype): otf, reg=laplacian, is_real=False, - user_params={"callback": lambda x: None, - "max_num_iter": 200, - "min_num_iter": 30}, + user_params={ + "callback": lambda x: None, + "max_num_iter": 200, + "min_num_iter": 30, + }, seed=seed, )[0] assert deconvolved2.real.dtype == float_type @@ -121,17 +122,25 @@ def test_unsupervised_wiener(dtype): def test_unsupervised_wiener_deprecated_user_param(): psf = np.ones((5, 5), dtype=float) / 25 - data = signal.convolve2d(cp.asnumpy(test_img), psf, 'same') + data = signal.convolve2d(cp.asnumpy(test_img), psf, "same") data = cp.array(data) psf = cp.array(psf) otf = uft.ir2tf(psf, data.shape, is_real=False) _, laplacian = uft.laplacian(2, data.shape) - with expected_warnings(["`max_iter` is a deprecated key", - "`min_iter` is a deprecated key", - "`random_state` is a deprecated argument name"]): + with expected_warnings( + [ + "`max_iter` is a deprecated key", + "`min_iter` is a deprecated key", + "`random_state` is a deprecated argument name", + ] + ): restoration.unsupervised_wiener( - data, otf, 
reg=laplacian, is_real=False, - user_params={"max_iter": 200, "min_iter": 30}, random_state=5 + data, + otf, + reg=laplacian, + is_real=False, + user_params={"max_iter": 200, "min_iter": 30}, + random_state=5, ) @@ -161,7 +170,7 @@ def test_image_shape(): def test_richardson_lucy(): rstate = np.random.RandomState(0) psf = np.ones((5, 5)) / 25 - data = signal.convolve2d(cp.asnumpy(test_img), psf, 'same') + data = signal.convolve2d(cp.asnumpy(test_img), psf, "same") np.random.seed(0) data += 0.1 * data.std() * rstate.standard_normal(data.shape) @@ -169,12 +178,12 @@ def test_richardson_lucy(): psf = cp.asarray(psf) deconvolved = restoration.richardson_lucy(data, psf, 5) - path = fetch('restoration/tests/camera_rl.npy') + path = fetch("restoration/tests/camera_rl.npy") cp.testing.assert_allclose(deconvolved, np.load(path), rtol=1e-4) -@pytest.mark.parametrize('dtype_image', [cp.float16, cp.float32, cp.float64]) -@pytest.mark.parametrize('dtype_psf', [cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype_image", [cp.float16, cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype_psf", [cp.float32, cp.float64]) def test_richardson_lucy_filtered(dtype_image, dtype_psf): if dtype_image == cp.float64: atol = 1e-8 @@ -185,12 +194,11 @@ def test_richardson_lucy_filtered(dtype_image, dtype_psf): psf = cp.ones((5, 5), dtype=dtype_psf) / 25 data = cp.array( - signal.convolve2d(cp.asnumpy(test_img_astro), cp.asnumpy(psf), 'same'), - dtype=dtype_image) - deconvolved = restoration.richardson_lucy(data, psf, 5, - filter_epsilon=1e-6) + signal.convolve2d(cp.asnumpy(test_img_astro), cp.asnumpy(psf), "same"), + dtype=dtype_image, + ) + deconvolved = restoration.richardson_lucy(data, psf, 5, filter_epsilon=1e-6) assert deconvolved.dtype == _supported_float_type(data.dtype) - path = fetch('restoration/tests/astronaut_rl.npy') - cp.testing.assert_allclose(deconvolved, np.load(path), rtol=1e-3, - atol=atol) + path = fetch("restoration/tests/astronaut_rl.npy") + cp.testing.assert_allclose(deconvolved, np.load(path), rtol=1e-3, atol=atol) diff --git a/python/cucim/src/cucim/skimage/restoration/uft.py b/python/cucim/src/cucim/skimage/restoration/uft.py index 3a98cfa92..813d500f0 100644 --- a/python/cucim/src/cucim/skimage/restoration/uft.py +++ b/python/cucim/src/cucim/skimage/restoration/uft.py @@ -89,7 +89,7 @@ def uifftn(inarray, dim=None): """ if dim is None: dim = inarray.ndim - outarray = fft.ifftn(inarray, axes=range(-dim, 0), norm='ortho') + outarray = fft.ifftn(inarray, axes=range(-dim, 0), norm="ortho") return outarray @@ -130,7 +130,7 @@ def urfftn(inarray, dim=None): """ if dim is None: dim = inarray.ndim - outarray = fft.rfftn(inarray, axes=range(-dim, 0), norm='ortho') + outarray = fft.rfftn(inarray, axes=range(-dim, 0), norm="ortho") return outarray @@ -175,7 +175,7 @@ def uirfftn(inarray, dim=None, shape=None): """ if dim is None: dim = inarray.ndim - outarray = fft.irfftn(inarray, shape, axes=range(-dim, 0), norm='ortho') + outarray = fft.irfftn(inarray, shape, axes=range(-dim, 0), norm="ortho") return outarray @@ -413,9 +413,9 @@ def ir2tf(imp_resp, shape, dim=None, is_real=True): # problem. Work with odd and even size. 
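    # (Illustrative sketch: for a centred 1-D response [a, b, c, d, e],
    # the roll below shifts by -floor(5 / 2) = -2, giving [c, d, e, a, b],
    # so the filter origin lands on index 0 before the FFT.)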
for axis, axis_size in enumerate(imp_resp.shape): if axis >= imp_resp.ndim - dim: - irpadded = cp.roll(irpadded, - shift=-math.floor(axis_size / 2), - axis=axis) + irpadded = cp.roll( + irpadded, shift=-math.floor(axis_size / 2), axis=axis + ) func = fft.rfftn if is_real else fft.fftn out = func(irpadded, axes=(range(-dim, 0))) return out @@ -455,7 +455,7 @@ def laplacian(ndim, shape, is_real=True, *, dtype=None): """ if dtype is None: dtype = cp.float64 if is_real else cp.complex128 - elif np.dtype(dtype).kind != 'f': + elif np.dtype(dtype).kind != "f": raise ValueError("dtype must be a floating point dtype") # CuPy Backend: assemble the small kernel on the host and then transfer it diff --git a/python/cucim/src/cucim/skimage/segmentation/__init__.py b/python/cucim/src/cucim/skimage/segmentation/__init__.py index 55b65058f..6d9544aa8 100644 --- a/python/cucim/src/cucim/skimage/segmentation/__init__.py +++ b/python/cucim/src/cucim/skimage/segmentation/__init__.py @@ -3,9 +3,13 @@ from ._expand_labels import expand_labels from ._join import join_segmentations, relabel_sequential from .boundaries import find_boundaries, mark_boundaries -from .morphsnakes import (checkerboard_level_set, disk_level_set, - inverse_gaussian_gradient, morphological_chan_vese, - morphological_geodesic_active_contour) +from .morphsnakes import ( + checkerboard_level_set, + disk_level_set, + inverse_gaussian_gradient, + morphological_chan_vese, + morphological_geodesic_active_contour, +) from .random_walker_segmentation import random_walker __all__ = [ diff --git a/python/cucim/src/cucim/skimage/segmentation/_chan_vese.py b/python/cucim/src/cucim/skimage/segmentation/_chan_vese.py index 84a7feb2b..2b7df17af 100644 --- a/python/cucim/src/cucim/skimage/segmentation/_chan_vese.py +++ b/python/cucim/src/cucim/skimage/segmentation/_chan_vese.py @@ -88,7 +88,7 @@ def _cv_calculate_variation(image, phi, mu, lambda1, lambda2, dt): C1-4 notation is taken. """ eta = 1e-16 - P = pad(phi, 1, mode='edge') + P = pad(phi, 1, mode="edge") x_end = P[1:-1, 2:] x_mid = P[1:-1, 1:-1] @@ -111,15 +111,15 @@ def _cv_calculate_variation(image, phi, mu, lambda1, lambda2, dt): @cp.fuse() -def _cv_heavyside(x, eps=1.): +def _cv_heavyside(x, eps=1.0): """Returns the result of a regularised heavyside function of the input value(s). """ - return 0.5 * (1. + (2. / cp.pi) * cp.arctan(x / eps)) + return 0.5 * (1.0 + (2.0 / cp.pi) * cp.arctan(x / eps)) @cp.fuse() -def _cv_delta(x, eps=1.): +def _cv_delta(x, eps=1.0): """Returns the result of a regularised dirac function of the input value(s). """ @@ -134,8 +134,7 @@ def _fused_inplace_eps_div(num, denom, eps): def _cv_calculate_averages(image, H, Hinv): - """Returns the average values 'inside' and 'outside'. - """ + """Returns the average values 'inside' and 'outside'.""" Hsum = cp.sum(H) Hinvsum = cp.sum(Hinv) avg_inside = cp.sum(image * H) @@ -160,7 +159,7 @@ def _cv_difference_from_average_term(image, Hphi, lambda_pos, lambda_neg): """Returns the 'energy' contribution due to the difference from the average value within a region at each point. """ - Hinv = 1. - Hphi + Hinv = 1.0 - Hphi (c1, c2) = _cv_calculate_averages(image, Hphi, Hinv) out = _fused_difference_op1(image, c1, Hphi, lambda_pos) out += _fused_difference_op1(image, c2, Hinv, lambda_neg) @@ -179,9 +178,8 @@ def _fused_edge_length(mu, phi, x_start, x_end, y_start, y_end): def _cv_edge_length_term(phi, mu): - """Returns the 'curvature' of a level set 'phi'. 
- """ - P = pad(phi, 1, mode='edge') + """Returns the 'curvature' of a level set 'phi'.""" + P = pad(phi, 1, mode="edge") y_start = P[:-2, 1:-1] y_end = P[2:, 1:-1] x_start = P[1:-1, :-2] @@ -236,7 +234,7 @@ def _cv_large_disk(image_size): res = cp.ones(image_size, dtype=bool) centerY = int((image_size[0] - 1) / 2) centerX = int((image_size[1] - 1) / 2) - res[centerY, centerX] = 0. + res[centerY, centerX] = 0.0 radius = float(min(centerX, centerY)) out = radius - distance_transform_edt(res) out /= radius @@ -251,7 +249,7 @@ def _cv_small_disk(image_size): res = cp.ones(image_size, dtype=bool) centerY = int((image_size[0] - 1) / 2) centerX = int((image_size[1] - 1) / 2) - res[centerY, centerX] = 0. + res[centerY, centerX] = 0.0 radius = float(min(centerX, centerY)) / 2.0 out = radius - distance_transform_edt(res) out /= radius * 3 @@ -259,14 +257,13 @@ def _cv_small_disk(image_size): def _cv_init_level_set(init_level_set, image_shape, dtype=cp.float64): - """Generates an initial level set function conditional on input arguments. - """ + """Generates an initial level set function conditional on input arguments.""" # noqa: E501 if type(init_level_set) == str: - if init_level_set == 'checkerboard': + if init_level_set == "checkerboard": res = _cv_checkerboard(image_shape, 5, dtype) - elif init_level_set == 'disk': + elif init_level_set == "disk": res = _cv_large_disk(image_shape) - elif init_level_set == 'small disk': + elif init_level_set == "small disk": res = _cv_small_disk(image_shape) else: raise ValueError("Incorrect name for starting level set preset.") @@ -275,11 +272,22 @@ def _cv_init_level_set(init_level_set, image_shape, dtype=cp.float64): return res.astype(dtype, copy=False) -@deprecate_kwarg({'max_iter': 'max_num_iter'}, removed_version="23.02.00", - deprecated_version="22.02.00") -def chan_vese(image, mu=0.25, lambda1=1.0, lambda2=1.0, tol=1e-3, - max_num_iter=500, dt=0.5, init_level_set='checkerboard', - extended_output=False): +@deprecate_kwarg( + {"max_iter": "max_num_iter"}, + removed_version="23.02.00", + deprecated_version="22.02.00", +) +def chan_vese( + image, + mu=0.25, + lambda1=1.0, + lambda2=1.0, + tol=1e-3, + max_num_iter=500, + dt=0.5, + init_level_set="checkerboard", + extended_output=False, +): """Chan-Vese segmentation algorithm. Active contour model by evolving a level set. Can be used to @@ -410,8 +418,10 @@ def chan_vese(image, mu=0.25, lambda1=1.0, lambda2=1.0, tol=1e-3, float_dtype = _supported_float_type(image.dtype) phi = _cv_init_level_set(init_level_set, image.shape, dtype=float_dtype) if type(phi) != cp.ndarray or phi.shape != image.shape: - raise ValueError("The dimensions of initial level set do not " - "match the dimensions of image.") + raise ValueError( + "The dimensions of initial level set do not " + "match the dimensions of image." 
+ ) image = image.astype(float_dtype, copy=False) image = image - cp.min(image) diff --git a/python/cucim/src/cucim/skimage/segmentation/_clear_border.py b/python/cucim/src/cucim/skimage/segmentation/_clear_border.py index 07ad03842..11374d3dc 100644 --- a/python/cucim/src/cucim/skimage/segmentation/_clear_border.py +++ b/python/cucim/src/cucim/skimage/segmentation/_clear_border.py @@ -81,13 +81,15 @@ def clear_border(labels, buffer_size=0, bgval=0, mask=None, *, out=None): raise ValueError("buffer size may not be greater than labels size") if out is not None: - cp.copyto(out, labels, casting='no') + cp.copyto(out, labels, casting="no") else: out = labels.copy() if mask is not None: - err_msg = (f'labels and mask should have the same shape but ' - f'are {out.shape} and {mask.shape}') + err_msg = ( + f"labels and mask should have the same shape but " + f"are {out.shape} and {mask.shape}" + ) if out.shape != mask.shape: raise ValueError(err_msg) if mask.dtype != bool: diff --git a/python/cucim/src/cucim/skimage/segmentation/_expand_labels.py b/python/cucim/src/cucim/skimage/segmentation/_expand_labels.py index d7261f815..c888f6ffe 100644 --- a/python/cucim/src/cucim/skimage/segmentation/_expand_labels.py +++ b/python/cucim/src/cucim/skimage/segmentation/_expand_labels.py @@ -6,21 +6,23 @@ def expand_labels(label_image, distance=1): """Expand labels in label image by ``distance`` pixels without overlapping. - Given a label image, ``expand_labels`` grows label regions (connected components) - outwards by up to ``distance`` pixels without overflowing into neighboring regions. - More specifically, each background pixel that is within Euclidean distance - of <= ``distance`` pixels of a connected component is assigned the label of that - connected component. - Where multiple connected components are within ``distance`` pixels of a background - pixel, the label value of the closest connected component will be assigned (see - Notes for the case of multiple labels at equal distance). + Given a label image, ``expand_labels`` grows label regions (connected + components) outwards by up to ``distance`` pixels without overflowing into + neighboring regions. More specifically, each background pixel that is + within Euclidean distance of <= ``distance`` pixels of a connected + component is assigned the label of that connected component. + + Where multiple connected components are within ``distance`` pixels of a + background pixel, the label value of the closest connected component will + be assigned (see Notes for the case of multiple labels at equal distance). Parameters ---------- label_image : ndarray of dtype int label image distance : float - Euclidean distance in pixels by which to grow the labels. Default is one. + Euclidean distance in pixels by which to grow the labels. Default is + one. Returns ------- @@ -30,12 +32,12 @@ def expand_labels(label_image, distance=1): Notes ----- Where labels are spaced more than ``distance`` pixels are apart, this is - equivalent to a morphological dilation with a disc or hyperball of radius ``distance``. - However, in contrast to a morphological dilation, ``expand_labels`` will - not expand a label region into a neighboring region. + equivalent to a morphological dilation with a disc or hyperball of radius + ``distance``. However, in contrast to a morphological dilation, + ``expand_labels`` will not expand a label region into a neighboring region. 
- This implementation of ``expand_labels`` is derived from CellProfiler [1]_, where - it is known as module "IdentifySecondaryObjects (Distance-N)" [2]_. + This implementation of ``expand_labels`` is derived from CellProfiler [1]_, + where it is known as module "IdentifySecondaryObjects (Distance-N)" [2]_. There is an important edge case when a pixel has the same distance to multiple regions, as it is not defined which region expands into that @@ -44,12 +46,12 @@ def expand_labels(label_image, distance=1): See Also -------- - :func:`cucim.skimage.measure.label`, :func:`cucim.skimage.morphology.dilation` # noqa + :func:`cucim.skimage.measure.label`, :func:`cucim.skimage.morphology.dilation` References ---------- .. [1] https://cellprofiler.org - .. [2] https://github.com/CellProfiler/CellProfiler/blob/082930ea95add7b72243a4fa3d39ae5145995e9c/cellprofiler/modules/identifysecondaryobjects.py#L559 # noqa + .. [2] https://github.com/CellProfiler/CellProfiler/blob/082930ea95add7b72243a4fa3d39ae5145995e9c/cellprofiler/modules/identifysecondaryobjects.py#L559 Examples -------- @@ -77,7 +79,7 @@ def expand_labels(label_image, distance=1): array([[2, 1, 1, 0], [2, 2, 0, 0], [2, 3, 3, 0]]) - """ + """ # noqa: E501 distances, nearest_label_coords = distance_transform_edt( label_image == 0, return_indices=True diff --git a/python/cucim/src/cucim/skimage/segmentation/_join.py b/python/cucim/src/cucim/skimage/segmentation/_join.py index 6227a2fea..b6842d055 100644 --- a/python/cucim/src/cucim/skimage/segmentation/_join.py +++ b/python/cucim/src/cucim/skimage/segmentation/_join.py @@ -43,8 +43,10 @@ def join_segmentations(s1, s2, return_mapping: bool = False): [4, 5, 5, 3]]) """ if s1.shape != s2.shape: - raise ValueError("Cannot join segmentations of different shape. " - f"s1.shape: {s1.shape}, s2.shape: {s2.shape}") + raise ValueError( + "Cannot join segmentations of different shape. " + f"s1.shape: {s1.shape}, s2.shape: {s2.shape}" + ) s1_relabeled, _, backward_map1 = relabel_sequential(s1) s2_relabeled, _, backward_map2 = relabel_sequential(s2) factor = s2.max() + 1 @@ -55,10 +57,12 @@ def join_segmentations(s1, s2, return_mapping: bool = False): # Determine label mapping labels_j = cp.unique(j_initial) labels_s1_relabeled, labels_s2_relabeled = cp.divmod(labels_j, factor) - map_j_to_s1 = ArrayMap(map_j_to_j_initial.in_values, - backward_map1[labels_s1_relabeled]) - map_j_to_s2 = ArrayMap(map_j_to_j_initial.in_values, - backward_map2[labels_s2_relabeled]) + map_j_to_s1 = ArrayMap( + map_j_to_j_initial.in_values, backward_map1[labels_s1_relabeled] + ) + map_j_to_s2 = ArrayMap( + map_j_to_j_initial.in_values, backward_map2[labels_s2_relabeled] + ) return j, map_j_to_s1, map_j_to_s2 diff --git a/python/cucim/src/cucim/skimage/segmentation/boundaries.py b/python/cucim/src/cucim/skimage/segmentation/boundaries.py index 5ac19ff2a..7bdfb24ad 100644 --- a/python/cucim/src/cucim/skimage/segmentation/boundaries.py +++ b/python/cucim/src/cucim/skimage/segmentation/boundaries.py @@ -26,8 +26,9 @@ def _find_boundaries_subpixel(label_img): ndim = label_img.ndim max_label = cp.iinfo(label_img.dtype).max - label_img_expanded = cp.full([(2 * s - 1) for s in label_img.shape], - max_label, label_img.dtype) + label_img_expanded = cp.full( + [(2 * s - 1) for s in label_img.shape], max_label, label_img.dtype + ) pixels = (slice(None, None, 2),) * ndim label_img_expanded[pixels] = label_img @@ -35,7 +36,7 @@ def _find_boundaries_subpixel(label_img): # ElementwiseKernel that counts # of unique values. 
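    # (Original pixels occupy only the even-index lattice sites of the
    # expanded image, so any 3**ndim window can contain at most 2**ndim
    # of them; this bounds the rank-filter loop below.)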
# at most 2**ndim non max_label pixels in a 3**ndim shape neighborhood - max_possible_unique = 2 ** ndim + max_possible_unique = 2**ndim # Count the number of unique values aside from max_label or # the background. @@ -43,7 +44,7 @@ def _find_boundaries_subpixel(label_img): rank_prev = ndi.minimum_filter(label_img_expanded, size=3) for n in range(1, max_possible_unique + 1): rank = ndi.rank_filter(label_img_expanded, n, size=3) - n_unique += (rank != rank_prev) + n_unique += rank != rank_prev rank_prev = rank # Boundaries occur where there is more than 1 unique value @@ -161,26 +162,28 @@ def find_boundaries(label_img, connectivity=1, mode="thick", background=0): [False, True, True, False, False], [False, True, True, False, False]]) """ - if label_img.dtype == 'bool': + if label_img.dtype == "bool": label_img = label_img.astype(cp.uint8) ndim = label_img.ndim footprint = ndi.generate_binary_structure(ndim, connectivity) - if mode != 'subpixel': - boundaries = (dilation(label_img, footprint) - != erosion(label_img, footprint)) - if mode == 'inner': + if mode != "subpixel": + boundaries = dilation(label_img, footprint) != erosion( + label_img, footprint + ) + if mode == "inner": foreground_image = label_img != background boundaries &= foreground_image - elif mode == 'outer': + elif mode == "outer": max_label = cp.iinfo(label_img.dtype).max background_image = label_img == background footprint = ndi.generate_binary_structure(ndim, ndim) inverted_background = cp.array(label_img, copy=True) inverted_background[background_image] = max_label - adjacent_objects = ((dilation(label_img, footprint) != - erosion(inverted_background, footprint)) & - ~background_image) - boundaries &= (background_image | adjacent_objects) + adjacent_objects = ( + dilation(label_img, footprint) + != erosion(inverted_background, footprint) + ) & ~background_image + boundaries &= background_image | adjacent_objects return boundaries else: boundaries = _find_boundaries_subpixel(label_img) @@ -188,9 +191,16 @@ def find_boundaries(label_img, connectivity=1, mode="thick", background=0): # Cupy Backend: added order keyword-only parameter -def mark_boundaries(image, label_img, color=(1, 1, 0), - outline_color=None, mode='outer', background_label=0, - *, order=3): +def mark_boundaries( + image, + label_img, + color=(1, 1, 0), + outline_color=None, + mode="outer", + background_label=0, + *, + order=3, +): """Return image with boundaries between labeled regions highlighted. Parameters @@ -231,15 +241,20 @@ def mark_boundaries(image, label_img, color=(1, 1, 0), marked = marked.astype(float_dtype, copy=False) if marked.ndim == 2: marked = gray2rgb(marked) - if mode == 'subpixel': + if mode == "subpixel": # Here, we want to interpose an extra line of pixels between # each original line - except for the last axis which holds # the RGB information. 
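For readers skimming the reformatted branches above: the four ``mode`` values differ mainly in which pixels get flagged, and ``'subpixel'`` additionally interposes a pixel between every original pair, so the output has ``2 * s - 1`` samples per axis (matching the ``2 - 1/s`` zoom factor used below). A quick sketch, assuming a CUDA device:

```python
import cupy as cp

from cucim.skimage.segmentation import find_boundaries

labels = cp.zeros((5, 5), dtype=cp.uint8)
labels[1:4, 1:4] = 1

thick = find_boundaries(labels, mode="thick")     # pixels on both sides
inner = find_boundaries(labels, mode="inner")     # labeled pixels only
outer = find_boundaries(labels, mode="outer")     # background pixels only
subpx = find_boundaries(labels, mode="subpixel")  # interposed grid

print(thick.shape, subpx.shape)  # (5, 5) (9, 9)
```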
``ndi.zoom`` then performs the (cubic) # interpolation, filling in the values of the interposed pixels - marked = ndi.zoom(marked, [2 - 1 / s for s in marked.shape[:-1]] + [1], - mode='mirror', order=order) - boundaries = find_boundaries(label_img, mode=mode, - background=background_label) + marked = ndi.zoom( + marked, + [2 - 1 / s for s in marked.shape[:-1]] + [1], + mode="mirror", + order=order, + ) + boundaries = find_boundaries( + label_img, mode=mode, background=background_label + ) if outline_color is not None: outlines = dilation(boundaries, square(3)) marked[outlines] = outline_color diff --git a/python/cucim/src/cucim/skimage/segmentation/morphsnakes.py b/python/cucim/src/cucim/skimage/segmentation/morphsnakes.py index 775cb39d5..c974f0ee0 100644 --- a/python/cucim/src/cucim/skimage/segmentation/morphsnakes.py +++ b/python/cucim/src/cucim/skimage/segmentation/morphsnakes.py @@ -11,16 +11,16 @@ from .._shared._gradient import gradient from .._shared.utils import check_nD, deprecate_kwarg -__all__ = ['morphological_chan_vese', - 'morphological_geodesic_active_contour', - 'inverse_gaussian_gradient', - 'disk_level_set', - 'checkerboard_level_set' - ] +__all__ = [ + "morphological_chan_vese", + "morphological_geodesic_active_contour", + "inverse_gaussian_gradient", + "disk_level_set", + "checkerboard_level_set", +] class _fcycle: - def __init__(self, iterable): """Call functions from the iterable each time it is called.""" self.funcs = cycle(iterable) @@ -32,10 +32,12 @@ def __call__(self, *args, **kwargs): # SI and IS operators for 2D and 3D. def _get_P2(): - _P2 = [cp.eye(3), - cp.array([[0, 1, 0]] * 3), - cp.array(np.flipud(np.eye(3))), - cp.array(np.rot90([[0, 1, 0]] * 3))] + _P2 = [ + cp.eye(3), + cp.array([[0, 1, 0]] * 3), + cp.array(np.flipud(np.eye(3))), + cp.array(np.rot90([[0, 1, 0]] * 3)), + ] return _P2 @@ -76,8 +78,12 @@ def inf_sup(u, footprints, workspace=None): return dilations.min(0) -_curvop = _fcycle([lambda u, f, w: sup_inf(inf_sup(u, f, w), f, w), # SIoIS - lambda u, f, w: inf_sup(sup_inf(u, f, w), f, w)]) # ISoSI +_curvop = _fcycle( + [ + lambda u, f, w: sup_inf(inf_sup(u, f, w), f, w), # SIoIS + lambda u, f, w: inf_sup(sup_inf(u, f, w), f, w), + ] +) # ISoSI def _check_input(image, init_level_set): @@ -85,8 +91,10 @@ def _check_input(image, init_level_set): check_nD(image, [2, 3]) if len(image.shape) != len(init_level_set.shape): - raise ValueError("The dimensions of the initial level set do not " - "match the dimensions of the image.") + raise ValueError( + "The dimensions of the initial level set do not " + "match the dimensions of the image." + ) def _init_level_set(init_level_set, image_shape): @@ -95,13 +103,14 @@ def _init_level_set(init_level_set, image_shape): If `init_level_set` is not a string, it is returned as is. """ if isinstance(init_level_set, str): - if init_level_set == 'checkerboard': + if init_level_set == "checkerboard": res = checkerboard_level_set(image_shape) - elif init_level_set == 'disk': + elif init_level_set == "disk": res = disk_level_set(image_shape) else: - raise ValueError("`init_level_set` not in " - "['checkerboard', 'circle', 'disk']") + raise ValueError( + "`init_level_set` not in " "['checkerboard', 'circle', 'disk']" + ) else: res = init_level_set return res @@ -211,7 +220,7 @@ def inverse_gaussian_gradient(image, alpha=100.0, sigma=5.0): Preprocessed image (or volume) suitable for `morphological_geodesic_active_contour`. 
""" - gradnorm = ndi.gaussian_gradient_magnitude(image, sigma, mode='nearest') + gradnorm = ndi.gaussian_gradient_magnitude(image, sigma, mode="nearest") return _fused_inverse_kernel(gradnorm, alpha) @@ -222,7 +231,12 @@ def _abs_grad_kernel(gx, gy): @cp.fuse() def _fused_variance_kernel( - image, c1, c2, lam1, lam2, abs_du, + image, + c1, + c2, + lam1, + lam2, + abs_du, ): difference_term = image - c1 difference_term *= difference_term @@ -238,12 +252,20 @@ def _fused_variance_kernel( return aux_lt0, aux_gt0 -@deprecate_kwarg({'iterations': 'num_iter'}, - removed_version="23.02.00", - deprecated_version="22.02.00") -def morphological_chan_vese(image, num_iter, init_level_set='checkerboard', - smoothing=1, lambda1=1, lambda2=1, - iter_callback=lambda x: None): +@deprecate_kwarg( + {"iterations": "num_iter"}, + removed_version="23.02.00", + deprecated_version="22.02.00", +) +def morphological_chan_vese( + image, + num_iter, + init_level_set="checkerboard", + smoothing=1, + lambda1=1, + lambda2=1, + iter_callback=lambda x: None, +): """Morphological Active Contours without Edges (MorphACWE) Active contours without edges implemented with morphological operators. It @@ -323,13 +345,13 @@ def morphological_chan_vese(image, num_iter, init_level_set='checkerboard', elif _misc.ndim(u) == 3: footprints = _get_P3() else: - raise ValueError("u has an invalid number of dimensions " - "(should be 2 or 3)") + raise ValueError( + "u has an invalid number of dimensions " "(should be 2 or 3)" + ) workspace = cp.empty(((len(footprints),) + u.shape), dtype=u.dtype) iter_callback(u) for i in range(num_iter): - # inside = u > 0 # outside = u <= 0 c0 = (image * (1 - u)).sum() @@ -355,13 +377,20 @@ def morphological_chan_vese(image, num_iter, init_level_set='checkerboard', return u -@deprecate_kwarg({'iterations': 'num_iter'}, - removed_version="23.02.00", - deprecated_version="22.02.00") -def morphological_geodesic_active_contour(gimage, num_iter, - init_level_set='disk', smoothing=1, - threshold='auto', balloon=0, - iter_callback=lambda x: None): +@deprecate_kwarg( + {"iterations": "num_iter"}, + removed_version="23.02.00", + deprecated_version="22.02.00", +) +def morphological_geodesic_active_contour( + gimage, + num_iter, + init_level_set="disk", + smoothing=1, + threshold="auto", + balloon=0, + iter_callback=lambda x: None, +): """Morphological Geodesic Active Contours (MorphGAC). Geodesic active contours implemented with morphological operators. 
It can @@ -444,7 +473,7 @@ def morphological_geodesic_active_contour(gimage, num_iter, _check_input(image, init_level_set) - if threshold == 'auto': + if threshold == "auto": threshold = cp.percentile(image, 40) structure = cp.ones((3,) * len(image.shape), dtype=cp.int8) @@ -460,14 +489,14 @@ def morphological_geodesic_active_contour(gimage, num_iter, elif _misc.ndim(u) == 3: footprints = _get_P3() else: - raise ValueError("u has an invalid number of dimensions " - "(should be 2 or 3)") + raise ValueError( + "u has an invalid number of dimensions " "(should be 2 or 3)" + ) workspace = cp.empty(((len(footprints),) + u.shape), dtype=u.dtype) iter_callback(u) for _ in range(num_iter): - # Balloon if balloon > 0: aux = ndi.binary_dilation(u, structure) diff --git a/python/cucim/src/cucim/skimage/segmentation/random_walker_segmentation.py b/python/cucim/src/cucim/skimage/segmentation/random_walker_segmentation.py index 9702d0d57..717920c32 100644 --- a/python/cucim/src/cucim/skimage/segmentation/random_walker_segmentation.py +++ b/python/cucim/src/cucim/skimage/segmentation/random_walker_segmentation.py @@ -51,12 +51,13 @@ def _make_graph_edges_3d(n_x, n_y, n_z): Graph edges with each column describing a node-id pair. """ vertices = cp.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z)) - edges_deep = cp.stack((vertices[..., :-1].ravel(), - vertices[..., 1:].ravel()), axis=0) - edges_right = cp.stack((vertices[:, :-1].ravel(), - vertices[:, 1:].ravel()), axis=0) - edges_down = cp.stack((vertices[:-1].ravel(), vertices[1:].ravel()), - axis=0) + edges_deep = cp.stack( + (vertices[..., :-1].ravel(), vertices[..., 1:].ravel()), axis=0 + ) + edges_right = cp.stack( + (vertices[:, :-1].ravel(), vertices[:, 1:].ravel()), axis=0 + ) + edges_down = cp.stack((vertices[:-1].ravel(), vertices[1:].ravel()), axis=0) edges = cp.concatenate((edges_deep, edges_right, edges_down), axis=1) return edges @@ -65,13 +66,23 @@ def _compute_weights_3d(data, spacing, beta, eps, multichannel): # Weight calculation is main difference in multispectral version # Original gradient**2 replaced with sum of gradients ** 2 gradients = cp.concatenate( - [cp.diff(data[..., 0], axis=ax).ravel() / spacing[ax] - for ax in [2, 1, 0] if data.shape[ax] > 1], axis=0) + [ + cp.diff(data[..., 0], axis=ax).ravel() / spacing[ax] + for ax in [2, 1, 0] + if data.shape[ax] > 1 + ], + axis=0, + ) gradients *= gradients for channel in range(1, data.shape[-1]): grad = cp.concatenate( - [cp.diff(data[..., channel], axis=ax).ravel() / spacing[ax] - for ax in [2, 1, 0] if data.shape[ax] > 1], axis=0) + [ + cp.diff(data[..., channel], axis=ax).ravel() / spacing[ax] + for ax in [2, 1, 0] + if data.shape[ax] > 1 + ], + axis=0, + ) grad *= grad gradients += grad @@ -89,16 +100,19 @@ def _compute_weights_3d(data, spacing, beta, eps, multichannel): def _build_laplacian(data, spacing, mask, beta, multichannel): l_x, l_y, l_z = data.shape[:3] edges = _make_graph_edges_3d(l_x, l_y, l_z) - weights = _compute_weights_3d(data, spacing, beta=beta, eps=1.e-10, - multichannel=multichannel) + weights = _compute_weights_3d( + data, spacing, beta=beta, eps=1.0e-10, multichannel=multichannel + ) # assert weights.dtype == utils._supported_float_type(data.dtype) if mask is not None: # Remove edges of the graph connected to masked nodes, as well # as corresponding weights of the edges. 
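The two morphological snakes reformatted above share the same calling pattern; the geodesic variant additionally expects a preprocessed edge map such as the one produced by ``inverse_gaussian_gradient``. A minimal end-to-end sketch (parameter values are illustrative, not taken from this diff; assumes a CUDA device):

```python
import cupy as cp

from cucim.skimage.segmentation import (
    disk_level_set,
    inverse_gaussian_gradient,
    morphological_chan_vese,
    morphological_geodesic_active_contour,
)

# Synthetic bright blob on a dark background.
y, x = cp.mgrid[:64, :64]
image = cp.exp(-((x - 32) ** 2 + (y - 32) ** 2) / 200.0)

# Region-based snake: needs no edge map.
acwe = morphological_chan_vese(image, num_iter=20,
                               init_level_set="checkerboard")

# Edge-based snake: build an inverse-gradient image first, then shrink
# a disk onto the blob (``balloon=-1`` contracts the contour).
gimage = inverse_gaussian_gradient(image, alpha=100.0, sigma=2.0)
gac = morphological_geodesic_active_contour(
    gimage,
    num_iter=50,
    init_level_set=disk_level_set(image.shape, radius=28),
    balloon=-1,
)
print(acwe.shape, gac.dtype)  # (64, 64) int8 -- binary level sets
```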
- mask0 = cp.concatenate([mask[..., :-1].ravel(), mask[:, :-1].ravel(), - mask[:-1].ravel()]) - mask1 = cp.concatenate([mask[..., 1:].ravel(), mask[:, 1:].ravel(), - mask[1:].ravel()]) + mask0 = cp.concatenate( + [mask[..., :-1].ravel(), mask[:, :-1].ravel(), mask[:-1].ravel()] + ) + mask1 = cp.concatenate( + [mask[..., 1:].ravel(), mask[:, 1:].ravel(), mask[1:].ravel()] + ) ind_mask = cp.logical_and(mask0, mask1) edges, weights = edges[:, ind_mask], weights[ind_mask] @@ -111,16 +125,18 @@ def _build_laplacian(data, spacing, mask, beta, multichannel): i_indices = edges.ravel() j_indices = edges[::-1].ravel() data = cp.concatenate((weights, weights)) - lap = sparse.coo_matrix((data, (i_indices, j_indices)), - shape=(pixel_nb, pixel_nb)) + lap = sparse.coo_matrix( + (data, (i_indices, j_indices)), shape=(pixel_nb, pixel_nb) + ) # need CSR instead of COO for indexing used later in _build_linear_system lap = lap.tocsr() lap.setdiag(-cp.ravel(lap.sum(axis=0))) return lap -def _build_linear_system(data, spacing, labels, nlabels, mask, - beta, multichannel): +def _build_linear_system( + data, spacing, labels, nlabels, mask, beta, multichannel +): """ Build the matrix A and rhs B of the linear system to solve. A and B are two block of the laplacian of the image graph. @@ -135,8 +151,9 @@ def _build_linear_system(data, spacing, labels, nlabels, mask, unlabeled_indices = indices[~seeds_mask] seeds_indices = indices[seeds_mask] - lap_sparse = _build_laplacian(data, spacing, mask=mask, - beta=beta, multichannel=multichannel) + lap_sparse = _build_laplacian( + data, spacing, mask=mask, beta=beta, multichannel=multichannel + ) rows = lap_sparse[unlabeled_indices, :] lap_sparse = rows[:, unlabeled_indices] @@ -146,8 +163,9 @@ def _build_linear_system(data, spacing, labels, nlabels, mask, # CuPy Backend: sparse matrices are only implemented for floating point # dtypes, so have to convert bool->float32 here seeds_mask = sparse.csc_matrix( - cp.stack([(seeds == lab) for lab in range(1, nlabels + 1)], - axis=-1).astype(np.float32) + cp.stack( + [(seeds == lab) for lab in range(1, nlabels + 1)], axis=-1 + ).astype(np.float32) ) rhs = B.dot(seeds_mask) @@ -155,18 +173,17 @@ def _build_linear_system(data, spacing, labels, nlabels, mask, def _solve_linear_system(lap_sparse, B, tol, mode): - if mode is None: - mode = 'cg_j' + mode = "cg_j" - if mode == 'cg_mg' and not amg_loaded: + if mode == "cg_mg" and not amg_loaded: utils.warn( '"cg_mg" not available. The "cg_j" mode will be used instead.', - stacklevel=2 + stacklevel=2, ) - mode = 'cg_j' + mode = "cg_j" - if mode == 'bf': + if mode == "bf": # toarray will give a C contiguous output as desired B = B.T.toarray() X = cp.zeros_like(B) @@ -174,9 +191,9 @@ def _solve_linear_system(lap_sparse, B, tol, mode): X[n, :] = spsolve(lap_sparse, b) else: maxiter = None - if mode == 'cg': + if mode == "cg": M = None - elif mode == 'cg_j': + elif mode == "cg_j": M = sparse.diags(1.0 / lap_sparse.diagonal()) else: raise NotImplementedError("cg_mg not implemented") @@ -187,12 +204,13 @@ def _solve_linear_system(lap_sparse, B, tol, mode): # maxiter = 30 cg_out = [ cg(lap_sparse, B[:, i].toarray(), tol=tol, M=M, maxiter=maxiter) - for i in range(B.shape[1])] + for i in range(B.shape[1]) + ] if any([info > 0 for _, info in cg_out]): utils.warn( "Conjugate gradient convergence to tolerance not achieved. 
" "Consider decreasing beta to improve system conditionning.", - stacklevel=2 + stacklevel=2, ) X = cp.stack([x for x, _ in cg_out], axis=0) @@ -200,17 +218,18 @@ def _solve_linear_system(lap_sparse, B, tol, mode): def _preprocess(labels): - label_values, inv_idx = cp.unique(labels, return_inverse=True) if label_values.max() <= 0: - raise ValueError('No seeds provided in label image: please ensure ' - 'it contains at least one positive value') + raise ValueError( + "No seeds provided in label image: please ensure " + "it contains at least one positive value" + ) if not (label_values == 0).any(): utils.warn( - 'Random walker only segments unlabeled areas, where labels == 0. ' - 'No zero valued areas in labels were found. Returning provided ' - 'labels.', - stacklevel=2 + "Random walker only segments unlabeled areas, where labels == 0. " + "No zero valued areas in labels were found. Returning provided " + "labels.", + stacklevel=2, ) return labels, None, None, None, None @@ -232,14 +251,15 @@ def _preprocess(labels): if label_values[0] < 0 or cp.any(isolated): # synchronize! isolated = cp.logical_and( cp.logical_not(ndi.binary_propagation(pos_mask, mask=mask)), - null_mask) + null_mask, + ) labels[isolated] = -1 if cp.all(isolated[null_mask]): utils.warn( - 'All unlabeled pixels are isolated, they could not be ' - 'determined by the random walker algorithm.', - stacklevel=2 + "All unlabeled pixels are isolated, they could not be " + "determined by the random walker algorithm.", + stacklevel=2, ) return labels, None, None, None, None @@ -253,7 +273,7 @@ def _preprocess(labels): zero_idx = cp.searchsorted(label_values, cp.array(0)) labels = cp.atleast_3d(inv_idx.reshape(labels.shape) - zero_idx) - nlabels = label_values[zero_idx + 1:].shape[0] + nlabels = label_values[zero_idx + 1 :].shape[0] inds_isolated_seeds = cp.nonzero(isolated) isolated_values = labels[inds_isolated_seeds] @@ -262,9 +282,19 @@ def _preprocess(labels): @utils.channel_as_last_axis(multichannel_output=False) -def random_walker(data, labels, beta=130, mode='cg_j', tol=1.e-3, copy=True, - return_full_prob=False, spacing=None, *, prob_tol=1e-3, - channel_axis=None): +def random_walker( + data, + labels, + beta=130, + mode="cg_j", + tol=1.0e-3, + copy=True, + return_full_prob=False, + spacing=None, + *, + prob_tol=1e-3, + channel_axis=None, +): """Random walker algorithm for segmentation from markers. Random walker algorithm is implemented for gray-level or multichannel @@ -415,12 +445,13 @@ def random_walker(data, labels, beta=130, mode='cg_j', tol=1.e-3, copy=True, """ # Parse input data - if mode not in ('cg_mg', 'cg', 'bf', 'cg_j', None): + if mode not in ("cg_mg", "cg", "bf", "cg_j", None): raise ValueError( "{mode} is not a valid mode. Valid modes are 'cg_mg'," - " 'cg', 'cg_j', 'bf' and None".format(mode=mode)) + " 'cg', 'cg_j', 'bf' and None".format(mode=mode) + ) - if data.dtype.kind == 'f': + if data.dtype.kind == "f": float_dtype = cp.promote_types(data.dtype, cp.float32) else: float_dtype = cp.float64 @@ -431,11 +462,13 @@ def random_walker(data, labels, beta=130, mode='cg_j', tol=1.e-3, copy=True, elif len(spacing) == labels.ndim: if len(spacing) == 2: # Need a dummy spacing for singleton 3rd dim - spacing = cp.r_[spacing, 1.] 
+ spacing = cp.r_[spacing, 1.0] spacing = cp.asarray(spacing, dtype=float_dtype) else: - raise ValueError('Input argument `spacing` incorrect, should be an ' - 'iterable with one number per spatial dimension.') + raise ValueError( + "Input argument `spacing` incorrect, should be an " + "iterable with one number per spatial dimension." + ) # This algorithm expects 4-D arrays of floats, where the first three # dimensions are spatial and the final denotes channels. 2-D images have @@ -446,17 +479,20 @@ def random_walker(data, labels, beta=130, mode='cg_j', tol=1.e-3, copy=True, multichannel = channel_axis is not None if not multichannel: if data.ndim not in (2, 3): - raise ValueError('For non-multichannel input, data must be of ' - 'dimension 2 or 3.') + raise ValueError( + "For non-multichannel input, data must be of " + "dimension 2 or 3." + ) if data.shape != labels.shape: - raise ValueError('Incompatible data and labels shapes.') + raise ValueError("Incompatible data and labels shapes.") data = cp.atleast_3d(img_as_float(data))[..., cp.newaxis] else: if data.ndim not in (3, 4): - raise ValueError('For multichannel input, data must have 3 or 4 ' - 'dimensions.') + raise ValueError( + "For multichannel input, data must have 3 or 4 " "dimensions." + ) if data.shape[:-1] != labels.shape: - raise ValueError('Incompatible data and labels shapes.') + raise ValueError("Incompatible data and labels shapes.") data = img_as_float(data) if data.ndim == 3: # 2D multispectral, needs singleton in 3rd axis data = data[:, :, cp.newaxis, :] @@ -467,8 +503,9 @@ def random_walker(data, labels, beta=130, mode='cg_j', tol=1.e-3, copy=True, if copy: labels = cp.copy(labels) - (labels, nlabels, mask, - inds_isolated_seeds, isolated_values) = _preprocess(labels) + (labels, nlabels, mask, inds_isolated_seeds, isolated_values) = _preprocess( + labels + ) if isolated_values is None: # No non isolated zero valued areas in labels were @@ -477,14 +514,15 @@ def random_walker(data, labels, beta=130, mode='cg_j', tol=1.e-3, copy=True, # Return the concatenation of the masks of each unique label unique_labels = cp.unique(labels) labels = cp.atleast_3d(labels) - return cp.concatenate([labels == lab - for lab in unique_labels if lab > 0], - axis=-1) + return cp.concatenate( + [labels == lab for lab in unique_labels if lab > 0], axis=-1 + ) return labels # Build the linear system (lap_sparse, B) - lap_sparse, B = _build_linear_system(data, spacing, labels, nlabels, mask, - beta, multichannel) + lap_sparse, B = _build_linear_system( + data, spacing, labels, nlabels, mask, beta, multichannel + ) # Solve the linear system lap_sparse X = B # where X[i, j] is the probability that a marker of label i arrives @@ -493,8 +531,8 @@ def random_walker(data, labels, beta=130, mode='cg_j', tol=1.e-3, copy=True, if X.min() < -prob_tol or X.max() > 1 + prob_tol: utils.warn( - 'The probability range is outside [0, 1] given the tolerance ' - '`prob_tol`. Consider decreasing `beta` and/or decreasing `tol`.' + "The probability range is outside [0, 1] given the tolerance " + "`prob_tol`. Consider decreasing `beta` and/or decreasing `tol`." 
) # Build the output according to return_full_prob value diff --git a/python/cucim/src/cucim/skimage/segmentation/tests/test_boundaries.py b/python/cucim/src/cucim/skimage/segmentation/tests/test_boundaries.py index 261ef7382..cad5f5346 100644 --- a/python/cucim/src/cucim/skimage/segmentation/tests/test_boundaries.py +++ b/python/cucim/src/cucim/skimage/segmentation/tests/test_boundaries.py @@ -45,7 +45,7 @@ def test_find_boundaries_bool(): @pytest.mark.parametrize( - 'dtype', [cp.uint8, cp.float16, cp.float32, cp.float64] + "dtype", [cp.uint8, cp.float16, cp.float32, cp.float64] ) def test_mark_boundaries(dtype): image = cp.zeros((10, 10), dtype=dtype) @@ -64,7 +64,7 @@ def test_mark_boundaries(dtype): [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]) # fmt: on - marked = mark_boundaries(image, label_image, color=white, mode='thick') + marked = mark_boundaries(image, label_image, color=white, mode="thick") assert marked.dtype == _supported_float_type(dtype) result = cp.mean(marked, axis=-1) assert_array_equal(result, ref) @@ -80,8 +80,9 @@ def test_mark_boundaries(dtype): [0, 2, 2, 2, 2, 2, 2, 2, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]) # fmt: on - marked = mark_boundaries(image, label_image, color=white, - outline_color=(2, 2, 2), mode='thick') + marked = mark_boundaries( + image, label_image, color=white, outline_color=(2, 2, 2), mode="thick" + ) result = cp.mean(marked, axis=-1) assert_array_equal(result, ref) @@ -102,12 +103,12 @@ def test_mark_boundaries_bool(): [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]) # fmt: on - marked = mark_boundaries(image, label_image, color=white, mode='thick') + marked = mark_boundaries(image, label_image, color=white, mode="thick") result = cp.mean(marked, axis=-1) assert_array_equal(result, ref) -@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64]) def test_mark_boundaries_subpixel(dtype): # fmt: off labels = cp.array([[0, 0, 0, 0], @@ -121,7 +122,7 @@ def test_mark_boundaries_subpixel(dtype): # Note: use np.round until cp.around is fixed upstream image = cp.asarray(np.round(np.random.rand(*labels.shape), 2)) image = image.astype(dtype, copy=False) - marked = mark_boundaries(image, labels, color=white, mode='subpixel') + marked = mark_boundaries(image, labels, color=white, mode="subpixel") assert marked.dtype == _supported_float_type(dtype) marked_proj = cp.asarray(cp.around(cp.mean(marked, axis=-1), 2)) diff --git a/python/cucim/src/cucim/skimage/segmentation/tests/test_chan_vese.py b/python/cucim/src/cucim/skimage/segmentation/tests/test_chan_vese.py index cf092d900..d14da55b6 100644 --- a/python/cucim/src/cucim/skimage/segmentation/tests/test_chan_vese.py +++ b/python/cucim/src/cucim/skimage/segmentation/tests/test_chan_vese.py @@ -7,7 +7,7 @@ from cucim.skimage.segmentation import chan_vese -@pytest.mark.parametrize('dtype', [cp.float32, cp.float64]) +@pytest.mark.parametrize("dtype", [cp.float32, cp.float64]) def test_chan_vese_flat_level_set(dtype): # because the algorithm evolves the level set around the # zero-level, it the level-set has no zero level, the algorithm @@ -39,7 +39,7 @@ def test_chan_vese_simple_shape(): @pytest.mark.parametrize( - 'dtype', [cp.uint8, cp.float16, cp.float32, cp.float64] + "dtype", [cp.uint8, cp.float16, cp.float32, cp.float64] ) def test_chan_vese_extended_output(dtype): img = cp.zeros((10, 10), dtype=dtype) @@ -53,15 +53,20 @@ def test_chan_vese_extended_output(dtype): def 
test_chan_vese_remove_noise(): ref = cp.zeros((10, 10)) - ref[1:6, 1:6] = cp.array([[0, 1, 1, 1, 0], - [1, 1, 1, 1, 1], - [1, 1, 1, 1, 1], - [1, 1, 1, 1, 1], - [0, 1, 1, 1, 0]]) + ref[1:6, 1:6] = cp.array( + [ + [0, 1, 1, 1, 0], + [1, 1, 1, 1, 1], + [1, 1, 1, 1, 1], + [1, 1, 1, 1, 1], + [0, 1, 1, 1, 0], + ] + ) img = ref.copy() img[8, 3] = 1 - result = chan_vese(img, mu=0.3, tol=1e-3, max_num_iter=100, dt=10, - init_level_set="disk").astype(float) + result = chan_vese( + img, mu=0.3, tol=1e-3, max_num_iter=100, dt=10, init_level_set="disk" + ).astype(float) assert_array_equal(result, ref) @@ -77,8 +82,9 @@ def test_chan_vese_gap_closing(): ref[8:15, :] = cp.ones((7, 20)) img = ref.copy() img[:, 6] = cp.zeros(20) - result = chan_vese(img, mu=0.7, tol=1e-3, max_num_iter=1000, dt=1000, - init_level_set="disk").astype(float) + result = chan_vese( + img, mu=0.7, tol=1e-3, max_num_iter=1000, dt=1000, init_level_set="disk" + ).astype(float) assert_array_equal(result, ref) diff --git a/python/cucim/src/cucim/skimage/segmentation/tests/test_clear_border.py b/python/cucim/src/cucim/skimage/segmentation/tests/test_clear_border.py index 8c21d9e19..aa70b185e 100644 --- a/python/cucim/src/cucim/skimage/segmentation/tests/test_clear_border.py +++ b/python/cucim/src/cucim/skimage/segmentation/tests/test_clear_border.py @@ -5,6 +5,7 @@ def test_clear_border(): + # fmt: off image = cp.array( [[0, 0, 0, 0, 0, 0, 0, 1, 0], [1, 1, 0, 0, 1, 0, 0, 1, 0], @@ -13,6 +14,7 @@ def test_clear_border(): [0, 1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]] ) + # fmt: on # test default case result = clear_border(image.copy()) @@ -30,12 +32,16 @@ def test_clear_border(): assert_array_equal(result, cp.full_like(image, 2)) # test mask - mask = cp.array([[0, 0, 1, 1, 1, 1, 1, 1, 1], - [0, 0, 1, 1, 1, 1, 1, 1, 1], - [1, 1, 1, 1, 1, 1, 1, 1, 1], - [1, 1, 1, 1, 1, 1, 1, 1, 1], - [1, 1, 1, 1, 1, 1, 1, 1, 1], - [1, 1, 1, 1, 1, 1, 1, 1, 1]]).astype(bool) + mask = cp.array( + [ + [0, 0, 1, 1, 1, 1, 1, 1, 1], + [0, 0, 1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1, 1, 1], + ] + ).astype(bool) result = clear_border(image.copy(), mask=mask) ref = image.copy() ref[1:3, 0:2] = 0 @@ -43,6 +49,7 @@ def test_clear_border(): def test_clear_border_3d(): + # fmt: off image = cp.array( [[[0, 0, 0, 0], [0, 0, 0, 0], @@ -57,6 +64,7 @@ def test_clear_border_3d(): [0, 0, 0, 0], [0, 0, 0, 0]]] ) + # fmt: on # test default case result = clear_border(image.copy()) ref = image.copy() @@ -78,6 +86,7 @@ def test_clear_border_3d(): def test_clear_border_non_binary(): + # fmt: off image = cp.array([[1, 2, 3, 1, 2], [3, 3, 5, 4, 2], [3, 4, 5, 4, 2], @@ -88,12 +97,13 @@ def test_clear_border_non_binary(): [0, 0, 5, 4, 0], [0, 4, 5, 4, 0], [0, 0, 0, 0, 0]]) - + # fmt: on assert_array_equal(result, expected) assert not cp.all(image == result) def test_clear_border_non_binary_3d(): + # fmt: off image3d = cp.array( [[[1, 2, 3, 1, 2], [3, 3, 3, 4, 2], @@ -124,12 +134,14 @@ def test_clear_border_non_binary_3d(): [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]] ) + # fmt: on assert_array_equal(result, expected) assert not cp.all(image3d == result) def test_clear_border_non_binary_inplace(): + # fmt: off image = cp.array([[1, 2, 3, 1, 2], [3, 3, 5, 4, 2], [3, 4, 5, 4, 2], @@ -139,12 +151,13 @@ def test_clear_border_non_binary_inplace(): [0, 0, 5, 4, 0], [0, 4, 5, 4, 0], [0, 0, 0, 0, 0]]) - + # fmt: on assert_array_equal(result, expected) assert_array_equal(image, result) def 
test_clear_border_non_binary_inplace_3d(): + # fmt: off image3d = cp.array( [[[1, 2, 3, 1, 2], [3, 3, 3, 4, 2], @@ -175,12 +188,13 @@ def test_clear_border_non_binary_inplace_3d(): [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]] ) - + # fmt: on assert_array_equal(result, expected) assert_array_equal(image3d, result) def test_clear_border_non_binary_out(): + # fmt: off image = cp.array([[1, 2, 3, 1, 2], [3, 3, 5, 4, 2], [3, 4, 5, 4, 2], @@ -191,12 +205,13 @@ def test_clear_border_non_binary_out(): [0, 0, 5, 4, 0], [0, 4, 5, 4, 0], [0, 0, 0, 0, 0]]) - + # fmt: on assert_array_equal(result, expected) assert_array_equal(out, result) def test_clear_border_non_binary_out_3d(): + # fmt: off image3d = cp.array( [[[1, 2, 3, 1, 2], [3, 3, 3, 4, 2], @@ -228,6 +243,7 @@ def test_clear_border_non_binary_out_3d(): [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]] ) + # fmt: on assert_array_equal(result, expected) assert_array_equal(out, result) diff --git a/python/cucim/src/cucim/skimage/segmentation/tests/test_expand_labels.py b/python/cucim/src/cucim/skimage/segmentation/tests/test_expand_labels.py index 2fe1df30c..d839ecd88 100644 --- a/python/cucim/src/cucim/skimage/segmentation/tests/test_expand_labels.py +++ b/python/cucim/src/cucim/skimage/segmentation/tests/test_expand_labels.py @@ -103,27 +103,24 @@ [0, 0, 0, 0], [0, 0, 0, 0], ], - [ [0, 0, 0, 0], [0, 3, 3, 0], [0, 0, 0, 0], [0, 0, 0, 0], ], - [ [0, 0, 0, 0], [0, 3, 0, 0], [0, 0, 0, 0], [0, 0, 5, 0], ], - [ [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 5, 0], - ] + ], ] ) @@ -135,33 +132,28 @@ [3, 3, 3, 3], [0, 3, 5, 0], ], - [ [3, 3, 3, 3], [3, 3, 3, 3], [3, 3, 3, 3], [0, 5, 5, 5], ], - [ [3, 3, 3, 3], [3, 3, 3, 3], [3, 3, 5, 5], [5, 5, 5, 5], ], - [ [3, 3, 3, 0], [3, 3, 3, 0], [3, 3, 5, 5], [5, 5, 5, 5], - ] + ], ] ) -SAMPLE_EDGECASE_BEHAVIOUR = cp.array( - [[0, 1, 0, 0], [2, 0, 0, 0], [0, 3, 0, 0]] -) +SAMPLE_EDGECASE_BEHAVIOUR = cp.array([[0, 1, 0, 0], [2, 0, 0, 0], [0, 3, 0, 0]]) @pytest.mark.parametrize( @@ -172,8 +164,8 @@ (SAMPLE2D, SAMPLE2D_EXPANDED_1_5, 1.5), (EDGECASE1D, EDGECASE1D_EXPANDED_3, 3), (EDGECASE2D, EDGECASE2D_EXPANDED_4, 4), - (SAMPLE3D, SAMPLE3D_EXPANDED_2, 2) - ] + (SAMPLE3D, SAMPLE3D_EXPANDED_2, 2), + ], ) def test_expand_labels(input_array, expected_output, expand_distance): if input_array.ndim == 1: @@ -184,8 +176,8 @@ def test_expand_labels(input_array, expected_output, expand_distance): assert_array_equal(expanded, expected_output) -@pytest.mark.parametrize('ndim', [2, 3]) -@pytest.mark.parametrize('distance', range(6)) +@pytest.mark.parametrize("ndim", [2, 3]) +@pytest.mark.parametrize("distance", range(6)) def test_binary_blobs(ndim, distance): """Check some invariants with label expansion. 
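Two invariants of label expansion hold for any input and any distance: seed pixels keep their original label, and expansion only ever adds labeled pixels. A sketch checking both on random data (illustrative only; ``cp.random`` stands in for a real test image):

```python
import cupy as cp

from cucim.skimage.measure import label
from cucim.skimage.segmentation import expand_labels

cp.random.seed(0)
labels = label(cp.random.random((64, 64)) > 0.9)  # sparse random seeds

for distance in range(6):
    expanded = expand_labels(labels, distance)
    # Seed pixels never change their label ...
    assert bool((expanded[labels > 0] == labels[labels > 0]).all())
    # ... and expansion is monotone: labeled pixels are only ever added.
    assert int((expanded > 0).sum()) >= int((labels > 0).sum())
```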
@@ -211,7 +203,7 @@ def test_binary_blobs(ndim, distance): def test_edge_case_behaviour(): - """ Check edge case behavior to detect upstream changes + """Check edge case behavior to detect upstream changes For edge cases where a pixel has the same distance to several regions, lexicographical order seems to determine which region gets to expand diff --git a/python/cucim/src/cucim/skimage/segmentation/tests/test_join.py b/python/cucim/src/cucim/skimage/segmentation/tests/test_join.py index e68df7b8e..03e8a12c6 100644 --- a/python/cucim/src/cucim/skimage/segmentation/tests/test_join.py +++ b/python/cucim/src/cucim/skimage/segmentation/tests/test_join.py @@ -113,8 +113,9 @@ def test_relabel_sequential_signed_overflow(): imax = cp.iinfo(cp.int32).max labels = cp.array([0, 1, 99, 42, 42], dtype=cp.int32) output, fw, inv = relabel_sequential(labels, offset=imax) - reference = cp.array([0, imax, imax + 2, imax + 1, imax + 1], - dtype=cp.uint32) + reference = cp.array( + [0, imax, imax + 2, imax + 1, imax + 1], dtype=cp.uint32 + ) assert_array_equal(output, reference) assert output.dtype == reference.dtype @@ -126,12 +127,23 @@ def test_very_large_labels(): assert int(cp.max(output)) == imax + 2 -@pytest.mark.parametrize('dtype', (np.byte, np.short, np.intc, int, - np.longlong, np.ubyte, np.ushort, - np.uintc, np.uint, np.ulonglong)) -@pytest.mark.parametrize('data_already_sequential', (False, True)) -def test_relabel_sequential_int_dtype_stability(data_already_sequential, - dtype): +@pytest.mark.parametrize( + "dtype", + ( + np.byte, + np.short, + np.intc, + int, + np.longlong, + np.ubyte, + np.ushort, + np.uintc, + np.uint, + np.ulonglong, + ), +) +@pytest.mark.parametrize("data_already_sequential", (False, True)) +def test_relabel_sequential_int_dtype_stability(data_already_sequential, dtype): if data_already_sequential: ar = cp.asarray([1, 3, 0, 2, 5, 4], dtype=dtype) else: @@ -156,10 +168,9 @@ def test_relabel_sequential_negative_values(): relabel_sequential(ar) -@pytest.mark.parametrize('offset', (0, -3)) -@pytest.mark.parametrize('data_already_sequential', (False, True)) -def test_relabel_sequential_nonpositive_offset(data_already_sequential, - offset): +@pytest.mark.parametrize("offset", (0, -3)) +@pytest.mark.parametrize("data_already_sequential", (False, True)) +def test_relabel_sequential_nonpositive_offset(data_already_sequential, offset): if data_already_sequential: ar = cp.array([1, 3, 0, 2, 5, 4]) else: @@ -168,11 +179,12 @@ def test_relabel_sequential_nonpositive_offset(data_already_sequential, relabel_sequential(ar, offset=offset) -@pytest.mark.parametrize('offset', (1, 5)) -@pytest.mark.parametrize('with0', (False, True)) -@pytest.mark.parametrize('input_starts_at_offset', (False, True)) -def test_relabel_sequential_already_sequential(offset, with0, - input_starts_at_offset): +@pytest.mark.parametrize("offset", (1, 5)) +@pytest.mark.parametrize("with0", (False, True)) +@pytest.mark.parametrize("input_starts_at_offset", (False, True)) +def test_relabel_sequential_already_sequential( + offset, with0, input_starts_at_offset +): if with0: ar = cp.array([1, 3, 0, 2, 5, 4]) else: diff --git a/python/cucim/src/cucim/skimage/segmentation/tests/test_morphsnakes.py b/python/cucim/src/cucim/skimage/segmentation/tests/test_morphsnakes.py index 8067ac0f1..fa64820aa 100644 --- a/python/cucim/src/cucim/skimage/segmentation/tests/test_morphsnakes.py +++ b/python/cucim/src/cucim/skimage/segmentation/tests/test_morphsnakes.py @@ -3,15 +3,17 @@ from cupy.testing import assert_array_equal from 
cucim.skimage._shared.testing import expected_warnings -from cucim.skimage.segmentation import (disk_level_set, - inverse_gaussian_gradient, - morphological_chan_vese, - morphological_geodesic_active_contour) +from cucim.skimage.segmentation import ( + disk_level_set, + inverse_gaussian_gradient, + morphological_chan_vese, + morphological_geodesic_active_contour, +) def gaussian_blob(): coords = cp.mgrid[-5:6, -5:6] - sqrdistances = (coords ** 2).sum(0) + sqrdistances = (coords**2).sum(0) return cp.exp(-sqrdistances / 10) @@ -22,8 +24,9 @@ def test_morphsnakes_incorrect_image_shape(): with pytest.raises(ValueError): morphological_chan_vese(img, num_iter=1, init_level_set=ls) with pytest.raises(ValueError): - morphological_geodesic_active_contour(img, num_iter=1, - init_level_set=ls) + morphological_geodesic_active_contour( + img, num_iter=1, init_level_set=ls + ) def test_morphsnakes_incorrect_ndim(): @@ -33,8 +36,9 @@ def test_morphsnakes_incorrect_ndim(): with pytest.raises(ValueError): morphological_chan_vese(img, num_iter=1, init_level_set=ls) with pytest.raises(ValueError): - morphological_geodesic_active_contour(img, num_iter=1, - init_level_set=ls) + morphological_geodesic_active_contour( + img, num_iter=1, init_level_set=ls + ) def test_morphsnakes_black(): @@ -47,15 +51,15 @@ def test_morphsnakes_black(): acwe_ls = morphological_chan_vese(img, num_iter=6, init_level_set=ls) assert_array_equal(acwe_ls, ref_zeros) - gac_ls = morphological_geodesic_active_contour(img, num_iter=6, - init_level_set=ls) + gac_ls = morphological_geodesic_active_contour( + img, num_iter=6, init_level_set=ls + ) assert_array_equal(gac_ls, ref_zeros) - gac_ls2 = morphological_geodesic_active_contour(img, num_iter=6, - init_level_set=ls, - balloon=1, threshold=-1, - smoothing=0) + gac_ls2 = morphological_geodesic_active_contour( + img, num_iter=6, init_level_set=ls, balloon=1, threshold=-1, smoothing=0 + ) assert_array_equal(gac_ls2, ref_ones) @@ -73,8 +77,9 @@ def test_morphsnakes_iterations_kwarg_deprecation(): assert_array_equal(acwe_ls, ref_zeros) with expected_warnings(["`iterations` is a deprecated argument"]): - gac_ls = morphological_geodesic_active_contour(img, iterations=6, - init_level_set=ls) + gac_ls = morphological_geodesic_active_contour( + img, iterations=6, init_level_set=ls + ) assert_array_equal(gac_ls, ref_zeros) @@ -110,9 +115,9 @@ def test_morphsnakes_simple_shape_geodesic_active_contour(): [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=cp.int8) # fmt: on - gac_ls = morphological_geodesic_active_contour(gimg, num_iter=10, - init_level_set=ls, - balloon=-1) + gac_ls = morphological_geodesic_active_contour( + gimg, num_iter=10, init_level_set=ls, balloon=-1 + ) assert_array_equal(gac_ls, ref) assert gac_ls.dtype == cp.int8 @@ -120,7 +125,7 @@ def test_morphsnakes_simple_shape_geodesic_active_contour(): def test_init_level_sets(): image = cp.zeros((6, 6)) - checkerboard_ls = morphological_chan_vese(image, 0, 'checkerboard') + checkerboard_ls = morphological_chan_vese(image, 0, "checkerboard") # fmt: off checkerboard_ref = cp.array([[0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 1], @@ -150,8 +155,7 @@ def test_morphsnakes_3d(): def callback(x): evolution.append(x.sum()) - ls = morphological_chan_vese(image, 5, 'disk', - iter_callback=callback) + ls = morphological_chan_vese(image, 5, "disk", iter_callback=callback) # Check that the initial disk level set is correct assert evolution[0] == 81 diff --git a/python/cucim/src/cucim/skimage/segmentation/tests/test_random_walker.py 
b/python/cucim/src/cucim/skimage/segmentation/tests/test_random_walker.py index f0860182a..efcae02c5 100644 --- a/python/cucim/src/cucim/skimage/segmentation/tests/test_random_walker.py +++ b/python/cucim/src/cucim/skimage/segmentation/tests/test_random_walker.py @@ -24,12 +24,15 @@ def make_2d_syntheticdata(lx, ly=None): np.random.seed(1234) data = np.zeros((lx, ly)) + 0.1 * np.random.randn(lx, ly) small_l = int(lx // 5) - data[lx // 2 - small_l:lx // 2 + small_l, - ly // 2 - small_l:ly // 2 + small_l] = 1 - data[lx // 2 - small_l + 1:lx // 2 + small_l - 1, - ly // 2 - small_l + 1:ly // 2 + small_l - 1] = ( - 0.1 * np.random.randn(2 * small_l - 2, 2 * small_l - 2)) - data[lx // 2 - small_l, ly // 2 - small_l // 8:ly // 2 + small_l // 8] = 0 + data[ + lx // 2 - small_l : lx // 2 + small_l, + ly // 2 - small_l : ly // 2 + small_l, + ] = 1 + data[ + lx // 2 - small_l + 1 : lx // 2 + small_l - 1, + ly // 2 - small_l + 1 : ly // 2 + small_l - 1, + ] = 0.1 * np.random.randn(2 * small_l - 2, 2 * small_l - 2) + data[lx // 2 - small_l, ly // 2 - small_l // 8 : ly // 2 + small_l // 8] = 0 seeds = np.zeros_like(data) seeds[lx // 5, ly // 5] = 1 seeds[lx // 2 + small_l // 4, ly // 2 - small_l // 4] = 2 @@ -44,26 +47,32 @@ def make_3d_syntheticdata(lx, ly=None, lz=None): np.random.seed(1234) data = np.zeros((lx, ly, lz)) + 0.1 * np.random.randn(lx, ly, lz) small_l = int(lx // 5) - data[lx // 2 - small_l:lx // 2 + small_l, - ly // 2 - small_l:ly // 2 + small_l, - lz // 2 - small_l:lz // 2 + small_l] = 1 - data[lx // 2 - small_l + 1:lx // 2 + small_l - 1, - ly // 2 - small_l + 1:ly // 2 + small_l - 1, - lz // 2 - small_l + 1:lz // 2 + small_l - 1] = 0 + data[ + lx // 2 - small_l : lx // 2 + small_l, + ly // 2 - small_l : ly // 2 + small_l, + lz // 2 - small_l : lz // 2 + small_l, + ] = 1 + data[ + lx // 2 - small_l + 1 : lx // 2 + small_l - 1, + ly // 2 - small_l + 1 : ly // 2 + small_l - 1, + lz // 2 - small_l + 1 : lz // 2 + small_l - 1, + ] = 0 # make a hole hole_size = np.max([1, small_l // 8]) - data[lx // 2 - small_l, - ly // 2 - hole_size:ly // 2 + hole_size, - lz // 2 - hole_size:lz // 2 + hole_size] = 0 + data[ + lx // 2 - small_l, + ly // 2 - hole_size : ly // 2 + hole_size, + lz // 2 - hole_size : lz // 2 + hole_size, + ] = 0 seeds = np.zeros_like(data) seeds[lx // 5, ly // 5, lz // 5] = 1 - seeds[lx // 2 + small_l // 4, - ly // 2 - small_l // 4, - lz // 2 - small_l // 4] = 2 + seeds[ + lx // 2 + small_l // 4, ly // 2 - small_l // 4, lz // 2 - small_l // 4 + ] = 2 return cp.array(data), cp.array(seeds) -@testing.parametrize('dtype', [cp.float16, cp.float32, cp.float64]) +@testing.parametrize("dtype", [cp.float16, cp.float32, cp.float64]) def test_2d_bf(dtype): lx = 70 ly = 100 @@ -72,71 +81,75 @@ def test_2d_bf(dtype): beta = 90 if dtype == np.float64 else 25 data, labels = make_2d_syntheticdata(lx, ly) - labels_bf = random_walker(data, labels, beta=beta, mode='bf') + labels_bf = random_walker(data, labels, beta=beta, mode="bf") assert (labels_bf[25:45, 40:60] == 2).all() assert data.shape == labels.shape - full_prob_bf = random_walker(data, labels, beta=beta, mode='bf', - return_full_prob=True) - assert (full_prob_bf[1, 25:45, 40:60] >= - full_prob_bf[0, 25:45, 40:60]).all() + full_prob_bf = random_walker( + data, labels, beta=beta, mode="bf", return_full_prob=True + ) + assert ( + full_prob_bf[1, 25:45, 40:60] >= full_prob_bf[0, 25:45, 40:60] + ).all() assert data.shape == labels.shape # Now test with more than two labels labels[55, 80] = 3 - full_prob_bf = random_walker(data, labels, 
beta=beta, mode='bf', - return_full_prob=True) - assert (full_prob_bf[1, 25:45, 40:60] >= - full_prob_bf[0, 25:45, 40:60]).all() + full_prob_bf = random_walker( + data, labels, beta=beta, mode="bf", return_full_prob=True + ) + assert ( + full_prob_bf[1, 25:45, 40:60] >= full_prob_bf[0, 25:45, 40:60] + ).all() assert len(full_prob_bf) == 3 assert data.shape == labels.shape -@testing.parametrize('dtype', [cp.float16, cp.float32, cp.float64]) +@testing.parametrize("dtype", [cp.float16, cp.float32, cp.float64]) def test_2d_cg(dtype): lx = 70 ly = 100 data, labels = make_2d_syntheticdata(lx, ly) data = data.astype(dtype, copy=False) - labels_cg = random_walker(data, labels, beta=90, mode='cg') + labels_cg = random_walker(data, labels, beta=90, mode="cg") assert (labels_cg[25:45, 40:60] == 2).all() assert data.shape == labels.shape - full_prob = random_walker(data, labels, beta=90, mode='cg', - return_full_prob=True) - assert (full_prob[1, 25:45, 40:60] >= - full_prob[0, 25:45, 40:60]).all() + full_prob = random_walker( + data, labels, beta=90, mode="cg", return_full_prob=True + ) + assert (full_prob[1, 25:45, 40:60] >= full_prob[0, 25:45, 40:60]).all() assert data.shape == labels.shape -@testing.parametrize('dtype', [cp.float16, cp.float32, cp.float64]) +@testing.parametrize("dtype", [cp.float16, cp.float32, cp.float64]) def test_2d_cg_mg(dtype): lx = 70 ly = 100 data, labels = make_2d_syntheticdata(lx, ly) data = data.astype(dtype, copy=False) with expected_warnings(['"cg_mg" not available', cupy_warning]): - labels_cg_mg = random_walker(data, labels, beta=90, mode='cg_mg') + labels_cg_mg = random_walker(data, labels, beta=90, mode="cg_mg") assert (labels_cg_mg[25:45, 40:60] == 2).all() assert data.shape == labels.shape with expected_warnings(['"cg_mg" not available', cupy_warning]): - full_prob = random_walker(data, labels, beta=90, mode='cg_mg', - return_full_prob=True) - assert (full_prob[1, 25:45, 40:60] >= - full_prob[0, 25:45, 40:60]).all() + full_prob = random_walker( + data, labels, beta=90, mode="cg_mg", return_full_prob=True + ) + assert (full_prob[1, 25:45, 40:60] >= full_prob[0, 25:45, 40:60]).all() assert data.shape == labels.shape -@testing.parametrize('dtype', [cp.float16, cp.float32, cp.float64]) +@testing.parametrize("dtype", [cp.float16, cp.float32, cp.float64]) def test_2d_cg_j(dtype): lx = 70 ly = 100 data, labels = make_2d_syntheticdata(lx, ly) data = data.astype(dtype, copy=False) - labels_cg = random_walker(data, labels, beta=90, mode='cg_j') + labels_cg = random_walker(data, labels, beta=90, mode="cg_j") assert (labels_cg[25:45, 40:60] == 2).all() assert data.shape == labels.shape - full_prob = random_walker(data, labels, beta=90, mode='cg_j', - return_full_prob=True) - assert (full_prob[1, 25:45, 40:60] - >= full_prob[0, 25:45, 40:60]).all() + full_prob = random_walker( + data, labels, beta=90, mode="cg_j", return_full_prob=True + ) + assert (full_prob[1, 25:45, 40:60] >= full_prob[0, 25:45, 40:60]).all() assert data.shape == labels.shape @@ -147,7 +160,7 @@ def test_types(): data = 255 * (data - data.min()) // (data.max() - data.min()) data = data.astype(cp.uint8) with expected_warnings(['"cg_mg" not available', cupy_warning]): - labels_cg_mg = random_walker(data, labels, beta=90, mode='cg_mg') + labels_cg_mg = random_walker(data, labels, beta=90, mode="cg_mg") assert (labels_cg_mg[25:45, 40:60] == 2).all() assert data.shape == labels.shape @@ -157,7 +170,7 @@ def test_reorder_labels(): ly = 100 data, labels = make_2d_syntheticdata(lx, ly) labels[labels == 2] = 4 - 
labels_bf = random_walker(data, labels, beta=90, mode='bf') + labels_bf = random_walker(data, labels, beta=90, mode="bf") assert (labels_bf[25:45, 40:60] == 2).all() assert data.shape == labels.shape @@ -167,7 +180,7 @@ def test_reorder_labels_cg(): ly = 100 data, labels = make_2d_syntheticdata(lx, ly) labels[labels == 2] = 4 - labels_bf = random_walker(data, labels, beta=90, mode='cg') + labels_bf = random_walker(data, labels, beta=90, mode="cg") assert (labels_bf[25:45, 40:60] == 2).all() assert data.shape == labels.shape @@ -187,26 +200,22 @@ def test_2d_laplacian_size(): # test case from: https://github.com/scikit-image/scikit-image/issues/5034 # The markers here were modified from the ones in the original issue to # avoid a singular matrix, but still reproduce the issue. - data = cp.asarray([[12823, 12787, 12710], - [12883, 13425, 12067], - [11934, 11929, 12309]]) - markers = cp.asarray([[0, -1, 2], - [0, -1, 0], - [1, 0, -1]]) - expected_labels = cp.asarray([[1, -1, 2], - [1, -1, 2], - [1, 1, -1]]) + data = cp.asarray( + [[12823, 12787, 12710], [12883, 13425, 12067], [11934, 11929, 12309]] + ) + markers = cp.asarray([[0, -1, 2], [0, -1, 0], [1, 0, -1]]) + expected_labels = cp.asarray([[1, -1, 2], [1, -1, 2], [1, 1, -1]]) labels = random_walker(data, markers, beta=10) cp.testing.assert_array_equal(labels, expected_labels) -@testing.parametrize('dtype', [cp.float32, cp.float64]) +@testing.parametrize("dtype", [cp.float32, cp.float64]) def test_3d(dtype): n = 30 lx, ly, lz = n, n, n data, labels = make_3d_syntheticdata(lx, ly, lz) data = data.astype(dtype, copy=False) - labels = random_walker(data, labels, mode='cg') + labels = random_walker(data, labels, mode="cg") assert (labels.reshape(data.shape)[13:17, 13:17, 13:17] == 2).all() assert data.shape == labels.shape @@ -216,40 +225,41 @@ def test_3d_inactive(): lx, ly, lz = n, n, n data, labels = make_3d_syntheticdata(lx, ly, lz) labels[5:25, 26:29, 26:29] = -1 - labels = random_walker(data, labels, mode='cg') + labels = random_walker(data, labels, mode="cg") assert (labels.reshape(data.shape)[13:17, 13:17, 13:17] == 2).all() assert data.shape == labels.shape -@testing.parametrize('channel_axis', [0, 1, -1]) -@testing.parametrize('dtype', [cp.float32, cp.float64]) +@testing.parametrize("channel_axis", [0, 1, -1]) +@testing.parametrize("dtype", [cp.float32, cp.float64]) def test_multispectral_2d(dtype, channel_axis): lx, ly = 70, 100 data, labels = make_2d_syntheticdata(lx, ly) data = data.astype(dtype, copy=False) data = data[..., cp.newaxis].repeat(2, axis=-1) # Expect identical output data = cp.moveaxis(data, -1, channel_axis) - with expected_warnings(['The probability range is outside', cupy_warning]): - multi_labels = random_walker(data, labels, mode='cg', - channel_axis=channel_axis) + with expected_warnings(["The probability range is outside", cupy_warning]): + multi_labels = random_walker( + data, labels, mode="cg", channel_axis=channel_axis + ) data = cp.moveaxis(data, channel_axis, -1) assert data[..., 0].shape == labels.shape - random_walker(data[..., 0], labels, mode='cg') + random_walker(data[..., 0], labels, mode="cg") assert (multi_labels.reshape(labels.shape)[25:45, 40:60] == 2).all() assert data[..., 0].shape == labels.shape -@testing.parametrize('dtype', [cp.float32, cp.float64]) +@testing.parametrize("dtype", [cp.float32, cp.float64]) def test_multispectral_3d(dtype): n = 30 lx, ly, lz = n, n, n data, labels = make_3d_syntheticdata(lx, ly, lz) data = data.astype(dtype, copy=False) data = data[..., cp.newaxis].repeat(2, 
axis=-1) # Expect identical output - multi_labels = random_walker(data, labels, mode='cg', channel_axis=-1) + multi_labels = random_walker(data, labels, mode="cg", channel_axis=-1) assert data[..., 0].shape == labels.shape - single_labels = random_walker(data[..., 0], labels, mode='cg') + single_labels = random_walker(data[..., 0], labels, mode="cg") assert (multi_labels.reshape(labels.shape)[13:17, 13:17, 13:17] == 2).all() assert (single_labels.reshape(labels.shape)[13:17, 13:17, 13:17] == 2).all() assert data[..., 0].shape == labels.shape @@ -263,21 +273,22 @@ def test_spacing_0(): # Rescale `data` along Z axis data_aniso = cp.zeros((n, n, n // 2)) for i, yz in enumerate(data): - data_aniso[i, :, :] = resize(yz, (n, n // 2), - mode='constant', - anti_aliasing=False) + data_aniso[i, :, :] = resize( + yz, (n, n // 2), mode="constant", anti_aliasing=False + ) # Generate new labels small_l = int(lx // 5) labels_aniso = cp.zeros_like(data_aniso) labels_aniso[lx // 5, ly // 5, lz // 5] = 1 - labels_aniso[lx // 2 + small_l // 4, - ly // 2 - small_l // 4, - lz // 4 - small_l // 8] = 2 + labels_aniso[ + lx // 2 + small_l // 4, ly // 2 - small_l // 4, lz // 4 - small_l // 8 + ] = 2 # Test with `spacing` kwarg - labels_aniso = random_walker(data_aniso, labels_aniso, mode='cg', - spacing=cp.array((1.0, 1.0, 0.5))) + labels_aniso = random_walker( + data_aniso, labels_aniso, mode="cg", spacing=cp.array((1.0, 1.0, 0.5)) + ) assert (labels_aniso[13:17, 13:17, 7:9] == 2).all() @@ -291,44 +302,45 @@ def test_spacing_1(): # `resize` is not yet 3D capable, so this must be done by looping in 2D. data_aniso = cp.zeros((n, n * 2, n)) for i, yz in enumerate(data): - data_aniso[i, :, :] = resize(yz, (n * 2, n), - mode='constant', - anti_aliasing=False) + data_aniso[i, :, :] = resize( + yz, (n * 2, n), mode="constant", anti_aliasing=False + ) # Generate new labels small_l = int(lx // 5) labels_aniso = cp.zeros_like(data_aniso) labels_aniso[lx // 5, ly // 5, lz // 5] = 1 - labels_aniso[lx // 2 + small_l // 4, - ly - small_l // 2, - lz // 2 - small_l // 4] = 2 + labels_aniso[ + lx // 2 + small_l // 4, ly - small_l // 2, lz // 2 - small_l // 4 + ] = 2 # Test with `spacing` kwarg # First, anisotropic along Y - labels_aniso = random_walker(data_aniso, labels_aniso, mode='cg', - spacing=cp.array((1., 2., 1.))) + labels_aniso = random_walker( + data_aniso, labels_aniso, mode="cg", spacing=cp.array((1.0, 2.0, 1.0)) + ) assert (labels_aniso[13:17, 26:34, 13:17] == 2).all() # Rescale `data` along X axis # `resize` is not yet 3D capable, so this must be done by looping in 2D. 
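The anisotropy tests above motivate the ``spacing`` argument: when one axis is sampled more coarsely, its gradients must be rescaled so edge weights reflect physical distances. A minimal sketch (synthetic data and seed positions are illustrative; assumes a CUDA device):

```python
import cupy as cp

from cucim.skimage.segmentation import random_walker

# Two-phase volume whose z axis is sampled twice as coarsely.
data = cp.zeros((30, 30, 15), dtype=cp.float32)
data[10:20, 10:20, 5:10] = 1.0
data += 0.05 * cp.random.standard_normal(data.shape, dtype=cp.float32)

labels = cp.zeros(data.shape, dtype=cp.int32)
labels[2, 2, 2] = 1       # background seed
labels[15, 15, 7] = 2     # object seed

# ``spacing`` divides per-axis gradients before edge weights are computed.
seg = random_walker(data, labels, beta=90, mode="cg_j",
                    spacing=cp.asarray((1.0, 1.0, 2.0)))
print(seg.shape, int((seg == 2).sum()))
```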
data_aniso = cp.zeros((n, n * 2, n)) for i in range(data.shape[1]): - data_aniso[i, :, :] = resize(data[:, 1, :], (n * 2, n), - mode='constant', - anti_aliasing=False) + data_aniso[i, :, :] = resize( + data[:, 1, :], (n * 2, n), mode="constant", anti_aliasing=False + ) # Generate new labels small_l = int(lx // 5) labels_aniso2 = cp.zeros_like(data_aniso) labels_aniso2[lx // 5, ly // 5, lz // 5] = 1 - labels_aniso2[lx - small_l // 2, - ly // 2 + small_l // 4, - lz // 2 - small_l // 4] = 2 + labels_aniso2[ + lx - small_l // 2, ly // 2 + small_l // 4, lz // 2 - small_l // 4 + ] = 2 # Anisotropic along X - labels_aniso2 = random_walker(data_aniso, - labels_aniso2, - mode='cg', spacing=cp.array((2., 1., 1.))) + labels_aniso2 = random_walker( + data_aniso, labels_aniso2, mode="cg", spacing=cp.array((2.0, 1.0, 1.0)) + ) assert (labels_aniso2[26:34, 13:17, 13:17] == 2).all() @@ -344,8 +356,9 @@ def test_trivial_cases(): # When all voxels are labeled AND return_full_prob is True labels[:, :5] = 3 - expected = cp.concatenate(((labels == 1)[..., cp.newaxis], - (labels == 3)[..., cp.newaxis]), axis=2) + expected = cp.concatenate( + ((labels == 1)[..., cp.newaxis], (labels == 3)[..., cp.newaxis]), axis=2 + ) with expected_warnings(["Returning provided labels", cupy_warning]): test = random_walker(img, labels, return_full_prob=True) cp.testing.assert_array_equal(test, expected) @@ -353,8 +366,9 @@ def test_trivial_cases(): # Unlabeled voxels not connected to seed, so nothing can be done img = cp.full((10, 10), False) object_A = np.array([(6, 7), (6, 8), (7, 7), (7, 8)]) - object_B = np.array([(3, 1), (4, 1), (2, 2), (3, 2), (4, 2), (2, 3), - (3, 3)]) + object_B = np.array( + [(3, 1), (4, 1), (2, 2), (3, 2), (4, 2), (2, 3), (3, 3)] + ) for x, y in np.vstack((object_A, object_B)): img[y][x] = True @@ -380,7 +394,7 @@ def test_length2_spacing(): labels = cp.zeros((10, 10), dtype=cp.uint8) labels[2, 4] = 1 labels[6, 8] = 4 - random_walker(img, labels, spacing=cp.array((1., 2.))) + random_walker(img, labels, spacing=cp.array((1.0, 2.0))) def test_bad_inputs(): @@ -395,7 +409,7 @@ def test_bad_inputs(): # Too many dimensions np.random.seed(42) img = cp.array(np.random.normal(size=(3, 3, 3, 3, 3))) - labels = cp.arange(3 ** 5).reshape(img.shape) + labels = cp.arange(3**5).reshape(img.shape) with testing.raises(ValueError): random_walker(img, labels) with testing.raises(ValueError): @@ -413,13 +427,13 @@ def test_bad_inputs(): img = cp.array(np.random.normal(size=(10, 10))) labels = cp.zeros((10, 10)) with testing.raises(ValueError): - random_walker(img, labels, mode='bad') + random_walker(img, labels, mode="bad") def test_isolated_seeds(): np.random.seed(0) a = cp.array(np.random.random((7, 7))) - mask = - np.ones(a.shape) + mask = -np.ones(a.shape) # This pixel is an isolated seed mask[1, 1] = 1 # Unlabeled pixels @@ -430,10 +444,10 @@ def test_isolated_seeds(): mask = cp.array(mask) # Test that no error is raised, and that labels of isolated seeds are OK - with expected_warnings(['The probability range is outside', cupy_warning]): + with expected_warnings(["The probability range is outside", cupy_warning]): res = random_walker(a, mask) assert res[1, 1] == 1 - with expected_warnings(['The probability range is outside', cupy_warning]): + with expected_warnings(["The probability range is outside", cupy_warning]): res = random_walker(a, mask, return_full_prob=True) assert res[0, 1, 1] == 1 assert res[1, 1, 1] == 0 @@ -442,7 +456,7 @@ def test_isolated_seeds(): def test_isolated_area(): np.random.seed(0) a = 
cp.array(np.random.random((7, 7))) - mask = - np.ones(a.shape) + mask = -np.ones(a.shape) # This pixel is an isolated seed mask[1, 1] = 0 # Unlabeled pixels @@ -453,10 +467,10 @@ def test_isolated_area(): mask = cp.array(mask) # Test that no error is raised, and that labels of isolated seeds are OK - with expected_warnings(['The probability range is outside', cupy_warning]): + with expected_warnings(["The probability range is outside", cupy_warning]): res = random_walker(a, mask) assert res[1, 1] == 0 - with expected_warnings(['The probability range is outside', cupy_warning]): + with expected_warnings(["The probability range is outside", cupy_warning]): res = random_walker(a, mask, return_full_prob=True) assert res[0, 1, 1] == 0 assert res[1, 1, 1] == 0 @@ -465,7 +479,7 @@ def test_isolated_area(): def test_prob_tol(): np.random.seed(0) a = cp.array(np.random.random((7, 7))) - mask = - np.ones(a.shape) + mask = -np.ones(a.shape) # This pixel is an isolated seed mask[1, 1] = 1 # Unlabeled pixels @@ -475,7 +489,7 @@ def test_prob_tol(): mask[6, 6] = 1 mask = cp.array(mask) - with expected_warnings(['The probability range is outside', cupy_warning]): + with expected_warnings(["The probability range is outside", cupy_warning]): res = random_walker(a, mask, return_full_prob=True) # Lower beta, no warning is expected. diff --git a/python/cucim/src/cucim/skimage/transform/__init__.py b/python/cucim/src/cucim/skimage/transform/__init__.py index 106d92592..81fe7c6e8 100644 --- a/python/cucim/src/cucim/skimage/transform/__init__.py +++ b/python/cucim/src/cucim/skimage/transform/__init__.py @@ -1,13 +1,33 @@ -from ._geometric import (AffineTransform, EssentialMatrixTransform, - EuclideanTransform, FundamentalMatrixTransform, - PiecewiseAffineTransform, PolynomialTransform, - ProjectiveTransform, SimilarityTransform, - estimate_transform, matrix_transform) -from ._warps import (downscale_local_mean, rescale, resize, resize_local_mean, - rotate, swirl, warp, warp_coords, warp_polar) +from ._geometric import ( + AffineTransform, + EssentialMatrixTransform, + EuclideanTransform, + FundamentalMatrixTransform, + PiecewiseAffineTransform, + PolynomialTransform, + ProjectiveTransform, + SimilarityTransform, + estimate_transform, + matrix_transform, +) +from ._warps import ( + downscale_local_mean, + rescale, + resize, + resize_local_mean, + rotate, + swirl, + warp, + warp_coords, + warp_polar, +) from .integral import integral_image, integrate -from .pyramids import (pyramid_expand, pyramid_gaussian, pyramid_laplacian, - pyramid_reduce) +from .pyramids import ( + pyramid_expand, + pyramid_gaussian, + pyramid_laplacian, + pyramid_reduce, +) __all__ = [ "integral_image", diff --git a/python/cucim/src/cucim/skimage/transform/_geometric.py b/python/cucim/src/cucim/skimage/transform/_geometric.py index 4da5b11f4..9b5644bd7 100644 --- a/python/cucim/src/cucim/skimage/transform/_geometric.py +++ b/python/cucim/src/cucim/skimage/transform/_geometric.py @@ -19,8 +19,9 @@ def _affine_matrix_from_vector(v): d = (1 + math.sqrt(1 + 4 * nparam)) / 2 - 1 dimensionality = round(d) # round to prevent approx errors if d != dimensionality: - raise ValueError('Invalid number of elements for ' - f'linearized matrix: {nparam}') + raise ValueError( + "Invalid number of elements for " f"linearized matrix: {nparam}" + ) matrix = np.eye(dimensionality + 1) matrix[:-1, :] = np.reshape(v, (dimensionality, dimensionality + 1)) return matrix @@ -78,9 +79,11 @@ def _center_and_normalize_points(points): # small value; ie, we don't need to 
worry about numerical stability here, # only actual 0. if rms == 0: - return (xp.full((d + 1, d + 1), xp.nan), - xp.full_like(points, xp.nan), - True) + return ( + xp.full((d + 1, d + 1), xp.nan), + xp.full_like(points, xp.nan), + True, + ) norm_factor = math.sqrt(d) / rms @@ -181,9 +184,8 @@ def _umeyama(src, dst, estimate_scale): class GeometricTransform: - """Base class for geometric transformations. + """Base class for geometric transformations.""" - """ def __call__(self, coords): """Apply forward transformation. @@ -239,9 +241,7 @@ def residuals(self, src, dst): return xp.sqrt(xp.sum((self(src) - dst) ** 2, axis=1)) def __add__(self, other): - """Combine this transformation with another. - - """ + """Combine this transformation with another.""" raise NotImplementedError() @@ -679,14 +679,14 @@ def _apply_mat(self, coords, matrix): dst[:, ndim] = tmp # rescale to homogeneous coordinates - dst[:, :ndim] /= dst[:, ndim:ndim + 1] + dst[:, :ndim] /= dst[:, ndim : ndim + 1] return dst[:, :ndim] def __array__(self, dtype=None): """Return the transform parameters as an array. - Note, __array__ is not currently supported by CuPy + Note, __array__ is not currently supported by CuPy """ if dtype is None: return self.params @@ -802,12 +802,12 @@ def estimate(self, src, dst, weights=None): A = xp.zeros((n * d, (d + 1) ** 2)) for ddim in range(d): A[ - ddim * n:(ddim + 1) * n, ddim * (d + 1):ddim * (d + 1) + d + ddim * n : (ddim + 1) * n, ddim * (d + 1) : ddim * (d + 1) + d ] = src - A[ddim * n:(ddim + 1) * n, ddim * (d + 1) + d] = 1 - A[ddim * n:(ddim + 1) * n, -d - 1:-1] = src - A[ddim * n:(ddim + 1) * n, -1] = -1 - A[ddim * n:(ddim + 1) * n, -d - 1:] *= -dst[:, ddim:(ddim + 1)] + A[ddim * n : (ddim + 1) * n, ddim * (d + 1) + d] = 1 + A[ddim * n : (ddim + 1) * n, -d - 1 : -1] = src + A[ddim * n : (ddim + 1) * n, -1] = -1 + A[ddim * n : (ddim + 1) * n, -d - 1 :] *= -dst[:, ddim : (ddim + 1)] # Select relevant columns, depending on params A = A[:, list(self._coeffs) + [-1]] @@ -878,8 +878,8 @@ def __add__(self, other): def __nice__(self): """common 'paramstr' used by __str__ and __repr__""" - npstring = np.array2string(cp.asnumpy(self.params), separator=', ') - paramstr = 'matrix=\n' + textwrap.indent(npstring, ' ') + npstring = np.array2string(cp.asnumpy(self.params), separator=", ") + paramstr = "matrix=\n" + textwrap.indent(npstring, " ") return paramstr def __repr__(self): @@ -887,14 +887,14 @@ def __repr__(self): paramstr = self.__nice__() classname = self.__class__.__name__ classstr = classname - return '<{}({}) at {}>'.format(classstr, paramstr, hex(id(self))) + return "<{}({}) at {}>".format(classstr, paramstr, hex(id(self))) def __str__(self): """Add standard str formatting around a __nice__ string""" paramstr = self.__nice__() classname = self.__class__.__name__ classstr = classname - return '<{}({})>'.format(classstr, paramstr) + return "<{}({})>".format(classstr, paramstr) @property def dimensionality(self): @@ -1010,17 +1010,29 @@ class AffineTransform(ProjectiveTransform): https://en.wikipedia.org/wiki/Shear_mapping """ - def __init__(self, matrix=None, scale=None, rotation=None, shear=None, - translation=None, *, dimensionality=2, xp=cp): - params = any(param is not None - for param in (scale, rotation, shear, translation)) + def __init__( + self, + matrix=None, + scale=None, + rotation=None, + shear=None, + translation=None, + *, + dimensionality=2, + xp=cp, + ): + params = any( + param is not None for param in (scale, rotation, shear, translation) + ) # these parameters get 
overwritten if a higher-D matrix is given self._coeffs = range(dimensionality * (dimensionality + 1)) if params and matrix is not None: - raise ValueError("You cannot specify the transformation matrix and" - " the implicit parameters at the same time.") + raise ValueError( + "You cannot specify the transformation matrix and" + " the implicit parameters at the same time." + ) if params and dimensionality > 2: raise ValueError("Parameter input is only supported in 2D.") @@ -1082,7 +1094,7 @@ def scale(self): else: ss = xp.sum(self.params * self.params, axis=0) ss[1] = ss[1] / (math.tan(self.shear) ** 2 + 1) - return xp.sqrt(ss)[:self.dimensionality] + return xp.sqrt(ss)[: self.dimensionality] @property def rotation(self): @@ -1103,7 +1115,7 @@ def shear(self): @property def translation(self): - return self.params[0:self.dimensionality, self.dimensionality] + return self.params[0 : self.dimensionality, self.dimensionality] # CuPy Backend: TODO: PiecewiseAffineTransform is inefficient currently @@ -1294,8 +1306,7 @@ def _euler_rotation(axis, angle): i = axis s = (-1) ** i * _sin(angle) c = _cos(angle) - R2 = np.array([[c, -s], # noqa - [s, c]]) # noqa + R2 = np.array([[c, -s], [s, c]]) # noqa # noqa Ri = np.eye(3) # We need the axes other than the rotation axis, in the right order: # 0 -> (1, 2); 1 -> (0, 2); 2 -> (0, 1). @@ -1379,13 +1390,22 @@ class EuclideanTransform(ProjectiveTransform): .. [1] https://en.wikipedia.org/wiki/Rotation_matrix#In_three_dimensions """ - def __init__(self, matrix=None, rotation=None, translation=None, *, - dimensionality=2, xp=cp,): + def __init__( + self, + matrix=None, + rotation=None, + translation=None, + *, + dimensionality=2, + xp=cp, + ): params_given = rotation is not None or translation is not None if params_given and matrix is not None: - raise ValueError("You cannot specify the transformation matrix and" - " the implicit parameters at the same time.") + raise ValueError( + "You cannot specify the transformation matrix and" + " the implicit parameters at the same time." + ) elif matrix is not None: if matrix.shape[0] != matrix.shape[1]: raise ValueError("Invalid shape of transformation matrix.") @@ -1469,12 +1489,12 @@ def rotation(self): return self.params[:3, :3] else: raise NotImplementedError( - 'Rotation only implemented for 2D and 3D transforms.' + "Rotation only implemented for 2D and 3D transforms." ) @property def translation(self): - return self.params[0:self.dimensionality, self.dimensionality] + return self.params[0 : self.dimensionality, self.dimensionality] class SimilarityTransform(EuclideanTransform): @@ -1518,15 +1538,26 @@ class SimilarityTransform(EuclideanTransform): """ - def __init__(self, matrix=None, scale=None, rotation=None, - translation=None, *, dimensionality=2, xp=cp): + def __init__( + self, + matrix=None, + scale=None, + rotation=None, + translation=None, + *, + dimensionality=2, + xp=cp, + ): self.params = None - params = any(param is not None - for param in (scale, rotation, translation)) + params = any( + param is not None for param in (scale, rotation, translation) + ) if params and matrix is not None: - raise ValueError("You cannot specify the transformation matrix and" - " the implicit parameters at the same time.") + raise ValueError( + "You cannot specify the transformation matrix and" + " the implicit parameters at the same time." 
+ ) elif matrix is not None: if matrix.ndim != 2 or matrix.shape[0] != matrix.shape[1]: raise ValueError("Invalid shape of transformation matrix.") @@ -1535,7 +1566,7 @@ def __init__(self, matrix=None, scale=None, rotation=None, dimensionality = matrix.shape[0] - 1 if params: if dimensionality not in (2, 3): - raise ValueError('Parameters only supported for 2D and 3D.') + raise ValueError("Parameters only supported for 2D and 3D.") matrix = xp.eye(dimensionality + 1, dtype=float) if scale is None: scale = 1 @@ -1593,7 +1624,8 @@ def scale(self): return math.pow(np.linalg.det(cp.asnumpy(self.params)), 1 / 3) else: raise NotImplementedError( - 'Scale is only implemented for 2D and 3D.') + "Scale is only implemented for 2D and 3D." + ) class PolynomialTransform(GeometricTransform): @@ -1703,8 +1735,8 @@ def estimate(self, src, dst, order=2, weights=None): pidx = 0 for j in range(order + 1): for i in range(j + 1): - A[:rows, pidx] = xs ** (j - i) * ys ** i - A[rows:, pidx + u // 2] = xs ** (j - i) * ys ** i + A[:rows, pidx] = xs ** (j - i) * ys**i + A[rows:, pidx + u // 2] = xs ** (j - i) * ys**i pidx += 1 A[:rows, -1] = xd @@ -1751,29 +1783,30 @@ def __call__(self, coords): pidx = 0 for j in range(order + 1): for i in range(j + 1): - dst[:, 0] += self.params[0, pidx] * x ** (j - i) * y ** i - dst[:, 1] += self.params[1, pidx] * x ** (j - i) * y ** i + dst[:, 0] += self.params[0, pidx] * x ** (j - i) * y**i + dst[:, 1] += self.params[1, pidx] * x ** (j - i) * y**i pidx += 1 return dst def inverse(self, coords): raise Exception( - 'There is no explicit way to do the inverse polynomial ' - 'transformation. Instead, estimate the inverse transformation ' - 'parameters by exchanging source and destination coordinates,' - 'then apply the forward transformation.') + "There is no explicit way to do the inverse polynomial " + "transformation. Instead, estimate the inverse transformation " + "parameters by exchanging source and destination coordinates, " + "then apply the forward transformation."
+ ) TRANSFORMS = { - 'euclidean': EuclideanTransform, - 'similarity': SimilarityTransform, - 'affine': AffineTransform, - 'piecewise-affine': PiecewiseAffineTransform, - 'projective': ProjectiveTransform, - 'fundamental': FundamentalMatrixTransform, - 'essential': EssentialMatrixTransform, - 'polynomial': PolynomialTransform, + "euclidean": EuclideanTransform, + "similarity": SimilarityTransform, + "affine": AffineTransform, + "piecewise-affine": PiecewiseAffineTransform, + "projective": ProjectiveTransform, + "fundamental": FundamentalMatrixTransform, + "essential": EssentialMatrixTransform, + "polynomial": PolynomialTransform, } @@ -1842,8 +1875,9 @@ def estimate_transform(ttype, src, dst, *args, **kwargs): """ ttype = ttype.lower() if ttype not in TRANSFORMS: - raise ValueError('the transformation type \'%s\' is not' 'implemented' % ttype) + raise ValueError( + "the transformation type '%s' is not " "implemented" % ttype + ) tform = TRANSFORMS[ttype](dimensionality=src.shape[1]) tform.estimate(src, dst, *args, **kwargs) diff --git a/python/cucim/src/cucim/skimage/transform/_warps.py b/python/cucim/src/cucim/skimage/transform/_warps.py index 343f1f0f7..6ba5a85a6 100644 --- a/python/cucim/src/cucim/skimage/transform/_warps.py +++ b/python/cucim/src/cucim/skimage/transform/_warps.py @@ -5,13 +5,21 @@ import cucim.skimage._vendored.ndimage as ndi -from .._shared.utils import (_to_ndimage_mode, _validate_interpolation_order, - channel_as_last_axis, convert_to_float, - safe_as_int, warn) +from .._shared.utils import ( + _to_ndimage_mode, + _validate_interpolation_order, + channel_as_last_axis, + convert_to_float, + safe_as_int, + warn, +) from .._vendored import pad from ..measure import block_reduce -from ._geometric import (AffineTransform, ProjectiveTransform, - SimilarityTransform) +from ._geometric import ( + AffineTransform, + ProjectiveTransform, + SimilarityTransform, +) HOMOGRAPHY_TRANSFORMS = ( SimilarityTransform, @@ -55,20 +63,31 @@ def _preprocess_resize_output_shape(image, output_shape): input_shape = image.shape if output_ndim > image.ndim: # append dimensions to input_shape - input_shape += (1, ) * (output_ndim - image.ndim) + input_shape += (1,) * (output_ndim - image.ndim) image = cp.reshape(image, input_shape) elif output_ndim == image.ndim - 1: # multichannel case: append shape of last axis - output_shape = output_shape + (image.shape[-1], ) + output_shape = output_shape + (image.shape[-1],) elif output_ndim < image.ndim: - raise ValueError("output_shape length cannot be smaller than the " - "image number of dimensions") + raise ValueError( + "output_shape length cannot be smaller than the " + "image number of dimensions" + ) return image, output_shape -def resize(image, output_shape, order=None, mode='reflect', cval=0, clip=None, - preserve_range=False, anti_aliasing=None, anti_aliasing_sigma=None): +def resize( + image, + output_shape, + order=None, + mode="reflect", + cval=0, + clip=None, + preserve_range=False, + anti_aliasing=None, + anti_aliasing_sigma=None, +): """Resize image to match a certain size. Performs interpolation to up-size or down-size N-dimensional images.
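For orientation, the hunk above only re-wraps the `resize` signature; its behavior is unchanged. A minimal usage sketch (the array and shapes are synthetic, chosen only for illustration):

```python
import cupy as cp
from cucim.skimage.transform import resize

image = cp.random.random((128, 128))
# With the default anti_aliasing=None, a Gaussian pre-filter is applied
# automatically when down-sampling a floating-point image.
smaller = resize(image, (32, 32))
assert smaller.shape == (32, 32)
```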
Note @@ -151,9 +170,10 @@ def resize(image, output_shape, order=None, mode='reflect', cval=0, clip=None, if anti_aliasing is None: anti_aliasing = ( - not input_type == bool and - not (cp.issubdtype(input_type, cp.integer) and order == 0) and - any(x < y for x, y in zip(output_shape, input_shape))) + not input_type == bool + and not (cp.issubdtype(input_type, cp.integer) and order == 0) + and any(x < y for x, y in zip(output_shape, input_shape)) + ) if input_type == bool and anti_aliasing: raise ValueError("anti_aliasing must be False for boolean images") @@ -177,34 +197,59 @@ def resize(image, output_shape, order=None, mode='reflect', cval=0, clip=None, elif len(anti_aliasing_sigma) != len(factors): raise ValueError("invalid anti_aliasing_sigma length") if any(sigma < 0 for sigma in anti_aliasing_sigma): - raise ValueError("Anti-aliasing standard deviation must be " - "greater than or equal to zero") - elif any(((sigma > 0) & (factor <= 1)) - for factor, sigma in zip(factors, anti_aliasing_sigma)): - warn("Anti-aliasing standard deviation greater than zero but " - "not down-sampling along all axes") + raise ValueError( + "Anti-aliasing standard deviation must be " + "greater than or equal to zero" + ) + elif any( + ((sigma > 0) & (factor <= 1)) + for factor, sigma in zip(factors, anti_aliasing_sigma) + ): + warn( + "Anti-aliasing standard deviation greater than zero but " + "not down-sampling along all axes" + ) # TODO: CuPy itself should do this grid-constant->constant conversion # make upstream PR for SciPy-compatible behavior - _ndi_mode = {'grid-constant': 'constant', 'grid-wrap':'wrap'}.get(ndi_mode, ndi_mode) # noqa + _ndi_mode = {"grid-constant": "constant", "grid-wrap": "wrap"}.get( + ndi_mode, ndi_mode + ) # noqa # keep ndi.gaussian_filter rather than cucim.skimage.filters.gaussian # to avoid undesired dtype coercion - filtered = ndi.gaussian_filter(image, anti_aliasing_sigma, cval=cval, - mode=_ndi_mode) + filtered = ndi.gaussian_filter( + image, anti_aliasing_sigma, cval=cval, mode=_ndi_mode + ) else: filtered = image zoom_factors = [1 / f for f in factors] - out = ndi.zoom(filtered, zoom_factors, order=order, mode=ndi_mode, - cval=cval, grid_mode=True) + out = ndi.zoom( + filtered, + zoom_factors, + order=order, + mode=ndi_mode, + cval=cval, + grid_mode=True, + ) _clip_warp_output(image, out, mode, cval, order, clip) return out @channel_as_last_axis() -def rescale(image, scale, order=None, mode='reflect', cval=0, clip=None, - preserve_range=False, anti_aliasing=None, anti_aliasing_sigma=None, - *, channel_axis=None): +def rescale( + image, + scale, + order=None, + mode="reflect", + cval=0, + clip=None, + preserve_range=False, + anti_aliasing=None, + anti_aliasing_sigma=None, + *, + channel_axis=None, +): """Scale image by a certain factor. Performs interpolation to up-scale or down-scale N-dimensional images. 
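As the hunks above show, `rescale` is a thin convenience wrapper that converts a scale factor into an output shape and defers to `resize`. A minimal sketch, assuming a synthetic RGB array:

```python
import cupy as cp
from cucim.skimage.transform import rescale

rgb = cp.random.random((64, 64, 3))
# channel_axis marks the color axis so it is excluded from scaling;
# a scalar factor then applies to every spatial axis.
half = rescale(rgb, 0.5, channel_axis=-1)
assert half.shape == (32, 32, 3)
```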
@@ -285,10 +330,12 @@ def rescale(image, scale, order=None, mode='reflect', cval=0, clip=None, scale = np.atleast_1d(scale) multichannel = channel_axis is not None if len(scale) > 1: - if ((not multichannel and len(scale) != image.ndim) or - (multichannel and len(scale) != image.ndim - 1)): - raise ValueError("Supply a single scale, or one value per spatial " - "axis") + if (not multichannel and len(scale) != image.ndim) or ( + multichannel and len(scale) != image.ndim - 1 + ): + raise ValueError( + "Supply a single scale, or one value per spatial " "axis" + ) if multichannel: scale = np.concatenate((scale, [1])) orig_shape = np.asarray(image.shape) @@ -296,21 +343,28 @@ def rescale(image, scale, order=None, mode='reflect', cval=0, clip=None, if multichannel: # don't scale channel dimension output_shape[-1] = orig_shape[-1] - return resize(image, output_shape, order=order, mode=mode, cval=cval, - clip=clip, preserve_range=preserve_range, - anti_aliasing=anti_aliasing, - anti_aliasing_sigma=anti_aliasing_sigma) + return resize( + image, + output_shape, + order=order, + mode=mode, + cval=cval, + clip=clip, + preserve_range=preserve_range, + anti_aliasing=anti_aliasing, + anti_aliasing_sigma=anti_aliasing_sigma, + ) -def _ndimage_affine(image, matrix, output_shape, order, mode, cval, clip, - preserve_range): +def _ndimage_affine( + image, matrix, output_shape, order, mode, cval, clip, preserve_range +): """Thin wrapper around scipy.ndimage.affine_transform Validates input and handles clipping of output in the same way as ``warp``. """ if image.size == 0: - raise ValueError("Cannot warp empty image with dimensions", - image.shape) + raise ValueError("Cannot warp empty image with dimensions", image.shape) order = _validate_interpolation_order(image.dtype, order) @@ -331,23 +385,29 @@ def _ndimage_affine(image, matrix, output_shape, order, mode, cval, clip, prefilter = order > 1 ndi_mode = _to_ndimage_mode(mode) - warped = ndi.affine_transform(image, matrix, prefilter=prefilter, - mode=ndi_mode, order=order, cval=cval, - output_shape=tuple(output_shape)) + warped = ndi.affine_transform( + image, + matrix, + prefilter=prefilter, + mode=ndi_mode, + order=order, + cval=cval, + output_shape=tuple(output_shape), + ) _clip_warp_output(image, warped, mode, cval, order, clip) return warped -def _ndimage_rotate(image, angle, resize, order, mode, cval, clip, - preserve_range): +def _ndimage_rotate( + image, angle, resize, order, mode, cval, clip, preserve_range +): """Thin wrapper around scipy.ndimage.rotate Validates input and handles clipping of output in the same way as ``warp``. 
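`_ndimage_affine` and `_ndimage_rotate` are thin wrappers over `cupyx.scipy.ndimage`; the public `rotate` in the next hunk dispatches to them. A minimal sketch of the `resize` flag's effect (synthetic input, 30-degree angle chosen for illustration):

```python
import cupy as cp
from cucim.skimage.transform import rotate

img = cp.zeros((50, 80), dtype=cp.float32)
# resize=False (the default) preserves the input shape; resize=True
# grows the output so no corners are clipped by the rotation.
same = rotate(img, 30)
grown = rotate(img, 30, resize=True)
assert same.shape == (50, 80)
assert grown.shape[0] > 50 and grown.shape[1] > 80
```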
""" if image.size == 0: - raise ValueError("Cannot warp empty image with dimensions", - image.shape) + raise ValueError("Cannot warp empty image with dimensions", image.shape) order = _validate_interpolation_order(image.dtype, order) @@ -361,14 +421,30 @@ def _ndimage_rotate(image, angle, resize, order, mode, cval, clip, prefilter = order > 1 ndi_mode = _to_ndimage_mode(mode) - warped = ndi.rotate(image, angle, reshape=resize, prefilter=prefilter, - mode=ndi_mode, order=order, cval=cval) + warped = ndi.rotate( + image, + angle, + reshape=resize, + prefilter=prefilter, + mode=ndi_mode, + order=order, + cval=cval, + ) _clip_warp_output(image, warped, mode, cval, order, clip) return warped -def rotate(image, angle, resize=False, center=None, order=None, - mode='constant', cval=0, clip=True, preserve_range=False): +def rotate( + image, + angle, + resize=False, + center=None, + order=None, + mode="constant", + cval=0, + clip=True, + preserve_range=False, +): """Rotate image by a certain angle around its center. Parameters @@ -442,7 +518,7 @@ def rotate(image, angle, resize=False, center=None, order=None, rows, cols = image.shape[0], image.shape[1] if image.dtype == cp.float16: image = image.astype(cp.float32) - img_center = np.array((cols, rows)) / 2. - 0.5 + img_center = np.array((cols, rows)) / 2.0 - 0.5 if center is None: center = img_center centered = True @@ -453,8 +529,14 @@ def rotate(image, angle, resize=False, center=None, order=None, if centered and not resize: # can use cupyx.scipy.ndimage.rotate return _ndimage_rotate( - image, angle, resize, order=order, mode=mode, cval=cval, clip=clip, - preserve_range=preserve_range + image, + angle, + resize, + order=order, + mode=mode, + cval=cval, + clip=clip, + preserve_range=preserve_range, ) # rotation around center @@ -509,8 +591,14 @@ def rotate(image, angle, resize=False, center=None, order=None, affine_params = cp.asarray(affine_params) return _ndimage_affine( - image, affine_params, output_shape=output_shape, order=order, - mode=mode, cval=cval, clip=clip, preserve_range=preserve_range + image, + affine_params, + output_shape=output_shape, + order=order, + mode=mode, + cval=cval, + clip=clip, + preserve_range=preserve_range, ) @@ -582,9 +670,19 @@ def _swirl_mapping(xy, center, rotation, strength, radius): return xy -def swirl(image, center=None, strength=1, radius=100, rotation=0, - output_shape=None, order=None, mode='reflect', cval=0, clip=None, - preserve_range=False): +def swirl( + image, + center=None, + strength=1, + radius=100, + rotation=0, + output_shape=None, + order=None, + mode="reflect", + cval=0, + clip=None, + preserve_range=False, +): """Perform a swirl transformation. 
Parameters @@ -636,14 +734,24 @@ def swirl(image, center=None, strength=1, radius=100, rotation=0, if center is None: center = np.array(image.shape)[:2][::-1] / 2 - warp_args = {'center': center, - 'rotation': rotation, - 'strength': strength, - 'radius': radius} - - return warp(image, _swirl_mapping, map_args=warp_args, - output_shape=output_shape, order=order, mode=mode, cval=cval, - clip=clip, preserve_range=preserve_range) + warp_args = { + "center": center, + "rotation": rotation, + "strength": strength, + "radius": radius, + } + + return warp( + image, + _swirl_mapping, + map_args=warp_args, + output_shape=output_shape, + order=order, + mode=mode, + cval=cval, + clip=clip, + preserve_range=preserve_range, + ) def _stackcopy(a, b): @@ -805,7 +913,7 @@ def _clip_warp_output(input_image, output_image, mode, cval, order, clip): # Check if cval has been used such that it expands the effective input # range preserve_cval = ( - mode == 'constant' + mode == "constant" and not min_val <= cval <= max_val and min_func(output_image) <= cval <= max_func(output_image) ) @@ -820,8 +928,17 @@ def _clip_warp_output(input_image, output_image, mode, cval, order, clip): cp.clip(output_image, min_val, max_val, out=output_image) -def warp(image, inverse_map, map_args=None, output_shape=None, order=None, - mode='constant', cval=0., clip=None, preserve_range=False): +def warp( + image, + inverse_map, + map_args=None, + output_shape=None, + order=None, + mode="constant", + cval=0.0, + clip=None, + preserve_range=False, +): """Warp an image according to a given coordinate transformation. Parameters @@ -966,8 +1083,7 @@ def warp(image, inverse_map, map_args=None, output_shape=None, order=None, map_args = {} if image.size == 0: - raise ValueError("Cannot warp empty image with dimensions", - image.shape) + raise ValueError("Cannot warp empty image with dimensions", image.shape) order = _validate_interpolation_order(image.dtype, order) if image.dtype.kind == "c": @@ -985,7 +1101,10 @@ def warp(image, inverse_map, map_args=None, output_shape=None, order=None, else: output_shape = safe_as_int(output_shape) - if isinstance(inverse_map, cp.ndarray) and inverse_map.shape == (3, 3,): + if isinstance(inverse_map, cp.ndarray) and inverse_map.shape == ( + 3, + 3, + ): # inverse_map is a transformation matrix as numpy array, # this is only used for order >= 4. inverse_map = ProjectiveTransform(matrix=inverse_map) @@ -999,9 +1118,11 @@ def warp(image, inverse_map, map_args=None, output_shape=None, order=None, # coordinates. This is only supported for 2(+1)-D images. if image.ndim < 2 or image.ndim > 3: - raise ValueError("Only 2-D images (grayscale or color) are " - "supported, when providing a callable " - "`inverse_map`.") + raise ValueError( + "Only 2-D images (grayscale or color) are " + "supported, when providing a callable " + "`inverse_map`." + ) def coord_map(*args): return inverse_map(*args, **map_args) @@ -1010,8 +1131,7 @@ def coord_map(*args): # Input image is 2D and has color channel, but output_shape is # given for 2-D images. Automatically add the color channel # dimensionality. 
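One point worth restating from the `warp` hunks above: the callable or transform passed as `inverse_map` must take *output* coordinates back to *input* coordinates. A minimal sketch with a synthetic image, using the standard idiom of passing a transform's `inverse`:

```python
import cupy as cp
from cucim.skimage.transform import SimilarityTransform, warp

image = cp.random.random((64, 64))
tform = SimilarityTransform(rotation=0.1, translation=(5, -3))
# Passing tform.inverse (the output -> input mapping) applies tform itself.
warped = warp(image, tform.inverse)
```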
- output_shape = (output_shape[0], output_shape[1], - input_shape[2]) + output_shape = (output_shape[0], output_shape[1], input_shape[2]) coords = warp_coords(coord_map, output_shape) @@ -1019,8 +1139,14 @@ def coord_map(*args): prefilter = order > 1 ndi_mode = _to_ndimage_mode(mode) - warped = ndi.map_coordinates(image, coords, prefilter=prefilter, - mode=ndi_mode, order=order, cval=cval) + warped = ndi.map_coordinates( + image, + coords, + prefilter=prefilter, + mode=ndi_mode, + order=order, + cval=cval, + ) _clip_warp_output(image, warped, mode, cval, order, clip) return warped @@ -1089,8 +1215,16 @@ def _log_polar_mapping(output_coords, k_angle, k_radius, center): @channel_as_last_axis() -def warp_polar(image, center=None, *, radius=None, output_shape=None, - scaling='linear', channel_axis=None, **kwargs): +def warp_polar( + image, + center=None, + *, + radius=None, + output_shape=None, + scaling="linear", + channel_axis=None, + **kwargs, +): """Remap image to polar or log-polar coordinates space. Parameters @@ -1150,19 +1284,23 @@ def warp_polar(image, center=None, *, radius=None, output_shape=None, """ multichannel = channel_axis is not None if image.ndim != 2 and not multichannel: - raise ValueError(f'Input array must be 2-dimensional when ' - f'`channel_axis=None`, got {image.ndim}') + raise ValueError( + f"Input array must be 2-dimensional when " + f"`channel_axis=None`, got {image.ndim}" + ) if image.ndim != 3 and multichannel: - raise ValueError(f'Input array must be 3-dimensional when ' - f'`channel_axis` is specified, got {image.ndim}') + raise ValueError( + f"Input array must be 3-dimensional when " + f"`channel_axis` is specified, got {image.ndim}" + ) if center is None: center = (np.array(image.shape)[:2] / 2) - 0.5 if radius is None: w, h = np.array(image.shape)[:2] / 2 - radius = np.sqrt(w ** 2 + h ** 2) + radius = np.sqrt(w**2 + h**2) if output_shape is None: height = 360 @@ -1173,20 +1311,21 @@ def warp_polar(image, center=None, *, radius=None, output_shape=None, height = output_shape[0] width = output_shape[1] - if scaling == 'linear': + if scaling == "linear": k_radius = width / radius map_func = _linear_polar_mapping - elif scaling == 'log': + elif scaling == "log": k_radius = width / math.log(radius) map_func = _log_polar_mapping else: raise ValueError("Scaling value must be in {'linear', 'log'}") k_angle = height / (2 * np.pi) - warp_args = {'k_angle': k_angle, 'k_radius': k_radius, 'center': center} + warp_args = {"k_angle": k_angle, "k_radius": k_radius, "center": center} - warped = warp(image, map_func, map_args=warp_args, - output_shape=output_shape, **kwargs) + warped = warp( + image, map_func, map_args=warp_args, output_shape=output_shape, **kwargs + ) return warped @@ -1217,18 +1356,25 @@ def _local_mean_weights(old_size, new_size, grid_mode, dtype): new_breaks = cp.linspace(0, old_size, num=new_size + 1, dtype=dtype) else: old, new = old_size - 1, new_size - 1 - old_breaks = pad(cp.linspace(0.5, old - 0.5, old, dtype=dtype), - 1, 'constant', constant_values=(0, old)) + old_breaks = pad( + cp.linspace(0.5, old - 0.5, old, dtype=dtype), + 1, + "constant", + constant_values=(0, old), + ) if new == 0: val = np.inf else: val = 0.5 * old / new - new_breaks = pad(cp.linspace(val, old - val, new, dtype=dtype), - 1, 'constant', constant_values=(0, old)) + new_breaks = pad( + cp.linspace(val, old - val, new, dtype=dtype), + 1, + "constant", + constant_values=(0, old), + ) upper = cp.minimum(new_breaks[1:, np.newaxis], old_breaks[np.newaxis, 1:]) - lower = 
cp.maximum(new_breaks[:-1, np.newaxis], - old_breaks[np.newaxis, :-1]) + lower = cp.maximum(new_breaks[:-1, np.newaxis], old_breaks[np.newaxis, :-1]) weights = cp.maximum(upper - lower, 0) weights /= weights.sum(axis=1, keepdims=True) @@ -1236,8 +1382,14 @@ def _local_mean_weights(old_size, new_size, grid_mode, dtype): return weights -def resize_local_mean(image, output_shape, grid_mode=True, - preserve_range=False, *, channel_axis=None): +def resize_local_mean( + image, + output_shape, + grid_mode=True, + preserve_range=False, + *, + channel_axis=None, +): """Resize an array with the local mean / bilinear scaling. Parameters @@ -1326,8 +1478,9 @@ def resize_local_mean(image, output_shape, grid_mode=True, # move channels to last position in output_shape channel_axis = channel_axis % image.ndim output_shape = ( - output_shape[:channel_axis] + output_shape[channel_axis:] + - (nc,) + output_shape[:channel_axis] + + output_shape[channel_axis:] + + (nc,) ) else: raise ValueError( @@ -1336,13 +1489,13 @@ def resize_local_mean(image, output_shape, grid_mode=True, ) resized = image else: - resized, output_shape = _preprocess_resize_output_shape(image, - output_shape) + resized, output_shape = _preprocess_resize_output_shape( + image, output_shape + ) resized = convert_to_float(resized, preserve_range) dtype = resized.dtype - for axis, (old_size, new_size) in enumerate(zip(image.shape, - output_shape)): + for axis, (old_size, new_size) in enumerate(zip(image.shape, output_shape)): if old_size == new_size: continue weights = _local_mean_weights(old_size, new_size, grid_mode, dtype) diff --git a/python/cucim/src/cucim/skimage/transform/integral.py b/python/cucim/src/cucim/skimage/transform/integral.py index 28794f6bb..dccfcb204 100644 --- a/python/cucim/src/cucim/skimage/transform/integral.py +++ b/python/cucim/src/cucim/skimage/transform/integral.py @@ -36,7 +36,7 @@ def integral_image(image, *, dtype=None): ACM SIGGRAPH Computer Graphics, vol. 18, 1984, pp. 207-212. """ - if dtype is None and image.real.dtype.kind == 'f': + if dtype is None and image.real.dtype.kind == "f": # default to at least double precision cumsum for accuracy dtype = np.promote_types(image.dtype, np.float64) @@ -90,13 +90,13 @@ def integrate(ii, start, end): # convert negative indices into equivalent positive indices start_negatives = start < 0 end_negatives = end < 0 - start = (start + total_shape) * start_negatives + \ - start * ~(start_negatives) # noqa - end = (end + total_shape) * end_negatives + \ - end * ~(end_negatives) # noqa + start = (start + total_shape) * start_negatives + start * ~( + start_negatives + ) # noqa + end = (end + total_shape) * end_negatives + end * ~(end_negatives) # noqa if np.any((end - start) < 0): - raise IndexError('end coordinates must be greater or equal to start') + raise IndexError("end coordinates must be greater or equal to start") # bit_perm is the total number of terms in the expression # of S. 
For example, in the case of a 4x4 2D image @@ -111,7 +111,7 @@ def integrate(ii, start, end): S = 0 else: S = cp.zeros(rows) - bit_perm = 2 ** ii.ndim + bit_perm = 2**ii.ndim width = len(bin(bit_perm - 1)[2:]) # Sum of a (hyper)cube, from an integral image is computed using @@ -130,16 +130,18 @@ def integrate(ii, start, end): for i in range(bit_perm): # for all permutations # boolean permutation array eg [True, False] for '10' binary = bin(i)[2:].zfill(width) - bool_mask = [bit == '1' for bit in binary] + bool_mask = [bit == "1" for bit in binary] sign = (-1) ** sum(bool_mask) # determine sign of permutation - bad = [np.any(((start[r] - 1) * bool_mask) < 0) - for r in range(rows)] # find out bad start rows + bad = [ + np.any(((start[r] - 1) * bool_mask) < 0) for r in range(rows) + ] # find out bad start rows # find corner for each row - corner_points = (end * (np.invert(bool_mask))) + \ - ((start - 1) * bool_mask) # noqa + corner_points = (end * (np.invert(bool_mask))) + ( + (start - 1) * bool_mask + ) # noqa # CuPy Backend: TODO: check efficiency here if scalar_output: diff --git a/python/cucim/src/cucim/skimage/transform/pyramids.py b/python/cucim/src/cucim/skimage/transform/pyramids.py index 41cfb1c06..3608c0de4 100644 --- a/python/cucim/src/cucim/skimage/transform/pyramids.py +++ b/python/cucim/src/cucim/skimage/transform/pyramids.py @@ -18,19 +18,33 @@ def _smooth(image, sigma, mode, cval, channel_axis): sigma = (sigma,) * (image.ndim - 1) else: channel_axis = None - gaussian(image, sigma, output=smoothed, mode=mode, cval=cval, - channel_axis=channel_axis) + gaussian( + image, + sigma, + output=smoothed, + mode=mode, + cval=cval, + channel_axis=channel_axis, + ) return smoothed def _check_factor(factor): if factor <= 1: - raise ValueError('scale factor must be greater than 1') - - -def pyramid_reduce(image, downscale=2, sigma=None, order=1, - mode='reflect', cval=0, preserve_range=False, *, - channel_axis=None): + raise ValueError("scale factor must be greater than 1") + + +def pyramid_reduce( + image, + downscale=2, + sigma=None, + order=1, + mode="reflect", + cval=0, + preserve_range=False, + *, + channel_axis=None, +): """Smooth and then downsample image. Parameters @@ -87,15 +101,29 @@ def pyramid_reduce(image, downscale=2, sigma=None, order=1, sigma = 2 * downscale / 6.0 smoothed = _smooth(image, sigma, mode, cval, channel_axis) - out = resize(smoothed, out_shape, order=order, mode=mode, cval=cval, - anti_aliasing=False) + out = resize( + smoothed, + out_shape, + order=order, + mode=mode, + cval=cval, + anti_aliasing=False, + ) return out -def pyramid_expand(image, upscale=2, sigma=None, order=1, - mode='reflect', cval=0, preserve_range=False, *, - channel_axis=None): +def pyramid_expand( + image, + upscale=2, + sigma=None, + order=1, + mode="reflect", + cval=0, + preserve_range=False, + *, + channel_axis=None, +): """Upsample and then smooth image. 
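Per the docstrings above, `pyramid_reduce` smooths before downsampling while `pyramid_expand` upsamples before smoothing. A minimal round-trip sketch on a synthetic image:

```python
import cupy as cp
from cucim.skimage.transform import pyramid_expand, pyramid_reduce

img = cp.random.random((64, 64))
reduced = pyramid_reduce(img, downscale=2)     # smooth, then downsample
expanded = pyramid_expand(reduced, upscale=2)  # upsample, then smooth
assert reduced.shape == (32, 32)
assert expanded.shape == (64, 64)
```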
Parameters @@ -150,16 +178,26 @@ def pyramid_expand(image, upscale=2, sigma=None, order=1, # automatically determine sigma which covers > 99% of distribution sigma = 2 * upscale / 6.0 - resized = resize(image, out_shape, order=order, - mode=mode, cval=cval, anti_aliasing=False) + resized = resize( + image, out_shape, order=order, mode=mode, cval=cval, anti_aliasing=False + ) out = _smooth(resized, sigma, mode, cval, channel_axis) return out -def pyramid_gaussian(image, max_layer=-1, downscale=2, sigma=None, order=1, - mode='reflect', cval=0, preserve_range=False, *, - channel_axis=None): +def pyramid_gaussian( + image, + max_layer=-1, + downscale=2, + sigma=None, + order=1, + mode="reflect", + cval=0, + preserve_range=False, + *, + channel_axis=None, +): """Yield images of the Gaussian pyramid formed by the input image. Recursively applies the `pyramid_reduce` function to the image, and yields @@ -226,8 +264,15 @@ def pyramid_gaussian(image, max_layer=-1, downscale=2, sigma=None, order=1, while layer != max_layer: layer += 1 - layer_image = pyramid_reduce(prev_layer_image, downscale, sigma, order, - mode, cval, channel_axis=channel_axis) + layer_image = pyramid_reduce( + prev_layer_image, + downscale, + sigma, + order, + mode, + cval, + channel_axis=channel_axis, + ) prev_shape = current_shape prev_layer_image = layer_image @@ -240,9 +285,18 @@ def pyramid_gaussian(image, max_layer=-1, downscale=2, sigma=None, order=1, yield layer_image -def pyramid_laplacian(image, max_layer=-1, downscale=2, sigma=None, order=1, - mode='reflect', cval=0, preserve_range=False, *, - channel_axis=None): +def pyramid_laplacian( + image, + max_layer=-1, + downscale=2, + sigma=None, + order=1, + mode="reflect", + cval=0, + preserve_range=False, + *, + channel_axis=None, +): """Yield images of the laplacian pyramid formed by the input image. 
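`pyramid_gaussian` is a generator built on repeated `pyramid_reduce` calls: it yields the (float-converted) input first, then each progressively smaller layer. A minimal sketch:

```python
import cupy as cp
from cucim.skimage.transform import pyramid_gaussian

img = cp.random.random((64, 64))
# Expected layer shapes: (64, 64), (32, 32), (16, 16), ... down to (1, 1).
shapes = [layer.shape for layer in pyramid_gaussian(img, downscale=2)]
```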
Each layer contains the difference between the downsampled and the @@ -326,20 +380,25 @@ def pyramid_laplacian(image, max_layer=-1, downscale=2, sigma=None, order=1, max_layer = math.ceil(math.log(max(shape_without_channels), downscale)) for layer in range(max_layer): - if channel_axis is not None: out_shape = tuple( math.ceil(d / float(downscale)) if ax != channel_axis else d for ax, d in enumerate(current_shape) ) else: - out_shape = tuple(math.ceil(d / float(downscale)) - for d in current_shape) + out_shape = tuple( + math.ceil(d / float(downscale)) for d in current_shape + ) - resized_image = resize(smoothed_image, out_shape, order=order, - mode=mode, cval=cval, anti_aliasing=False) - smoothed_image = _smooth(resized_image, sigma, mode, cval, - channel_axis) + resized_image = resize( + smoothed_image, + out_shape, + order=order, + mode=mode, + cval=cval, + anti_aliasing=False, + ) + smoothed_image = _smooth(resized_image, sigma, mode, cval, channel_axis) current_shape = resized_image.shape yield resized_image - smoothed_image diff --git a/python/cucim/src/cucim/skimage/transform/tests/test_geometric.py b/python/cucim/src/cucim/skimage/transform/tests/test_geometric.py index d9aee75db..3a979000d 100644 --- a/python/cucim/src/cucim/skimage/transform/tests/test_geometric.py +++ b/python/cucim/src/cucim/skimage/transform/tests/test_geometric.py @@ -6,17 +6,24 @@ import pytest from cupy.testing import assert_array_almost_equal, assert_array_equal -from cucim.skimage.transform import (AffineTransform, EssentialMatrixTransform, - EuclideanTransform, - FundamentalMatrixTransform, - PiecewiseAffineTransform, - PolynomialTransform, ProjectiveTransform, - SimilarityTransform, estimate_transform, - matrix_transform) -from cucim.skimage.transform._geometric import (GeometricTransform, - _affine_matrix_from_vector, - _center_and_normalize_points, - _euler_rotation_matrix) +from cucim.skimage.transform import ( + AffineTransform, + EssentialMatrixTransform, + EuclideanTransform, + FundamentalMatrixTransform, + PiecewiseAffineTransform, + PolynomialTransform, + ProjectiveTransform, + SimilarityTransform, + estimate_transform, + matrix_transform, +) +from cucim.skimage.transform._geometric import ( + GeometricTransform, + _affine_matrix_from_vector, + _center_and_normalize_points, + _euler_rotation_matrix, +) # fmt: off SRC = cp.array([ @@ -43,11 +50,16 @@ def test_estimate_transform(): - for tform in ('euclidean', 'similarity', 'affine', 'projective', - 'polynomial'): + for tform in ( + "euclidean", + "similarity", + "affine", + "projective", + "polynomial", + ): estimate_transform(tform, SRC[:2, :], DST[:2, :]) with pytest.raises(ValueError): - estimate_transform('foobar', SRC[:2, :], DST[:2, :]) + estimate_transform("foobar", SRC[:2, :], DST[:2, :]) def test_matrix_transform(): @@ -57,13 +69,13 @@ def test_matrix_transform(): def test_euclidean_estimation(): # exact solution - tform = estimate_transform('euclidean', SRC[:2, :], SRC[:2, :] + 10) + tform = estimate_transform("euclidean", SRC[:2, :], SRC[:2, :] + 10) assert_array_almost_equal(tform(SRC[:2, :]), SRC[:2, :] + 10) assert_array_almost_equal(tform.params[0, 0], tform.params[1, 1]) assert_array_almost_equal(tform.params[0, 1], -tform.params[1, 0]) # over-determined - tform2 = estimate_transform('euclidean', SRC, DST) + tform2 = estimate_transform("euclidean", SRC, DST) assert_array_almost_equal(tform2.inverse(tform2(SRC)), SRC) assert_array_almost_equal(tform2.params[0, 0], tform2.params[1, 1]) assert_array_almost_equal(tform2.params[0, 1], 
-tform2.params[1, 0]) @@ -84,8 +96,9 @@ def test_3d_euclidean_estimation(): dst_points = [] for pt in src_points: pt_r = pt.reshape(3, 1) - dst = cp.matmul(rotation_matrix, pt_r) + \ - translation_vector.reshape(3, 1) + dst = cp.matmul(rotation_matrix, pt_r) + translation_vector.reshape( + 3, 1 + ) dst = dst.reshape(3) dst_points.append(dst) @@ -129,13 +142,13 @@ def test_euclidean_init(): def test_similarity_estimation(): # exact solution - tform = estimate_transform('similarity', SRC[:2, :], DST[:2, :]) + tform = estimate_transform("similarity", SRC[:2, :], DST[:2, :]) assert_array_almost_equal(tform(SRC[:2, :]), DST[:2, :]) assert_array_almost_equal(tform.params[0, 0], tform.params[1, 1]) assert_array_almost_equal(tform.params[0, 1], -tform.params[1, 0]) # over-determined - tform2 = estimate_transform('similarity', SRC, DST) + tform2 = estimate_transform("similarity", SRC, DST) assert_array_almost_equal(tform2.inverse(tform2(SRC)), SRC) assert_array_almost_equal(tform2.params[0, 0], tform2.params[1, 1]) assert_array_almost_equal(tform2.params[0, 1], -tform2.params[1, 0]) @@ -161,8 +174,9 @@ def test_3d_similarity_estimation(): dst_points = [] for pt in src_points: pt_r = pt.reshape(3, 1) - dst = cp.matmul(rotation_matrix, pt_r) + \ - translation_vector.reshape(3, 1) + dst = cp.matmul(rotation_matrix, pt_r) + translation_vector.reshape( + 3, 1 + ) dst = dst.reshape(3) dst_points.append(dst) @@ -183,8 +197,9 @@ def test_similarity_init(): scale = 0.1 rotation = 1 translation = (1, 1) - tform = SimilarityTransform(scale=scale, rotation=rotation, - translation=translation) + tform = SimilarityTransform( + scale=scale, rotation=rotation, translation=translation + ) assert_array_almost_equal(tform.scale, scale) assert_array_almost_equal(tform.rotation, rotation) assert_array_almost_equal(tform.translation, translation) @@ -199,8 +214,9 @@ def test_similarity_init(): scale = 0.1 rotation = 0 translation = (1, 1) - tform = SimilarityTransform(scale=scale, rotation=rotation, - translation=translation) + tform = SimilarityTransform( + scale=scale, rotation=rotation, translation=translation + ) assert_array_almost_equal(tform.scale, scale) assert_array_almost_equal(tform.rotation, rotation) assert_array_almost_equal(tform.translation, translation) @@ -209,8 +225,9 @@ def test_similarity_init(): scale = 0.1 rotation = np.pi / 2 translation = (1, 1) - tform = SimilarityTransform(scale=scale, rotation=rotation, - translation=translation) + tform = SimilarityTransform( + scale=scale, rotation=rotation, translation=translation + ) assert_array_almost_equal(tform.scale, scale) assert_array_almost_equal(tform.rotation, rotation) assert_array_almost_equal(tform.translation, translation) @@ -220,9 +237,13 @@ def test_similarity_init(): scale = 1.0 rotation = np.pi / 2 translation = (0, 0) - params = np.array([[0, -1, 1.33226763e-15], - [1, 2.22044605e-16, -1.33226763e-15], - [0, 0, 1]]) + params = np.array( + [ + [0, -1, 1.33226763e-15], + [1, 2.22044605e-16, -1.33226763e-15], + [0, 0, 1], + ] + ) tform = SimilarityTransform(params) assert_array_almost_equal(tform.scale, scale) assert_array_almost_equal(tform.rotation, rotation) @@ -231,11 +252,11 @@ def test_similarity_init(): def test_affine_estimation(): # exact solution - tform = estimate_transform('affine', SRC[:3, :], DST[:3, :]) + tform = estimate_transform("affine", SRC[:3, :], DST[:3, :]) assert_array_almost_equal(tform(SRC[:3, :]), DST[:3, :]) # over-determined - tform2 = estimate_transform('affine', SRC, DST) + tform2 = 
estimate_transform("affine", SRC, DST) assert_array_almost_equal(tform2.inverse(tform2(SRC)), SRC) # via estimate method @@ -250,8 +271,9 @@ def test_affine_init(): rotation = 1 shear = 0.1 translation = (1, 1) - tform = AffineTransform(scale=scale, rotation=rotation, shear=shear, - translation=translation) + tform = AffineTransform( + scale=scale, rotation=rotation, shear=shear, translation=translation + ) assert_array_almost_equal(tform.scale, scale) assert_array_almost_equal(tform.rotation, rotation) assert_array_almost_equal(tform.shear, shear) @@ -265,8 +287,10 @@ def test_affine_init(): assert_array_almost_equal(tform2.translation, translation) # scalar vs. tuple scale arguments - assert_array_almost_equal(AffineTransform(scale=0.5).scale, - AffineTransform(scale=(0.5, 0.5)).scale) + assert_array_almost_equal( + AffineTransform(scale=0.5).scale, + AffineTransform(scale=(0.5, 0.5)).scale, + ) @pytest.mark.parametrize("xp", [np, cp]) @@ -322,7 +346,7 @@ def test_fundamental_matrix_estimation(xp): 0.878616, 0.602447, 0.642616, 1.028681]).reshape(-1, 2) # noqa # fmt: on - tform = estimate_transform('fundamental', src, dst) + tform = estimate_transform("fundamental", src, dst) # Reference values obtained using COLMAP SfM library. # fmt: off @@ -340,44 +364,52 @@ def test_fundamental_matrix_estimation(xp): @pytest.mark.parametrize("xp", [np, cp]) def test_fundamental_matrix_residuals(xp): essential_matrix_tform = EssentialMatrixTransform( - rotation=xp.eye(3), translation=xp.array([1, 0, 0]), xp=xp) + rotation=xp.eye(3), translation=xp.array([1, 0, 0]), xp=xp + ) tform = FundamentalMatrixTransform() tform.params = essential_matrix_tform.params src = xp.array([[0, 0], [0, 0], [0, 0]]) dst = xp.array([[2, 0], [2, 1], [2, 2]]) assert_array_almost_equal( - tform.residuals(src, dst) ** 2, xp.array([0, 0.5, 2])) + tform.residuals(src, dst) ** 2, xp.array([0, 0.5, 2]) + ) @pytest.mark.parametrize("xp", [np, cp]) def test_fundamental_matrix_forward(xp): essential_matrix_tform = EssentialMatrixTransform( - rotation=xp.eye(3), translation=xp.array([1, 0, 0]), xp=xp) + rotation=xp.eye(3), translation=xp.array([1, 0, 0]), xp=xp + ) tform = FundamentalMatrixTransform() tform.params = essential_matrix_tform.params src = xp.array([[0, 0], [0, 1], [1, 1]]) assert_array_almost_equal( - tform(src), xp.array([[0, -1, 0], [0, -1, 1], [0, -1, 1]])) + tform(src), xp.array([[0, -1, 0], [0, -1, 1], [0, -1, 1]]) + ) @pytest.mark.parametrize("xp", [np, cp]) def test_fundamental_matrix_inverse(xp): essential_matrix_tform = EssentialMatrixTransform( - rotation=xp.eye(3), translation=xp.array([1, 0, 0]), xp=xp) + rotation=xp.eye(3), translation=xp.array([1, 0, 0]), xp=xp + ) tform = FundamentalMatrixTransform() tform.params = essential_matrix_tform.params src = xp.array([[0, 0], [0, 1], [1, 1]]) assert_array_almost_equal( - tform.inverse(src), xp.array([[0, 1, 0], [0, 1, -1], [0, 1, -1]])) + tform.inverse(src), xp.array([[0, 1, 0], [0, 1, -1], [0, 1, -1]]) + ) @pytest.mark.parametrize("xp", [np, cp]) def test_essential_matrix_init(xp): - tform = EssentialMatrixTransform(rotation=xp.eye(3), - translation=xp.array([0, 0, 1]), xp=xp) + tform = EssentialMatrixTransform( + rotation=xp.eye(3), translation=xp.array([0, 0, 1]), xp=xp + ) assert_array_equal( - tform.params, xp.array([0, -1, 0, 1, 0, 0, 0, 0, 0]).reshape(3, 3)) + tform.params, xp.array([0, -1, 0, 1, 0, 0, 0, 0, 0]).reshape(3, 3) + ) @pytest.mark.parametrize("xp", [np, cp]) @@ -392,7 +424,7 @@ def test_essential_matrix_estimation(xp): 0.839509, 0.087290, 
1.779735, 1.116857, # noqa 0.878616, 0.602447, 0.642616, 1.028681]).reshape(-1, 2) # noqa # fmt: on - tform = estimate_transform('essential', src, dst) + tform = estimate_transform("essential", src, dst) # Reference values obtained using COLMAP SfM library. # fmt: off @@ -409,39 +441,45 @@ def test_essential_matrix_estimation(xp): @pytest.mark.parametrize("xp", [np, cp]) def test_essential_matrix_forward(xp): - tform = EssentialMatrixTransform(rotation=xp.eye(3), - translation=xp.array([1, 0, 0]), xp=xp) + tform = EssentialMatrixTransform( + rotation=xp.eye(3), translation=xp.array([1, 0, 0]), xp=xp + ) src = xp.array([[0, 0], [0, 1], [1, 1]]) assert_array_almost_equal( - tform(src), xp.array([[0, -1, 0], [0, -1, 1], [0, -1, 1]])) + tform(src), xp.array([[0, -1, 0], [0, -1, 1], [0, -1, 1]]) + ) @pytest.mark.parametrize("xp", [np, cp]) def test_essential_matrix_inverse(xp): - tform = EssentialMatrixTransform(rotation=xp.eye(3), - translation=xp.array([1, 0, 0]), xp=xp) + tform = EssentialMatrixTransform( + rotation=xp.eye(3), translation=xp.array([1, 0, 0]), xp=xp + ) src = xp.array([[0, 0], [0, 1], [1, 1]]) - assert_array_almost_equal(tform.inverse(src), - xp.array([[0, 1, 0], [0, 1, -1], [0, 1, -1]])) + assert_array_almost_equal( + tform.inverse(src), xp.array([[0, 1, 0], [0, 1, -1], [0, 1, -1]]) + ) @pytest.mark.parametrize("xp", [np, cp]) def test_essential_matrix_residuals(xp): - tform = EssentialMatrixTransform(rotation=xp.eye(3), - translation=xp.array([1, 0, 0]), xp=xp) + tform = EssentialMatrixTransform( + rotation=xp.eye(3), translation=xp.array([1, 0, 0]), xp=xp + ) src = xp.array([[0, 0], [0, 0], [0, 0]]) dst = xp.array([[2, 0], [2, 1], [2, 2]]) assert_array_almost_equal( - tform.residuals(src, dst) ** 2, xp.array([0, 0.5, 2])) + tform.residuals(src, dst) ** 2, xp.array([0, 0.5, 2]) + ) def test_projective_estimation(): # exact solution - tform = estimate_transform('projective', SRC[:4, :], DST[:4, :]) + tform = estimate_transform("projective", SRC[:4, :], DST[:4, :]) assert_array_almost_equal(tform(SRC[:4, :]), DST[:4, :]) # over-determined - tform2 = estimate_transform('projective', SRC, DST) + tform2 = estimate_transform("projective", SRC, DST) assert_array_almost_equal(tform2.inverse(tform2(SRC)), SRC) # via estimate method @@ -451,33 +489,34 @@ def test_projective_estimation(): def test_projective_weighted_estimation(): - # Exact solution with same points, and unity weights - tform = estimate_transform('projective', SRC[:4, :], DST[:4, :]) - tform_w = estimate_transform('projective', - SRC[:4, :], DST[:4, :], cp.ones(4)) + tform = estimate_transform("projective", SRC[:4, :], DST[:4, :]) + tform_w = estimate_transform( + "projective", SRC[:4, :], DST[:4, :], cp.ones(4) + ) assert_array_almost_equal(tform.params, tform_w.params) # Over-determined solution with same points, and unity weights - tform = estimate_transform('projective', SRC, DST) - tform_w = estimate_transform('projective', - SRC, DST, cp.ones(SRC.shape[0])) + tform = estimate_transform("projective", SRC, DST) + tform_w = estimate_transform("projective", SRC, DST, cp.ones(SRC.shape[0])) assert_array_almost_equal(tform.params, tform_w.params) # Repeating a point, but setting its weight small, should give nearly # the same result. 
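The weighted-estimation tests here rely on two properties worth spelling out: unit weights reproduce the unweighted least-squares fit, and a near-zero weight effectively removes a point. A minimal sketch with synthetic correspondences (four points give an exact projective solution):

```python
import cupy as cp
from cucim.skimage.transform import estimate_transform

src = cp.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
dst = src * 2.0 + cp.array([3.0, -1.0])
tf = estimate_transform("projective", src, dst)
tf_w = estimate_transform("projective", src, dst, cp.ones(4))
# Both parameter matrices agree to floating-point precision.
```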
point_weights = cp.ones(SRC.shape[0] + 1) point_weights[0] = 1.0e-15 - tform1 = estimate_transform('projective', SRC, DST) - tform2 = estimate_transform('projective', - SRC[cp.arange(-1, SRC.shape[0]), :], - DST[cp.arange(-1, SRC.shape[0]), :], - point_weights) + tform1 = estimate_transform("projective", SRC, DST) + tform2 = estimate_transform( + "projective", + SRC[cp.arange(-1, SRC.shape[0]), :], + DST[cp.arange(-1, SRC.shape[0]), :], + point_weights, + ) assert_array_almost_equal(tform1.params, tform2.params, decimal=3) def test_projective_init(): - tform = estimate_transform('projective', SRC, DST) + tform = estimate_transform("projective", SRC, DST) # init with transformation matrix tform2 = ProjectiveTransform(tform.params) assert_array_almost_equal(tform2.params, tform.params) @@ -485,7 +524,7 @@ def test_projective_init(): def test_polynomial_estimation(): # over-determined - tform = estimate_transform('polynomial', SRC, DST, order=10) + tform = estimate_transform("polynomial", SRC, DST, order=10) assert_array_almost_equal(tform(SRC), DST, 6) # via estimate method @@ -496,37 +535,37 @@ def test_polynomial_estimation(): def test_polynomial_weighted_estimation(): # Over-determined solution with same points, and unity weights - tform = estimate_transform('polynomial', SRC, DST, order=10) - tform_w = estimate_transform('polynomial', - SRC, - DST, - order=10, - weights=cp.ones(SRC.shape[0])) + tform = estimate_transform("polynomial", SRC, DST, order=10) + tform_w = estimate_transform( + "polynomial", SRC, DST, order=10, weights=cp.ones(SRC.shape[0]) + ) assert_array_almost_equal(tform.params, tform_w.params) # Repeating a point, but setting its weight small, should give nearly # the same result. point_weights = cp.ones(SRC.shape[0] + 1) point_weights[0] = 1.0e-15 - tform1 = estimate_transform('polynomial', SRC, DST, order=10) - tform2 = estimate_transform('polynomial', - SRC[cp.arange(-1, SRC.shape[0]), :], - DST[cp.arange(-1, SRC.shape[0]), :], - order=10, - weights=point_weights) + tform1 = estimate_transform("polynomial", SRC, DST, order=10) + tform2 = estimate_transform( + "polynomial", + SRC[cp.arange(-1, SRC.shape[0]), :], + DST[cp.arange(-1, SRC.shape[0]), :], + order=10, + weights=point_weights, + ) assert_array_almost_equal(tform1.params, tform2.params, decimal=4) def test_polynomial_init(): - tform = estimate_transform('polynomial', SRC, DST, order=10) + tform = estimate_transform("polynomial", SRC, DST, order=10) # init with transformation parameters tform2 = PolynomialTransform(tform.params) assert_array_almost_equal(tform2.params, tform.params) def test_polynomial_default_order(): - tform = estimate_transform('polynomial', SRC, DST) - tform2 = estimate_transform('polynomial', SRC, DST, order=2) + tform = estimate_transform("polynomial", SRC, DST) + tform2 = estimate_transform("polynomial", SRC, DST, order=2) assert_array_almost_equal(tform2.params, tform.params) @@ -538,13 +577,13 @@ def test_polynomial_inverse(): def test_union(): tform1 = SimilarityTransform(scale=0.1, rotation=0.3) tform2 = SimilarityTransform(scale=0.1, rotation=0.9) - tform3 = SimilarityTransform(scale=0.1 ** 2, rotation=0.3 + 0.9) + tform3 = SimilarityTransform(scale=0.1**2, rotation=0.3 + 0.9) tform = tform1 + tform2 assert_array_almost_equal(tform.params, tform3.params) tform1 = AffineTransform(scale=(0.1, 0.1), rotation=0.3) tform2 = SimilarityTransform(scale=0.1, rotation=0.9) - tform3 = SimilarityTransform(scale=0.1 ** 2, rotation=0.3 + 0.9) + tform3 = SimilarityTransform(scale=0.1**2, 
rotation=0.3 + 0.9) tform = tform1 + tform2 assert_array_almost_equal(tform.params, tform3.params) assert tform.__class__ == ProjectiveTransform @@ -632,14 +671,17 @@ def test_invalid_input(xp): with pytest.raises(ValueError): EssentialMatrixTransform(rotation=xp.eye(3)) with pytest.raises(ValueError): - EssentialMatrixTransform(rotation=xp.eye(3), - translation=xp.zeros((2,)), xp=xp) + EssentialMatrixTransform( + rotation=xp.eye(3), translation=xp.zeros((2,)), xp=xp + ) with pytest.raises(ValueError): - EssentialMatrixTransform(rotation=xp.eye(3), - translation=xp.zeros((2,)), xp=xp) + EssentialMatrixTransform( + rotation=xp.eye(3), translation=xp.zeros((2,)), xp=xp + ) with pytest.raises(ValueError): - EssentialMatrixTransform(rotation=xp.eye(3), - translation=xp.zeros((3,)), xp=xp) + EssentialMatrixTransform( + rotation=xp.eye(3), translation=xp.zeros((3,)), xp=xp + ) def test_degenerate(xp=cp): @@ -680,17 +722,35 @@ def test_degenerate(xp=cp): assert not tform.estimate(src, dst) assert xp.all(xp.isnan(tform.params)) - # The tesselation on the following points produces one degenerate affine + # The tessellation on the following points produces one degenerate affine # warp within PiecewiseAffineTransform. - src = xp.asarray([ - [0, 192, 256], [0, 256, 256], [5, 0, 192], [5, 64, 0], [5, 64, 64], - [5, 64, 256], [5, 192, 192], [5, 256, 256], [0, 192, 256], - ]) + src = xp.asarray( + [ + [0, 192, 256], + [0, 256, 256], + [5, 0, 192], + [5, 64, 0], + [5, 64, 64], + [5, 64, 256], + [5, 192, 192], + [5, 256, 256], + [0, 192, 256], + ] + ) - dst = xp.asarray([ - [0, 142, 206], [0, 206, 206], [5, -50, 142], [5, 14, 0], [5, 14, 64], - [5, 14, 206], [5, 142, 142], [5, 206, 206], [0, 142, 206], - ]) + dst = xp.asarray( + [ + [0, 142, 206], + [0, 206, 206], + [5, -50, 142], + [5, 14, 0], + [5, 14, 64], + [5, 14, 206], + [5, 142, 142], + [5, 206, 206], + [0, 142, 206], + ] + ) tform = PiecewiseAffineTransform() assert not tform.estimate(src, dst) assert np.all(np.isnan(tform.affines[4].params)) # degenerate affine @@ -701,7 +761,7 @@ def test_degenerate(xp=cp): assert not xp.all(xp.isnan(affine.params)) -@pytest.mark.parametrize('xp', [np, cp]) +@pytest.mark.parametrize("xp", [np, cp]) def test_normalize_degenerate_points(xp): """Return nan matrix *of appropriate size* when point is repeated.""" pts = xp.array([[73.42834308, 94.2977623]] * 3) @@ -726,8 +786,8 @@ def test_projective_repr(xp): # fmt: on # Hack the escaped regex to allow whitespace before each number for # compatibility with different numpy versions. - want = want.replace('0\\.', ' *0\\.') - want = want.replace('1\\.', ' *1\\.') + want = want.replace("0\\.", " *0\\.") + want = want.replace("1\\.", " *1\\.") assert re.match(want, repr(tform)) @@ -745,8 +805,8 @@ def test_projective_str(xp): # fmt: on # Hack the escaped regex to allow whitespace before each number for # compatibility with different numpy versions. 
- want = want.replace('0\\.', ' *0\\.') - want = want.replace('1\\.', ' *1\\.') + want = want.replace("0\\.", " *0\\.") + want = want.replace("1\\.", " *1\\.") print(want) assert re.match(want, str(tform)) @@ -769,10 +829,12 @@ def test_estimate_affine_3d(xp): src = np.random.random((25, ndim)) * 2 ** np.arange(7, 7 + ndim) src = xp.asarray(src) matrix = xp.array( - [[4.8, 0.1, 0.2, 25], - [0.0, 1.0, 0.1, 30], - [0.0, 0.0, 1.0, -2], - [0.0, 0.0, 0.0, 1.]] + [ + [4.8, 0.1, 0.2, 25], + [0.0, 1.0, 0.1, 30], + [0.0, 0.0, 1.0, -2], + [0.0, 0.0, 0.0, 1.0], + ] ) tf = AffineTransform(matrix=matrix) dst = tf(src) @@ -802,9 +864,7 @@ def test_fundamental_3d_not_implemented(): def test_affine_transform_from_linearized_parameters(): - mat = np.concatenate( - (np.random.random((3, 4)), np.eye(4)[-1:]), axis=0 - ) + mat = np.concatenate((np.random.random((3, 4)), np.eye(4)[-1:]), axis=0) v = mat[:-1].ravel() mat_from_v = _affine_matrix_from_vector(v) tf = AffineTransform(matrix=mat_from_v) @@ -851,11 +911,11 @@ def test_euclidean_param_defaults(xp): @pytest.mark.parametrize("xp", [np, cp]) def test_similarity_transform_params(xp): with pytest.raises(ValueError): - _ = SimilarityTransform(translation=(4, 5, 6, 7), dimensionality=4, - xp=xp) + _ = SimilarityTransform( + translation=(4, 5, 6, 7), dimensionality=4, xp=xp + ) tf = SimilarityTransform(scale=4, dimensionality=3, xp=xp) - assert_array_equal(tf(xp.asarray([[1, 1, 1]])), - xp.asarray([[4, 4, 4]])) + assert_array_equal(tf(xp.asarray([[1, 1, 1]])), xp.asarray([[4, 4, 4]])) @pytest.mark.parametrize("xp", [np, cp]) diff --git a/python/cucim/src/cucim/skimage/transform/tests/test_integral.py b/python/cucim/src/cucim/skimage/transform/tests/test_integral.py index 5cee98ba1..0010b2b03 100644 --- a/python/cucim/src/cucim/skimage/transform/tests/test_integral.py +++ b/python/cucim/src/cucim/skimage/transform/tests/test_integral.py @@ -11,15 +11,15 @@ @pytest.mark.parametrize( - 'dtype', [cp.float16, cp.float32, cp.float64, cp.uint8, cp.int32] + "dtype", [cp.float16, cp.float32, cp.float64, cp.uint8, cp.int32] ) -@pytest.mark.parametrize('dtype_as_kwarg', [False, True]) +@pytest.mark.parametrize("dtype_as_kwarg", [False, True]) def test_integral_image_validity(dtype, dtype_as_kwarg): rstate = np.random.default_rng(1234) dtype_kwarg = dtype if dtype_as_kwarg else None y = cp.asarray((rstate.random((20, 20)) * 255).astype(dtype)) out = integral_image(y, dtype=dtype_kwarg) - if y.dtype.kind == 'f': + if y.dtype.kind == "f": if dtype_as_kwarg: assert out.dtype == dtype rtol = 1e-3 if dtype == np.float16 else 1e-7 diff --git a/python/cucim/src/cucim/skimage/transform/tests/test_pyramids.py b/python/cucim/src/cucim/skimage/transform/tests/test_pyramids.py index 2211f2adc..da022aeb3 100644 --- a/python/cucim/src/cucim/skimage/transform/tests/test_pyramids.py +++ b/python/cucim/src/cucim/skimage/transform/tests/test_pyramids.py @@ -13,27 +13,29 @@ image_gray = image[..., 0] -@pytest.mark.parametrize('channel_axis', [0, 1, -1]) +@pytest.mark.parametrize("channel_axis", [0, 1, -1]) def test_pyramid_reduce_rgb(channel_axis): image = cp.array(data.astronaut()) rows, cols, dim = image.shape image_ = cp.moveaxis(image, source=-1, destination=channel_axis) - out_ = pyramids.pyramid_reduce(image_, downscale=2, - channel_axis=channel_axis) + out_ = pyramids.pyramid_reduce( + image_, downscale=2, channel_axis=channel_axis + ) out = cp.moveaxis(out_, channel_axis, -1) assert_array_equal(out.shape, (rows / 2, cols / 2, dim)) def test_pyramid_reduce_gray(): rows, cols = 
-    out1 = pyramids.pyramid_reduce(image_gray, downscale=2,
-                                   channel_axis=None)
+    out1 = pyramids.pyramid_reduce(image_gray, downscale=2, channel_axis=None)
     assert_array_equal(out1.shape, (rows / 2, cols / 2))
     assert_almost_equal(float(out1.ptp()), 1.0, decimal=2)
-    out2 = pyramids.pyramid_reduce(image_gray, downscale=2,
-                                   channel_axis=None, preserve_range=True)
-    assert_almost_equal(float(out2.ptp()) / float(image_gray.ptp()), 1.0,
-                        decimal=2)
+    out2 = pyramids.pyramid_reduce(
+        image_gray, downscale=2, channel_axis=None, preserve_range=True
+    )
+    assert_almost_equal(
+        float(out2.ptp()) / float(image_gray.ptp()), 1.0, decimal=2
+    )


 def test_pyramid_reduce_gray_defaults():
@@ -47,19 +49,18 @@ def test_pyramid_reduce_nd():
     for ndim in [1, 2, 3, 4]:
-        img = cp.random.randn(*((8, ) * ndim))
+        img = cp.random.randn(*((8,) * ndim))
         out = pyramids.pyramid_reduce(img, downscale=2, channel_axis=None)
         expected_shape = cp.asarray(img.shape) / 2
         assert_array_equal(out.shape, expected_shape)


-@pytest.mark.parametrize('channel_axis', [0, 1, 2, -1, -2, -3])
+@pytest.mark.parametrize("channel_axis", [0, 1, 2, -1, -2, -3])
 def test_pyramid_expand_rgb(channel_axis):
     image = cp.array(data.astronaut())
     rows, cols, dim = image.shape
     image = cp.moveaxis(image, source=-1, destination=channel_axis)
-    out = pyramids.pyramid_expand(image, upscale=2,
-                                  channel_axis=channel_axis)
+    out = pyramids.pyramid_expand(image, upscale=2, channel_axis=channel_axis)
     expected_shape = [rows * 2, cols * 2]
     expected_shape.insert(channel_axis % image.ndim, dim)
     assert_array_equal(out.shape, expected_shape)
@@ -74,31 +75,32 @@ def test_pyramid_expand_gray():
 def test_pyramid_expand_nd():
     for ndim in [1, 2, 3, 4]:
         img = cp.random.randn(*((4,) * ndim))
-        out = pyramids.pyramid_expand(img, upscale=2,
-                                      channel_axis=None)
+        out = pyramids.pyramid_expand(img, upscale=2, channel_axis=None)
         expected_shape = cp.asarray(img.shape) * 2
         assert_array_equal(out.shape, expected_shape)


-@pytest.mark.parametrize('channel_axis', [0, 1, 2, -1, -2, -3])
+@pytest.mark.parametrize("channel_axis", [0, 1, 2, -1, -2, -3])
 def test_build_gaussian_pyramid_rgb(channel_axis):
     image = cp.array(data.astronaut())
     rows, cols, dim = image.shape
     image = cp.moveaxis(image, source=-1, destination=channel_axis)
-    pyramid = pyramids.pyramid_gaussian(image, downscale=2,
-                                        channel_axis=channel_axis)
+    pyramid = pyramids.pyramid_gaussian(
+        image, downscale=2, channel_axis=channel_axis
+    )
     for layer, out in enumerate(pyramid):
-        layer_shape = [rows / 2 ** layer, cols / 2 ** layer]
+        layer_shape = [rows / 2**layer, cols / 2**layer]
         layer_shape.insert(channel_axis % image.ndim, dim)
         assert out.shape == tuple(layer_shape)


 def test_build_gaussian_pyramid_gray():
     rows, cols = image_gray.shape
-    pyramid = pyramids.pyramid_gaussian(image_gray, downscale=2,
-                                        channel_axis=None)
+    pyramid = pyramids.pyramid_gaussian(
+        image_gray, downscale=2, channel_axis=None
+    )
     for layer, out in enumerate(pyramid):
-        layer_shape = (rows / 2 ** layer, cols / 2 ** layer)
+        layer_shape = (rows / 2**layer, cols / 2**layer)
         assert_array_equal(out.shape, layer_shape)
@@ -106,7 +108,7 @@ def test_build_gaussian_pyramid_gray_defaults():
     rows, cols = image_gray.shape
     pyramid = pyramids.pyramid_gaussian(image_gray)
     for layer, out in enumerate(pyramid):
-        layer_shape = (rows / 2 ** layer, cols / 2 ** layer)
+        layer_shape = (rows / 2**layer, cols / 2**layer)
         assert_array_equal(out.shape, layer_shape)
@@ -114,22 +116,22 @@ def test_build_gaussian_pyramid_nd():
     for ndim in [1, 2, 3, 4]:
         img = cp.random.randn(*((8,) * ndim))
         original_shape = cp.asarray(img.shape)
-        pyramid = pyramids.pyramid_gaussian(img, downscale=2,
-                                            channel_axis=None)
+        pyramid = pyramids.pyramid_gaussian(img, downscale=2, channel_axis=None)
         for layer, out in enumerate(pyramid):
-            layer_shape = original_shape / 2 ** layer
+            layer_shape = original_shape / 2**layer
             assert_array_equal(out.shape, layer_shape)


-@pytest.mark.parametrize('channel_axis', [0, 1, 2, -1, -2, -3])
+@pytest.mark.parametrize("channel_axis", [0, 1, 2, -1, -2, -3])
 def test_build_laplacian_pyramid_rgb(channel_axis):
     image = cp.array(data.astronaut())
     rows, cols, dim = image.shape
     image = cp.moveaxis(image, source=-1, destination=channel_axis)
-    pyramid = pyramids.pyramid_laplacian(image, downscale=2,
-                                         channel_axis=channel_axis)
+    pyramid = pyramids.pyramid_laplacian(
+        image, downscale=2, channel_axis=channel_axis
+    )
     for layer, out in enumerate(pyramid):
-        layer_shape = [rows / 2 ** layer, cols / 2 ** layer]
+        layer_shape = [rows / 2**layer, cols / 2**layer]
         layer_shape.insert(channel_axis % image.ndim, dim)
         assert out.shape == tuple(layer_shape)
@@ -138,7 +140,7 @@ def test_build_laplacian_pyramid_defaults():
     rows, cols = image_gray.shape
     pyramid = pyramids.pyramid_laplacian(image_gray)
     for layer, out in enumerate(pyramid):
-        layer_shape = (rows / 2 ** layer, cols / 2 ** layer)
+        layer_shape = (rows / 2**layer, cols / 2**layer)
         assert_array_equal(out.shape, layer_shape)
@@ -146,14 +148,15 @@ def test_build_laplacian_pyramid_nd():
     for ndim in [1, 2, 3, 4]:
         img = cp.random.randn(*(16,) * ndim)
         original_shape = cp.asarray(img.shape)
-        pyramid = pyramids.pyramid_laplacian(img, downscale=2,
-                                             channel_axis=None)
+        pyramid = pyramids.pyramid_laplacian(
+            img, downscale=2, channel_axis=None
+        )
         for layer, out in enumerate(pyramid):
-            layer_shape = original_shape / 2 ** layer
+            layer_shape = original_shape / 2**layer
             assert_array_equal(out.shape, layer_shape)


-@pytest.mark.parametrize('channel_axis', [0, 1, 2, -1, -2, -3])
+@pytest.mark.parametrize("channel_axis", [0, 1, 2, -1, -2, -3])
 def test_laplacian_pyramid_max_layers(channel_axis):
     for downscale in [2, 3, 5, 7]:
         if channel_axis is None:
@@ -167,11 +170,11 @@ def test_laplacian_pyramid_max_layers(channel_axis):
             shape.insert(channel_axis % ndim, n_channels)
             shape = tuple(shape)
         img = cp.ones(shape)
-        pyramid = pyramids.pyramid_laplacian(img, downscale=downscale,
-                                             channel_axis=channel_axis)
+        pyramid = pyramids.pyramid_laplacian(
+            img, downscale=downscale, channel_axis=channel_axis
+        )
         max_layer = math.ceil(math.log(max(shape_without_channels), downscale))
         for layer, out in enumerate(pyramid):
-
             if channel_axis is None:
                 out_shape_without_channels = out.shape
             else:
@@ -199,10 +202,10 @@ def test_check_factor():


 @pytest.mark.parametrize(
-    'dtype', ['float16', 'float32', 'float64', 'uint8', 'int64']
+    "dtype", ["float16", "float32", "float64", "uint8", "int64"]
 )
 @pytest.mark.parametrize(
-    'pyramid_func', [pyramids.pyramid_gaussian, pyramids.pyramid_laplacian]
+    "pyramid_func", [pyramids.pyramid_gaussian, pyramids.pyramid_laplacian]
 )
 def test_pyramid_dtype_support(pyramid_func, dtype):
     img = cp.ones((32, 8), dtype=dtype)
diff --git a/python/cucim/src/cucim/skimage/transform/tests/test_warps.py b/python/cucim/src/cucim/skimage/transform/tests/test_warps.py
index dbb47738e..22a5aad60 100644
--- a/python/cucim/src/cucim/skimage/transform/tests/test_warps.py
+++ b/python/cucim/src/cucim/skimage/transform/tests/test_warps.py
@@ -3,8 +3,11 @@
 import cupy as cp
 import numpy as np
 import pytest
-from cupy.testing import (assert_allclose, assert_array_almost_equal,
-                          assert_array_equal)
+from cupy.testing import (
+    assert_allclose,
+    assert_array_almost_equal,
+    assert_array_equal,
+)
 from cupyx.scipy.ndimage import map_coordinates
 from numpy.testing import assert_almost_equal, assert_equal
 from skimage.color.colorconv import rgb2gray
@@ -14,15 +17,25 @@
 from cucim.skimage._shared._warnings import expected_warnings
 from cucim.skimage._shared.utils import _supported_float_type
 from cucim.skimage.feature.peak import peak_local_max
-from cucim.skimage.transform._geometric import (AffineTransform,
-                                                ProjectiveTransform,
-                                                SimilarityTransform)
-from cucim.skimage.transform._warps import (_linear_polar_mapping,
-                                            _log_polar_mapping, _stackcopy,
-                                            downscale_local_mean, rescale,
-                                            resize, resize_local_mean, rotate,
-                                            swirl, warp, warp_coords,
-                                            warp_polar)
+from cucim.skimage.transform._geometric import (
+    AffineTransform,
+    ProjectiveTransform,
+    SimilarityTransform,
+)
+from cucim.skimage.transform._warps import (
+    _linear_polar_mapping,
+    _log_polar_mapping,
+    _stackcopy,
+    downscale_local_mean,
+    rescale,
+    resize,
+    resize_local_mean,
+    rotate,
+    swirl,
+    warp,
+    warp_coords,
+    warp_polar,
+)
 from cucim.skimage.util.dtype import _convert, img_as_float

 # from skimage._shared.testing import test_parallel
@@ -68,7 +81,7 @@ def shift(xy):
     assert_array_almost_equal(outx, refx)


-@cp.testing.with_requires('cupy>=9.0.0b2')
+@cp.testing.with_requires("cupy>=9.0.0b2")
 def test_warp_matrix():
     x = cp.zeros((5, 5), dtype=cp.float64)
     x[2, 2] = 1
@@ -103,22 +116,24 @@ def test_warp_nd():
     assert_array_almost_equal(outx, refx)


-@cp.testing.with_requires('cupy>=9.0.0b2')
+@cp.testing.with_requires("cupy>=9.0.0b2")
 def test_warp_clip():
     x = cp.zeros((5, 5), dtype=cp.float64)
     x[2, 2] = 1

-    outx = rescale(x, 3, order=3, clip=False, anti_aliasing=False,
-                   mode='constant')
+    outx = rescale(
+        x, 3, order=3, clip=False, anti_aliasing=False, mode="constant"
+    )
     assert outx.min() < 0

-    outx = rescale(x, 3, order=3, clip=True, anti_aliasing=False,
-                   mode='constant')
+    outx = rescale(
+        x, 3, order=3, clip=True, anti_aliasing=False, mode="constant"
+    )
     assert_almost_equal(float(outx.min()), 0)
     assert_almost_equal(float(outx.max()), 1)


-@pytest.mark.parametrize('order', [0, 1])
+@pytest.mark.parametrize("order", [0, 1])
 def test_warp_clip_image_containing_nans(order):
     # Test that clipping works as intended on an image with NaNs
     # Orders >1 do not produce good output when the input image has
@@ -133,7 +148,7 @@
     assert_almost_equal(cp.nanmax(outx).item(), 2)


-@pytest.mark.parametrize('order', [0, 1])
+@pytest.mark.parametrize("order", [0, 1])
 def test_warp_clip_cval_is_nan(order):
     # Test that clipping works as intended when cval is NaN
     # Orders > 1 do not produce good output when cval is NaN, so those
@@ -148,7 +163,7 @@
     assert_almost_equal(cp.nanmax(outx).item(), 2)


-@pytest.mark.parametrize('order', range(6))
+@pytest.mark.parametrize("order", range(6))
 def test_warp_clip_cval_outside_input_range(order):
     # Test that clipping behavior considers cval part of the input range
@@ -159,8 +174,9 @@

     # The corners should be cval for all interpolation orders
     outx = cp.asnumpy(outx)
-    assert_array_almost_equal([outx[0, 0], outx[0, -1],
-                               outx[-1, 0], outx[-1, -1]], 2)
+    assert_array_almost_equal(
+        [outx[0, 0], outx[0, -1], outx[-1, 0], outx[-1, -1]], 2
+    )

     # For all interpolation orders other than nearest-neighbor, the clipped
     # output should have some pixels with values between the input (1) and
@@ -169,7 +185,7 @@
     assert np.sum(np.less(1, outx) * np.less(outx, 2)) > 0


-@pytest.mark.parametrize('order', range(6))
+@pytest.mark.parametrize("order", range(6))
 def test_warp_clip_cval_not_used(order):
     # Test that clipping does not consider cval part of the input range if it
     # is not used in the output image
@@ -181,7 +197,7 @@
     # that cval will not actually be used
     scale = 15 / (15 + 2)
     transform = AffineTransform(scale=scale, translation=(1, 1))
-    outx = warp(x, transform, mode='constant', order=order, cval=0, clip=True)
+    outx = warp(x, transform, mode="constant", order=order, cval=0, clip=True)

     # At higher orders of interpolation, the transformed image has overshoots
     # beyond the input range that should be clipped to the range 1 to 2. Even
@@ -200,13 +216,11 @@ def test_homography():
                    [0, 0, 1]])  # noqa
     # fmt: on

-    x90 = warp(x,
-               inverse_map=ProjectiveTransform(M).inverse,
-               order=1)
+    x90 = warp(x, inverse_map=ProjectiveTransform(M).inverse, order=1)
     assert_array_almost_equal(x90, cp.rot90(x))


-@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64])
+@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64])
 def test_rotate(dtype):
     x = cp.zeros((5, 5), dtype=dtype)
     x[1, 1] = 1
@@ -259,9 +273,9 @@ def test_rotate_resize_90():
     assert x90.shape == (230, 470)


-@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64])
-@pytest.mark.parametrize('resize', [False, True])
-@pytest.mark.parametrize('ndim', [2, 3, 4])
+@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64])
+@pytest.mark.parametrize("resize", [False, True])
+@pytest.mark.parametrize("ndim", [2, 3, 4])
 def test_rotate_nd(dtype, resize, ndim):
     # verify fix for issue: https://github.com/rapidsai/cucim/issues/431
     x = cp.zeros((5, 5) + (4,) * (ndim - 2), dtype=dtype)
@@ -276,8 +290,9 @@ def test_rescale():
     # same scale factor
     x = cp.zeros((5, 5), dtype=cp.float64)
     x[1, 1] = 1
-    scaled = rescale(x, 2, order=0,
-                     channel_axis=None, anti_aliasing=False, mode='constant')
+    scaled = rescale(
+        x, 2, order=0, channel_axis=None, anti_aliasing=False, mode="constant"
+    )
     ref = cp.zeros((10, 10))
     ref[2:4, 2:4] = 1
     assert_array_almost_equal(scaled, ref)
@@ -286,8 +301,14 @@

     x = cp.zeros((5, 5), dtype=cp.float64)
     x[1, 1] = 1
-    scaled = rescale(x, (2, 1), order=0,
-                     channel_axis=None, anti_aliasing=False, mode='constant')
+    scaled = rescale(
+        x,
+        (2, 1),
+        order=0,
+        channel_axis=None,
+        anti_aliasing=False,
+        mode="constant",
+    )
     ref = cp.zeros((10, 5))
     ref[2:4, 1] = 1
     assert_array_almost_equal(scaled, ref)
@@ -296,70 +317,83 @@ def test_rescale_invalid_scale():
     x = cp.zeros((10, 10, 3))
     with pytest.raises(ValueError):
-        rescale(x, (2, 2),
-                channel_axis=None, anti_aliasing=False, mode='constant')
+        rescale(
+            x, (2, 2), channel_axis=None, anti_aliasing=False, mode="constant"
+        )
     with pytest.raises(ValueError):
-        rescale(x, (2, 2, 2),
-                channel_axis=-1, anti_aliasing=False, mode='constant')
+        rescale(
+            x, (2, 2, 2), channel_axis=-1, anti_aliasing=False, mode="constant"
+        )


 def test_rescale_multichannel():
     # 1D + channels
     x = cp.zeros((8, 3), dtype=cp.float64)
-    scaled = rescale(x, 2, order=0, channel_axis=-1, anti_aliasing=False,
-                     mode='constant')
+    scaled = rescale(
+        x, 2, order=0, channel_axis=-1, anti_aliasing=False, mode="constant"
+    )
     assert scaled.shape == (16, 3)
     # 2D
-    scaled = rescale(x, 2, order=0, channel_axis=None, anti_aliasing=False,
-                     mode='constant')
+    scaled = rescale(
+        x, 2, order=0, channel_axis=None, anti_aliasing=False, mode="constant"
+    )
     assert scaled.shape == (16, 6)

     # 2D + channels
     x = cp.zeros((8, 8, 3), dtype=cp.float64)
-    scaled = rescale(x, 2, order=0, channel_axis=-1, anti_aliasing=False,
-                     mode='constant')
+    scaled = rescale(
+        x, 2, order=0, channel_axis=-1, anti_aliasing=False, mode="constant"
+    )
    assert scaled.shape == (16, 16, 3)
     # 3D
-    scaled = rescale(x, 2, order=0, channel_axis=None, anti_aliasing=False,
-                     mode='constant')
+    scaled = rescale(
+        x, 2, order=0, channel_axis=None, anti_aliasing=False, mode="constant"
+    )
     assert scaled.shape == (16, 16, 6)

     # 3D + channels
     x = cp.zeros((8, 8, 8, 3), dtype=cp.float64)
-    scaled = rescale(x, 2, order=0, channel_axis=-1, anti_aliasing=False,
-                     mode='constant')
+    scaled = rescale(
+        x, 2, order=0, channel_axis=-1, anti_aliasing=False, mode="constant"
+    )
     assert scaled.shape == (16, 16, 16, 3)
     # 4D
-    scaled = rescale(x, 2, order=0, channel_axis=None, anti_aliasing=False,
-                     mode='constant')
+    scaled = rescale(
+        x, 2, order=0, channel_axis=None, anti_aliasing=False, mode="constant"
+    )
     assert scaled.shape == (16, 16, 16, 6)


-@pytest.mark.parametrize('channel_axis', [0, 1, 2, -1])
+@pytest.mark.parametrize("channel_axis", [0, 1, 2, -1])
 def test_rescale_channel_axis_multiscale(channel_axis):
     x = cp.zeros((5, 5, 3), dtype=cp.float64)
     x = cp.moveaxis(x, -1, channel_axis)
-    scaled = rescale(x, scale=(2, 1), order=0, channel_axis=channel_axis,
-                     anti_aliasing=False, mode='constant')
+    scaled = rescale(
+        x,
+        scale=(2, 1),
+        order=0,
+        channel_axis=channel_axis,
+        anti_aliasing=False,
+        mode="constant",
+    )
     scaled = cp.moveaxis(scaled, channel_axis, -1)
     assert scaled.shape == (10, 5, 3)


 def test_rescale_multichannel_defaults():
     x = cp.zeros((8, 3), dtype=cp.float64)
-    scaled = rescale(x, 2, order=0, anti_aliasing=False, mode='constant')
+    scaled = rescale(x, 2, order=0, anti_aliasing=False, mode="constant")
     assert_equal(scaled.shape, (16, 6))

     x = cp.zeros((8, 8, 3), dtype=cp.float64)
-    scaled = rescale(x, 2, order=0, anti_aliasing=False, mode='constant')
+    scaled = rescale(x, 2, order=0, anti_aliasing=False, mode="constant")
     assert_equal(scaled.shape, (16, 16, 6))


 def test_resize2d():
     x = cp.zeros((5, 5), dtype=cp.float64)
     x[1, 1] = 1
-    resized = resize(x, (10, 10), order=0, anti_aliasing=False,
-                     mode='constant')
+    resized = resize(x, (10, 10), order=0, anti_aliasing=False, mode="constant")
     ref = cp.zeros((10, 10))
     ref[2:4, 2:4] = 1
     assert_array_almost_equal(resized, ref)
@@ -369,16 +403,16 @@ def test_resize3d_keep():
     # keep 3rd dimension
     x = cp.zeros((5, 5, 3), dtype=cp.float64)
     x[1, 1, :] = 1
-    resized = resize(x, (10, 10), order=0, anti_aliasing=False,
-                     mode='constant')
+    resized = resize(x, (10, 10), order=0, anti_aliasing=False, mode="constant")
     with pytest.raises(ValueError):
         # output_shape too short
-        resize(x, (10,), order=0, anti_aliasing=False, mode='constant')
+        resize(x, (10,), order=0, anti_aliasing=False, mode="constant")
     ref = cp.zeros((10, 10, 3))
     ref[2:4, 2:4, :] = 1
     assert_array_almost_equal(resized, ref)
-    resized = resize(x, (10, 10, 3), order=0, anti_aliasing=False,
-                     mode='constant')
+    resized = resize(
+        x, (10, 10, 3), order=0, anti_aliasing=False, mode="constant"
+    )
     assert_array_almost_equal(resized, ref)
@@ -386,8 +420,9 @@ def test_resize3d_resize():
     # resize 3rd dimension
     x = cp.zeros((5, 5, 3), dtype=cp.float64)
     x[1, 1, :] = 1
-    resized = resize(x, (10, 10, 1), order=0, anti_aliasing=False,
-                     mode='constant')
+    resized = resize(
+        x, (10, 10, 1), order=0, anti_aliasing=False, mode="constant"
+    )
     ref = cp.zeros((10, 10, 1))
     ref[2:4, 2:4] = 1
     assert_array_almost_equal(resized, ref)
@@ -397,8 +432,9 @@ def test_resize3d_2din_3dout():
     # 3D output with 2D input
     x = cp.zeros((5, 5), dtype=cp.float64)
     x[1, 1] = 1
-    resized = resize(x, (10, 10, 1), order=0, anti_aliasing=False,
-                     mode='constant')
+    resized = resize(
+        x, (10, 10, 1), order=0, anti_aliasing=False, mode="constant"
+    )
     ref = cp.zeros((10, 10, 1))
     ref[2:4, 2:4] = 1
     assert_array_almost_equal(resized, ref)
@@ -409,8 +445,9 @@ def test_resize2d_4d():
     x = cp.zeros((5, 5), dtype=cp.float64)
     x[1, 1] = 1
     out_shape = (10, 10, 1, 1)
-    resized = resize(x, out_shape, order=0, anti_aliasing=False,
-                     mode='constant')
+    resized = resize(
+        x, out_shape, order=0, anti_aliasing=False, mode="constant"
+    )
     ref = cp.zeros(out_shape)
     ref[2:4, 2:4, ...] = 1
     assert_array_almost_equal(resized, ref)
@@ -421,8 +458,9 @@ def test_resize_nd():
         shape = 2 + np.arange(dim) * 2
         x = cp.ones(tuple(shape))
         out_shape = cp.asarray(shape) * 1.5
-        resized = resize(x, out_shape, order=0, mode='reflect',
-                         anti_aliasing=False)
+        resized = resize(
+            x, out_shape, order=0, mode="reflect", anti_aliasing=False
+        )
         expected_shape = 1.5 * shape
         assert_equal(resized.shape, expected_shape)
         assert cp.all(resized == 1)
@@ -433,8 +471,9 @@ def test_resize3d_bilinear():
     x = cp.zeros((5, 5, 2), dtype=cp.float64)
     x[1, 1, 0] = 0
     x[1, 1, 1] = 1
-    resized = resize(x, (10, 10, 1), order=1, mode='constant',
-                     anti_aliasing=False)
+    resized = resize(
+        x, (10, 10, 1), order=1, mode="constant", anti_aliasing=False
+    )
     ref = cp.zeros((10, 10, 1))
     ref[1:5, 1:5, :] = 0.03125
     ref[1:5, 2:4, :] = 0.09375
@@ -459,10 +498,10 @@ def test_resize_dtype():
     assert resize(x_f32, (10, 10), preserve_range=True).dtype == x_f32.dtype


-@pytest.mark.parametrize('order', [0, 1])
-@pytest.mark.parametrize('preserve_range', [True, False])
-@pytest.mark.parametrize('anti_aliasing', [True, False])
-@pytest.mark.parametrize('dtype', [cp.float64, cp.uint8])
+@pytest.mark.parametrize("order", [0, 1])
+@pytest.mark.parametrize("preserve_range", [True, False])
+@pytest.mark.parametrize("anti_aliasing", [True, False])
+@pytest.mark.parametrize("dtype", [cp.float64, cp.uint8])
 def test_resize_clip(order, preserve_range, anti_aliasing, dtype):
     # test if clip as expected
     if dtype == cp.uint8 and (preserve_range or order == 0):
@@ -474,18 +513,23 @@ def test_resize_clip(order, preserve_range, anti_aliasing, dtype):
         x *= 255
     else:
         x[0, 0] = cp.NaN
-    resized = resize(x, (3, 3), order=order, preserve_range=preserve_range,
-                     anti_aliasing=anti_aliasing)
+    resized = resize(
+        x,
+        (3, 3),
+        order=order,
+        preserve_range=preserve_range,
+        anti_aliasing=anti_aliasing,
+    )

     assert abs(float(cp.nanmax(resized)) - expected_max) < 1e-14


-@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64])
+@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64])
 def test_swirl(dtype):
     image = img_as_float(cp.array(checkerboard())).astype(dtype, copy=False)
     float_dtype = _supported_float_type(dtype)

-    swirl_params = {'radius': 80, 'rotation': 0, 'order': 2, 'mode': 'reflect'}
+    swirl_params = {"radius": 80, "rotation": 0, "order": 2, "mode": "reflect"}

     # with expected_warnings(["Bi-quadratic.*bug"]):
     swirled = swirl(image, strength=10, **swirl_params)
@@ -494,7 +538,7 @@ def test_swirl(dtype):
     assert cp.mean(cp.abs(image - unswirled)) < 0.01

-    swirl_params.pop('mode')
+    swirl_params.pop("mode")

     # with expected_warnings(["Bi-quadratic.*bug"]):
     swirled = swirl(image, strength=10, **swirl_params)
@@ -529,7 +573,7 @@ def test_warp_identity():
     assert cp.all(0 == warped_rgb_img[:, :, 1])


-@cp.testing.with_requires('cupy>=9.0.0b2')
+@cp.testing.with_requires("cupy>=9.0.0b2")
 def test_warp_coords_example():
     image = cp.array(astronaut().astype(cp.float32))
     assert 3 == image.shape[2]
@@ -539,12 +583,12 @@


 @pytest.mark.parametrize(
-    'dtype', [cp.uint8, cp.int32, cp.float16, cp.float32, cp.float64]
+    "dtype", [cp.uint8, cp.int32, cp.float16, cp.float32, cp.float64]
 )
 def test_downsize(dtype):
     x = cp.zeros((10, 10), dtype=dtype)
     x[2:4, 2:4] = 1
-    scaled = resize(x, (5, 5), order=0, anti_aliasing=False, mode='constant')
+    scaled = resize(x, (5, 5), order=0, anti_aliasing=False, mode="constant")
     expected_dtype = cp.float32 if dtype == cp.float16 else dtype
     assert scaled.dtype == expected_dtype
     assert_equal(scaled.shape, (5, 5))
@@ -556,7 +600,7 @@ def test_downsize_anti_aliasing():
     x = cp.zeros((10, 10), dtype=cp.float64)
     x[2, 2] = 1
-    scaled = resize(x, (5, 5), order=1, anti_aliasing=True, mode='constant')
+    scaled = resize(x, (5, 5), order=1, anti_aliasing=True, mode="constant")
     assert_equal(scaled.shape, (5, 5))
     assert cp.all(scaled[:3, :3] > 0)
     assert_equal(float(scaled[3:, :].sum()), 0)
@@ -564,42 +608,97 @@

     sigma = 0.125
     out_size = (5, 5)
-    resize(x, out_size, order=1, mode='constant',
-           anti_aliasing=True, anti_aliasing_sigma=sigma)
-    resize(x, out_size, order=1, mode='edge',
-           anti_aliasing=True, anti_aliasing_sigma=sigma)
-    resize(x, out_size, order=1, mode='symmetric',
-           anti_aliasing=True, anti_aliasing_sigma=sigma)
-    resize(x, out_size, order=1, mode='reflect',
-           anti_aliasing=True, anti_aliasing_sigma=sigma)
-    resize(x, out_size, order=1, mode='wrap',
-           anti_aliasing=True, anti_aliasing_sigma=sigma)
+    resize(
+        x,
+        out_size,
+        order=1,
+        mode="constant",
+        anti_aliasing=True,
+        anti_aliasing_sigma=sigma,
+    )
+    resize(
+        x,
+        out_size,
+        order=1,
+        mode="edge",
+        anti_aliasing=True,
+        anti_aliasing_sigma=sigma,
+    )
+    resize(
+        x,
+        out_size,
+        order=1,
+        mode="symmetric",
+        anti_aliasing=True,
+        anti_aliasing_sigma=sigma,
+    )
+    resize(
+        x,
+        out_size,
+        order=1,
+        mode="reflect",
+        anti_aliasing=True,
+        anti_aliasing_sigma=sigma,
+    )
+    resize(
+        x,
+        out_size,
+        order=1,
+        mode="wrap",
+        anti_aliasing=True,
+        anti_aliasing_sigma=sigma,
+    )

     with pytest.raises(ValueError):  # Unknown mode, or cannot translate mode
-        resize(x, out_size, order=1, mode='non-existent',
-               anti_aliasing=True, anti_aliasing_sigma=sigma)
+        resize(
+            x,
+            out_size,
+            order=1,
+            mode="non-existent",
+            anti_aliasing=True,
+            anti_aliasing_sigma=sigma,
+        )


 def test_downsize_anti_aliasing_invalid_stddev():
     x = cp.zeros((10, 10), dtype=cp.float64)
     with pytest.raises(ValueError):
-        resize(x, (5, 5), order=0, anti_aliasing=True, anti_aliasing_sigma=-1,
-               mode='constant')
+        resize(
+            x,
+            (5, 5),
+            order=0,
+            anti_aliasing=True,
+            anti_aliasing_sigma=-1,
+            mode="constant",
+        )
     with expected_warnings(["Anti-aliasing standard deviation greater"]):
-        resize(x, (5, 15), order=0, anti_aliasing=True,
-               anti_aliasing_sigma=(1, 1), mode="reflect")
-        resize(x, (5, 15), order=0, anti_aliasing=True,
-               anti_aliasing_sigma=(0, 1), mode="reflect")
+        resize(
+            x,
+            (5, 15),
+            order=0,
+            anti_aliasing=True,
+            anti_aliasing_sigma=(1, 1),
+            mode="reflect",
+        )
+        resize(
+            x,
+            (5, 15),
+            order=0,
+            anti_aliasing=True,
+            anti_aliasing_sigma=(0, 1),
+            mode="reflect",
+        )


 @pytest.mark.parametrize(
-    'dtype', [cp.uint8, cp.int32, cp.float16, cp.float32, cp.float64]
+    "dtype", [cp.uint8, cp.int32, cp.float16, cp.float32, cp.float64]
 )
 def test_downscale(dtype):
     x = cp.zeros((10, 10), dtype=dtype)
     x[2:4, 2:4] = 1
-    scaled = rescale(x, 0.5, order=0, anti_aliasing=False,
-                     channel_axis=None, mode='constant')
+    scaled = rescale(
+        x, 0.5, order=0, anti_aliasing=False, channel_axis=None, mode="constant"
+    )
     expected_dtype = cp.float32 if dtype == cp.float16 else dtype
     assert scaled.dtype == expected_dtype
     assert_equal(scaled.shape, (5, 5))
@@ -611,8 +710,9 @@ def test_downscale_anti_aliasing():
     x = cp.zeros((10, 10), dtype=cp.float64)
     x[2, 2] = 1
-    scaled = rescale(x, 0.5, order=1, anti_aliasing=True,
-                     channel_axis=None, mode='constant')
+    scaled = rescale(
+        x, 0.5, order=1, anti_aliasing=True, channel_axis=None, mode="constant"
+    )
     assert_equal(scaled.shape, (5, 5))
     assert cp.all(scaled[:3, :3] > 0)
     assert_equal(float(scaled[3:, :].sum()), 0)
@@ -627,23 +727,21 @@ def test_downscale_to_the_limit():


 @pytest.mark.parametrize(
-    'dtype', [cp.uint8, cp.int32, cp.float16, cp.float32, cp.float64]
+    "dtype", [cp.uint8, cp.int32, cp.float16, cp.float32, cp.float64]
 )
 def test_downscale_local_mean(dtype):
     image1 = cp.arange(4 * 6, dtype=dtype).reshape(4, 6)
     out1 = downscale_local_mean(image1, (2, 3))
-    float_dtype = dtype if cp.dtype(dtype).kind == 'f' else cp.float64
+    float_dtype = dtype if cp.dtype(dtype).kind == "f" else cp.float64
     assert out1.dtype == float_dtype

-    expected1 = np.array([[4., 7.],
-                          [16., 19.]])
+    expected1 = np.array([[4.0, 7.0], [16.0, 19.0]])
     assert_array_equal(expected1, out1)

     image2 = cp.arange(5 * 8, dtype=dtype).reshape(5, 8)
     out2 = downscale_local_mean(image2, (4, 5))
     assert out2.dtype == float_dtype
-    expected2 = np.array([[14., 10.8],
-                          [8.5, 5.7]])
+    expected2 = np.array([[14.0, 10.8], [8.5, 5.7]])
     rtol = 1e-3 if dtype == cp.float16 else 1e-7
     assert_allclose(expected2, out2, rtol=rtol)
@@ -671,19 +769,42 @@ def test_slow_warp_nonint_oshape():


 def test_keep_range():
     image = cp.linspace(0, 2, 25).reshape(5, 5)
-    out = rescale(image, 2, preserve_range=False, clip=True, order=0,
-                  mode='constant', channel_axis=None, anti_aliasing=False)
+    out = rescale(
+        image,
+        2,
+        preserve_range=False,
+        clip=True,
+        order=0,
+        mode="constant",
+        channel_axis=None,
+        anti_aliasing=False,
+    )
     assert out.min() == 0
     assert out.max() == 2

-    out = rescale(image, 2, preserve_range=True, clip=True, order=0,
-                  mode='constant', channel_axis=None, anti_aliasing=False)
+    out = rescale(
+        image,
+        2,
+        preserve_range=True,
+        clip=True,
+        order=0,
+        mode="constant",
+        channel_axis=None,
+        anti_aliasing=False,
+    )
     assert out.min() == 0
     assert out.max() == 2

-    out = rescale(image.astype(cp.uint8), 2, preserve_range=False,
-                  mode='constant', channel_axis=None, anti_aliasing=False,
-                  clip=True, order=0)
+    out = rescale(
+        image.astype(cp.uint8),
+        2,
+        preserve_range=False,
+        mode="constant",
+        channel_axis=None,
+        anti_aliasing=False,
+        clip=True,
+        order=0,
+    )
     assert out.min() == 0
     assert out.max() == 2
@@ -751,7 +872,7 @@ def test_log_polar_mapping():
     assert cp.allclose(coords, ground_truth)


-@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64])
+@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64])
 def test_linear_warp_polar(dtype):
     radii = [5, 10, 15, 20]
     image = cp.zeros([51, 51])
@@ -766,17 +887,23 @@ def test_linear_warp_polar(dtype):
     assert np.alltrue([peak in radii for peak in peaks])


-@pytest.mark.parametrize('dtype', [cp.float16, cp.float32, cp.float64])
+@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64])
 def test_log_warp_polar(dtype):
-    radii = [math.exp(2), math.exp(3), math.exp(4), math.exp(5),
-             math.exp(5) - 1, math.exp(5) + 1]
+    radii = [
+        math.exp(2),
+        math.exp(3),
+        math.exp(4),
+        math.exp(5),
+        math.exp(5) - 1,
+        math.exp(5) + 1,
+    ]
     radii = [int(x) for x in radii]
     image = cp.zeros([301, 301])
     for rad in radii:
         rr, cc, val = circle_perimeter_aa(150, 150, rad)
         image[rr, cc] = val
     image = image.astype(dtype, copy=False)
-    warped = warp_polar(image, radius=200, scaling='log')
+    warped = warp_polar(image, radius=200, scaling="log")
     assert warped.dtype == _supported_float_type(dtype)
     profile = warped.mean(axis=0)
     peaks_coord = peak_local_max(profile)
@@ -847,9 +974,8 @@ def test_bool_nonzero_order_errors(order):
         warp(img, cp.eye(3), order=order)


-@pytest.mark.parametrize('dtype', [cp.uint8, bool, cp.float32, cp.float64])
+@pytest.mark.parametrize("dtype", [cp.uint8, bool, cp.float32, cp.float64])
 def test_order_0_warp_dtype(dtype):
-
     img = _convert(cp.array(astronaut()[:10, :10, 0]), dtype)

     assert resize(img, (12, 12), order=0).dtype == dtype
@@ -860,12 +986,10 @@ def test_order_0_warp_dtype(dtype):


 @pytest.mark.parametrize(
-    'dtype',
-    [cp.uint8, cp.float16, cp.float32, cp.float64]
+    "dtype", [cp.uint8, cp.float16, cp.float32, cp.float64]
 )
-@pytest.mark.parametrize('order', [1, 3, 5])
+@pytest.mark.parametrize("order", [1, 3, 5])
 def test_nonzero_order_warp_dtype(dtype, order):
-
     img = _convert(cp.array(astronaut()[:10, :10, 0]), dtype)

     float_dtype = _supported_float_type(dtype)
@@ -886,7 +1010,7 @@ def test_resize_local_mean2d():
     assert_array_almost_equal(resized, ref)


-@pytest.mark.parametrize('channel_axis', [0, 1, 2, -1, -2, -3])
+@pytest.mark.parametrize("channel_axis", [0, 1, 2, -1, -2, -3])
 def test_resize_local_mean3d_keep(channel_axis):
     # keep 3rd dimension
     nch = 3
@@ -899,7 +1023,7 @@ def test_resize_local_mean3d_keep(channel_axis):
     resized = cp.moveaxis(resized, channel_axis, -1)
     with pytest.raises(ValueError):
         # output_shape too short
-        resize_local_mean(x, (10, ))
+        resize_local_mean(x, (10,))
     ref = cp.zeros((10, 10, nch))
     ref[2:4, 2:4, :] = 1
     assert_array_almost_equal(resized, ref)
@@ -985,22 +1109,32 @@ def test_resize_local_mean_dtype():
     x_u8 = x.astype(cp.uint8)
     x_b = x.astype(bool)

-    assert resize_local_mean(x, (10, 10),
-                             preserve_range=False).dtype == x.dtype
-    assert resize_local_mean(x, (10, 10),
-                             preserve_range=True).dtype == x.dtype
-    assert resize_local_mean(x_u8, (10, 10),
-                             preserve_range=False).dtype == cp.float32
-    assert resize_local_mean(x_u8, (10, 10),
-                             preserve_range=True).dtype == cp.float32
-    assert resize_local_mean(x_b, (10, 10),
-                             preserve_range=False).dtype == cp.float32
-    assert resize_local_mean(x_b, (10, 10),
-                             preserve_range=True).dtype == cp.float32
-    assert resize_local_mean(x_f32, (10, 10),
-                             preserve_range=False).dtype == x_f32.dtype
-    assert resize_local_mean(x_f32, (10, 10),
-                             preserve_range=True).dtype == x_f32.dtype
+    assert resize_local_mean(x, (10, 10), preserve_range=False).dtype == x.dtype
+    assert resize_local_mean(x, (10, 10), preserve_range=True).dtype == x.dtype
+    assert (
+        resize_local_mean(x_u8, (10, 10), preserve_range=False).dtype
+        == cp.float32
+    )
+    assert (
+        resize_local_mean(x_u8, (10, 10), preserve_range=True).dtype
+        == cp.float32
+    )
+    assert (
+        resize_local_mean(x_b, (10, 10), preserve_range=False).dtype
+        == cp.float32
+    )
+    assert (
+        resize_local_mean(x_b, (10, 10), preserve_range=True).dtype
+        == cp.float32
+    )
+    assert (
+        resize_local_mean(x_f32, (10, 10), preserve_range=False).dtype
+        == x_f32.dtype
+    )
+    assert (
+        resize_local_mean(x_f32, (10, 10), preserve_range=True).dtype
+        == x_f32.dtype
+    )


 def test_nn_resize_int_img():
diff --git a/python/cucim/src/cucim/skimage/util/__init__.py b/python/cucim/src/cucim/skimage/util/__init__.py
index cbfd2e608..434709449 100644
--- a/python/cucim/src/cucim/skimage/util/__init__.py
+++ b/python/cucim/src/cucim/skimage/util/__init__.py
@@ -1,8 +1,16 @@
 from ._invert import invert
 from ._map_array import map_array
 from .arraycrop import crop
-from .dtype import (dtype_limits, img_as_bool, img_as_float, img_as_float32,
-                    img_as_float64, img_as_int, img_as_ubyte, img_as_uint)
+from .dtype import (
+    dtype_limits,
+    img_as_bool,
+    img_as_float,
+    img_as_float32,
+    img_as_float64,
+    img_as_int,
+    img_as_ubyte,
+    img_as_uint,
+)
 from .noise import random_noise
 from .shape import view_as_blocks, view_as_windows
diff --git a/python/cucim/src/cucim/skimage/util/_invert.py b/python/cucim/src/cucim/skimage/util/_invert.py
index c75beadfb..2d328ad98 100644
--- a/python/cucim/src/cucim/skimage/util/_invert.py
+++ b/python/cucim/src/cucim/skimage/util/_invert.py
@@ -62,7 +62,7 @@ def invert(image, signed_float=False):
     >>> invert(img4, signed_float=True)
     array([[-0.  , -1.  ,  1.  ,  0.25]])
     """
-    if image.dtype == 'bool':
+    if image.dtype == "bool":
         inverted = ~image
     elif np.issubdtype(image.dtype, np.unsignedinteger):
         max_val = dtype_limits(image, clip_negative=False)[1]
diff --git a/python/cucim/src/cucim/skimage/util/_map_array.py b/python/cucim/src/cucim/skimage/util/_map_array.py
index bbc1a4dca..c5421dc00 100644
--- a/python/cucim/src/cucim/skimage/util/_map_array.py
+++ b/python/cucim/src/cucim/skimage/util/_map_array.py
@@ -47,7 +47,7 @@ def map_array(input_arr, input_vals, output_vals, out=None):

     if not cp.issubdtype(input_arr.dtype, cp.integer):
         raise TypeError(
-            'The dtype of an array to be remapped should be integer.'
+            "The dtype of an array to be remapped should be integer."
         )
     # We ravel the input array for simplicity of iteration in Cython:
     orig_shape = input_arr.shape
@@ -59,18 +59,18 @@ def map_array(input_arr, input_vals, output_vals, out=None):
         out = cp.empty(orig_shape, dtype=output_vals.dtype)
     elif out.shape != orig_shape:
         raise ValueError(
-            'If out array is provided, it should have the same shape as '
-            f'the input array. Input array has shape {orig_shape}, provided '
-            f'output array has shape {out.shape}.'
+            "If out array is provided, it should have the same shape as "
+            f"the input array. Input array has shape {orig_shape}, provided "
+            f"output array has shape {out.shape}."
         )
     try:
         out_view = out.view()
         out_view.shape = (-1,)  # no-copy reshape/ravel
     except AttributeError:  # if out strides are not compatible with 0-copy
         raise ValueError(
-            'If out array is provided, it should be either contiguous '
-            f'or 1-dimensional. Got array with shape {out.shape} and '
-            f'strides {out.strides}.'
+            "If out array is provided, it should be either contiguous "
+            f"or 1-dimensional. Got array with shape {out.shape} and "
+            f"strides {out.strides}."
         )

     # ensure all arrays have matching types before sending to Cython
@@ -166,25 +166,32 @@ def dtype(self):
         return self.out_values.dtype

     def __repr__(self):
-        return f'ArrayMap({repr(self.in_values)}, {repr(self.out_values)})'
+        return f"ArrayMap({repr(self.in_values)}, {repr(self.out_values)})"

     def __str__(self):
         if len(self.in_values) <= self._max_str_lines + 1:
             rows = range(len(self.in_values))
-            string = '\n'.join(
-                ['ArrayMap:'] +
-                [f'  {self.in_values[i]} → {self.out_values[i]}' for i in rows]
+            string = "\n".join(
+                ["ArrayMap:"]
+                + [
+                    f"  {self.in_values[i]} → {self.out_values[i]}"
+                    for i in rows
+                ]
             )
         else:
             rows0 = list(range(0, self._max_str_lines // 2))
             rows1 = list(range(-self._max_str_lines // 2, 0))
-            string = '\n'.join(
-                ['ArrayMap:'] +
-                [f'  {self.in_values[i]} → {self.out_values[i]}'
-                 for i in rows0] +
-                ['  ...'] +
-                [f'  {self.in_values[i]} → {self.out_values[i]}'
-                 for i in rows1]
+            string = "\n".join(
+                ["ArrayMap:"]
+                + [
+                    f"  {self.in_values[i]} → {self.out_values[i]}"
+                    for i in rows0
+                ]
+                + ["  ..."]
+                + [
+                    f"  {self.in_values[i]} → {self.out_values[i]}"
+                    for i in rows1
+                ]
             )
         return string
@@ -197,9 +204,7 @@ def __getitem__(self, index):
             index = cp.asarray([index])
         elif isinstance(index, slice):
             start = index.start or 0  # treat None or 0 the same way
-            stop = (index.stop
-                    if index.stop is not None
-                    else len(self))
+            stop = index.stop if index.stop is not None else len(self)
             step = index.step
             index = cp.arange(start, stop, step)
         if index.dtype == bool:
diff --git a/python/cucim/src/cucim/skimage/util/arraycrop.py b/python/cucim/src/cucim/skimage/util/arraycrop.py
index 59893dddb..fb74661e1 100644
--- a/python/cucim/src/cucim/skimage/util/arraycrop.py
+++ b/python/cucim/src/cucim/skimage/util/arraycrop.py
@@ -5,10 +5,10 @@

 import cupy as cp

-__all__ = ['crop']
+__all__ = ["crop"]


-def crop(ar, crop_width, copy=False, order='K'):
+def crop(ar, crop_width, copy=False, order="K"):
     """Crop array `ar` by `crop_width` along each dimension.

     Parameters
@@ -63,8 +63,7 @@ def crop(ar, crop_width, copy=False, order='K'):
             "a single pair, or a single integer"
         )

-    slices = tuple(slice(a, ar.shape[i] - b)
-                   for i, (a, b) in enumerate(crops))
+    slices = tuple(slice(a, ar.shape[i] - b) for i, (a, b) in enumerate(crops))
     if copy:
         cropped = cp.array(ar[slices], order=order, copy=True)
     else:
diff --git a/python/cucim/src/cucim/skimage/util/dtype.py b/python/cucim/src/cucim/skimage/util/dtype.py
index 4554efe3a..866d515ee 100644
--- a/python/cucim/src/cucim/skimage/util/dtype.py
+++ b/python/cucim/src/cucim/skimage/util/dtype.py
@@ -6,9 +6,16 @@

 from .._shared.utils import _supported_float_type

-__all__ = ['img_as_float32', 'img_as_float64', 'img_as_float',
-           'img_as_int', 'img_as_uint', 'img_as_ubyte',
-           'img_as_bool', 'dtype_limits']
+__all__ = [
+    "img_as_float32",
+    "img_as_float64",
+    "img_as_float",
+    "img_as_int",
+    "img_as_uint",
+    "img_as_ubyte",
+    "img_as_bool",
+    "dtype_limits",
+]

 # For integers Numpy uses `_integer_types` basis internally, and builds a leaky
 # `cupy.XintYY` abstraction on top of it. This leads to situations when, for
 # For convenience, for these dtypes we indicate also the possible bit depths
 # (some of them are platform specific). For the details, see:
 # http://www.unix.org/whitepapers/64bit.html
-_integer_types = (cp.byte, cp.ubyte,  # 8 bits
-                  cp.short, cp.ushort,  # 16 bits
-                  cp.intc, cp.uintc,  # 16 or 32 or 64 bits
-                  int, cp.int_, cp.uint,  # 32 or 64 bits
-                  cp.longlong, cp.ulonglong)  # 64 bits
-_integer_ranges = {t: (cp.iinfo(t).min, cp.iinfo(t).max)
-                   for t in _integer_types}
-dtype_range = {bool: (False, True),
-               cp.bool_: (False, True),
-               float: (-1, 1),
-               cp.float_: (-1, 1),
-               cp.float16: (-1, 1),
-               cp.float32: (-1, 1),
-               cp.float64: (-1, 1)}
+_integer_types = (
+    cp.byte,
+    cp.ubyte,  # 8 bits
+    cp.short,
+    cp.ushort,  # 16 bits
+    cp.intc,
+    cp.uintc,  # 16 or 32 or 64 bits
+    int,
+    cp.int_,
+    cp.uint,  # 32 or 64 bits
+    cp.longlong,
+    cp.ulonglong,
+)  # 64 bits
+_integer_ranges = {
+    t: (cp.iinfo(t).min, cp.iinfo(t).max) for t in _integer_types
+}
+dtype_range = {
+    bool: (False, True),
+    cp.bool_: (False, True),
+    float: (-1, 1),
+    cp.float_: (-1, 1),
+    cp.float16: (-1, 1),
+    cp.float32: (-1, 1),
+    cp.float64: (-1, 1),
+}
 dtype_range.update(_integer_ranges)

 with warnings.catch_warnings():
-    warnings.filterwarnings('ignore', category=DeprecationWarning)
+    warnings.filterwarnings("ignore", category=DeprecationWarning)
     # cp.bool8 is a deprecated alias of cp.bool_
-    if hasattr(cp, 'bool8'):
+    if hasattr(cp, "bool8"):
         dtype_range[cp.bool8] = (False, True)

 _supported_types = list(dtype_range.keys())
@@ -108,8 +126,11 @@ def _dtype_bits(kind, bits, itemsize=1):

     """
-    s = next(i for i in (itemsize, ) + (2, 4, 8) if
-             bits < (i * 8) or (bits == (i * 8) and kind == 'u'))
+    s = next(
+        i
+        for i in (itemsize,) + (2, 4, 8)
+        if bits < (i * 8) or (bits == (i * 8) and kind == "u")
+    )
     return cp.dtype(kind + str(s))
@@ -137,16 +158,18 @@ def _scale(a, n, m, copy=True):
         Output image array. Has the same kind as `a`.
""" kind = a.dtype.kind - if n > m and a.max() < 2 ** m: + if n > m and a.max() < 2**m: mnew = math.ceil(m / 2) * 2 if mnew > m: - dtype = f'int{mnew}' + dtype = f"int{mnew}" else: - dtype = f'uint{mnew}' + dtype = f"uint{mnew}" n = math.ceil(n / 2) * 2 - warn(f'Downcasting {a.dtype} to {dtype} without scaling because max ' - f'value {a.max()} fits in {dtype}', - stacklevel=3) + warn( + f"Downcasting {a.dtype} to {dtype} without scaling because max " + f"value {a.max()} fits in {dtype}", + stacklevel=3, + ) return a.astype(_dtype_bits(kind, m)) elif n == m: return a.copy() if copy else a @@ -154,17 +177,18 @@ def _scale(a, n, m, copy=True): # downscale with precision loss if copy: b = cp.empty(a.shape, _dtype_bits(kind, m)) - cp.floor_divide(a, 2**(n - m), out=b, dtype=a.dtype, - casting='unsafe') + cp.floor_divide( + a, 2 ** (n - m), out=b, dtype=a.dtype, casting="unsafe" + ) return b else: - a //= 2**(n - m) + a //= 2 ** (n - m) return a elif m % n == 0: # exact upscale to a multiple of `n` bits if copy: b = cp.empty(a.shape, _dtype_bits(kind, m)) - cp.multiply(a, (2 ** m - 1) // (2 ** n - 1), out=b, dtype=b.dtype) + cp.multiply(a, (2**m - 1) // (2**n - 1), out=b, dtype=b.dtype) return b else: a = a.astype(_dtype_bits(kind, m, a.dtype.itemsize), copy=False) @@ -176,13 +200,13 @@ def _scale(a, n, m, copy=True): o = (m // n + 1) * n if copy: b = cp.empty(a.shape, _dtype_bits(kind, o)) - cp.multiply(a, (2 ** o - 1) // (2 ** n - 1), out=b, dtype=b.dtype) - b //= 2**(o - m) + cp.multiply(a, (2**o - 1) // (2**n - 1), out=b, dtype=b.dtype) + b //= 2 ** (o - m) return b else: a = a.astype(_dtype_bits(kind, o, a.dtype.itemsize), copy=False) a *= (2**o - 1) // (2**n - 1) - a //= 2**(o - m) + a //= 2 ** (o - m) return a @@ -261,30 +285,31 @@ def _convert(image, dtype, force_copy=False, uniform=False): return image if not (dtype_in in _supported_types and dtype_out in _supported_types): - raise ValueError(f'Cannot convert from {dtypeobj_in} to ' - f'{dtypeobj_out}.') + raise ValueError( + f"Cannot convert from {dtypeobj_in} to " f"{dtypeobj_out}." 
+        )

-    if kind_in in 'ui':
+    if kind_in in "ui":
         imin_in = cp.iinfo(dtype_in).min
         imax_in = cp.iinfo(dtype_in).max
-    if kind_out in 'ui':
+    if kind_out in "ui":
         imin_out = cp.iinfo(dtype_out).min
         imax_out = cp.iinfo(dtype_out).max

     # any -> binary
-    if kind_out == 'b':
+    if kind_out == "b":
         return image > dtype_in(dtype_range[dtype_in][1] / 2)

     # binary -> any
-    if kind_in == 'b':
+    if kind_in == "b":
         result = image.astype(dtype_out)
-        if kind_out != 'f':
+        if kind_out != "f":
             result *= dtype_out(dtype_range[dtype_out][1])
         return result

     # float -> any
-    if kind_in == 'f':
-        if kind_out == 'f':
+    if kind_in == "f":
+        if kind_out == "f":
             # float -> float
             return image.astype(dtype_out)
@@ -292,52 +317,52 @@ def _convert(image, dtype, force_copy=False, uniform=False):
             raise ValueError("Images of type float must be between -1 and 1.")
         # floating point -> integer
         # use float type that can represent output integer type
-        computation_type = _dtype_itemsize(itemsize_out, dtype_in,
-                                           cp.float32, cp.float64)
+        computation_type = _dtype_itemsize(
+            itemsize_out, dtype_in, cp.float32, cp.float64
+        )

         if not uniform:
-            if kind_out == 'u':
-                image_out = cp.multiply(image, imax_out,
-                                        dtype=computation_type)
+            if kind_out == "u":
+                image_out = cp.multiply(image, imax_out, dtype=computation_type)
             else:
-                image_out = cp.multiply(image, (imax_out - imin_out) / 2,
-                                        dtype=computation_type)
+                image_out = cp.multiply(
+                    image, (imax_out - imin_out) / 2, dtype=computation_type
+                )
                 image_out -= 1.0 / 2.0
             cp.rint(image_out, out=image_out)
             cp.clip(image_out, imin_out, imax_out, out=image_out)
-        elif kind_out == 'u':
-            image_out = cp.multiply(image, imax_out + 1,
-                                    dtype=computation_type)
+        elif kind_out == "u":
+            image_out = cp.multiply(image, imax_out + 1, dtype=computation_type)
            cp.clip(image_out, 0, imax_out, out=image_out)
         else:
-            image_out = cp.multiply(image, (imax_out - imin_out + 1.0) / 2.0,
-                                    dtype=computation_type)
+            image_out = cp.multiply(
+                image, (imax_out - imin_out + 1.0) / 2.0, dtype=computation_type
+            )
             cp.floor(image_out, out=image_out)
             cp.clip(image_out, imin_out, imax_out, out=image_out)
         return image_out.astype(dtype_out)

     # signed/unsigned int -> float
-    if kind_out == 'f':
+    if kind_out == "f":
         # use float type that can exactly represent input integers
-        computation_type = _dtype_itemsize(itemsize_in, dtype_out,
-                                           cp.float32, cp.float64)
+        computation_type = _dtype_itemsize(
+            itemsize_in, dtype_out, cp.float32, cp.float64
+        )

-        if kind_in == 'u':
+        if kind_in == "u":
             # using cp.divide or cp.multiply doesn't copy the data
             # until the computation time
-            image = cp.multiply(image, 1. / imax_in,
-                                dtype=computation_type)
+            image = cp.multiply(image, 1.0 / imax_in, dtype=computation_type)
             # DirectX uses this conversion also for signed ints
             # if imin_in:
             #     cp.maximum(image, -1.0, out=image)
-        elif kind_in == 'i':
+        elif kind_in == "i":
             # From DirectX conversions:
             # The most negative value maps to -1.0f
             # Every other value is converted to a float (call it c)
             # and then result = c * (1.0f / (2⁽ⁿ⁻¹⁾-1)).
-            image = cp.multiply(image, 1. / imax_in,
-                                dtype=computation_type)
+            image = cp.multiply(image, 1.0 / imax_in, dtype=computation_type)
             cp.maximum(image, -1.0, out=image)
         else:
             image = cp.add(image, 0.5, dtype=computation_type)
@@ -346,8 +371,8 @@ def _convert(image, dtype, force_copy=False, uniform=False):
         return image.astype(dtype_out, copy=False)

     # unsigned int -> signed/unsigned int
-    if kind_in == 'u':
-        if kind_out == 'i':
+    if kind_in == "u":
+        if kind_out == "i":
             # unsigned int -> signed int
             image = _scale(image, 8 * itemsize_in, 8 * itemsize_out - 1)
             return image.view(dtype_out)
@@ -356,17 +381,17 @@ def _convert(image, dtype, force_copy=False, uniform=False):
             return _scale(image, 8 * itemsize_in, 8 * itemsize_out)

     # signed int -> unsigned int
-    if kind_out == 'u':
+    if kind_out == "u":
         image = _scale(image, 8 * itemsize_in - 1, 8 * itemsize_out)
         result = cp.empty(image.shape, dtype_out)
-        cp.maximum(image, 0, out=result, dtype=image.dtype, casting='unsafe')
+        cp.maximum(image, 0, out=result, dtype=image.dtype, casting="unsafe")
         return result

     # signed int -> signed int
     if itemsize_in > itemsize_out:
         return _scale(image, 8 * itemsize_in - 1, 8 * itemsize_out - 1)

-    image = image.astype(_dtype_bits('i', itemsize_out * 8))
+    image = image.astype(_dtype_bits("i", itemsize_out * 8))
     image -= imin_in
     image = _scale(image, 8 * itemsize_in, 8 * itemsize_out, copy=False)
     image += imin_out
@@ -374,15 +399,22 @@ def _convert(image, dtype, force_copy=False, uniform=False):


 def convert(image, dtype, force_copy=False, uniform=False):
-    warn("The use of this function is discouraged as its behavior may change "
-         "dramatically in scikit-image 1.0. This function will be removed"
-         "in scikit-image 1.0.", FutureWarning, stacklevel=2)
-    return _convert(image=image, dtype=dtype,
-                    force_copy=force_copy, uniform=uniform)
+    warn(
+        "The use of this function is discouraged as its behavior may change "
+        "dramatically in scikit-image 1.0. This function will be removed "
+        "in scikit-image 1.0.",
+        FutureWarning,
+        stacklevel=2,
+    )
+    return _convert(
+        image=image, dtype=dtype, force_copy=force_copy, uniform=uniform
+    )


 if _convert.__doc__ is not None:
-    convert.__doc__ = _convert.__doc__ + """
+    convert.__doc__ = (
+        _convert.__doc__
+        + """

     Warns
     -----
@@ -393,6 +425,7 @@ def convert(image, dtype, force_copy=False, uniform=False):
     dramatically in scikit-image 1.0. This function will be removed
     in scikit-image 1.0.
     """
+    )


 def img_as_float32(image, force_copy=False):
diff --git a/python/cucim/src/cucim/skimage/util/lookfor.py b/python/cucim/src/cucim/skimage/util/lookfor.py
index 2c7b6996c..bfebe734b 100644
--- a/python/cucim/src/cucim/skimage/util/lookfor.py
+++ b/python/cucim/src/cucim/skimage/util/lookfor.py
@@ -28,4 +28,4 @@ def lookfor(what):
     cucim.skimage.registration.optical_flow_tvl1
         Coarse to fine optical flow estimator.
     """
-    return np.lookfor(what, sys.modules[__name__.split('.')[0]])
+    return np.lookfor(what, sys.modules[__name__.split(".")[0]])
diff --git a/python/cucim/src/cucim/skimage/util/noise.py b/python/cucim/src/cucim/skimage/util/noise.py
index a14f8008d..192e453e0 100644
--- a/python/cucim/src/cucim/skimage/util/noise.py
+++ b/python/cucim/src/cucim/skimage/util/noise.py
@@ -2,10 +2,10 @@

 from .dtype import img_as_float

-__all__ = ['random_noise']
+__all__ = ["random_noise"]


-def random_noise(image, mode='gaussian', seed=None, clip=True, **kwargs):
+def random_noise(image, mode="gaussian", seed=None, clip=True, **kwargs):
     """
     Function to add random noise of various types to a floating-point image.
@@ -103,27 +103,30 @@ def random_noise(image, mode='gaussian', seed=None, clip=True, **kwargs):
     cp.random.seed(seed=seed)

     allowedtypes = {
-        'gaussian': 'gaussian_values',
-        'localvar': 'localvar_values',
-        'poisson': 'poisson_values',
-        'salt': 'sp_values',
-        'pepper': 'sp_values',
-        's&p': 's&p_values',
-        'speckle': 'gaussian_values'}
+        "gaussian": "gaussian_values",
+        "localvar": "localvar_values",
+        "poisson": "poisson_values",
+        "salt": "sp_values",
+        "pepper": "sp_values",
+        "s&p": "s&p_values",
+        "speckle": "gaussian_values",
+    }

     kwdefaults = {
-        'mean': 0.0,
-        'var': 0.01,
-        'amount': 0.05,
-        'salt_vs_pepper': 0.5,
-        'local_vars': cp.zeros_like(image) + 0.01}
+        "mean": 0.0,
+        "var": 0.01,
+        "amount": 0.05,
+        "salt_vs_pepper": 0.5,
+        "local_vars": cp.zeros_like(image) + 0.01,
+    }

     allowedkwargs = {
-        'gaussian_values': ['mean', 'var'],
-        'localvar_values': ['local_vars'],
-        'sp_values': ['amount'],
-        's&p_values': ['amount', 'salt_vs_pepper'],
-        'poisson_values': []}
+        "gaussian_values": ["mean", "var"],
+        "localvar_values": ["local_vars"],
+        "sp_values": ["amount"],
+        "s&p_values": ["amount", "salt_vs_pepper"],
+        "poisson_values": [],
+    }

     for key in kwargs:
         if key not in allowedkwargs[allowedtypes[mode]]:
@@ -135,15 +138,16 @@ def random_noise(image, mode='gaussian', seed=None, clip=True, **kwargs):
     for kw in allowedkwargs[allowedtypes[mode]]:
         kwargs.setdefault(kw, kwdefaults[kw])

-    if mode == 'gaussian':
-        noise = cp.random.normal(kwargs['mean'], kwargs['var'] ** 0.5,
-                                 image.shape)
+    if mode == "gaussian":
+        noise = cp.random.normal(
+            kwargs["mean"], kwargs["var"] ** 0.5, image.shape
+        )
         out = image + noise

-    elif mode == 'localvar':
+    elif mode == "localvar":
         # Ensure local variance input is correct
-        if (kwargs['local_vars'] <= 0).any():
-            raise ValueError('All values of `local_vars` must be > 0.')
+        if (kwargs["local_vars"] <= 0).any():
+            raise ValueError("All values of `local_vars` must be > 0.")

         # Safe shortcut usage broadcasts kwargs['local_vars'] as a ufunc
@@ -153,7 +157,7 @@ def random_noise(image, mode='gaussian', seed=None, clip=True, **kwargs):
             0, kwargs["local_vars"] ** 0.5, kwargs["local_vars"].shape
         )

-    elif mode == 'poisson':
+    elif mode == "poisson":
         # Determine unique values in image & calculate the next power of two
         vals = len(cp.unique(image))
         vals = 2 ** cp.ceil(cp.log2(vals))
@@ -170,31 +174,42 @@ def random_noise(image, mode='gaussian', seed=None, clip=True, **kwargs):
         if low_clip == -1.0:
             out = out * (old_max + 1.0) - 1.0

-    elif mode == 'salt':
+    elif mode == "salt":
         # Re-call function with mode='s&p' and p=1 (all salt noise)
-        out = random_noise(image, mode='s&p', seed=seed,
-                           amount=kwargs['amount'], salt_vs_pepper=1.)
+        out = random_noise(
+            image,
+            mode="s&p",
+            seed=seed,
+            amount=kwargs["amount"],
+            salt_vs_pepper=1.0,
+        )

-    elif mode == 'pepper':
+    elif mode == "pepper":
         # Re-call function with mode='s&p' and p=1 (all pepper noise)
-        out = random_noise(image, mode='s&p', seed=seed,
-                           amount=kwargs['amount'], salt_vs_pepper=0.)
+        out = random_noise(
+            image,
+            mode="s&p",
+            seed=seed,
+            amount=kwargs["amount"],
+            salt_vs_pepper=0.0,
+        )

-    elif mode == 's&p':
+    elif mode == "s&p":
         out = image.copy()
-        p = kwargs['amount']
-        q = kwargs['salt_vs_pepper']
-        flipped = cp.random.choice([True, False], size=image.shape,
-                                   p=[p, 1 - p])
-        salted = cp.random.choice([True, False], size=image.shape,
-                                  p=[q, 1 - q])
+        p = kwargs["amount"]
+        q = kwargs["salt_vs_pepper"]
+        flipped = cp.random.choice(
+            [True, False], size=image.shape, p=[p, 1 - p]
+        )
+        salted = cp.random.choice([True, False], size=image.shape, p=[q, 1 - q])
         peppered = ~salted
         out[flipped & salted] = 1
         out[flipped & peppered] = low_clip

-    elif mode == 'speckle':
-        noise = cp.random.normal(kwargs['mean'], kwargs['var'] ** 0.5,
-                                 image.shape)
+    elif mode == "speckle":
+        noise = cp.random.normal(
+            kwargs["mean"], kwargs["var"] ** 0.5, image.shape
+        )
         out = image + image * noise

     # Clip back to original range, if necessary
diff --git a/python/cucim/src/cucim/skimage/util/tests/test_dtype.py b/python/cucim/src/cucim/skimage/util/tests/test_dtype.py
index d96eaa72e..1f3d5d8ef 100644
--- a/python/cucim/src/cucim/skimage/util/tests/test_dtype.py
+++ b/python/cucim/src/cucim/skimage/util/tests/test_dtype.py
@@ -5,21 +5,34 @@
 import pytest
 from cupy.testing import assert_array_equal

-from cucim.skimage import (img_as_float, img_as_float32, img_as_float64,
-                           img_as_int, img_as_ubyte, img_as_uint)
+from cucim.skimage import (
+    img_as_float,
+    img_as_float32,
+    img_as_float64,
+    img_as_int,
+    img_as_ubyte,
+    img_as_uint,
+)
 from cucim.skimage._shared._warnings import expected_warnings
 from cucim.skimage.util.dtype import _convert, convert

-dtype_range = {cp.uint8: (0, 255),
-               cp.uint16: (0, 65535),
-               cp.int8: (-128, 127),
-               cp.int16: (-32768, 32767),
-               cp.float32: (-1.0, 1.0),
-               cp.float64: (-1.0, 1.0)}
-
-
-img_funcs = (img_as_int, img_as_float64, img_as_float32,
-             img_as_uint, img_as_ubyte)
+dtype_range = {
+    cp.uint8: (0, 255),
+    cp.uint16: (0, 65535),
+    cp.int8: (-128, 127),
+    cp.int16: (-32768, 32767),
+    cp.float32: (-1.0, 1.0),
+    cp.float64: (-1.0, 1.0),
+}
+
+
+img_funcs = (
+    img_as_int,
+    img_as_float64,
+    img_as_float32,
+    img_as_uint,
+    img_as_ubyte,
+)
 dtypes_for_img_funcs = (cp.int16, cp.float64, cp.float32, cp.uint16, cp.ubyte)
 img_funcs_and_types = zip(img_funcs, dtypes_for_img_funcs)
@@ -47,21 +60,29 @@ def test_range(dtype, f_and_dt):
         omin = 0
         imin = 0

-    _verify_range("From %s to %s" % (cp.dtype(dtype), cp.dtype(dt)),
-                  y, omin, omax, np.dtype(dt))
+    _verify_range(
+        "From %s to %s" % (cp.dtype(dtype), cp.dtype(dt)),
+        y,
+        omin,
+        omax,
+        np.dtype(dt),
+    )


 # Add non-standard data types that are allowed by the `_convert` function.
 dtype_range_extra = dtype_range.copy()
-dtype_range_extra.update({cp.int32: (-2147483648, 2147483647),
-                          cp.uint32: (0, 4294967295)})
+dtype_range_extra.update(
+    {cp.int32: (-2147483648, 2147483647), cp.uint32: (0, 4294967295)}
+)

-dtype_pairs = [(cp.uint8, cp.uint32),
-               (cp.int8, cp.uint32),
-               (cp.int8, cp.int32),
-               (cp.int32, cp.int8),
-               (cp.float64, cp.float32),
-               (cp.int32, cp.float32)]
+dtype_pairs = [
+    (cp.uint8, cp.uint32),
+    (cp.int8, cp.uint32),
+    (cp.int8, cp.int32),
+    (cp.int32, cp.int8),
+    (cp.float64, cp.float32),
+    (cp.int32, cp.float32),
+]


 @pytest.mark.parametrize("dtype_in, dt", dtype_pairs)
@@ -74,13 +95,18 @@ def test_range_extra_dtypes(dtype_in, dt):
     y = _convert(x, dt)

     omin, omax = dtype_range_extra[dt]
-    _verify_range("From %s to %s" % (cp.dtype(dtype_in), cp.dtype(dt)),
-                  y, omin, omax, cp.dtype(dt))
+    _verify_range(
+        "From %s to %s" % (cp.dtype(dtype_in), cp.dtype(dt)),
+        y,
+        omin,
+        omax,
+        cp.dtype(dt),
+    )


 def test_downcast():
     x = cp.arange(10).astype(cp.uint64)
-    with expected_warnings(['Downcasting']):
+    with expected_warnings(["Downcasting"]):
         y = img_as_int(x)
     assert cp.allclose(y, x.astype(cp.int16))
     assert y.dtype == cp.int16, y.dtype
@@ -114,10 +140,12 @@ def test_bool():
     img_ = cp.zeros((10, 10), cp.bool_)
     img[1, 1] = True
     img_[1, 1] = True
-    for (func, dt) in [(img_as_int, cp.int16),
-                       (img_as_float, cp.float64),
-                       (img_as_uint, cp.uint16),
-                       (img_as_ubyte, cp.ubyte)]:
+    for func, dt in [
+        (img_as_int, cp.int16),
+        (img_as_float, cp.float64),
+        (img_as_uint, cp.uint16),
+        (img_as_ubyte, cp.ubyte),
+    ]:
         converted = func(img)
         assert cp.sum(converted) == dtype_range[dt][1]
         converted_ = func(img_)
@@ -149,17 +177,26 @@ def test_float32_passthrough():
     assert y.dtype == x.dtype


-float_dtype_list = [float, float, cp.double, cp.single, cp.float32,
-                    cp.float64, 'float32', 'float64']
+float_dtype_list = [
+    float,
+    float,
+    cp.double,
+    cp.single,
+    cp.float32,
+    cp.float64,
+    "float32",
+    "float64",
+]


 def test_float_conversion_dtype():
-    """Test any convertion from a float dtype to an other."""
+    """Test any conversion from a float dtype to another."""
     x = cp.array([-1, 1])

-    # Test all combinations of dtypes convertions
-    dtype_combin = np.array(np.meshgrid(float_dtype_list,
-                                        float_dtype_list)).T.reshape(-1, 2)
+    # Test all combinations of dtype conversions
+    dtype_combin = np.array(
+        np.meshgrid(float_dtype_list, float_dtype_list)
+    ).T.reshape(-1, 2)

     for dtype_in, dtype_out in dtype_combin:
         x = x.astype(dtype_in)
@@ -171,9 +208,10 @@ def test_float_conversion_dtype_warns():
     """Test that convert issues a warning when called"""
     x = np.array([-1, 1])

-    # Test all combinations of dtypes convertions
-    dtype_combin = np.array(np.meshgrid(float_dtype_list,
-                                        float_dtype_list)).T.reshape(-1, 2)
+    # Test all combinations of dtype conversions
+    dtype_combin = np.array(
+        np.meshgrid(float_dtype_list, float_dtype_list)
+    ).T.reshape(-1, 2)

     for dtype_in, dtype_out in dtype_combin:
         x = x.astype(dtype_in)
diff --git a/python/cucim/src/cucim/skimage/util/tests/test_invert.py b/python/cucim/src/cucim/skimage/util/tests/test_invert.py
index f97976146..91a2703e6 100644
--- a/python/cucim/src/cucim/skimage/util/tests/test_invert.py
+++ b/python/cucim/src/cucim/skimage/util/tests/test_invert.py
@@ -7,7 +7,7 @@


 def test_invert_bool():
-    dtype = 'bool'
+    dtype = "bool"
     image = cp.zeros((3, 3), dtype=dtype)
     upper_dtype_limit = dtype_limits(image, clip_negative=False)[1]
     image[1, :] = upper_dtype_limit
@@ -18,7 +18,7 @@ def test_invert_uint8():
dtype = 'uint8' + dtype = "uint8" image = cp.zeros((3, 3), dtype=dtype) upper_dtype_limit = dtype_limits(image, clip_negative=False)[1] image[1, :] = upper_dtype_limit @@ -29,10 +29,11 @@ def test_invert_uint8(): def test_invert_int8(): - dtype = 'int8' + dtype = "int8" image = cp.zeros((3, 3), dtype=dtype) - lower_dtype_limit, upper_dtype_limit = \ - dtype_limits(image, clip_negative=False) + lower_dtype_limit, upper_dtype_limit = dtype_limits( + image, clip_negative=False + ) image[1, :] = lower_dtype_limit image[2, :] = upper_dtype_limit expected = cp.zeros((3, 3), dtype=dtype) @@ -44,10 +45,11 @@ def test_invert_int8(): def test_invert_float64_signed(): - dtype = 'float64' + dtype = "float64" image = cp.zeros((3, 3), dtype=dtype) - lower_dtype_limit, upper_dtype_limit = \ - dtype_limits(image, clip_negative=False) + lower_dtype_limit, upper_dtype_limit = dtype_limits( + image, clip_negative=False + ) image[1, :] = lower_dtype_limit image[2, :] = upper_dtype_limit expected = cp.zeros((3, 3), dtype=dtype) @@ -58,10 +60,11 @@ def test_invert_float64_signed(): def test_invert_float64_unsigned(): - dtype = 'float64' + dtype = "float64" image = cp.zeros((3, 3), dtype=dtype) - lower_dtype_limit, upper_dtype_limit = \ - dtype_limits(image, clip_negative=True) + lower_dtype_limit, upper_dtype_limit = dtype_limits( + image, clip_negative=True + ) image[2, :] = upper_dtype_limit expected = cp.zeros((3, 3), dtype=dtype) expected[0, :] = upper_dtype_limit diff --git a/python/cucim/src/cucim/skimage/util/tests/test_map_array.py b/python/cucim/src/cucim/skimage/util/tests/test_map_array.py index 95db44806..640af0b31 100644 --- a/python/cucim/src/cucim/skimage/util/tests/test_map_array.py +++ b/python/cucim/src/cucim/skimage/util/tests/test_map_array.py @@ -27,7 +27,7 @@ def test_arraymap_long_str(): in_values = cp.unique(labels) out_values = cp.random.random(in_values.shape) m = ArrayMap(in_values, out_values) - assert len(str(m).split('\n')) == m._max_str_lines + 2 + assert len(str(m).split("\n")) == m._max_str_lines + 2 def test_arraymap_update(): diff --git a/python/cucim/src/cucim/skimage/util/tests/test_random_noise.py b/python/cucim/src/cucim/skimage/util/tests/test_random_noise.py index ae601e4bb..5040e1f02 100644 --- a/python/cucim/src/cucim/skimage/util/tests/test_random_noise.py +++ b/python/cucim/src/cucim/skimage/util/tests/test_random_noise.py @@ -18,7 +18,7 @@ def test_set_seed(): def test_salt(): seed = 42 cam = img_as_float(camerad) - cam_noisy = random_noise(cam, seed=seed, mode='salt', amount=0.15) + cam_noisy = random_noise(cam, seed=seed, mode="salt", amount=0.15) saltmask = cam != cam_noisy # Ensure all changes are to 1.0 @@ -31,14 +31,14 @@ def test_salt(): def test_salt_p1(): image = cp.random.rand(2, 3) - noisy = random_noise(image, mode='salt', amount=1) + noisy = random_noise(image, mode="salt", amount=1) assert_array_equal(noisy, [[1, 1, 1], [1, 1, 1]]) def test_singleton_dim(): """Ensure images where size of a given dimension is 1 work correctly.""" image = cp.random.rand(1, 20) - noisy = random_noise(image, mode='salt', amount=0.1, seed=42) + noisy = random_noise(image, mode="salt", amount=0.1, seed=42) assert cp.sum(noisy == 1) == 3 # GRL: modified to match value for CuPy @@ -47,7 +47,7 @@ def test_pepper(): cam = img_as_float(camerad) data_signed = cam * 2.0 - 1.0 # Same image, on range [-1, 1] - cam_noisy = random_noise(cam, seed=seed, mode='pepper', amount=0.15) + cam_noisy = random_noise(cam, seed=seed, mode="pepper", amount=0.15) peppermask = cam != cam_noisy # 
Ensure all changes are to 1.0 @@ -59,29 +59,33 @@ def test_pepper(): # Check to make sure pepper gets added properly to signed images orig_zeros = (data_signed == -1).sum() - cam_noisy_signed = random_noise(data_signed, seed=seed, mode='pepper', - amount=.15) + cam_noisy_signed = random_noise( + data_signed, seed=seed, mode="pepper", amount=0.15 + ) - proportion = (float((cam_noisy_signed == -1).sum() - orig_zeros) / - (cam.shape[0] * cam.shape[1])) + proportion = float((cam_noisy_signed == -1).sum() - orig_zeros) / ( + cam.shape[0] * cam.shape[1] + ) assert 0.11 < proportion <= 0.15 def test_salt_and_pepper(): seed = 42 cam = img_as_float(camerad) - cam_noisy = random_noise(cam, seed=seed, mode='s&p', amount=0.15, - salt_vs_pepper=0.25) - saltmask = cp.logical_and(cam != cam_noisy, cam_noisy == 1.) - peppermask = cp.logical_and(cam != cam_noisy, cam_noisy == 0.) + cam_noisy = random_noise( + cam, seed=seed, mode="s&p", amount=0.15, salt_vs_pepper=0.25 + ) + saltmask = cp.logical_and(cam != cam_noisy, cam_noisy == 1.0) + peppermask = cp.logical_and(cam != cam_noisy, cam_noisy == 0.0) # Ensure all changes are to 0. or 1. assert_allclose(cam_noisy[saltmask], cp.ones(int(saltmask.sum()))) assert_allclose(cam_noisy[peppermask], cp.zeros(int(peppermask.sum()))) # Ensure approximately correct amount of noise was added - proportion = float( - saltmask.sum() + peppermask.sum()) / (cam.shape[0] * cam.shape[1]) + proportion = float(saltmask.sum() + peppermask.sum()) / ( + cam.shape[0] * cam.shape[1] + ) assert 0.11 < proportion <= 0.18 # Verify the relative amount of salt vs. pepper is close to expected @@ -107,8 +111,9 @@ def test_localvar(): local_vars[64:, :64] = 0.25 local_vars[64:, 64:] = 0.45 - data_gaussian = random_noise(data, mode='localvar', seed=seed, - local_vars=local_vars, clip=False) + data_gaussian = random_noise( + data, mode="localvar", seed=seed, local_vars=local_vars, clip=False + ) assert 0.0 < data_gaussian[:64, :64].var() < 0.002 assert 0.095 < data_gaussian[:64, 64:].var() < 0.105 assert 0.245 < data_gaussian[64:, :64].var() < 0.255 @@ -117,33 +122,36 @@ def test_localvar(): # Ensure local variance bounds checking works properly bad_local_vars = cp.zeros_like(data) with pytest.raises(ValueError): - random_noise(data, mode='localvar', seed=seed, - local_vars=bad_local_vars) + random_noise( + data, mode="localvar", seed=seed, local_vars=bad_local_vars + ) bad_local_vars += 0.1 bad_local_vars[0, 0] = -1 with pytest.raises(ValueError): - random_noise(data, mode='localvar', seed=seed, - local_vars=bad_local_vars) + random_noise( + data, mode="localvar", seed=seed, local_vars=bad_local_vars + ) def test_speckle(): seed = 42 data = cp.zeros((128, 128)) + 0.1 cp.random.seed(seed=seed) - noise = cp.random.normal(0.1, 0.02 ** 0.5, (128, 128)) + noise = cp.random.normal(0.1, 0.02**0.5, (128, 128)) expected = cp.clip(data + data * noise, 0, 1) - data_speckle = random_noise(data, mode='speckle', seed=seed, mean=0.1, - var=0.02) + data_speckle = random_noise( + data, mode="speckle", seed=seed, mean=0.1, var=0.02 + ) assert_allclose(expected, data_speckle) def test_poisson(): seed = 42 data = camerad # 512x512 grayscale uint8 - cam_noisy = random_noise(data, mode='poisson', seed=seed) - cam_noisy2 = random_noise(data, mode='poisson', seed=seed, clip=False) + cam_noisy = random_noise(data, mode="poisson", seed=seed) + cam_noisy2 = random_noise(data, mode="poisson", seed=seed, clip=False) cp.random.seed(seed=seed) expected = cp.random.poisson(img_as_float(data) * 256) / 256.0 @@ -157,16 
+165,18 @@ def test_clip_poisson(): data_signed = img_as_float(data) * 2.0 - 1.0 # Same image, on range [-1, 1] # Signed and unsigned, clipped - cam_poisson = random_noise(data, mode='poisson', seed=seed, clip=True) - cam_poisson2 = random_noise(data_signed, mode='poisson', seed=seed, - clip=True) + cam_poisson = random_noise(data, mode="poisson", seed=seed, clip=True) + cam_poisson2 = random_noise( + data_signed, mode="poisson", seed=seed, clip=True + ) assert (cam_poisson.max() == 1.0) and (cam_poisson.min() == 0.0) assert (cam_poisson2.max() == 1.0) and (cam_poisson2.min() == -1.0) # Signed and unsigned, unclipped - cam_poisson = random_noise(data, mode='poisson', seed=seed, clip=False) - cam_poisson2 = random_noise(data_signed, mode='poisson', seed=seed, - clip=False) + cam_poisson = random_noise(data, mode="poisson", seed=seed, clip=False) + cam_poisson2 = random_noise( + data_signed, mode="poisson", seed=seed, clip=False + ) assert (cam_poisson.max() > 1.15) and (cam_poisson.min() == 0.0) assert (cam_poisson2.max() > 1.3) and (cam_poisson2.min() == -1.0) @@ -177,16 +187,18 @@ def test_clip_gaussian(): data_signed = img_as_float(data) * 2.0 - 1.0 # Same image, on range [-1, 1] # Signed and unsigned, clipped - cam_gauss = random_noise(data, mode='gaussian', seed=seed, clip=True) - cam_gauss2 = random_noise(data_signed, mode='gaussian', seed=seed, - clip=True) + cam_gauss = random_noise(data, mode="gaussian", seed=seed, clip=True) + cam_gauss2 = random_noise( + data_signed, mode="gaussian", seed=seed, clip=True + ) assert (cam_gauss.max() == 1.0) and (cam_gauss.min() == 0.0) assert (cam_gauss2.max() == 1.0) and (cam_gauss2.min() == -1.0) # Signed and unsigned, unclipped - cam_gauss = random_noise(data, mode='gaussian', seed=seed, clip=False) - cam_gauss2 = random_noise(data_signed, mode='gaussian', seed=seed, - clip=False) + cam_gauss = random_noise(data, mode="gaussian", seed=seed, clip=False) + cam_gauss2 = random_noise( + data_signed, mode="gaussian", seed=seed, clip=False + ) assert (cam_gauss.max() > 1.22) and (cam_gauss.min() < -0.33) assert (cam_gauss2.max() > 1.219) and (cam_gauss2.min() < -1.219) @@ -197,16 +209,18 @@ def test_clip_speckle(): data_signed = img_as_float(data) * 2.0 - 1.0 # Same image, on range [-1, 1] # Signed and unsigned, clipped - cam_speckle = random_noise(data, mode='speckle', seed=seed, clip=True) - cam_speckle_sig = random_noise(data_signed, mode='speckle', seed=seed, - clip=True) + cam_speckle = random_noise(data, mode="speckle", seed=seed, clip=True) + cam_speckle_sig = random_noise( + data_signed, mode="speckle", seed=seed, clip=True + ) assert (cam_speckle.max() == 1.0) and (cam_speckle.min() == 0.0) assert (cam_speckle_sig.max() == 1.0) and (cam_speckle_sig.min() == -1.0) # Signed and unsigned, unclipped - cam_speckle = random_noise(data, mode='speckle', seed=seed, clip=False) - cam_speckle_sig = random_noise(data_signed, mode='speckle', seed=seed, - clip=False) + cam_speckle = random_noise(data, mode="speckle", seed=seed, clip=False) + cam_speckle_sig = random_noise( + data_signed, mode="speckle", seed=seed, clip=False + ) assert (cam_speckle.max() > 1.219) and (cam_speckle.min() == 0.0) assert (cam_speckle_sig.max() > 1.219) and (cam_speckle_sig.min() < -1.219) @@ -214,4 +228,4 @@ def test_clip_speckle(): def test_bad_mode(): data = cp.zeros((64, 64)) with pytest.raises(KeyError): - random_noise(data, 'perlin') + random_noise(data, "perlin") diff --git a/python/cucim/src/cucim/time.py b/python/cucim/src/cucim/time.py index d1a2f8086..a35ceec90 
100644 --- a/python/cucim/src/cucim/time.py +++ b/python/cucim/src/cucim/time.py @@ -1,3 +1,3 @@ from .skimage._vendored.time import repeat -__all__ = ['repeat'] +__all__ = ["repeat"] diff --git a/python/cucim/src/localtest.py b/python/cucim/src/localtest.py index b255b8287..452e6bc47 100644 --- a/python/cucim/src/localtest.py +++ b/python/cucim/src/localtest.py @@ -37,7 +37,7 @@ # A tuple of dimension sizes (in the order of `dims`). print(img.shape) # Returns size as a tuple for the given dimension order. -print(img.size('XYC')) +print(img.size("XYC")) # The data type of the image. print(img.dtype) # A channel name list. @@ -106,16 +106,19 @@ def load_tile_cucim(slide, start_loc, tile_size): for h in range(start_location, height, tile_size): for w in range(start_location, width, tile_size): count += 1 - start_loc_iter = ((w, h) - for h in range(start_location, height, tile_size) - for w in range(start_location, width, tile_size)) + start_loc_iter = ( + (w, h) + for h in range(start_location, height, tile_size) + for w in range(start_location, width, tile_size) + ) with Timer(" Thread elapsed time (OpenSlide)") as timer: with concurrent.futures.ThreadPoolExecutor( max_workers=num_workers ) as executor: executor.map( lambda start_loc: load_tile_openslide( - slide, start_loc, tile_size), + slide, start_loc, tile_size + ), start_loc_iter, ) openslide_time = timer.elapsed_time() @@ -123,9 +126,11 @@ def load_tile_cucim(slide, start_loc, tile_size): cucim_time = 0 slide = CuImage(input_file) - start_loc_iter = ((w, h) - for h in range(start_location, height, tile_size) - for w in range(start_location, width, tile_size)) + start_loc_iter = ( + (w, h) + for h in range(start_location, height, tile_size) + for w in range(start_location, width, tile_size) + ) with Timer(" Thread elapsed time (cuCIM)") as timer: with concurrent.futures.ThreadPoolExecutor( max_workers=num_workers @@ -136,10 +141,16 @@ def load_tile_cucim(slide, start_loc, tile_size): ) cucim_time = timer.elapsed_time() cucim_tot_time += cucim_time - print(" Performance gain (OpenSlide/cuCIM): {}".format( - openslide_time / cucim_time)) + print( + " Performance gain (OpenSlide/cuCIM): {}".format( + openslide_time / cucim_time + ) + ) print("Total time (OpenSlide):", openslide_tot_time) print("Total time (cuCIM):", cucim_tot_time) -print("Average performance gain (OpenSlide/cuCIM): {}".format( - openslide_tot_time / cucim_tot_time)) +print( + "Average performance gain (OpenSlide/cuCIM): {}".format( + openslide_tot_time / cucim_tot_time + ) +) diff --git a/python/cucim/tests/fixtures/testimage.py b/python/cucim/tests/fixtures/testimage.py index 343698676..a4a9b9583 100644 --- a/python/cucim/tests/fixtures/testimage.py +++ b/python/cucim/tests/fixtures/testimage.py @@ -22,159 +22,188 @@ def gen_image(tmpdir_factory, recipe, resolution=None): - dataset_path = tmpdir_factory.mktemp('datasets').strpath + dataset_path = tmpdir_factory.mktemp("datasets").strpath dataset_gen = ImageGenerator(dataset_path, [recipe], [resolution]) image_path = dataset_gen.gen() return (dataset_path, image_path[0]) # tiff_stripe_32x24_16 -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def testimg_tiff_stripe_32x24_16_jpeg(tmpdir_factory): - dataset_path, image_path = gen_image(tmpdir_factory, 'tiff') + dataset_path, image_path = gen_image(tmpdir_factory, "tiff") yield image_path # Clean up fake dataset folder shutil.rmtree(dataset_path) -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def 
testimg_tiff_stripe_32x24_16_deflate(tmpdir_factory): dataset_path, image_path = gen_image( - tmpdir_factory, 'tiff::stripe:32x24:16:deflate') + tmpdir_factory, "tiff::stripe:32x24:16:deflate" + ) yield image_path # Clean up fake dataset folder shutil.rmtree(dataset_path) -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def testimg_tiff_stripe_32x24_16_raw(tmpdir_factory): dataset_path, image_path = gen_image( - tmpdir_factory, 'tiff::stripe:32x24:16:raw') + tmpdir_factory, "tiff::stripe:32x24:16:raw" + ) yield image_path # Clean up fake dataset folder shutil.rmtree(dataset_path) -@pytest.fixture(scope='session', params=[ - lazy_fixture('testimg_tiff_stripe_32x24_16_jpeg'), - lazy_fixture('testimg_tiff_stripe_32x24_16_deflate'), - lazy_fixture('testimg_tiff_stripe_32x24_16_raw') -]) +@pytest.fixture( + scope="session", + params=[ + lazy_fixture("testimg_tiff_stripe_32x24_16_jpeg"), + lazy_fixture("testimg_tiff_stripe_32x24_16_deflate"), + lazy_fixture("testimg_tiff_stripe_32x24_16_raw"), + ], +) def testimg_tiff_stripe_32x24_16(request): return request.param # tiff_stripe_4096x4096_256 -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def testimg_tiff_stripe_4096x4096_256_jpeg(tmpdir_factory): dataset_path, image_path = gen_image( - tmpdir_factory, 'tiff::stripe:4096x4096:256:jpeg') + tmpdir_factory, "tiff::stripe:4096x4096:256:jpeg" + ) yield image_path # Clean up fake dataset folder shutil.rmtree(dataset_path) -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def testimg_tiff_stripe_4096x4096_256_deflate(tmpdir_factory): dataset_path, image_path = gen_image( - tmpdir_factory, 'tiff::stripe:4096x4096:256:deflate') + tmpdir_factory, "tiff::stripe:4096x4096:256:deflate" + ) yield image_path # Clean up fake dataset folder shutil.rmtree(dataset_path) -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def testimg_tiff_stripe_4096x4096_256_raw(tmpdir_factory): dataset_path, image_path = gen_image( - tmpdir_factory, 'tiff::stripe:4096x4096:256:raw') + tmpdir_factory, "tiff::stripe:4096x4096:256:raw" + ) yield image_path # Clean up fake dataset folder shutil.rmtree(dataset_path) -@pytest.fixture(scope='session', params=[ - lazy_fixture('testimg_tiff_stripe_4096x4096_256_jpeg'), - lazy_fixture('testimg_tiff_stripe_4096x4096_256_deflate'), - lazy_fixture('testimg_tiff_stripe_4096x4096_256_raw') -]) +@pytest.fixture( + scope="session", + params=[ + lazy_fixture("testimg_tiff_stripe_4096x4096_256_jpeg"), + lazy_fixture("testimg_tiff_stripe_4096x4096_256_deflate"), + lazy_fixture("testimg_tiff_stripe_4096x4096_256_raw"), + ], +) def testimg_tiff_stripe_4096x4096_256(request): return request.param # tiff_stripe_100000x100000_256 -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def testimg_tiff_stripe_100000x100000_256_jpeg(tmpdir_factory): dataset_path, image_path = gen_image( - tmpdir_factory, 'tiff::stripe:100000x100000:256:jpeg') + tmpdir_factory, "tiff::stripe:100000x100000:256:jpeg" + ) yield image_path # Clean up fake dataset folder shutil.rmtree(dataset_path) -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def testimg_tiff_stripe_100000x100000_256_deflate(tmpdir_factory): dataset_path, image_path = gen_image( - tmpdir_factory, 'tiff::stripe:100000x100000:256:deflate') + tmpdir_factory, "tiff::stripe:100000x100000:256:deflate" + ) yield image_path # Clean up fake dataset folder shutil.rmtree(dataset_path) -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def 
testimg_tiff_stripe_100000x100000_256_raw(tmpdir_factory): dataset_path, image_path = gen_image( - tmpdir_factory, 'tiff::stripe:100000x100000:256:raw') + tmpdir_factory, "tiff::stripe:100000x100000:256:raw" + ) yield image_path # Clean up fake dataset folder shutil.rmtree(dataset_path) -@pytest.fixture(scope='session', params=[ - lazy_fixture('testimg_tiff_stripe_100000x100000_256_jpeg'), - lazy_fixture('testimg_tiff_stripe_100000x100000_256_deflate'), - lazy_fixture('testimg_tiff_stripe_100000x100000_256_raw') -]) +@pytest.fixture( + scope="session", + params=[ + lazy_fixture("testimg_tiff_stripe_100000x100000_256_jpeg"), + lazy_fixture("testimg_tiff_stripe_100000x100000_256_deflate"), + lazy_fixture("testimg_tiff_stripe_100000x100000_256_raw"), + ], +) def testimg_tiff_stripe_100000x100000_256(request): return request.param # testimg_tiff_stripe_4096_4096_256_jpeg_resolution -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def testimg_tiff_stripe_4096_4096_256_jpeg_resolution_3_5_centimeter( - tmpdir_factory): + tmpdir_factory, +): resolution = (0.3, 0.5, "CENTIMETER") dataset_path, image_path = gen_image( - tmpdir_factory, 'tiff::stripe:4096x4096:256:jpeg', resolution) + tmpdir_factory, "tiff::stripe:4096x4096:256:jpeg", resolution + ) yield image_path, resolution # Clean up fake dataset folder shutil.rmtree(dataset_path) -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def testimg_tiff_stripe_4096_4096_256_jpeg_resolution_4_7_inch(tmpdir_factory): resolution = (0.4, 0.7, "INCH") dataset_path, image_path = gen_image( - tmpdir_factory, 'tiff::stripe:4096x4096:256:jpeg', resolution) + tmpdir_factory, "tiff::stripe:4096x4096:256:jpeg", resolution + ) yield image_path, resolution # Clean up fake dataset folder shutil.rmtree(dataset_path) -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def testimg_tiff_stripe_4096_4096_256_jpeg_resolution_9_1_none(tmpdir_factory): resolution = (9, 1, "NONE") dataset_path, image_path = gen_image( - tmpdir_factory, 'tiff::stripe:4096x4096:256:jpeg', resolution) + tmpdir_factory, "tiff::stripe:4096x4096:256:jpeg", resolution + ) yield image_path, resolution # Clean up fake dataset folder shutil.rmtree(dataset_path) -@pytest.fixture(scope='session', params=[ - lazy_fixture( - 'testimg_tiff_stripe_4096_4096_256_jpeg_resolution_3_5_centimeter'), - lazy_fixture('testimg_tiff_stripe_4096_4096_256_jpeg_resolution_4_7_inch'), - lazy_fixture('testimg_tiff_stripe_4096_4096_256_jpeg_resolution_9_1_none'), -]) +@pytest.fixture( + scope="session", + params=[ + lazy_fixture( + "testimg_tiff_stripe_4096_4096_256_jpeg_resolution_3_5_centimeter" + ), + lazy_fixture( + "testimg_tiff_stripe_4096_4096_256_jpeg_resolution_4_7_inch" + ), + lazy_fixture( + "testimg_tiff_stripe_4096_4096_256_jpeg_resolution_9_1_none" + ), + ], +) def testimg_tiff_stripe_4096_4096_256_jpeg_resolution(request): return request.param diff --git a/python/cucim/tests/performance/clara/test_read_region_memory_usage.py b/python/cucim/tests/performance/clara/test_read_region_memory_usage.py index 0ae507ed5..aba340eea 100644 --- a/python/cucim/tests/performance/clara/test_read_region_memory_usage.py +++ b/python/cucim/tests/performance/clara/test_read_region_memory_usage.py @@ -20,10 +20,11 @@ def test_read_region_cuda_memleak(testimg_tiff_stripe_4096x4096_256_jpeg): import GPUtil + gpus = GPUtil.getGPUs() if len(gpus) == 0: - pytest.skip('No gpu available') + pytest.skip("No gpu available") img = 
open_image_cucim(testimg_tiff_stripe_4096x4096_256_jpeg) @@ -31,7 +32,7 @@ def test_read_region_cuda_memleak(testimg_tiff_stripe_4096x4096_256_jpeg): mem_usage_history = [gpu.memoryUsed] for i in range(10): - _ = img.read_region(device='cuda') + _ = img.read_region(device="cuda") gpus = GPUtil.getGPUs() gpu = gpus[0] mem_usage_history.append(gpu.memoryUsed) @@ -50,6 +51,7 @@ def test_read_region_cpu_memleak(testimg_tiff_stripe_4096x4096_256): import os import psutil + process = psutil.Process(os.getpid()) img = open_image_cucim(testimg_tiff_stripe_4096x4096_256) @@ -71,17 +73,20 @@ def test_read_random_region_cpu_memleak(testimg_tiff_stripe_4096x4096_256): import random import psutil + process = psutil.Process(os.getpid()) img = open_image_cucim(testimg_tiff_stripe_4096x4096_256) iteration = 1000 mem_usage_history = [process.memory_info().rss] * iteration - level_count = img.resolutions['level_count'] + level_count = img.resolutions["level_count"] for i in range(iteration): - location = (random.randrange(-2048, 4096 + 2048), - random.randrange(-2048, 4096 + 2048)) + location = ( + random.randrange(-2048, 4096 + 2048), + random.randrange(-2048, 4096 + 2048), + ) level = random.randrange(0, level_count) _ = img.read_region(location, (256, 256), level) mem_usage_history[i] = process.memory_info().rss diff --git a/python/cucim/tests/unit/clara/converter/test_converter.py b/python/cucim/tests/unit/clara/converter/test_converter.py index fa5bc30c3..6e54640ea 100644 --- a/python/cucim/tests/unit/clara/converter/test_converter.py +++ b/python/cucim/tests/unit/clara/converter/test_converter.py @@ -18,8 +18,8 @@ def test_image_converter_stripe_4096x4096_256_jpeg( - tmp_path, - testimg_tiff_stripe_4096x4096_256_jpeg): + tmp_path, testimg_tiff_stripe_4096x4096_256_jpeg +): import tifffile from cucim.clara.converter import tiff diff --git a/python/cucim/tests/unit/clara/test_image_cache.py b/python/cucim/tests/unit/clara/test_image_cache.py index f61026bf1..f6f7d199f 100644 --- a/python/cucim/tests/unit/clara/test_image_cache.py +++ b/python/cucim/tests/unit/clara/test_image_cache.py @@ -16,6 +16,7 @@ def test_get_nocache(): from cucim import CuImage + cache = CuImage.cache() assert int(cache.type) == 0 @@ -32,13 +33,14 @@ def test_get_nocache(): # {'type': 'nocache', 'memory_capacity': 1024, 'capacity': 5461, # 'mutex_pool_capacity': 11117, 'list_padding': 10000, # 'extra_shared_memory_size': 100, 'record_stat': False} - assert config['type'] == 'nocache' - assert not config['record_stat'] + assert config["type"] == "nocache" + assert not config["record_stat"] def test_get_per_process_cache(): from cucim import CuImage - cache = CuImage.cache('per_process', memory_capacity=2048) + + cache = CuImage.cache("per_process", memory_capacity=2048) assert int(cache.type) == 1 assert cache.memory_size == 0 assert cache.memory_capacity == 2**20 * 2048 @@ -53,14 +55,15 @@ def test_get_per_process_cache(): # {'type': 'per_process', 'memory_capacity': 2048, 'capacity': 10922, # 'mutex_pool_capacity': 11117, 'list_padding': 10000, # 'extra_shared_memory_size': 100, 'record_stat': False} - assert config['type'] == 'per_process' - assert config['memory_capacity'] == 2048 - assert not config['record_stat'] + assert config["type"] == "per_process" + assert config["memory_capacity"] == 2048 + assert not config["record_stat"] def test_get_shared_memory_cache(): from cucim import CuImage - cache = CuImage.cache('shared_memory', memory_capacity=2048) + + cache = CuImage.cache("shared_memory", memory_capacity=2048) assert 
int(cache.type) == 2 assert cache.memory_size == 0 # It allocates additional memory @@ -76,9 +79,9 @@ def test_get_shared_memory_cache(): # {'type': 'shared_memory', 'memory_capacity': 2048, 'capacity': 10922, # 'mutex_pool_capacity': 11117, 'list_padding': 10000, # 'extra_shared_memory_size': 100, 'record_stat': False} - assert config['type'] == 'shared_memory' - assert config['memory_capacity'] == 2048 - assert not config['record_stat'] + assert config["type"] == "shared_memory" + assert config["memory_capacity"] == 2048 + assert not config["record_stat"] def test_preferred_memory_capacity(testimg_tiff_stripe_32x24_16_jpeg): @@ -88,23 +91,25 @@ def test_preferred_memory_capacity(testimg_tiff_stripe_32x24_16_jpeg): img = CuImage(testimg_tiff_stripe_32x24_16_jpeg) # same with `img.resolutions["level_dimensions"][0]` - image_size = img.size('XY') # 32x24 - tile_size = img.resolutions['level_tile_sizes'][0] # 16x16 + image_size = img.size("XY") # 32x24 + tile_size = img.resolutions["level_tile_sizes"][0] # 16x16 patch_size = (tile_size[0] * 2, tile_size[0] * 2) - bytes_per_pixel = 3 # default: 3 + bytes_per_pixel = 3 # default: 3 # Below three statements are the same. memory_capacity = preferred_memory_capacity(img, patch_size=patch_size) memory_capacity2 = preferred_memory_capacity( - None, image_size, tile_size, patch_size, bytes_per_pixel) + None, image_size, tile_size, patch_size, bytes_per_pixel + ) memory_capacity3 = preferred_memory_capacity( - None, image_size, patch_size=patch_size) + None, image_size, patch_size=patch_size + ) assert memory_capacity == memory_capacity2 # 1 == 1 assert memory_capacity2 == memory_capacity3 # 1 == 1 # You can also manually set capacity` (e.g., `capacity=500`) - cache = CuImage.cache('per_process', memory_capacity=memory_capacity) + cache = CuImage.cache("per_process", memory_capacity=memory_capacity) assert int(cache.type) == 1 assert cache.memory_size == 0 assert cache.memory_capacity == 2**20 * 1 @@ -115,8 +120,12 @@ def test_preferred_memory_capacity(testimg_tiff_stripe_32x24_16_jpeg): assert cache.miss_count == 0 basic_memory_capacity = preferred_memory_capacity( - None, image_size=(1024 * 1024, 1024 * 1024), tile_size=(256, 256), - patch_size=(256, 256), bytes_per_pixel=3) + None, + image_size=(1024 * 1024, 1024 * 1024), + tile_size=(256, 256), + patch_size=(256, 256), + bytes_per_pixel=3, + ) assert basic_memory_capacity == 1536 # https://godbolt.org/z/jY7G84xzT @@ -125,13 +134,21 @@ def test_reserve_more_cache_memory(): from cucim.clara.cache import preferred_memory_capacity memory_capacity = preferred_memory_capacity( - None, image_size=(1024 * 1024, 1024 * 1024), tile_size=(256, 256), - patch_size=(256, 256), bytes_per_pixel=3) + None, + image_size=(1024 * 1024, 1024 * 1024), + tile_size=(256, 256), + patch_size=(256, 256), + bytes_per_pixel=3, + ) new_memory_capacity = preferred_memory_capacity( - None, image_size=(1024 * 1024, 1024 * 1024), tile_size=(256, 256), - patch_size=(512, 512), bytes_per_pixel=3) - - cache = CuImage.cache('per_process', memory_capacity=memory_capacity) + None, + image_size=(1024 * 1024, 1024 * 1024), + tile_size=(256, 256), + patch_size=(512, 512), + bytes_per_pixel=3, + ) + + cache = CuImage.cache("per_process", memory_capacity=memory_capacity) assert int(cache.type) == 1 assert cache.memory_size == 0 assert cache.memory_capacity == 2**20 * 1536 @@ -162,7 +179,7 @@ def test_reserve_more_cache_memory(): assert cache.hit_count == 0 assert cache.miss_count == 0 - cache = CuImage.cache('no_cache') + cache = 
CuImage.cache("no_cache") # Set new cache will reset memory size assert int(cache.type) == 0 assert cache.memory_size == 0 @@ -181,7 +198,8 @@ def test_cache_hit_miss(testimg_tiff_stripe_32x24_16_jpeg): img = CuImage(testimg_tiff_stripe_32x24_16_jpeg) memory_capacity = preferred_memory_capacity(img, patch_size=(16, 16)) cache = CuImage.cache( - 'per_process', memory_capacity=memory_capacity, record_stat=True) + "per_process", memory_capacity=memory_capacity, record_stat=True + ) img.read_region((0, 0), (8, 8)) assert (cache.hit_count, cache.miss_count) == (0, 1) @@ -206,7 +224,7 @@ def test_cache_hit_miss(testimg_tiff_stripe_32x24_16_jpeg): assert cache.size == 1 assert cache.capacity == 5 - cache = CuImage.cache('no_cache') + cache = CuImage.cache("no_cache") assert int(cache.type) == 0 assert cache.memory_size == 0 diff --git a/python/cucim/tests/unit/clara/test_load_image.py b/python/cucim/tests/unit/clara/test_load_image.py index 7c173ba46..5a234c08a 100644 --- a/python/cucim/tests/unit/clara/test_load_image.py +++ b/python/cucim/tests/unit/clara/test_load_image.py @@ -20,4 +20,4 @@ def test_load_non_existing_image(): with pytest.raises(ValueError, match=r"Cannot open .*"): - _ = open_image_cucim('/tmp/non_existing_image.tif') + _ = open_image_cucim("/tmp/non_existing_image.tif") diff --git a/python/cucim/tests/unit/clara/test_load_image_metadata.py b/python/cucim/tests/unit/clara/test_load_image_metadata.py index 2452d4324..971e4e171 100644 --- a/python/cucim/tests/unit/clara/test_load_image_metadata.py +++ b/python/cucim/tests/unit/clara/test_load_image_metadata.py @@ -13,9 +13,10 @@ # limitations under the License. # -from ...util.io import open_image_cucim import math +from ...util.io import open_image_cucim + def test_load_image_metadata(testimg_tiff_stripe_32x24_16): import numpy as np @@ -25,15 +26,15 @@ def test_load_image_metadata(testimg_tiff_stripe_32x24_16): # True if image data is loaded & available. assert img.is_loaded # A device type. - assert str(img.device) == 'cpu' + assert str(img.device) == "cpu" # The number of dimensions. assert img.ndim == 3 # A string containing a list of dimensions being requested. - assert img.dims == 'YXC' + assert img.dims == "YXC" # A tuple of dimension sizes (in the order of `dims`). assert img.shape == [24, 32, 3] # Returns size as a tuple for the given dimension order. - assert img.size('XYC') == [32, 24, 3] + assert img.size("XYC") == [32, 24, 3] # The data type of the image. dtype = img.dtype assert dtype.code == 1 @@ -42,26 +43,26 @@ def test_load_image_metadata(testimg_tiff_stripe_32x24_16): # The typestr of the image. assert np.dtype(img.typestr) == np.uint8 # A channel name list. - assert img.channel_names == ['R', 'G', 'B'] + assert img.channel_names == ["R", "G", "B"] # Returns physical size in tuple. assert img.spacing() == [1.0, 1.0, 1.0] # Units for each spacing element (size is same with `ndim`). - assert img.spacing_units() == ['', '', 'color'] + assert img.spacing_units() == ["", "", "color"] # Physical location of (0, 0, 0) (size is always 3). assert img.origin == [0.0, 0.0, 0.0] # Direction cosines (size is always 3x3). assert img.direction == [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]] # Coordinate frame in which the direction cosines are measured. # Available Coordinate frame is not finalized yet. - assert img.coord_sys == 'LPS' + assert img.coord_sys == "LPS" # Returns a set of associated image names. assert img.associated_images == set() # Returns a dict that includes resolution information. 
assert img.resolutions == { - 'level_count': 1, - 'level_dimensions': ((32, 24),), - 'level_downsamples': (1.0,), - 'level_tile_sizes': ((16, 16),) + "level_count": 1, + "level_dimensions": ((32, 24),), + "level_downsamples": (1.0,), + "level_tile_sizes": ((16, 16),), } # A metadata object as `dict` metadata = img.metadata @@ -72,7 +73,9 @@ def test_load_image_metadata(testimg_tiff_stripe_32x24_16): assert img.raw_metadata == '{"axes": "YXC", "shape": [24, 32, 3]}' -def test_load_image_resolution_metadata(testimg_tiff_stripe_4096_4096_256_jpeg_resolution): # noqa: E501 +def test_load_image_resolution_metadata( + testimg_tiff_stripe_4096_4096_256_jpeg_resolution, +): # noqa: E501 image, resolution = testimg_tiff_stripe_4096_4096_256_jpeg_resolution img = open_image_cucim(image) @@ -92,32 +95,46 @@ def test_load_image_resolution_metadata(testimg_tiff_stripe_4096_4096_256_jpeg_r spacing_unit = "" # Returns physical size in tuple. - assert all(map(lambda a, b: math.isclose(a, b, rel_tol=0.1), - img.spacing(), (y_spacing, x_spacing, 1.0))) + assert all( + map( + lambda a, b: math.isclose(a, b, rel_tol=0.1), + img.spacing(), + (y_spacing, x_spacing, 1.0), + ) + ) # Units for each spacing element (size is same with `ndim`). - assert img.spacing_units() == [spacing_unit, spacing_unit, 'color'] + assert img.spacing_units() == [spacing_unit, spacing_unit, "color"] # A metadata object as `dict` metadata = img.metadata print(metadata) assert isinstance(metadata, dict) assert len(metadata) == 2 # 'cucim' and 'tiff' - assert math.isclose(metadata['tiff']['x_resolution'], - x_resolution, rel_tol=0.00001) - assert math.isclose(metadata['tiff']['y_resolution'], - y_resolution, rel_tol=0.00001) + assert math.isclose( + metadata["tiff"]["x_resolution"], x_resolution, rel_tol=0.00001 + ) + assert math.isclose( + metadata["tiff"]["y_resolution"], y_resolution, rel_tol=0.00001 + ) unit_value = resolution_unit.lower() if resolution_unit != "NONE" else "" - assert metadata['tiff']['resolution_unit'] == unit_value + assert metadata["tiff"]["resolution_unit"] == unit_value # Check if lower resolution image's metadata has lower physical spacing. 
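    # (Sketch of the expectation checked in the loop below: for a level with
    # `level_downsamples[level] == 2.0`, each reported spacing component
    # should be roughly `spacing(level 0) / 2.0`, within the 10% relative
    # tolerance used by `math.isclose`.)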
- num_levels = img.resolutions['level_count'] + num_levels = img.resolutions["level_count"] for level in range(num_levels): lowres_img = img.read_region((0, 0), (100, 100), level=level) lowres_downsample = img.resolutions["level_downsamples"][level] - assert all(map(lambda a, b: math.isclose(a, b, rel_tol=0.1), - lowres_img.spacing(), - (y_spacing / lowres_downsample, - x_spacing / lowres_downsample, 1.0))) + assert all( + map( + lambda a, b: math.isclose(a, b, rel_tol=0.1), + lowres_img.spacing(), + ( + y_spacing / lowres_downsample, + x_spacing / lowres_downsample, + 1.0, + ), + ) + ) def test_load_rgba_image_metadata(tmpdir): @@ -127,15 +144,17 @@ def test_load_rgba_image_metadata(tmpdir): """ import numpy as np from tifffile import imwrite + from cucim import CuImage # Test with a 4-channel image img_array = np.ones((32, 32, 3)).astype(np.uint8) - print(f'RGB image shape: {img_array.shape}') + print(f"RGB image shape: {img_array.shape}") img_array = np.concatenate( [img_array, 255 * np.ones_like(img_array[..., 0])[..., np.newaxis]], - axis=2) - print(f'RGBA image shape: {img_array.shape}') + axis=2, + ) + print(f"RGBA image shape: {img_array.shape}") file_path_4ch = str(tmpdir.join("small_rgba_4ch.tiff")) imwrite(file_path_4ch, img_array, shape=img_array.shape, tile=(16, 16)) @@ -148,8 +167,9 @@ def test_load_rgba_image_metadata(tmpdir): # Test with a 1-channel image img_1ch_array = np.ones((32, 32, 1)).astype(np.uint8) file_path_1ch = str(tmpdir.join("small_rgba_1ch.tiff")) - imwrite(file_path_1ch, img_1ch_array, - shape=img_1ch_array.shape, tile=(16, 16)) + imwrite( + file_path_1ch, img_1ch_array, shape=img_1ch_array.shape, tile=(16, 16) + ) obj = CuImage(file_path_1ch) assert obj.shape == [32, 32, 4] @@ -163,9 +183,11 @@ def test_load_slow_path_warning(tmpdir, capfd): - https://github.com/rapidsai/cucim/issues/230 """ - import numpy as np import re + + import numpy as np from tifffile import imwrite + from cucim import CuImage # Test with a 1-channel image @@ -187,7 +209,7 @@ def test_load_slow_path_warning(tmpdir, capfd): # Check the warning message warning_message = re.findall( - r"\[Warning\] Loading image\('.*'\) with a slow-path", - captured.err) + r"\[Warning\] Loading image\('.*'\) with a slow-path", captured.err + ) assert len(captured.err) > 0 assert len(warning_message) == 2 diff --git a/python/cucim/tests/unit/clara/test_tiff_read_region.py b/python/cucim/tests/unit/clara/test_tiff_read_region.py index 4428742d1..9fafe431c 100644 --- a/python/cucim/tests/unit/clara/test_tiff_read_region.py +++ b/python/cucim/tests/unit/clara/test_tiff_read_region.py @@ -21,18 +21,23 @@ def test_tiff_stripe_inner(testimg_tiff_stripe_32x24_16): cucim_img = open_image_cucim(testimg_tiff_stripe_32x24_16) - width, height = cucim_img.size('XY') - tile_width, tile_height = cucim_img.resolutions['level_tile_sizes'][0] + width, height = cucim_img.size("XY") + tile_width, tile_height = cucim_img.resolutions["level_tile_sizes"][0] # List of ((, ), (), ()) - region_list = [((0, 0), (width, height)), # whole - ((0, 0), (tile_width // 2, tile_height // 2)), # left-top - ((tile_width // 2, tile_height // 2), - (tile_width, tile_height)), # middle - ((width - (tile_width // 2), height - (tile_height // 2)), - (tile_width // 2, tile_height // 2)), # right-bottom - ] - for (start_pos, size) in region_list: + region_list = [ + ((0, 0), (width, height)), # whole + ((0, 0), (tile_width // 2, tile_height // 2)), # left-top + ( + (tile_width // 2, tile_height // 2), + (tile_width, tile_height), + ), # middle + 
( + (width - (tile_width // 2), height - (tile_height // 2)), + (tile_width // 2, tile_height // 2), + ), # right-bottom + ] + for start_pos, size in region_list: cucim_arr = np.asarray(cucim_img.read_region(start_pos, size)) # Not all channel values are zero, so we need to check that. @@ -42,21 +47,30 @@ def test_tiff_stripe_inner(testimg_tiff_stripe_32x24_16): def test_tiff_stripe_boundary(testimg_tiff_stripe_32x24_16): cucim_img = open_image_cucim(testimg_tiff_stripe_32x24_16) - width, height = cucim_img.size('XY') - tile_width, tile_height = cucim_img.resolutions['level_tile_sizes'][0] + width, height = cucim_img.size("XY") + tile_width, tile_height = cucim_img.resolutions["level_tile_sizes"][0] # List of ((, ), (), ()) - region_list = [((-(tile_width // 2), -(tile_height // 2)), - (tile_width, tile_height)), # left top - ((width - (tile_width // 2), -(tile_height // 2)), - (tile_width, tile_height)), # right top - ((-(tile_width // 2), height - (tile_height // 2)), - (tile_width, tile_height)), # left bottom - ((width - (tile_width // 2), height - (tile_height // 2)), - (tile_width, tile_height)), # right bottom - ] - - for (start_pos, size) in region_list: + region_list = [ + ( + (-(tile_width // 2), -(tile_height // 2)), + (tile_width, tile_height), + ), # left top + ( + (width - (tile_width // 2), -(tile_height // 2)), + (tile_width, tile_height), + ), # right top + ( + (-(tile_width // 2), height - (tile_height // 2)), + (tile_width, tile_height), + ), # left bottom + ( + (width - (tile_width // 2), height - (tile_height // 2)), + (tile_width, tile_height), + ), # right bottom + ] + + for start_pos, size in region_list: cucim_arr = np.asarray(cucim_img.read_region(start_pos, size)) # Not all channel values are zero, so we need to check that. channel_value_count = np.count_nonzero(cucim_arr, axis=2) @@ -67,17 +81,22 @@ def test_tiff_stripe_boundary(testimg_tiff_stripe_32x24_16): def test_tiff_stripe_outside(testimg_tiff_stripe_32x24_16): cucim_img = open_image_cucim(testimg_tiff_stripe_32x24_16) - width, height = cucim_img.size('XY') - tile_width, tile_height = cucim_img.resolutions['level_tile_sizes'][0] + width, height = cucim_img.size("XY") + tile_width, tile_height = cucim_img.resolutions["level_tile_sizes"][0] # List of ((, ), (), ()) - region_list = [((-width - tile_width, -height - tile_height), - (tile_width, tile_height)), # left top (outside) - ((width + tile_height, height + tile_height), - (tile_width, tile_height)), # right bottom (outside) - ] - - for (start_pos, size) in region_list: + region_list = [ + ( + (-width - tile_width, -height - tile_height), + (tile_width, tile_height), + ), # left top (outside) + ( + (width + tile_height, height + tile_height), + (tile_width, tile_height), + ), # right bottom (outside) + ] + + for start_pos, size in region_list: cucim_arr = np.asarray(cucim_img.read_region(start_pos, size)) # All channel values should be zero, so we need to check that. 
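        # (Illustrative note: both entries in `region_list` above lie
        # entirely outside the image bounds, so `read_region` is expected to
        # return a fully zero-filled buffer for them; the count below checks
        # exactly that.)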
channel_value_count = np.count_nonzero(cucim_arr, axis=2) @@ -98,7 +117,7 @@ def test_tiff_outside_of_resolution_level(testimg_tiff_stripe_4096x4096_256): def test_tiff_stripe_multiresolution(testimg_tiff_stripe_4096x4096_256): cucim_img = open_image_cucim(testimg_tiff_stripe_4096x4096_256) - level_count = cucim_img.resolutions['level_count'] + level_count = cucim_img.resolutions["level_count"] assert level_count == 6 start_pos, size = ((0, 0), (256, 256)) @@ -107,16 +126,17 @@ def test_tiff_stripe_multiresolution(testimg_tiff_stripe_4096x4096_256): # Not all channel values are zero, so we need to check that. channel_value_count = np.count_nonzero(cucim_arr, axis=2) count_all_zero = np.count_nonzero(channel_value_count == 0) - img_size = cucim_img.resolutions['level_dimensions'][level] + img_size = cucim_img.resolutions["level_dimensions"][level] # Only outside of the box is zero. - assert count_all_zero == 256 * 256 - (min(img_size[0], 256) * - min(img_size[1], 256)) + assert count_all_zero == 256 * 256 - ( + min(img_size[0], 256) * min(img_size[1], 256) + ) def test_region_image_level_data(testimg_tiff_stripe_4096x4096_256): cucim_img = open_image_cucim(testimg_tiff_stripe_4096x4096_256) - level_count = cucim_img.resolutions['level_count'] + level_count = cucim_img.resolutions["level_count"] start_pos, size = ((-10, -20), (300, 400)) for level in range(level_count): @@ -133,7 +153,7 @@ def test_region_image_dtype(testimg_tiff_stripe_4096x4096_256): cucim_img = open_image_cucim(testimg_tiff_stripe_4096x4096_256) - level_count = cucim_img.resolutions['level_count'] + level_count = cucim_img.resolutions["level_count"] start_pos, size = ((0, 10), (20, 30)) for level in range(level_count): @@ -150,29 +170,29 @@ def test_array_interface_support(testimg_tiff_stripe_32x24_16_jpeg): # {'data': (45867600, False), 'strides': None, # 'descr': [('', '|u1')], 'typestr': '|u1', # 'shape': (24, 32, 3), 'version': 3} - assert array_interface['data'][0] is not None - assert not array_interface['data'][1] - assert array_interface['strides'] is None - assert array_interface['descr'] - assert array_interface['shape'] == tuple(whole_img.shape) - assert array_interface['version'] == 3 + assert array_interface["data"][0] is not None + assert not array_interface["data"][1] + assert array_interface["strides"] is None + assert array_interface["descr"] + assert array_interface["shape"] == tuple(whole_img.shape) + assert array_interface["version"] == 3 def test_cuda_array_interface_support(testimg_tiff_stripe_32x24_16_jpeg): img = open_image_cucim(testimg_tiff_stripe_32x24_16_jpeg) - whole_img = img.read_region(device='cuda') + whole_img = img.read_region(device="cuda") array_interface = whole_img.__cuda_array_interface__ print(array_interface) # {'data': (81888083968, False), 'strides': None, # 'descr': [('', '|u1')], 'typestr': '|u1', 'shape': (24, 32, 3), # 'version': 3, 'mask': None, 'stream': 1} - assert array_interface['data'][0] is not None - assert not array_interface['data'][1] - assert array_interface['strides'] is None - assert array_interface['descr'] - assert array_interface['typestr'] - assert array_interface['shape'] == tuple(whole_img.shape) - assert array_interface['version'] == 3 - assert array_interface['mask'] is None - assert array_interface['stream'] == 1 + assert array_interface["data"][0] is not None + assert not array_interface["data"][1] + assert array_interface["strides"] is None + assert array_interface["descr"] + assert array_interface["typestr"] + assert array_interface["shape"] == 
tuple(whole_img.shape) + assert array_interface["version"] == 3 + assert array_interface["mask"] is None + assert array_interface["stream"] == 1 diff --git a/python/cucim/tests/unit/core/test_stain_normalizer.py b/python/cucim/tests/unit/core/test_stain_normalizer.py index ee66ecf67..aa969c523 100644 --- a/python/cucim/tests/unit/core/test_stain_normalizer.py +++ b/python/cucim/tests/unit/core/test_stain_normalizer.py @@ -16,18 +16,23 @@ import cupy as cp import pytest -from cucim.core.operations.color import (normalize_colors_pca, - stain_extraction_pca) +from cucim.core.operations.color import ( + normalize_colors_pca, + stain_extraction_pca, +) -class TestStainExtractorMacenko(): +class TestStainExtractorMacenko: @pytest.mark.parametrize( - 'image, ErrorClass', + "image, ErrorClass", [ - (cp.full((3, 2, 4), -1), ValueError), # negative value + (cp.full((3, 2, 4), -1), ValueError), # negative value (cp.full((3, 2, 4), 256), ValueError), # out of range value (None, TypeError), - (cp.full((3, 2, 4), 240), ValueError), # uniformly below the beta threshold # noqa + ( + cp.full((3, 2, 4), 240), + ValueError, + ), # uniformly below the beta threshold # noqa ], ) def test_transparent_image(self, image, ErrorClass): @@ -42,12 +47,12 @@ def test_transparent_image(self, image, ErrorClass): stain_extraction_pca(image) @pytest.mark.parametrize( - 'image', + "image", [ None, cp.full((3, 2, 4), 100), # uniform, above beta absorbance thresh. cp.full((3, 2, 4), 150), # uniform, above beta absorbance thresh. - ] + ], ) def test_identical_result_vectors(self, image): """ @@ -66,7 +71,7 @@ def test_identical_result_vectors(self, image): cp.testing.assert_array_equal(result[:, 0], result[:, 1]) @pytest.mark.parametrize( - 'image, expected', + "image, expected", [ (None, None), # uniformly zero -> two identical stains extracted @@ -76,9 +81,9 @@ def test_identical_result_vectors(self, image): [ [0.0, 0.0], [0.70710678, 0.70710678], - [0.70710678, 0.70710678] + [0.70710678, 0.70710678], ] - ) + ), ), # input pixels not uniformly filled, leading to two different # stains extracted @@ -98,7 +103,7 @@ def test_identical_result_vectors(self, image): ] ), ), - ] + ], ) def test_result_value(self, image, expected): """ @@ -135,15 +140,15 @@ def test_result_value(self, image, expected): cp.testing.assert_allclose(result, expected) -class TestStainNormalizerMacenko(): +class TestStainNormalizerMacenko: @pytest.mark.parametrize( - 'image', + "image", [ - cp.full((3, 2, 4), -1), # negative value case + cp.full((3, 2, 4), -1), # negative value case cp.full((3, 2, 4), 256), # out of range value None, cp.full((3, 2, 5), 240), # uniformly below the beta threshold - ] + ], ) def test_transparent_image(self, image): """ @@ -161,7 +166,7 @@ def test_transparent_image(self, image): normalize_colors_pca(image) @pytest.mark.parametrize( - 'kwargs, image, expected', + "kwargs, image, expected", [ # 1.) 
invalid image ({}, None, None), @@ -234,7 +239,7 @@ def test_transparent_image(self, image): ] ), ], - ] + ], ) def test_result_value(self, kwargs, image, expected): """Test that an input image returns an expected normalized image.""" diff --git a/python/cucim/tests/unit/test_init.py b/python/cucim/tests/unit/test_init.py index 6cd7d5fd9..3cbb817ef 100644 --- a/python/cucim/tests/unit/test_init.py +++ b/python/cucim/tests/unit/test_init.py @@ -16,29 +16,33 @@ def test_is_available(): - with patch('cucim._is_cupy_available', False): - with patch('cucim._is_clara_available', False): + with patch("cucim._is_cupy_available", False): + with patch("cucim._is_clara_available", False): import cucim + assert cucim.is_available() is False assert cucim.is_available("skimage") is False assert cucim.is_available("clara") is False assert cucim.is_available("unknown") is False - with patch('cucim._is_clara_available', True): + with patch("cucim._is_clara_available", True): import cucim + assert cucim.is_available() is False assert cucim.is_available("skimage") is False assert cucim.is_available("clara") is True assert cucim.is_available("unknown") is False - with patch('cucim._is_cupy_available', True): - with patch('cucim._is_clara_available', False): + with patch("cucim._is_cupy_available", True): + with patch("cucim._is_clara_available", False): import cucim + assert cucim.is_available() is False assert cucim.is_available("skimage") is True assert cucim.is_available("clara") is False assert cucim.is_available("unknown") is False - with patch('cucim._is_clara_available', True): + with patch("cucim._is_clara_available", True): import cucim + assert cucim.is_available() is True assert cucim.is_available("skimage") is True assert cucim.is_available("clara") is True diff --git a/python/cucim/tests/util/gen_image.py b/python/cucim/tests/util/gen_image.py index 645db1f79..9334c691c 100644 --- a/python/cucim/tests/util/gen_image.py +++ b/python/cucim/tests/util/gen_image.py @@ -16,6 +16,7 @@ import argparse import logging import os + import tifffile try: @@ -23,9 +24,7 @@ except ImportError: from gen_tiff import TiffGenerator -GENERATOR_MAP = { - 'tiff': TiffGenerator() -} +GENERATOR_MAP = {"tiff": TiffGenerator()} class ImageGenerator: @@ -38,29 +37,30 @@ def __init__(self, dest, recipes, resolutions=None, logger=None): resolutions = [(1, 1, "CENTIMETER")] * len(recipes) if len(resolutions) != len(recipes): raise RuntimeError( - 'Number of resolutions must be equal to number of recipes') + "Number of resolutions must be equal to number of recipes" + ) self.resolutions = resolutions def gen(self): - results = [] for recipe, resolution in zip(self.recipes, self.resolutions): - items = recipe.split(':') + items = recipe.split(":") item_len = len(items) if not (1 <= item_len <= 6): raise RuntimeError( - 'Value should be ' - + 'type[:subpath:pattern:image_size:tile_size:compression]' - + ' format') + "Value should be " + + "type[:subpath:pattern:image_size:tile_size:compression]" + + " format" + ) kind = items[0] - subpath = '' if item_len == 1 else items[1] - pattern = 'stripe' if item_len <= 2 else items[2] - image_size_str = '32x24' if item_len <= 3 else items[3] - image_size = list(map(lambda x: int(x), image_size_str.split('x'))) + subpath = "" if item_len == 1 else items[1] + pattern = "stripe" if item_len <= 2 else items[2] + image_size_str = "32x24" if item_len <= 3 else items[3] + image_size = list(map(lambda x: int(x), image_size_str.split("x"))) tile_size = 16 if item_len <= 4 else int(items[4]) - 
compression = 'jpeg' if item_len <= 5 else items[5]
+            compression = "jpeg" if item_len <= 5 else items[5]
             dest_folder = os.path.join(self.dest, subpath)
             os.makedirs(dest_folder, exist_ok=True)
@@ -68,18 +68,21 @@ def gen(self):
             generator_obj = GENERATOR_MAP.get(kind)
             if generator_obj is None:
                 raise RuntimeError(
-                    "There is no generator for '{}'".format(kind))
+                    "There is no generator for '{}'".format(kind)
+                )
             image_data = generator_obj.get_image(
-                pattern=pattern, image_size=image_size)
+                pattern=pattern, image_size=image_size
+            )
             if image_data is None:
                 raise RuntimeError(
-                    f'No data generated from [pattern={pattern},'
-                    + f' image_size={image_size}, tile_size={tile_size},'
-                    + f' compression={compression}, resolution={resolution}].')
+                    f"No data generated from [pattern={pattern},"
+                    + f" image_size={image_size}, tile_size={tile_size},"
+                    + f" compression={compression}, resolution={resolution}]."
+                )
-            file_name = f'{kind}_{pattern}_{image_size_str}_{tile_size}'
+            file_name = f"{kind}_{pattern}_{image_size_str}_{tile_size}"
             if resolution is None or len(resolution) == 2:
                 unit = None
             elif len(resolution) == 3:
@@ -87,40 +90,43 @@
                 unit = resolution[2]
                 resolution = resolution[:2]
             if unit is None:
                 unit = tifffile.RESUNIT.NONE
-            image_path = generator_obj.save_image(image_data,
-                                                  dest_folder,
-                                                  file_name=file_name,
-                                                  kind=kind,
-                                                  subpath=subpath,
-                                                  pattern=pattern,
-                                                  image_size=image_size,
-                                                  tile_size=tile_size,
-                                                  compression=compression,
-                                                  resolution=resolution,
-                                                  resolutionunit=unit)
-            self.logger.info(' Generated %s...', image_path)
+            image_path = generator_obj.save_image(
+                image_data,
+                dest_folder,
+                file_name=file_name,
+                kind=kind,
+                subpath=subpath,
+                pattern=pattern,
+                image_size=image_size,
+                tile_size=tile_size,
+                compression=compression,
+                resolution=resolution,
+                resolutionunit=unit,
+            )
+            self.logger.info(" Generated %s...", image_path)
             results.append(image_path)
-        self.logger.info('[Finished] Dataset generation')
+        self.logger.info("[Finished] Dataset generation")
         return results
 def main():
     logging.basicConfig(level=logging.INFO)
-    parser = argparse.ArgumentParser(
-        description='Generate Image Data')
+    parser = argparse.ArgumentParser(description="Generate Image Data")
     parser.add_argument(
-        'recipes',
-        metavar='type[:subpath:pattern:image_size:tile_size:compression]',
-        default=['tiff::stripe:32x24:16'], nargs='+',
-        help='data set type with pattern to write '
-        + '(default: tiff::stripe:32x24:16:jpeg')
-
-    parser.add_argument('--dest', '-d', default='.', help='destination folder')
+        "recipes",
+        metavar="type[:subpath:pattern:image_size:tile_size:compression]",
+        default=["tiff::stripe:32x24:16"],
+        nargs="+",
+        help="data set type with pattern to write "
+        + "(default: tiff::stripe:32x24:16:jpeg)",
+    )
+
+    parser.add_argument("--dest", "-d", default=".", help="destination folder")
     args = parser.parse_args()
     generator = ImageGenerator(args.dest, args.recipes)
     generator.gen()
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/python/cucim/tests/util/gen_tiff.py b/python/cucim/tests/util/gen_tiff.py
index 324f8db14..f3ee8231b 100644
--- a/python/cucim/tests/util/gen_tiff.py
+++ b/python/cucim/tests/util/gen_tiff.py
@@ -20,13 +20,10 @@
 import numpy as np
 from tifffile import TiffWriter
-COMPRESSION_MAP = {'jpeg': ('jpeg', 95),
-                   'deflate': 'deflate',
-                   'raw': None}
+COMPRESSION_MAP = {"jpeg": ("jpeg", 95), "deflate": "deflate", "raw": None}
 class TiffGenerator:
-
     def get_image(self, pattern, image_size):
         try:
             func = getattr(self, pattern)
@@ -37,9 +34,20 @@
def get_image(self, pattern, image_size): return func(image_size) return None - def save_image(self, image_data, dest_folder, file_name, kind, subpath, - pattern, image_size, tile_size, compression, resolution, - resolutionunit): + def save_image( + self, + image_data, + dest_folder, + file_name, + kind, + subpath, + pattern, + image_size, + tile_size, + compression, + resolution, + resolutionunit, + ): # You can add pyramid images (0: largest resolution) if isinstance(image_data, list): arr_stack = image_data @@ -54,7 +62,8 @@ def save_image(self, image_data, dest_folder, file_name, kind, subpath, # save as tif tiff_file_name = str( - (Path(dest_folder) / f'{file_name}.tif').absolute()) + (Path(dest_folder) / f"{file_name}.tif").absolute() + ) level_resolution = None with TiffWriter(tiff_file_name, bigtiff=True) as tif: @@ -62,8 +71,10 @@ def save_image(self, image_data, dest_folder, file_name, kind, subpath, src_arr = arr_stack[level] if resolution: - level_resolution = (resolution[0] / (level + 1), - resolution[1] / (level + 1)) + level_resolution = ( + resolution[0] / (level + 1), + resolution[1] / (level + 1), + ) tif.write( src_arr, @@ -80,7 +91,6 @@ def save_image(self, image_data, dest_folder, file_name, kind, subpath, return tiff_file_name def stripe(self, image_size): - if 256 <= image_size[0] <= 4096: pyramid = True else: @@ -102,9 +112,10 @@ def stripe(self, image_size): area = reduce(lambda x, y: x * y, size) # Use mmap if image size is larger than 1GB if area * 3 > 2**20 * 1024: - file_name = str(Path(mkdtemp()) / 'memmap.dat') - array = np.memmap(file_name, dtype=np.uint8, - mode='w+', shape=tuple(shape)) + file_name = str(Path(mkdtemp()) / "memmap.dat") + array = np.memmap( + file_name, dtype=np.uint8, mode="w+", shape=tuple(shape) + ) else: array = np.zeros(shape, dtype=np.uint8) diff --git a/python/cucim/tests/util/io.py b/python/cucim/tests/util/io.py index 8192d176f..aecb76ad2 100644 --- a/python/cucim/tests/util/io.py +++ b/python/cucim/tests/util/io.py @@ -13,7 +13,9 @@ # limitations under the License. # + def open_image_cucim(file_path): from cucim import CuImage + img = CuImage(file_path) return img diff --git a/python/cucim/tox.ini b/python/cucim/tox.ini index 522be886a..04fe6475b 100644 --- a/python/cucim/tox.ini +++ b/python/cucim/tox.ini @@ -48,7 +48,8 @@ commands = deps = docutils check-manifest - flake8 + black + ruff readme-renderer pygments isort @@ -59,7 +60,8 @@ skip_install = true commands = twine check dist/*.whl check-manifest {toxinidir} - flake8 + ruff . + black --check . isort --verbose --check-only --diff --filter-files . [testenv:docs] diff --git a/python/cucim/versioneer.py b/python/cucim/versioneer.py deleted file mode 100644 index 3aae392db..000000000 --- a/python/cucim/versioneer.py +++ /dev/null @@ -1,1825 +0,0 @@ - -# Version: 0.18 - -"""The Versioneer - like a rocketeer, but for versions. - -The Versioneer -============== - -* like a rocketeer, but for versions! -* https://github.com/warner/python-versioneer -* Brian Warner -* License: Public Domain -* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, 3.5, 3.6, and pypy -* [![Latest Version] -(https://pypip.in/version/versioneer/badge.svg?style=flat) -](https://pypi.python.org/pypi/versioneer/) -* [![Build Status] -(https://travis-ci.org/warner/python-versioneer.png?branch=master) -](https://travis-ci.org/warner/python-versioneer) - -This is a tool for managing a recorded version number in distutils-based -python projects. 
The goal is to remove the tedious and error-prone "update -the embedded version string" step from your release process. Making a new -release should be as easy as recording a new tag in your version-control -system, and maybe making new tarballs. - - -## Quick Install - -* `pip install versioneer` to somewhere to your $PATH -* add a `[versioneer]` section to your setup.cfg (see below) -* run `versioneer install` in your source tree, commit the results - -## Version Identifiers - -Source trees come from a variety of places: - -* a version-control system checkout (mostly used by developers) -* a nightly tarball, produced by build automation -* a snapshot tarball, produced by a web-based VCS browser, like github's - "tarball from tag" feature -* a release tarball, produced by "setup.py sdist", distributed through PyPI - -Within each source tree, the version identifier (either a string or a number, -this tool is format-agnostic) can come from a variety of places: - -* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows - about recent "tags" and an absolute revision-id -* the name of the directory into which the tarball was unpacked -* an expanded VCS keyword ($Id$, etc) -* a `_version.py` created by some earlier build step - -For released software, the version identifier is closely related to a VCS -tag. Some projects use tag names that include more than just the version -string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool -needs to strip the tag prefix to extract the version identifier. For -unreleased software (between tags), the version identifier should provide -enough information to help developers recreate the same tree, while also -giving them an idea of roughly how old the tree is (after version 1.2, before -version 1.3). Many VCS systems can report a description that captures this, -for example `git describe --tags --dirty --always` reports things like -"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the -0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has -uncommitted changes. - -The version identifier is used for multiple purposes: - -* to allow the module to self-identify its version: `myproject.__version__` -* to choose a name and prefix for a 'setup.py sdist' tarball - -## Theory of Operation - -Versioneer works by adding a special `_version.py` file into your source -tree, where your `__init__.py` can import it. This `_version.py` knows how to -dynamically ask the VCS tool for version information at import time. - -`_version.py` also contains `$Revision$` markers, and the installation -process marks `_version.py` to have this marker rewritten with a tag name -during the `git archive` command. As a result, generated tarballs will -contain enough information to get the proper version. - -To allow `setup.py` to compute a version too, a `versioneer.py` is added to -the top level of your source tree, next to `setup.py` and the `setup.cfg` -that configures it. This overrides several distutils/setuptools commands to -compute the version when invoked, and changes `setup.py build` and `setup.py -sdist` to replace `_version.py` with a small static file that contains just -the generated version data. - -## Installation - -See [INSTALL.md](./INSTALL.md) for detailed installation instructions. - -## Version-String Flavors - -Code which uses Versioneer can learn about its version string at runtime by -importing `_version` from your main `__init__.py` file and running the -`get_versions()` function. 
From the "outside" (e.g. in `setup.py`), you can -import the top-level `versioneer.py` and run `get_versions()`. - -Both functions return a dictionary with different flavors of version -information: - -* `['version']`: A condensed version string, rendered using the selected - style. This is the most commonly used value for the project's version - string. The default "pep440" style yields strings like `0.11`, - `0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section - below for alternative styles. - -* `['full-revisionid']`: detailed revision identifier. For Git, this is the - full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac". - -* `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the - commit date in ISO 8601 format. This will be None if the date is not - available. - -* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that - this is only accurate if run in a VCS checkout, otherwise it is likely to - be False or None - -* `['error']`: if the version string could not be computed, this will be set - to a string describing the problem, otherwise it will be None. It may be - useful to throw an exception in setup.py if this is set, to avoid e.g. - creating tarballs with a version string of "unknown". - -Some variants are more useful than others. Including `full-revisionid` in a -bug report should allow developers to reconstruct the exact code being tested -(or indicate the presence of local changes that should be shared with the -developers). `version` is suitable for display in an "about" box or a CLI -`--version` output: it can be easily compared against release notes and lists -of bugs fixed in various releases. - -The installer adds the following text to your `__init__.py` to place a basic -version in `YOURPROJECT.__version__`: - - from ._version import get_versions - __version__ = get_versions()['version'] - del get_versions - -## Styles - -The setup.cfg `style=` configuration controls how the VCS information is -rendered into a version string. - -The default style, "pep440", produces a PEP440-compliant string, equal to the -un-prefixed tag name for actual releases, and containing an additional "local -version" section with more detail for in-between builds. For Git, this is -TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags ---dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the -tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and -that this commit is two revisions ("+2") beyond the "0.11" tag. For released -software (exactly equal to a known tag), the identifier will only contain the -stripped tag, e.g. "0.11". - -Other styles are available. See [details.md](details.md) in the Versioneer -source tree for descriptions. - -## Debugging - -Versioneer tries to avoid fatal errors: if something goes wrong, it will tend -to return a version of "0+unknown". To investigate the problem, run `setup.py -version`, which will run the version-lookup code in a verbose mode, and will -display the full contents of `get_versions()` (including the `error` string, -which may help identify what went wrong). - -## Known Limitations - -Some situations are known to cause problems for Versioneer. This details the -most significant ones. More can be found on Github -[issues page](https://github.com/warner/python-versioneer/issues). - -### Subprojects - -Versioneer has limited support for source trees in which `setup.py` is not in -the root directory (e.g. 
`setup.py` and `.git/` are *not* siblings). The are -two common reasons why `setup.py` might not be in the root: - -* Source trees which contain multiple subprojects, such as - [Buildbot](https://github.com/buildbot/buildbot), which contains both - "master" and "slave" subprojects, each with their own `setup.py`, - `setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI - distributions (and upload multiple independently-installable tarballs). -* Source trees whose main purpose is to contain a C library, but which also - provide bindings to Python (and perhaps other langauges) in subdirectories. - -Versioneer will look for `.git` in parent directories, and most operations -should get the right version string. However `pip` and `setuptools` have bugs -and implementation details which frequently cause `pip install .` from a -subproject directory to fail to find a correct version string (so it usually -defaults to `0+unknown`). - -`pip install --editable .` should work correctly. `setup.py install` might -work too. - -Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in -some later version. - -[Bug #38](https://github.com/warner/python-versioneer/issues/38) is tracking -this issue. The discussion in -[PR #61](https://github.com/warner/python-versioneer/pull/61) describes the -issue from the Versioneer side in more detail. -[pip PR#3176](https://github.com/pypa/pip/pull/3176) and -[pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve -pip to let Versioneer work correctly. - -Versioneer-0.16 and earlier only looked for a `.git` directory next to the -`setup.cfg`, so subprojects were completely unsupported with those releases. - -### Editable installs with setuptools <= 18.5 - -`setup.py develop` and `pip install --editable .` allow you to install a -project into a virtualenv once, then continue editing the source code (and -test) without re-installing after every change. - -"Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a -convenient way to specify executable scripts that should be installed along -with the python package. - -These both work as expected when using modern setuptools. When using -setuptools-18.5 or earlier, however, certain operations will cause -`pkg_resources.DistributionNotFound` errors when running the entrypoint -script, which must be resolved by re-installing the package. This happens -when the install happens with one version, then the egg_info data is -regenerated while a different version is checked out. Many setup.py commands -cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into -a different virtualenv), so this can be surprising. - -[Bug #83](https://github.com/warner/python-versioneer/issues/83) describes -this one, but upgrading to a newer version of setuptools should probably -resolve it. - -### Unicode version strings - -While Versioneer works (and is continually tested) with both Python 2 and -Python 3, it is not entirely consistent with bytes-vs-unicode distinctions. -Newer releases probably generate unicode version strings on py2. It's not -clear that this is wrong, but it may be surprising for applications when then -write these strings to a network connection or include them in bytes-oriented -APIs like cryptographic checksums. - -[Bug #71](https://github.com/warner/python-versioneer/issues/71) investigates -this question. 
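
For reference, the version-string flavors described above are the keys of the dictionary returned by `get_versions()`. A minimal sketch of consuming that dictionary (an illustration only, assuming a project that still vendors this `versioneer.py` at its root next to `setup.py` and a configured `setup.cfg`); the hard failure on `error` follows the docstring's own suggestion for avoiding tarballs versioned as "unknown":

```python
# Minimal sketch, not part of this repository: assumes a vendored
# versioneer.py (like the one deleted here) importable from the project root.
import versioneer

versions = versioneer.get_versions()

# The docstring above suggests raising in setup.py when 'error' is set,
# to avoid e.g. creating tarballs with a version string of "unknown".
if versions["error"]:
    raise RuntimeError("cannot compute version: %s" % versions["error"])

print(versions["version"])          # e.g. "0.11+2.g1076c97.dirty" (pep440 style)
print(versions["full-revisionid"])  # full SHA1 commit id
print(versions["dirty"])            # True if the checkout has uncommitted changes
print(versions["date"])             # ISO-8601 commit date, or None
```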
- - -## Updating Versioneer - -To upgrade your project to a new release of Versioneer, do the following: - -* install the new Versioneer (`pip install -U versioneer` or equivalent) -* edit `setup.cfg`, if necessary, to include any new configuration settings - indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details. -* re-run `versioneer install` in your source tree, to replace - `SRC/_version.py` -* commit any changed files - -## Future Directions - -This tool is designed to make it easily extended to other version-control -systems: all VCS-specific components are in separate directories like -src/git/ . The top-level `versioneer.py` script is assembled from these -components by running make-versioneer.py . In the future, make-versioneer.py -will take a VCS name as an argument, and will construct a version of -`versioneer.py` that is specific to the given VCS. It might also take the -configuration arguments that are currently provided manually during -installation by editing setup.py . Alternatively, it might go the other -direction and include code from all supported VCS systems, reducing the -number of intermediate scripts. - - -## License - -To make Versioneer easier to embed, all its code is dedicated to the public -domain. The `_version.py` that it creates is also in the public domain. -Specifically, both are released under the Creative Commons "Public Domain -Dedication" license (CC0-1.0), as described in -https://creativecommons.org/publicdomain/zero/1.0/ . - -""" - -from __future__ import print_function - -try: - import configparser -except ImportError: - import ConfigParser as configparser - -import errno -import json -import os -import re -import subprocess -import sys - - -class VersioneerConfig: - """Container for Versioneer configuration parameters.""" - - -def get_root(): - """Get the project root directory. - - We require that all commands are run from the project root, i.e. the - directory that contains setup.py, setup.cfg, and versioneer.py . - """ - root = os.path.realpath(os.path.abspath(os.getcwd())) - setup_py = os.path.join(root, "setup.py") - versioneer_py = os.path.join(root, "versioneer.py") - if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): - # allow 'python path/to/setup.py COMMAND' - root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0]))) - setup_py = os.path.join(root, "setup.py") - versioneer_py = os.path.join(root, "versioneer.py") - if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): - err = ("Versioneer was unable to run the project root directory. " - "Versioneer requires setup.py to be executed from " - "its immediate directory (like 'python setup.py COMMAND'), " - "or in a way that lets it use sys.argv[0] to find the root " - "(like 'python path/to/setup.py COMMAND').") - raise VersioneerBadRootError(err) - try: - # Certain runtime workflows (setup.py install/develop in a setuptools - # tree) execute all dependencies in a single python process, so - # "versioneer" may be imported multiple times, and python's shared - # module-import table will cache the first one. So we can't use - # os.path.dirname(__file__), as that will find whichever - # versioneer.py was first imported, even in later projects. 
- me = os.path.realpath(os.path.abspath(__file__)) - me_dir = os.path.normcase(os.path.splitext(me)[0]) - vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0]) - if me_dir != vsr_dir: - print("Warning: build in %s is using versioneer.py from %s" - % (os.path.dirname(me), versioneer_py)) - except NameError: - pass - return root - - -def get_config_from_root(root): - """Read the project setup.cfg file to determine Versioneer config.""" - # This might raise EnvironmentError (if setup.cfg is missing), or - # configparser.NoSectionError (if it lacks a [versioneer] section), or - # configparser.NoOptionError (if it lacks "VCS="). See the docstring at - # the top of versioneer.py for instructions on writing your setup.cfg . - setup_cfg = os.path.join(root, "setup.cfg") - parser = configparser.SafeConfigParser() - with open(setup_cfg, "r") as f: - parser.readfp(f) - VCS = parser.get("versioneer", "VCS") # mandatory - - def get(parser, name): - if parser.has_option("versioneer", name): - return parser.get("versioneer", name) - return None - cfg = VersioneerConfig() - cfg.VCS = VCS - cfg.style = get(parser, "style") or "" - cfg.versionfile_source = get(parser, "versionfile_source") - cfg.versionfile_build = get(parser, "versionfile_build") - cfg.tag_prefix = get(parser, "tag_prefix") - if cfg.tag_prefix in ("''", '""'): - cfg.tag_prefix = "" - cfg.parentdir_prefix = get(parser, "parentdir_prefix") - cfg.verbose = get(parser, "verbose") - return cfg - - -class NotThisMethod(Exception): - """Exception raised if a method is not valid for the current scenario.""" - - -# these dictionaries contain VCS-specific tools -LONG_VERSION_PY = {} -HANDLERS = {} - - -def register_vcs_handler(vcs, method): # decorator - """Decorator to mark a method as the handler for a particular VCS.""" - def decorate(f): - """Store f in HANDLERS[vcs][method].""" - if vcs not in HANDLERS: - HANDLERS[vcs] = {} - HANDLERS[vcs][method] = f - return f - return decorate - - -def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, - env=None): - """Call the given command(s).""" - assert isinstance(commands, list) - p = None - for c in commands: - try: - dispcmd = str([c] + args) - # remember shell=False, so use git.cmd on windows, not just git - p = subprocess.Popen([c] + args, cwd=cwd, env=env, - stdout=subprocess.PIPE, - stderr=(subprocess.PIPE if hide_stderr - else None)) - break - except EnvironmentError: - e = sys.exc_info()[1] - if e.errno == errno.ENOENT: - continue - if verbose: - print("unable to run %s" % dispcmd) - print(e) - return None, None - else: - if verbose: - print("unable to find command, tried %s" % (commands,)) - return None, None - stdout = p.communicate()[0].strip() - if sys.version_info[0] >= 3: - stdout = stdout.decode() - if p.returncode != 0: - if verbose: - print("unable to run %s (error)" % dispcmd) - print("stdout was %s" % stdout) - return None, p.returncode - return stdout, p.returncode - - -LONG_VERSION_PY['git'] = ''' -# This file helps to compute a version number in source trees obtained from -# git-archive tarball (such as those provided by githubs download-from-tag -# feature). Distribution tarballs (built by setup.py sdist) and build -# directories (produced by setup.py build) will contain a much shorter file -# that just contains the computed version number. - -# This file is released into the public domain. 
Generated by -# versioneer-0.18 (https://github.com/warner/python-versioneer) - -"""Git implementation of _version.py.""" - -import errno -import os -import re -import subprocess -import sys - - -def get_keywords(): - """Get the keywords needed to look up the version information.""" - # these strings will be replaced by git during git-archive. - # setup.py/versioneer.py will grep for the variable names, so they must - # each be defined on a line of their own. _version.py will just call - # get_keywords(). - git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s" - git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s" - git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s" - keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} - return keywords - - -class VersioneerConfig: - """Container for Versioneer configuration parameters.""" - - -def get_config(): - """Create, populate and return the VersioneerConfig() object.""" - # these strings are filled in when 'setup.py versioneer' creates - # _version.py - cfg = VersioneerConfig() - cfg.VCS = "git" - cfg.style = "%(STYLE)s" - cfg.tag_prefix = "%(TAG_PREFIX)s" - cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s" - cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s" - cfg.verbose = False - return cfg - - -class NotThisMethod(Exception): - """Exception raised if a method is not valid for the current scenario.""" - - -LONG_VERSION_PY = {} -HANDLERS = {} - - -def register_vcs_handler(vcs, method): # decorator - """Decorator to mark a method as the handler for a particular VCS.""" - def decorate(f): - """Store f in HANDLERS[vcs][method].""" - if vcs not in HANDLERS: - HANDLERS[vcs] = {} - HANDLERS[vcs][method] = f - return f - return decorate - - -def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, - env=None): - """Call the given command(s).""" - assert isinstance(commands, list) - p = None - for c in commands: - try: - dispcmd = str([c] + args) - # remember shell=False, so use git.cmd on windows, not just git - p = subprocess.Popen([c] + args, cwd=cwd, env=env, - stdout=subprocess.PIPE, - stderr=(subprocess.PIPE if hide_stderr - else None)) - break - except EnvironmentError: - e = sys.exc_info()[1] - if e.errno == errno.ENOENT: - continue - if verbose: - print("unable to run %%s" %% dispcmd) - print(e) - return None, None - else: - if verbose: - print("unable to find command, tried %%s" %% (commands,)) - return None, None - stdout = p.communicate()[0].strip() - if sys.version_info[0] >= 3: - stdout = stdout.decode() - if p.returncode != 0: - if verbose: - print("unable to run %%s (error)" %% dispcmd) - print("stdout was %%s" %% stdout) - return None, p.returncode - return stdout, p.returncode - - -def versions_from_parentdir(parentdir_prefix, root, verbose): - """Try to determine the version from the parent directory name. - - Source tarballs conventionally unpack into a directory that includes both - the project name and a version string. 
We will also support searching up - two directory levels for an appropriately named parent directory - """ - rootdirs = [] - - for i in range(3): - dirname = os.path.basename(root) - if dirname.startswith(parentdir_prefix): - return {"version": dirname[len(parentdir_prefix):], - "full-revisionid": None, - "dirty": False, "error": None, "date": None} - else: - rootdirs.append(root) - root = os.path.dirname(root) # up a level - - if verbose: - print("Tried directories %%s but none started with prefix %%s" %% - (str(rootdirs), parentdir_prefix)) - raise NotThisMethod("rootdir doesn't start with parentdir_prefix") - - -@register_vcs_handler("git", "get_keywords") -def git_get_keywords(versionfile_abs): - """Extract version information from the given file.""" - # the code embedded in _version.py can just fetch the value of these - # keywords. When used from setup.py, we don't want to import _version.py, - # so we do it with a regexp instead. This function is not used from - # _version.py. - keywords = {} - try: - f = open(versionfile_abs, "r") - for line in f.readlines(): - if line.strip().startswith("git_refnames ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["refnames"] = mo.group(1) - if line.strip().startswith("git_full ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["full"] = mo.group(1) - if line.strip().startswith("git_date ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["date"] = mo.group(1) - f.close() - except EnvironmentError: - pass - return keywords - - -@register_vcs_handler("git", "keywords") -def git_versions_from_keywords(keywords, tag_prefix, verbose): - """Get version information from git keywords.""" - if not keywords: - raise NotThisMethod("no keywords at all, weird") - date = keywords.get("date") - if date is not None: - # git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant - # datestamp. However we prefer "%%ci" (which expands to an "ISO-8601 - # -like" string, which we must then edit to make compliant), because - # it's been around since git-1.5.3, and it's too difficult to - # discover which version we're using, or to work around using an - # older one. - date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) - refnames = keywords["refnames"].strip() - if refnames.startswith("$Format"): - if verbose: - print("keywords are unexpanded, not using") - raise NotThisMethod("unexpanded keywords, not a git-archive tarball") - refs = set([r.strip() for r in refnames.strip("()").split(",")]) - # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of - # just "foo-1.0". If we see a "tag: " prefix, prefer those. - TAG = "tag: " - tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) - if not tags: - # Either we're using git < 1.8.3, or there really are no tags. We use - # a heuristic: assume all version tags have a digit. The old git %%d - # expansion behaves like git log --decorate=short and strips out the - # refs/heads/ and refs/tags/ prefixes that would let us distinguish - # between branches and tags. By ignoring refnames without digits, we - # filter out many common branch names like "release" and - # "stabilization", as well as "HEAD" and "master". - tags = set([r for r in refs if re.search(r'\d', r)]) - if verbose: - print("discarding '%%s', no digits" %% ",".join(refs - tags)) - if verbose: - print("likely tags: %%s" %% ",".join(sorted(tags))) - for ref in sorted(tags): - # sorting will prefer e.g. 
"2.0" over "2.0rc1" - if ref.startswith(tag_prefix): - r = ref[len(tag_prefix):] - if verbose: - print("picking %%s" %% r) - return {"version": r, - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": None, - "date": date} - # no suitable tags, so version is "0+unknown", but full hex is still there - if verbose: - print("no suitable tags, using unknown + full revision id") - return {"version": "0+unknown", - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": "no suitable tags", "date": None} - - -@register_vcs_handler("git", "pieces_from_vcs") -def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): - """Get version from 'git describe' in the root of the source tree. - - This only gets called if the git-archive 'subst' keywords were *not* - expanded, and _version.py hasn't already been rewritten with a short - version string, meaning we're inside a checked out source tree. - """ - GITS = ["git"] - if sys.platform == "win32": - GITS = ["git.cmd", "git.exe"] - - out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, - hide_stderr=True) - if rc != 0: - if verbose: - print("Directory %%s not under git control" %% root) - raise NotThisMethod("'git rev-parse --git-dir' returned error") - - # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] - # if there isn't one, this yields HEX[-dirty] (no NUM) - describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", - "--always", "--long", - "--match", "%%s*" %% tag_prefix], - cwd=root) - # --long was added in git-1.5.5 - if describe_out is None: - raise NotThisMethod("'git describe' failed") - describe_out = describe_out.strip() - full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) - if full_out is None: - raise NotThisMethod("'git rev-parse' failed") - full_out = full_out.strip() - - pieces = {} - pieces["long"] = full_out - pieces["short"] = full_out[:7] # maybe improved later - pieces["error"] = None - - # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] - # TAG might have hyphens. - git_describe = describe_out - - # look for -dirty suffix - dirty = git_describe.endswith("-dirty") - pieces["dirty"] = dirty - if dirty: - git_describe = git_describe[:git_describe.rindex("-dirty")] - - # now we have TAG-NUM-gHEX or HEX - - if "-" in git_describe: - # TAG-NUM-gHEX - mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) - if not mo: - # unparseable. Maybe git-describe is misbehaving? 
- pieces["error"] = ("unable to parse git-describe output: '%%s'" - %% describe_out) - return pieces - - # tag - full_tag = mo.group(1) - if not full_tag.startswith(tag_prefix): - if verbose: - fmt = "tag '%%s' doesn't start with prefix '%%s'" - print(fmt %% (full_tag, tag_prefix)) - pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'" - %% (full_tag, tag_prefix)) - return pieces - pieces["closest-tag"] = full_tag[len(tag_prefix):] - - # distance: number of commits since tag - pieces["distance"] = int(mo.group(2)) - - # commit: short hex revision ID - pieces["short"] = mo.group(3) - - else: - # HEX: no tags - pieces["closest-tag"] = None - count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], - cwd=root) - pieces["distance"] = int(count_out) # total number of commits - - # commit date: see ISO-8601 comment in git_versions_from_keywords() - date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"], - cwd=root)[0].strip() - pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) - - return pieces - - -def plus_or_dot(pieces): - """Return a + if we don't already have one, else return a .""" - if "+" in pieces.get("closest-tag", ""): - return "." - return "+" - - -def render_pep440(pieces): - """Build up version string, with post-release "local version identifier". - - Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you - get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty - - Exceptions: - 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += plus_or_dot(pieces) - rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - else: - # exception #1 - rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"], - pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - return rendered - - -def render_pep440_pre(pieces): - """TAG[.post.devDISTANCE] -- No -dirty. - - Exceptions: - 1: no tags. 0.post.devDISTANCE - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"]: - rendered += ".post.dev%%d" %% pieces["distance"] - else: - # exception #1 - rendered = "0.post.dev%%d" %% pieces["distance"] - return rendered - - -def render_pep440_post(pieces): - """TAG[.postDISTANCE[.dev0]+gHEX] . - - The ".dev0" means dirty. Note that .dev0 sorts backwards - (a dirty tree will appear "older" than the corresponding clean one), - but you shouldn't be releasing software with -dirty anyways. - - Exceptions: - 1: no tags. 0.postDISTANCE[.dev0] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%%d" %% pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - rendered += plus_or_dot(pieces) - rendered += "g%%s" %% pieces["short"] - else: - # exception #1 - rendered = "0.post%%d" %% pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - rendered += "+g%%s" %% pieces["short"] - return rendered - - -def render_pep440_old(pieces): - """TAG[.postDISTANCE[.dev0]] . - - The ".dev0" means dirty. - - Eexceptions: - 1: no tags. 
0.postDISTANCE[.dev0] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%%d" %% pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - else: - # exception #1 - rendered = "0.post%%d" %% pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - return rendered - - -def render_git_describe(pieces): - """TAG[-DISTANCE-gHEX][-dirty]. - - Like 'git describe --tags --dirty --always'. - - Exceptions: - 1: no tags. HEX[-dirty] (note: no 'g' prefix) - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"]: - rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) - else: - # exception #1 - rendered = pieces["short"] - if pieces["dirty"]: - rendered += "-dirty" - return rendered - - -def render_git_describe_long(pieces): - """TAG-DISTANCE-gHEX[-dirty]. - - Like 'git describe --tags --dirty --always -long'. - The distance/hash is unconditional. - - Exceptions: - 1: no tags. HEX[-dirty] (note: no 'g' prefix) - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) - else: - # exception #1 - rendered = pieces["short"] - if pieces["dirty"]: - rendered += "-dirty" - return rendered - - -def render(pieces, style): - """Render the given version pieces into the requested style.""" - if pieces["error"]: - return {"version": "unknown", - "full-revisionid": pieces.get("long"), - "dirty": None, - "error": pieces["error"], - "date": None} - - if not style or style == "default": - style = "pep440" # the default - - if style == "pep440": - rendered = render_pep440(pieces) - elif style == "pep440-pre": - rendered = render_pep440_pre(pieces) - elif style == "pep440-post": - rendered = render_pep440_post(pieces) - elif style == "pep440-old": - rendered = render_pep440_old(pieces) - elif style == "git-describe": - rendered = render_git_describe(pieces) - elif style == "git-describe-long": - rendered = render_git_describe_long(pieces) - else: - raise ValueError("unknown style '%%s'" %% style) - - return {"version": rendered, "full-revisionid": pieces["long"], - "dirty": pieces["dirty"], "error": None, - "date": pieces.get("date")} - - -def get_versions(): - """Get version information or return default if unable to do so.""" - # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have - # __file__, we can work backwards from there to the root. Some - # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which - # case we can only use expanded keywords. - - cfg = get_config() - verbose = cfg.verbose - - try: - return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, - verbose) - except NotThisMethod: - pass - - try: - root = os.path.realpath(__file__) - # versionfile_source is the relative path from the top of the source - # tree (where the .git directory might live) to this file. Invert - # this to find the root from __file__. 
- for i in cfg.versionfile_source.split('/'): - root = os.path.dirname(root) - except NameError: - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, - "error": "unable to find root of source tree", - "date": None} - - try: - pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) - return render(pieces, cfg.style) - except NotThisMethod: - pass - - try: - if cfg.parentdir_prefix: - return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) - except NotThisMethod: - pass - - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, - "error": "unable to compute version", "date": None} -''' - - -@register_vcs_handler("git", "get_keywords") -def git_get_keywords(versionfile_abs): - """Extract version information from the given file.""" - # the code embedded in _version.py can just fetch the value of these - # keywords. When used from setup.py, we don't want to import _version.py, - # so we do it with a regexp instead. This function is not used from - # _version.py. - keywords = {} - try: - f = open(versionfile_abs, "r") - for line in f.readlines(): - if line.strip().startswith("git_refnames ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["refnames"] = mo.group(1) - if line.strip().startswith("git_full ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["full"] = mo.group(1) - if line.strip().startswith("git_date ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["date"] = mo.group(1) - f.close() - except EnvironmentError: - pass - return keywords - - -@register_vcs_handler("git", "keywords") -def git_versions_from_keywords(keywords, tag_prefix, verbose): - """Get version information from git keywords.""" - if not keywords: - raise NotThisMethod("no keywords at all, weird") - date = keywords.get("date") - if date is not None: - # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant - # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 - # -like" string, which we must then edit to make compliant), because - # it's been around since git-1.5.3, and it's too difficult to - # discover which version we're using, or to work around using an - # older one. - date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) - refnames = keywords["refnames"].strip() - if refnames.startswith("$Format"): - if verbose: - print("keywords are unexpanded, not using") - raise NotThisMethod("unexpanded keywords, not a git-archive tarball") - refs = set([r.strip() for r in refnames.strip("()").split(",")]) - # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of - # just "foo-1.0". If we see a "tag: " prefix, prefer those. - TAG = "tag: " - tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) - if not tags: - # Either we're using git < 1.8.3, or there really are no tags. We use - # a heuristic: assume all version tags have a digit. The old git %d - # expansion behaves like git log --decorate=short and strips out the - # refs/heads/ and refs/tags/ prefixes that would let us distinguish - # between branches and tags. By ignoring refnames without digits, we - # filter out many common branch names like "release" and - # "stabilization", as well as "HEAD" and "master". - tags = set([r for r in refs if re.search(r'\d', r)]) - if verbose: - print("discarding '%s', no digits" % ",".join(refs - tags)) - if verbose: - print("likely tags: %s" % ",".join(sorted(tags))) - for ref in sorted(tags): - # sorting will prefer e.g. 
"2.0" over "2.0rc1" - if ref.startswith(tag_prefix): - r = ref[len(tag_prefix):] - if verbose: - print("picking %s" % r) - return {"version": r, - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": None, - "date": date} - # no suitable tags, so version is "0+unknown", but full hex is still there - if verbose: - print("no suitable tags, using unknown + full revision id") - return {"version": "0+unknown", - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": "no suitable tags", "date": None} - - -@register_vcs_handler("git", "pieces_from_vcs") -def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): - """Get version from 'git describe' in the root of the source tree. - - This only gets called if the git-archive 'subst' keywords were *not* - expanded, and _version.py hasn't already been rewritten with a short - version string, meaning we're inside a checked out source tree. - """ - GITS = ["git"] - if sys.platform == "win32": - GITS = ["git.cmd", "git.exe"] - - out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, - hide_stderr=True) - if rc != 0: - if verbose: - print("Directory %s not under git control" % root) - raise NotThisMethod("'git rev-parse --git-dir' returned error") - - # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] - # if there isn't one, this yields HEX[-dirty] (no NUM) - describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", - "--always", "--long", - "--match", "%s*" % tag_prefix], - cwd=root) - # --long was added in git-1.5.5 - if describe_out is None: - raise NotThisMethod("'git describe' failed") - describe_out = describe_out.strip() - full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) - if full_out is None: - raise NotThisMethod("'git rev-parse' failed") - full_out = full_out.strip() - - pieces = {} - pieces["long"] = full_out - pieces["short"] = full_out[:7] # maybe improved later - pieces["error"] = None - - # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] - # TAG might have hyphens. - git_describe = describe_out - - # look for -dirty suffix - dirty = git_describe.endswith("-dirty") - pieces["dirty"] = dirty - if dirty: - git_describe = git_describe[:git_describe.rindex("-dirty")] - - # now we have TAG-NUM-gHEX or HEX - - if "-" in git_describe: - # TAG-NUM-gHEX - mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) - if not mo: - # unparseable. Maybe git-describe is misbehaving? 
- pieces["error"] = ("unable to parse git-describe output: '%s'" - % describe_out) - return pieces - - # tag - full_tag = mo.group(1) - if not full_tag.startswith(tag_prefix): - if verbose: - fmt = "tag '%s' doesn't start with prefix '%s'" - print(fmt % (full_tag, tag_prefix)) - pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" - % (full_tag, tag_prefix)) - return pieces - pieces["closest-tag"] = full_tag[len(tag_prefix):] - - # distance: number of commits since tag - pieces["distance"] = int(mo.group(2)) - - # commit: short hex revision ID - pieces["short"] = mo.group(3) - - else: - # HEX: no tags - pieces["closest-tag"] = None - count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], - cwd=root) - pieces["distance"] = int(count_out) # total number of commits - - # commit date: see ISO-8601 comment in git_versions_from_keywords() - date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], - cwd=root)[0].strip() - pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) - - return pieces - - -def do_vcs_install(manifest_in, versionfile_source, ipy): - """Git-specific installation logic for Versioneer. - - For Git, this means creating/changing .gitattributes to mark _version.py - for export-subst keyword substitution. - """ - GITS = ["git"] - if sys.platform == "win32": - GITS = ["git.cmd", "git.exe"] - files = [manifest_in, versionfile_source] - if ipy: - files.append(ipy) - try: - me = __file__ - if me.endswith(".pyc") or me.endswith(".pyo"): - me = os.path.splitext(me)[0] + ".py" - versioneer_file = os.path.relpath(me) - except NameError: - versioneer_file = "versioneer.py" - files.append(versioneer_file) - present = False - try: - f = open(".gitattributes", "r") - for line in f.readlines(): - if line.strip().startswith(versionfile_source): - if "export-subst" in line.strip().split()[1:]: - present = True - f.close() - except EnvironmentError: - pass - if not present: - f = open(".gitattributes", "a+") - f.write("%s export-subst\n" % versionfile_source) - f.close() - files.append(".gitattributes") - run_command(GITS, ["add", "--"] + files) - - -def versions_from_parentdir(parentdir_prefix, root, verbose): - """Try to determine the version from the parent directory name. - - Source tarballs conventionally unpack into a directory that includes both - the project name and a version string. We will also support searching up - two directory levels for an appropriately named parent directory - """ - rootdirs = [] - - for i in range(3): - dirname = os.path.basename(root) - if dirname.startswith(parentdir_prefix): - return {"version": dirname[len(parentdir_prefix):], - "full-revisionid": None, - "dirty": False, "error": None, "date": None} - else: - rootdirs.append(root) - root = os.path.dirname(root) # up a level - - if verbose: - print("Tried directories %s but none started with prefix %s" % - (str(rootdirs), parentdir_prefix)) - raise NotThisMethod("rootdir doesn't start with parentdir_prefix") - - -SHORT_VERSION_PY = """ -# This file was generated by 'versioneer.py' (0.18) from -# revision-control system data, or from the parent directory name of an -# unpacked source archive. Distribution tarballs contain a pre-generated copy -# of this file. 
- -import json - -version_json = ''' -%s -''' # END VERSION_JSON - - -def get_versions(): - return json.loads(version_json) -""" - - -def versions_from_file(filename): - """Try to determine the version from _version.py if present.""" - try: - with open(filename) as f: - contents = f.read() - except EnvironmentError: - raise NotThisMethod("unable to read _version.py") - mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON", - contents, re.M | re.S) - if not mo: - mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON", - contents, re.M | re.S) - if not mo: - raise NotThisMethod("no version_json in _version.py") - return json.loads(mo.group(1)) - - -def write_to_version_file(filename, versions): - """Write the given version number to the given _version.py file.""" - os.unlink(filename) - contents = json.dumps(versions, sort_keys=True, - indent=1, separators=(",", ": ")) - with open(filename, "w") as f: - f.write(SHORT_VERSION_PY % contents) - - print("set %s to '%s'" % (filename, versions["version"])) - - -def plus_or_dot(pieces): - """Return a + if we don't already have one, else return a .""" - if "+" in pieces.get("closest-tag", ""): - return "." - return "+" - - -def render_pep440(pieces): - """Build up version string, with post-release "local version identifier". - - Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you - get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty - - Exceptions: - 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += plus_or_dot(pieces) - rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - else: - # exception #1 - rendered = "0+untagged.%d.g%s" % (pieces["distance"], - pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - return rendered - - -def render_pep440_pre(pieces): - """TAG[.post.devDISTANCE] -- No -dirty. - - Exceptions: - 1: no tags. 0.post.devDISTANCE - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"]: - rendered += ".post.dev%d" % pieces["distance"] - else: - # exception #1 - rendered = "0.post.dev%d" % pieces["distance"] - return rendered - - -def render_pep440_post(pieces): - """TAG[.postDISTANCE[.dev0]+gHEX] . - - The ".dev0" means dirty. Note that .dev0 sorts backwards - (a dirty tree will appear "older" than the corresponding clean one), - but you shouldn't be releasing software with -dirty anyways. - - Exceptions: - 1: no tags. 0.postDISTANCE[.dev0] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - rendered += plus_or_dot(pieces) - rendered += "g%s" % pieces["short"] - else: - # exception #1 - rendered = "0.post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - rendered += "+g%s" % pieces["short"] - return rendered - - -def render_pep440_old(pieces): - """TAG[.postDISTANCE[.dev0]] . - - The ".dev0" means dirty. - - Eexceptions: - 1: no tags. 
0.postDISTANCE[.dev0] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - else: - # exception #1 - rendered = "0.post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - return rendered - - -def render_git_describe(pieces): - """TAG[-DISTANCE-gHEX][-dirty]. - - Like 'git describe --tags --dirty --always'. - - Exceptions: - 1: no tags. HEX[-dirty] (note: no 'g' prefix) - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"]: - rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) - else: - # exception #1 - rendered = pieces["short"] - if pieces["dirty"]: - rendered += "-dirty" - return rendered - - -def render_git_describe_long(pieces): - """TAG-DISTANCE-gHEX[-dirty]. - - Like 'git describe --tags --dirty --always -long'. - The distance/hash is unconditional. - - Exceptions: - 1: no tags. HEX[-dirty] (note: no 'g' prefix) - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) - else: - # exception #1 - rendered = pieces["short"] - if pieces["dirty"]: - rendered += "-dirty" - return rendered - - -def render(pieces, style): - """Render the given version pieces into the requested style.""" - if pieces["error"]: - return {"version": "unknown", - "full-revisionid": pieces.get("long"), - "dirty": None, - "error": pieces["error"], - "date": None} - - if not style or style == "default": - style = "pep440" # the default - - if style == "pep440": - rendered = render_pep440(pieces) - elif style == "pep440-pre": - rendered = render_pep440_pre(pieces) - elif style == "pep440-post": - rendered = render_pep440_post(pieces) - elif style == "pep440-old": - rendered = render_pep440_old(pieces) - elif style == "git-describe": - rendered = render_git_describe(pieces) - elif style == "git-describe-long": - rendered = render_git_describe_long(pieces) - else: - raise ValueError("unknown style '%s'" % style) - - return {"version": rendered, "full-revisionid": pieces["long"], - "dirty": pieces["dirty"], "error": None, - "date": pieces.get("date")} - - -class VersioneerBadRootError(Exception): - """The project root directory is unknown or missing key files.""" - - -def get_versions(verbose=False): - """Get the project version from whatever source is available. - - Returns dict with two keys: 'version' and 'full'. - """ - if "versioneer" in sys.modules: - # see the discussion in cmdclass.py:get_cmdclass() - del sys.modules["versioneer"] - - root = get_root() - cfg = get_config_from_root(root) - - assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg" - handlers = HANDLERS.get(cfg.VCS) - assert handlers, "unrecognized VCS '%s'" % cfg.VCS - verbose = verbose or cfg.verbose - assert cfg.versionfile_source is not None, \ - "please set versioneer.versionfile_source" - assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix" - - versionfile_abs = os.path.join(root, cfg.versionfile_source) - - # extract version from first of: _version.py, VCS command (e.g. 'git - # describe'), parentdir. This is meant to work for developers using a - # source checkout, for users of a tarball created by 'setup.py sdist', - # and for users of a tarball/zipball created by 'git archive' or github's - # download-from-tag feature or the equivalent in other VCSes. 
- - get_keywords_f = handlers.get("get_keywords") - from_keywords_f = handlers.get("keywords") - if get_keywords_f and from_keywords_f: - try: - keywords = get_keywords_f(versionfile_abs) - ver = from_keywords_f(keywords, cfg.tag_prefix, verbose) - if verbose: - print("got version from expanded keyword %s" % ver) - return ver - except NotThisMethod: - pass - - try: - ver = versions_from_file(versionfile_abs) - if verbose: - print("got version from file %s %s" % (versionfile_abs, ver)) - return ver - except NotThisMethod: - pass - - from_vcs_f = handlers.get("pieces_from_vcs") - if from_vcs_f: - try: - pieces = from_vcs_f(cfg.tag_prefix, root, verbose) - ver = render(pieces, cfg.style) - if verbose: - print("got version from VCS %s" % ver) - return ver - except NotThisMethod: - pass - - try: - if cfg.parentdir_prefix: - ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose) - if verbose: - print("got version from parentdir %s" % ver) - return ver - except NotThisMethod: - pass - - if verbose: - print("unable to compute version") - - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, "error": "unable to compute version", - "date": None} - - -def get_version(): - """Get the short version string for this project.""" - return get_versions()["version"] - - -def get_cmdclass(): - """Get the custom setuptools/distutils subclasses used by Versioneer.""" - if "versioneer" in sys.modules: - del sys.modules["versioneer"] - # this fixes the "python setup.py develop" case (also 'install' and - # 'easy_install .'), in which subdependencies of the main project are - # built (using setup.py bdist_egg) in the same python process. Assume - # a main project A and a dependency B, which use different versions - # of Versioneer. A's setup.py imports A's Versioneer, leaving it in - # sys.modules by the time B's setup.py is executed, causing B to run - # with the wrong versioneer. Setuptools wraps the sub-dep builds in a - # sandbox that restores sys.modules to it's pre-build state, so the - # parent is protected against the child's "import versioneer". By - # removing ourselves from sys.modules here, before the child build - # happens, we protect the child from the parent's versioneer too. - # Also see https://github.com/warner/python-versioneer/issues/52 - - cmds = {} - - # we add "version" to both distutils and setuptools - from distutils.core import Command - - class cmd_version(Command): - description = "report generated version string" - user_options = [] - boolean_options = [] - - def initialize_options(self): - pass - - def finalize_options(self): - pass - - def run(self): - vers = get_versions(verbose=True) - print("Version: %s" % vers["version"]) - print(" full-revisionid: %s" % vers.get("full-revisionid")) - print(" dirty: %s" % vers.get("dirty")) - print(" date: %s" % vers.get("date")) - if vers["error"]: - print(" error: %s" % vers["error"]) - cmds["version"] = cmd_version - - # we override "build_py" in both distutils and setuptools - # - # most invocation pathways end up running build_py: - # distutils/build -> build_py - # distutils/install -> distutils/build ->.. - # setuptools/bdist_wheel -> distutils/install ->.. - # setuptools/bdist_egg -> distutils/install_lib -> build_py - # setuptools/install -> bdist_egg ->.. - # setuptools/develop -> ? 
- # pip install: - # copies source tree to a tempdir before running egg_info/etc - # if .git isn't copied too, 'git describe' will fail - # then does setup.py bdist_wheel, or sometimes setup.py install - # setup.py egg_info -> ? - - # we override different "build_py" commands for both environments - if "setuptools" in sys.modules: - from setuptools.command.build_py import build_py as _build_py - else: - from distutils.command.build_py import build_py as _build_py - - class cmd_build_py(_build_py): - def run(self): - root = get_root() - cfg = get_config_from_root(root) - versions = get_versions() - _build_py.run(self) - # now locate _version.py in the new build/ directory and replace - # it with an updated value - if cfg.versionfile_build: - target_versionfile = os.path.join(self.build_lib, - cfg.versionfile_build) - print("UPDATING %s" % target_versionfile) - write_to_version_file(target_versionfile, versions) - cmds["build_py"] = cmd_build_py - - if "cx_Freeze" in sys.modules: # cx_freeze enabled? - from cx_Freeze.dist import build_exe as _build_exe - - # nczeczulin reports that py2exe won't like the pep440-style string - # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g. - # setup(console=[{ - # "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION - # "product_version": versioneer.get_version(), - # ... - - class cmd_build_exe(_build_exe): - def run(self): - root = get_root() - cfg = get_config_from_root(root) - versions = get_versions() - target_versionfile = cfg.versionfile_source - print("UPDATING %s" % target_versionfile) - write_to_version_file(target_versionfile, versions) - - _build_exe.run(self) - os.unlink(target_versionfile) - with open(cfg.versionfile_source, "w") as f: - LONG = LONG_VERSION_PY[cfg.VCS] - f.write(LONG % - {"DOLLAR": "$", - "STYLE": cfg.style, - "TAG_PREFIX": cfg.tag_prefix, - "PARENTDIR_PREFIX": cfg.parentdir_prefix, - "VERSIONFILE_SOURCE": cfg.versionfile_source, - }) - cmds["build_exe"] = cmd_build_exe - del cmds["build_py"] - - if 'py2exe' in sys.modules: # py2exe enabled? 
-        try:
-            from py2exe.distutils_buildexe import py2exe as _py2exe  # py3
-        except ImportError:
-            from py2exe.build_exe import py2exe as _py2exe  # py2
-
-        class cmd_py2exe(_py2exe):
-            def run(self):
-                root = get_root()
-                cfg = get_config_from_root(root)
-                versions = get_versions()
-                target_versionfile = cfg.versionfile_source
-                print("UPDATING %s" % target_versionfile)
-                write_to_version_file(target_versionfile, versions)
-
-                _py2exe.run(self)
-                os.unlink(target_versionfile)
-                with open(cfg.versionfile_source, "w") as f:
-                    LONG = LONG_VERSION_PY[cfg.VCS]
-                    f.write(LONG %
-                            {"DOLLAR": "$",
-                             "STYLE": cfg.style,
-                             "TAG_PREFIX": cfg.tag_prefix,
-                             "PARENTDIR_PREFIX": cfg.parentdir_prefix,
-                             "VERSIONFILE_SOURCE": cfg.versionfile_source,
-                             })
-        cmds["py2exe"] = cmd_py2exe
-
-    # we override different "sdist" commands for both environments
-    if "setuptools" in sys.modules:
-        from setuptools.command.sdist import sdist as _sdist
-    else:
-        from distutils.command.sdist import sdist as _sdist
-
-    class cmd_sdist(_sdist):
-        def run(self):
-            versions = get_versions()
-            self._versioneer_generated_versions = versions
-            # unless we update this, the command will keep using the old
-            # version
-            self.distribution.metadata.version = versions["version"]
-            return _sdist.run(self)
-
-        def make_release_tree(self, base_dir, files):
-            root = get_root()
-            cfg = get_config_from_root(root)
-            _sdist.make_release_tree(self, base_dir, files)
-            # now locate _version.py in the new base_dir directory
-            # (remembering that it may be a hardlink) and replace it with an
-            # updated value
-            target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
-            print("UPDATING %s" % target_versionfile)
-            write_to_version_file(target_versionfile,
-                                  self._versioneer_generated_versions)
-    cmds["sdist"] = cmd_sdist
-
-    return cmds
-
-
-CONFIG_ERROR = """
-setup.cfg is missing the necessary Versioneer configuration. You need
-a section like:
-
- [versioneer]
- VCS = git
- style = pep440
- versionfile_source = src/myproject/_version.py
- versionfile_build = myproject/_version.py
- tag_prefix =
- parentdir_prefix = myproject-
-
-You will also need to edit your setup.py to use the results:
-
- import versioneer
- setup(version=versioneer.get_version(),
-       cmdclass=versioneer.get_cmdclass(), ...)
-
-Please read the docstring in ./versioneer.py for configuration instructions,
-edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
-"""
-
-SAMPLE_CONFIG = """
-# See the docstring in versioneer.py for instructions. Note that you must
-# re-run 'versioneer.py setup' after changing this section, and commit the
-# resulting files.
-
-[versioneer]
-#VCS = git
-#style = pep440
-#versionfile_source =
-#versionfile_build =
-#tag_prefix =
-#parentdir_prefix =
-
-"""
-
-INIT_PY_SNIPPET = """
-from ._version import get_versions
-__version__ = get_versions()['version']
-del get_versions
-"""
-
-
-def do_setup():
-    """Main VCS-independent setup function for installing Versioneer."""
-    root = get_root()
-    try:
-        cfg = get_config_from_root(root)
-    except (EnvironmentError, configparser.NoSectionError,
-            configparser.NoOptionError) as e:
-        if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
-            print("Adding sample versioneer config to setup.cfg",
-                  file=sys.stderr)
-            with open(os.path.join(root, "setup.cfg"), "a") as f:
-                f.write(SAMPLE_CONFIG)
-        print(CONFIG_ERROR, file=sys.stderr)
-        return 1
-
-    print(" creating %s" % cfg.versionfile_source)
-    with open(cfg.versionfile_source, "w") as f:
-        LONG = LONG_VERSION_PY[cfg.VCS]
-        f.write(LONG % {"DOLLAR": "$",
-                        "STYLE": cfg.style,
-                        "TAG_PREFIX": cfg.tag_prefix,
-                        "PARENTDIR_PREFIX": cfg.parentdir_prefix,
-                        "VERSIONFILE_SOURCE": cfg.versionfile_source,
-                        })
-
-    ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
-                       "__init__.py")
-    if os.path.exists(ipy):
-        try:
-            with open(ipy, "r") as f:
-                old = f.read()
-        except EnvironmentError:
-            old = ""
-        if INIT_PY_SNIPPET not in old:
-            print(" appending to %s" % ipy)
-            with open(ipy, "a") as f:
-                f.write(INIT_PY_SNIPPET)
-        else:
-            print(" %s unmodified" % ipy)
-    else:
-        print(" %s doesn't exist, ok" % ipy)
-        ipy = None
-
-    # Make sure both the top-level "versioneer.py" and versionfile_source
-    # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
-    # they'll be copied into source distributions. Pip won't be able to
-    # install the package without this.
-    manifest_in = os.path.join(root, "MANIFEST.in")
-    simple_includes = set()
-    try:
-        with open(manifest_in, "r") as f:
-            for line in f:
-                if line.startswith("include "):
-                    for include in line.split()[1:]:
-                        simple_includes.add(include)
-    except EnvironmentError:
-        pass
-    # That doesn't cover everything MANIFEST.in can do
-    # (http://docs.python.org/2/distutils/sourcedist.html#commands), so
-    # it might give some false negatives. Appending redundant 'include'
-    # lines is safe, though.
-    if "versioneer.py" not in simple_includes:
-        print(" appending 'versioneer.py' to MANIFEST.in")
-        with open(manifest_in, "a") as f:
-            f.write("include versioneer.py\n")
-    else:
-        print(" 'versioneer.py' already in MANIFEST.in")
-    if cfg.versionfile_source not in simple_includes:
-        print(" appending versionfile_source ('%s') to MANIFEST.in" %
-              cfg.versionfile_source)
-        with open(manifest_in, "a") as f:
-            f.write("include %s\n" % cfg.versionfile_source)
-    else:
-        print(" versionfile_source already in MANIFEST.in")
-
-    # Make VCS-specific changes. For git, this means creating/changing
-    # .gitattributes to mark _version.py for export-subst keyword
-    # substitution.
-    do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
-    return 0
-
-
-def scan_setup_py():
-    """Validate the contents of setup.py against Versioneer's expectations."""
-    found = set()
-    setters = False
-    errors = 0
-    with open("setup.py", "r") as f:
-        for line in f.readlines():
-            if "import versioneer" in line:
-                found.add("import")
-            if "versioneer.get_cmdclass()" in line:
-                found.add("cmdclass")
-            if "versioneer.get_version()" in line:
-                found.add("get_version")
-            if "versioneer.VCS" in line:
-                setters = True
-            if "versioneer.versionfile_source" in line:
-                setters = True
-    if len(found) != 3:
-        print("")
-        print("Your setup.py appears to be missing some important items")
-        print("(but I might be wrong). Please make sure it has something")
-        print("roughly like the following:")
-        print("")
-        print(" import versioneer")
-        print(" setup( version=versioneer.get_version(),")
-        print("        cmdclass=versioneer.get_cmdclass(),  ...)")
-        print("")
-        errors += 1
-    if setters:
-        print("You should remove lines like 'versioneer.VCS = ' and")
-        print("'versioneer.versionfile_source = ' . This configuration")
-        print("now lives in setup.cfg, and should be removed from setup.py")
-        print("")
-        errors += 1
-    return errors
-
-
-if __name__ == "__main__":
-    cmd = sys.argv[1]
-    if cmd == "setup":
-        errors = do_setup()
-        errors += scan_setup_py()
-        if errors:
-            sys.exit(1)
diff --git a/python/pybind11/cucim_pydoc.h b/python/pybind11/cucim_pydoc.h
index 18408d391..f5c0a0346 100644
--- a/python/pybind11/cucim_pydoc.h
+++ b/python/pybind11/cucim_pydoc.h
@@ -155,7 +155,7 @@ A channel name list.
 PYDOC(spacing, R"doc(
 Returns physical size in tuple.
-If `dim_order` is specified, it returns phisical size for the dimensions.
+If `dim_order` is specified, it returns physical size for the dimensions.
 If a dimension given by the `dim_order` doesn't exist, it returns 1.0
 by default for the missing dimension.
 Args:
diff --git a/run b/run
index b39fee6ac..b66c2f75a 100755
--- a/run
+++ b/run
@@ -598,247 +598,6 @@ copy_nvjpeg_files_() {
     fi
 }
 
-build_python_package_desc() { echo 'Build Python package
-
-Note: This command does not remove `dist` folder before building.
-'
-}
-build_python_package() {
-    local ret=0
-    local old_opt="$(shopt -op errexit);$(shopt -op nounset)" # save old shopts
-    set -eu
-
-    # Copy cufile SDK from host system to temp/cuda
-    copy_gds_files_
-    # Copy nvjpeg SDK from host system to temp/cuda
-    copy_nvjpeg_files_
-
-    run_command ${TOP}/dockcross-manylinux2014-x64 ./run build_python_package_
-    ret=$?
-
-    eval "${old_opt}" # restore old shopts
-
-    return ${ret}
-}
-
-repair_wheel_() {
-    local wheel="$1"
-    local dest="${2:-./}"
-    local PLAT="${3:-manylinux2014_x86_64}"
-
-
-    if ! auditwheel show "$wheel"; then
-        echo "Skipping non-platform wheel ${wheel}"
-    else
-        $(head -1 $(which auditwheel) | cut -d'!' -f2) $TOP/scripts/auditwheel_repair.py repair --plat "${PLAT}" -w ${dest} "${wheel}"
-    fi
-}
-
-build_python_package_() {
-    local SRC_ROOT=${1:-${TOP}}
-    local BUILD_ROOT=${2:-${TOP}/temp}
-    local DEST_ROOT=${3:-${TOP}/dist}
-    local CUCIM_SDK_PATH=${4:-${BUILD_ROOT}/libcucim}
-
-    local old_opt="$(shopt -op errexit);$(shopt -op nounset)" # save old shopts
-    local major_version="$(cat ${TOP}/VERSION | cut -d. -f1)" # major version number
-    set -eu
-
-    # Clear CMakeCache.txt to use the latest options
-    run_command rm -f ${BUILD_ROOT}/libcucim/CMakeCache.txt
-    run_command rm -f ${BUILD_ROOT}/cuslide/CMakeCache.txt
-    run_command rm -f ${BUILD_ROOT}/cumed/CMakeCache.txt
-    run_command rm -f ${BUILD_ROOT}/cucim/CMakeCache.txt
-
-    # Create a folder for .whl file that repair_wheel_ is not applied.
-    TEMP_PYPKG_DIR=${BUILD_ROOT}/py_pkg # $(mktemp -d)
-    c_echo b "TEMP_PYPKG_DIR=${TEMP_PYPKG_DIR}"
-    mkdir -p $TEMP_PYPKG_DIR
-    rm -rf $TEMP_PYPKG_DIR/*
-    # trap 'rm -rf ${TEMP_PYPKG_DIR}' EXIT
-
-    local CMAKE_CMD=${CMAKE_CMD:-cmake}
-    local CMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE:-Release}
-    local NUM_THREADS=${NUM_THREADS:-$(nproc)}
-
-    local PLAT=manylinux2014_x86_64
-
-    local pybins
-
-    mkdir -p ${DEST_ROOT}
-
-    # Remove existing library files at build root
-    rm -rf ${BUILD_ROOT}/libcucim/lib/*
-    rm -rf ${BUILD_ROOT}/libcucim/install/lib*/lib*
-    rm -rf ${BUILD_ROOT}/cuslide/lib/*
-    rm -rf ${BUILD_ROOT}/cuslide/install/lib*/cucim*
-    rm -rf ${BUILD_ROOT}/cumed/lib/*
-    rm -rf ${BUILD_ROOT}/cumed/install/lib*/cucim*
-    rm -rf ${BUILD_ROOT}/cucim/lib/cucim/*.so*
-    rm -rf ${BUILD_ROOT}/cucim/install/lib/*.so*
-
-    # Build libcucim
-    ${CMAKE_CMD} -S ${SRC_ROOT} -B ${BUILD_ROOT}/libcucim \
-        -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} \
-        -DCMAKE_INSTALL_PREFIX=${CUCIM_SDK_PATH}/install
-    ${CMAKE_CMD} --build ${BUILD_ROOT}/libcucim --config ${CMAKE_BUILD_TYPE} --target cucim -- -j ${NUM_THREADS}
-    ${CMAKE_CMD} --build ${BUILD_ROOT}/libcucim --config ${CMAKE_BUILD_TYPE} --target install -- -j ${NUM_THREADS}
-
-    # Build cuslide plugin
-    ${CMAKE_CMD} -S ${SRC_ROOT}/cpp/plugins/cucim.kit.cuslide -B ${BUILD_ROOT}/cuslide \
-        -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} \
-        -DCMAKE_INSTALL_PREFIX=${BUILD_ROOT}/cuslide/install \
-        -DCUCIM_SDK_PATH=${CUCIM_SDK_PATH}
-    ${CMAKE_CMD} --build ${BUILD_ROOT}/cuslide --config ${CMAKE_BUILD_TYPE} --target cucim.kit.cuslide -- -j ${NUM_THREADS}
-    ${CMAKE_CMD} --build ${BUILD_ROOT}/cuslide --config ${CMAKE_BUILD_TYPE} --target install -- -j ${NUM_THREADS}
-
-    # Build cumed plugin
-    ${CMAKE_CMD} -S ${SRC_ROOT}/cpp/plugins/cucim.kit.cumed -B ${BUILD_ROOT}/cumed \
-        -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} \
-        -DCMAKE_INSTALL_PREFIX=${BUILD_ROOT}/cumed/install \
-        -DCUCIM_SDK_PATH=${CUCIM_SDK_PATH}
-    ${CMAKE_CMD} --build ${BUILD_ROOT}/cumed --config ${CMAKE_BUILD_TYPE} --target cucim.kit.cumed -- -j ${NUM_THREADS}
-    ${CMAKE_CMD} --build ${BUILD_ROOT}/cumed --config ${CMAKE_BUILD_TYPE} --target install -- -j ${NUM_THREADS}
-
-    # Build Python bind
-    pybins="$(echo /opt/python/*/bin)"
-    if [ "${pybins}" = "/opt/python/*/bin" ]; then
-        pybins="$(dirname $(which python3))" # for building at host
-    fi
-    for PYBIN in ${pybins}; do
-        local python_library=$(${PYBIN}/python3 -c "import distutils.sysconfig as sysconfig; import os; print(os.path.join(sysconfig.get_config_var('LIBDIR'), sysconfig.get_config_var('LDLIBRARY')))")
-        local python_include_dir=$(${PYBIN}/python3 -c "from distutils.sysconfig import get_python_inc; print(get_python_inc())")
-
-        ${CMAKE_CMD} -S ${SRC_ROOT}/python -B ${BUILD_ROOT}/cucim \
-            -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} \
-            -DCMAKE_INSTALL_PREFIX=${BUILD_ROOT}/cucim/install \
-            -DCUCIM_SDK_PATH=${CUCIM_SDK_PATH} \
-            -DPYTHON_EXECUTABLE=${PYBIN}/python3 \
-            -DPYTHON_LIBRARY=${python_library} \
-            -DPYTHON_INCLUDE_DIR=${python_include_dir}
-        ${CMAKE_CMD} --build ${BUILD_ROOT}/cucim --config ${CMAKE_BUILD_TYPE} --target cucim -- -j ${NUM_THREADS}
-        ${CMAKE_CMD} --build ${BUILD_ROOT}/cucim --config ${CMAKE_BUILD_TYPE} --target install -- -j ${NUM_THREADS}
-    done
-
-    # Remove existing library files at python/cucim/src/cucim/clara
-    rm -f ${SRC_ROOT}/python/cucim/src/cucim/clara/*.so*
-
-    # Copy .so files to pybind's build folder
-    # (it uses -P to copy symbolic links as they are)
-    cp -P ${BUILD_ROOT}/libcucim/install/lib*/lib* ${BUILD_ROOT}/cucim/install/lib/
-    cp -P ${BUILD_ROOT}/cuslide/install/lib*/cucim* ${BUILD_ROOT}/cucim/install/lib/
-    cp -P ${BUILD_ROOT}/cumed/install/lib*/cucim* ${BUILD_ROOT}/cucim/install/lib/
-
-    # Copy .so files from pybind's build folder to cucim Python source folder
-    # Since wheel file doesn't support symbolic link (https://github.com/pypa/wheel/issues/203),
-    # we don't need to copy symbolic links. Instead copy only libcucim.so.${major_version} (without symbolic link)
-    #find ${BUILD_ROOT}/cucim/install/lib -maxdepth 1 -type f -exec cp {} ${SRC_ROOT}/python/cucim/src/cucim/clara/ \;
-    cp ${BUILD_ROOT}/cucim/install/lib/_cucim.*.so ${SRC_ROOT}/python/cucim/src/cucim/clara/
-    cp ${BUILD_ROOT}/cucim/install/lib/cucim.*.so ${SRC_ROOT}/python/cucim/src/cucim/clara/
-    cp ${BUILD_ROOT}/cucim/install/lib/libcucim.so.${major_version} ${SRC_ROOT}/python/cucim/src/cucim/clara/
-    find ${BUILD_ROOT}/cucim/install/lib -maxdepth 1 -type f -name "lib*.so" -exec cp {} ${SRC_ROOT}/python/cucim/src/cucim/clara/ \;
-
-    cd ${SRC_ROOT}/python/cucim
-
-    # Remove build folder
-    rm -rf ${SRC_ROOT}/python/cucim/build
-    # Compile wheels (one python binary is enough)
-    pybins="$(echo /opt/python/*/bin)"
-    [ ! -e /opt/python/cp36-cp36m/bin ] && pybins="$(dirname $(which python3))" # for building at host
-
-    pybins="$(echo /opt/python/*/bin)"
-    if [ "${pybins}" = "/opt/python/*/bin" ]; then # if multiple python binaries not found
-        pybins="$(dirname $(which python3))" # for building at host
-    else
-        pybins=/opt/python/cp36-cp36m/bin # use Python 3.6 for executing setup.py
-    fi
-
-    for PYBIN in ${pybins}; do # /opt/python/*/bin
-        run_command "${PYBIN}/python3" setup.py bdist_wheel -p $PLAT -d ${TEMP_PYPKG_DIR}
-    done
-
-    # Do not bundle external shared libraries for now.
-    # (CUDA-related libraries cannot be redistributed without EULA messages confirmed by user)
-    # Here, we just copy to dist folder.
-    # cp ${TEMP_PYPKG_DIR}/*.whl ${DEST_ROOT}/
-
-    # Bundle external shared libraries into the wheels
-    for whl in ${TEMP_PYPKG_DIR}/*.whl; do
-        repair_wheel_ "$whl" "${DEST_ROOT}/" "${PLAT}"
-    done
-
-    # run_command rm -rf ${TEMP_PYPKG_DIR}
-    # trap -- EXIT
-
-    # Copy cpp packages and examples
-    mkdir -p ${DEST_ROOT}/examples/cpp
-    cp -P -r ${BUILD_ROOT}/libcucim/install ${DEST_ROOT}/
-    cp -P -r ${BUILD_ROOT}/cuslide/install/lib/*.so ${DEST_ROOT}/install/lib/
-    cp -P -r ${BUILD_ROOT}/cumed/install/lib/*.so ${DEST_ROOT}/install/lib/
-    cp -r ${SRC_ROOT}/examples/cpp/tiff_image ${DEST_ROOT}/examples/cpp/
-
-    cp ${BUILD_ROOT}/libcucim/CMakeLists.txt.examples.release ${DEST_ROOT}/examples/cpp/CMakeLists.txt
-
-    # # Install packages and test
-    # for PYBIN in /opt/python/*/bin/; do
-    #     "${PYBIN}/pip" install python-manylinux-demo --no-index -f /io/wheelhouse
-    #     (cd "$HOME"; "${PYBIN}/nosetests" pymanylinuxdemo)
-    # done
-    # python setup.py bdist_wheel -p manylinux2014-x86_64
-
-    eval "${old_opt}" # restore old shopts
-}
-
-build_package_desc() { echo 'Build package for release (& gen_docs)
-'
-}
-build_package() {
-    local SRC_ROOT=${1:-${TOP}}
-    local BUILD_ROOT=${2:-${TOP}/temp}
-    local DEST_ROOT=${3:-${TOP}/dist}
-    local CUCIM_SDK_PATH=${4:-${BUILD_ROOT}/libcucim}
-
-    # Clean dist folder
-    mkdir -p ${DEST_ROOT}
-    [ -n "${DEST_ROOT}" ] && run_command sudo rm -rf ${DEST_ROOT}/*
-
-    build_python_package ${SRC_ROOT} ${BUILD_ROOT} ${DEST_ROOT} ${CUCIM_SDK_PATH}
-    gen_docs ${DEST_ROOT}/docs
-    copy_data_ ${SRC_ROOT} ${BUILD_ROOT} ${DEST_ROOT}
-
-    # Copy the built wheel file into ${TOP}/notebooks
-    run_command cp ${DEST_ROOT}/*.whl ${TOP}/notebooks/
-}
-
-copy_data_() {
-    c_echo W "Copy necessary files for packaging..."
-    local SRC_ROOT=${1:-${TOP}}
-    local BUILD_ROOT=${2:-${TOP}/temp}
-    local DEST_ROOT=${3:-${TOP}/dist}
-
-    # Create notebooks folder (add notebooks and scripts)
-    run_command mkdir -p ${DEST_ROOT}/notebooks/static_images
-    run_command cp $(git ls-files ${SRC_ROOT}/notebooks/*.ipynb) ${DEST_ROOT}/notebooks/
-    run_command cp ${SRC_ROOT}/notebooks/static_images/*.png ${DEST_ROOT}/notebooks/static_images/
-
-    # Create docker folder
-    run_command mkdir -p ${DEST_ROOT}/docker
-    run_command cp ${SRC_ROOT}/docker/*-jupyter{,-gds,.txt} ${DEST_ROOT}/docker/
-    run_command cp ${SRC_ROOT}/docker/*-claratrain{,.txt} ${DEST_ROOT}/docker/
-    run_command cp ${SRC_ROOT}/docker/*-cmake ${DEST_ROOT}/docker/
-    run_command cp ${SRC_ROOT}/docker/cufile.json ${DEST_ROOT}/docker/ # Copy cufile.json
-
-    # Copy main script
-    run_command cp ${SRC_ROOT}/scripts/run-dist ${DEST_ROOT}/run
-
-    # Create .dockerignore to speed up docker run
-    echo "notebooks" > ${DEST_ROOT}/.dockerignore
-
-    # Copy license files
-    run_command cp -r ${SRC_ROOT}/3rdparty ${SRC_ROOT}/LICENSE* ${DEST_ROOT}/
-}
-
-
 #==================================================================================
 # Section: Test
 #==================================================================================
@@ -941,269 +700,6 @@ download_testdata() {
     fi
 }
 
-launch_notebooks_desc() { echo 'Launch jupyter notebooks
-
-Arguments:
-  -p - port number
-  -h - hostname to serve documentation on (default: 0.0.0.0)
-  -g - launch GDS-enabled container
-'
-}
-launch_notebooks() {
-    local OPTIND
-    local port=$(get_unused_ports 1 10000 10030)
-    local host='0.0.0.0'
-    local gds_postfix=''
-    local gds_nvme_path=''
-
-    while getopts 'p:h:g:' option;
-    do
-        case "${option}" in
-            p)
-                port="$OPTARG"
-                ;;
-            h)
-                host="$OPTARG"
-                ;;
-            g)
-                gds_postfix='-gds'
-                [ -z "$OPTARG" ] && c_echo_err R "Please specify NVMe path!" && return 1
-                gds_nvme_path=$(readlink -f "$OPTARG")
-                [ ! -d "$gds_nvme_path" ] && c_echo_err R "Folder $gds_nvme_path doesn't exist!" && return 1
-
-                # Copy cufile SDK from host system to temp/cuda
-                copy_gds_files_
-                ;;
-            *)
-                return 1
-        esac
-    done
-    shift $((OPTIND-1))
-
-    download_testdata
-
-    run_command cp ${TOP}/dist/*.whl ${TOP}/notebooks
-
-    run_command docker build --runtime nvidia -t cucim-jupyter${gds_postfix} -f ${TOP}/docker/Dockerfile-jupyter${gds_postfix}-dev ${TOP}
-
-    [ $? -ne 0 ] && return 1
-
-    c_echo W "Port " G "$port" W " would be used...(" B "http://$(hostname -I | cut -d' ' -f 1):${port}" W ")"
-
-    if [ -z "${gds_postfix}" ]; then
-        run_command docker run --runtime nvidia --gpus all -it --rm \
-            -v ${TOP}/notebooks:/notebooks \
-            -p ${port}:${port} \
-            cucim-jupyter \
-            -c "echo -n 'Enter New Password: '; jupyter lab --ServerApp.password=\"\$(python3 -u -c \"from jupyter_server.auth import passwd;pw=input();print(passwd(pw));\" | egrep 'sha|argon')\" --ServerApp.root_dir=/notebooks --allow-root --port=${port} --ip=${host} --no-browser"
-    else
-        local MNT_PATH=/nvme
-        local GDS_IMAGE=cucim-jupyter${gds_postfix}
-
-        local BUILD_VER=`uname -r`
-        local NV_DRIVER=`nvidia-smi -q -i 0 | sed -n 's/Driver Version.*: *\(.*\) *$/\1/p'`
-        echo "using nvidia driver version $NV_DRIVER on kernel $BUILD_VER"
-
-        local ofed_version=$(ofed_info -s | grep MLNX)
-        if [ $? -eq 0 ]; then
-            local rdma_core=$(dpkg -s libibverbs-dev | grep "Source: rdma-core")
-            if [ $? -eq 0 ]; then
-                local CONFIG_MOFED_VERSION=$(echo $ofed_version | cut -d '-' -f 2)
-                echo "Found MOFED version $CONFIG_MOFED_VERSION"
-            fi
-            local MLNX_SRCS="--volume /usr/src/mlnx-ofed-kernel-${CONFIG_MOFED_VERSION}:/usr/src/mlnx-ofed-kernel-${CONFIG_MOFED_VERSION}:ro"
-            local MOFED_DEVS="--net=host --volume /sys/class/infiniband_verbs:/sys/class/infiniband_verbs/ "
-        fi
-
-        docker run \
-            --ipc host \
-            -it \
-            --rm \
-            --gpus all \
-            -v ${TOP}/notebooks:/notebooks \
-            -p ${port}:${port} \
-            --volume /run/udev:/run/udev:ro \
-            --volume /sys/kernel/config:/sys/kernel/config/ \
-            --volume /usr/src/nvidia-$NV_DRIVER:/usr/src/nvidia-$NV_DRIVER:ro ${MLNX_SRCS}\
-            --volume /dev:/dev:ro \
-            --privileged \
-            --env NV_DRIVER=${NV_DRIVER} \
-            --volume /lib/modules/$BUILD_VER/:/lib/modules/$BUILD_VER \
-            --volume "${MNT_PATH}:/notebooks/nvme:rw" \
-            ${MOFED_DEVS} \
-            ${GDS_IMAGE} \
-            -c "echo -n 'Enter New Password: '; jupyter lab --ServerApp.password=\"\$(python3 -u -c \"from jupyter_server.auth import passwd;pw=input();print(passwd(pw));\" | egrep 'sha|argon')\" --ServerApp.root_dir=/notebooks --allow-root --port=${port} --ip=${host} --no-browser"
-    fi
-
-}
-
-#==================================================================================
-# Section: Documentation
-#==================================================================================
-
-install_tox_() {
-    if ! command -v tox > /dev/null; then
-        c_echo G "tox" W " doesn't exists. Installing " G "tox" W "..."
-        if [ -n "${CONDA_PREFIX}" ]; then
-            run_command pip3 install tox
-        else
-            if [ -n "${VIRTUAL_ENV}" ]; then
-                run_command pip3 install tox
-            else
-                run_command pip3 install --user tox
-            fi
-        fi
-        hash -r
-    fi
-}
-
-gen_docs_desc() { echo 'Generate document
-
-Generated docs would be avaialable at ${TOP}/dist/docs.
-
-Returns:
-    None
-
-    Exit code:
-        exit code returned from generating document
-'
-}
-gen_docs() {
-    local OUTPUT_FOLDER=${1:-${TOP}/dist/docs}
-    local ret=0
-    pushd ${TOP}/python/cucim > /dev/null
-
-    # Install prerequisites
-    install_tox_
-
-    # Remove existing files in dist/docs
-    run_command rm -rf ${OUTPUT_FOLDER}/*
-
-    # Copy notebook files to python/cucim/dist/docs/notebooks
-    run_command mkdir -p ${TOP}/python/cucim/docs/notebooks
-    run_command rm -rf ${TOP}/python/cucim/docs/notebooks/*
-    run_command mkdir -p ${TOP}/python/cucim/docs/notebooks/static_images
-    run_command cp -r $(git ls-files ${TOP}/notebooks/*.ipynb) ${TOP}/python/cucim/docs/notebooks/
-    run_command cp -r ${TOP}/notebooks/static_images/*.png ${TOP}/python/cucim/docs/notebooks/static_images/
-
-    tox -e docs -- ${OUTPUT_FOLDER}
-    ret=$?
-    # Remove jupyter_execute folder explicitly until the issue is solved
-    # https://github.com/executablebooks/MyST-NB/issues/129
-    rm -rf $(dirname ${OUTPUT_FOLDER})/jupyter_execute
-
-    popd > /dev/null
-    return $ret
-}
-
-gen_docs_dev_desc() { echo 'Generate document
-
-Launch dev-server for sphinx.
-
-Generated docs would be avaialable at ${TOP}/python/cucim/dist/docs.
-
-Arguments:
-  -p - port number
-  -h - hostname to serve documentation on (default: 0.0.0.0)
-Returns:
-    None
-
-    Exit code:
-        exit code returned from generating document
-'
-}
-gen_docs_dev() {
-    local OPTIND
-    local port=9999
-    local host=0.0.0.0
-
-    while getopts 'p:h:' option;
-    do
-        case "${option}" in
-            p)
-                port="$OPTARG"
-                ;;
-            h)
-                host="$OPTARG"
-                ;;
-            *)
-                echo_err R "Invalid option!"
-                return 1
-        esac
-    done
-
-    pushd ${TOP}/python/cucim > /dev/null
-
-    # Install prerequisites
-    install_tox_
-
-    # Remove existing files in python/cucim/dist/docs
-    run_command rm -rf ${TOP}/python/cucim/dist/docs/*
-    # Copy notebook files to python/cucim/dist/docs/notebooks
-    run_command mkdir -p ${TOP}/python/cucim/docs/notebooks/static_images
-    run_command rm -rf ${TOP}/python/cucim/docs/notebooks/*
-    run_command cp -r $(git ls-files ${TOP}/notebooks/*.ipynb) ${TOP}/python/cucim/docs/notebooks/
-    run_command cp -r ${TOP}/notebooks/static_images/*.png ${TOP}/python/cucim/docs/notebooks/static_images/
-
-    run_command tox -e docs-dev -- --port ${port} --host ${host} docs dist/docs
-    popd > /dev/null
-}
-
-publish_docs_desc() { echo 'Publish generated documents to the server
-
-Publish generated documents to $GITLAB_PUBLISH_PROJECT_URL
-The web page is available at the followings:
-- $GITLAB_PUBLISH_PROJECT_URL
-
-Arguments:
-  $1 - If specified, force creating a tag with the specified tag name.
-       Use "latest" tag to make public documents up to date.
-
-Returns:
-    None
-
-    Exit code:
-        exit code returned from publishing documents
-'
-}
-publish_docs() {
-    local release_version="$(cat ${TOP}/VERSION)"
-    local release_tag="${1:-v${release_version}}"
-
-    # Import secrets
-    import_env_vars_ || return 1
-
-    c_echo W "Publishing documents to Gitlab Pages..."
-    TEMP_DOCS_DIR=$(mktemp -d)
-    c_echo r "TEMP_DOCS_DIR=${TEMP_DOCS_DIR}"
-    trap 'rm -rf ${TEMP_DOCS_DIR}' EXIT
-
-    pushd ${TEMP_DOCS_DIR} > /dev/null
-
-    git clone ${GITLAB_PUBLISH_GIT_URL} --branch init --single-branch
-    cd ${GITLAB_PUBLISH_PROJECT_NAME}
-    mkdir -p dist/docs
-    cp -rf ${TOP}/dist/docs/* dist/docs/
-    git checkout -b pages
-    git add dist/docs
-    git commit -am "Upload home page v$(cat ${TOP}/VERSION)"
-    git tag -f ${release_tag}
-    git push -f origin pages
-    git push -f origin ${release_tag}
-    popd > /dev/null
-
-    # Create tag if custom (such as 'latest') tag is specified
-    if [ -n "${1:-}" ]; then
-        git tag -f "$1"
-        git push -f origin "$1"
-    fi
-
-    run_command rm -rf ${TEMP_DOCS_DIR}
-    trap -- EXIT
-
-    c_echo W "Checkout the published webpage!"
-}
-
 #==================================================================================
 # Section: Release
 #==================================================================================
@@ -1230,130 +726,6 @@ update_version() {
     return $ret
 }
 
-import_env_vars_() {
-    if [ ! -e ${TOP}/.env ]; then
-        c_echo_err "File " G "${TOP}/.env " Z "is not found.\n" R \
-            "Please create the file with the following environment variable!"
-        c_echo_err " - " W "GITLAB_ACCESS_TOKEN"
-        c_echo_err " - " W "GITLAB_PUBLISH_SERVER"
-        c_echo_err " - " W "GITLAB_PUBLISH_PROJECT_NAME"
-        c_echo_err " - " W "GITLAB_PUBLISH_PROJECT_ID"
-        c_echo_err " - " W "GITLAB_PUBLISH_PROJECT_URL"
-        c_echo_err " - " W "GITLAB_PUBLISH_GIT_URL"
-        return 1
-    fi
-
-    # Import environment variables
-    . ${TOP}/.env
-}
-release_package_desc() { echo 'Release generated packages to the server
-
-Release generated package files to $GITLAB_PUBLISH_PROJECT_URL
-This leverages "release" environment in tox and uses
-  https://gitlab.com/alelec/gitlab-release
-for uploading files to the server.
-
-Returns:
-    None
-
-    Exit code:
-        exit code returned from releasing packages
-'
-}
-release_package() {
-    local release_version="$(cat ${TOP}/VERSION)"
-    local release_tag="v${release_version}"
-    local wheel_file_name="cucim-${release_version}-py3-none-manylinux2014_x86_64.whl"
-    local package_file_name="cuCIM-${release_tag}-linux.tar.gz"
-    local package_data
-    local release_note_md="${TOP}/python/cucim/docs/release_notes/${release_tag}.md"
-
-    # Import secrets
-    import_env_vars_ || return 1
-
-    # Publish docs first to create a release tag
-    publish_docs
-
-    old_opt="$(shopt -op errexit);$(shopt -op nounset)" # save old shopts
-    set -eu
-    trap 'eval "${old_opt}"; unset old_opt' EXIT
-
-    pushd ${TOP}/python/cucim/ > /dev/null
-
-    # https://github.com/gitlabhq/gitlabhq/blob/master/doc/api/releases/index.md
-    # https://docs.gitlab.com/ee/api/releases/index.html#delete-a-release
-
-    # install jq if not exists
-    if ! command -v jq > /dev/null; then
-        c_echo G "jq" W " doesn't exists. Installing " G "jq" W "..."
-        sudo apt-get install -y jq
-        hash -r
-    fi
-
-    if curl --silent --header "PRIVATE-TOKEN: ${GITLAB_ACCESS_TOKEN}" "${GITLAB_PUBLISH_SERVER}/api/v4/projects/${GITLAB_PUBLISH_PROJECT_ID}/releases/${release_tag}" | jq '.name' -e > /dev/null; then
-        read -n 1 -r -p "$(c_str R "Do you want to delete existing release " G "${release_tag}" R " (y/n)?")"
-        echo
-        if [[ $REPLY =~ ^[Yy]$ ]]
-        then
-            c_echo W "Removing release " G "${release_tag}..."
-            curl --request DELETE --header "PRIVATE-TOKEN: ${GITLAB_ACCESS_TOKEN}" "${GITLAB_PUBLISH_SERVER}/api/v4/projects/${GITLAB_PUBLISH_PROJECT_ID}/releases/${release_tag}" | jq
-        else
-            return
-        fi
-    fi
-
-    run_command cd ${TOP}/dist
-    run_command rm -rf ${package_file_name}
-    run_command tar -czvf ${package_file_name} --exclude='*.gz' ./* ./.dockerignore # --exclude="*.whl"
-    package_url="${GITLAB_PUBLISH_SERVER}$(curl --request POST \
-        --header "PRIVATE-TOKEN: ${GITLAB_ACCESS_TOKEN}" \
-        --form "file=@${package_file_name}" \
-        "${GITLAB_PUBLISH_SERVER}/api/v4/projects/${GITLAB_PUBLISH_PROJECT_ID}/uploads" \
-        | jq '.full_path' -r)"
-
-    wheel_url="${GITLAB_PUBLISH_SERVER}$(curl --request POST \
-        --header "PRIVATE-TOKEN: ${GITLAB_ACCESS_TOKEN}" \
-        --form "file=@${wheel_file_name}" \
-        "${GITLAB_PUBLISH_SERVER}/api/v4/projects/${GITLAB_PUBLISH_PROJECT_ID}/uploads" \
-        | jq '.full_path' -r)"
-
-    package_data="$(echo "
-import json
-release_note = open('${release_note_md}').read()
-data = {'tag_name': '${release_tag}',
-        'description': release_note,
-        'assets': {
-            'links': [
-                {'name': '${package_file_name}',
-                 'url': '${package_url}'
-                },
-                {'name': '${wheel_file_name}',
-                 'url': '${wheel_url}'
-                }
-            ]
-        }
-       }
-print(json.dumps(data))
-" | python -)"
-    c_echo b "${package_data}"
-
-    # Force-push tag
-    git tag -f ${release_tag} && git push -f origin ${release_tag}
-
-    # https://docs.gitlab.com/ee/api/releases/#create-a-release
-    curl --header 'Content-Type: application/json' --header "PRIVATE-TOKEN: ${GITLAB_ACCESS_TOKEN}" \
-        --data "${package_data}" \
-        --request POST "${GITLAB_PUBLISH_SERVER}/api/v4/projects/${GITLAB_PUBLISH_PROJECT_ID}/releases" | jq
-
-    popd > /dev/null
-
-    # tox -c ${TOP}/python/cucim -e release -- ${TOP}/run release_package_to_gitlab
-
-    eval "${old_opt}" # restore old shopts
-    trap -- EXIT
-    unset old_opt
-}
-
 parse_args() {
     local OPTIND
     while getopts 'yh' option;
@@ -1418,21 +790,6 @@ main() {
             print_cmd_help_messages "${ARGS[@]}"
             exit 0
            ;;
-        package)
-            build_package "${ARGS[@]}"
-            ;;
-        notebooks)
-            launch_notebooks "${ARGS[@]}"
-            ;;
-        docs)
-            gen_docs "${ARGS[@]}"
-            ;;
-        docs:dev)
-            gen_docs_dev "${ARGS[@]}"
-            ;;
-        release)
-            release_package "${ARGS[@]}"
-            ;;
         ''|main)
             print_usage
             ;;
diff --git a/scripts/auditwheel_repair.py b/scripts/auditwheel_repair.py
index f6f4bac65..c2461dc63 100644
--- a/scripts/auditwheel_repair.py
+++ b/scripts/auditwheel_repair.py
@@ -14,10 +14,7 @@
 # limitations under the License.
 
 import functools
-import glob
 import re
-import sys
-from os.path import join
 from unittest.mock import patch
 
 import auditwheel.elfutils
@@ -57,12 +54,12 @@
 
 # Parameters
-PYTHON_EXTENSION_LIBRARIES = [
-    r'cucim/libcucim\.so\.\d{1,2}'
-]
+PYTHON_EXTENSION_LIBRARIES = [r"cucim/libcucim\.so\.\d{1,2}"]
 
 # 1) auditwheel.elfutils.elf_is_python_extension replacement
 orig_elf_is_python_extension = auditwheel.elfutils.elf_is_python_extension
+
+
 @functools.wraps(orig_elf_is_python_extension)
 def elf_is_python_extension(fn, elf):
     if any(map(lambda x: re.fullmatch(x, fn), PYTHON_EXTENSION_LIBRARIES)):
@@ -70,8 +67,11 @@ def elf_is_python_extension(fn, elf):
         return True, 3
     return orig_elf_is_python_extension(fn, elf)
 
+
 # 3) auditwheel.wheeltools.InWheelCtx.__enter__ replacement
 orig_inwheelctx_enter = InWheelCtx.__enter__
+
+
 @functools.wraps(orig_inwheelctx_enter)
 def inwheelctx_enter(self):
     rtn = orig_inwheelctx_enter(self)
@@ -90,12 +90,15 @@ def inwheelctx_enter(self):
 
     return rtn
 
+
 # # sys.argv replacement
-# testargs = ["auditwheel_repair.py", "repair", "--plat", "manylinux2014_x86_64", "-w", "wherehouse", "cuclara_image-0.1.1-py3-none-manylinux2014_x86_64.whl"]
+# testargs = ["auditwheel_repair.py", "repair", "--plat", "manylinux2014_x86_64", "-w", "wherehouse", "cuclara_image-0.1.1-py3-none-manylinux2014_x86_64.whl"]  # noqa: E501
 # with patch.object(sys, 'argv', testargs):
-if __name__ == '__main__':
+if __name__ == "__main__":
     # Patch
-    with patch.object(auditwheel.elfutils, 'elf_is_python_extension', elf_is_python_extension):
-        with patch.object(InWheelCtx, '__enter__', inwheelctx_enter):
+    with patch.object(
+        auditwheel.elfutils, "elf_is_python_extension", elf_is_python_extension
+    ):
+        with patch.object(InWheelCtx, "__enter__", inwheelctx_enter):
             main()
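A note on the pattern reformatted (but left functionally unchanged) in `scripts/auditwheel_repair.py` above: the script keeps a reference to an `auditwheel` internal, wraps it with `functools.wraps` so the replacement delegates to the original for every case it does not special-case, and installs the wrapper with `unittest.mock.patch.object` only for the duration of `main()`. The sketch below shows that same wrap-and-delegate technique in isolation; it uses `os.path.isfile` as a stand-in target, and the `/virtual/marker` path is purely illustrative (neither is part of cuCIM or auditwheel's repair logic).

```python
import functools
import os.path
from unittest.mock import patch

# Keep a reference to the original so the wrapper can delegate to it,
# mirroring orig_elf_is_python_extension / orig_inwheelctx_enter above.
orig_isfile = os.path.isfile


@functools.wraps(orig_isfile)
def isfile_with_override(path):
    # Special-case one input, then fall through to the real implementation.
    if path == "/virtual/marker":
        return True
    return orig_isfile(path)


if __name__ == "__main__":
    # patch.object installs the wrapper only inside the `with` block, the
    # same way auditwheel_repair.py patches around its single call to main().
    with patch.object(os.path, "isfile", isfile_with_override):
        assert os.path.isfile("/virtual/marker")       # wrapper answers
    assert not os.path.isfile("/virtual/marker")       # original restored
```

Because `patch.object` restores the original attribute when the block exits, the override cannot leak into other code running in the same process, which is why the script can reuse the stock `auditwheel` `main()` unmodified.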