diff --git a/poetry.lock b/poetry.lock index e56095003..2e14a367a 100644 --- a/poetry.lock +++ b/poetry.lock @@ -126,6 +126,25 @@ optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" version = "0.4.3" +[[package]] +category = "main" +description = "Composable style cycles" +name = "cycler" +optional = false +python-versions = "*" +version = "0.10.0" + +[package.dependencies] +six = "*" + +[[package]] +category = "main" +description = "Decorators for Humans" +name = "decorator" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*" +version = "4.4.1" + [[package]] category = "main" description = "Internationalized Domain Names in Applications (IDNA)" @@ -134,6 +153,25 @@ optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" version = "2.8" +[[package]] +category = "main" +description = "Library for reading and writing a wide range of image, video, scientific, and volumetric data formats." +name = "imageio" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +version = "2.6.1" + +[package.dependencies] +numpy = "*" +pillow = "*" + +[package.extras] +ffmpeg = ["imageio-ffmpeg"] +fits = ["astropy"] +full = ["astropy", "gdal", "imageio-ffmpeg", "itk"] +gdal = ["gdal"] +itk = ["itk"] + [[package]] category = "dev" description = "Read metadata from Python packages" @@ -164,6 +202,17 @@ pyproject = ["toml"] requirements = ["pipreqs", "pip-api"] xdg_home = ["appdirs (>=1.4.0)"] +[[package]] +category = "main" +description = "A fast implementation of the Cassowary constraint solver" +name = "kiwisolver" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +version = "1.1.0" + +[package.dependencies] +setuptools = "*" + [[package]] category = "dev" description = "A fast and thorough lazy object proxy." 
@@ -172,6 +221,21 @@ optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" version = "1.4.3" +[[package]] +category = "main" +description = "Python plotting package" +name = "matplotlib" +optional = false +python-versions = ">=3.6" +version = "3.1.2" + +[package.dependencies] +cycler = ">=0.10" +kiwisolver = ">=1.0.1" +numpy = ">=1.11" +pyparsing = ">=2.0.1,<2.0.4 || >2.0.4,<2.1.2 || >2.1.2,<2.1.6 || >2.1.6" +python-dateutil = ">=2.1" + [[package]] category = "dev" description = "McCabe checker, plugin for flake8" @@ -200,6 +264,30 @@ version = "6.2.0" fast = ["fastnumbers (>=2.0.0)"] icu = ["PyICU (>=1.0.0)"] +[[package]] +category = "main" +description = "Python package for creating and manipulating graphs and networks" +name = "networkx" +optional = false +python-versions = ">=3.5" +version = "2.4" + +[package.dependencies] +decorator = ">=4.3.0" + +[package.extras] +all = ["numpy", "scipy", "pandas", "matplotlib", "pygraphviz", "pydot", "pyyaml", "gdal", "lxml", "pytest"] +gdal = ["gdal"] +lxml = ["lxml"] +matplotlib = ["matplotlib"] +numpy = ["numpy"] +pandas = ["pandas"] +pydot = ["pydot"] +pygraphviz = ["pygraphviz"] +pytest = ["pytest"] +pyyaml = ["pyyaml"] +scipy = ["scipy"] + [[package]] category = "main" description = "Access a multitude of neuroimaging data formats" @@ -304,7 +392,7 @@ isort = ">=4.2.5,<5" mccabe = ">=0.6,<0.7" [[package]] -category = "dev" +category = "main" description = "Python parsing module" name = "pyparsing" optional = false @@ -336,6 +424,28 @@ version = ">=0.12" [package.extras] testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "requests", "xmlschema"] +[[package]] +category = "main" +description = "Extensions to the standard Python datetime module" +name = "python-dateutil" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +version = "2.8.1" + +[package.dependencies] +six = ">=1.5" + +[[package]] +category = "main" +description = "PyWavelets, wavelet transform module" +name = "pywavelets" +optional = false +python-versions = ">=3.5" +version = "1.1.1" + +[package.dependencies] +numpy = ">=1.13.3" + [[package]] category = "main" description = "YAML parser and emitter for Python" @@ -362,6 +472,27 @@ urllib3 = ">=1.21.1,<1.25.0 || >1.25.0,<1.25.1 || >1.25.1,<1.26" security = ["pyOpenSSL (>=0.14)", "cryptography (>=1.3.4)", "idna (>=2.0.0)"] socks = ["PySocks (>=1.5.6,<1.5.7 || >1.5.7)", "win-inet-pton"] +[[package]] +category = "main" +description = "Image processing routines for SciPy" +name = "scikit-image" +optional = false +python-versions = ">=3.6" +version = "0.16.2" + +[package.dependencies] +PyWavelets = ">=0.4.0" +imageio = ">=2.3.0" +matplotlib = ">=2.0.0,<3.0.0 || >3.0.0" +networkx = ">=2.0" +pillow = ">=4.3.0" +scipy = ">=0.19.0" + +[package.extras] +docs = ["sphinx (>=1.3,<1.7.8 || >1.7.8)", "numpydoc (>=0.9)", "sphinx-gallery", "sphinx-copybutton", "pytest-runner", "scikit-learn", "matplotlib (>=3.0.1)", "dask (>=0.15.0)", "cloudpickle (>=0.2.1)"] +optional = ["simpleitk", "astropy (>=1.2.0)", "tifffile", "qtpy", "pyamg", "dask (>=0.15.0)", "cloudpickle (>=0.2.1)"] +test = ["pytest (!=3.7.3)", "pytest-cov", "pytest-localserver", "flake8", "codecov"] + [[package]] category = "main" description = "SciPy: Scientific Library for Python" @@ -464,7 +595,7 @@ docs = ["sphinx", "jaraco.packaging (>=3.2)", "rst.linker (>=1.9)"] testing = ["pathlib2", "contextlib2", "unittest2"] [metadata] -content-hash = "fa3824bee7012c6b2f52d0972a77158fb0204c2dde9a12d8f0b0ec3030b13e16" +content-hash = 
"273409b6423b78dda553b1088ce6abbf6324ceabfd02f925bbd39f1d35b259aa" python-versions = "^3.6" [metadata.files] @@ -546,10 +677,22 @@ colorama = [ {file = "colorama-0.4.3-py2.py3-none-any.whl", hash = "sha256:7d73d2a99753107a36ac6b455ee49046802e59d9d076ef8e47b61499fa29afff"}, {file = "colorama-0.4.3.tar.gz", hash = "sha256:e96da0d330793e2cb9485e9ddfd918d456036c7149416295932478192f4436a1"}, ] +cycler = [ + {file = "cycler-0.10.0-py2.py3-none-any.whl", hash = "sha256:1d8a5ae1ff6c5cf9b93e8811e581232ad8920aeec647c37316ceac982b08cb2d"}, + {file = "cycler-0.10.0.tar.gz", hash = "sha256:cd7b2d1018258d7247a71425e9f26463dfb444d411c39569972f4ce586b0c9d8"}, +] +decorator = [ + {file = "decorator-4.4.1-py2.py3-none-any.whl", hash = "sha256:5d19b92a3c8f7f101c8dd86afd86b0f061a8ce4540ab8cd401fa2542756bce6d"}, + {file = "decorator-4.4.1.tar.gz", hash = "sha256:54c38050039232e1db4ad7375cfce6748d7b41c29e95a081c8a6d2c30364a2ce"}, +] idna = [ {file = "idna-2.8-py2.py3-none-any.whl", hash = "sha256:ea8b7f6188e6fa117537c3df7da9fc686d485087abf6ac197f9c46432f7e4a3c"}, {file = "idna-2.8.tar.gz", hash = "sha256:c357b3f628cf53ae2c4c05627ecc484553142ca23264e593d327bcde5e9c3407"}, ] +imageio = [ + {file = "imageio-2.6.1-py3-none-any.whl", hash = "sha256:c9763e5c187ecf74091c845626b0bdcc6130a20a0de7a86ae0108e2b5335ed3f"}, + {file = "imageio-2.6.1.tar.gz", hash = "sha256:f44eb231b9df485874f2ffd22dfd0c3c711e7de076516b9374edea5c65bc67ae"}, +] importlib-metadata = [ {file = "importlib_metadata-1.3.0-py2.py3-none-any.whl", hash = "sha256:d95141fbfa7ef2ec65cfd945e2af7e5a6ddbd7c8d9a25e66ff3be8e3daf9f60f"}, {file = "importlib_metadata-1.3.0.tar.gz", hash = "sha256:073a852570f92da5f744a3472af1b61e28e9f78ccf0c9117658dc32b15de7b45"}, @@ -558,6 +701,45 @@ isort = [ {file = "isort-4.3.21-py2.py3-none-any.whl", hash = "sha256:6e811fcb295968434526407adb8796944f1988c5b65e8139058f2014cbe100fd"}, {file = "isort-4.3.21.tar.gz", hash = "sha256:54da7e92468955c4fceacd0c86bd0ec997b0e1ee80d97f67c35a78b719dccab1"}, ] +kiwisolver = [ + {file = "kiwisolver-1.1.0-cp27-cp27m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:7f4dd50874177d2bb060d74769210f3bce1af87a8c7cf5b37d032ebf94f0aca3"}, + {file = "kiwisolver-1.1.0-cp27-cp27m-macosx_10_6_intel.whl", hash = "sha256:fe51b79da0062f8e9d49ed0182a626a7dc7a0cbca0328f612c6ee5e4711c81e4"}, + {file = "kiwisolver-1.1.0-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:f790f8b3dff3d53453de6a7b7ddd173d2e020fb160baff578d578065b108a05f"}, + {file = "kiwisolver-1.1.0-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:f2b22153870ca5cf2ab9c940d7bc38e8e9089fa0f7e5856ea195e1cf4ff43d5a"}, + {file = "kiwisolver-1.1.0-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:e8bf074363ce2babeb4764d94f8e65efd22e6a7c74860a4f05a6947afc020ff2"}, + {file = "kiwisolver-1.1.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:05b5b061e09f60f56244adc885c4a7867da25ca387376b02c1efc29cc16bcd0f"}, + {file = "kiwisolver-1.1.0-cp27-none-win32.whl", hash = "sha256:47b8cb81a7d18dbaf4fed6a61c3cecdb5adec7b4ac292bddb0d016d57e8507d5"}, + {file = "kiwisolver-1.1.0-cp27-none-win_amd64.whl", hash = "sha256:b64916959e4ae0ac78af7c3e8cef4becee0c0e9694ad477b4c6b3a536de6a544"}, + {file = "kiwisolver-1.1.0-cp34-cp34m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:682e54f0ce8f45981878756d7203fd01e188cc6c8b2c5e2cf03675390b4534d5"}, + {file = "kiwisolver-1.1.0-cp34-cp34m-manylinux1_i686.whl", hash = 
"sha256:d52e3b1868a4e8fd18b5cb15055c76820df514e26aa84cc02f593d99fef6707f"}, + {file = "kiwisolver-1.1.0-cp34-cp34m-manylinux1_x86_64.whl", hash = "sha256:8aa7009437640beb2768bfd06da049bad0df85f47ff18426261acecd1cf00897"}, + {file = "kiwisolver-1.1.0-cp34-none-win32.whl", hash = "sha256:26f4fbd6f5e1dabff70a9ba0d2c4bd30761086454aa30dddc5b52764ee4852b7"}, + {file = "kiwisolver-1.1.0-cp34-none-win_amd64.whl", hash = "sha256:79bfb2f0bd7cbf9ea256612c9523367e5ec51d7cd616ae20ca2c90f575d839a2"}, + {file = "kiwisolver-1.1.0-cp35-cp35m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:3b2378ad387f49cbb328205bda569b9f87288d6bc1bf4cd683c34523a2341efe"}, + {file = "kiwisolver-1.1.0-cp35-cp35m-macosx_10_6_intel.whl", hash = "sha256:aa716b9122307c50686356cfb47bfbc66541868078d0c801341df31dca1232a9"}, + {file = "kiwisolver-1.1.0-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:58e626e1f7dfbb620d08d457325a4cdac65d1809680009f46bf41eaf74ad0187"}, + {file = "kiwisolver-1.1.0-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:e3a21a720791712ed721c7b95d433e036134de6f18c77dbe96119eaf7aa08004"}, + {file = "kiwisolver-1.1.0-cp35-none-win32.whl", hash = "sha256:939f36f21a8c571686eb491acfffa9c7f1ac345087281b412d63ea39ca14ec4a"}, + {file = "kiwisolver-1.1.0-cp35-none-win_amd64.whl", hash = "sha256:9733b7f64bd9f807832d673355f79703f81f0b3e52bfce420fc00d8cb28c6a6c"}, + {file = "kiwisolver-1.1.0-cp36-cp36m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:acc4df99308111585121db217681f1ce0eecb48d3a828a2f9bbf9773f4937e9e"}, + {file = "kiwisolver-1.1.0-cp36-cp36m-macosx_10_6_intel.whl", hash = "sha256:9105ce82dcc32c73eb53a04c869b6a4bc756b43e4385f76ea7943e827f529e4d"}, + {file = "kiwisolver-1.1.0-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:f16814a4a96dc04bf1da7d53ee8d5b1d6decfc1a92a63349bb15d37b6a263dd9"}, + {file = "kiwisolver-1.1.0-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:400599c0fe58d21522cae0e8b22318e09d9729451b17ee61ba8e1e7c0346565c"}, + {file = "kiwisolver-1.1.0-cp36-none-win32.whl", hash = "sha256:db1a5d3cc4ae943d674718d6c47d2d82488ddd94b93b9e12d24aabdbfe48caee"}, + {file = "kiwisolver-1.1.0-cp36-none-win_amd64.whl", hash = "sha256:5a52e1b006bfa5be04fe4debbcdd2688432a9af4b207a3f429c74ad625022641"}, + {file = "kiwisolver-1.1.0-cp37-cp37m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:a02f6c3e229d0b7220bd74600e9351e18bc0c361b05f29adae0d10599ae0e326"}, + {file = "kiwisolver-1.1.0-cp37-cp37m-macosx_10_6_intel.whl", hash = "sha256:9491578147849b93e70d7c1d23cb1229458f71fc79c51d52dce0809b2ca44eea"}, + {file = "kiwisolver-1.1.0-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:5c7ca4e449ac9f99b3b9d4693debb1d6d237d1542dd6a56b3305fe8a9620f883"}, + {file = "kiwisolver-1.1.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:a0c0a9f06872330d0dd31b45607197caab3c22777600e88031bfe66799e70bb0"}, + {file = "kiwisolver-1.1.0-cp37-none-win32.whl", hash = "sha256:8944a16020c07b682df861207b7e0efcd2f46c7488619cb55f65882279119389"}, + {file = "kiwisolver-1.1.0-cp37-none-win_amd64.whl", hash = "sha256:d3fcf0819dc3fea58be1fd1ca390851bdb719a549850e708ed858503ff25d995"}, + {file = "kiwisolver-1.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:933df612c453928f1c6faa9236161a1d999a26cd40abf1dc5d7ebbc6dbfb8fca"}, + {file = "kiwisolver-1.1.0-cp38-cp38-manylinux1_i686.whl", hash = 
"sha256:d22702cadb86b6fcba0e6b907d9f84a312db9cd6934ee728144ce3018e715ee1"}, + {file = "kiwisolver-1.1.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:210d8c39d01758d76c2b9a693567e1657ec661229bc32eac30761fa79b2474b0"}, + {file = "kiwisolver-1.1.0-cp38-none-win32.whl", hash = "sha256:76275ee077772c8dde04fb6c5bc24b91af1bb3e7f4816fd1852f1495a64dad93"}, + {file = "kiwisolver-1.1.0-cp38-none-win_amd64.whl", hash = "sha256:3b15d56a9cd40c52d7ab763ff0bc700edbb4e1a298dc43715ecccd605002cf11"}, + {file = "kiwisolver-1.1.0.tar.gz", hash = "sha256:53eaed412477c836e1b9522c19858a8557d6e595077830146182225613b11a75"}, +] lazy-object-proxy = [ {file = "lazy-object-proxy-1.4.3.tar.gz", hash = "sha256:f3900e8a5de27447acbf900b4750b0ddfd7ec1ea7fbaf11dfa911141bc522af0"}, {file = "lazy_object_proxy-1.4.3-cp27-cp27m-macosx_10_13_x86_64.whl", hash = "sha256:a2238e9d1bb71a56cd710611a1614d1194dc10a175c1e08d75e1a7bcc250d442"}, @@ -581,6 +763,21 @@ lazy-object-proxy = [ {file = "lazy_object_proxy-1.4.3-cp38-cp38-win32.whl", hash = "sha256:5541cada25cd173702dbd99f8e22434105456314462326f06dba3e180f203dfd"}, {file = "lazy_object_proxy-1.4.3-cp38-cp38-win_amd64.whl", hash = "sha256:59f79fef100b09564bc2df42ea2d8d21a64fdcda64979c0fa3db7bdaabaf6239"}, ] +matplotlib = [ + {file = "matplotlib-3.1.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:98c2ffeab8b79a4e3a0af5dd9939f92980eb6e3fec10f7f313df5f35a84dacab"}, + {file = "matplotlib-3.1.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:2d6ab54015a7c0d727c33e36f85f5c5e4172059efdd067f7527f6e5d16ad01aa"}, + {file = "matplotlib-3.1.2-cp36-cp36m-win32.whl", hash = "sha256:819d4860315468b482f38f1afe45a5437f60f03eaede495d5ff89f2eeac89500"}, + {file = "matplotlib-3.1.2-cp36-cp36m-win_amd64.whl", hash = "sha256:08ccc8922eb4792b91c652d3e6d46b1c99073f1284d1b6705155643e8046463a"}, + {file = "matplotlib-3.1.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:1f9e885bfa1b148d16f82a6672d043ecf11197f6c71ae222d0546db706e52eb2"}, + {file = "matplotlib-3.1.2-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:8cc0e44905c2c8fda5637cad6f311eb9517017515a034247ab93d0cf99f8bb7a"}, + {file = "matplotlib-3.1.2-cp37-cp37m-win32.whl", hash = "sha256:161dcd807c0c3232f4dcd4a12a382d52004a498174cbfafd40646106c5bcdcc8"}, + {file = "matplotlib-3.1.2-cp37-cp37m-win_amd64.whl", hash = "sha256:ee59b7bb9eb75932fe3787e54e61c99b628155b0cedc907864f24723ba55b309"}, + {file = "matplotlib-3.1.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5d2e408a2813abf664bd79431107543ecb449136912eb55bb312317edecf597e"}, + {file = "matplotlib-3.1.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:d59bb0e82002ac49f4152963f8a1079e66794a4f454457fd2f0dcc7bf0797d30"}, + {file = "matplotlib-3.1.2-cp38-cp38-win32.whl", hash = "sha256:61c8b740a008218eb604de518eb411c4953db0cb725dd0b32adf8a81771cab9e"}, + {file = "matplotlib-3.1.2-cp38-cp38-win_amd64.whl", hash = "sha256:80f10af8378fccc136da40ea6aa4a920767476cdfb3241acb93ef4f0465dbf57"}, + {file = "matplotlib-3.1.2.tar.gz", hash = "sha256:8e8e2c2fe3d873108735c6ee9884e6f36f467df4a143136209cff303b183bada"}, +] mccabe = [ {file = "mccabe-0.6.1-py2.py3-none-any.whl", hash = "sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42"}, {file = "mccabe-0.6.1.tar.gz", hash = "sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"}, @@ -593,6 +790,10 @@ natsort = [ {file = "natsort-6.2.0-py2.py3-none-any.whl", hash = "sha256:4f0de45639f1fa43dede43ae1919bcab66e0a6fc19b68ea24f0a250814b4f176"}, {file = "natsort-6.2.0.tar.gz", hash = 
"sha256:58c6fb2f355117e88a19808394ec1ed30a2ff881bdd2c81c436952caebd30668"}, ] +networkx = [ + {file = "networkx-2.4-py3-none-any.whl", hash = "sha256:cdfbf698749a5014bf2ed9db4a07a5295df1d3a53bf80bf3cbd61edf9df05fa1"}, + {file = "networkx-2.4.tar.gz", hash = "sha256:f8f4ff0b6f96e4f9b16af6b84622597b5334bf9cae8cf9b2e42e7985d5c95c64"}, +] nibabel = [ {file = "nibabel-2.5.1-py2-none-any.whl", hash = "sha256:88a8867aa5a1eec70dc74c880d149539918b2983430bf3c3f3bca0a46bd9a7f4"}, {file = "nibabel-2.5.1-py3-none-any.whl", hash = "sha256:44678e9ec6151643736329103987c70f4a7b5b251e2ebb7012648365f29e2324"}, @@ -693,6 +894,33 @@ pytest = [ {file = "pytest-5.3.2-py3-none-any.whl", hash = "sha256:e41d489ff43948babd0fad7ad5e49b8735d5d55e26628a58673c39ff61d95de4"}, {file = "pytest-5.3.2.tar.gz", hash = "sha256:6b571215b5a790f9b41f19f3531c53a45cf6bb8ef2988bc1ff9afb38270b25fa"}, ] +python-dateutil = [ + {file = "python-dateutil-2.8.1.tar.gz", hash = "sha256:73ebfe9dbf22e832286dafa60473e4cd239f8592f699aa5adaf10050e6e1823c"}, + {file = "python_dateutil-2.8.1-py2.py3-none-any.whl", hash = "sha256:75bb3f31ea686f1197762692a9ee6a7550b59fc6ca3a1f4b5d7e32fb98e2da2a"}, +] +pywavelets = [ + {file = "PyWavelets-1.1.1-cp35-cp35m-macosx_10_6_intel.whl", hash = "sha256:35959c041ec014648575085a97b498eafbbaa824f86f6e4a59bfdef8a3fe6308"}, + {file = "PyWavelets-1.1.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:55e39ec848ceec13c9fa1598253ae9dd5c31d09dfd48059462860d2b908fb224"}, + {file = "PyWavelets-1.1.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:c06d2e340c7bf8b9ec71da2284beab8519a3908eab031f4ea126e8ccfc3fd567"}, + {file = "PyWavelets-1.1.1-cp35-cp35m-win32.whl", hash = "sha256:be105382961745f88d8196bba5a69ee2c4455d87ad2a2e5d1eed6bd7fda4d3fd"}, + {file = "PyWavelets-1.1.1-cp35-cp35m-win_amd64.whl", hash = "sha256:076ca8907001fdfe4205484f719d12b4a0262dfe6652fa1cfc3c5c362d14dc84"}, + {file = "PyWavelets-1.1.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:7947e51ca05489b85928af52a34fe67022ab5b81d4ae32a4109a99e883a0635e"}, + {file = "PyWavelets-1.1.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:9e2528823ccf5a0a1d23262dfefe5034dce89cd84e4e124dc553dfcdf63ebb92"}, + {file = "PyWavelets-1.1.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:80b924edbc012ded8aa8b91cb2fd6207fb1a9a3a377beb4049b8a07445cec6f0"}, + {file = "PyWavelets-1.1.1-cp36-cp36m-win32.whl", hash = "sha256:d510aef84d9852653d079c84f2f81a82d5d09815e625f35c95714e7364570ad4"}, + {file = "PyWavelets-1.1.1-cp36-cp36m-win_amd64.whl", hash = "sha256:889d4c5c5205a9c90118c1980df526857929841df33e4cd1ff1eff77c6817a65"}, + {file = "PyWavelets-1.1.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:68b5c33741d26c827074b3d8f0251de1c3019bb9567b8d303eb093c822ce28f1"}, + {file = "PyWavelets-1.1.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:18a51b3f9416a2ae6e9a35c4af32cf520dd7895f2b69714f4aa2f4342fca47f9"}, + {file = "PyWavelets-1.1.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:cfe79844526dd92e3ecc9490b5031fca5f8ab607e1e858feba232b1b788ff0ea"}, + {file = "PyWavelets-1.1.1-cp37-cp37m-win32.whl", hash = "sha256:720dbcdd3d91c6dfead79c80bf8b00a1d8aa4e5d551dc528c6d5151e4efc3403"}, + {file = "PyWavelets-1.1.1-cp37-cp37m-win_amd64.whl", hash = "sha256:bc5e87b72371da87c9bebc68e54882aada9c3114e640de180f62d5da95749cd3"}, + {file = "PyWavelets-1.1.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:98b2669c5af842a70cfab33a7043fcb5e7535a690a00cd251b44c9be0be418e5"}, + {file = "PyWavelets-1.1.1-cp38-cp38-manylinux1_i686.whl", hash = 
"sha256:e02a0558e0c2ac8b8bbe6a6ac18c136767ec56b96a321e0dfde2173adfa5a504"}, + {file = "PyWavelets-1.1.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:6162dc0ae04669ea04b4b51420777b9ea2d30b0a9d02901b2a3b4d61d159c2e9"}, + {file = "PyWavelets-1.1.1-cp38-cp38-win32.whl", hash = "sha256:79f5b54f9dc353e5ee47f0c3f02bebd2c899d49780633aa771fed43fa20b3149"}, + {file = "PyWavelets-1.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:935ff247b8b78bdf77647fee962b1cc208c51a7b229db30b9ba5f6da3e675178"}, + {file = "PyWavelets-1.1.1.tar.gz", hash = "sha256:1a64b40f6acb4ffbaccce0545d7fc641744f95351f62e4c6aaa40549326008c9"}, +] pyyaml = [ {file = "PyYAML-5.2-cp27-cp27m-win32.whl", hash = "sha256:35ace9b4147848cafac3db142795ee42deebe9d0dad885ce643928e88daebdcc"}, {file = "PyYAML-5.2-cp27-cp27m-win_amd64.whl", hash = "sha256:ebc4ed52dcc93eeebeae5cf5deb2ae4347b3a81c3fa12b0b8c976544829396a4"}, @@ -710,6 +938,23 @@ requests = [ {file = "requests-2.22.0-py2.py3-none-any.whl", hash = "sha256:9cf5292fcd0f598c671cfc1e0d7d1a7f13bb8085e9a590f48c010551dc6c4b31"}, {file = "requests-2.22.0.tar.gz", hash = "sha256:11e007a8a2aa0323f5a921e9e6a2d7e4e67d9877e85773fba9ba6419025cbeb4"}, ] +scikit-image = [ + {file = "scikit-image-0.16.2.tar.gz", hash = "sha256:dd7fbd32da74d4e9967dc15845f731f16e7966cee61f5dc0e12e2abb1305068c"}, + {file = "scikit_image-0.16.2-cp36-cp36m-macosx_10_6_intel.whl", hash = "sha256:0808ab5f8218d91a1c008036993636535a37efd67a52ab0f2e6e3f4b7e75aeda"}, + {file = "scikit_image-0.16.2-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:3af3d781ce085573ced37b2b5b9abfd32ce3d4723bd17f37e829025d189b0421"}, + {file = "scikit_image-0.16.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:063d1c20fcd53762f82ee58c29783ae4e8f6fbed445b41b704fa33b6f355729d"}, + {file = "scikit_image-0.16.2-cp36-cp36m-win32.whl", hash = "sha256:2a54bea469eb1b611bee1ce36e60710f5f94f29205bc5bd67a51793909b1e62b"}, + {file = "scikit_image-0.16.2-cp36-cp36m-win_amd64.whl", hash = "sha256:2d346d49b6852cffb47cbde995e2696d5b07f688d8c057a0a4548abf3a98f920"}, + {file = "scikit_image-0.16.2-cp37-cp37m-macosx_10_6_intel.whl", hash = "sha256:8b2b768b02c6b7476f2e16ddd91f827d3817aef73f82cf28bff7a8dcdfd8c55c"}, + {file = "scikit_image-0.16.2-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:3ad2efa792ab8de5fcefe6f4f5bc1ab64c411cdb5c829ce1526ab3a5a7729627"}, + {file = "scikit_image-0.16.2-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:2aa962aa82d815606d7dad7f045f5d7ca55c65b4320d47e15a98fc92612c2d6c"}, + {file = "scikit_image-0.16.2-cp37-cp37m-win32.whl", hash = "sha256:e774377876cb258e8f4d63f7809863f961c98aa02263b3ff54a39483bc6f7d26"}, + {file = "scikit_image-0.16.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6786b127f33470fd843e644435522fbf43bce05c9f5527946c390ccb9e1cac27"}, + {file = "scikit_image-0.16.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a48fb0d34a090b578b87ffebab0fe035295c1945dbc2b28e1a55ea2cf6031751"}, + {file = "scikit_image-0.16.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:41e28db0136f29ecd305bef0408fdfc64be9d415e54f5099a95555c65f5c1865"}, + {file = "scikit_image-0.16.2-cp38-cp38-win32.whl", hash = "sha256:0715b7940778ba5d73da3908d60ddf2eb93863f7c394493a522fe56d3859295c"}, + {file = "scikit_image-0.16.2-cp38-cp38-win_amd64.whl", hash = "sha256:e18d73cc8893e2268b172c29f9aab530faf8cd3b7c11ae0bee3e763d719d35c5"}, +] scipy = [ {file = "scipy-1.4.0-cp35-cp35m-macosx_10_6_intel.whl", hash = "sha256:1b51721e5792c4d722e0cab4daf82187d5f22f2e42c56cd247398a16e4d2f48d"}, {file = "scipy-1.4.0-cp35-cp35m-manylinux1_i686.whl", hash = 
"sha256:cdbcfbe97a21fc6b3c55991a7bcd6a278a38bd197e2d274a45611cdc91beb5c0"}, diff --git a/pyproject.toml b/pyproject.toml index a179e77d1..e4d5264d4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -17,6 +17,7 @@ cluster_tools = "1.46" natsort = "^6.2.0" psutil = "^5.6.7" nibabel = "^2.5.1" +scikit-image = "^0.16.2" [tool.poetry.dev-dependencies] pylint = "2.3.1" diff --git a/testdata/simple_tiff_dataset/color/1/00000.tif b/testdata/simple_tiff_dataset/color/1/00000.tif new file mode 100644 index 000000000..ae964cd4c Binary files /dev/null and b/testdata/simple_tiff_dataset/color/1/00000.tif differ diff --git a/testdata/simple_tiff_dataset/color/1/00001.tif b/testdata/simple_tiff_dataset/color/1/00001.tif new file mode 100644 index 000000000..ae964cd4c Binary files /dev/null and b/testdata/simple_tiff_dataset/color/1/00001.tif differ diff --git a/testdata/simple_tiff_dataset/color/1/00002.tif b/testdata/simple_tiff_dataset/color/1/00002.tif new file mode 100644 index 000000000..ae964cd4c Binary files /dev/null and b/testdata/simple_tiff_dataset/color/1/00002.tif differ diff --git a/testdata/simple_tiff_dataset/color/1/00003.tif b/testdata/simple_tiff_dataset/color/1/00003.tif new file mode 100644 index 000000000..ae964cd4c Binary files /dev/null and b/testdata/simple_tiff_dataset/color/1/00003.tif differ diff --git a/testdata/simple_tiff_dataset/color/1/00004.tif b/testdata/simple_tiff_dataset/color/1/00004.tif new file mode 100644 index 000000000..ae964cd4c Binary files /dev/null and b/testdata/simple_tiff_dataset/color/1/00004.tif differ diff --git a/testdata/simple_tiff_dataset/color/1/00005.tif b/testdata/simple_tiff_dataset/color/1/00005.tif new file mode 100644 index 000000000..ae964cd4c Binary files /dev/null and b/testdata/simple_tiff_dataset/color/1/00005.tif differ diff --git a/testdata/simple_tiff_dataset/color/1/00006.tif b/testdata/simple_tiff_dataset/color/1/00006.tif new file mode 100644 index 000000000..ae964cd4c Binary files /dev/null and b/testdata/simple_tiff_dataset/color/1/00006.tif differ diff --git a/testdata/simple_tiff_dataset/color/1/00007.tif b/testdata/simple_tiff_dataset/color/1/00007.tif new file mode 100644 index 000000000..ae964cd4c Binary files /dev/null and b/testdata/simple_tiff_dataset/color/1/00007.tif differ diff --git a/testdata/simple_tiff_dataset/color/1/00008.tif b/testdata/simple_tiff_dataset/color/1/00008.tif new file mode 100644 index 000000000..ae964cd4c Binary files /dev/null and b/testdata/simple_tiff_dataset/color/1/00008.tif differ diff --git a/testdata/simple_tiff_dataset/color/1/00009.tif b/testdata/simple_tiff_dataset/color/1/00009.tif new file mode 100644 index 000000000..ae964cd4c Binary files /dev/null and b/testdata/simple_tiff_dataset/color/1/00009.tif differ diff --git a/testdata/simple_tiff_dataset/datasource-properties.json b/testdata/simple_tiff_dataset/datasource-properties.json new file mode 100644 index 000000000..93ecef44f --- /dev/null +++ b/testdata/simple_tiff_dataset/datasource-properties.json @@ -0,0 +1,40 @@ +{ + "id": { + "name": "tiff_dataset_1", + "team": "" + }, + "scale": [ + 1 + ], + "dataLayers": [ + { + "name": "color", + "category": "color", + "elementClass": "uint8", + "num_channels": 1, + "boundingBox": { + "topLeft": [ + 0, + 0, + 0 + ], + "width": 265, + "height": 265, + "depth": 10 + }, + "wkwResolutions": [ + { + "resolution": [ + 1, + 1, + 1 + ] + } + ] + } + ], + "grid_shape": [ + 0, + 0 + ] +} diff --git a/testdata/simple_wk_dataset/color/1/header.wkw b/testdata/simple_wk_dataset/color/1/header.wkw 
new file mode 100644 index 000000000..70b76607f Binary files /dev/null and b/testdata/simple_wk_dataset/color/1/header.wkw differ diff --git a/testdata/simple_wk_dataset/color/1/z0/y0/x0.wkw b/testdata/simple_wk_dataset/color/1/z0/y0/x0.wkw new file mode 100644 index 000000000..3906c1471 Binary files /dev/null and b/testdata/simple_wk_dataset/color/1/z0/y0/x0.wkw differ diff --git a/testdata/simple_wk_dataset/datasource-properties.json b/testdata/simple_wk_dataset/datasource-properties.json new file mode 100644 index 000000000..7b13d0bf7 --- /dev/null +++ b/testdata/simple_wk_dataset/datasource-properties.json @@ -0,0 +1,38 @@ +{ + "id": { + "name": "simple_wk_dataset", + "team": "" + }, + "scale": [ + 1 + ], + "dataLayers": [ + { + "dataFormat": "wkw", + "name": "color", + "category": "color", + "elementClass": "uint8", + "num_channels": 3, + "boundingBox": { + "topLeft": [ + 0, + 0, + 0 + ], + "width": 24, + "height": 24, + "depth": 24 + }, + "wkwResolutions": [ + { + "resolution": [ + 1, + 1, + 1 + ], + "cube_length": 32 + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/test_dataset.py b/tests/test_dataset.py new file mode 100644 index 000000000..3175eeada --- /dev/null +++ b/tests/test_dataset.py @@ -0,0 +1,532 @@ +import filecmp + +import numpy as np +from shutil import rmtree, copytree + +from wkcuber.api.Dataset import WKDataset, TiffDataset +from os import path, makedirs + +from wkcuber.api.Layer import Layer +from wkcuber.api.Properties import TiffProperties +from wkcuber.mag import Mag + + +def delete_dir(relative_path): + if path.exists(relative_path) and path.isdir(relative_path): + rmtree(relative_path) + + +def test_create_wk_dataset_with_layer_and_mag(): + delete_dir("./testoutput/wk_dataset") + + ds = WKDataset.create("./testoutput/wk_dataset", scale=(1, 1, 1)) + ds.add_layer("color", Layer.COLOR_TYPE) + + ds.get_layer("color").add_mag("1") + ds.get_layer("color").add_mag("2-2-1") + + assert path.exists("./testoutput/wk_dataset/color/1") + assert path.exists("./testoutput/wk_dataset/color/2-2-1") + + assert len(ds.properties.data_layers) == 1 + assert len(ds.properties.data_layers["color"].wkw_magnifications) == 2 + + +def test_create_wk_dataset_with_explicit_header_fields(): + delete_dir("./testoutput/wk_dataset_advanced") + + ds = WKDataset.create("./testoutput/wk_dataset_advanced", scale=(1, 1, 1)) + ds.add_layer("color", Layer.COLOR_TYPE, dtype=np.uint16, num_channels=3) + + ds.get_layer("color").add_mag("1", block_len=64, file_len=64) + ds.get_layer("color").add_mag("2-2-1") + + assert path.exists("./testoutput/wk_dataset_advanced/color/1") + assert path.exists("./testoutput/wk_dataset_advanced/color/2-2-1") + + assert len(ds.properties.data_layers) == 1 + assert len(ds.properties.data_layers["color"].wkw_magnifications) == 2 + + assert ds.properties.data_layers["color"].element_class == np.dtype(np.uint16) + assert ( + ds.properties.data_layers["color"].wkw_magnifications[0].cube_length == 64 * 64 + ) # mag "1" + assert ds.properties.data_layers["color"].wkw_magnifications[0].mag == Mag("1") + assert ( + ds.properties.data_layers["color"].wkw_magnifications[1].cube_length == 32 * 32 + ) # mag "2-2-1" (defaults are used) + assert ds.properties.data_layers["color"].wkw_magnifications[1].mag == Mag("2-2-1") + + +def test_create_tiff_dataset_with_layer_and_mag(): + # This test would be the same for WKDataset + + delete_dir("./testoutput/tiff_dataset") + + ds = TiffDataset.create("./testoutput/tiff_dataset", scale=(1, 1, 1)) + ds.add_layer("color", Layer.COLOR_TYPE) + +
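# NOTE: add_mag normalizes its argument via Mag(mag).to_layer_name(), so "2-2-1" and Mag("2-2-1") are equivalent here. +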
ds.get_layer("color").add_mag("1") + ds.get_layer("color").add_mag("2-2-1") + + assert path.exists("./testoutput/tiff_dataset/color/1") + assert path.exists("./testoutput/tiff_dataset/color/2-2-1") + + assert len(ds.properties.data_layers) == 1 + assert len(ds.properties.data_layers["color"].wkw_magnifications) == 2 + + +def test_open_wk_dataset(): + ds = WKDataset("./testdata/simple_wk_dataset") + + assert len(ds.properties.data_layers) == 1 + assert len(ds.properties.data_layers["color"].wkw_magnifications) == 1 + + +def test_open_tiff_dataset(): + ds = TiffDataset("./testdata/simple_tiff_dataset") + + assert len(ds.properties.data_layers) == 1 + assert len(ds.properties.data_layers["color"].wkw_magnifications) == 1 + + +def test_view_read_with_open(): + # This test would be the same for TiffDataset + + wk_view = WKDataset("./testdata/simple_wk_dataset/").get_view( + "color", "1", size=(32, 32, 32) + ) + + assert not wk_view._is_opened + + with wk_view.open(): + assert wk_view._is_opened + + data = wk_view.read((10, 10, 10)) + assert data.shape == (3, 10, 10, 10) # three channel + + assert not wk_view._is_opened + + +def test_view_read_without_open(): + # This test would be the same for TiffDataset + + wk_view = WKDataset("./testdata/simple_wk_dataset/").get_view( + "color", "1", size=(32, 32, 32) + ) + + assert not wk_view._is_opened + + # 'read()' checks if it was already opened. If not, it opens and closes automatically + data = wk_view.read((10, 10, 10)) + assert data.shape == (3, 10, 10, 10) # three channel + + assert not wk_view._is_opened + + +def test_view_wk_write(): + delete_dir("./testoutput/simple_wk_dataset/") + copytree("./testdata/simple_wk_dataset/", "./testoutput/simple_wk_dataset/") + + wk_view = WKDataset("./testoutput/simple_wk_dataset/").get_view( + "color", "1", size=(100, 100, 100) + ) + + with wk_view.open(): + np.random.seed(1234) + write_data = (np.random.rand(3, 10, 10, 10) * 255).astype(np.uint8) + + wk_view.write(write_data) + + data = wk_view.read((10, 10, 10)) + assert np.array_equal(data, write_data) + + +def test_view_tiff_write(): + delete_dir("./testoutput/simple_tiff_dataset/") + copytree("./testdata/simple_tiff_dataset/", "./testoutput/simple_tiff_dataset/") + + tiff_view = TiffDataset("./testoutput/simple_tiff_dataset/").get_view( + "color", "1", size=(100, 100, 100) + ) + + with tiff_view.open(): + np.random.seed(1234) + write_data = (np.random.rand(5, 5, 5) * 255).astype(np.uint8) + + tiff_view.write(write_data) + + data = tiff_view.read((5, 5, 5)) + assert data.shape == (1, 5, 5, 5) # this dataset has only one channel + assert np.array_equal(data, np.expand_dims(write_data, 0)) + + +def test_view_tiff_write_out_of_bounds(): + new_dataset_path = "./testoutput/tiff_view_dataset_out_of_bounds/" + + delete_dir(new_dataset_path) + copytree("./testdata/simple_tiff_dataset/", new_dataset_path) + + tiff_view = TiffDataset(new_dataset_path).get_view( + "color", "1", size=(100, 100, 100) + ) + + with tiff_view.open(): + try: + tiff_view.write( + np.zeros((200, 200, 5), dtype=np.uint8) + ) # this is bigger than the bounding_box + raise Exception( + "The test 'test_view_tiff_write_out_of_bounds' did not throw an exception even though it should" + ) + except AssertionError: + pass + + +def test_view_wk_write_out_of_bounds(): + new_dataset_path = "./testoutput/wk_view_dataset_out_of_bounds/" + + delete_dir(new_dataset_path) + copytree("./testdata/simple_wk_dataset/", new_dataset_path) + + tiff_view = WKDataset(new_dataset_path).get_view("color", "1", size=(100, 
100, 100)) + + with tiff_view.open(): + try: + tiff_view.write( + np.zeros((200, 200, 5), dtype=np.uint8) + ) # this is bigger than the bounding_box + raise Exception( + "The test 'test_view_wk_write_out_of_bounds' did not throw an exception even though it should" + ) + except AssertionError: + pass + + +def test_tiff_write_out_of_bounds(): + new_dataset_path = "./testoutput/simple_tiff_dataset_out_of_bounds/" + + delete_dir(new_dataset_path) + copytree("./testdata/simple_tiff_dataset/", new_dataset_path) + + ds = TiffDataset(new_dataset_path) + mag_dataset = ds.get_layer("color").get_mag("1") + + assert ds.properties.data_layers["color"].get_bounding_box_size() == (265, 265, 10) + mag_dataset.write( + np.zeros((300, 300, 15), dtype=np.uint8) + ) # this is bigger than the bounding_box + assert ds.properties.data_layers["color"].get_bounding_box_size() == (300, 300, 15) + + +def test_wk_write_out_of_bounds(): + new_dataset_path = "./testoutput/simple_wk_dataset_out_of_bounds/" + + delete_dir(new_dataset_path) + copytree("./testdata/simple_wk_dataset/", new_dataset_path) + + ds = WKDataset(new_dataset_path) + mag_dataset = ds.get_layer("color").get_mag("1") + + assert ds.properties.data_layers["color"].get_bounding_box_size() == (24, 24, 24) + mag_dataset.write( + np.zeros((3, 1, 1, 48), dtype=np.uint8) + ) # this is bigger than the bounding_box + assert ds.properties.data_layers["color"].get_bounding_box_size() == (24, 24, 48) + + +def test_update_new_bounding_box_offset(): + # This test would be the same for WKDataset + + delete_dir("./testoutput/tiff_dataset") + + ds = TiffDataset.create("./testoutput/tiff_dataset", scale=(1, 1, 1)) + mag = ds.add_layer("color", Layer.COLOR_TYPE).add_mag("1") + + assert ds.properties.data_layers["color"].bounding_box["topLeft"] == (-1, -1, -1) + + np.random.seed(1234) + write_data = (np.random.rand(10, 10, 10) * 255).astype(np.uint8) + mag.write( + write_data, offset=(10, 10, 10) + ) # the write method of MagDataset does always use the relative offset to (0, 0, 0) + assert ds.properties.data_layers["color"].bounding_box["topLeft"] == (10, 10, 10) + + mag.write( + write_data, offset=(5, 5, 20) + ) # the write method of MagDataset does always use the relative offset to (0, 0, 0) + assert ds.properties.data_layers["color"].bounding_box["topLeft"] == (5, 5, 10) + + +def test_tiff_write_multi_channel_uint8(): + dataset_path = "./testoutput/tiff_multichannel/" + delete_dir(dataset_path) + + ds_tiff = TiffDataset.create(dataset_path, scale=(1, 1, 1)) + mag = ds_tiff.add_layer("color", Layer.COLOR_TYPE, num_channels=3).add_mag("1") + + # 10 images (z-layers), each 250x250, dtype=np.uint8 + data = np.zeros((3, 250, 250, 10), dtype=np.uint8) + for h in range(10): + for i in range(250): + for j in range(250): + data[0, i, j, h] = i + data[1, i, j, h] = j + data[2, i, j, h] = 100 + + ds_tiff.get_layer("color").get_mag("1").write(data) + + assert np.array_equal(data, mag.read(size=(250, 250, 10))) + + +def test_wk_write_multi_channel_uint8(): + dataset_path = "./testoutput/wk_multichannel/" + delete_dir(dataset_path) + + ds_tiff = WKDataset.create(dataset_path, scale=(1, 1, 1)) + mag = ds_tiff.add_layer("color", Layer.COLOR_TYPE, num_channels=3).add_mag("1") + + # 10 images (z-layers), each 250x250, dtype=np.uint8 + data = np.zeros((3, 250, 250, 10), dtype=np.uint8) + for h in range(10): + for i in range(250): + for j in range(250): + data[0, i, j, h] = i + data[1, i, j, h] = j + data[2, i, j, h] = 100 + + ds_tiff.get_layer("color").get_mag("1").write(data) + + assert 
np.array_equal(data, mag.read(size=(250, 250, 10))) + + +def test_tiff_write_multi_channel_uint16(): + dataset_path = "./testoutput/tiff_multichannel/" + delete_dir(dataset_path) + + ds_tiff = TiffDataset.create(dataset_path, scale=(1, 1, 1)) + mag = ds_tiff.add_layer( + "color", Layer.COLOR_TYPE, num_channels=3, dtype=np.uint16 + ).add_mag("1") + + # 10 images (z-layers), each 250x250, dtype=np.uint16 + data = np.zeros((3, 250, 250, 10), dtype=np.uint16) + for h in range(10): + for i in range(250): + for j in range(250): + data[0, i, j, h] = i * 256 + data[1, i, j, h] = j * 256 + data[2, i, j, h] = 100 * 256 + + mag.write(data) + written_data = mag.read(size=(250, 250, 10)) + + print(written_data.dtype) + + assert np.array_equal(data, written_data) + + +def test_wk_write_multi_channel_uint16(): + dataset_path = "./testoutput/wk_multichannel/" + delete_dir(dataset_path) + + ds_tiff = WKDataset.create(dataset_path, scale=(1, 1, 1)) + mag = ds_tiff.add_layer( + "color", Layer.COLOR_TYPE, num_channels=3, dtype=np.uint16 + ).add_mag("1") + + # 10 images (z-layers), each 250x250, dtype=np.uint16 + data = np.zeros((3, 250, 250, 10), dtype=np.uint16) + for h in range(10): + for i in range(250): + for j in range(250): + data[0, i, j, h] = i * 256 + data[1, i, j, h] = j * 256 + data[2, i, j, h] = 100 * 256 + + mag.write(data) + written_data = mag.read(size=(250, 250, 10)) + + print(written_data.dtype) + + assert np.array_equal(data, written_data) + + +def test_wkw_empty_read(): + filename = "./testoutput/empty_wk_dataset" + delete_dir(filename) + + mag = ( + WKDataset.create(filename, scale=(1, 1, 1)) + .add_layer("color", Layer.COLOR_TYPE) + .add_mag("1") + ) + data = mag.read(size=(0, 0, 0), offset=(1, 1, 1)) + + assert data.shape == (1, 0, 0, 0) + + +def test_tiff_empty_read(): + filename = "./testoutput/empty_tiff_dataset" + delete_dir(filename) + + mag = ( + TiffDataset.create(filename, scale=(1, 1, 1)) + .add_layer("color", Layer.COLOR_TYPE) + .add_mag("1") + ) + data = mag.read(size=(0, 0, 0), offset=(1, 1, 1)) + + assert data.shape == (1, 0, 0, 0) + + +def test_tiff_read_padded_data(): + filename = "./testoutput/empty_tiff_dataset" + delete_dir(filename) + + mag = ( + TiffDataset.create(filename, scale=(1, 1, 1)) + .add_layer("color", Layer.COLOR_TYPE, num_channels=3) + .add_mag("1") + ) + # there are no tiffs yet, however, this should not fail but pad the data with zeros + data = mag.read(size=(10, 10, 10)) + + assert data.shape == (3, 10, 10, 10) + assert np.array_equal(data, np.zeros((3, 10, 10, 10))) + + +def test_wk_read_padded_data(): + filename = "./testoutput/empty_wk_dataset" + delete_dir(filename) + + mag = ( + WKDataset.create(filename, scale=(1, 1, 1)) + .add_layer("color", Layer.COLOR_TYPE, num_channels=3) + .add_mag("1") + ) + # there is no data yet, however, this should not fail but pad the data with zeros + data = mag.read(size=(10, 10, 10)) + + assert data.shape == (3, 10, 10, 10) + assert np.array_equal(data, np.zeros((3, 10, 10, 10))) + + +def test_read_and_write_of_properties(): + destination_path = "./testoutput/read_write_properties/" + delete_dir(destination_path) + source_file_name = "./testdata/simple_tiff_dataset/datasource-properties.json" + destination_file_name = destination_path + "datasource-properties.json" + + imported_properties = TiffProperties._from_json(source_file_name) + imported_properties._path = destination_file_name + makedirs(destination_path) + imported_properties._export_as_json() + + assert filecmp.cmp(source_file_name, destination_file_name) + + +def
test_num_channel_mismatch_assertion(): + delete_dir("./testoutput/wk_dataset") + + ds = WKDataset.create("./testoutput/wk_dataset", scale=(1, 1, 1)) + mag = ds.add_layer("color", Layer.COLOR_TYPE, num_channels=1).add_mag( + "1" + ) # num_channels=1 is also the default + + np.random.seed(1234) + write_data = (np.random.rand(3, 10, 10, 10) * 255).astype(np.uint8) # 3 channels + + try: + mag.write(write_data) # there is a mismatch between the number of channels + raise Exception( + "The test 'test_num_channel_mismatch_assertion' did not throw an exception even though it should" + ) + except AssertionError: + pass + + +def test_get_or_add_layer(): + # This test would be the same for TiffDataset + + delete_dir("./testoutput/wk_dataset") + + ds = WKDataset.create("./testoutput/wk_dataset", scale=(1, 1, 1)) + + assert "color" not in ds.layers.keys() + + # layer did not exist before + layer = ds.get_or_add_layer( + "color", Layer.COLOR_TYPE, dtype=np.uint8, num_channels=1 + ) + assert "color" in ds.layers.keys() + assert layer.name == "color" + + # layer did exist before + layer = ds.get_or_add_layer( + "color", Layer.COLOR_TYPE, dtype=np.uint8, num_channels=1 + ) + assert "color" in ds.layers.keys() + assert layer.name == "color" + + try: + # layer did exist before but with another 'dtype' (this would work the same for 'category' and 'num_channels') + layer = ds.get_or_add_layer( + "color", Layer.COLOR_TYPE, dtype=np.uint16, num_channels=1 + ) + + raise Exception( + "The test 'test_get_or_add_layer' did not throw an exception even though it should" + ) + except AssertionError: + pass + + +def test_get_or_add_mag_for_wk(): + delete_dir("./testoutput/wk_dataset") + + layer = WKDataset.create("./testoutput/wk_dataset", scale=(1, 1, 1)).add_layer( + "color", Layer.COLOR_TYPE + ) + + assert "1" not in layer.mags.keys() + + # The mag did not exist before + mag = layer.get_or_add_mag("1", block_len=32, file_len=32, block_type=1) + assert "1" in layer.mags.keys() + assert mag.name == "1" + + # The mag did exist before + layer.get_or_add_mag("1", block_len=32, file_len=32, block_type=1) + assert "1" in layer.mags.keys() + assert mag.name == "1" + + try: + # mag did exist before but with another 'block_len' (this would work the same for 'file_len' and 'block_type') + mag = layer.get_or_add_mag("1", block_len=64, file_len=32, block_type=1) + + raise Exception( + "The test 'test_get_or_add_mag_for_wk' did not throw an exception even though it should" + ) + except AssertionError: + pass + + +def test_get_or_add_mag_for_tiff(): + delete_dir("./testoutput/tiff_dataset") + + layer = TiffDataset.create("./testoutput/tiff_dataset", scale=(1, 1, 1)).add_layer( + "color", Layer.COLOR_TYPE + ) + + assert "1" not in layer.mags.keys() + + # The mag did not exist before + mag = layer.get_or_add_mag("1") + assert "1" in layer.mags.keys() + assert mag.name == "1" + + # The mag did exist before + layer.get_or_add_mag("1") + assert "1" in layer.mags.keys() + assert mag.name == "1" diff --git a/wkcuber/api/Dataset.py b/wkcuber/api/Dataset.py new file mode 100644 index 000000000..8da76905d --- /dev/null +++ b/wkcuber/api/Dataset.py @@ -0,0 +1,169 @@ +from shutil import rmtree +from abc import ABC, abstractmethod +from os import makedirs, path +from os.path import join, normpath, basename +from pathlib import Path +import numpy as np + +from wkcuber.api.Properties import WKProperties, TiffProperties, Properties +from wkcuber.api.Layer import Layer, WKLayer, TiffLayer +from wkcuber.api.View import View + + +class AbstractDataset(ABC): +
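# Shared base class: parses datasource-properties.json via the subclass-specific Properties type and reconstructs all layers and mags; subclasses only provide _create_layer and _get_properties_type. +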
@abstractmethod + def __init__(self, dataset_path): + properties = self._get_properties_type()._from_json( + join(dataset_path, Properties.FILE_NAME) + ) + self.layers = {} + self.path = Path(properties.path).parent + self.properties = properties + + # construct self.layers + for layer_name in self.properties.data_layers: + layer = self.properties.data_layers[layer_name] + self.add_layer( + layer.name, layer.category, layer.element_class, layer.num_channels + ) + for resolution in layer.wkw_magnifications: + self.layers[layer_name].setup_mag(resolution.mag.to_layer_name()) + + @classmethod + def create_with_properties(cls, properties): + dataset_path = path.dirname(properties.path) + # create directories on disk and write datasource-properties.json + try: + makedirs(dataset_path) + properties._export_as_json() + except OSError: + raise FileExistsError("Creation of Dataset {} failed".format(dataset_path)) + + # initialize object + return cls(dataset_path) + + @classmethod + @abstractmethod + def create(cls, dataset_path, scale): + pass + + def downsample(self, layer, target_mag_shape, source_mag): + raise NotImplementedError() + + def get_properties(self) -> Properties: + return self.properties + + def get_layer(self, layer_name) -> Layer: + if layer_name not in self.layers.keys(): + raise IndexError( + "The layer {} is not a layer of this dataset".format(layer_name) + ) + return self.layers[layer_name] + + def add_layer(self, layer_name, category, dtype=np.dtype("uint8"), num_channels=1): + # normalize the value of dtype in case the parameter was passed as a string + dtype = np.dtype(dtype) + + if layer_name in self.layers.keys(): + raise IndexError( + "Adding layer {} failed. There is already a layer with this name".format( + layer_name + ) + ) + self.layers[layer_name] = self._create_layer(layer_name, dtype, num_channels) + self.properties._add_layer(layer_name, category, dtype.name, num_channels) + return self.layers[layer_name] + + def get_or_add_layer( + self, layer_name, category, dtype=None, num_channels=None + ) -> Layer: + if layer_name in self.layers.keys(): + assert self.properties.data_layers[layer_name].category == category, ( + f"Cannot get_or_add_layer: The layer '{layer_name}' already exists, but the categories do not match. " + + f"The category of the existing layer is '{self.properties.data_layers[layer_name].category}' " + + f"and the passed parameter is '{category}'." + ) + assert dtype is None or self.layers[layer_name].dtype == np.dtype(dtype), ( + f"Cannot get_or_add_layer: The layer '{layer_name}' already exists, but the dtypes do not match. " + + f"The dtype of the existing layer is '{self.layers[layer_name].dtype}' " + + f"and the passed parameter is '{dtype}'." + ) + assert ( + num_channels is None + or self.layers[layer_name].num_channels == num_channels + ), ( + f"Cannot get_or_add_layer: The layer '{layer_name}' already exists, but the number of channels does not match. " + + f"The number of channels of the existing layer is '{self.layers[layer_name].num_channels}' " + + f"and the passed parameter is '{num_channels}'." + ) + return self.layers[layer_name] + else: + # fall back to the add_layer defaults if the caller did not specify them (np.dtype(None) would be float64) + return self.add_layer( + layer_name, + category, + dtype or np.dtype("uint8"), + num_channels or 1, + ) + + def delete_layer(self, layer_name): + if layer_name not in self.layers.keys(): + raise IndexError( + f"Removing layer {layer_name} failed. 
There is no layer with this name" + ) + del self.layers[layer_name] + self.properties._delete_layer(layer_name) + # delete files on disk + rmtree(join(self.path, layer_name)) + + def get_view(self, layer_name, mag_name, size, global_offset=(0, 0, 0)) -> View: + layer = self.get_layer(layer_name) + mag = layer.get_mag(mag_name) + mag_file_path = path.join(self.path, layer.name, mag.name) + + return mag.get_view(mag_file_path, size=size, global_offset=global_offset) + + def _create_layer(self, layer_name, dtype, num_channels) -> Layer: + raise NotImplementedError + + @abstractmethod + def _get_properties_type(self): + pass + + +class WKDataset(AbstractDataset): + @classmethod + def create(cls, dataset_path, scale): + name = basename(normpath(dataset_path)) + properties = WKProperties(join(dataset_path, Properties.FILE_NAME), name, scale) + return WKDataset.create_with_properties(properties) + + def __init__(self, dataset_path): + super().__init__(dataset_path) + assert isinstance(self.properties, WKProperties) + + def to_tiff_dataset(self, new_dataset_path): + raise NotImplementedError # TODO: implement + + def _create_layer(self, layer_name, dtype, num_channels) -> Layer: + return WKLayer(layer_name, self, dtype, num_channels) + + def _get_properties_type(self): + return WKProperties + + +class TiffDataset(AbstractDataset): + @classmethod + def create(cls, dataset_path, scale): + name = basename(normpath(dataset_path)) + properties = TiffProperties( + join(dataset_path, Properties.FILE_NAME), name, scale + ) + return TiffDataset.create_with_properties(properties) + + def __init__(self, dataset_path): + super().__init__(dataset_path) + assert isinstance(self.properties, TiffProperties) + + def to_wk_dataset(self, new_dataset_path): + raise NotImplementedError # TODO: implement + + def _create_layer(self, layer_name, dtype, num_channels) -> Layer: + return TiffLayer(layer_name, self, dtype, num_channels) + + def _get_properties_type(self): + return TiffProperties diff --git a/wkcuber/api/Layer.py b/wkcuber/api/Layer.py new file mode 100644 index 000000000..4ac0a4b14 --- /dev/null +++ b/wkcuber/api/Layer.py @@ -0,0 +1,130 @@ +from shutil import rmtree +from os.path import join +from os import makedirs +from wkw import wkw + +from wkcuber.api.MagDataset import MagDataset, WKMagDataset, TiffMagDataset +from wkcuber.mag import Mag + + +class Layer: + + COLOR_TYPE = "color" + SEGMENTATION_TYPE = "segmentation" + + def __init__(self, name, dataset, dtype, num_channels): + self.name = name + self.dataset = dataset + self.dtype = dtype + self.num_channels = num_channels + self.mags = {} + + full_path = join(dataset.path, name) + makedirs(full_path, exist_ok=True) + + def get_mag(self, mag) -> MagDataset: + if mag not in self.mags.keys(): + raise IndexError("The mag {} is not a mag of this layer".format(mag)) + return self.mags[mag] + + def delete_mag(self, mag): + if mag not in self.mags.keys(): + raise IndexError( + "Deleting mag {} failed. There is no mag with this name".format(mag) + ) + + del self.mags[mag] + self.dataset.properties._delete_mag(self.name, mag) + # delete files on disk + full_path = join(self.dataset.path, self.name, mag) + rmtree(full_path) + + def _create_dir_for_mag(self, mag): + full_path = join(self.dataset.path, self.name, mag) + makedirs(full_path, exist_ok=True) + + def _assert_mag_does_not_exist_yet(self, mag): + if mag in self.mags.keys(): + raise IndexError( + "Adding mag {} failed. 
There is already a mag with this name".format( + mag + ) + ) + + +class WKLayer(Layer): + def add_mag(self, mag, block_len=32, file_len=32, block_type=1) -> WKMagDataset: + # normalize the name of the mag + mag = Mag(mag).to_layer_name() + + self._assert_mag_does_not_exist_yet(mag) + self._create_dir_for_mag(mag) + + self.mags[mag] = WKMagDataset.create(self, mag, block_len, file_len, block_type) + self.dataset.properties._add_mag(self.name, mag, block_len * file_len) + + return self.mags[mag] + + def get_or_add_mag( + self, mag, block_len=None, file_len=None, block_type=None + ) -> WKMagDataset: + # normalize the name of the mag + mag = Mag(mag).to_layer_name() + + if mag in self.mags.keys(): + assert ( + block_len is None or self.mags[mag].header.block_len == block_len + ), f"Cannot get_or_add_mag: The mag {mag} already exists, but the block lengths do not match" + assert ( + file_len is None or self.mags[mag].header.file_len == file_len + ), f"Cannot get_or_add_mag: The mag {mag} already exists, but the file lengths do not match" + assert ( + block_type is None or self.mags[mag].header.block_type == block_type + ), f"Cannot get_or_add_mag: The mag {mag} already exists, but the block types do not match" + return self.get_mag(mag) + else: + return self.add_mag(mag, block_len, file_len, block_type) + + def setup_mag(self, mag): + # This method is used to initialize the mag when opening the Dataset. This does not create e.g. the wk_header. + + # normalize the name of the mag + mag = Mag(mag).to_layer_name() + + self._assert_mag_does_not_exist_yet(mag) + + with wkw.Dataset.open(join(self.dataset.path, self.name, mag)) as wkw_dataset: + wk_header = wkw_dataset.header + + self.mags[mag] = WKMagDataset( + self, mag, wk_header.block_len, wk_header.file_len, wk_header.block_type + ) + self.dataset.properties._add_mag( + self.name, mag, wk_header.block_len * wk_header.file_len + ) + + +class TiffLayer(Layer): + def add_mag(self, mag) -> MagDataset: + # normalize the name of the mag + mag = Mag(mag).to_layer_name() + + self._assert_mag_does_not_exist_yet(mag) + self._create_dir_for_mag(mag) + + self.mags[mag] = TiffMagDataset.create(self, mag) + self.dataset.properties._add_mag(self.name, mag) + + return self.mags[mag] + + def get_or_add_mag(self, mag) -> MagDataset: + # normalize the name of the mag + mag = Mag(mag).to_layer_name() + + if mag in self.mags.keys(): + return self.get_mag(mag) + else: + return self.add_mag(mag) + + def setup_mag(self, mag): + self.add_mag(mag) diff --git a/wkcuber/api/MagDataset.py b/wkcuber/api/MagDataset.py new file mode 100644 index 000000000..bfed0018d --- /dev/null +++ b/wkcuber/api/MagDataset.py @@ -0,0 +1,116 @@ +from os.path import join + +from wkw import wkw +import numpy as np + +import wkcuber.api as api +from wkcuber.api.View import WKView, TiffView +from wkcuber.api.TiffData.TiffMag import TiffMagHeader + + +class MagDataset: + def __init__(self, layer, name): + self.layer = layer + self.name = name + self.header = self.get_header() + + file_path = join(self.layer.dataset.path, self.layer.name, self.name) + size = self.layer.dataset.properties.data_layers[ + self.layer.name + ].get_bounding_box_size() + offset = self.layer.dataset.properties.data_layers[ + self.layer.name + ].get_bounding_box_offset() + self.view = self.get_view( + file_path, size, global_offset=(0, 0, 0), is_bounded=False + ) + + def open(self): + self.view.initialize() + + def close(self): + self.view.close() + + def read(self, size, offset=(0, 0, 0)) -> np.array: + return 
self.view.read(size, offset) + + def write(self, data, offset=(0, 0, 0)): + self._assert_valid_num_channels(data.shape) + self.view.write(data, offset) + layer_properties = self.layer.dataset.properties.data_layers[self.layer.name] + current_offset = layer_properties.get_bounding_box_offset() + current_size = layer_properties.get_bounding_box_size() + + new_offset = ( + offset + if current_offset == (-1, -1, -1) + else tuple(min(x) for x in zip(current_offset, offset)) + ) + total_size = tuple(max(x) for x in zip(current_size, data.shape[-3:])) + self.view.size = total_size + + self.layer.dataset.properties._set_bounding_box_of_layer( + self.layer.name, new_offset, total_size + ) + + def get_header(self): + raise NotImplementedError + + def get_view(self, mag_file_path, size, global_offset, is_bounded=True): + raise NotImplementedError + + def _assert_valid_num_channels(self, write_data_shape): + num_channels = self.layer.num_channels + if len(write_data_shape) == 3: + assert ( + num_channels == 1 + ), f"The number of channels of the dataset ({num_channels}) does not match the number of channels of the passed data (1)" + else: + assert ( + num_channels == write_data_shape[0] + ), f"The number of channels of the dataset ({num_channels}) does not match the number of channels of the passed data ({write_data_shape[0]})" + + +class WKMagDataset(MagDataset): + def __init__(self, layer, name, block_len, file_len, block_type): + self.block_len = block_len + self.file_len = file_len + self.block_type = block_type + super().__init__(layer, name) + + def get_header(self) -> wkw.Header: + return wkw.Header( + voxel_type=self.layer.dtype, + num_channels=self.layer.num_channels, + version=1, + block_len=self.block_len, + file_len=self.file_len, + block_type=self.block_type, + ) + + def get_view(self, mag_file_path, size, global_offset, is_bounded=True) -> WKView: + return WKView(mag_file_path, self.header, size, global_offset, is_bounded) + + @classmethod + def create(cls, layer, name, block_len, file_len, block_type): + mag_dataset = cls(layer, name, block_len, file_len, block_type) + wkw.Dataset.create( + join(layer.dataset.path, layer.name, mag_dataset.name), mag_dataset.header + ) + + return mag_dataset + + +class TiffMagDataset(MagDataset): + def get_header(self) -> TiffMagHeader: + return TiffMagHeader( + dtype=self.layer.dtype, num_channels=self.layer.num_channels + ) + + def get_view(self, mag_file_path, size, global_offset, is_bounded=True) -> TiffView: + return TiffView(mag_file_path, self.header, size, global_offset, is_bounded) + + @classmethod + def create(cls, layer, name): + mag_dataset = cls(layer, name) + return mag_dataset diff --git a/wkcuber/api/Properties.py b/wkcuber/api/Properties.py new file mode 100644 index 000000000..4b2ab51c5 --- /dev/null +++ b/wkcuber/api/Properties.py @@ -0,0 +1,317 @@ +import json +import numpy as np + +from wkcuber.mag import Mag + + +class Resolution: + def _to_json(self) -> dict: + pass + + @classmethod + def _from_json(cls, json_data): + pass + + +class TiffResolution(Resolution): + def __init__(self, mag): + self._mag = Mag(mag) + + def _to_json(self) -> dict: + return {"resolution": self.mag.to_array()} + + @classmethod + def _from_json(cls, json_data): + return cls(json_data["resolution"]) + + @property + def mag(self) -> Mag: + return self._mag + + +class WkResolution(Resolution): + def __init__(self, mag, cube_length): + self._mag = Mag(mag) + self._cube_length = cube_length + + def _to_json(self) -> dict: + return {"resolution": self.mag.to_array(), 
"cube_length": self.cube_length} + + @classmethod + def _from_json(cls, json_data): + return cls(json_data["resolution"], json_data["cube_length"]) + + @property + def mag(self) -> Mag: + return self._mag + + @property + def cube_length(self) -> int: + return self._cube_length + + +class Properties: + + FILE_NAME = "datasource-properties.json" + + def __init__(self, path, name, scale, team="", data_layers=None): + self._path = path + self._name = name + self._team = team + self._scale = scale + if data_layers is None: + self._data_layers = {} + else: + self._data_layers = data_layers + + @classmethod + def _from_json(cls, path): + pass + + def _export_as_json(self): + pass + + def _add_layer(self, layer_name, category, element_class, num_channels=1): + # this layer is already in data_layers in case we reconstruct the dataset from a datasource-properties.json + if layer_name not in self.data_layers: + new_layer = LayerProperties( + layer_name, category, element_class, num_channels + ) + self.data_layers[layer_name] = new_layer + self._export_as_json() + + def _delete_layer(self, layer_name): + del self.data_layers[layer_name] + self._export_as_json() + + def _delete_mag(self, layer_name, mag): + self._data_layers[layer_name]._delete_resolution(mag) + self._export_as_json() + + def _set_bounding_box_of_layer(self, layer_name, offset, size): + self._data_layers[layer_name]._set_bounding_box_size(size) + self._data_layers[layer_name]._set_bounding_box_offset(offset) + self._export_as_json() + + @property + def name(self) -> str: + return self._name + + @property + def path(self) -> str: + return self._path + + @property + def team(self) -> str: + return self._team + + @property + def scale(self) -> tuple: + return self._scale + + @property + def data_layers(self) -> dict: + return self._data_layers + + +class WKProperties(Properties): + @classmethod + def _from_json(cls, path) -> Properties: + with open(path) as datasource_properties: + data = json.load(datasource_properties) + + # reconstruct data_layers + data_layers = {} + for layer in data["dataLayers"]: + data_layers[layer["name"]] = LayerProperties._from_json( + layer, WkResolution + ) + + return cls( + path, data["id"]["name"], data["scale"], data["id"]["team"], data_layers + ) + + def _export_as_json(self): + data = { + "id": {"name": self.name, "team": self.team}, + "scale": self.scale, + "dataLayers": [ + self.data_layers[layer_name]._to_json() + for layer_name in self.data_layers + ], + } + with open(self.path, "w") as outfile: + json.dump(data, outfile, indent=4, separators=(",", ": ")) + + def _add_mag(self, layer_name, mag, cube_length): + # this mag is already in wkw_magnifications in case we reconstruct the dataset from a datasource-properties.json + if not any( + [ + res.mag == Mag(mag) + for res in self.data_layers[layer_name].wkw_magnifications + ] + ): + self._data_layers[layer_name]._add_resolution( + WkResolution(mag, cube_length) + ) + self._export_as_json() + + +class TiffProperties(Properties): + def __init__(self, path, name, scale, team="", data_layers=None, grid_shape=(0, 0)): + super().__init__(path, name, scale, team, data_layers) + self._grid_shape = grid_shape + + @classmethod + def _from_json(cls, path) -> Properties: + with open(path) as datasource_properties: + data = json.load(datasource_properties) + + # reconstruct data_layers + data_layers = {} + for layer in data["dataLayers"]: + data_layers[layer["name"]] = LayerProperties._from_json( + layer, TiffResolution + ) + + return cls( + path, + 
data["id"]["name"], + data["scale"], + data["id"]["team"], + data_layers, + data["grid_shape"], + ) + + def _export_as_json(self): + data = { + "id": {"name": self.name, "team": self.team}, + "scale": self.scale, + "dataLayers": [ + self.data_layers[layer_name]._to_json() + for layer_name in self.data_layers + ], + "grid_shape": self.grid_shape, + } + with open(self.path, "w") as outfile: + json.dump(data, outfile, indent=4, separators=(",", ": ")) + + @property + def grid_shape(self) -> tuple: + return self._grid_shape + + def _add_mag(self, layer_name, mag): + # this mag is already in wkw_magnifications in case we reconstruct the dataset from a datasource-properties.json + if not any( + [ + res.mag == Mag(mag) + for res in self.data_layers[layer_name].wkw_magnifications + ] + ): + self.data_layers[layer_name]._add_resolution(TiffResolution(mag)) + self._export_as_json() + + +class LayerProperties: + def __init__( + self, + name, + category, + element_class, + num_channels, + bounding_box=None, + resolutions=None, + ): + self._name = name + self._category = category + self._element_class = element_class + self._num_channels = num_channels + self._bounding_box = bounding_box or { + "topLeft": (-1, -1, -1), + "width": 0, + "height": 0, + "depth": 0, + } + self._wkw_magnifications = resolutions or [] + + def _to_json(self): + return { + "name": self.name, + "category": self.category, + "elementClass": self.element_class, + "num_channels": self.num_channels, + "boundingBox": {} + if self.bounding_box is None + else { + "topLeft": self.bounding_box["topLeft"], + "width": self.bounding_box["width"], + "height": self.bounding_box["height"], + "depth": self.bounding_box["depth"], + }, + "wkwResolutions": [r._to_json() for r in self.wkw_magnifications], + } + + @classmethod + def _from_json(cls, json_data, resolution_type): + # create LayerProperties without resolutions + layer_properties = cls( + json_data["name"], + json_data["category"], + json_data["elementClass"], + json_data["num_channels"], + json_data["boundingBox"], + ) + + # add resolutions to LayerProperties + for resolution in json_data["wkwResolutions"]: + layer_properties._add_resolution(resolution_type._from_json(resolution)) + + return layer_properties + + def _add_resolution(self, resolution): + self._wkw_magnifications.append(resolution) + + def _delete_resolution(self, resolution): + self._wkw_magnifications.delete(resolution) + + def get_bounding_box_size(self) -> tuple: + return ( + self.bounding_box["width"], + self.bounding_box["height"], + self.bounding_box["depth"], + ) + + def get_bounding_box_offset(self) -> tuple: + return tuple(self.bounding_box["topLeft"]) + + def _set_bounding_box_size(self, size): + self._bounding_box["width"] = size[0] + self._bounding_box["height"] = size[1] + self._bounding_box["depth"] = size[2] + + def _set_bounding_box_offset(self, offset): + self._bounding_box["topLeft"] = offset + + @property + def name(self) -> str: + return self._name + + @property + def category(self) -> str: + return self._category + + @property + def element_class(self): + return self._element_class + + @property + def num_channels(self) -> int: + return self._num_channels + + @property + def bounding_box(self) -> dict: + return self._bounding_box + + @property + def wkw_magnifications(self) -> dict: + return self._wkw_magnifications diff --git a/wkcuber/api/TiffData/TiffMag.py b/wkcuber/api/TiffData/TiffMag.py new file mode 100644 index 000000000..de7b5a50d --- /dev/null +++ b/wkcuber/api/TiffData/TiffMag.py @@ -0,0 
+from typing import Optional, List, Generator
+
+from skimage import io
+import numpy as np
+import os
+from re import findall
+from glob import iglob
+from itertools import zip_longest
+
+
+def replace_coordinate(pattern: str, coord_id: str, coord: int) -> str:
+    # replace e.g. "{zzzzz}" in the pattern with the zero-padded coordinate
+    occurrences = findall("{" + coord_id + "+}", pattern)
+    for occurrence in occurrences:
+        number_of_digits = len(occurrence) - 2
+        if number_of_digits > 1:
+            format_str = "0" + str(number_of_digits) + "d"
+        else:
+            format_str = "d"
+        pattern = pattern.replace(occurrence, format(coord, format_str), 1)
+    return pattern
+
+
+def to_file_name(z) -> str:
+    return replace_coordinate("{zzzzz}.tif", "z", z)
+
+
+def detect_value(
+    pattern_element: str,
+    ls_item: str,
+    dim: str,
+    ignore_dims: Optional[List[str]] = None,
+) -> List[int]:
+    if ignore_dims is not None:
+        for ignore_dim in ignore_dims:
+            pattern_element = pattern_element.replace("{" + ignore_dim, ignore_dim)
+            pattern_element = pattern_element.replace(ignore_dim + "}", ignore_dim)
+
+    if "{" + dim in pattern_element and dim + "}" in pattern_element:
+        open_position = pattern_element.find("{" + dim)
+        close_position = pattern_element.find(dim + "}")
+        try:
+            substring = ls_item[open_position:close_position]
+            return [int(substring)]
+        except ValueError:
+            raise ValueError(
+                f"Failed to autodetect tile ranges, there were files not matching the pattern: {ls_item} does not match {pattern_element}"
+            )
+    return []
+
+
+class TiffMag:
+    def __init__(self, root, header):
+        x_range = [0]  # currently tiled tiffs are not supported
+        y_range = [0]  # currently tiled tiffs are not supported
+
+        self.root = root
+        self.tiffs = dict()
+        self.dtype = header.dtype
+        self.num_channels = header.num_channels
+
+        pattern = "{zzzzz}.tif"
+
+        z_range = [
+            detect_value(pattern, file_name, dim="z")[0]
+            for file_name in self.list_files()
+        ]
+
+        for z in z_range:
+            self.tiffs[z] = TiffReader.open(
+                self.get_file_name_for_layer(z)
+            )  # open is lazy
+
+    def read(self, off, shape) -> np.ndarray:
+        if not self.has_only_one_channel():
+            # modify the shape to also include the num_channels
+            shape = tuple(shape) + tuple([self.num_channels])
+
+        data = np.zeros(shape=shape, dtype=self.dtype)
+        for i, (z, offset, size) in enumerate(
+            self.calculate_relevant_slices(off, shape)
+        ):
+            if z in self.tiffs:
+                data[:, :, i] = np.array(self.tiffs[z].read(), self.dtype)[
+                    offset[0] : offset[0] + size[0], offset[1] : offset[1] + size[1]
+                ]
+            else:
+                # z slices that do not exist on disk read as zeros
+                shape_without_z = shape[:2] + shape[3:]
+                data[:, :, i] = np.zeros(shape_without_z, self.dtype)
+
+        if self.has_only_one_channel():
+            # convert the data into a shape with a dedicated num_channels axis
+            # (len(data.shape) == 4); this only affects data where num_channels
+            # is 1 and len(data.shape) was therefore 3, and makes multi-channel
+            # and single-channel data easier to handle in the same way
+            data = np.expand_dims(data, 3)
+
+        # reformat the array to have the channels as the first index (similar to wkw)
+        data = np.moveaxis(data, -1, 0)
+        return data
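+
+    # `write` is the inverse of `read`: it transposes the data back into
+    # channels-last z slices; missing z slices are created on the fly and
+    # existing ones are merged with the new pixel data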
+    def write(self, off, data):
+        # convert the data into a shape with a dedicated num_channels axis
+        # (len(data.shape) == 4); this only affects data where num_channels
+        # is 1 and len(data.shape) was therefore 3, and makes multi-channel
+        # and single-channel data easier to handle in the same way
+        data = data.reshape((-1,) + data.shape[-3:])
+
+        # reformat the array to have the channels as the last index (inverse of read)
+        data = np.moveaxis(data, 0, -1)
+
+        self.assert_correct_data_format(data)
+
+        for i, (z, offset, _) in enumerate(
+            self.calculate_relevant_slices(off, data.shape)
+        ):
+            # initialize images for z layers that did not exist before
+            if z not in self.tiffs:
+                total_shape = [
+                    sum(x)
+                    for x in zip_longest(data[:, :, i].shape, offset, fillvalue=0)
+                ]
+                if self.has_only_one_channel():
+                    # convert single-channel data into the expected format,
+                    # e.g. shape (300, 300, 1) into (300, 300)
+                    total_shape = tuple(total_shape)[:-1]
+
+                self.tiffs[z] = TiffReader.init_tiff(
+                    np.zeros(total_shape, self.dtype), self.get_file_name_for_layer(z)
+                )
+
+            # write the new pixel data into the image
+            pixel_data = (
+                data[:, :, i] if not self.has_only_one_channel() else data[:, :, i, 0]
+            )
+
+            self.tiffs[z].merge_with_image(pixel_data, offset)
+
+    def compress(self, dst_path: str, compress_files: bool = False):
+        raise NotImplementedError
+
+    def list_files(self):
+        file_paths = list(iglob(os.path.join(self.root, "*.tif")))
+
+        for file_path in file_paths:
+            yield os.path.relpath(os.path.normpath(file_path), self.root)
+
+    def close(self):
+        return
+
+    def calculate_relevant_slices(self, offset, shape):
+        for z in range(offset[2] + 1, offset[2] + shape[2] + 1):
+            # yield the relevant z layers together with the x-y offset (without
+            # the z offset) and the x-y size (without the z extent)
+            yield tuple((z, offset[0:2], shape[0:2]))
+
+    def has_only_one_channel(self) -> bool:
+        return self.num_channels == 1
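+
+    # at this point `write` has arranged the data channels-last,
+    # i.e. (x, y, z, num_channels)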
+    def assert_correct_data_format(self, data):
+        if not len(data.shape) == 4:
+            raise AttributeError(
+                "The shape of the provided data does not match the expected shape."
+            )
+        if not data.shape[3] == self.num_channels:
+            raise AttributeError(
+                f"The shape of the provided data does not match the expected shape. (Expected {self.num_channels} channels)"
+            )
+        if not np.dtype(data.dtype) == self.dtype:
+            raise AttributeError(
+                f"The type of the provided data does not match the expected type. (Expected np.array of type {self.dtype.name})"
+            )
+
+    def get_file_name_for_layer(self, z) -> str:
+        return os.path.join(self.root, to_file_name(z))
+
+    @staticmethod
+    def open(root: str, header=None):
+        if header is None:
+            header = TiffMagHeader()
+        return TiffMag(root, header)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, type, value, tb):
+        self.close()
+
+
+class TiffMagHeader:
+    def __init__(self, pattern="{z}.tif", dtype=np.dtype("uint8"), num_channels=1):
+        self.pattern = pattern
+        self.dtype = np.dtype(dtype)
+        self.num_channels = num_channels
+
+
+class TiffReader:
+    def __init__(self, file_name):
+        self.file_name = file_name
+
+    @classmethod
+    def init_tiff(cls, pixels, file_name):
+        tr = TiffReader(file_name)
+        tr.write(pixels)
+        return tr
+
+    @classmethod
+    def open(cls, file_name):
+        return cls(file_name)
+
+    def read(self) -> np.ndarray:
+        return io.imread(self.file_name)
+
+    def write(self, pixels):
+        io.imsave(self.file_name, pixels, check_contrast=False)
+
+    def merge_with_image(self, foreground_pixels, offset):
+        # paint the foreground pixels onto the existing image, growing the
+        # canvas if the offset data does not fit into the current shape
+        background_pixels = self.read()
+        bg_shape = background_pixels.shape
+        fg_shape = foreground_pixels.shape
+
+        fg_shape_with_off = [sum(x) for x in zip_longest(fg_shape, offset, fillvalue=0)]
+        total_shape = [max(x) for x in zip(bg_shape, fg_shape_with_off)]
+        new_image = np.zeros(total_shape, dtype=background_pixels.dtype)
+
+        new_image[0 : bg_shape[0], 0 : bg_shape[1]] = background_pixels
+        new_image[
+            offset[0] : fg_shape_with_off[0], offset[1] : fg_shape_with_off[1]
+        ] = foreground_pixels
+        self.write(new_image)
diff --git a/wkcuber/api/View.py b/wkcuber/api/View.py
new file mode 100644
index 000000000..d2e79b42a
--- /dev/null
+++ b/wkcuber/api/View.py
@@ -0,0 +1,119 @@
+import numpy as np
+from wkw import Dataset
+
+from wkcuber.api.TiffData.TiffMag import TiffMag
+
+
+class View:
+    def __init__(
+        self,
+        path_to_mag_dataset,
+        header,
+        size,
+        global_offset=(0, 0, 0),
+        is_bounded=True,
+    ):
+        self.dataset = None
+        self.path = path_to_mag_dataset
+        self.header = header
+        self.size = size
+        self.global_offset = global_offset
+        self.is_bounded = is_bounded
+        self._is_opened = False
+
+    def open(self):
+        raise NotImplementedError()
+
+    def close(self):
+        if not self._is_opened:
+            raise Exception("Cannot close View: the view is not opened")
+        else:
+            self.dataset.close()
+            self.dataset = None
+            self._is_opened = False
+
+    def write(self, data, offset=(0, 0, 0)):
+        # assert that the size of the parameter data is not in conflict with
+        # the attribute self.size
+        assert_non_negative_offset(offset)
+        self.assert_bounds(offset, data.shape[-3:])
+
+        # calculate the absolute offset
+        absolute_offset = tuple(sum(x) for x in zip(self.global_offset, offset))
+
+        # remember the open state before the write so that a view that was
+        # opened on the fly is also closed again afterwards
+        was_opened = self._is_opened
+        if not was_opened:
+            self.open()
+
+        self.dataset.write(absolute_offset, data)
+
+        if not was_opened:
+            self.close()
+
+    def read(self, size=None, offset=(0, 0, 0)) -> np.ndarray:
+        was_opened = self._is_opened
+        size = size or self.size
+
+        # assert that the parameter size is not in conflict with the attribute self.size
+        self.assert_bounds(offset, size)
+
+        # calculate the absolute offset
+        absolute_offset = tuple(sum(x) for x in zip(self.global_offset, offset))
+
+        if not was_opened:
+            self.open()
+
+        data = self.dataset.read(absolute_offset, size)
+
+        if not was_opened:
+            self.close()
+
+        return data
+
+    def check_bounds(self, offset, size) -> bool:
+        for s1, s2, off in zip(self.size, size, offset):
+            if s2 + off > s1 and self.is_bounded:
+                return False
+        return True
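+
+    # check_bounds only restricts access for bounded views; unbounded views
+    # (e.g. the full-mag view of a MagDataset) are allowed to grow on write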
+    def assert_bounds(self, offset, size):
+        if not self.check_bounds(offset, size):
+            raise AssertionError(
+                f"Accessing data out of bounds: the passed parameter 'size' {size} exceeds the size of the current view ({self.size})"
+            )
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, type, value, tb):
+        self.close()
+
+
+class WKView(View):
+    def open(self):
+        if self._is_opened:
+            raise Exception("Cannot open view: the view is already opened")
+        else:
+            self.dataset = Dataset.open(
+                self.path
+            )  # no need to pass the header to the wkw.Dataset
+            self._is_opened = True
+        return self
+
+
+class TiffView(View):
+    def open(self):
+        if self._is_opened:
+            raise Exception("Cannot open view: the view is already opened")
+        else:
+            self.dataset = TiffMag.open(self.path, self.header)
+            self._is_opened = True
+        return self
+
+
+def assert_non_negative_offset(offset):
+    all_non_negative = all(i >= 0 for i in offset)
+    if not all_non_negative:
+        raise Exception(
+            "All elements of the offset need to be non-negative: ({})".format(
+                ",".join(map(str, offset))
+            )
+        )
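
A minimal usage sketch of the API introduced in this diff. It assumes the
`WKDataset` class and its `add_layer` method from wkcuber/api/Dataset.py and
wkcuber/api/Layer.py earlier in this change set; the exact signatures shown
here are illustrative assumptions, not verbatim from the diff:

    import numpy as np
    from wkcuber.api.Dataset import WKDataset  # assumed module path

    # create a dataset with a color layer and register mag "1" (wkw-backed)
    ds = WKDataset.create("testoutput/example_wk", scale=(1, 1, 1))  # assumed signature
    layer = ds.add_layer("color", "color", dtype=np.dtype("uint8"))  # assumed signature
    mag = layer.add_mag("1", block_len=32, file_len=32, block_type=1)

    # writing data grows the layer's bounding box in datasource-properties.json
    mag.write(np.zeros((10, 10, 10), dtype="uint8"), offset=(0, 0, 0))

    # reads come back channels-first (like wkw), here with shape (1, 10, 10, 10)
    data = mag.read(size=(10, 10, 10))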