From c14be20ab17d913f4e74be320f5ee744295e2ad9 Mon Sep 17 00:00:00 2001 From: Martijn Nagtegaal Date: Mon, 28 Mar 2022 16:27:45 +0200 Subject: [PATCH] First commit --- Environment.txt | 145 +++ LICENSE | 674 ++++++++++++++ MRFrecon/SPIJN2.py | 487 ++++++++++ MRFrecon/__init__.py | 8 + MRFrecon/backend.py | 1682 ++++++++++++++++++++++++++++++++++ MRFrecon/config.py | 44 + MRFrecon/d_sel_operations.py | 145 +++ MRFrecon/gen_phantom.py | 491 ++++++++++ MRFrecon/load_data.py | 128 +++ MRFrecon/mrf_recon.py | 383 ++++++++ MRFrecon/postprocessing.py | 609 ++++++++++++ README.rst | 134 +++ environment.yml | 12 + example.py | 101 ++ example_data/README.txt | 1 + example_data/config.ini | 132 +++ main_from_config_files.py | 74 ++ 17 files changed, 5250 insertions(+) create mode 100644 Environment.txt create mode 100644 LICENSE create mode 100644 MRFrecon/SPIJN2.py create mode 100644 MRFrecon/__init__.py create mode 100644 MRFrecon/backend.py create mode 100644 MRFrecon/config.py create mode 100644 MRFrecon/d_sel_operations.py create mode 100644 MRFrecon/gen_phantom.py create mode 100644 MRFrecon/load_data.py create mode 100644 MRFrecon/mrf_recon.py create mode 100644 MRFrecon/postprocessing.py create mode 100644 README.rst create mode 100644 environment.yml create mode 100644 example.py create mode 100644 example_data/README.txt create mode 100644 example_data/config.ini create mode 100644 main_from_config_files.py diff --git a/Environment.txt b/Environment.txt new file mode 100644 index 0000000..ef0b0ff --- /dev/null +++ b/Environment.txt @@ -0,0 +1,145 @@ +# This file may be used to create an environment using: +# $ conda create --name --file +# platform: win-64 +@EXPLICIT +https://repo.anaconda.com/pkgs/main/win-64/blas-1.0-mkl.conda +https://repo.anaconda.com/pkgs/main/win-64/ca-certificates-2022.3.18-haa95532_0.conda +https://repo.anaconda.com/pkgs/main/win-64/cudatoolkit-10.0.130-0.conda +https://repo.anaconda.com/pkgs/main/win-64/icc_rt-2019.0.0-h0cc432a_1.conda 
+https://repo.anaconda.com/pkgs/main/win-64/intel-openmp-2020.2-254.conda +https://repo.anaconda.com/pkgs/msys2/win-64/msys2-conda-epoch-20160418-1.tar.bz2 +https://repo.anaconda.com/pkgs/main/win-64/vs2015_runtime-14.16.27012-hf0eaf9b_3.conda +https://repo.anaconda.com/pkgs/main/win-64/winpty-0.4.3-4.conda +https://repo.anaconda.com/pkgs/main/win-64/cudnn-7.6.5-cuda10.0_0.conda +https://repo.anaconda.com/pkgs/msys2/win-64/m2w64-gmp-6.1.0-2.tar.bz2 +https://repo.anaconda.com/pkgs/msys2/win-64/m2w64-libwinpthread-git-5.0.0.4634.697f757-2.tar.bz2 +https://repo.anaconda.com/pkgs/main/win-64/mkl-2020.2-256.conda +https://repo.anaconda.com/pkgs/main/win-64/vc-14.1-h0510ff6_4.conda +https://repo.anaconda.com/pkgs/main/win-64/icu-58.2-ha925a31_3.conda +https://repo.anaconda.com/pkgs/main/win-64/jpeg-9b-hb83a4c4_2.conda +https://repo.anaconda.com/pkgs/main/win-64/lz4-c-1.9.2-hf4a77e7_3.conda +https://repo.anaconda.com/pkgs/msys2/win-64/m2w64-gcc-libs-core-5.3.0-7.tar.bz2 +https://repo.anaconda.com/pkgs/main/win-64/openssl-1.1.1n-h2bbff1b_0.conda +https://repo.anaconda.com/pkgs/main/win-64/tk-8.6.10-he774522_0.conda +https://repo.anaconda.com/pkgs/main/win-64/xz-5.2.5-h62dcd97_0.conda +https://repo.anaconda.com/pkgs/main/win-64/yaml-0.2.5-he774522_0.conda +https://repo.anaconda.com/pkgs/main/win-64/zlib-1.2.11-h62dcd97_4.conda +https://repo.anaconda.com/pkgs/main/win-64/hdf5-1.10.4-h7ebc959_0.conda +https://repo.anaconda.com/pkgs/main/win-64/libpng-1.6.37-h2a8f88b_0.conda +https://repo.anaconda.com/pkgs/msys2/win-64/m2w64-gcc-libgfortran-5.3.0-6.tar.bz2 +https://repo.anaconda.com/pkgs/main/win-64/sqlite-3.33.0-h2a8f88b_0.conda +https://repo.anaconda.com/pkgs/main/win-64/zstd-1.4.5-h04227a9_0.conda +https://repo.anaconda.com/pkgs/main/win-64/freetype-2.10.4-hd328e21_0.conda +https://repo.anaconda.com/pkgs/main/win-64/libtiff-4.1.0-h56a325e_1.conda +https://repo.anaconda.com/pkgs/msys2/win-64/m2w64-gcc-libs-5.3.0-7.tar.bz2 
+https://repo.anaconda.com/pkgs/main/win-64/python-3.7.9-h60c2a47_0.conda +https://repo.anaconda.com/pkgs/main/win-64/qt-5.9.7-vc14h73c81de_0.conda +https://repo.anaconda.com/pkgs/main/noarch/attrs-21.4.0-pyhd3eb1b0_0.conda +https://repo.anaconda.com/pkgs/main/noarch/backcall-0.2.0-pyhd3eb1b0_0.tar.bz2 +https://repo.anaconda.com/pkgs/main/win-64/certifi-2021.10.8-py37haa95532_2.conda +https://repo.anaconda.com/pkgs/main/noarch/click-7.1.2-py_0.conda +https://repo.anaconda.com/pkgs/main/noarch/cloudpickle-1.6.0-py_0.conda +https://repo.anaconda.com/pkgs/main/noarch/colorama-0.4.4-pyhd3eb1b0_0.conda +https://repo.anaconda.com/pkgs/main/win-64/debugpy-1.5.1-py37hd77b12b_0.conda +https://repo.anaconda.com/pkgs/main/noarch/decorator-5.1.1-pyhd3eb1b0_0.conda +https://repo.anaconda.com/pkgs/main/noarch/defusedxml-0.7.1-pyhd3eb1b0_0.conda +https://repo.anaconda.com/pkgs/main/win-64/entrypoints-0.3-py37_0.conda +https://repo.anaconda.com/pkgs/main/win-64/fastrlock-0.5-py37ha925a31_0.conda +https://repo.anaconda.com/pkgs/main/noarch/fsspec-0.8.3-py_0.conda +https://repo.anaconda.com/pkgs/main/noarch/heapdict-1.0.1-py_0.conda +https://repo.anaconda.com/pkgs/main/noarch/ipython_genutils-0.2.0-pyhd3eb1b0_1.conda +https://repo.anaconda.com/pkgs/main/noarch/joblib-1.1.0-pyhd3eb1b0_0.conda +https://repo.anaconda.com/pkgs/main/win-64/kiwisolver-1.2.0-py37h74a9793_0.conda +https://repo.anaconda.com/pkgs/main/win-64/llvmlite-0.34.0-py37h1a82afc_4.conda +https://repo.anaconda.com/pkgs/main/win-64/locket-0.2.0-py37_1.conda +https://repo.anaconda.com/pkgs/main/win-64/markupsafe-1.1.1-py37hfa6e2cd_1.conda +https://repo.anaconda.com/pkgs/main/win-64/mistune-0.8.4-py37hfa6e2cd_1001.conda +https://repo.anaconda.com/pkgs/main/win-64/msgpack-python-1.0.0-py37h74a9793_1.conda +https://repo.anaconda.com/pkgs/main/noarch/nest-asyncio-1.5.1-pyhd3eb1b0_0.conda +https://repo.anaconda.com/pkgs/main/win-64/olefile-0.46-py37_0.conda 
+https://repo.anaconda.com/pkgs/main/noarch/pandocfilters-1.5.0-pyhd3eb1b0_0.conda +https://repo.anaconda.com/pkgs/main/noarch/parso-0.8.3-pyhd3eb1b0_0.conda +https://repo.anaconda.com/pkgs/main/noarch/pickleshare-0.7.5-pyhd3eb1b0_1003.conda +https://repo.anaconda.com/pkgs/main/noarch/prometheus_client-0.13.1-pyhd3eb1b0_0.conda +https://repo.anaconda.com/pkgs/main/win-64/psutil-5.7.2-py37he774522_0.conda +https://repo.anaconda.com/pkgs/main/noarch/pycparser-2.21-pyhd3eb1b0_0.conda +https://conda.anaconda.org/conda-forge/noarch/pydicom-2.0.0-pyh9f0ad1d_0.tar.bz2 +https://repo.anaconda.com/pkgs/main/noarch/pygments-2.11.2-pyhd3eb1b0_0.conda +https://repo.anaconda.com/pkgs/main/noarch/pyparsing-2.4.7-py_0.conda +https://repo.anaconda.com/pkgs/main/win-64/pyreadline-2.1-py37_1.conda +https://repo.anaconda.com/pkgs/main/win-64/pyrsistent-0.18.0-py37h196d8e1_0.conda +https://conda.anaconda.org/conda-forge/win-64/python_abi-3.7-1_cp37m.tar.bz2 +https://repo.anaconda.com/pkgs/main/noarch/pytz-2020.1-py_0.conda +https://repo.anaconda.com/pkgs/main/win-64/pywin32-227-py37he774522_1.conda +https://repo.anaconda.com/pkgs/main/win-64/pywinpty-0.5.7-py37_0.conda +https://repo.anaconda.com/pkgs/main/win-64/pyyaml-5.3.1-py37he774522_1.conda +https://repo.anaconda.com/pkgs/main/win-64/pyzmq-22.3.0-py37hd77b12b_2.conda +https://repo.anaconda.com/pkgs/main/noarch/send2trash-1.8.0-pyhd3eb1b0_1.conda +https://repo.anaconda.com/pkgs/main/win-64/sip-4.19.8-py37h6538335_0.conda +https://repo.anaconda.com/pkgs/main/noarch/six-1.15.0-py_0.conda +https://repo.anaconda.com/pkgs/main/noarch/sortedcontainers-2.2.2-py_0.conda +https://repo.anaconda.com/pkgs/main/noarch/tblib-1.7.0-py_0.conda +https://repo.anaconda.com/pkgs/main/noarch/testpath-0.5.0-pyhd3eb1b0_0.conda +https://repo.anaconda.com/pkgs/main/noarch/toolz-0.11.1-py_0.conda +https://repo.anaconda.com/pkgs/main/win-64/tornado-6.0.4-py37he774522_1.conda +https://repo.anaconda.com/pkgs/main/noarch/tqdm-4.50.2-py_0.conda 
+https://repo.anaconda.com/pkgs/main/noarch/traitlets-5.1.1-pyhd3eb1b0_0.conda +https://repo.anaconda.com/pkgs/main/noarch/typing_extensions-3.7.4.3-py_0.conda +https://repo.anaconda.com/pkgs/main/noarch/wcwidth-0.2.5-pyhd3eb1b0_0.conda +https://repo.anaconda.com/pkgs/main/win-64/webencodings-0.5.1-py37_1.conda +https://repo.anaconda.com/pkgs/main/noarch/wheel-0.35.1-py_0.conda +https://repo.anaconda.com/pkgs/main/win-64/wincertstore-0.2-py37_0.conda +https://repo.anaconda.com/pkgs/main/noarch/zipp-3.7.0-pyhd3eb1b0_0.conda +https://repo.anaconda.com/pkgs/main/win-64/cffi-1.15.0-py37h2bbff1b_1.conda +https://repo.anaconda.com/pkgs/main/win-64/cycler-0.10.0-py37_0.conda +https://repo.anaconda.com/pkgs/main/win-64/cytoolz-0.11.0-py37he774522_0.conda +https://repo.anaconda.com/pkgs/main/noarch/dask-core-2.30.0-py_0.conda +https://repo.anaconda.com/pkgs/main/win-64/importlib-metadata-4.8.2-py37haa95532_0.conda +https://repo.anaconda.com/pkgs/main/win-64/jedi-0.18.1-py37haa95532_1.conda +https://repo.anaconda.com/pkgs/main/win-64/jupyter_core-4.9.2-py37haa95532_0.conda +https://repo.anaconda.com/pkgs/main/noarch/jupyterlab_pygments-0.1.2-py_0.conda +https://repo.anaconda.com/pkgs/main/noarch/matplotlib-inline-0.1.2-pyhd3eb1b0_2.conda +https://repo.anaconda.com/pkgs/main/win-64/mkl-service-2.3.0-py37hb782905_0.conda +https://conda.anaconda.org/conda-forge/noarch/packaging-20.4-pyh9f0ad1d_0.tar.bz2 +https://repo.anaconda.com/pkgs/main/noarch/partd-1.1.0-py_0.conda +https://repo.anaconda.com/pkgs/main/win-64/pillow-8.1.0-py37h4fa10fc_0.conda +https://repo.anaconda.com/pkgs/main/noarch/prompt-toolkit-3.0.20-pyhd3eb1b0_0.conda +https://repo.anaconda.com/pkgs/main/win-64/pyqt-5.9.2-py37h6538335_2.conda +https://repo.anaconda.com/pkgs/main/noarch/python-dateutil-2.8.1-py_0.conda +https://repo.anaconda.com/pkgs/main/win-64/setuptools-50.3.0-py37h9490d1a_1.conda +https://repo.anaconda.com/pkgs/main/win-64/terminado-0.9.4-py37haa95532_0.conda 
+https://repo.anaconda.com/pkgs/main/noarch/typing-extensions-3.7.4.3-0.conda +https://repo.anaconda.com/pkgs/main/noarch/zict-2.0.0-py_0.conda +https://repo.anaconda.com/pkgs/main/win-64/argon2-cffi-bindings-21.2.0-py37h2bbff1b_0.conda +https://repo.anaconda.com/pkgs/main/noarch/bleach-4.1.0-pyhd3eb1b0_0.conda +https://repo.anaconda.com/pkgs/main/win-64/distributed-2.30.0-py37_0.conda +https://repo.anaconda.com/pkgs/main/noarch/importlib_metadata-4.8.2-hd3eb1b0_0.conda +https://repo.anaconda.com/pkgs/main/win-64/ipython-7.31.1-py37haa95532_0.conda +https://repo.anaconda.com/pkgs/main/noarch/jinja2-2.11.2-py_0.conda +https://repo.anaconda.com/pkgs/main/noarch/jupyter_client-7.1.2-pyhd3eb1b0_0.conda +https://repo.anaconda.com/pkgs/main/win-64/numpy-base-1.19.2-py37ha3acd2a_0.conda +https://repo.anaconda.com/pkgs/main/win-64/pip-20.2.4-py37_0.conda +https://repo.anaconda.com/pkgs/main/noarch/argon2-cffi-21.3.0-pyhd3eb1b0_0.conda +https://repo.anaconda.com/pkgs/main/win-64/ipykernel-6.9.1-py37haa95532_0.conda +https://repo.anaconda.com/pkgs/main/noarch/jsonschema-3.2.0-pyhd3eb1b0_2.conda +https://repo.anaconda.com/pkgs/main/noarch/nbformat-5.1.3-pyhd3eb1b0_0.conda +https://repo.anaconda.com/pkgs/main/noarch/nbclient-0.5.11-pyhd3eb1b0_0.conda +https://repo.anaconda.com/pkgs/main/win-64/nbconvert-6.1.0-py37haa95532_0.conda +https://repo.anaconda.com/pkgs/main/win-64/notebook-6.2.0-py37haa95532_0.conda +https://conda.anaconda.org/conda-forge/win-64/sycomore-1.2.1-py37h1ad3211_0.tar.bz2 +https://repo.anaconda.com/pkgs/main/win-64/bokeh-2.2.2-py37_0.conda +https://repo.anaconda.com/pkgs/main/win-64/cupy-6.0.0-py37h230ac6f_0.conda +https://repo.anaconda.com/pkgs/main/win-64/h5py-2.10.0-py37h5e291fa_0.conda +https://repo.anaconda.com/pkgs/main/win-64/matplotlib-3.3.1-0.conda +https://repo.anaconda.com/pkgs/main/win-64/matplotlib-base-3.3.1-py37hba9282a_0.conda +https://repo.anaconda.com/pkgs/main/win-64/mkl_fft-1.3.0-py37h46781fe_0.conda 
+https://repo.anaconda.com/pkgs/main/win-64/mkl_random-1.1.1-py37h47e9c7a_0.conda +https://repo.anaconda.com/pkgs/main/win-64/numpy-1.19.2-py37hadc3359_0.conda +https://repo.anaconda.com/pkgs/main/win-64/numba-0.51.2-py37hf9181ef_1.conda +https://repo.anaconda.com/pkgs/main/win-64/pandas-1.1.3-py37ha925a31_0.conda +https://repo.anaconda.com/pkgs/main/win-64/pywavelets-1.1.1-py37he774522_2.conda +https://repo.anaconda.com/pkgs/main/win-64/scipy-1.6.2-py37h14eb087_0.conda +https://repo.anaconda.com/pkgs/main/noarch/dask-2.30.0-py_0.conda +https://conda.anaconda.org/conda-forge/noarch/nibabel-3.2.0-py_0.tar.bz2 +https://repo.anaconda.com/pkgs/main/noarch/seaborn-0.11.0-py_0.conda +https://conda.anaconda.org/frankong/win-64/sigpy-0.1.23-py37_0.tar.bz2 diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..f288702 --- /dev/null +++ b/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. 
If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. 
Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. 
Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. 
If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. 
+ + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the 
material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. 
If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. 
+ + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/MRFrecon/SPIJN2.py b/MRFrecon/SPIJN2.py new file mode 100644 index 0000000..d59e2f1 --- /dev/null +++ b/MRFrecon/SPIJN2.py @@ -0,0 +1,487 @@ +# -*- coding: utf-8 -*- +""" + +@author: Martijn Nagtegaal +SPIJN with support for fixed parameters added + +""" +import os +import signal +import time + +import numpy as np +import scipy as sp +import scipy.optimize +from joblib import Parallel, delayed +from scipy import sparse as scsp + +from . import d_sel_operations as dso + + +# import multiprocessing +# @profile +def nnls(A, x, sparse=False): + res = scipy.optimize.nnls(A, x)[0] + if sparse: + # Better perform the transformation to sparsity for more columns at once. + return scsp.csr_matrix(res).T + else: + return res + + +def multiply(x, y): + if len(y.shape) == 1: + try: + y = np.broadcast_to(y[:, None], x.shape) + except ValueError: + y = np.broadcast_to(y[None, :], x.shape) + if scsp.issparse(x): + return x.multiply(y).tocsc() + else: + x *= y + return x + + +def norm(X, ord=None, axis=None): + if scsp.issparse(X): + return scsp.linalg.norm(X, ord=ord, axis=axis) + else: + return np.linalg.norm(X, ord=ord, axis=axis) + + +def __get_n_jobs(n_jobs=None): + if isinstance(n_jobs, int): + return n_jobs + else: + try: # Only works on some linux versions, better for cluster + return len(os.sched_getaffinity(0)) + except AttributeError: + return os.cpu_count() - 1 # Leave 1 unused, leave some space + + +def _get_n_jobs(n_jobs=None): + n_jobs = __get_n_jobs(n_jobs) + if n_jobs == 2: + n_jobs = 1 + return n_jobs + + +def maxcomps_repr(C, num_comp): + """Return the main components for each signal with indices""" + if scsp.issparse(C): + if not scsp.isspmatrix_csc(C): + C = C.tocsc() + Sfull = np.empty((C.shape[1], num_comp)) + np.nan + Cr = np.zeros_like(Sfull) + np.nan + for c in range(C.shape[1]): + cslice = slice(*C.indptr[c:c + 2]) + cind = C.indices[cslice] + if len(cind) == 0: + continue + cvals = C.data[cslice] + sind = np.argsort(-cvals) + nci = 
min([len(sind), num_comp]) + Sfull[c, :nci] = cind[sind][:nci] + Cr[c, :nci] = cvals[sind][:nci] + else: + Sfull = np.argsort(-C, axis=0)[:num_comp].T + Cr = np.empty_like(Sfull, dtype=float) + for k, (s, Cc) in enumerate(zip(Sfull, C.T)): + Cr[k] = Cc[s] + + return Cr, Sfull + + +# @profile +def lsqnonneg2(Y, red_dict, C0=None, out_z=None, out_rel_err=None, + fixed_par_img_masks=None, fixed_par_dict_masks=None, return_r=False, + L=0, S=None, n_jobs=5, sparse=False, yblock=1e5, verbose=False): + yblock = int(yblock) + red_dict = red_dict.T + n_jobs = _get_n_jobs(n_jobs) + print(f'number of cores {n_jobs}') + + if S is not None: + diclen = len(S) + elif fixed_par_dict_masks is None: + diclen = red_dict.shape[0] + else: + diclen = np.count_nonzero(fixed_par_dict_masks[0]) + z_shape = (diclen, Y.shape[1]) + if out_z is None and not sparse: + out_z = np.empty(z_shape) + elif sparse: + out_z = scsp.lil_matrix(z_shape) + # out_z_2 = np.empty((diclen,Y.shape[1])) + if out_rel_err is None: + out_rel_err = np.empty(Y.shape[1]) + with Parallel(n_jobs=n_jobs, ) as parallel: + + if fixed_par_dict_masks is not None: + + for i, (dictionary_mask, measurement_mask) in enumerate(zip(fixed_par_dict_masks, fixed_par_img_masks)): + s_sel = measurement_mask + R = red_dict[dictionary_mask] + if S is not None: + R = R[S] + s_indx = np.arange(len(s_sel))[s_sel] + # R1 = R.T + nsig = len(s_indx) + nblocks = int((nsig - 1) / yblock) + 1 + for k in range(nblocks): + + st = k * yblock + end = np.min([(k + 1) * yblock, nsig]) + if verbose == 2 and end / yblock > .1: + print('d_selection {}/{} nblocks {}/{}'.format( + i + 1, len(fixed_par_dict_masks), k + 1, nblocks)) + sl = slice(st, end) + if nsig > 0: + Ysub = Y[:, s_indx[sl]] + if n_jobs > 2: + res = np.asarray(parallel( + delayed(lambda y: sp.optimize.nnls(R.T, y)[0])(Ysub[:, i]) for i in + range(Ysub.shape[1]))) + else: + res = np.apply_along_axis(lambda x: nnls(R.T, x), 1, Ysub.T) + if sparse: + res[res < 1e-15] = 0 + res = res.T + 
out_z[:, s_indx[sl]] = res + # for i in s_indx: + # out_z[:,i],_ = sp.optimize.nnls(R1,Y[:,i]) + else: + R = red_dict + if S is not None: + R = R[S] + nsig = Y.shape[1] + nblocks = int((nsig - 1) / yblock) + 1 + for k in range(nblocks): + + st = k * yblock + end = np.min([(k + 1) * yblock, Y.shape[1]]) + sl = slice(st, end) + if verbose == 2 and end / yblock > .1: + print('nblocks {}/{}'.format(k + 1, nblocks)) + Ysub = Y[:, sl] + if n_jobs > 1: + res = np.asarray(parallel( + delayed(lambda y: scipy.optimize.nnls(R.T, y)[0])(Ysub[:, i]) for i in range(Ysub.shape[1]))) + else: + res = np.apply_along_axis(lambda x: nnls(R.T, x, sparse=False), 1, Ysub.T) + if sparse: + res[res < 1e-15] = 0 + res = res.T + if st == 0 and end == Y.shape[1]: + out_z[:, sl] = res + else: + out_z[:, sl] = res + + # for i in range(Y.shape[1]): + # out_z[:,i],_ = sp.optimize.nnls(R1,Y[:,i]) + + if sparse: + if not scsp.isspmatrix_csc(out_z): + out_z = scsp.csc_matrix(out_z) + + Ycalc = dso.vecmat(out_z.T, red_dict, fixed_par_img_masks=fixed_par_img_masks, + fixed_par_dict_masks=fixed_par_dict_masks, S=S) + r = Y - Ycalc.transpose() + out_rel_err = np.linalg.norm(r, axis=0) / np.linalg.norm(Y, axis=0) + # efficiency() + if not return_r: + return out_z, out_rel_err + else: + return out_z, out_rel_err, r + + +# @profile +def rewlsqnonneg2(Y, red_dict, w, C0=None, out_z=None, + out_rel_err=None, fixed_par_img_masks=None, + fixed_par_dict_masks=None, + return_r=False, L=0, S=None, debug=False, + n_jobs=5, sparse=False, yblock=1e5, verbose=False, norms=None): + """Function to replace: + W = scsp.diags(w**.5) + D = red_dict1 @ W + if L>0: + D = np.vstack((D,L**2*np.ones(D.shape[1]))) + Y1 = np.vstack((Y,np.zeros(Y.shape[1]))) + else: + Y1 = Y + Q,rel_err,r = lsqnonneg2(Y1,D,C0=C0,L=L,return_r = True,fixed_par_img_masks = fixed_par_img_masks,fixed_par_dict_masks=fixed_par_dict_masks,S=S) + C = W @ Q + + Performs a reweighted NN least squares signal-wise + """ + yblock = int(yblock) + red_dict = 
red_dict.T + n_jobs = _get_n_jobs(n_jobs) + print(f'number of cores {n_jobs}') + + # clear_pseudo_hist() + if S is not None: + diclen = len(S) + elif fixed_par_dict_masks is None: + diclen = red_dict.shape[0] + else: + diclen = np.count_nonzero(fixed_par_dict_masks[0]) + + z_shape = (diclen, Y.shape[1]) + if out_z is None and not sparse: + out_z = np.empty(z_shape) + elif sparse: + out_z = scsp.lil_matrix(z_shape) + if out_rel_err is None: + out_rel_err = np.empty(Y.shape[1]) + if w is None: + w = np.ones(diclen) + + # W = scsp.diags(w**.5) + Y1 = np.vstack((Y, np.zeros(Y.shape[1]))) + regfac = np.ones(len(w)) + with Parallel(n_jobs=n_jobs, ) as parallel: + if fixed_par_dict_masks is not None: + for i, (dictionary_mask, measurement_mask) in enumerate(zip(fixed_par_dict_masks, fixed_par_img_masks)): + s_sel = measurement_mask + R = red_dict[dictionary_mask] + regfac2 = regfac.copy() + if norms is not None: + regfac2 = norms[dictionary_mask] + if S is not None: + regfac2 = regfac2[S] + + if S is not None: + R = R[S] + + R = R.T * w ** .5 + R = np.vstack((R, L / regfac2)) + + s_indx = np.arange(len(s_sel))[s_sel] + nsig = len(s_indx) + nblocks = int((nsig - 1) / yblock) + 1 + for k in range(nblocks): + st = k * yblock + end = np.min([(k + 1) * yblock, nsig]) + if verbose == 2 and end / yblock > .1: + print('d_selection {}/{} nblocks {}/{}'.format( + i + 1, len(fixed_par_dict_masks), k + 1, nblocks)) + sl = slice(st, end) + if len(s_indx) > 0: + Ysub = Y1[:, s_indx[sl]] + if n_jobs >= 2: + res = np.asarray(parallel( + delayed(lambda y: sp.optimize.nnls(R, y)[0])(Ysub[:, i]) for i in + range(Ysub.shape[1]))).T + else: + res = np.apply_along_axis(lambda x: sp.optimize.nnls(R, x)[0], 0, Ysub) + if sparse: + res[res < 1e-15] = 0 + out_z[:, s_indx[sl]] = res + + # if len(s_indx)>0: + # out_z[:,s_indx] = np.apply_along_axis(lambda x:sp.optimize.nnls(R,x)[0],0,Y1[:,s_indx]) + + else: + R = red_dict + + if norms is not None: + regfac = norms + if S is not None: + R = R[S] + 
regfac = regfac[S] + R = R.T * w ** .5 + R = np.vstack((R, L / regfac)) + # out_z = np.apply_along_axis(lambda x:sp.optimize.nnls(R,x)[0],0,Y1) + nsig = Y.shape[1] + nblocks = int((nsig - 1) / yblock) + 1 + for k in range(nblocks): + + st = k * yblock + end = np.min([(k + 1) * yblock, Y.shape[1]]) + if verbose == 2 and end / yblock > .1: + print('nblocks {}/{}'.format(k + 1, nblocks)) + sl = slice(st, end) + Ysub = Y1[:, sl] + if n_jobs > 2: + res = np.asarray(parallel( + delayed(lambda y: sp.optimize.nnls(R, y)[0])(Ysub[:, i]) for i in range(Ysub.shape[1]))) + else: + res = np.apply_along_axis(lambda x: sp.optimize.nnls(R, x)[0], 0, Ysub).T + if sparse: + res[res < 1e-15] = 0 + res = res.T + out_z[:, sl] = res + # else: + # if n_jobs>2: + # out_z = np.asarray(Parallel(n_jobs=n_jobs)(delayed(lambda y: sp.optimize.nnls(R, y)[0])(Y1[:,i]) for i in range(Y1.shape[1])) ).T + # else: + # out_z = np.apply_along_axis(lambda x:sp.optimize.nnls(R,x)[0],0,Y1) + if sparse: + if not scsp.isspmatrix_csc(out_z): + out_z = scsp.csc_matrix(out_z) + out_z = multiply(out_z, w ** .5) + Ycalc = dso.vecmat(out_z.T, red_dict, fixed_par_img_masks=fixed_par_img_masks, + fixed_par_dict_masks=fixed_par_dict_masks, S=S) + r = Y - Ycalc.transpose() + out_rel_err = np.linalg.norm(r, axis=0) / np.linalg.norm(Y, axis=0) + if not return_r: + return out_z, out_rel_err + else: + return out_z, out_rel_err, r + + +def SPIJN(Y, red_dict, weight_type=1, num_comp=None, p=0, + max_iter=20, correct_im_size=False, + verbose=True, norm_sig=True, tol=1e-4, L=0, prun=2, + fixed_par_img_masks=None, fixed_par_dict_masks=None, bd_adj=False, + C1=None, n_jobs=None, minimal_comp=0, final_it=False, + sparse=False, yblock=1e4, debug=False, norms=None, pool=False): + """Perform the SPIJN algorithm + - weight_type: as given in the thesis + - num_comp: number of components + - p: value as used in the reweighting scheme 1. 
Default 0 works fine + - max_iter: maximum number of iterations, 20 is normally fine + - verbose: more output + - norm_sig: Normalise the signal or not. False is fine + - tol: Tolerance used, when are the iterations stopped, based on + ||C^k-C^{k+1}||_F^2/||C^k||_F^2 0 and num_comp[0] == 0: + num_comp[0] = minimal_comp + if norms is None: + norms = np.ones(red_dict.shape[1]) + eps = 1e-4 + if norm_sig: + norm_Y = np.linalg.norm(Y, ord=2, axis=0) + Y = Y / norm_Y + if correct_im_size: # Correct regularisation for number of voxels + L = L * np.log10(J) + w = np.ones(N) + t0 = time.process_time() + if C1 is None: + C1, r = lsqnonneg2(Y, red_dict, fixed_par_img_masks=fixed_par_img_masks, + fixed_par_dict_masks=fixed_par_dict_masks, n_jobs=n_jobs, + sparse=sparse, yblock=yblock, verbose=verbose) + print('matching time: {0:.5f}s'.format(time.process_time() - t0)) + else: + print('Reused old JNNLS solution') + C = C1.copy() + if prun == True: + prun = 1 + used_indices = np.arange(N) + WW = [w.copy()] + k = 0 + + prun_thres = 1e-15 + if weight_type > 0: + if fixed_par_dict_masks is not None and weight_type != 1: + print('Untested behaviour for weight_type {} with fixed parameters.'.format(weight_type)) + + # red_dict1 = red_dict + N1 = N + try: + for k in range(1, max_iter): + if prun and (str(prun).startswith('cont') or prun == k): + prune_comp = np.asarray(C.sum(axis=1) / J > prun_thres).flatten() + used_indices = used_indices[prune_comp] + C = C[prune_comp] + N1 = sum(prune_comp) + if verbose and np.any(~prune_comp): + print('Pruned percentage {},rest:{}.'.format(100 - N1 / N * 100, N1)) + if N1 < 1000: + n_jobs = 1 + w = w[prune_comp] + + if verbose == 2: + verbose = True + if weight_type == 1: + w = (norm(C, 2, axis=1) + eps) ** (1 - p / 2) + elif weight_type == 2: + w = (norm(C, 2, axis=1) ** 2 + eps * norm(C, 2, axis=1)) + if bd_adj and w[0] > 1: + w[0] = 1 + w[w < eps] = eps + if debug: + v = np.zeros(N) + v[used_indices] = w + v.append(w.copy()) + C0 = C.copy() + 
t0 = time.process_time() + C, rel_err, r = rewlsqnonneg2(Y, red_dict, w, C0=C0, out_z=C if not sparse else None, L=L, + return_r=True, fixed_par_img_masks=fixed_par_img_masks, + fixed_par_dict_masks=fixed_par_dict_masks, + S=used_indices, n_jobs=n_jobs, sparse=sparse, yblock=yblock, + verbose=verbose, norms=norms) + if verbose: print('matching time: {0:.5f}s'.format(time.process_time() - t0)) + + rel = norm(C - C0, ord='fro') / norm(C, ord='fro') + if verbose: + print('k: {} relative difference between iterations {},elements: {}'.format(k, rel, np.sum( + C.sum(1) > 1e-4))) + if rel < tol or np.isnan(rel) or np.sum(C.sum(1) > 1e-4) <= num_comp[0]: + if np.isnan(rel) or np.sum(C.sum(1) > 1e-4) < num_comp[0]: + C = C0 + if verbose: + print('Stopped after iteration {}'.format(k)) + break + + except KeyboardInterrupt: + C = C0 + if str(prun).startswith('cont') or (prun and k >= prun): + w0 = w + w = np.zeros(N) + w[used_indices] = w0 + C0 = C + if sparse: + C = scsp.lil_matrix((N, J)) + else: + C = np.zeros((N, J)) + C[used_indices] = C0 + if debug: + WW_ = WW + for ww in WW[len(WW[0]):]: + ww_ = np.zeros(N) + ww_[used_indices] = ww + WW_.append(ww_) + WW = WW_ + # Final solve: + if k > 0 and final_it: + prune_comp = np.asarray(C.sum(axis=1) > prun_thres).flatten() + used_indices = np.arange(len(prune_comp))[prune_comp] # Determine indices + C0, r = lsqnonneg2(Y, red_dict, S=used_indices, sparse=sparse, yblock=yblock) + rel_err = np.linalg.norm(r, axis=0) / np.linalg.norm(Y, axis=0) + if sparse: + C = scsp.lil_matrix((N, J)) + else: + C = np.zeros((N, J)) + C[prune_comp] = C0 + if sparse: + C = C.tocsc() + # Ycalc = dso.vecmat(C.T, red_dict.T, fixed_par_img_masks=fixed_par_img_masks, fixed_par_dict_masks=fixed_par_dict_masks).T + + # r = Y - Ycalc # red_dict@C + if norm_sig: + Ydiag = scsp.spdiags(norm_Y, 0, len(norm_Y), len(norm_Y)) + C = C @ Ydiag + Cr, Sfull = maxcomps_repr(C, num_comp[1]) + + # Components, indices, relative error, used weights and result of first 
iteration are returned. + + return Cr, Sfull, rel_err, WW, C1 diff --git a/MRFrecon/__init__.py b/MRFrecon/__init__.py new file mode 100644 index 0000000..113ae67 --- /dev/null +++ b/MRFrecon/__init__.py @@ -0,0 +1,8 @@ +from . import postprocessing as postprocessing +from .backend import MrfData +from .config import load_settings +from .load_data import load_dict +from .mrf_recon import recon_wavelet, recon_fbp, recon_sense, \ + recon_sense_into_spijn, recon_wavelet_into_spijn, \ + recon_direct_spijn, recon_admm_into_spijn, recon_lr_invert, \ + recon_lr_admm, recon_lr_invert_spijn diff --git a/MRFrecon/backend.py b/MRFrecon/backend.py new file mode 100644 index 0000000..21edd9e --- /dev/null +++ b/MRFrecon/backend.py @@ -0,0 +1,1682 @@ +import itertools +import math +import os +import signal +import sys +import time +import warnings + +import h5py +import matplotlib.pyplot as plt +import numpy as np +import scipy.ndimage +import sigpy as sp +import sigpy.mri as spmri +import sigpy.plot as pl +from PIL import Image +from tqdm import tqdm + +try: + import nibabel as nib +except ImportError: + print('Nibabel not found') + +from . import d_sel_operations as dso + +try: + # from SPIJN import SPIJN + from . 
import SPIJN2 + + SPIJN = SPIJN2.SPIJN +except ImportError: + raise ImportError('SPIJN was not found, retrieve it yourself from "https://github.com/MNagtegaal/SPIJN"') + + +def get_mask(mask_file): + maskimage = Image.open(mask_file).convert('1') + mask = np.array(maskimage) + + imagesize = mask.shape[0] + numslice = int(mask.shape[1] // imagesize) + if imagesize * numslice != mask.shape[1]: + print("Mask image of invalid shape, width is not an integer multiple of height") + sys.exit(1) + mask = mask.reshape((imagesize, imagesize, numslice), order='F') + return mask + + +class MrfData: + """ + * ksp: numpy array of shape (#slices, #coils, #dynamics, #spirals_per_image, #point_on_spiral) + * coord: numpy array of shape (#dynamics, #spirals_per_image, #point_on_spiral, #spiral_coordinates) + * dictmat: numpy array of shape (#atoms, #dynamics) + * t1list: list of t1 values of shape (#atoms) + * t2list: list of t2 values of shape (#atoms) + + """ + + def __init__(self, ksp: np.ndarray = None, + coord: np.ndarray = None, + dictmat: np.ndarray = None, + dictt1: np.ndarray = None, + dictt2: np.ndarray = None, + + dictb1: np.ndarray = None, + b1_map: np.ndarray = None, + norms: np.ndarray = None, + + spijn_mask: np.ndarray = None, + + maps: np.ndarray = None, + mask: np.ndarray = None, + phasecorrection: np.ndarray = None, + comp_mat: np.ndarray = None, + compress_dict: np.ndarray = None, + imgseq: np.ndarray = None, + comp: np.ndarray = None, + index: np.ndarray = None, + ): + self.ksp = ksp + self.coord = coord + self.dictmat = dictmat + self.dictt1 = dictt1 + self.dictt2 = dictt2 + self.maps = maps + self.mask = mask + self.phasecorrection = phasecorrection + self.comp_mat = comp_mat + self.compress_dict = compress_dict + self.imgseq = imgseq + self.comp = comp + self.spijn_mask = spijn_mask + self.dictb1 = dictb1 + self.set_b1_map(b1_map=b1_map) + # For handling fixed parameter maps + self.fixed_par_dict_masks = None + self.fixed_par_img_masks = None + self.index = 
index + self.norms = norms + self._S = None # Used for subselections in pruned subdictionaries + + def __len__(self): + if self.compress_dict is None: + return self.dictmat.shape[1] + return self.compress_dict.shape[1] + + def set_b1_map(self, b1_map): + self.b1_map = b1_map + self.fixed_b1 = self.dictb1 is not None and b1_map is not None + + @property + def numslice(self): + if self.ksp is not None: + return self.ksp.shape[0] + elif self.imgseq is not None: + return self.imgseq.shape[0] + + @property + def numcoil(self): + if self.ksp is not None: + return self.ksp.shape[1] + else: + return len(self.maps) + + @property + def numdyn(self): + return self.ksp.shape[2] + + @property + def rank(self): + if self.compress_dict is None: + return self.dictmat.shape[0] + return self.compress_dict.shape[0] + + @property + def imagesize(self): + + if self.maps is not None: + return self.maps.shape[-1] + elif self.mask is not None: + return self.mask.shape[-1] + return None + + @property + def num_dict_atoms(self): + """ + + Returns: Find number of dictionary atoms that are used in matching (excludes fixed par) + + """ + try: + if self.index is not None: + return len(self.index) + except AttributeError: + pass + if self.fixed_b1: + self.fixed_par_processing() + return self.fixed_par_dict_masks[0].sum() + elif self.compress_dict is not None: + return self.compress_dict.shape[-1] + else: + return self.dictmat.shape[-1] + + def to_image(self, vec: np.ndarray): + """ + Reshape back to image from flattened array, only works for square images. 
+ Args: + vec: + + Returns: + + """ + return vec.reshape(self.imagesize, self.imagesize) + + @staticmethod + def to_vec(img: np.ndarray): + """ + Function to flatten an array, but then slightly different + Args: + img: + + Returns: + flattened array + + """ + return img.reshape(-1, 1).squeeze() + + def get_proxg(self, regtype: str = None, imshape: int = None, lam: float = None, lamscale: float = 1, + lambda_scales: np.ndarray = None, comp_device: int = None, + lambda_ksp: bool = False): + """ + Function to define obtain proxg object as used for soft thresholding in lr-inversion + Args: + regtype: (None, findif, wavelet, wavelet_sc) type of regularization to use + imshape: + lam: used lambda, convergence parameter + lamscale: lambda scaling option as: + lambda_scales = lamscale ** np.arange(imshape[1]) + lambda_scales: predefined lambda scales + comp_device: + lambda_ksp: preferred option for lambda scaling based on centre of ksp data + + Returns: + dictionary with proxg object. only g is not None, otherwise errors occured... 
+ + """ + if regtype is not None and (imshape is None or lam is None or comp_device is None): + raise IOError( + f'Regularisation input values are wrong, imshape={imshape}, lam={lam}, comp_device={comp_device}') + + if lam == 0: + regtype = None + + if regtype == 'findif': + warnings.warn('regtype findif not overly tested') + G = sp.linop.FiniteDifference(imshape, axes=(-1, -2)) + proxg = sp.prox.L1Reg(G.oshape, lam) + + def g(x): + device = sp.get_device(x) + xp = device.xp + with device: + return lam * xp.sum(xp.abs(x)).item() + elif regtype == 'wavelet': + warnings.warn('regtype wavelet not preferred') + wave_name = 'db4' + W = sp.linop.Wavelet(imshape[-2:], wave_name=wave_name) + G = sp.linop.Wavelet(imshape[1:], wave_name=wave_name, axes=(-2, -1)) + G = sp.linop.Reshape((1,) + tuple(G.oshape), tuple(G.oshape)) * G * sp.linop.Reshape(G.ishape, imshape) + proxg = sp.prox.UnitaryTransform(sp.prox.L1Reg(G.oshape, lam), G) + + # proxg = sp.prox.Stack([proxg]*imshape[0]) + def g(input): + device = sp.get_device(input) + xp = device.xp + with device: + return lam * xp.sum(xp.abs(G(input))).item() + + g = None + elif regtype == 'wavelet_sc': + if lambda_ksp: + ksp_centr = self.ksp[0, 0, :, 0, 0] # np.linalg.norm(data2.ksp, axis=(1,4)).flatten() + lambda_scales = np.abs(self.comp_mat.T @ ksp_centr) + elif lambda_scales is None: + lambda_scales = lamscale ** np.arange(imshape[1]) + lambda_scales = lambda_scales.reshape(imshape[:2] + (1, 1)) + wave_name = 'db4' + W = sp.linop.Wavelet(imshape[-2:], wave_name=wave_name) + G = sp.linop.Wavelet(imshape[1:], wave_name=wave_name, axes=(-2, -1)) + G = sp.linop.Reshape((1,) + tuple(G.oshape), tuple(G.oshape)) * G * sp.linop.Reshape(G.ishape, imshape) + lambdas = sp.to_device(sp.util.resize(lam * lambda_scales, G.oshape[:2] + [1, 1]), comp_device) + + proxg = sp.prox.UnitaryTransform(sp.prox.L1Reg(G.oshape, lambdas), G) + + # proxg = sp.prox.Stack([proxg]*imshape[0]) + def g(input): + device = sp.get_device(input) + xp = 
device.xp + with device: + return xp.sum(lambdas * xp.abs(G(input))).item() + + g = None + G = None + else: + G = None + g = None + proxg = None + return {'G': G, 'g': g, 'proxg': proxg} + + def restr_par_list(self, par='t1', index=None): + """ + Restrict parameter list with respect to used indices + Args: + par: + index: + + Returns: + + """ + if self.index is None: + return eval('self.dict' + par) + else: + plist = eval('self.dict' + par) + return plist[self.index] + + @property + def restr_dictionary(self): + """ + Obtain a restricted dictionary when self.index is not None + """ + if self.index is None: + return self.dictmat + else: + if self.fixed_b1 and '_S' in self.__dir__() and self._S is not None: + S = self._S + fixed_par_dict_masks = self.fixed_par_dict_masks + indi = np.arange(self.dictmat.shape[1]) + m_indi = [] + for m in fixed_par_dict_masks: + m_indi.append(indi[m][S]) + m_indi = np.concatenate(m_indi) + else: + m_indi = self.index + return self.dictmat[:, m_indi] + + def dict_svd(self, rank: int, verbose=0, comp_device=-1): + """ Perform PCA on dictionary + + Args: + rank (int): Rank or the approximation + verbose (int): (optional) 0: works silently, 1: show plot + Output: + (array): low rank dicationary matrix of shape (R, timelength) + """ + if comp_device > -1: + try: + import cupy + gpu_dic = sp.to_device(self.restr_dictionary, comp_device) + u, _, _ = cupy.linalg.svd(gpu_dic, full_matrices=False) + u = sp.to_device(u, -1) + except ImportError: + u, s, vh = np.linalg.svd(self.restr_dictionary, full_matrices=False) + else: + u, s, vh = np.linalg.svd(self.restr_dictionary, full_matrices=False) + if verbose: + plt.figure() + plt.semilogy(s) + plt.title("Singular values") + plt.show() + # Cut svd matrix to rank R + ur = np.zeros((self.dictmat.shape[0], rank), dtype=u.dtype) + ur[:, 0:rank] = u[:, 0:rank] + + return ur + + @staticmethod + def _discretize_fixed_parameter_map(fixed_parameter_map: np.ndarray, fixed_parameter_val: np.ndarray, + ) -> 
object: + """ + Discretizes the fixed parameter map to available values in dictionary + + Args: + fixed_parameter_map (np.ndarray): array with values to discretize + fixed_parameter_val (np.ndarray): array with values to discretize to + + + Returns: + fixed_parameter_map2: fixed parameter map with discretized values + fixed_parameter_map2_ind: indices of discretized values with respect to fixed_parameter_val + """ + vals_avail = np.unique(fixed_parameter_val) + bins_p = (vals_avail[1:] + vals_avail[:-1]) / 2 + fixed_parameter_map2_ind = res_dig = np.searchsorted(bins_p, fixed_parameter_map) + fixed_parameter_map2 = vals_avail[res_dig] + fixed_parameter_map2[np.isnan(fixed_parameter_map)] = np.nan + return fixed_parameter_map2, fixed_parameter_map2_ind + + def fixed_par_processing(self, redo: bool = False, flatten: bool = True, mask: bool = False): + """ + Define fixed_par_dict_masks and fixed_par_img_masks, to know which part of the image is related to which + pixels. + + Both are lists consisting of boolean arrays, meant to select dictionary atoms or pixels for seperated processing + + Args: + redo (bool): redo on request + flatten (bool): flatten images + mask (bool): mask b1==0 + """ + try: + if not redo and isinstance(self.fixed_par_dict_masks, list) and isinstance(self.fixed_par_img_masks, list): + return + except AttributeError: + pass + if self.fixed_b1: + dictionary_masks, measurement_masks = self._fixed_par_processing(self.dictb1, self.b1_map, flatten=flatten, + mask=mask) + else: + # Create boring lists if not necessary + dictionary_masks = [np.ones(self.__len__(), dtype=bool)] + if self.imgseq is not None: + measurement_masks = [np.ones(self.imgseq.shape[:-1], dtype=bool)] + elif self.maps is not None: + measurement_masks = [np.ones(self.maps[:, 0].shape, dtype=bool)] + elif self.mask is not None: + measurement_masks = [np.ones(self.mask.shape, dtype=bool)] + elif self.imagesize is not None: + measurement_masks = [np.ones((1, self.imagesize, 
self.imagesize), dtype=bool)] + else: + measurement_masks = [slice(None)] + if flatten: + try: + measurement_masks = [m.flatten() for m in measurement_masks] + except: + pass + self.fixed_par_dict_masks = dictionary_masks + self.fixed_par_img_masks = measurement_masks + + @staticmethod + def _fixed_par_processing(dictb1: np.ndarray, + b1_map: np.ndarray, flatten: bool = True, mask: bool = False, + ) -> object: + """ + + Args: + dictb1 (np.ndarray): list of b1 values as used in dictionary + b1_map (np.ndarray): b1_map as scanned + flatten (bool): whether or not to flatten image + mask (bool): mask b1-values==0 + + Returns: + dictionary_masks (list): masks which part of the dictionary to use when + measurement_masks (list): masks which voxel belong to which part of the dictionary + + """ + b1_map = b1_map.copy() + if mask: + b1_map[b1_map == 0] = np.nan + else: + b1_map[np.isnan(b1_map)] = 1.0 + b1_map[b1_map == 0] = 1.0 + fixed_parameter_map, fixed_parameter_map_ind = \ + MrfData._discretize_fixed_parameter_map( + fixed_parameter_map=b1_map, fixed_parameter_val=dictb1) + if flatten: + fixed_parameter_map = fixed_parameter_map.flatten() + # Now we need to create two lists with masks + # to know which part of the dictionary should be used when + dictionary_masks = [] + measurement_masks = [] + par_list = dictb1 + for v in np.unique(par_list): # check for each fixed parameter value where it is represented + dictionary_masks.append(np.isclose(par_list, v)) + measurement_masks.append(np.isclose(fixed_parameter_map, v)) + # Check whether parameter can be used as fixed parameter... 
+ for m in dictionary_masks: + if len(m) != len(dictionary_masks[0]): + raise ValueError + return dictionary_masks, measurement_masks + + @staticmethod + def density_comp_fact(coord: np.ndarray): + """ Calculate density compensation factor + + Args: + coord (array): coord array of shape (n_timeseries, n_samples, 2) + Output: + (array): Density compensation factors, array of shape (n_timeseries, n_samples) + Sources from: + Hoge, R. D., Kwan, R. K. S., & Pike, G. B. (1997). Density compensation functions for spiral MRI. Magnetic + Resonance in Medicine, 38(1), 117–128. https://doi.org/10.1002/mrm.1910380117 + """ + out = np.zeros(list(coord.shape)[:2]) + for i in range(coord.shape[0]): + k_mag = np.abs((coord[0, :, 0] ** 2 + coord[0, :, 1] ** 2) ** 0.5) + kdiff = np.append([0], np.diff(k_mag)) + out[i, :] = k_mag * np.abs(kdiff) + out[i, :] = k_mag * np.abs(kdiff) + return out + + def compress_dictionary(self, rank, comp_device=-1): + """ Compress_dictionary to low rank + + Compress the dictionary to low rank. Sets the compression matrix (comp_mat) and the compressed dictionary + (compress_dict) in the class attributes + self._S is used to restrict the dictionary according to dict-masks + + Args: + rank (int): rank of the low rank dictionary + + Output: + None + """ + self.comp_mat = self.dict_svd(rank, comp_device=comp_device) + self.compress_dict = np.matmul(self.comp_mat.T, self.dictmat) + return + + @staticmethod + def lr_sense_op(coord: np.ndarray, ur: np.ndarray, + imagesize: int, oversamp: float, maps: np.ndarray = None, + batchsize: int = None): + """Low Rank sense operator, returns sigpy operator for a single slice. 
+ + Make Low rank sense operator from sigpy linear operators + Note: + only acts on a signe slice, + If maps are not supplied, only a LR single coil nufft is returned; else the full sense operator is returned + + Args: + coord (array): coordinate array of shape (timelength, coordlength, 2) + ur (array): low rank compression matrix of shape (rank, timelength) + imagesize (int): size of the image + oversamp (float): oversampling ratio + maps (array): (optional) sensitivity maps of birdcage coil of shape (n_coils, imgsize[0], imgsize[1]) + batchsize (int): (optional) batchsize for computation + + Output: + (Linop): low rank sense operator of shape + <[n_coils, timelength, coordlength] x [rank, imgsize[0], imgsize[1]]> + + Makes low rank sense operator from standard Sigpy linops + source: https://sigpy.readthedocs.io/ + """ + if batchsize is None: + batchsize = coord.shape[0] + rank = ur.shape[0] + imgseq_shape = [1, rank, imagesize, imagesize] + reshape_oshape = [imgseq_shape[0]] + [1] + imgseq_shape[1:] + to_coil_img = sp.linop.Reshape(ishape=imgseq_shape, oshape=reshape_oshape) + if maps is not None: + # Make multiply linop to multiply image sequence with sense maps + maps_op = sp.linop.Multiply(ishape=to_coil_img.oshape, + mult=maps[:, :, None, :, :]) * to_coil_img + else: + # Make empty multiply linop + maps_op = to_coil_img + + # Low rank interpolation linear operator + list_interp_linop = [] + # batchsize = settings.getint('lr_sense_batchsize') # overwrite batchsize + for i in range(math.ceil(coord.shape[0] / batchsize)): + start = i * batchsize + end = np.min([(i + 1) * batchsize, coord.shape[0]]) + sl = slice(start, end) + + interp_op = sp.linop.NUFFT(ishape=maps_op.oshape, coord=coord[sl, ...], + oversamp=oversamp) + fr_proj_op = sp.linop.Multiply(ishape=interp_op.oshape, + mult=ur[None, None, :, sl, None, None]) + sum_op = sp.linop.Sum(ishape=fr_proj_op.oshape, axes=(2,)) + single_op = sum_op * fr_proj_op * interp_op + list_interp_linop.append(single_op) + 
batch_lr_interp_op = sp.linop.Vstack(list_interp_linop, axis=2) + totalop = batch_lr_interp_op * maps_op + totalop.repr_str = 'LR Sense' + return totalop + + def mrf_espirit(self, imagesize, coil_batchsize=None, slice_batchsize=1, max_iter=50, + oversamp=1.25, + compute_device=-1, mask_thresh=0.0, verbose=1, + tol_fac=0): + """ Calculate sensitivity maps using ESPIRiT. + + Calculate the sensitivity maps from the kspace data. + First calculates the rank 1 image sequence per coil, than use ESPIRiT for sensitivity maps calculations + Updates sensitivity maps (maps) in class + ESPIRiT maps are masked by threshold, if a mask is supplied, that mask will be used instead + + Args: + imagesize (int): size of the reconstruction matrix + coil_batchsize (int): (optional) number of coils calculated simultaneously + (defaults to number of coils in kspace) + slice_batchsize (int): (optional) number of slices calculated simultaneously (defaults to 1) + max_iter (int): (optional) number of conjugate gradient iterations + oversamp (float): (optional) oversampling ratio + compute_device (int): (optional) -1 for cpu, 0 for gpu + mask_thresh (float): (optional) Threshold for masking the sensitivity maps, relative to max img amplitude + verbose (int): (optional) 0: work silently; 1: show progressbar; 2: show progressbar and plots + tol_fac (Float): (optional): tolerance to stop cg iterations + + Output: + + """ + print("Espirit Calibration") + if coil_batchsize is None: + coil_batchsize = self.numcoil + + if self.comp_mat is None: + # if uncompressed, compress to rank 1 + self.compress_dictionary(1, comp_device=compute_device) + + # Load additional data + img_oshape = (self.numslice, self.numcoil, imagesize, imagesize) + + # determine compute device + device = sp.Device(compute_device) + cpu = sp.Device(-1) + xp = device.xp + + # preallocate data arrays + comp_mat_gpu = sp.to_device(self.comp_mat[:, 0, None].T, device=device) + coord_gpu = sp.to_device(self.coord, device=device) + 
self.maps = np.zeros(img_oshape, dtype=self.ksp.dtype) + + # create linear operator + linop = self.lr_sense_op(coord_gpu, comp_mat_gpu, imagesize, oversamp) + + # Calculate rank 1 image sequence per coil + image = xp.zeros(img_oshape, dtype=np.complex) + loops = itertools.product(range(math.ceil(img_oshape[0] / slice_batchsize)), + range(math.ceil(img_oshape[1] / coil_batchsize))) + for sl, cl in list(loops): + # batch slices + start = sl * slice_batchsize + end = np.min([(sl + 1) * slice_batchsize, img_oshape[0]]) + slice_sl = slice(start, end) + + batch_linop = sp.linop.Diag([linop for _ in range(end - start)], iaxis=0, oaxis=0) + + # batch coils + start = cl * coil_batchsize + end = np.min([(cl + 1) * coil_batchsize, img_oshape[1]]) + coil_sl = slice(start, end) + # Construct batched linop + batch_linop = sp.linop.Diag([batch_linop for _ in range(end - start)], iaxis=1, oaxis=1) + + # load data 1 slice at a time to preserve memory + ksp_gpu_slice = sp.to_device(self.ksp[slice_sl, coil_sl, ...], device=device) + tol_sl = tol_fac * np.linalg.norm(self.ksp[slice_sl, coil_sl, ...]) + + # Calculation 1 slice at a time because spmri.app.EspiritCalib can only handle 1 slice at the same time + image[slice_sl, coil_sl, ...] = sp.app.LinearLeastSquares(batch_linop, ksp_gpu_slice, + max_iter=max_iter, + show_pbar=True, tol=tol_sl, ).run() + + # calculate sensitivity maps from coil images + for sl in range(self.numslice): + kspgrid_gpu = sp.fourier.fft(image[sl, ...], axes=(-1, -2)) + maps_gpu = spmri.app.EspiritCalib(kspgrid_gpu, device=device, show_pbar=True).run() + + # store all data in RAM + self.maps[sl, ...] = sp.to_device(maps_gpu, device=cpu) + + # Apply mask + if self.mask is None: + self.mask = np.empty((self.numslice, imagesize, imagesize), dtype=bool) + for sl in range(self.numslice): + mult = sp.linop.Multiply((imagesize, imagesize), self.maps[sl, ...]) + sumimg = np.abs(mult.H(sp.to_device(image[sl, ...], device=cpu))) + self.mask[sl, ...] 
= sumimg > (mask_thresh * np.max(sumimg)) + scipy.ndimage.binary_closing(self.mask[sl], iterations=3, output=self.mask[sl]) + + self.maps *= self.mask[:, None, ...] + + if verbose == 2: + pl.ImagePlot(np.abs(self.maps[0, ...]), z=0, title='Magnitude sensitivity maps estimated by ESPIRiT') + pl.ImagePlot(np.angle(self.maps[0, ...]), z=0, title='phase Sensitivity Maps Estimated by ESPIRiT') + pl.ImagePlot(np.abs(sp.to_device(image[0, ...], device=cpu)), z=0, title='Magnitude calibration image') + pl.ImagePlot(np.angle(sp.to_device(image[0, ...], device=cpu)), z=0, title='phase Calibration image') + plt.imshow(sp.to_device(self.mask[0, ...], device=cpu)) + plt.show() + + return + + def coil_compression(self, numvirtcoil): + """ Apply PCA coil compression + + Perform PCA on sensitivity maps and compress sensitivity maps and k-space + Updates ksp and maps in class + + Args: + numvirtcoil (int): Number of virtual coils to simulate + + Output: + None + """ + + if numvirtcoil is None: + return + + # Calculate LR maps + lr_maps_oshape = list(self.maps.shape) + lr_maps_oshape[1] = numvirtcoil + lr_maps = np.zeros(lr_maps_oshape, dtype=self.maps.dtype) + + lr_ksp_shape = list(self.ksp.shape) + lr_ksp_shape[1] = numvirtcoil + compress_ksp = np.zeros(lr_ksp_shape, dtype=self.maps.dtype) + for i in range(self.maps.shape[0]): + maps_reshape = self.maps[i, ...].reshape(self.maps.shape[1], -1).T + u, s, vh = np.linalg.svd(maps_reshape, full_matrices=False) + us = np.matmul(u, np.diag(s)) + lr_maps[i, ...] = us.T.reshape(self.maps.shape[1:])[0:numvirtcoil, ...] + + # compress kspace data + vhr = vh[0:numvirtcoil, :].conj() + kspdata_reshape = self.ksp[i, ...].reshape(self.ksp.shape[1], -1) + compress_ksp[i, ...] = np.matmul(vhr, kspdata_reshape).reshape(lr_ksp_shape[1:]) + + self.maps = lr_maps + self.ksp = compress_ksp + return + + def rotate2real(self): + """ Rotate a complex vlaued images sequence to a real real valued image sequence. 
+ + Takes a low rank image sequence and removes the phase of the first image to make the image sequence close to a + real valued image sequence, then discard the imaginary part. + Takes data from imgseq and updates imgseq to a real valued image sequence + Data type is changed from complex to float. + + Whatch out! Overwrites imgseq + + Args: + None + + Output: + None + """ + phasecorrection = np.angle(self.imgseq[:, :, 0]) - np.pi + self.imgseq *= np.exp(1j * -phasecorrection)[:, :, np.newaxis] + self.phasecorrection = phasecorrection + + # discard left over imaginary part + self.imgseq = self.imgseq.real + return + + def filt_back_proj(self, imagesize, compute_device: int = -1, oversamp=1.25, verbose=1): + """ filtered back projection image reconstruction + + Calculate filtered back projection for each dynamic, updates the image sequence in class + + Args: + imagesize (int): shape of the image + compute_device (int): (optional) -1 for cpu, 0 for gpu + oversamp (float): (optional) oversampling ratio (default 1.25) + verbose (int): (optional) 0: work silently; 1: show progressbar; 2: show progressbar and plots + + Output: + None + """ + # sense recon every image, Single slice only + device = sp.Device(compute_device) + cpu = sp.Device(-1) + + coord_gpu = sp.to_device(self.coord, device=device) + self.imgseq = np.zeros((self.numslice, self.numcoil, imagesize ** 2, self.numdyn), dtype=self.ksp.dtype) + + loops = itertools.product(range(self.numslice), range(self.numcoil), range(self.numdyn)) + for sl, cl, dyn in tqdm(list(loops), disable=verbose == 0): + dcf = self.density_comp_fact(self.coord[dyn, ...]) + ksp_gpu = sp.to_device(self.ksp[sl, cl, dyn, ...] 
* dcf, device=device) + img = sp.fourier.nufft_adjoint(ksp_gpu, coord_gpu[dyn, ...], oshape=(imagesize, imagesize), + oversamp=oversamp) + self.imgseq[sl, cl, :, dyn] = sp.to_device(self.to_vec(img), device=cpu) + print('done') + + def senserecon(self, l2reg: float = 0, compute_device: int = -1, + verbose=1, tol_fac: float = 0): + """ Sense image reconstruction + + Calculate iterative sense reconstruction for each dynamic, updates the image sequence in class + + Args: + l2reg (float): (optional) regularization parameter for L2 regularization + compute_device (int): (optional) -1 for cpu, 0 for gpu + verbose (int): (optional) 0: work silently; 1: show progressbar; 2: show progressbar and plots + tol_fac (float): (optional) tolerance factor as used in least squares solve. + Output: + None + """ + # sense recon every image, Single slice only + device = sp.Device(compute_device) + cpu = sp.Device(-1) + + coord_gpu = sp.to_device(self.coord, device=device) + maps_gpu = sp.to_device(self.maps, device=device) + imgseq = sp.to_device(np.zeros((self.numslice, self.imagesize ** 2, self.numdyn), dtype=self.ksp.dtype), + device=device) + + loops = itertools.product(range(self.numslice), range(self.numdyn)) + for sl, dyn in tqdm(list(loops), desc="Sense reconstruction", disable=not verbose): + if tol_fac is not None: + tol_sl = tol_fac * np.linalg.norm(self.ksp[sl, :, dyn]) + ksp_gpu = sp.to_device(self.ksp[sl, :, dyn, ...], device=device) + imgseq[sl, :, dyn] = self.to_vec( + spmri.app.SenseRecon(ksp_gpu, maps_gpu[sl, ...], + coord=coord_gpu[dyn, ...], + lamda=l2reg, show_pbar=False, + device=device, tol=tol_sl).run()) + self.imgseq = sp.to_device(imgseq, device=cpu) + return + + def waveletrecon(self, lamda: float = 0, compute_device: int = -1, verbose=1, norm_ksp=True, tol=None, + tol_fac=1e-5): + """ Wavelet image reconstruction + + Calculate iterative wavelet reconstruction for each dynamic, updates the image sequence in class + + Args: + lamda (float): (optional) 
regularization parameter for wavelet regularization + compute_device (int): (optional) -1 for cpu, 0 for gpu + verbose (int): (optional) 0: work silently; 1: show progressbar; 2: show progressbar and plots + tol_fac (float): (optional) tolerance factor as used in least squares solve. + + Output: + None + """ + # sense recon every image, Single slice only + if norm_ksp: + ksp_norm_fact = np.linalg.norm(self.ksp[:, 0, :, 0, 0], axis=1) + self.ksp /= ksp_norm_fact[:, None, None, None, None] + + device = sp.Device(compute_device) + cpu = sp.Device(-1) + + coord_gpu = sp.to_device(self.coord, device=device) + maps_gpu = sp.to_device(self.maps, device=device) + imgseq = sp.to_device(np.zeros((self.numslice, self.imagesize ** 2, self.numdyn), dtype=self.ksp.dtype), + device=device) + + loops = itertools.product(range(self.numslice), range(self.numdyn)) + for sl, dyn in tqdm(list(loops), desc="Wavelet reconstruction", disable=not verbose): + if tol is None: # Set tol scaled with norm of the input data + tol_sl = tol_fac * np.linalg.norm(self.ksp[sl, :, dyn]) + else: + tol_sl = tol + ksp_gpu = sp.to_device(self.ksp[sl, :, dyn, ...], device=device) + imgseq[sl, :, dyn] = self.to_vec( + spmri.app.L1WaveletRecon(ksp_gpu, maps_gpu[sl, ...], coord=coord_gpu[dyn, ...], + lamda=lamda, show_pbar=False, device=device, tol=tol_sl).run()) + self.imgseq = sp.to_device(imgseq, device=cpu) + + if norm_ksp: + self.ksp *= ksp_norm_fact[:, None, None, None, None] + self.imgseq *= ksp_norm_fact[:, None, None] + + return + + def calc_PDHG_sigma(self, lamda=.01, sl=None, device=-1, maps=None): + + sigmas = [] + if sl is None: + sls = range(self.numslice) + elif isinstance(sl, list): + sls = sl + elif isinstance(sl, int): + sls = [sl] + else: + raise IOError(f'sl {sl} unknown') + + if maps is None: + maps = self.maps + + for sl_ in sls: + if sp.get_device(maps) != device and maps.ndim == 4: + if kmaps.shape[0] == 1: + kmaps = sp.to_device(maps[0], device=device) + else: + kmaps = 
sp.to_device(maps[sl_], device=device) + elif sp.get_device(maps) != device: + kmaps = sp.to_device(maps, device=device) + elif maps.ndim == 4: + if maps.shape[0] == 1: + kmaps = maps[0] + else: + kmaps = maps[sl_] + else: + kmaps = maps + sig = sp.mri.kspace_precond(kmaps, coord=sp.to_device(self.coord, device), lamda=lamda, device=device) + if sp.get_device(sig) != sp.get_device(maps): + sig = sp.to_device(sig, sp.get_device(maps)) + sigmas.append(sig) + if isinstance(sl, int): + sigmas = sigmas[0] + return sigmas + + def lr_inversion(self, batchsize=None, oversamp=1.25, warmstart=False, + lam: float = 0, bias: np.ndarray = None, + max_iter: int = 150, compute_device=-1, + verbose=1, lstsq_solver=None, sigmas=None, tol=None, + tol_fac=0.001, reg_kwargs=None, norm_ksp=True, **kwargs): + """" Low rank inversion image reconstruction + + Use the LR sense linop constructed in self.lr_sense_op() to reconstruct the signal with a linear least squares + solver for Low rank inversion of the signal to the Low rank image sequence. + updates the imgseq attribute in class + + Args: + batchsize (int): (optional) number of dynamics calculated simultaneously defaults to mridata.numdyn + oversamp (float): (optional) oversampling ratio (default 1.25) + warmstart (bool): (optional) use previous solution as a warm start, only works if previous solution exists. 
+ lam (float): (optional) L2 regularization parameter + bias(array): (optional) Bias for the L2 regularization + max_iter(int): (optional) Amount of conjugate gradient iterations, default 150 + compute_device (int): (optional) -1 for cpu, 0 for gpu + verbose (int): 0: (optional) work silently; 1: show progressbar; 2: show progressbar and plots + tol: (optional) Stopping tolerance for LR inversion iterations + tol_fac: (optional, float): Scaling factor which is used for calculation of criterium for LR inversion + reg_kwargs (optional): Kwargs passed to get_proxg for (wavelet) regularization + **kwargs are pushed to linearleastsquares app + Output: + (array): image sequence of shape(imgseq[0] * imgseq[1], rank) + """ + print("imgseq by Low rank Inversion") + if batchsize is None: + batchsize = self.numdyn + if norm_ksp: + ksp_norm_fact = np.linalg.norm(self.ksp[:, 0, :, 0, 0], axis=1) + self.ksp /= ksp_norm_fact[:, None, None, None, None] + # Determine compute device and batch size + device = sp.Device(compute_device) + cpu = sp.Device(-1) + + # Move static data to GPU + coord_array_gpu = sp.to_device(self.coord, device=device) + ur_gpu = sp.to_device(self.comp_mat.conj().T, device=device) + + # preallocate output array + if self.imgseq is None or warmstart is False: + self.imgseq = np.zeros((self.numslice, self.imagesize ** 2, self.rank), dtype=self.ksp.dtype) + if reg_kwargs is not None: + regz = self.get_proxg(imshape=(1, self.rank, self.imagesize, self.imagesize), **reg_kwargs, + comp_device=device) + else: + regz = {} + # Loop over slices + slice_batchsize = 1 + for sl in range(math.ceil(self.numslice / slice_batchsize)): + # batch slices + start = sl * slice_batchsize + end = np.min([(sl + 1) * slice_batchsize, self.numslice]) + slice_sl = slice(start, end) + + if tol is None: # Set tol scaled with norm of the input data + tol_sl = tol_fac * np.linalg.norm(self.ksp[slice_sl]) + else: + tol_sl = tol + + # load slice specific data in VRAM + kspacedata_gpu = 
sp.to_device(self.ksp[slice_sl, ...], device=device) + if self.maps is not None: + maps_gpu = sp.to_device(self.maps[slice_sl, ...], device=device) + total_op = self.lr_sense_op(coord_array_gpu, ur_gpu, self.imagesize, oversamp, maps_gpu, batchsize) + + if bias is None: + gpu_bias = sp.to_device(np.zeros(total_op.ishape), device=device) + else: + gpu_bias = sp.to_device(bias[slice_sl, ...].T.reshape(total_op.ishape), device=device) + x_init_gpu = sp.to_device(self.imgseq[slice_sl, ...].T.reshape(total_op.ishape), device=device) + + # For PDHG define or get sigma and tau values, as preconditioner and saved other calculations respectivily. + if lstsq_solver == 'PrimalDualHybridGradient' and sigmas is None: + sigma = self.calc_PDHG_sigma(sl=sl, maps=maps_gpu, device=device) + elif lstsq_solver == 'PrimalDualHybridGradient' and sigmas is not None: + sigma = sigmas[sl] + else: + sigma = None + try: + tau = self.taus[sl] + except AttributeError: + if lstsq_solver == 'PrimalDualHybridGradient': + print('(re)Calculate tau for PDHG') + tau = None + # Load in VRAM + if lstsq_solver == 'PrimalDualHybridGradient' and sigma is not None and sp.get_device(sigma) != device: + sigma = sp.to_device(sigma, device=device) + if lstsq_solver == 'PrimalDualHybridGradient' and tau is not None and sp.get_device(tau) != device: + tau = sp.to_device(tau, device=device) + + # Solve linear least squared + lls_app = sp.app.LinearLeastSquares(total_op, kspacedata_gpu, x=x_init_gpu, + lamda=lam, z=gpu_bias, + max_iter=max_iter, save_objective_values=False, + show_pbar=verbose > 0, + solver=lstsq_solver, sigma=sigma, tol=tol_sl, tau=tau, **regz, + **kwargs) + + sliceresult = lls_app.run() + if lstsq_solver == 'PrimalDualHybridGradient' and hasattr(self, 'taus'): # Save tau + self.taus[sl] = sp.to_device(lls_app.tau, device=cpu) + # objval = lls_app.objective_values + + self.imgseq[slice_sl, ...] 
= sp.to_device(sliceresult, device=cpu).reshape(self.rank, -1).T + if norm_ksp: + self.ksp *= ksp_norm_fact[:, None, None, None, None] + self.imgseq *= ksp_norm_fact[:, None, None] + return + + def nnls(self, regparam: float = None, weights: np.ndarray = None, + overwrite_imgseq: np.ndarray = None, mask: np.ndarray = None, n_jobs: int = None, + verbose: bool = False, + norm_correction: bool = False): + """Solve Non-Negative Least Squares components from image sequence + + Calculate the non negative components from the image sequence. + If regparam and weights are supplied the nnls is solved with the reweighted norm + By default the image sequence stored in class attribute imgseq is used, unless overwrite_imgseq + + Args: + regparam (float): (optional) joint sparicty regularization parameter + weights (np.ndarray): (optional) weights for reweighing the dictionary + overwrite_imgseq (np.ndarray): (optional) Image sequence to use in stead of self.imgseq + mask (np.ndarray): (optional) mask as used to mask the image sequence (True are kept) + n_jobs (boolean): (optional) number of jobs created by joblibs, not always an improvement + norm_correction (boolean): (optional) correct for normalisation of the dictionary + + Output: + None + """ + mod_imgseq = self.imgseq.copy() + if overwrite_imgseq is not None: + mod_imgseq = overwrite_imgseq + self.fixed_par_processing(redo=True, flatten=True, mask=False) + fixed_par_dict_masks = self.fixed_par_dict_masks + fixed_par_img_masks = self.fixed_par_img_masks + + # modify dictionary + dictmat = self.compress_dict.copy() + + if mask is None: # Really only mask zeros + mask = np.abs(mod_imgseq[:, :, 0]) > 0 # essentially follows mask from espirit calibration + + # reshape into vector form + mod_imgseq_res = mod_imgseq.reshape(-1, mod_imgseq.shape[-1]) + mask_res = mask.reshape(-1) + fixed_par_img_masks = [m.flatten()[mask_res] for m in fixed_par_img_masks] + # preallocate array and calc mask + nnls_solve = 
np.zeros([np.prod(list(mod_imgseq.shape)[:-1]), + self.num_dict_atoms]) + + if weights is None and regparam is None: + nnls_solve[mask_res, :] = SPIJN2.lsqnonneg2( + mod_imgseq_res[mask_res, :].T, dictmat, + out_z=None, + fixed_par_img_masks=fixed_par_img_masks, + fixed_par_dict_masks=fixed_par_dict_masks, + n_jobs=n_jobs, S=self._S, verbose=verbose > 0 + )[0].T + else: + nnls_solve[mask_res, :] = SPIJN2.rewlsqnonneg2( + mod_imgseq_res[mask_res, :].T, dictmat, + weights, out_z=None, + fixed_par_img_masks=fixed_par_img_masks, + fixed_par_dict_masks=fixed_par_dict_masks, + L=regparam, n_jobs=n_jobs, S=self._S, + verbose=verbose > 0 + )[0].T + if self.norms is not None and norm_correction: + nnls_solve = dso.multiply(nnls_solve, 1 / self.norms, fixed_par_dict_masks=fixed_par_dict_masks, + fixed_par_img_masks=fixed_par_img_masks) + self.comp = nnls_solve.reshape(list(mod_imgseq.shape)[:-1] + [self.num_dict_atoms]) + return + + def lr_admm(self, admm_param: float, batchsize: int = None, oversamp: float = 1.25, + max_iter: int = 10, max_cg_iter: int = 20, compute_device=-1, + outpath=None, regparam=None, weights=None, + verbose=1, lstsq_solver=None, tol=None, tol_fac=0.001, + norm_ksp=True, n_jobs=None, norm_correction=False, **kwargs): + """" Low rank MC-ADMM image and component reconstruction + + Use the ADMM method to solve the constrained the LR inversion image reconstruction + Updates image sequence attribute in class to the real valued image sequence as calculated. 
+ Updates components attribute in class + + Args: + admm_param (float): ADMM coupling parameter + batchsize (int): (optional) number of dynamics calculated simultaneously defaults to mridata.numdyn + oversamp (float): (optional) oversampling ratio (default 1.25) + max_iter (int): (optional) Amount of admm iterations, default 10 + max_cg_iter(int): (optional) Amount of conjugate gradient iterations, default 20 + compute_device (int): (optional) -1 for cpu, 0 for gpu + outpath (string): (optional) path to file location to store intermediate results + regparam (float): (optional) joint sparsity regularization parameter + weights (np.ndarray): (optional) weights for reweighing the dictionary + verbose (int): 0: (optional) work silently; 1: show progressbar; 2: show progressbar and plots + lstsq_solver (str) : (optional) Least squares solver to use, default CG + PrimalDualHybridGradient tries to + use a preconditioner + tol: (optional) Stopping tolerance for LR inversion iterations + tol_fac: (optional, float): Scaling factor which is used for calculation of criterion for LR inversion + + + Output: + (array): image sequence of shape(imgseq[0] * imgseq[1], rank) + """ + # Initialize ADMM variables + converged = False + it = 0 + imgseq_oshape = (self.numslice, self.imagesize ** 2, self.rank) + lag_mult = np.zeros(imgseq_oshape, dtype=np.float64) + residuals = np.zeros((4, max_iter), dtype=np.float64) + # obj_val_arr = np.zeros((max_iter, max_cg_iter + 1)) + local_admm_param = admm_param + + if norm_ksp: + ksp_norm_fact = np.linalg.norm(self.ksp[:, 0, :, 0, 0], axis=1) + self.ksp /= ksp_norm_fact[:, None, None, None, None] + + if lstsq_solver == 'PrimalDualHybridGradient': + sigmas = self.calc_PDHG_sigma(device=compute_device) + self.taus = [None] * self.numslice + else: + sigmas = None + + comp0 = None + + while it < max_iter and not converged: + print('Outer loop iteration {} of {}'.format(it + 1, max_iter)) + + # %% Step 1: Solve for phase rotated image sequence + if it 
== 0: + self.lr_inversion(batchsize=batchsize, oversamp=oversamp, + compute_device=compute_device, + max_iter=max_cg_iter, + lstsq_solver=lstsq_solver, sigmas=sigmas, tol=tol, tol_fac=tol_fac, norm_ksp=False, + **kwargs) + else: + l2bias = (dc - lag_mult) * np.exp(1j * self.phasecorrection)[:, :, np.newaxis] # DC-v + self.imgseq = self.imgseq.astype(np.complex) * np.exp(1j * self.phasecorrection)[:, :, np.newaxis] + self.lr_inversion(batchsize=batchsize, oversamp=oversamp, warmstart=True, lam=local_admm_param, + bias=l2bias, max_iter=max_cg_iter, compute_device=compute_device, + lstsq_solver=lstsq_solver, sigmas=sigmas, tol=tol, tol_fac=tol_fac, norm_ksp=False, + **kwargs) + + # rotate image sequence to real axis + self.rotate2real() + + # %% Step 2: Solve for components without joint sparsity + mod_imgseq = self.imgseq + lag_mult + if verbose: + print('NNLS solve') + self.nnls(regparam=regparam, weights=weights, + overwrite_imgseq=mod_imgseq, n_jobs=n_jobs, verbose=verbose) # Don't supply a mask here, + # that leads to zeros in unwanted places, leading to exploding errors in other places. 
+ if comp0 is not None: + rel = np.linalg.norm(self.comp.flatten() - comp0.flatten()) / np.linalg.norm(self.comp.flatten()) + print(f'Iteration: {it}, nnls convergence:{rel}') + converged = rel < tol_fac / 10 + comp0 = self.comp.astype(np.float32).copy() + # %% Step 3: Update lagrange multiplier + dc = dso.vecmat(self.comp, self.compress_dict.T, + fixed_par_img_masks=[m.reshape(imgseq_oshape[:-1]) for m in self.fixed_par_img_masks], + fixed_par_dict_masks=self.fixed_par_dict_masks, + S=self._S).reshape(self.imgseq.shape) + # dc = (self.comp @ self.compress_dict.T).reshape(self.imgseq.shape) + lag_mult += self.imgseq - dc # v+x-dc + if verbose > 1: + for i in range(lag_mult.shape[2]): + plt.subplot(1, 3, 1) + plt.imshow(self.imgseq[0, :, i].reshape(224, 224)) + plt.colorbar() + plt.subplot(1, 3, 2) + plt.imshow((self.imgseq[0, :, i] - dc[0, :, i]).reshape(224, 224)) + plt.colorbar() + plt.subplot(1, 3, 3) + plt.imshow(dc[0, :, i].reshape(224, 224)) + plt.colorbar() + plt.show() + + # %% end of ADMM loop, the rest is for calculating residuals + # Forward linear operator + + if verbose: + list_linop = [] + for i in range(self.numslice): + singleslice_linop = self.lr_sense_op(self.coord, self.comp_mat.T, imagesize=self.imagesize, + oversamp=oversamp, maps=self.maps[i, None, ...], + batchsize=batchsize) + list_linop.append(singleslice_linop) + linop = sp.linop.Diag(list_linop, 0, 0) + # norm_y = np.linalg.norm(ksp) + + # re-apply phase offset correction to dc + dc_phasecorr = dc * np.exp(1j * self.phasecorrection)[:, :, np.newaxis] + + # |GFSDc-y| + dc_img = np.transpose(dc_phasecorr, (0, 2, 1)).reshape((self.numslice, self.rank, self.imagesize, + self.imagesize)).astype(np.complex128) + obj_val1 = np.linalg.norm(linop(dc_img) - self.ksp) + + print('Objective function value: |GFSDc-y| = {}'.format(obj_val1)) + residuals[0, it] = obj_val1 + + imgseq = self.imgseq.astype(np.complex) * np.exp(1j * self.phasecorrection)[:, :, np.newaxis] + + # |GFSx-y| + lr_imgseq_reshape = 
np.transpose(imgseq, (0, 2, 1)).reshape((self.numslice, self.rank, + self.imagesize, self.imagesize)).astype( + np.complex128) + obj_val2 = np.linalg.norm(linop(lr_imgseq_reshape) - self.ksp) + print('Obj_val, |GFSx-y| = {}'.format(obj_val2)) + residuals[1, it] = obj_val2 + + # |x-Dc| + obj_val3 = np.linalg.norm(imgseq - dc) + print('Objective function value: |x-Dc| = {}'.format(obj_val3)) + residuals[2, it] = obj_val3 + + # |GFSx-y| + mu1|c| + mu2|x-Dc-v| + residuals[3, it] = np.linalg.norm(linop(lr_imgseq_reshape) - self.ksp) ** 2 + \ + local_admm_param * np.linalg.norm(imgseq - dc + lag_mult) ** 2 + \ + local_admm_param * np.linalg.norm(lag_mult) ** 2 + print('Objective function value: |GFSx-y| + mu2|x-Dc+v| = {}'.format(residuals[3, it])) + + # convergence rate + if it > 0: + num = np.linalg.norm(imgseq_old - self.imgseq) + rate = num / np.linalg.norm(self.imgseq) + print('Convergence rate = {}'.format(rate)) + imgseq_old = self.imgseq + + if outpath is not None: + self.residuals = residuals + self.to_h5(outpath, 'admm{}.h5'.format(it)) + delattr(self, 'residuals') + + if verbose == 2: + imagesequence = np.transpose(self.imgseq, (0, 2, 1)).reshape((self.numslice, self.rank, + self.imagesize, self.imagesize)) + for i in range(imagesequence.shape[1]): + pl.ImagePlot(imagesequence[:, i, ...], z=0, title='Image sequence, rank {}'.format(i + 1)) + it += 1 + if lstsq_solver == 'PrimalDualHybridGradient': + del self.taus + if norm_ksp: + self.ksp *= ksp_norm_fact[:, None, None, None, None] + self.imgseq *= ksp_norm_fact[:, None, None] + self.comp *= ksp_norm_fact[:, None, None] + if norm_correction and self.norms is not None: + self.comp = dso.multiply(self.comp, 1 / self.norms, + fixed_par_img_masks=[m.reshape(imgseq_oshape[:-1]) for m in + self.fixed_par_img_masks], + fixed_par_dict_masks=self.fixed_par_dict_masks, + S=self._S) + return residuals[3, -1] + + def single_component_match(self, stepsize: int = 1000, verbose: int = 1, + absdict=False, calc_nrmse=True): + 
""" Single component matching + + Single component matching by way of largest inner product, the largest inner product of the signal with all the + dictionary atoms is assigned to the pixel in the final image. + Component are vectorirzed and stored in the comp attribute in order (PD, T1, T2, NRMSE) + + Args: + stepsize (int): (optional) stepsize states amount of pixels calculated simultaneously + verbose (int): (optional) 0: work silently; 1: show progressbar; 2: show progressbar and plots + absdict (bool): (optional) absolute value of dictionary, must use when image sequence is calculated with rss + Output: + + """ + print("Single component matching") + if stepsize is None: + stepsize = self.imagesize + self.fixed_par_processing(flatten=True) + imgseq = self.imgseq.reshape((-1, self.rank)) + # normalize dictionary + x_norm = np.linalg.norm(self.compress_dict, axis=0, keepdims=True) + dictmatnorm = self.compress_dict / x_norm + if absdict: + dictmatnorm = np.abs(dictmatnorm) + + # preallocate solution maps + pdimg = np.zeros(imgseq.shape[0]) + np.nan + t1img = np.zeros(imgseq.shape[0]) + np.nan + t2img = np.zeros(imgseq.shape[0]) + np.nan + nrmseimg = np.zeros(imgseq.shape[0]) + np.nan + # %% + # Create result matrices + indices = np.zeros(imgseq.shape[0], dtype=np.int32) + max_vals = np.zeros(imgseq.shape[0], dtype=np.complex128) + phaseimg = phase_vals = np.zeros(imgseq.shape[0], dtype=np.float64) + + for dictionary_mask, measurement_mask in tqdm(zip(self.fixed_par_dict_masks, self.fixed_par_img_masks), + disable=verbose == 0 or not self.fixed_b1, desc='Matching'): + if measurement_mask.sum() == 0: + continue + dd = dictmatnorm[:, dictionary_mask] + fixed_ind = np.arange(len(dictmatnorm.T))[dictionary_mask] + n_meas = np.sum(measurement_mask) + n_chunks = int(np.ceil(n_meas / float(stepsize))) + # loop over chunks + for chunk in tqdm(np.array_split(np.arange(n_meas), n_chunks), desc='Matching', + disable=verbose == 0 or self.fixed_b1): + mask_fl = 
np.zeros(n_meas, dtype=bool) + mask_fl[chunk] = True + + measurement_mask_chunk = measurement_mask.copy() + measurement_mask_chunk[measurement_mask_chunk] = mask_fl + + mask = measurement_mask_chunk + + maxes = np.tensordot(dd, imgseq[mask, :], axes=(0, 1)) + ind_match = np.argmax(np.abs(maxes), axis=0) + indices[mask] = ind_dict = fixed_ind[ind_match] + + mv = np.take_along_axis(maxes, np.expand_dims(ind_match, axis=0), axis=0) + mv /= x_norm[:, ind_dict] + max_vals[mask] = mv.flatten() + + phases = np.angle(maxes) + phase_vals[mask] = np.take_along_axis(phases, np.expand_dims(ind_match, axis=0), axis=0).flatten() + if calc_nrmse: + for k in chunk: + # Calculate NRMSE + nrmseimg[k] = np.sqrt( + np.sum((np.abs(dictmatnorm[:, indices[k]] * pdimg[k] - imgseq[k, :])) ** 2) / + dictmatnorm.shape[0]) + # return indices, max_vals, phase_vals + t1img = self.dictt1[indices] + t2img = self.dictt2[indices] + pdimg = max_vals + # %% + + # mask values + # t1img[pdimg < 0.3] = 0 + # t2img[pdimg < 0.3] = 0 + + if verbose > 1: + # plot results + plt.figure() + plt.subplot(2, 2, 1) + plt.imshow(self.to_image(t1img), origin="lower") + plt.title("T1 image") + plt.colorbar() + plt.subplot(2, 2, 2) + plt.imshow(self.to_image(t2img), origin="lower") + plt.title("T2 image") + plt.colorbar() + plt.subplot(2, 2, 3) + plt.imshow(self.to_image(pdimg), origin="lower") + plt.title("PD image") + plt.colorbar() + plt.subplot(2, 2, 4) + plt.imshow(self.to_image(np.divide(nrmseimg, pdimg, out=np.zeros_like(nrmseimg), where=pdimg != 0)), + origin="lower") + plt.title("RMSE image") + plt.colorbar() + plt.show() + print("max T1 value") + print(np.max(t1img)) + print("max T2 value") + print(np.max(t2img)) + + oshape = list(self.imgseq.shape)[:-1] + [5] + self.single_comp_names = np.asarray(['M0', 'T1', 'T2', 'NRMSE', 'Phase'], dtype='S') + self.single_comp = np.zeros(oshape, dtype=t1img.dtype) + self.single_comp[..., 0] = np.abs(pdimg.reshape(oshape[:-1])) + self.single_comp[..., 1] = 
t1img.reshape(oshape[:-1]) + self.single_comp[..., 2] = t2img.reshape(oshape[:-1]) + self.single_comp[..., 3] = nrmseimg.reshape(oshape[:-1]) + self.single_comp[..., 4] = np.angle(pdimg.reshape(oshape[:-1])) + return + + def save_single_comp_nii(self, output_path, aff=None, ): + if aff is None: + aff = np.zeros((4, 4)) + aff[0, 1] = 1 + aff[1, 2] = -1 + aff[2, 0] = 1 + aff[3, 3] = 1 + for i, name in enumerate(self.single_comp_names): + data = self.single_comp[..., i] + v = np.asarray([self.to_image(ii) for ii in data]) + v[(1 - self.mask).astype(bool)] = np.nan + img = nib.Nifti1Image(v, aff) + if isinstance(name, bytes): + name = name.decode() + nib.save(img, os.path.join(output_path, f'single_comp_{name}.nii.gz')) + return + + def plot_comp(self, normalize=False): + figs = [] + for j in range(self.numslice): + for i in range(len(self.index)): + figs.append(plt.figure()) + if normalize: + plt.imshow(self.to_image(self.comp[j, :, i] / self.comp[j].sum(axis=-1))) + else: + plt.imshow(self.to_image(self.comp[j, :, i])) + plt.colorbar() + if self.index[0] >= 0: + plt.title('$T_1={:.1f}, T_2={:.1f}$'.format(self.dictt1[self.index[i]], + self.dictt2[self.index[i]])) + plt.show() + return figs + + def spijn_solve(self, regparam, max_iter=20, verbose=1, + n_jobs=None, norm_correction=False): + """ Solve components using SPIJN algorithm + + Use the SPIJN algorithm to solve for the multi-component component maps from the image sequence + updates component maps in comp attribute and stores the component indices in index attribute + + Args: + regparam (float): joint sparsity regularization parameter + max_iter (int): (optional) maximum number of SPIJN iterations + verbose (int): (optional) 0: work silently; 1: show info in terminal; 2: show info in terminal and plots + + Output: + + """ + self.fixed_par_processing(True, True, False) + fixed_par_dict_masks = self.fixed_par_dict_masks + fixed_par_img_masks = self.fixed_par_img_masks + lr_imgseq = self.imgseq + dictmat = 
self.compress_dict + + # Transpose so input shape matches other functions in this file + lr_imgseq_flat = lr_imgseq.reshape(-1, self.rank).T + + # normalize dictionary + x_norm = np.linalg.norm(dictmat, axis=0, keepdims=True) + dictmat = dictmat / x_norm + + # Calculate mask + # avgimg = np.abs(np.sum(lr_imgseq, axis=0) / lr_imgseq.shape[0]) + # mask = avgimg > np.sum(avgimg) / (2*lr_imgseq.shape[1]) + + mask = np.abs(lr_imgseq_flat[0, :]) > 0.1 + try: + mask_im = self.spijn_mask.flatten() # np.abs(lr_imgseq_flat[0, :]) > 0.1 + print('SPIJN masking difference:', np.sum(mask) - np.sum(mask_im)) + except: + mask_im = mask + fixed_par_img_masks = [m[mask_im] for m in fixed_par_img_masks] + + if verbose == 2: + maskimg = mask.reshape(self.numslice, self.imagesize, self.imagesize) + pl.ImagePlot(maskimg, z=0, title='SPIJN mask') + + if regparam != 0: + print("Start SPIJN solve") + cr, sfull, rel_err, c1, _ = SPIJN( + lr_imgseq_flat[:, mask_im], dictmat, L=regparam, + correct_im_size=True, + max_iter=max_iter, tol=5e-4, + fixed_par_img_masks=fixed_par_img_masks, + fixed_par_dict_masks=fixed_par_dict_masks, norms=x_norm[0]) + + # fill data to image format to forget about a mask + cr_long = np.zeros((lr_imgseq_flat.shape[1], cr.shape[1]), dtype=cr.dtype) + sfull_long = np.zeros((lr_imgseq_flat.shape[1], cr.shape[1]), dtype=cr.dtype) + cr_long[mask_im, :] = cr + sfull_long[mask_im, :] = sfull + + # process data + self._S = comp_indices = np.unique(sfull[cr > .01]) # Determine the used indices + else: # no restrictions on components + print("Directly start NNLS solve") + comp_indices = range(self.num_dict_atoms) + + print(comp_indices) + num_comp = len(comp_indices) + + # cut down dictionary + fixed_par_img_masks = [m[mask] for m in self.fixed_par_img_masks] + + # solve for least squares solution + nnls_solve, ore = SPIJN2.lsqnonneg2( + lr_imgseq_flat[:, mask], dictmat, + S=comp_indices, fixed_par_img_masks=fixed_par_img_masks, + fixed_par_dict_masks=fixed_par_dict_masks, 
n_jobs=n_jobs, + verbose=verbose > 0 + ) + nnls_solve = dso.multiply(nnls_solve.T, 1 / x_norm.flatten(), + fixed_par_dict_masks=fixed_par_dict_masks, + fixed_par_img_masks=fixed_par_img_masks, + S=comp_indices) + if norm_correction and self.norms is not None: + nnls_solve = dso.multiply(nnls_solve, 1 / self.norms, + fixed_par_dict_masks=fixed_par_dict_masks, + fixed_par_img_masks=fixed_par_img_masks, + S=comp_indices) + # nnls_solve = np.apply_along_axis(lambda x: optimize.nnls(dictmat, x)[0], 0, lr_imgseq_flat[:, mask]) + components = np.zeros((lr_imgseq_flat.shape[1], num_comp), dtype=nnls_solve.dtype) + components[mask, :] = nnls_solve + + self.comp = components.reshape((self.numslice, lr_imgseq.shape[1], num_comp)) + + self.index = np.arange(self.compress_dict.shape[1])[self.fixed_par_dict_masks[0]][comp_indices] + # Plot solutions + if verbose == 2: + self.plot_comp() + return + + def spijn_from_ksp(self, admm_param, oversamp=1.25, max_admm_iter=10, max_cg_iter=20, p=0, max_iter=20, + verbose=True, norm_ksp=True, tol=1e-4, reg_param=0.0, correct_im_size=True, prun=2, init_rank=10, + compute_device=-1, min_iter=3, lstsq_solver='PrimalDualHybridGradient', tol_fac=0.001, + tol_admm=None, norm_correction=True, **kwargs): + """Direct reconstruction of joint sparse components from kspace data. + + Args: + admm_param (float): ADMM coupling parameter + oversamp (float): (optional) oversampling ratio (default 1.25) + max_admm_iter (int): (optional) Amount of admm iterations, default 10 + max_cg_iter(int): (optional) Amount of conjugate gradient iterations, default 20 + p (float): (optional) value as used in the reweighting scheme 1. Default 0 works fine + max_iter (int): (optional) maximum number of iterations, 20 is normally fine + verbose (int): (optional) more output + norm_ksp (bool): (optional) Normalise the signal or not. 
False is fine + tol (float): (optional) Tolerance used, when are the iterations stopped, based on + ||C^k-C^{k+1}||_F^2/||C^k||_F^2 0.1 + except: + mask_spijn = ... + + if self.comp_mat is None: + self.compress_dictionary(init_rank, comp_device=compute_device) + + t0 = time.clock() + admm_settings = dict(admm_param=admm_param, oversamp=oversamp, max_iter=max_admm_iter, + max_cg_iter=max_cg_iter, compute_device=compute_device, + verbose=verbose, lstsq_solver=lstsq_solver, tol=tol_admm, + tol_fac=tol_fac, norm_ksp=False, ) + if self.comp is None: # First iteration + rel_err = rel_err_old = self.lr_admm( + **admm_settings, **kwargs + ) # Perform real calculations + print('matching time it 1: {0:.5f}s'.format(time.clock() - t0)) + print('Relative output error = {}'.format(rel_err_old)) + prunn_comp = None + else: + print('Reused old first iteration solution') + + try: # Try-except is to stop iterations when it takes to long, making it possible to return the latest result + for k in range(1, max_iter): + # Calc weights + sel_comp = self.comp.reshape(-1, self.num_dict_atoms)[mask_spijn] + w = (np.linalg.norm(sel_comp, 2, axis=0) + eps) ** (1 - p / 2) + w[w < eps] = eps # prevent 0-weighting + if k >= prun: + # determine components to prune + prunn_comp = np.sum(sel_comp, axis=0) > 1e-12 + + # prune dictionary + # self.dictmat = self.dictmat[:, prunn_comp] + w = w[prunn_comp] # Weights are saved in a pruned version + self.index = self.index[prunn_comp] # This takes care of further pruning + self._S = self._S[prunn_comp] + print('index:', self.index) + print('S:', self._S) + + # compress + self.compress_dictionary(min(self.rank, len(w)), comp_device=compute_device) + + comp_old = self.comp.copy() + t0 = time.clock() + print('regparam = {}'.format(reg_param)) + rel_err = self.lr_admm(**admm_settings, + regparam=reg_param, + weights=w, + **kwargs) # Perform real calculations + if verbose: + print('matching time: {0:.5f}s'.format(time.clock() - t0)) + + # Calc residuals + # 
def matmul(D, C, fixed_par_img_masks=None, fixed_par_dict_masks=None, S=None):
    """Compute D @ C, optionally restricted per fixed-parameter mask pair.

    Without masks the plain product is returned (retrying with the atom
    subset S when the shapes do not line up); with masks each dictionary
    slice is applied only to its matching columns of C.
    """
    if fixed_par_img_masks is None:
        try:
            return D @ C
        except ValueError:
            return D[S] @ C

    result = np.zeros((D.shape[0], C.shape[1]))
    for dict_sel, img_sel in zip(fixed_par_dict_masks, fixed_par_img_masks):
        sub_dict = D[:, dict_sel]
        if S is not None:
            sub_dict = sub_dict[S]
        result[:, img_sel] = sub_dict @ C[:, img_sel]
    return result
def JJNLS_weights(red_dict, C, w, eps, M,
                  fixed_par_img_masks=None, fixed_par_dict_masks=None, S=None):
    """
    Function to replace
    DWD = red_dict1@scsp.diags(w)@red_dict1.T
    w = np.linalg.norm(C,2,axis=1)**2+w-w**2*np.einsum('ij,ij->j',red_dict1,np.linalg.solve(eps*np.eye(M)+DWD,red_dict1))
    Probably not working!
    """

    def _diag_term(R, diag_weights):
        # column-wise diag of R^T (eps*I + R diag(diag_weights) R^T)^{-1} R
        gram = R @ scsp.diags(diag_weights) @ R.T
        return np.einsum('ij,ij->j', R, np.linalg.solve(eps * np.eye(M) + gram, R))

    if fixed_par_dict_masks is None:
        sub_dict = red_dict if S is None else red_dict[:, S]
        norm_fn = scsp.linalg.norm if scsp.issparse(C) else np.linalg.norm
        return norm_fn(C, 2, axis=1) ** 2 + w - w ** 2 * _diag_term(sub_dict, w)

    # masked variant updates w in place, matching the original behaviour
    w0 = w.copy()
    w += np.linalg.norm(C, 2, axis=1) ** 2
    for dict_sel in fixed_par_dict_masks:
        R = red_dict[:, dict_sel]
        if S is not None:
            R = R[:, S]
        w -= w0 ** 2 * _diag_term(R, w0 ** -1)

    return w
def to_image(vec: np.ndarray):
    """Reshape a flat pixel vector into a square image (side = sqrt(len(vec)))."""
    side = np.sqrt(len(vec)).astype(int)
    return vec.reshape((side, side))
def find_in_dict(dt1: np.ndarray, dt2: np.ndarray, t1: np.ndarray, t2: np.ndarray):
    """Find the dictionary atom closest to each requested (T1, T2) pair.

    Args:
        dt1 (list of float): T1 values in dictionary
        dt2 (list of float): T2 values in dictionary
        t1 (list of float): T1 values to be found in dictionary
        t2 (list of float): T2 values to be found in dictionary

    Output
        (list of int): dictionary indices that best match the t1 and t2 values
    """
    dt1 = np.asarray(dt1, dtype=float)
    dt2 = np.asarray(dt2, dtype=float)
    # Vectorized replacement of the original per-entry loop: squared euclidean
    # distance in the (T1, T2) plane for all pairs at once. np.argmin keeps the
    # first atom on ties, matching the original loop's behaviour.
    error = (dt1[:, None] - np.asarray(t1, dtype=float)) ** 2 \
            + (dt2[:, None] - np.asarray(t2, dtype=float)) ** 2
    index = np.argmin(error, axis=0)
    print(index)
    return index
def pad_image(data: np.ndarray, settings):
    """ Pad image with zeros to match the desired image shape, can also crop

    Args:
        data (array): 2D data array of any shape
        settings: config object used to retrieve options ('imagesize')
    Output:
        (array): data array padded/cropped to shape (imagesize, imagesize)
    """
    size = settings.getint('imagesize')
    output = np.zeros((size, size))

    # special exception for background image: fill with ones instead of zeros
    if data[0, 0] > 0.9:
        output = np.ones((size, size))

    # FIX: the original sliced output[ceil(d):-floor(d)], which is an EMPTY slice
    # when the offset d is 0 (':-0'), so exact-size inputs failed; it also could
    # not crop one axis while padding the other. Compute explicit start/stop
    # spans per axis; pure-pad and pure-crop results are identical to before.
    def _spans(target_len, source_len):
        # returns (output slice, input slice) for one axis
        offset = (target_len - source_len) / 2
        if offset >= 0:  # pad: centre the data inside the output
            start = math.ceil(offset)
            return slice(start, start + source_len), slice(None)
        start = math.ceil(-offset)  # crop: centre the output inside the data
        return slice(None), slice(start, start + target_len)

    out_rows, in_rows = _spans(size, data.shape[0])
    out_cols, in_cols = _spans(size, data.shape[1])
    output[out_rows, out_cols] = data[in_rows, in_cols]
    return output
settings.getint('imagesize') * settings.getint('imagesize'), len(imgtype))) + for j in range(len(slicenums)): + for i in tqdm(range(len(imgtype))): + # assemble string + pathstring = settings['brainweb_path'] + settings['brainweb_subject'] + '_' + imgtype[i] + '.nii.gz' + nibfile = nib.load(pathstring) + + # get data from .nii file + data = nibfile.get_fdata() + + # get slice from data + imgslice = data[slicenums[j], :, :] + + # add padding to image + imgslice = pad_image(imgslice, settings) + + # store output + groundtruth[j, :, i] = to_vec(imgslice) + + # plot ground truth in subplot + if settings.getboolean('showplot'): + plt.subplot(3, 4, i + 1) + plt.imshow(imgslice, cmap='gray', origin="lower") + plt.title(titles[i]) + + ax = plt.gca() + ax.axes.xaxis.set_visible(False) + ax.axes.yaxis.set_visible(False) + if settings.getboolean('showplot'): + plt.show() + + return groundtruth + + +def load_artificial_phantom(settings): + """ Load artificial data components + + Args: + settings: config object used to retrieve options + + Output: + (array): component maps reshaped into vectors and stacked, array of shape (imgshape[0], imageshapep[1], n_maps) + """ + imgtype, titles, t1, t2, pd = artificial_phantom_metadata(settings) + # make circular mask + y, x = np.ogrid[-128:128, -128:128] + mask = x * x + y * y <= 90 * 90 + + # load components + blk = np.zeros((256, 256)) + cp1 = np.zeros((256, 256)) + cp2 = np.zeros((256, 256)) + cp1[:, 0:86] = np.ones((256, 86)) + cp1[:, 86:170] = np.ones((256, 84)) * 0.5 + cp2[:, 86:170] = np.ones((256, 84)) * 0.5 + cp2[:, 170:256] = np.ones((256, 86)) + + # mask components + blk[np.invert(mask)] = 1 + cp1[np.invert(mask)] = 0 + cp2[np.invert(mask)] = 0 + + components = np.zeros((256 * 256, 3)) + components[:, 0] = to_vec(blk) + components[:, 1] = to_vec(cp1) + components[:, 2] = to_vec(cp2) + + # plot components + if settings.getboolean('showplot'): + plt.figure() + for i in range(components.shape[1]): + plt.subplot(1, 
components.shape[1] + 1, i + 1) + plt.imshow(to_image(components[:, i]), cmap='gray', origin="lower") + plt.title(titles[i]) + plt.show() + return components + + +def gen_imgseq(groundtruth: np.ndarray, dictmat: np.ndarray, + dictt1: np.ndarray, dictt2: np.ndarray, settings): + """ Generate image sequences + Loop over components and apply dictionary to generate image sequences. These component specific image sequences are + added together to for the final image sequence. + + Args: + groundtruth (array): component maps reshaped into vectors and stacked, + array of shape (imgshape[0], imageshapep[1], n_maps) + dictmat (array): dictmat rotated to the real axis. array of shape + dictt1 (list): list of T1 values corresponding to dictionary atoms + dictt2 (list): list of T2 values corresponding to dictionary atoms + settings: config object used to retrieve options + Output: + (array): image sequence, images reshaped into vectors and stacked + """ + imgtype, titles, t1, t2, pd = load_metadata(settings['phantom_type']) + image_sequence = np.zeros((groundtruth.shape[0], groundtruth.shape[1], dictmat.shape[0]), dtype='float64') + + index = find_in_dict(dictt1, dictt2, t1, t2) + # print(dictt1[index]) + # print(dictt2[index]) + print("Generating image sequence") + # Make transformation matrix + trans_mat = np.matmul(dictmat[:, index], np.diag(pd)).T + + # calculate image sequence + oshape = image_sequence.shape[:2] + trans_mat.shape[1:] + image_sequence = (groundtruth.reshape(-1, groundtruth.shape[2]) @ trans_mat).reshape(oshape) + + return image_sequence + + +def subsample_imseq(imgseq, settings): + """ Subsample image sequence + Use the sense linop from the sigpy library to sample the image sequene. 
def subsample_imseq(imgseq, settings):
    """Subsample an image sequence to (non-Cartesian) k-space.

    Uses the sigpy sense linops (coil multiply + NUFFT) to sample every image
    of the sequence on a per-timepoint golden-angle-rotated trajectory.

    Args:
        imgseq (array): image sequence, images reshaped into vectors and
            stacked, shape (n_slices, n_pixels, n_timepoints)
        settings: config object used to retrieve options
    Returns:
        kspace_array (array): k-space data of shape
            (n_slices, n_coils, n_timepoints, 1, coordlength)
        coord_array (array): coord array of shape (n_timepoints, 1, coordlength, 2)
    """
    print('Sample image sequence')
    # Coil sensitivities and a reference trajectory (for shape and plotting)
    birdcagemaps = gen_birdcage(settings)
    spiral_coord = gen_coord(settings, 0)
    if settings.getboolean('showplot'):
        plt.figure()
        plt.plot(spiral_coord[:, 0], spiral_coord[:, 1])
        plt.show()

    ksp_oshape = [imgseq.shape[0], birdcagemaps.shape[0], imgseq.shape[2], 1, spiral_coord.shape[0]]
    kspace_array = np.zeros(ksp_oshape, dtype='complex128')
    coord_array = np.zeros(ksp_oshape[2:] + [2])
    # Loop over all (slice, timepoint) pairs
    for i in tqdm(range(imgseq.shape[0] * imgseq.shape[2])):
        im_slice, timepoint = divmod(i, imgseq.shape[2])

        # Trajectory for this timepoint (golden-angle rotation by index)
        coord_array[timepoint, 0, :, :] = gen_coord(settings, timepoint)

        # Sense operator split in two so noise can be applied to the
        # single-coil images before the NUFFT
        ishape = birdcagemaps.shape[1:]
        mapslinop = sp.linop.Multiply(ishape, birdcagemaps)
        nufftlinop = sp.linop.NUFFT(mapslinop.oshape, coord_array[timepoint, ...])

        # Single-coil images -> add noise -> k-space trajectory samples
        single_coil_images = mapslinop * to_image(imgseq[im_slice, :, timepoint])
        noisy_single_coil_img = add_noise_img(single_coil_images, settings.getint('phantom_snr'))
        kspace_array[im_slice, :, timepoint, ...] = nufftlinop * noisy_single_coil_img

    return kspace_array, coord_array


def gen_coord(settings, index=0, interleaf=0):
    """Generate a k-space trajectory, rotated by the golden angle.

    Args:
        settings: config object used to retrieve options
        index (int): number of golden-angle rotations applied to the base
            trajectory
        interleaf (int): number of extra interleaves appended (recursively)
    Returns:
        (array): coord array of shape (coordlength, 2)
    """
    if settings['samp_pattern'] == 'spiral':
        # spiral(fov, N, f_sampling, R, ninterleaves, alpha, max_grad_amp, max_slew_rate)
        coord = spmri.spiral(settings.getfloat('spir_fov'), settings.getint('spir_mat_size'),
                             settings.getfloat('spir_undersamp_freq'), settings.getfloat('spir_undersamp_phase'), 1,
                             settings.getfloat('spir_var_dens_fac'), settings.getfloat('spir_grad_amp'),
                             settings.getfloat('spir_max_slew_rate'))
    elif settings['samp_pattern'] == 'radial':
        # (The original also produced a hard-coded radial trajectory that was
        # immediately overwritten — removed as dead code.)
        rad = spmri.radial((settings.getint('rad_num_spokes'), settings.getint('rad_num_read_out'), 2),
                           (settings.getint('imagesize'), settings.getint('imagesize')), golden=False)
        coord = rad.reshape(-1, 2)
    else:
        raise Exception("unknown sampling pattern, check spelling!")

    # Scale coordinates to the configured image size
    # TODO: gen_coord(): this rescaling should not be required
    coord = coord * (settings.getint('imagesize') / 256)

    # Rotate by index * golden angle (complex-plane rotation)
    gold_ang = -np.pi * (3 - np.sqrt(5))
    comp_coord = (coord[:, 0] + 1j * coord[:, 1]) * np.exp(1j * gold_ang * index)
    output = np.stack((np.real(comp_coord), np.imag(comp_coord)), axis=1)

    if interleaf:
        output = np.vstack((output, gen_coord(settings, index=index + 1, interleaf=interleaf - 1)))
    return output


def gen_phantom(settings):
    """Generate the numerical phantom.

    Loads the component maps (brainweb database or the hard-coded artificial
    phantom), generates the image sequence through the dictionary, samples it
    to k-space and stores everything in an .h5 file.

    Args:
        settings: config object used to retrieve options

    Returns:
        kspace_array (array): k-space data of shape (n_coils, timelength, coordlength)
        coord_array (array): coord array of shape (timelength, coordlength, 2)
        imgseq (array): image sequence, images reshaped into vectors and stacked
    """
    component_groundtruth = load_components(settings)
    # Force a fully-filled background component
    component_groundtruth[:, :, 0] = np.ones_like(component_groundtruth[:, :, 0])

    dictmat, dictt1, dictt2, _ = load_dict(settings)

    imgseq = gen_imgseq(component_groundtruth, dictmat, dictt1, dictt2, settings=settings)

    # Subsample the image sequence
    kspace_array, coord_array = subsample_imseq(imgseq, settings)

    # Store to .h5 file
    print("Saving result to .h5 file")
    os.makedirs(settings['phantom_output_path'], exist_ok=True)
    with h5py.File(settings['phantom_output_path'] + 'phantom.h5', 'w') as hf:
        hf.create_dataset('groundtruth', data=component_groundtruth)
        hf.create_dataset('kspace', data=kspace_array)
        hf.create_dataset('coord', data=coord_array)
        hf.create_dataset('imgseq', data=imgseq)

    return kspace_array, coord_array, imgseq


def gen_birdcage(settings):
    """Generate sensitivity maps for birdcage coils.

    Args:
        settings: config object used to retrieve options

    Returns:
        (array): sensitivity maps of shape (n_coils, imagesize, imagesize)
    """
    cage_diam = settings.getfloat('cage_diam')  # in meters
    fov = settings.getfloat('coil_fov')  # field of view in meters, 1mm voxel size
    return spmri.birdcage_maps((settings.getint('num_sense_coils'), settings.getint('imagesize'),
                                settings.getint('imagesize')), cage_diam / fov, settings.getint('coils_per_ring'))


def add_noise_img(imgseq, snr):
    """Add complex Gaussian noise to each coil image, scaled to the given SNR.

    Args:
        imgseq (array): coil images, shape (n_coils, ...); modified in place
        snr: signal-to-noise ratio, or None to leave the images untouched
    Returns:
        (array): the (possibly noisy) input array
    """
    if snr is not None:
        for k in range(imgseq.shape[0]):
            peak = np.abs(imgseq[k, ...]).max()
            sigma = peak / snr
            imgseq[k, ...] += np.random.normal(0, sigma, imgseq[k].shape) + \
                1j * np.random.normal(0, sigma, imgseq[k].shape)
    return imgseq


if __name__ == "__main__":
    from config import load_settings

    settings_ = load_settings()
    gen_phantom(settings_)


try:
    # Optional Philips DICOM reader; load_b1 falls back when it is absent.
    from DICOM_files_Philips import read_enh_dicom
except ImportError:
    read_enh_dicom = False


def load_b1(settings):
    """Load a B1 map from a DICOM or .npy file configured as 'b1_map'.

    Args:
        settings: config object used to retrieve options

    Returns:
        (array or None): B1 map, or None when no (existing) file is
        configured or its extension is not supported.
    """
    # The original could hit NameError here: ``file`` was only bound inside a
    # try block and ``b1_map`` only on known extensions. Resolve both paths
    # explicitly instead.
    file = settings.get('b1_map') if 'b1_map' in settings else None
    if file is None or not os.path.exists(file):
        print(f'B1 file {file} not found')
        return None

    b1_map = None
    if file.endswith('dcm') and read_enh_dicom is not False:
        # Philips enhanced DICOM stores the map in percent
        b1_map = read_enh_dicom(file, slices='all', MP=True)['images'] / 100
    elif file.endswith('.npy'):
        b1_map = np.load(file)
        b1_map[b1_map == 0] = np.nan
        b1_map = np.abs(b1_map[:, 0].transpose((0, 2, 1)))
    return b1_map
def load_data(settings):
    """Load k-space data, trajectory, dictionary and masks into an MrfData object.

    Args:
        settings: config object used to retrieve options

    Returns:
        MrfData: container with ksp, coord, dictionary and optional masks/B1 map.

    Raises:
        FileNotFoundError: when the configured .h5 data file does not exist.
        IOError: on dictionary/coordinate or B1 inconsistencies.
    """
    # Obtain k-space data and coordinates
    if settings['datasource'] == 'phantom':
        # calculate kspace and coord from the numerical phantom
        ksp, coord, groundtruth = gen_phantom(settings)
    elif settings['datasource'] == 'h5':
        if not os.path.exists(settings['mri_data_path']):
            raise FileNotFoundError('Data file not found at: {}'.format(settings['mri_data_path']))
        with h5py.File(settings['mri_data_path'], 'r') as hf:
            ksp = hf.get('kspace')[:]
            coord = hf.get('coord')[:]
    else:
        # NOTE(review): after this warning execution continues with ksp/coord
        # undefined and will fail below — confirm callers catch/skip this case.
        warnings.warn("Skip to next run; MRI data source not identified in [{}]. Got [{}] expected phantom, "
                      "mri_data or h5".format(settings['path_extension'], settings['datasource']))

    # Load dictionary
    dictmat, dictt1, dictt2, dictb1 = load_dict(settings)

    # Load B1 map (optional)
    b1_map = load_b1(settings) if 'b1_map' in settings else None

    if dictb1 is not None and b1_map is None:
        if len(np.unique(dictb1)) > 1:
            warnings.warn('Dictionary contains b1 values, but no B1 map was provided...')
    elif dictb1 is None and b1_map is not None:
        raise IOError('Dictionary does not contain b1, but a B1map was provided.')

    # Dictionary orientation check: signal length must match the number of
    # trajectory timepoints
    if dictmat.shape[0] != coord.shape[0]:
        if dictmat.shape[1] == coord.shape[0]:
            dictmat = dictmat.T
            warnings.warn(
                'Dictionary signal length and coord length did not match!, I transposed the dictionary matrix myself.')
        else:
            raise IOError('Dictionary signal length and coord length did not match!')

    # Optionally truncate the sequence length
    reducedict = settings.getint('reduce_dict_length')
    if reducedict is not None:
        dictmat = dictmat[:reducedict, :]
        ksp = ksp[:, :, :reducedict, :, :]
        coord = coord[:reducedict, ...]

    # Normalize dictionary atoms; keep the norms for later rescaling
    norms = np.linalg.norm(dictmat, axis=0)
    dictmat /= norms[None, :]

    retrospective_undersampling = settings.get('retrospective_undersampling', fallback=None)
    if retrospective_undersampling is not None:
        # SECURITY: eval() on config text — a trusted config file is assumed;
        # consider ast.literal_eval if the config can come from outside.
        retrospective_undersampling = eval(retrospective_undersampling)
        if retrospective_undersampling is not False:
            if isinstance(retrospective_undersampling, int):
                retrospective_undersampling = [retrospective_undersampling]
            ksp = ksp[..., retrospective_undersampling, :]
            coord = coord[:, retrospective_undersampling]

    # Set up the data class
    data = MrfData()
    data.ksp = ksp
    data.coord = coord
    data.dictmat = dictmat
    data.dictt1 = dictt1
    data.dictt2 = dictt2
    data.dictb1 = dictb1
    data.set_b1_map(b1_map)
    data.norms = norms

    def load_mask(mask_path):
        # Masks are stored as horizontally concatenated square slices
        maskimage = Image.open(mask_path).convert('1')
        mask = np.array(maskimage)

        imagesize = mask.shape[0]
        numslice = int(mask.shape[1] // imagesize)
        if imagesize * numslice != mask.shape[1]:
            raise IOError("Mask image of invalid shape, width is not an integer multiple of height")
        mask = mask.reshape((imagesize, imagesize, numslice), order='F')
        return mask.transpose(2, 0, 1)

    if settings.get('mask_path') is not None:
        data.mask = load_mask(settings['mask_path'])

    if settings.get('mask_spijn_path') is not None:
        data.spijn_mask = load_mask(settings['mask_spijn_path'])

    return data


max_power_iter_def = 15


def reg_params(settings):
    """Collect the regularization options into a single kwargs dict."""
    return {'regtype': settings.get('regtype', None),
            'lam': settings.getfloat('regularization_lambda', None),
            'lamscale': settings.getfloat('regularization_lambda_scale', 1),
            'lambda_ksp': settings.getboolean('regularization_lambda_ksp', False)}


def espirit_settings(settings):
    """Collect the ESPIRiT options into a single kwargs dict."""
    return dict(imagesize=settings.getint('imagesize'), coil_batchsize=settings.getint('espirit_coil_batchsize'),
                max_iter=settings.getint('max_espirit_cg_iter'), oversamp=settings.getfloat('oversamp_ratio'),
                compute_device=settings.getint('compute_device'),
                mask_thresh=settings.getfloat('espirit_mask_threshhold'), verbose=settings.getint('verbose'),
                tol_fac=settings.getfloat('tol_fac'),
                )


def recon_settings(settings):
    """Collect the shared reconstruction options into a single kwargs dict."""
    return dict(batchsize=settings.getint('lr_sense_batchsize'),
                oversamp=settings.getfloat('oversamp_ratio'),
                compute_device=settings.getint('compute_device'),
                verbose=settings.getint('verbose'),
                lstsq_solver=settings.get('lstsq_solver'),
                tol_fac=settings.getfloat('tol_fac'),
                norm_ksp=settings.getboolean('norm_kspace', True),
                max_power_iter=settings.getint('max_power_iter', max_power_iter_def),
                reg_kwargs=reg_params(settings)
                )


def recon_fbp(data, settings):
    """Filtered back projection reconstruction + single component matching."""
    print("Start filtered back projection recon with single component matching")
    data.filt_back_proj(settings.getint('imagesize'), compute_device=settings.getint('compute_device'),
                        oversamp=settings.getfloat('oversamp_ratio'), verbose=settings.getint('verbose'))

    # Root-sum-of-squares over the coil axis
    data.imgseq = np.sum(np.abs(data.imgseq) ** 2, axis=1) ** 0.5
    data.compress_dict = data.dictmat
    # Fake sensitivity maps, only used to carry the image size.
    # NOTE(review): image size is hard-coded to 224 here — confirm.
    data.maps = np.zeros((1, 224, 224))

    # Solve for components
    data.single_component_match(verbose=settings.getint('verbose'), absdict=True)

    # Save to .nii.gz and .h5
    data.save_single_comp_nii(settings['reconstruction_output_path'])
    data.to_h5(settings['reconstruction_output_path'], save_raw=False, save_dict=False)


def recon_lr_invert(data, settings):
    """Low-rank inversion reconstruction + single component matching."""
    print("Start Low rank inversion reconstruction with single component matching")
    # Compress dictionary to the reconstruction rank
    data.compress_dictionary(settings.getint('reconstruction_rank'), comp_device=settings.getint('compute_device'))

    # ESPIRiT sensitivity maps, then coil compression
    data.mrf_espirit(**espirit_settings(settings))
    data.coil_compression(settings.getint('num_virt_coil'))

    # Low-rank inversion image reconstruction (no rotation to the real axis)
    data.lr_inversion(**recon_settings(settings),
                      max_iter=settings.getint('max_cg_iter'))

    # Solve for components
    data.single_component_match(verbose=settings.getint('verbose'), calc_nrmse=True)

    # Save to .nii.gz and .h5
    data.save_single_comp_nii(settings['reconstruction_output_path'])
    data.to_h5(settings['reconstruction_output_path'], save_raw=False, save_dict=False)


def recon_lr_admm(data, settings):
    """Low-rank ADMM reconstruction + single component matching."""
    print("Start Low rank ADMM reconstruction with single component matching")
    # Compress dictionary to the reconstruction rank
    data.compress_dictionary(settings.getint('reconstruction_rank'), comp_device=settings.getint('compute_device'))

    # ESPIRiT sensitivity maps, then coil compression
    data.mrf_espirit(**espirit_settings(settings))
    data.coil_compression(settings.getint('num_virt_coil'))

    # Step for b1map processing, doesn't hurt if there is no b1map
    data.fixed_par_processing(redo=True, flatten=True)

    # ADMM image sequence
    data.lr_admm(admm_param=settings.getfloat('admm_param'),
                 max_iter=settings.getint('max_admm_iter'),
                 max_cg_iter=settings.getint('max_cg_iter'),
                 outpath=settings.get('admm_outpath'),
                 **recon_settings(settings))

    # Solve for components
    data.single_component_match(verbose=settings.getint('verbose'))

    # Save to .nii.gz and .h5
    data.save_single_comp_nii(settings['reconstruction_output_path'])
    data.to_h5(settings['reconstruction_output_path'], save_raw=False, save_dict=False)
def recon_lr_invert_spijn(data, settings):
    """Low-rank inversion reconstruction followed by SPIJN component solving."""
    print("Start lr inversion with SPIJN reconstruction")
    # Compress dictionary to the reconstruction rank
    data.compress_dictionary(settings.getint('reconstruction_rank'), comp_device=settings.getint('compute_device'))

    # ESPIRiT sensitivity maps, then coil compression
    data.mrf_espirit(**espirit_settings(settings))
    data.coil_compression(settings.getint('num_virt_coil'))

    # Low-rank inversion image reconstruction
    data.lr_inversion(max_iter=settings.getint('max_cg_iter'),
                      **recon_settings(settings))
    # Rotate image sequence to the real axis
    data.rotate2real()

    # Step for b1map processing, doesn't hurt if there is no b1map
    data.fixed_par_processing(redo=True, flatten=True)

    # Solve for components
    data.spijn_solve(settings.getfloat('spijn_param'),
                     max_iter=settings.getint('max_spijn_iter'),
                     verbose=settings.getint('verbose'), norm_correction=False)

    # Save to .h5
    data.to_h5(settings['reconstruction_output_path'], save_raw=False, save_dict=False)


def recon_admm_into_spijn(data, settings):
    """Low-rank ADMM reconstruction followed by SPIJN component solving."""
    print("Start ADMM into SPIJN reconstruction")
    # Compress dictionary to the reconstruction rank
    data.compress_dictionary(settings.getint('reconstruction_rank'))

    # ESPIRiT sensitivity maps, then coil compression
    data.mrf_espirit(**espirit_settings(settings))
    data.coil_compression(settings.getint('num_virt_coil'))

    # Step for b1map processing, doesn't hurt if there is no b1map
    data.fixed_par_processing(redo=True, flatten=True)

    # ADMM image sequence
    data.lr_admm(admm_param=settings.getfloat('admm_param'),
                 max_iter=settings.getint('max_admm_iter'),
                 max_cg_iter=settings.getint('max_cg_iter'),
                 outpath=settings.get('admm_outpath'),
                 **recon_settings(settings))

    # SPIJN after ADMM convergence
    data.spijn_solve(settings.getfloat('spijn_param'), max_iter=settings.getint('max_spijn_iter'),
                     verbose=settings.getint('verbose'), norm_correction=False)

    # Save to .h5
    data.to_h5(settings['reconstruction_output_path'], save_raw=False, save_dict=False)


def recon_sense(data, settings):
    """SENSE reconstruction + single component matching."""
    print("Start Sense recon with single component matching")

    # ESPIRiT sensitivity maps
    data.mrf_espirit(**espirit_settings(settings))

    # SENSE image reconstruction
    data.senserecon(compute_device=settings.getint('compute_device'),
                    verbose=settings.getint('verbose'),
                    tol_fac=settings.getfloat('tol_fac'))

    # Store as low-rank image sequence for further processing
    data.compress_dictionary(settings.getint('reconstruction_rank'))
    data.imgseq_or = data.imgseq.copy()
    data.imgseq = data.imgseq @ data.comp_mat

    # Step for b1map processing, doesn't hurt if there is no b1map
    data.fixed_par_processing(redo=True, flatten=True)

    # Solve for components
    data.single_component_match(verbose=settings.getint('verbose'))

    # Save to .nii.gz and .h5
    data.save_single_comp_nii(settings['reconstruction_output_path'])
    data.to_h5(settings['reconstruction_output_path'], save_raw=False, save_dict=False)


def recon_sense_into_spijn(data, settings):
    """SENSE reconstruction followed by SPIJN component solving."""
    print("Start sense with SPIJN reconstruction")

    # ESPIRiT sensitivity maps
    data.mrf_espirit(**espirit_settings(settings))

    # SENSE image reconstruction
    data.senserecon(compute_device=settings.getint('compute_device'),
                    verbose=settings.getint('verbose'),
                    tol_fac=settings.getfloat('tol_fac'))

    # Store as low-rank image sequence for further processing
    data.compress_dictionary(settings.getint('reconstruction_rank'))
    data.imgseq = data.imgseq @ data.comp_mat

    data.rotate2real()

    # Step for b1map processing, doesn't hurt if there is no b1map
    data.fixed_par_processing(redo=True, flatten=True)

    # Solve for components
    data.spijn_solve(settings.getfloat('spijn_param'), max_iter=settings.getint('max_spijn_iter'),
                     verbose=settings.getint('verbose'), norm_correction=False)

    # Save to .h5
    data.to_h5(settings['reconstruction_output_path'], save_raw=False, save_dict=False)


def recon_wavelet(data, settings):
    """Wavelet-regularized SENSE reconstruction + single component matching."""
    print("Start sense-wavelet with single component matching")

    # ESPIRiT sensitivity maps, then coil compression
    data.mrf_espirit(**espirit_settings(settings))
    data.coil_compression(settings.getint('num_virt_coil'))

    # Wavelet-regularized image reconstruction
    data.waveletrecon(lamda=settings.getfloat('regularization_lambda'),
                      compute_device=settings.getint('compute_device'), verbose=settings.getint('verbose'),
                      tol_fac=settings.getfloat('tol_fac'),
                      norm_ksp=settings.getboolean('norm_kspace', True),
                      )
    # Keep the full-rank sequence, then store the low-rank version for
    # further processing (the original assigned imgseq_or twice; once is enough)
    data.imgseq_or = data.imgseq.copy()
    data.compress_dictionary(settings.getint('reconstruction_rank'))
    data.imgseq = data.imgseq @ data.comp_mat

    # Step for b1map processing, doesn't hurt if there is no b1map
    data.fixed_par_processing(redo=True, flatten=True)

    # Solve for components
    data.single_component_match(verbose=settings.getint('verbose'))

    # Save to .nii.gz and .h5
    data.save_single_comp_nii(settings['reconstruction_output_path'])
    data.to_h5(settings['reconstruction_output_path'], save_raw=False, save_dict=False)


def recon_wavelet_into_spijn(data, settings):
    """Wavelet-regularized SENSE reconstruction followed by SPIJN solving."""
    print("Start sense-wavelet with SPIJN reconstruction")

    # ESPIRiT sensitivity maps, then coil compression
    data.mrf_espirit(**espirit_settings(settings))
    data.coil_compression(settings.getint('num_virt_coil'))

    # Wavelet-regularized image reconstruction
    data.waveletrecon(lamda=settings.getfloat('regularization_lambda'),
                      compute_device=settings.getint('compute_device'), verbose=settings.getint('verbose'),
                      tol_fac=settings.getfloat('tol_fac'),
                      norm_ksp=settings.getboolean('norm_kspace', True),
                      )
    data.imgseq_or = data.imgseq.copy()

    # Store as low-rank image sequence for further processing
    data.compress_dictionary(settings.getint('reconstruction_rank'))
    data.imgseq = data.imgseq @ data.comp_mat

    data.rotate2real()

    # Step for b1map processing, doesn't hurt if there is no b1map
    data.fixed_par_processing(redo=True, flatten=True)

    # Solve for components
    data.spijn_solve(settings.getfloat('spijn_param'), max_iter=settings.getint('max_spijn_iter'),
                     verbose=settings.getint('verbose'), norm_correction=False)

    # Save to .h5
    data.to_h5(settings['reconstruction_output_path'], save_raw=False, save_dict=False)


def recon_direct_spijn(data, settings):
    """Direct (k-space) SPIJN reconstruction."""
    # Start message added for consistency with the sibling pipelines
    print("Start direct SPIJN reconstruction from k-space")
    # Compress dictionary to the reconstruction rank
    data.compress_dictionary(settings.getint('reconstruction_rank'))

    # ESPIRiT sensitivity maps, then coil compression
    data.mrf_espirit(**espirit_settings(settings))
    data.coil_compression(settings.getint('num_virt_coil'))

    # Step for b1map processing, doesn't hurt if there is no b1map
    data.fixed_par_processing(redo=True, flatten=True)

    data.spijn_from_ksp(admm_param=settings.getfloat('admm_param'),
                        max_admm_iter=settings.getint('max_admm_iter'),
                        max_cg_iter=settings.getint('max_cg_iter'),
                        max_iter=settings.getint('max_spijn_iter'),
                        reg_param=settings.getfloat('spijn_param'), norm_correction=False,
                        **recon_settings(settings)
                        )

    # Save to .h5
    data.to_h5(settings['reconstruction_output_path'], save_raw=False, save_dict=False)


def recon_test(data, settings):
    """Direct reconstruction followed by an LR-inversion image reconstruction."""
    print("Recon direct recon followed by LR inv image reconstruction")
    # Compress dictionary to the reconstruction rank
    data.compress_dictionary(settings.getint('reconstruction_rank'))

    # ESPIRiT sensitivity maps, then coil compression
    data.mrf_espirit(**espirit_settings(settings))
    data.coil_compression(settings.getint('num_virt_coil'))

    # Step for b1map processing, doesn't hurt if there is no b1map
    data.fixed_par_processing(redo=True, flatten=True)

    # Direct reconstruction
    data.spijn_from_ksp(admm_param=settings.getfloat('admm_param'), oversamp=settings.getfloat('oversamp_ratio'),
                        max_admm_iter=settings.getint('max_admm_iter'), max_cg_iter=settings.getint('max_cg_iter'),
                        max_iter=settings.getint('max_spijn_iter'), verbose=settings.getint('verbose'),
                        reg_param=settings.getfloat('spijn_param'), compute_device=settings.getint('compute_device'),
                        lstsq_solver=settings.get('lstsq_solver'),
                        tol_fac=settings.getfloat('tol_fac'),
                        max_power_iter=settings.getint('max_power_iter', max_power_iter_def))

    # Low-rank inversion image reconstruction (extra CG iterations on top)
    data.lr_inversion(batchsize=settings.getint('lr_sense_batchsize'), oversamp=settings.getfloat('oversamp_ratio'),
                      max_iter=settings.getint('max_cg_iter') + 50, compute_device=settings.getint('compute_device'),
                      verbose=settings.getint('verbose'), lstsq_solver=settings.get('lstsq_solver'),
                      tol_fac=settings.getfloat('tol_fac'),
                      max_power_iter=settings.getint('max_power_iter', max_power_iter_def))

    # Rotate image sequence to the real axis
    data.rotate2real()

    # Solve for components (no regularization)
    data.spijn_solve(0, max_iter=settings.getint('max_spijn_iter'),
                     verbose=settings.getint('verbose'), norm_correction=False)

    # Save to .h5
    data.to_h5(settings['reconstruction_output_path'], save_raw=False, save_dict=False)


if __name__ == "__main__":
    # NOTE(review): the config module must expose a ``settings_`` object;
    # elsewhere a ``load_settings()`` factory is used — confirm.
    from config import settings_

    print("Loading data...")
    data_ = MrfData.from_h5(settings_['mri_data_path'])

    recon_lr_invert_spijn(data_, settings_)
def showcomponents(recondatapath, showplot=True):
    """Plot the reconstructed component maps (raw and sum-normalized).

    Reads ``data.h5`` from *recondatapath*, writes the figures into
    ``Components/`` and ``Components_norm/`` below it.
    """
    # Load dataset
    with h5py.File(recondatapath + 'data.h5', 'r') as hf:
        components = hf.get('comp')[0, ...]
        dictindex = hf.get('index')[:]
        dictt1 = hf.get('dictt1')[:]
        dictt2 = hf.get('dictt2')[:]
        img = -hf.get('imgseq')[0, :, 0]
        dictmat = hf.get('dictmat')[:]

    os.makedirs(recondatapath + 'Components/', exist_ok=True)
    os.makedirs(recondatapath + 'Components_norm/', exist_ok=True)

    # Mask: pixels with at least 5% of the maximum rank-1 intensity
    mask = np.zeros_like(img)
    mask[img > 0.05 * max(img)] = 1
    mask = to_image(mask)
    if showplot:
        # Plot rank-1 image and mask
        plt.imshow(to_image(img))
        plt.colorbar()
        plt.show()

        plt.imshow(mask)
        plt.show()

    # Compensate for the reduction in dictionary length
    dictlen = dictmat.shape[0]
    # NOTE(review): load_dict is called elsewhere as load_dict(settings) and
    # returns four values — this zero-argument, three-value call looks stale;
    # confirm against MRFrecon.gen_phantom.load_dict.
    og_dictmat, _, _ = load_dict()
    og_dictmat = og_dictmat[:dictlen, :]
    inv_scalefact = np.linalg.norm(og_dictmat, axis=0)[dictindex]

    # Show components separately
    for i in range(components.shape[1]):
        plt.figure()
        image = to_image(components[:, i]) / inv_scalefact[i]
        plt.imshow(image)
        plt.title('$T_1={:.1f}, T_2={:.1f}, Index={:.1f}$'.format(dictt1[dictindex[i]],
                                                                  dictt2[dictindex[i]], dictindex[i]))
        plt.colorbar()
        plt.savefig(recondatapath + 'Components/component{index}'.format(index=i) + '.png')
        if showplot:
            plt.show()
        else:
            plt.close('all')

    # Show components normalized by the component sum (inside the mask)
    sum_comp = to_image(np.sum(components, axis=1))
    for i in range(components.shape[1]):
        plt.figure()
        image = to_image(components[:, i]) / inv_scalefact[i]
        # np.bool was removed in NumPy 1.24 — use the builtin bool
        plt.imshow(np.divide(image, sum_comp, out=np.zeros_like(image), where=mask.astype(bool)))
        plt.title('$T_1={:.1f}, T_2={:.1f}, Index={:.1f}$'.format(dictt1[dictindex[i]],
                                                                  dictt2[dictindex[i]], dictindex[i]))
        plt.colorbar()
        plt.savefig(recondatapath + 'Components_norm/component{index}'.format(index=i) + '.png')
        if showplot:
            plt.show()
        else:
            plt.close('all')


def showimgseq(recondatapath, showplot=True):
    """Save (and optionally show) the magnitude images of the sequence."""
    # Load dataset (first slice only)
    with h5py.File(recondatapath + 'data.h5', 'r') as hf:
        lrimgseq = hf.get('imgseq')[0, ...]

    os.makedirs(recondatapath + 'Imgseq/', exist_ok=True)

    for i in range(lrimgseq.shape[1]):
        plt.figure()
        plt.imshow(to_image(np.abs(lrimgseq[:, i])), origin='lower')
        plt.axis('off')
        plt.savefig(recondatapath + 'Imgseq/image{index}'.format(index=i) + '.png')
        if showplot:
            plt.show()
        else:
            plt.close('all')


def showlagrange(recondatapath, showplot=True):
    """Plot Lagrange multipliers next to the image sequence (real/imag parts)."""
    # Load dataset
    with h5py.File(recondatapath + 'data.h5', 'r') as hf:
        lag_mult = hf.get('lagrange_multipliers')[:]
        lrimgseq = hf.get('imgseq')[:]

    os.makedirs(recondatapath + 'Lagrange/', exist_ok=True)

    for i in range(lag_mult.shape[1]):
        plt.figure()
        # Lagrange multiplier, real and imaginary parts
        plt.subplot(2, 3, 1)
        plt.imshow(to_image(lag_mult[:, i].real))
        plt.title("Lagrange multiplier \n real part")
        plt.colorbar()

        plt.subplot(2, 3, 4)
        plt.imshow(to_image(lag_mult[:, i].imag))
        plt.title("imag part")
        plt.colorbar()

        # Image sequence, real and imaginary parts
        plt.subplot(2, 3, 2)
        plt.imshow(to_image(lrimgseq[:, i].real))
        plt.title("Image \n real part")
        plt.colorbar()

        plt.subplot(2, 3, 5)
        plt.imshow(to_image(lrimgseq[:, i].imag))
        plt.title("imag part")
        plt.colorbar()

        # SPIJN input / discarded part (their sum)
        plt.subplot(2, 3, 3)
        plt.imshow(to_image((lag_mult[:, i] + lrimgseq[:, i]).real))
        plt.title("SPIJN \n input")
        plt.colorbar()

        plt.subplot(2, 3, 6)
        plt.imshow(to_image((lag_mult[:, i] + lrimgseq[:, i]).imag))
        plt.title("discarded")
        plt.colorbar()

        plt.savefig(recondatapath + 'Lagrange/lagrange{index}'.format(index=i) + '.png')
        if showplot:
            plt.show()
        else:
            plt.close('all')


def showerrortypes(recondatapath, phantomdatapath, showplot=True):
    """Plot the RMS error between the phantom and reconstructed sequences."""
    with h5py.File(phantomdatapath + 'phantom.h5', 'r') as hf:
        originalimgseq = hf.get('imgseq')[:]

    with h5py.File(recondatapath + 'data.h5', 'r') as hf:
        components = hf.get('comp')[:]
        comp_index = hf.get('index')[:]

    # NOTE(review): same stale-looking zero-argument load_dict call as in
    # showcomponents — confirm the signature.
    dictmat, dictt1, dictt2 = load_dict()
    reconimgseq = np.matmul(components, dictmat[:, comp_index].T)

    diffimgseq = originalimgseq - reconimgseq
    # Root-mean-square error over the time dimension
    rssimg = np.sqrt(np.sum(np.square(diffimgseq), axis=2) / diffimgseq.shape[2])
    plt.figure()
    plt.imshow(to_image(rssimg[0, :]))
    plt.colorbar()
    plt.savefig(recondatapath + 'error.png')
    if showplot:
        plt.show()
    else:
        plt.close('all')


def show_convergence(recondatapath, showplot=True):
    """Plot objective values per iteration on a log scale."""
    with h5py.File(recondatapath + 'data.h5', 'r') as hf:
        objvals = hf.get('objective_value')[:]
    for i in range(objvals.shape[0]):
        plt.semilogy(objvals[i, :], label='Iteration {}'.format(i + 1))
    plt.legend()
    if showplot:
        plt.show()
    else:
        plt.close('all')


def showresiduals(recondatapath, showplot=True):
    """Plot the stored ADMM residual curves and save them as Residuals.png."""
    with h5py.File(recondatapath + 'data.h5', 'r') as hf:
        residual = hf.get('residuals')[:]
    plt.plot(residual[0, :], label='|GFSDc-y|_F')
    plt.plot(residual[1, :], label='|GFSx-y|_F')
    plt.plot(residual[2, :], label='|x-Dc|_F')
    plt.plot(residual[3, :], label='total')
    plt.plot(residual[4, :], label='|GFSDc-y|_F+mu|c|')
    plt.legend()
    plt.savefig(recondatapath + 'Residuals' + '.png')
    if showplot:
        plt.show()
    else:
        plt.close('all')
origin="lower") + plt.title("T1 image") + plt.colorbar() + plt.subplot(2, 2, 2) + plt.imshow(to_image(t2img), origin="lower") + plt.title("T2 image") + plt.colorbar() + plt.subplot(2, 2, 3) + plt.imshow(to_image(pdimg), origin="lower") + plt.title("PD image") + plt.scatter(130, 105, color='r', marker='x') # big blob + plt.scatter(95, 175, color='r', marker='x') # TL blob + plt.scatter(115, 175, color='r', marker='x') # TR blob + plt.scatter(95, 160, color='r', marker='x') # BL blob + plt.scatter(115, 160, color='r', marker='x') # BR blob + plt.colorbar() + plt.subplot(2, 2, 4) + plt.imshow(to_image(np.divide(nrmseimg, pdimg, out=np.zeros_like(nrmseimg), where=pdimg != 0)), origin="lower") + plt.title("RMSE image") + plt.colorbar() + plt.savefig(recondatapath + 'single_component_match' + '.png') + if showplot: + plt.show() + else: + plt.close('all') + + +def postpro_admm(recondatapath, phantom_output_path, max_admm_iter): + for i in tqdm(range(max_admm_iter)): + try: + showcomponents(recondatapath + '{}/'.format(i), False) + except TypeError: + print("no components") + showimgseq(recondatapath + '{}/'.format(i), False) + showlagrange(recondatapath + '{}/'.format(i), False) + try: + showerrortypes(recondatapath + '{}/'.format(i), phantom_output_path, False) + except: + pass + # print("No phantom data found, no error types calculated for admm iter: {}".format(i)) + showresiduals(recondatapath, False) + + +def bulk_postpro(path, only_first_run=False): + # load config file + config = load_config_file(path) + runs = list(config.sections())[1:] + if only_first_run: + runs = runs[0] + for run in runs: + print('Processing run: {}'.format(run)) + config['DEFAULT']['path_extension'] = run + settings = config[run] + + # check for missing files + if not os.path.exists(settings['reconstruction_output_path']): + print("Run {} not found, continuing...".format(settings['path_extension'])) + continue + + if settings['recon_alg'] in ('filt_back_proj', 'lr_filt_back_proj', 'invert'): 
+ showimgseq(settings['reconstruction_output_path'], False) + show_single_comp_match(settings['reconstruction_output_path'], False) + elif settings['recon_alg'] in ('invert_into_spijn', 'admm_into_spijn', 'direct_spijn', 'sense_into_spijn'): + try: + showimgseq(settings['reconstruction_output_path'], False) + showcomponents(settings['reconstruction_output_path'], False) + except FileNotFoundError as e: + print(f'Saving figures {run} ' + e) + try: + showerrortypes(settings['reconstruction_output_path'], settings['phantom_output_path'], False) + except OSError: + pass + # print("No phantom data found, no error types calculated for run: {}".format(run)) + # elif settings['recon_alg'] in ('admm_into_spijn', 'admm_spijn'): + # postpro_admm(settings['reconstruction_output_path'], settings['phantom_output_path'], settings.getint('max_admm_iter')) + + +def multirun_convergence(configpath, only_first_run=False): + config = load_config_file(configpath) + runs = list(config.sections())[2:] + param = np.zeros(len(runs)) + i = 0 + for run in runs: + config['DEFAULT']['path_extension'] = run + settings = config[run] + combined_residuals = np.zeros((4, len(runs), settings.getint('max_admm_iter'))) + with h5py.File(settings['reconstruction_output_path'] + 'data.h5', 'r') as hf: + residual = hf.get('residuals')[:] + + combined_residuals[0, i, :] = residual[0, :] # |GFSDc-y| + combined_residuals[1, i, :] = residual[1, :] # |GFSx-y| + combined_residuals[2, i, :] = residual[2, :] # |Dc-x| + combined_residuals[3, i, :] = residual[3, :] # |Dc-x| + param[i] = settings['reconstruction_rank'] + + i += 1 + upperlim = 0 + plt.figure() + for i in range(combined_residuals.shape[1] - upperlim): + plt.semilogy(combined_residuals[0, i, :], label='rank = {}'.format(round(param[i], 2))) + plt.legend() + plt.title('|GFSDc-y|') + plt.show() + + plt.figure() + for i in range(combined_residuals.shape[1] - upperlim): + plt.semilogy(combined_residuals[1, i, :], label='rank = {}'.format(round(param[i], 
2))) + plt.legend() + plt.title('|GFSx-y|') + plt.show() + + plt.figure() + for i in range(combined_residuals.shape[1] - upperlim): + plt.semilogy(combined_residuals[2, i, :], label='rank = {}'.format(round(param[i], 2))) + plt.legend() + plt.title('|Dc-x|') + plt.show() + + plt.figure() + for i in range(combined_residuals.shape[1] - upperlim): + plt.semilogy(combined_residuals[3, i, :], label='rank = {}'.format(round(param[i], 2))) + plt.legend() + plt.title('|GFSx-y|+mu1|c|+mu2|Dc-x+v|') + plt.show() + + +def imgseq_as_nifti(recondatapath): + with h5py.File(recondatapath + 'data.h5', 'r') as hf: + lrimgseq = hf.get('imgseq')[:] + imsize = int(np.sqrt(lrimgseq.shape[1])) + lrimgseq = lrimgseq.reshape(lrimgseq.shape[0], imsize, imsize, lrimgseq.shape[2]) + img = nib.Nifti1Image(np.transpose(lrimgseq, (2, 1, 0, 3)), np.diag([1, 1, 5, 1])) + nib.save(img, recondatapath + 'imgseq.nii.gz') + print("Done") + + +def components_as_nifti(recondatapath): + with h5py.File(recondatapath + 'data.h5', 'r') as hf: + components = hf.get('comp')[:] + dictindex = hf.get('index')[:] + dictmat = hf.get('dictmat')[:] + + # compensate for reduction in dictionary length + dictlen = dictmat.shape[0] + og_dictmat, _, _ = load_dict() + og_dictmat = og_dictmat[:dictlen, :] + inv_scalefact = np.linalg.norm(og_dictmat, axis=0)[:] + inv_scalefact = inv_scalefact[dictindex] + components /= inv_scalefact[None, None, :] + + imsize = int(np.sqrt(components.shape[1])) + lrimgseq = np.log(components.reshape(components.shape[0], imsize, imsize, components.shape[2])) + img = nib.Nifti1Image(np.transpose(lrimgseq, (2, 1, 0, 3)), np.diag([1, 1, 5, 1])) + nib.save(img, recondatapath + 'log_components.nii.gz') + print("Done") + + +def load_settings(configpath, run): + # Try to read path to config.ini file + try: + path = sys.argv[1] + except IndexError: + path = configpath + if path is None: + pass + elif not (os.path.exists(os.path.join(path, 'config.ini'))): + print('Config file not found at 
{}'.format(os.path.join(path, 'config.ini'))) + raise FileNotFoundError('Config file not found') + + config = load_config_file(path) + # get settings for runs + run_name = config['RUN'][str(run + 1)] + config['DEFAULT']['path_extension'] = run_name + return config[run_name] + + +def admm_convergence(configpath, num_runs=10, admm_iter=20, + save=True, comp_device=None, calc_ksp=False): + def comp2t12(t12list, comp, imagesize, t1or2, admm_it, mask=None): + # To calculate geometric mean of t1 or t2 + logt12 = np.log(t12list) + t12map = np.exp(np.dot(comp, logt12) / np.sum(comp, axis=-1)) + if mask is not None: + t12map[~mask] = 0 + # plt.imshow(t12map.reshape(imagesize, imagesize)) + # plt.title('T{} map from multicomponent from admm iter {}'.format(t1or2, admm_it)) + # plt.colorbar() + # plt.show() + return t12map + + settings = load_settings(configpath, 0) + meanresult = np.zeros((num_runs, admm_iter, 4)) + relmeanresult = np.zeros((num_runs, admm_iter, 4)) + stdresult = np.zeros((num_runs, admm_iter, 4)) + + if comp_device is None: + comp_device = settings.getint('compute_device') + # load dictionary + with h5py.File(settings['dictionary_path'], 'r') as hf: + t1list = hf.get('t1list')[:] + t2list = hf.get('t2list')[:] + # load ground truth + with h5py.File(settings['mri_data_path'], 'r') as hf: + groundtruth = np.squeeze(hf.get('groundtruth')[:]) + pd = [0, 1, 0.86, 0.77, 1, 1, 1, 0, 0, 0.77, 1, 0.77] + groundtruth = groundtruth @ np.diag(pd) + dict_index = [0, 3151, 1794, 1232, 899, 1852, 3151, 0, 0, 1232, 3151, 1232] + # Create Phantom reference values + phantomt1 = comp2t12(t1list[dict_index], groundtruth, 256, '1', 'phantom') + phantomt2 = comp2t12(t2list[dict_index], groundtruth, 256, '2', 'phantom') + phantompd = np.sum(groundtruth, axis=1) + mask = np.logical_and(phantomt1 > 0, phantompd > .1)[None] + admm_params = [] + for j in range(num_runs): + settings = load_settings(configpath, j) + admm_params.append(settings.getfloat('admm_param')) + for i in 
range(admm_iter): + # load dataset + + with h5py.File(settings.get('admm_outpath') + 'admm{}.h5'.format(i), 'r') as data: + imsize = settings.getint('imagesize') + numslice = data['imgseq'].shape[0] + comp = data['comp'][()] + + measpd = np.sum(comp / data['norms'][()], axis=-1) + + pdcorr = np.nanpercentile(measpd, 99) + comp /= pdcorr + measpd /= pdcorr + pderror = (measpd - phantompd) + pderrorrel = pderror / phantompd + # calc t1t2 maps with geometric mean + t1map = comp2t12(data['dictt1'][()], comp, + imsize, '1', i, mask) + t2map = comp2t12(data['dictt2'][()], comp, + imsize, '2', i, mask) + # calc linear operator + if calc_ksp: + if i == 0: + list_linop = [] + for k in range(numslice): + coord = sp.to_device(data['coord'][()], comp_device) + comp_mat = sp.to_device(data['comp_mat'][()].T, comp_device) + maps = sp.to_device(data['maps'][()], comp_device) + singleslice_linop = MrfData.lr_sense_op(coord, + comp_mat, imagesize=imsize, + oversamp=settings.getfloat('oversamp_ratio'), + maps=maps, + batchsize=settings.getint('lr_sense_batchsize')) + list_linop.append(singleslice_linop) + linop = sp.linop.Diag(list_linop, 0, 0) + # calc residual + # |GFSDc-y| + dc = (comp @ data['compress_dict'][()].T).reshape(data['imgseq'].shape) + + dc_phasecorr = dc * np.exp(1j * data['phasecorrection'][()])[:, :, None] + dc_img = np.transpose(dc_phasecorr, (0, 2, 1)).reshape(numslice, + settings.getint('reconstruction_rank'), + imsize, + imsize).astype(np.complex64) + dc_img_gpu = sp.to_device(dc_img, comp_device) + ldc_img_gpu = linop(dc_img_gpu) + kspgpu = sp.to_device(data['ksp'][()], comp_device) + xp = sp.get_array_module(kspgpu) + kdiff = ldc_img_gpu - kspgpu + ksperror = sp.to_device(xp.linalg.norm(kdiff), -1) + kspnorm = sp.to_device(xp.linalg.norm(kspgpu), -1) + else: + ksperror = np.nan + kspnorm = 1 + + t1error = t1map - phantomt1 + t1errorrel = t1error / phantomt1 + t2error = (t2map - phantomt2) + t2errorrel = t2error / phantomt2 + + meanresult[j, i, 0] = 
np.sqrt(np.nanmean(pderror[mask] ** 2)) + meanresult[j, i, 1] = np.sqrt(np.nanmean(t1error[mask] ** 2)) + meanresult[j, i, 2] = np.sqrt(np.nanmean(t2error[mask] ** 2)) + meanresult[j, i, 3] = ksperror + relmeanresult[j, i, 0] = np.sqrt(np.nanmean(pderrorrel[mask] ** 2)) + relmeanresult[j, i, 1] = np.sqrt(np.nanmean(t1errorrel[mask] ** 2)) + relmeanresult[j, i, 2] = np.sqrt(np.nanmean(t2errorrel[mask] ** 2)) + relmeanresult[j, i, 3] = ksperror / kspnorm + stdresult[j, i, 0] = np.abs(np.nanstd(pderrorrel[mask])) + stdresult[j, i, 1] = np.abs(np.nanstd(t1errorrel[mask])) + stdresult[j, i, 2] = np.abs(np.nanstd(t2errorrel[mask])) + # stdresult[j, i, 3] = np.abs(np.std(ksperror))/kspnorm + print('Finished run {}, admm iter {}'.format(j, i)) + if save: + with h5py.File(settings['phantom_output_path'] + "run{}".format(j + 1) + "result.h5", 'w') as hf: + hf.create_dataset('relmean', data=relmeanresult) + hf.create_dataset('mean', data=meanresult) + hf.create_dataset('std', data=stdresult) + if save: + with h5py.File(settings['phantom_output_path'] + "result.h5", 'w') as hf: + hf.create_dataset('relmean', data=relmeanresult) + hf.create_dataset('mean', data=meanresult) + hf.create_dataset('std', data=stdresult) + return meanresult + """ + plt.subplot(2, 2, 3) + plt.plot(meanresult[..., 0].T) + plt.legend(admm_params) + plt.title("proton density") + plt.subplot(2, 2, 1) + plt.plot(meanresult[..., 1].T) + plt.legend(admm_params) + plt.title("T1") + plt.subplot(2, 2, 2) + plt.plot(meanresult[..., 2].T) + plt.legend(admm_params) + plt.title("T2") + plt.subplot(2, 2, 4) + plt.plot(meanresult[..., 3].T) + plt.legend(admm_params) + plt.title("||GFSDc-y||") + plt.show() + """ + + +if __name__ == "__main__": + # data_loc = '/tudelft.net/' + # configpath = 'U:/mrflumc/Results_Emiel/admm_tuning_5mm/' + # settings = load_settings(configpath, 0) + admm_convergence('') + # showcomponents(r'output/in_vivo2/extend_dict/1sl_1_32/test_T400_rp25/') + # 
showimgseq(r'output/in_vivo2/extend_dict/1sl_1_32/test_T400_rp25/') + # bulk_postpro(r'output/in_vivo2/extend_dict_pdhg/1sl_1_32/') + # bulk_postpro(r'output/in_vivo2/extend_dict_pdhg/1sl_5_32/') + # bulk_postpro(r'output/in_vivo/extend_dict_pdhg/1sl_1_32/') + # bulk_postpro(r'output/in_vivo/extend_dict_pdhg/1sl_5_32/') + # bulk_postpro(r'output/in_vivo2/extend2/1sl_5_32/') diff --git a/README.rst b/README.rst new file mode 100644 index 0000000..0a1079a --- /dev/null +++ b/README.rst @@ -0,0 +1,134 @@ +This repository can be used to reconstruct MR Fingerprinting data and perform single and multi-component matching. + +This code heavily relies on `SigPy `_, so many thanks to the developers of this great package! + +============ +MrfData +============ +``MRFrecon\Backend.py`` contains a python class ``MrfData`` to reconstruct MRF data and calculate multi-component maps using joint sparsity [1]_ resulting in a small number of tissues over the complete region of interest. + +The class can: + +* reconstruct MRF image sequence from kspace data using: + * Filtered back projection + * Low rank Filtered back projection comparable to [2]_ + * Parallel image reconstruction + * Low rank inversion image reconstruction + * Low rank ADMM image reconstruction + +* calculate quantitative data + * Single component matching + * SPIJN (joint sparse components from an image sequence) + * Direct joint sparse components from kspace + +------------ +Installation +------------ +The package requires several packages + +* Numpy +* Sigpy +* Matplotlib +* h5py +* Scipy +* Cupy (optional) +* Nibabel (optional) + +``environment.yml`` contains these packages. + + +----- +Usage +----- +Load the data class from the module + +.. code-block:: Python + + from MRFrecon import MrfData + +Create an empty instance of the data class and add the raw mri data + +.. 
code-block:: Python

+    data = MrfData()
+    data.ksp = ksp
+    data.coord = coord
+    data.dictmat = dictmat
+    data.dictt1 = dictt1
+    data.dictt2 = dictt2
+
+    # equivalently
+    data = MrfData(ksp, coord, dictmat, dictt1, dictt2)
+
+Note the structure of the variables:
+
+* ksp: numpy array of shape (#slices, #coils, #dynamics, #spirals_per_image, #point_on_spiral)
+* coord: numpy array of shape (#dynamics, #spirals_per_image, #point_on_spiral, #spiral_coordinates)
+* dictmat: numpy array of shape (#atoms, #dynamics)
+* t1list: list of :math:`T_1` values of shape (#atoms)
+* t2list: list of :math:`T_2` values of shape (#atoms)
+
+With the data initialized, the class can solve for the image sequence and components.
+The following code solves for the joint sparse components after low rank inversion image reconstruction.
+
+.. code-block:: Python
+
+    # compress dictionary
+    data.compress_dictionary(10)
+
+    # Espirit calc for sensitivity maps
+    data.mrf_espirit(256)
+
+    # compress coils
+    data.coil_compression(5)
+
+    # Low rank inversion image reconstruction
+    data.lr_inversion()
+
+    # rotate image sequence to real axis
+    data.rotate2real()
+
+    # Solve for components
+    data.spijn_solve(0.25)
+
+    # Save to .h5
+    data.to_h5('./')
+
+
+A working version of this is contained in ``example.py``
+
+
+---------------------------
+Usage with config.ini files
+---------------------------
+To smooth this procedure and have more reusable methods, or to perform reconstruction with different settings,
+a configparser has been used to use different config files (see example data) together with ``main_from_config_files.py``; this
+allows running the reconstruction from the terminal with predefined reconstruction steps as contained in ``mrf_recon.py``.
+This is especially useful on a cluster with a slurm job manager and the array command.
+
+----------
+References
+----------
+.. [1] Nagtegaal, M, Koken, P, Amthor, T, et al. Fast multi-component analysis using a joint sparsity constraint for MR fingerprinting. Magn Reson Med. 
2020; 83: 521– 534. https://doi.org/10.1002/mrm.27947 +.. [2] Assländer, J., Cloos, M.A., Knoll, F., Sodickson, D.K., Hennig, J. and Lattanzi, R. (2018), Low rank alternating direction method of multipliers reconstruction for MR fingerprinting. Magn. Reson. Med., 79: 83-96. https://doi.org/10.1002/mrm.26639 + +------- +Contact +------- +\(c\) Emiel Hartsema, July 2021 + +\(c\) Martijn Nagtegaal, March 2022 + +Technical University of Delft, Faculty of Applied Sciences + +emiel@hartsema.com + +M.A.Nagtegaal@tudelft.nl + +.. image:: https://camo.githubusercontent.com/4fde808ab45b0f7ec5763d9daf2e96192c9ca859792fd4531f86ace05da08230/68747470733a2f2f6431726b616237746c71793566312e636c6f756466726f6e742e6e65742f5f70726f6365737365645f2f362f312f63736d5f496d506879732d6c6f676f5f6d657425323074656b73745f643037366135636437362e706e67 + :alt: Imphys + +\ + +.. image:: https://seeklogo.com/images/T/TU_Delft-logo-D6086E1A70-seeklogo.com.png + :alt: TUDelft diff --git a/environment.yml b/environment.yml new file mode 100644 index 0000000..ef97d3f --- /dev/null +++ b/environment.yml @@ -0,0 +1,12 @@ +name: mrfrecon +channels: + - defaults + - conda-forge + - frankong +dependencies: + - python>=3.6 + - sigpy>=0.1.23 + - numpy + - scipy + - h5py + - matplotlib \ No newline at end of file diff --git a/example.py b/example.py new file mode 100644 index 0000000..d8c65a2 --- /dev/null +++ b/example.py @@ -0,0 +1,101 @@ +""" +Example script to show how to use the +""" + +import h5py + +from MRFrecon import MrfData + +# %% Load data +# Load data from numerical phantom +with h5py.File(r'example_data/num_phantom.h5', 'r') as hf: + ksp = hf.get('kspace')[:] + coord = hf.get('coord')[:] + +# Load dictionary +with h5py.File(r'example_data/dictionary.h5', 'r') as hf: + dictmat = hf.get('dictmat')[:].T.imag + dictt1 = hf.get('t1list')[:] + dictt2 = hf.get('t2list')[:] + +# %% Create the data class +data = MrfData() +data.ksp = ksp +data.coord = coord +data.dictmat = dictmat +data.dictt1 = dictt1 
+data.dictt2 = dictt2 + +# equivalent to: +# data = MrfData(ksp, coord, dictmat, dictt1, dictt2) + +# make 2 copies +data2 = data.copy() +data3 = data.copy() + +# %% Solve LR inversion with single component matching +# very similar to (slightly different settings) +# recon_lr_invert(data, settings=load_settings('./example_data/config.ini',0)) + +# compress dictionary to rank 10 +data.compress_dictionary(10) + +compute_device = -1 +# Espirit calc for sensitivity maps, with reconstruction matrix of size 256 +data.mrf_espirit(256, compute_device=compute_device, tol_fac=.005, ) + +# compress data to 5 virtual coils +data.coil_compression(5) + +# Low rank inversion image reconstruction +data.lr_inversion(compute_device=compute_device, tol_fac=.005) + +# rotate image sequence to real axis +data.rotate2real() + +# Solve for single component MRF +data.single_component_match(verbose=2) + +# Save to .h5 +data.to_h5('./example_data/', 'lr_inv_single.h5') + +# %% Solve LR ADMM with SPIJN component reconstruction +# very similar to (slightly different settings) +# recon_admm_into_spijn(data, settings=load_settings('./example_data/config.ini',2)) + +# compress dictionary to rank 10 +data2.compress_dictionary(10) + +# Espirit calc for sensitivity maps, with reconstruction matrix of size 256 +data2.mrf_espirit(256, compute_device=compute_device) + +# compress data to 5 virtual coils +data2.coil_compression(5) + +# Low rank admm image reconstruction +data2.lr_admm(2e-3, compute_device=compute_device) +# Note: low rank admm does not need rotate2real()! 
+ +# Solve for components with a joint sparse regularization parameter of 0.15 +data2.spijn_solve(0.15, verbose=2) + +# Save to .h5 +data2.to_h5('./example_data/', 'lr_admm_spijn.h5') + +# %% Solve direct reconstruction +# very similar to (slightly different settings) +# recon_admm_into_spijn(data, settings=load_settings('./example_data/config.ini',1)) +# compress dictionary to rank 10 +data3.compress_dictionary(10) + +# Espirit calc for sensitivity maps, with reconstruction matrix of size 256 +data3.mrf_espirit(256, compute_device=compute_device) + +# compress data to 5 virtual coils +data3.coil_compression(5) + +# Direct component reconstruction +data3.spijn_from_ksp(admm_param=2e-3, reg_param=0.25, compute_device=compute_device, verbose=2) + +# Save to .h5 +data3.to_h5('./example_data/', 'direct.h5') diff --git a/example_data/README.txt b/example_data/README.txt new file mode 100644 index 0000000..586c879 --- /dev/null +++ b/example_data/README.txt @@ -0,0 +1 @@ +Download data from https://figshare.com/s/243037b4f1ecdc467d38 or doi 10.4121/19434527 \ No newline at end of file diff --git a/example_data/config.ini b/example_data/config.ini new file mode 100644 index 0000000..3178e13 --- /dev/null +++ b/example_data/config.ini @@ -0,0 +1,132 @@ +[DEFAULT] +### General settings ### +#---------------------------------------# +imagesize = 224 +showplot = no +showprogressbar = yes +verbose = 1 + +#datasource can be: "phantom", "mri_data" or "h5" +datasource = h5 +# if datasource set to "h5", data sourced from mri_data_path +mri_data_path = %(configpath)snum_phantom.h5 + + +# recon_alg can be: +# recon + single comp matching: "fbp", "sense", "invert", "admm" +# recon + multi comp matching: "invert_into_spijn", "admm_into_spijn", "sense_into_spijn" +# direct multicomp recon: "direct_spijn" +recon_alg = invert + +#Dictionary +dictionary_path = %(configpath)sdictionary.h5 +dict_corne_struct = no + +# Compute device -1 for CPU, 0 for GPU +compute_device = 0 + +# Value 
"configpath" and "path_extension" dynamically added in config.py +phantom_output_path = %(configpath)s +reconstruction_output_path = %(configpath)s%(path_extension)s + +### Settings for phantom generation ### +#---------------------------------------# +# phantom_type can be "brainweb" or "artificial" +phantom_type = brainweb + +# if noskull is true, only generate phantom with GM, WM and CSF +noskull = no + +# Brainweb data +brainweb_path = brainweb_database/True/r111/ +brainweb_subject = subject04 +brainweb_slice_num = 80 + +# Phantom SNR, comment out for no noise. +phantom_snr = 70 + +# k-space trajectory, can be "spiral" or "radial" +samp_pattern = spiral + +# spiral settings - Mainly used for phantom creation +spir_fov = 0.24 +spir_mat_size = 120 +spir_undersamp_freq = 0.25 +spir_undersamp_phase = 36 +spir_var_dens_fac = 6 +spir_grad_amp = 0.03 +spir_max_slew_rate = 150 + +# Information about the head coil +num_sense_coils = 31 +cage_diam = 0.4 +coil_fov = 0.256 +coils_per_ring = 8 + +#### Settings for reconstruction ### +#---------------------------------------# +# Oversampling ration for NUFFT +oversamp_ratio = 1.25 + +# number of virtual coils for coil compression +num_virt_coil = 5 + +# Rank of the reconstruction +reconstruction_rank = 10 + +# Single component matching batch size, used for decreased computation time with increased memory usage +# The number represents the amount of pixels matched simultaniously, NUMBER MUST BE DEVISIBLE BY imagesize^2 !! +single_comp_match_batchsize = 1024 + +#-- Memory management parameters --# +# LR_Sense batch used for gpu memory reduction in excange for increased computation time. +# The number represents the amount of timepoints calculated simultaniously. +#lr_sense_batchsize = 200 + +# The number represents the amount of coils calculated simultaniously. 
+espirit_coil_batchsize = 32


+#-- parameters for iterative algorithms --#
+# Espirit parameters
+espirit_mask_threshhold = 0.05
+max_espirit_cg_iter = 50
+# mask_path = %(phantom_output_path)s../../extend_dict/prep/1sl_mask.png
+# mask_spijn_path = %(phantom_output_path)s../../extend_dict/prep/1sl_mask_no_skull.png
+# NOTE: spijn_param is set once, under "Coupling parameters" below; a duplicate
+# key here would make configparser (strict mode) raise DuplicateOptionError.
+# spijn_param = 0.05

+# Whether or not to normalize data based on k-space norm
+norm_kspace = True

+# Iteration limits for iterative algorithms
+max_admm_iter = 10
+max_cg_iter = 30
+max_spijn_iter = 30
+tol_fac = .05
+lstsq_solver = PrimalDualHybridGradient
+max_power_iter = 20

+# Coupling parameters
+spijn_param = 0.20
+admm_param = 2e-3

+# Regularization
+regtype = wavelet_sc
+regularization_lambda = 1e-06
+regularization_lambda_ksp = True


+# section [RUN] is always required.
+[RUN]
+0 = recon_phantom_inv
+1 = recon_phantom_kspijn
+2 = recon_phantom_admm

+[recon_phantom_inv]
+recon_alg = invert

+[recon_phantom_kspijn]
+# BUGFIX: the dispatcher and post-processing match 'direct_spijn' (underscore);
+# 'direct-spijn' was never recognised and fell through to the warning branch.
+recon_alg = direct_spijn

+[recon_phantom_admm]
+recon_alg = admm_into_spijn
diff --git a/main_from_config_files.py b/main_from_config_files.py
new file mode 100644
index 0000000..cfa5025
--- /dev/null
+++ b/main_from_config_files.py
@@ -0,0 +1,74 @@
+"""
+    Author: Emiel Hartsema, Martijn Nagtegaal
+    Institution: TU Delft
+    Python version: 3.7
+"""
+import os
+import time
+import warnings
+
+from MRFrecon import load_data
+from MRFrecon import load_settings
+from MRFrecon.mrf_recon import recon_fbp, recon_lr_invert, recon_lr_admm, recon_sense, \
+    recon_lr_invert_spijn, recon_admm_into_spijn, recon_sense_into_spijn, recon_direct_spijn, recon_test, \
+    recon_wavelet_into_spijn, recon_wavelet
+
+
+class Timer(object):
+    def __init__(self, name=None):
+        self.name = name
+
+    def __enter__(self):
+        self.tstart = time.time()
+
+    def __exit__(self, type, value, traceback):
+        if self.name:
+            print('[%s]' % self.name, )
+        print('Elapsed time total calculation, poeh poeh: %.2f seconds' % (time.time() - self.tstart))
+
+
+if 
__name__ == "__main__": + settings = load_settings() + data = load_data(settings) + + os.makedirs(settings['reconstruction_output_path'], exist_ok=True) + + # Run reconstruction algorithm + if settings['recon_alg'] == 'fbp': + with Timer('fbp'): + recon_fbp(data, settings) + elif settings['recon_alg'] == 'invert': + with Timer(settings['recon_alg']): + recon_lr_invert(data, settings) + elif settings['recon_alg'] == 'admm': + with Timer(settings['recon_alg']): + recon_lr_admm(data, settings) + elif settings['recon_alg'] == 'sense': + with Timer(settings['recon_alg']): + recon_sense(data, settings) + elif settings['recon_alg'] == 'invert_into_spijn': + with Timer(settings['recon_alg']): + recon_lr_invert_spijn(data, settings) + elif settings['recon_alg'] == 'admm_into_spijn': + with Timer(settings['recon_alg']): + recon_admm_into_spijn(data, settings) + elif settings['recon_alg'] == 'sense_into_spijn': + with Timer(settings['recon_alg']): + recon_sense_into_spijn(data, settings) + elif settings['recon_alg'] == 'wavelet': + with Timer(settings['recon_alg']): + recon_wavelet(data, settings) + elif settings['recon_alg'] == 'wavelet_into_spijn': + with Timer(settings['recon_alg']): + recon_wavelet_into_spijn(data, settings) + elif settings['recon_alg'] == 'direct_spijn': + with Timer(settings['recon_alg']): + recon_direct_spijn(data, settings) + elif settings['recon_alg'] == 'test': + with Timer(settings['recon_alg']): + recon_test(data, settings) + else: + warnings.warn("Skip to next run; Reconstruction algorithm not identified in [{}]. Got [{}] excpected " + "filt_back_proj, lr_filt_back_proj, invert, invert_spijn or " + "admm_spijn".format(settings['path_extension'], settings['recon_alg'])) + print('Done')