From 8d4fdaa63e9c7f9a0a5c540a324b79f197fca5d8 Mon Sep 17 00:00:00 2001
From: Felix Zimmermann
Date: Mon, 20 Jan 2025 13:52:59 +0100
Subject: [PATCH] Squashed commit of the following:

commit 51aa45764e78da2d9e6bb6c971bc9183f3047a3e Author: Felix Zimmermann Date: Mon Jan 20 11:40:44 2025 +0100 fix
commit 3d6589aecab4e7318fcf780cdf0476960832a619 Author: Felix Zimmermann Date: Mon Jan 20 11:36:28 2025 +0100 lose assert
commit 16a8f218d96e473e5f8d5fae6db314981d68ba96 Author: Felix Zimmermann Date: Mon Jan 20 11:22:37 2025 +0100 assert
commit 0716b42c9baf421636d38120256a68246e0fd0b0 Author: Felix Zimmermann Date: Mon Jan 20 11:08:39 2025 +0100 assert passes on my pc
commit 6b08e95cd653d4005a830b317826c7091b5c1357 Author: Felix Zimmermann Date: Mon Jan 20 10:47:34 2025 +0100 update
commit f1987b19118377588bb849287dac2cdc7598fa34 Merge: a98b9d19 82e58ed7 Author: Felix F Zimmermann Date: Mon Jan 20 10:33:22 2025 +0100 Merge branch 'main' into improvedoc
commit a98b9d191ff3198fcaee53505ef63be66d3ba0dc Author: Felix Zimmermann Date: Mon Jan 20 00:32:18 2025 +0100 update
commit b8ed0ec504979100884113daaa1b4321c7906aa5 Author: Felix Zimmermann Date: Mon Jan 20 00:28:51 2025 +0100 update
commit 90d4ccb345327ada7055501c44a917a980aed32e Author: Felix Zimmermann Date: Sun Jan 19 23:56:25 2025 +0100 Squashed commit of the following:
commit 82e58ed7681bbc8d8098a1e774eed4f0d0c7a7ff Author: Felix F Zimmermann Date: Sun Jan 19 23:46:29 2025 +0100 Fix import in utils (#614)
commit 8b7ab3d1520698e61cd65f082a5b6b2b1f0452d2 Author: Felix F Zimmermann Date: Thu Jan 16 13:31:28 2025 +0100 Fix inverse softplus beta (#608)
commit f23103e665780f0db5af3099e2e6ff6a751755ac Author: Christoph Kolbitsch Date: Fri Jan 17 08:03:22 2025 +0100 Some more small things (#611)
commit d43c1b28deddf81d2ca14c936b8e56ea1389373e Author: Felix Zimmermann Date: Thu Jan 16 13:26:10 2025 +0100 example update
commit 0b2eee8daa448886e972c90826341da3ac4c6c1e Author: Felix Zimmermann Date: Thu Jan 16 00:25:23 2025 +0100 cell break
commit 084c07de14ecf2379ba5b9a355a2e0702d958bf0 Author: Felix Zimmermann Date: Thu Jan 16 00:18:32 2025 +0100 some more text
commit 2383efc8c9d3149278244f0ed1a56039034f57f3 Author: Felix Zimmermann Date: Thu Jan 16 00:03:36 2025 +0100 fix constraint usage
commit 87802bf10d8e428758d5100353ae7dcbd1030a9d Author: Felix Zimmermann Date: Wed Jan 15 23:32:28 2025 +0100 remove image
commit d848caa3952a97eddc226dd42ec811dfc1bbe3af Author: Felix Zimmermann Date: Wed Jan 15 16:52:08 2025 +0100 notes
commit 8aa586250602a3a7b86c4ae72172904e8f594b79 Author: Felix Zimmermann Date: Wed Jan 15 16:51:47 2025 +0100 rotation
commit 1023027513b618126e2e8cf98b77a0e721f00c9a Author: Felix Zimmermann Date: Wed Jan 15 16:42:27 2025 +0100 update
commit 14be293372ef93fdb7e909ca62fa99a8752043a4 Author: Felix Zimmermann Date: Wed Jan 15 16:41:45 2025 +0100 update
commit c5e4f0142b4a00053cf3f34251355a046c374ed7 Author: Felix Zimmermann Date: Wed Jan 15 16:37:49 2025 +0100 fix link
commit 51e7bd877b746a2bb992b2ab11e21fedb55af65f Author: Felix Zimmermann Date: Wed Jan 15 16:37:40 2025 +0100 note
commit 9d7dc8d001d0f1c3716151c77d7a70004c0df049 Merge: 4e5e62bb aff63fbf Author: Felix Zimmermann Date: Wed Jan 15 16:27:37 2025 +0100 Merge branch 'main' into improvedoc
commit 4e5e62bbfae69e7cf5e6a4ab19213daafde3479b Author: Felix Zimmermann Date: Wed Jan 15 15:41:24 2025 +0100 typo
commit a5bde84845e8dbea5c937061d7e08d4b6ff8f147 Author: Felix Zimmermann Date: Wed Jan 15 15:38:20 2025 +0100 cartesian sample
commit d9da8001d358b1322e7f50fe83d8b76dde65a87d Author: Felix Zimmermann Date: Wed Jan 15 13:20:55 2025 +0100 exclude rst from mege check
commit fea532680ee0b85e023db12ce0426c5ccb1b7e46 Merge: 6fe9ec84 6685fc9c Author: Felix Zimmermann Date: Wed Jan 15 13:19:19 2025 +0100 Merge branch 'main' into improvedoc
commit 6fe9ec84ad38acda041fc7a72d95bdc15cf1a8ab Author: Felix Zimmermann Date: Wed Jan 15 12:56:38 2025 +0100 update
commit d4bf1d8ef361c657c07613ed191c651bdd5bf019 Author: Leonid Lunin Date: Wed Jan 15 12:38:51 2025 +0100 Add favicon to docs (#605)
commit eeb3ae0c1f88eddfc8d521d0ce6e562190980d8d Author: Felix Zimmermann Date: Wed Jan 15 12:37:31 2025 +0100 Squashed commit of the following:
commit 6685fc9ce5ab323b5511e97a91c2b920597cde40 Author: Leonid Lunin Date: Wed Jan 15 11:24:26 2025 +0100 Update .ipynb notebooks from .py in pre-commit hook locally (#537) Co-authored-by: Felix F Zimmermann
commit 6cc7ef9f357b4ab192256feedbdaff9a4596d2cd Author: Felix F Zimmermann Date: Tue Jan 14 16:19:51 2025 +0100 Fix WaveletOp (#604) fixed problem for computation of gradients
commit 3ad7c371a95a17982a257f11a86b9e8cce54ab77 Author: Felix Zimmermann Date: Wed Jan 15 11:16:34 2025 +0100 calls first
commit ed6f7f97567d53a36dfd013eee4b30ff674ccf4a Author: Felix Zimmermann Date: Wed Jan 15 11:15:30 2025 +0100 update notebooks
commit 8b47125e6e52b13cc67ff54369b163e3b1b42cb9 Author: Felix Zimmermann Date: Wed Jan 15 11:15:17 2025 +0100 fix adjpint as backward docstrings
commit 634d40e91e47cb95ad9de09a6c4ac35e24a3ee8b Author: Felix Zimmermann Date: Wed Jan 15 10:32:04 2025 +0100 shorten all references in exmaples
commit 10c8c944406bf3a748b065d4e469f2553a024544 Author: Felix F Zimmermann Date: Wed Jan 15 10:11:08 2025 +0100 Apply suggestions from code review Co-authored-by: Christoph Kolbitsch
commit 39603ca3e1b13329d4fa33f15feccf87f5d7c59b Author: lrlunin Date: Wed Jan 15 09:54:27 2025 +0100 fixed recon svg, increase its size in docs
commit c032e125d24e0a095daf8f23dca102cb85740581 Author: Felix Zimmermann Date: Wed Jan 15 09:38:20 2025 +0100 update
commit 17b03ea82fc5b318b1e41095536ac69be1ffdc51 Author: Felix Zimmermann Date: Tue Jan 14 23:45:44 2025 +0100 fix
commit b8907cae224f198098a1c17330a1991dd65877a3 Author: Felix Zimmermann Date: Tue Jan 14 23:45:16 2025 +0100 fix
commit e6ab0dbe554ba6363b5fe716ac0433b7fdaa64d9 Author: Felix Zimmermann Date: Tue Jan 14 22:47:27 2025 +0100 fix
commit 337a514c9f1c6ba8b2900223c11ad49aa6e8e2dc Author: Felix Zimmermann Date: Tue Jan 14 22:40:31 2025 +0100 update readme
commit 2b3faf1451addc5d14db31b63246bd26d0759230 Author: Felix Zimmermann Date: Tue Jan 14 22:36:26 2025 +0100 update
commit ac37591bf6aef64343a5fda0f8fe3c49992081cb Author: Felix Zimmermann Date: Tue Jan 14 22:28:30 2025 +0100 update
commit 215651819a8d9fac0fcaded3d55ea23cee9f7c76 Author: Felix Zimmermann Date: Tue Jan 14 22:22:28 2025 +0100 lint
commit 75b7633fff25cb93da5797fc2e99b725a43a4c15 Author: Felix Zimmermann Date: Tue Jan 14 22:20:01 2025 +0100 svg instead of png
commit d0baf78832c89118028312c10bb176a3935b5e7a Author: Felix Zimmermann Date: Tue Jan 14 22:15:34 2025 +0100 update
commit 457787c2015281737f33373f79bf0ee63d1ef581 Author: Felix Zimmermann Date: Tue Jan 14 10:24:09 2025 +0100 rename
commit 688a55ca1a712995c15c3398f6f5284ed4a3afcb Author: Felix F Zimmermann Date: Tue Jan 14 10:17:04 2025 +0100 Apply suggestions from code review Co-authored-by: Christoph Kolbitsch
commit 4d497e7277bb02c6d7f0c46230e38fd7cb377613 Author: Felix Zimmermann Date: Tue Jan 14 00:11:36 2025 +0100 update
commit 644cce9fc66c6787d226593dc757a4dcba881124 Author: Felix Zimmermann Date: Mon Jan 13 22:52:41 2025 +0100 update
commit 54114aae8ee08f6c401effe041c4d271b575de8c Author: Felix Zimmermann Date: Mon Jan 13 20:48:16 2025 +0100 update
commit 3dbd1d647f470f1209ca00a9bf70bbf0647c3bc6 Author: Felix Zimmermann Date: Mon Jan 13 17:06:44 2025 +0100 update
commit 929aa0d124996cc246e67326b40b221ebb4807bc Author: Felix Zimmermann Date: Mon Jan 13 16:11:25 2025 +0100 update
commit df16dd95e5ae01aea7bf7c0c624936b7b46e2953 Author: Felix Zimmermann Date: Mon Jan 13 16:00:54 2025 +0100 update
commit 433649eaa871e809d76e8d1bf6031787312275af Author: Felix Zimmermann Date: Mon Jan 13 15:35:54 2025 +0100 update
commit 71943e0337c1da276e3e814bbd31fe1d072e420f Author: Felix Zimmermann Date: Mon Jan 13 13:36:19 2025 +0100 update
commit 871cdd2b1e65f605843e9db2edfcbfa0a113b794 Author: Felix Zimmermann Date: Mon Jan 13 13:30:21 2025 +0100 update
commit 768472482b7f755e4f5a5801b6ff26f12247a696 Author: Felix Zimmermann Date: Mon Jan 13 01:54:21 2025 +0100 update
commit 09c6b9737458551b8d1fd45b999816f2f291025b Merge: 408022be e029d5ed Author: Felix F Zimmermann Date: Mon Jan 13 01:32:34 2025 +0100 Merge branch 'main' into improvedoc
commit 408022be200b2c11002f8b59059b91cca2c61735 Author: Felix Zimmermann Date: Mon Jan 13 01:23:13 2025 +0100 update
commit ba5b48461f883397ae4535d0153165299cd36d5f Author: Felix Zimmermann Date: Mon Jan 13 01:14:53 2025 +0100 update
commit 5311951be72dcba400e42b96ced6a4a2fde69dc5 Author: Felix Zimmermann Date: Mon Jan 13 00:34:30 2025 +0100 update
commit 84537989a17873c3bb62d5aba0218213b3ad6ccb Author: Felix Zimmermann Date: Mon Jan 13 00:19:52 2025 +0100 update
commit 5d214247163cdfcf97f8983e0e6852f5a0a3b5ce Author: Felix Zimmermann Date: Sun Jan 12 22:27:47 2025 +0100 update
commit f94a1779a5b10742cc159954bf5e79982ab79406 Author: Felix Zimmermann Date: Sun Jan 12 22:13:36 2025 +0100 update
commit 93ad440dbc31eaf9c0310b31b3813bfbf62a1af4 Author: Felix Zimmermann Date: Sun Jan 12 21:39:37 2025 +0100 fix
commit 7cc658792324ea4803e7b7452eaa2e7ae9de5c09 Author: Felix Zimmermann Date: Sun Jan 12 17:16:15 2025 +0100 Squashed commit of the following:
commit 51d3dda80e3d3e2b4d90985f56413fffc002082b Author: Felix Zimmermann Date: Sun Jan 12 15:25:48 2025 +0100 pip install mrpro with notebook option
commit feabab37b67225fc9ad42a3db45ab63b06f260d1 Author: Felix Zimmermann Date: Sun Jan 12 15:24:02 2025 +0100 execute notebooks locally
commit 8a537d45ff0c381bb655a88bd26ea2799af1f2f3 Author: Felix Zimmermann Date: Sun Jan 12 15:21:48 2025 +0100 silence jupytext
commit 36785c44c3618d0cf719c0565b6d87a699788d7e Author: lrlunin Date: Fri Jan 10 13:57:38 2025 +0100 move cartesion_reconstruction in correct folders
commit d3b0d1bbcd53670497b04f1e083b6719491b37fb Author: lrlunin Date: Fri Jan 10 13:54:34 2025 +0100 run mypy hook as last
commit b66b9b749ce09f66d10cf07192f0859b04841e91 Author: Lunin Leonid Date: Thu Jan 9 22:22:38 2025 +0100 fix notebook_path variable in matrix
commit 8e2cb8ea39416b1e308e811c0b2aaadacec44ee7 Merge: f4505297 a1873a0a Author: Lunin Leonid Date: Thu Jan 9 22:20:50 2025 +0100 Merge branch 'main' into notebooks-in-pre-commit
commit f4505297b062e23ce5474678cbcb9dd26c4bcd84 Author: Felix F Zimmermann Date: Thu Jan 9 22:15:20 2025 +0100 changes to leonids pr (#602)
commit a1873a0ae0a1db04d8549d426ffb448e515245b3 Author: Lunin Leonid Date: Wed Jan 8 09:54:12 2025 +0100 Add dark and light logo to README (#600)
commit f040c001d68985c25674724794743e0f53764903 Author: Felix F Zimmermann Date: Wed Jan 8 00:07:27 2025 +0100 Release v0.250107 (#599)
commit 13c4e7e8b2144be156df3105965fbb34b77544b2 Author: lrlunin Date: Wed Dec 18 16:24:36 2024 +0100 fix notebooks_path output
commit 93ef986cba7f332dfc2e5f2790713b9f58eee82b Author: lrlunin Date: Wed Dec 18 16:21:23 2024 +0100 fix matrix element name
commit 7eccf8723981a5357a9b5376ba6d625249ffec16 Merge: 2d3c3ff9 9dc11673 Author: Lunin Leonid Date: Wed Dec 18 16:14:30 2024 +0100 Merge branch 'main' into notebooks-in-pre-commit
commit 2d3c3ff94e3bcf17c2bcc4da79c1d86ba5ca5936 Author: lrlunin Date: Wed Dec 18 15:46:10 2024 +0100 remove space after markdown for colab badge
commit 9a5556db6f068a6fa37821b6a2ab2e4661a3021b Author: lrlunin Date: Wed Dec 18 15:45:25 2024 +0100 add space after markdown for colab badge
commit fba5a6f8398bc11b82111a92e9600367a51a47d6 Author: lrlunin Date: Wed Dec 18 15:05:55 2024 +0100 add colab badge for each notebook
commit 159afed9f6ebfc0df68e074777a5588024da8493 Author: lrlunin Date: Wed Dec 18 14:17:45 2024 +0100 update jupytext version
commit 34d43f0ea81583b20fe8ddf5d25306746613e65e Author: lrlunin Date: Tue Dec 10 13:58:43 2024 +0100 use find for notebook listing in docs
commit f76b236c2a93b7036cad0d108df7e69b3698f368 Author: lrlunin Date: Tue Dec 10 13:23:29 2024 +0100 one-way conversion from .py to .ipynb, remove preamble from .py representation
commit 865347a70a84ea8cdad82d30f7870128d6ddb452 Author: lrlunin Date: Wed Nov 20 22:45:42 2024 +0100 fixed path for notebooks in examples
commit 1688c70ebcbb2ed071745c4e91a86cfbfbb9cbdc Author: lrlunin Date: Wed Nov 20 22:44:16 2024 +0100 fixed trigger for jupytext
commit b9b3a6fd690ac446acd8cacf47e513657f863f81 Author: lrlunin Date: Wed Nov 20 22:40:17 2024 +0100 split scripts and notebooks
commit 279a578a5142ebbee65e88e57a27532a0bc36c6a Merge: beddf13d 8d24ebba Author: Lunin Leonid Date: Tue Nov 19 21:10:30 2024 +0100 Merge branch 'main' into notebooks-in-pre-commit
commit beddf13df25447ac9cdbe9f5bbe8049d52965720 Author: lrlunin Date: Tue Nov 19 21:10:03 2024 +0100 sync .ipynb/.py in pre-commit, add preamble in .py files
commit 0600a53159cfd1104aa509bd96fa1b0e09014132 Author: lrlunin Date: Tue Nov 19 18:42:47 2024 +0100 moved examples ruff config to examples folder, removed verbose from pre-commit hook
commit 36f3b7ae254b9271fdf67eceef1bc7fcf332b136 Author: lrlunin Date: Tue Nov 19 18:25:57 2024 +0100 fixed missing kernelspec
commit 4d610c639b2062d155bdd893e05a24d5813c6f17 Author: lrlunin Date: Fri Nov 15 14:23:38 2024 +0100 also removing metadata.language_info
commit 48e8080cf55866e564a5b3fa7437024778b14c36 Author: lrlunin Date: Thu Nov 14 22:45:35 2024 +0100 clean kernel related information from the cells
commit f8aa62107c652102ed07512ec6d29ff38b5f5886 Author: lrlunin Date: Thu Nov 14 20:44:13 2024 +0100 remove mention of convert steps
commit 534aaa0197c92e40094788741e71dca3f8499380 Author: lrlunin Date: Thu Nov 14 20:37:43 2024 +0100 reset cell id
commit 9f5f2da01905bd76ea09dc478fd523bf274e645f Merge: 74675d67 38722bf9 Author: Lunin Leonid Date: Thu Nov 14 20:36:21 2024 +0100 Merge branch 'main' into notebooks-in-pre-commit
commit 74675d67c44fe7aff8be5f3a717b6bbf0bf3c293 Merge: ae3a6135 c268ad25 Author: Lunin Leonid Date: Sun Nov 10 20:50:44 2024 +0100 Merge branch 'main' into notebooks-in-pre-commit
commit ae3a6135b66f7039fcc5eb1ee281452cf6b25e27 Author: lrlunin Date: Sun Nov 10 20:50:10 2024 +0100 reset the cells to init states
commit 5d1dba27f8f5b5665ce1a704e3fe813349df53b2 Author: lrlunin Date: Fri Nov 8 16:43:47 2024 +0100 moved notebooks formatting and update to pre-commit
commit 66a4d4e2f9e51f6faac72e550b22637d56427ee9 Author: Felix Zimmermann Date: Sun Jan 12 03:06:49 2025 +0100 update
commit d4f7fb2e45a69d3502e47f8efff01a4801c2e045 Author: Felix Zimmermann Date: Sun Jan 12 00:54:07 2025 +0100 update
commit 132637c5cef53ab8b97844792d1c36aa0f5e10d2 Author: Felix Zimmermann Date: Sat Jan 11 02:21:20 2025 +0100 update
commit 48e312537a7ffd47a52f4532649964768f2dcc03 Author: Felix Zimmermann Date: Sat Jan 11 01:56:33 2025 +0100 fix
commit 2571ae3957318078aabf88ef04d88fb59ecf811e Author: Felix Zimmermann Date: Sat Jan 11 01:18:52 2025 +0100 update
commit e51b7c77e18a3b1ea688a7bfd8005042aa7ce0bc Author: Felix Zimmermann Date: Sat Jan 11 00:56:54 2025 +0100 update
commit 7caf514993f96b066be1ba5221f3cf107ff456d2 Author: Felix Zimmermann Date: Fri Jan 10 23:10:45 2025 +0100 test
commit cfb20b007c6ef7f5840135a2b053ae19d6b6a927 Author: Felix Zimmermann Date: Fri Jan 10 23:09:53 2025 +0100 update
commit 7a3cf18ab7c86c38b21c20ad610b2d48f17cf46f Author: Felix Zimmermann Date: Fri Jan 10 22:48:50 2025 +0100 update
commit 6a5ef6bf7f09219ffcf0ca608c72b198599e3675 Author: Felix Zimmermann Date: Fri Jan 10 21:39:42 2025 +0100 test
commit a26a559af51fc5e950b9d9febca45d1f06a9f7cd Author: Felix Zimmermann Date: Fri Jan 10 16:42:02 2025 +0100 update
commit 0af333887d7d89169c633b4dce50902f169caf85 Author: Felix Zimmermann Date: Fri Jan 10 16:33:42 2025 +0100 update
commit a72d095ece9b36aef0d530cb54485d865bb0fdb0 Author: Felix Zimmermann Date: Fri Jan 10 16:28:44 2025 +0100 Squashed commit of the following:
commit 36785c44c3618d0cf719c0565b6d87a699788d7e Author: lrlunin Date: Fri Jan 10 13:57:38 2025 +0100 move cartesion_reconstruction in correct folders
commit d3b0d1bbcd53670497b04f1e083b6719491b37fb Author: lrlunin Date: Fri Jan 10 13:54:34 2025 +0100 run mypy hook as last
commit b66b9b749ce09f66d10cf07192f0859b04841e91 Author: Lunin Leonid Date: Thu Jan 9 22:22:38 2025 +0100 fix notebook_path variable in matrix
commit 8e2cb8ea39416b1e308e811c0b2aaadacec44ee7 Merge: f4505297 a1873a0a Author: Lunin Leonid Date: Thu Jan 9 22:20:50 2025 +0100 Merge branch 'main' into notebooks-in-pre-commit
commit f4505297b062e23ce5474678cbcb9dd26c4bcd84 Author: Felix F Zimmermann Date: Thu Jan 9 22:15:20 2025 +0100 changes to leonids pr (#602)
commit a1873a0ae0a1db04d8549d426ffb448e515245b3 Author: Lunin Leonid Date: Wed Jan 8 09:54:12 2025 +0100 Add dark and light logo to README (#600)
commit f040c001d68985c25674724794743e0f53764903 Author: Felix F Zimmermann Date: Wed Jan 8 00:07:27 2025 +0100 Release v0.250107 (#599)
commit 13c4e7e8b2144be156df3105965fbb34b77544b2 Author: lrlunin Date: Wed Dec 18 16:24:36 2024 +0100 fix notebooks_path output
commit 93ef986cba7f332dfc2e5f2790713b9f58eee82b Author: lrlunin Date: Wed Dec 18 16:21:23 2024 +0100 fix matrix element name
commit 7eccf8723981a5357a9b5376ba6d625249ffec16 Merge: 2d3c3ff9 9dc11673 Author: Lunin Leonid Date: Wed Dec 18 16:14:30 2024 +0100 Merge branch 'main' into notebooks-in-pre-commit
commit 2d3c3ff94e3bcf17c2bcc4da79c1d86ba5ca5936 Author: lrlunin Date: Wed Dec 18 15:46:10 2024 +0100 remove space after markdown for colab badge
commit 9a5556db6f068a6fa37821b6a2ab2e4661a3021b Author: lrlunin Date: Wed Dec 18 15:45:25 2024 +0100 add space after markdown for colab badge
commit fba5a6f8398bc11b82111a92e9600367a51a47d6 Author: lrlunin Date: Wed Dec 18 15:05:55 2024 +0100 add colab badge for each notebook
commit 159afed9f6ebfc0df68e074777a5588024da8493 Author: lrlunin Date: Wed Dec 18 14:17:45 2024 +0100 update jupytext version
commit 34d43f0ea81583b20fe8ddf5d25306746613e65e Author: lrlunin Date: Tue Dec 10 13:58:43 2024 +0100 use find for notebook listing in docs
commit f76b236c2a93b7036cad0d108df7e69b3698f368 Author: lrlunin Date: Tue Dec 10 13:23:29 2024 +0100 one-way conversion from .py to .ipynb, remove preamble from .py representation
commit 865347a70a84ea8cdad82d30f7870128d6ddb452 Author: lrlunin Date: Wed Nov 20 22:45:42 2024 +0100 fixed path for notebooks in examples
commit 1688c70ebcbb2ed071745c4e91a86cfbfbb9cbdc Author: lrlunin Date: Wed Nov 20 22:44:16 2024 +0100 fixed trigger for jupytext
commit b9b3a6fd690ac446acd8cacf47e513657f863f81 Author: lrlunin Date: Wed Nov 20 22:40:17 2024 +0100 split scripts and notebooks
commit 279a578a5142ebbee65e88e57a27532a0bc36c6a Merge: beddf13d 8d24ebba Author: Lunin Leonid Date: Tue Nov 19 21:10:30 2024 +0100 Merge branch 'main' into notebooks-in-pre-commit
commit beddf13df25447ac9cdbe9f5bbe8049d52965720 Author: lrlunin Date: Tue Nov 19 21:10:03 2024 +0100 sync .ipynb/.py in pre-commit, add preamble in .py files
commit 0600a53159cfd1104aa509bd96fa1b0e09014132 Author: lrlunin Date: Tue Nov 19 18:42:47 2024 +0100 moved examples ruff config to examples folder, removed verbose from pre-commit hook
commit 36f3b7ae254b9271fdf67eceef1bc7fcf332b136 Author: lrlunin Date: Tue Nov 19 18:25:57 2024 +0100 fixed missing kernelspec
commit 4d610c639b2062d155bdd893e05a24d5813c6f17 Author: lrlunin Date: Fri Nov 15 14:23:38 2024 +0100 also removing metadata.language_info
commit 48e8080cf55866e564a5b3fa7437024778b14c36 Author: lrlunin Date: Thu Nov 14 22:45:35 2024 +0100 clean kernel related information from the cells
commit f8aa62107c652102ed07512ec6d29ff38b5f5886 Author: lrlunin Date: Thu Nov 14 20:44:13 2024 +0100 remove mention of convert steps
commit 534aaa0197c92e40094788741e71dca3f8499380 Author: lrlunin Date: Thu Nov 14 20:37:43 2024 +0100 reset cell id
commit 9f5f2da01905bd76ea09dc478fd523bf274e645f Merge: 74675d67 38722bf9 Author: Lunin Leonid Date: Thu Nov 14 20:36:21 2024 +0100 Merge branch 'main' into notebooks-in-pre-commit
commit 74675d67c44fe7aff8be5f3a717b6bbf0bf3c293 Merge: ae3a6135 c268ad25 Author: Lunin Leonid Date: Sun Nov 10 20:50:44 2024 +0100 Merge branch 'main' into notebooks-in-pre-commit
commit ae3a6135b66f7039fcc5eb1ee281452cf6b25e27 Author: lrlunin Date: Sun Nov 10 20:50:10 2024 +0100 reset the cells to init states
commit 5d1dba27f8f5b5665ce1a704e3fe813349df53b2 Author: lrlunin Date: Fri Nov 8 16:43:47 2024 +0100 moved notebooks formatting and update to pre-commit
commit 31bf097bbf867c9e33153a72b31f13e0c9f72b9f Author: Felix Zimmermann Date: Fri Jan 10 16:25:58 2025 +0100 update
commit 47ad68e20c769672e09cf46657b4f7e435a0d8bf Author: Felix Zimmermann Date: Fri Jan 10 16:10:21 2025 +0100 update
commit a92877b42ab72ef63eb5396f3b886328ce2f6aad Author: Felix Zimmermann Date: Thu Jan 9 00:04:40 2025 +0100 update
commit f6f0502b71ee9f5fa6dbf6734224ac2cd194222d Author: Felix Zimmermann Date: Thu Jan 9 22:08:52 2025 +0100 Squashed commit of the following:
commit 399ea3b033ffcbae534927650c64a753cb514c4d Author: Felix Zimmermann Date: Thu Jan 9 19:35:48 2025 +0100 notebools
commit 9a24e28ab1950f5d65741991a9824bd71e7503cc Author: Felix Zimmermann Date: Thu Jan 9 17:18:08 2025 +0100 changes to leonids pr
commit 13c4e7e8b2144be156df3105965fbb34b77544b2 Author: lrlunin Date: Wed Dec 18 16:24:36 2024 +0100 fix notebooks_path output
commit 93ef986cba7f332dfc2e5f2790713b9f58eee82b Author: lrlunin Date: Wed Dec 18 16:21:23 2024 +0100 fix matrix element name
commit 7eccf8723981a5357a9b5376ba6d625249ffec16 Merge: 2d3c3ff9 9dc11673 Author: Lunin Leonid Date: Wed Dec 18 16:14:30 2024 +0100 Merge branch 'main' into notebooks-in-pre-commit
commit 2d3c3ff94e3bcf17c2bcc4da79c1d86ba5ca5936 Author: lrlunin Date: Wed Dec 18 15:46:10 2024 +0100 remove space after markdown for colab badge
commit 9a5556db6f068a6fa37821b6a2ab2e4661a3021b Author: lrlunin Date: Wed Dec 18 15:45:25 2024 +0100 add space after markdown for colab badge
commit fba5a6f8398bc11b82111a92e9600367a51a47d6 Author: lrlunin Date: Wed Dec 18 15:05:55 2024 +0100 add colab badge for each notebook
commit 159afed9f6ebfc0df68e074777a5588024da8493 Author: lrlunin Date: Wed Dec 18 14:17:45 2024 +0100 update jupytext version
commit 34d43f0ea81583b20fe8ddf5d25306746613e65e Author: lrlunin Date: Tue Dec 10 13:58:43 2024 +0100 use find for notebook listing in docs
commit f76b236c2a93b7036cad0d108df7e69b3698f368 Author: lrlunin Date: Tue Dec 10 13:23:29 2024 +0100 one-way conversion from .py to .ipynb, remove preamble from .py representation
commit 865347a70a84ea8cdad82d30f7870128d6ddb452 Author: lrlunin Date: Wed Nov 20 22:45:42 2024 +0100 fixed path for notebooks in examples
commit 1688c70ebcbb2ed071745c4e91a86cfbfbb9cbdc Author: lrlunin Date: Wed Nov 20 22:44:16 2024 +0100 fixed trigger for jupytext
commit b9b3a6fd690ac446acd8cacf47e513657f863f81 Author: lrlunin Date: Wed Nov 20 22:40:17 2024 +0100 split scripts and notebooks
commit 279a578a5142ebbee65e88e57a27532a0bc36c6a Merge: beddf13d 8d24ebba Author: Lunin Leonid Date: Tue Nov 19 21:10:30 2024 +0100 Merge branch 'main' into notebooks-in-pre-commit
commit beddf13df25447ac9cdbe9f5bbe8049d52965720 Author: lrlunin Date: Tue Nov 19 21:10:03 2024 +0100 sync .ipynb/.py in pre-commit, add preamble in .py files
commit 0600a53159cfd1104aa509bd96fa1b0e09014132 Author: lrlunin Date: Tue Nov 19 18:42:47 2024 +0100 moved examples ruff config to examples folder, removed verbose from pre-commit hook
commit 36f3b7ae254b9271fdf67eceef1bc7fcf332b136 Author: lrlunin Date: Tue Nov 19 18:25:57 2024 +0100 fixed missing kernelspec
commit 4d610c639b2062d155bdd893e05a24d5813c6f17 Author: lrlunin Date: Fri Nov 15 14:23:38 2024 +0100 also removing metadata.language_info
commit 48e8080cf55866e564a5b3fa7437024778b14c36 Author: lrlunin Date: Thu Nov 14 22:45:35 2024 +0100 clean kernel related information from the cells
commit f8aa62107c652102ed07512ec6d29ff38b5f5886 Author: lrlunin Date: Thu Nov 14 20:44:13 2024 +0100 remove mention of convert steps
commit 534aaa0197c92e40094788741e71dca3f8499380 Author: lrlunin Date: Thu Nov 14 20:37:43 2024 +0100 reset cell id
commit 9f5f2da01905bd76ea09dc478fd523bf274e645f Merge: 74675d67 38722bf9 Author: Lunin Leonid Date: Thu Nov 14 20:36:21 2024 +0100 Merge branch 'main' into notebooks-in-pre-commit
commit 74675d67c44fe7aff8be5f3a717b6bbf0bf3c293 Merge: ae3a6135 c268ad25 Author: Lunin Leonid Date: Sun Nov 10 20:50:44 2024 +0100 Merge branch 'main' into notebooks-in-pre-commit
commit ae3a6135b66f7039fcc5eb1ee281452cf6b25e27 Author: lrlunin Date: Sun Nov 10 20:50:10 2024 +0100 reset the cells to init states
commit 5d1dba27f8f5b5665ce1a704e3fe813349df53b2 Author: lrlunin Date: Fri Nov 8 16:43:47 2024 +0100 moved notebooks formatting and update to pre-commit
commit eab03958fb4e240f04a6d474e3c8455846daa225 Author: Felix Zimmermann Date: Thu Jan 9 17:19:16 2025 +0100 update
commit df51ecfbc3c4122aa696683b7a58ae12e9951636 Author: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed Jan 8 22:45:42 2025 +0000 Notebooks updated
commit 66066728eaf8d3a728e33f16ce60fdd1c3aab7b7 Author: Felix Zimmermann Date: Wed Jan 8 23:38:59 2025 +0100 test
commit e00b6a7bd6077789e38315786c670c8d87c1f242 Author: Felix Zimmermann Date: Wed Jan 8 16:52:12 2025 +0100 test
commit e395218bed725fd274697b95462b0906dcec0049 Author: Felix Zimmermann Date: Wed Jan 8 16:14:02 2025 +0100 example
commit 3420fc642451868653e10cac54c1c35fb26900f6 Author: Felix Zimmermann Date: Wed Jan 8 14:11:03 2025 +0100 update
commit 466ab3cc12903386ebdb04a866f64cfe99b2edc2 Author: Felix Zimmermann Date: Wed Jan 8 13:44:45 2025 +0100 update
commit 9d0793566ea45e3234db12fe9308be9c3bf5a22a Author: Felix Zimmermann Date: Wed Jan 8 11:46:08 2025 +0100 update
commit f1e5d1e991720a7e2a0d3209b8cf61e7cf01efa6 Author: Felix Zimmermann Date: Tue Jan 7 23:09:44 2025 +0100 update
commit 9f500fbb118dd72dc06fc1b563f2917691e5acaa Author: Felix Zimmermann Date: Tue Jan 7 03:25:52 2025 +0100 update
commit f3a6e16ea55e37b6f2477f7cde2080a8471e8fed Author: Felix Zimmermann Date: Tue Jan 7 03:18:55 2025 +0100 update
commit 4398a1195b42daf10d2a73f147c02fedadf33bdb Author: Felix Zimmermann Date: Tue Jan 7 03:14:30 2025 +0100 update
commit 470f1a20c3837165224b305511162020d1c3d3a4 Author: Felix Zimmermann Date: Tue Jan 7 02:48:52 2025 +0100 improve doc
---
 .gitignore                                    |   1 +
 .pre-commit-config.yaml                       |  38 ++
 README.md                                     |   2 +-
 docs/source/_static/custom.css                |  11 +
 docs/source/_static/encoded_recon_space.png   | Bin 72424 -> 0 bytes
 docs/source/_static/encoded_recon_space.svg   | 191 +++++++
 docs/source/_static/favicon.svg               | 108 ++++
 docs/source/_static/logo_white.svg            |  36 +-
 docs/source/_templates/class_template.rst     |   5 +-
 docs/source/api.rst                           |  48 +-
 docs/source/conf.py                           | 279 ++++++++--
 docs/source/contributor_guide.rst             |  34 +-
 docs/source/examples.rst                      |   3 +-
 docs/source/faq.rst                           |   6 +-
 docs/source/index.rst                         |  48 +-
 docs/source/user_guide.rst                    |  94 +++-
 .../notebooks/cartesian_reconstruction.ipynb  | 517 +++++++++---------
 .../comparison_trajectory_calculators.ipynb   | 256 +++++++++
 .../notebooks/direct_reconstruction.ipynb     | 367 +++++++++----
 .../iterative_sense_reconstruction.ipynb      | 351 ------------
 ...rative_sense_reconstruction_radial2D.ipynb | 470 ++++++++++++++++
 ...e_reconstruction_with_regularization.ipynb | 445 +++++++++++++++
 .../pulseq_2d_radial_golden_angle.ipynb       | 225 --------
 .../notebooks/qmri_sg_challenge_2024_t1.ipynb | 261 +++++----
 .../qmri_sg_challenge_2024_t2_star.ipynb      | 330 -----------
 ...nb => qmri_t1_mapping_with_grad_acq.ipynb} | 329 ++++++-----
 ...rized_iterative_sense_reconstruction.ipynb | 421 --------------
 examples/scripts/cartesian_reconstruction.py  | 364 ++++++------
 .../comparison_trajectory_calculators.py      | 124 +++++
 examples/scripts/direct_reconstruction.py     | 185 +++++--
 .../scripts/iterative_sense_reconstruction.py | 140 -----
 ...iterative_sense_reconstruction_radial2D.py | 221 ++++++++
 ...ense_reconstruction_with_regularization.py | 228 ++++++++
 .../scripts/pulseq_2d_radial_golden_angle.py  |  92 ----
 examples/scripts/qmri_sg_challenge_2024_t1.py | 165 +++---
 .../scripts/qmri_sg_challenge_2024_t2_star.py | 146 -----
 ...cq.py => qmri_t1_mapping_with_grad_acq.py} | 177 +++---
 ...ularized_iterative_sense_reconstruction.py | 193 -------
 pyproject.toml                                |  42 +-
 src/mrpro/algorithms/__init__.py              |   6 +-
 src/mrpro/algorithms/csm/__init__.py          |   4 +-
 src/mrpro/algorithms/csm/walsh.py             |  28 +-
 src/mrpro/algorithms/dcf/__init__.py          |   2 +
 src/mrpro/algorithms/dcf/dcf_voronoi.py       |  23 +-
 src/mrpro/algorithms/optimizers/__init__.py   |   2 +
 src/mrpro/algorithms/optimizers/adam.py       |  54 +-
 src/mrpro/algorithms/optimizers/cg.py         |  49 +-
 src/mrpro/algorithms/optimizers/lbfgs.py      |  49 +-
 src/mrpro/algorithms/prewhiten_kspace.py      |  12 +-
 .../reconstruction/DirectReconstruction.py    |  32 +-
 .../IterativeSENSEReconstruction.py           |  27 +-
 .../reconstruction/Reconstruction.py          |  20 +-
 ...RegularizedIterativeSENSEReconstruction.py |  29 +-
 .../algorithms/reconstruction/__init__.py     |   2 +
 src/mrpro/data/CsmData.py                     |   4 +-
 src/mrpro/data/Data.py                        |   2 +-
 src/mrpro/data/DcfData.py                     |   2 +-
 src/mrpro/data/IData.py                       |  13 +-
 src/mrpro/data/KData.py                       |  48 +-
 src/mrpro/data/KNoise.py                      |   2 +-
 src/mrpro/data/KTrajectory.py                 |  34 +-
 src/mrpro/data/KTrajectoryRawShape.py         |  30 +-
 src/mrpro/data/MoveDataMixin.py               |  35 +-
 src/mrpro/data/QData.py                       |   2 +-
 src/mrpro/data/Rotation.py                    | 170 +++---
 src/mrpro/data/SpatialDimension.py            |   2 +-
 src/mrpro/data/__init__.py                    |   7 +-
 .../traj_calculators/KTrajectoryCalculator.py |   2 +-
 src/mrpro/data/traj_calculators/__init__.py   |   4 +-
 src/mrpro/operators/FastFourierOp.py          |  48 +-
 src/mrpro/operators/FiniteDifferenceOp.py     |   6 +-
 src/mrpro/operators/FourierOp.py              |  29 +-
 src/mrpro/operators/LinearOperator.py         | 116 ++--
 src/mrpro/operators/LinearOperatorMatrix.py   |   2 +-
 src/mrpro/operators/Operator.py               |  35 +-
 src/mrpro/operators/PCACompressionOp.py       |  18 +-
 src/mrpro/operators/SensitivityOp.py          |  14 +-
 src/mrpro/operators/SliceProjectionOp.py      |  26 +-
 src/mrpro/operators/WaveletOp.py              |   6 +-
 src/mrpro/operators/__init__.py               |   6 +-
 src/mrpro/operators/functionals/L1Norm.py     |   8 +-
 .../operators/functionals/L1NormViewAsReal.py |   8 +-
 .../operators/functionals/L2NormSquared.py    |   8 +-
 .../operators/models/InversionRecovery.py     |   6 +-
 src/mrpro/operators/models/MOLLI.py           |   8 +-
 .../operators/models/MonoExponentialDecay.py  |   6 +-
 .../operators/models/SaturationRecovery.py    |   6 +-
 .../TransientSteadyStateWithPreparation.py    |  30 +-
 src/mrpro/operators/models/WASABI.py          |   2 +-
 src/mrpro/operators/models/WASABITI.py        |   2 +-
 src/mrpro/operators/models/__init__.py        |   4 +-
 src/mrpro/phantoms/EllipsePhantom.py          |  17 +-
 src/mrpro/phantoms/__init__.py                |   4 +-
 src/mrpro/utils/__init__.py                   |   4 +-
 src/mrpro/utils/reshape.py                    |  21 +-
 src/mrpro/utils/slice_profiles.py             |   4 +-
 src/mrpro/utils/smap.py                       |   6 +-
 src/mrpro/utils/split_idx.py                  |   4 +-
 src/mrpro/utils/typing.py                     |  13 +-
 src/mrpro/utils/unit_conversion.py            |   2 +-
 src/mrpro/utils/zero_pad_or_crop.py           |   9 +-
 tests/data/test_kheader.py                    |   2 +-
 tests/helper.py                               |   4 +-
 103 files changed, 4615 insertions(+), 3518 deletions(-)
 delete mode 100644 docs/source/_static/encoded_recon_space.png
 create mode 100644 docs/source/_static/encoded_recon_space.svg
 create mode 100644 docs/source/_static/favicon.svg
 create mode 100644 examples/notebooks/comparison_trajectory_calculators.ipynb
 delete mode 100644 examples/notebooks/iterative_sense_reconstruction.ipynb
 create mode 100644 examples/notebooks/iterative_sense_reconstruction_radial2D.ipynb
 create mode 100644 examples/notebooks/iterative_sense_reconstruction_with_regularization.ipynb
 delete mode 100644 examples/notebooks/pulseq_2d_radial_golden_angle.ipynb
 delete mode 100644 examples/notebooks/qmri_sg_challenge_2024_t2_star.ipynb
 rename examples/notebooks/{t1_mapping_with_grad_acq.ipynb => qmri_t1_mapping_with_grad_acq.ipynb} (66%)
 delete mode 100644 examples/notebooks/regularized_iterative_sense_reconstruction.ipynb
 create mode 100644 examples/scripts/comparison_trajectory_calculators.py
 delete mode 100644 examples/scripts/iterative_sense_reconstruction.py
 create mode 100644 examples/scripts/iterative_sense_reconstruction_radial2D.py
 create mode 100644 examples/scripts/iterative_sense_reconstruction_with_regularization.py
 delete mode 100644 examples/scripts/pulseq_2d_radial_golden_angle.py
 delete mode 100644 examples/scripts/qmri_sg_challenge_2024_t2_star.py
 rename examples/scripts/{t1_mapping_with_grad_acq.py => qmri_t1_mapping_with_grad_acq.py} (62%)
 delete mode 100644 examples/scripts/regularized_iterative_sense_reconstruction.py
diff --git a/.gitignore b/.gitignore
index 6680dd749..68223bfba 100644
--- a/.gitignore
+++ b/.gitignore
@@ -193,3 +193,4 @@ cython_debug/
 *~
 *.swp
 *.swo
+
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 805555639..0173d4780 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -7,6 +7,7 @@ repos:
     hooks:
       - id: check-added-large-files
       - id: check-merge-conflict
+        exclude_types: [rst]
      - id: check-yaml
      - id: check-toml
      - id: check-json
@@ -68,6 +69,16 @@ repos:
        files: ^examples/scripts/.*py
        types_or: [python]

+  - repo: https://github.com/kynan/nbstripout
+    rev: 0.8.0
+    hooks:
+      # cleans the .ipynbs (removes outputs, resets all cell-ids to 0..N, cleans steps)
+      # also clean any kernel information left after execution
+      - id: nbstripout
+        name: clean .ipynb output
+        args: [--extra-keys, "metadata.language_info"]
+        files: examples/notebooks
+
   - repo: https://github.com/pre-commit/mirrors-mypy
     rev: v1.14.1
     hooks:
@@ -88,6 +99,33 @@ repos:
          - "--index-url=https://download.pytorch.org/whl/cpu"
          - "--extra-index-url=https://pypi.python.org/simple"

+  - repo: https://github.com/kynan/nbstripout
+    rev: 0.8.0
+    hooks:
+      # cleans the .ipynbs (removes outputs, resets all cell-ids to 0..N, cleans steps)
+      # also clean any kernel information left after execution
+      - id: nbstripout
+        name: clean .ipynb output
+        args: [--extra-keys, "metadata.language_info"]
+        files: examples/notebooks
+
+  - repo: https://github.com/mwouts/jupytext
+    rev: v1.16.6
+    hooks:
+      - id: jupytext
+        name: convert .py to .ipynb
+        args:
+          - --update
+          - --pipe
+          - "python .precommit/add_notebook_preamble.py {}"
+          - --to
+          - "../notebooks//ipynb"
+          - --update-metadata
+          - '{"accelerator": "GPU","colab": {"gpuType": "T4","provenance": []},"kernelspec": {"display_name": "Python 3 (ipykernel)","language": "python","name": "python3"}}'
+          - examples/scripts/*.py
+        always_run: true
+        pass_filenames: false
+
 ci:
   autofix_commit_msg: |
     [pre-commit] auto fixes from pre-commit hooks
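The jupytext hook added above is what keeps examples/notebooks/ in sync with examples/scripts/: on every commit it renders each script to a notebook in the sibling notebooks folder, pipes the text through .precommit/add_notebook_preamble.py, and stamps the Colab/kernelspec metadata given to --update-metadata. As a minimal sketch of what the hook automates, using jupytext's public read/write API (the script name is taken from the diffstat above; the preamble pipe step is left out):

    import jupytext

    # Read the jupytext-managed script (a plain .py file with cell markers).
    notebook = jupytext.read("examples/scripts/direct_reconstruction.py")

    # Mirror the hook's --update-metadata argument: mark the notebook as GPU-ready
    # for Colab and pin the kernelspec so the generated .ipynb opens with a kernel.
    notebook.metadata.update(
        {
            "accelerator": "GPU",
            "colab": {"gpuType": "T4", "provenance": []},
            "kernelspec": {"display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3"},
        }
    )

    # Mirror --to "../notebooks//ipynb": write the notebook into the sibling folder.
    jupytext.write(notebook, "examples/notebooks/direct_reconstruction.ipynb", fmt="ipynb")

In the hook itself this runs for all of examples/scripts/*.py on every commit (always_run: true, pass_filenames: false), and the nbstripout hooks strip outputs, cell ids, and metadata.language_info from the notebooks, so only the .py sources produce meaningful diffs.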
!important; +} diff --git a/docs/source/_static/encoded_recon_space.png b/docs/source/_static/encoded_recon_space.png deleted file mode 100644 index 0fbb5403549fe5c915f7eed094f558faa7289e5a..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 72424 zcmc$`Wms0*+BS@ZfeID^A_xWzA|fGSAfPDH9ZE<^N~fZNBA}>*QYs)VA>D|Cq@Z+n zN=nyzPWSsh-;e$L|MqdLz1D$p-*e6}#&w-_&F3=G68rYh>>(i`*>_!1Opb(PH!TUt zR)$@q_{o`z15)_cZVO4(2P7n%Cy9Ty*hh)l;)m4M*HoI3icrropuU`l~;F8bLF$&dc}CByo-#HCWVbv<&e~; z=bfjF&l~G_rwlYwx%ilS=YRjMa?&gR(djbx6NZPsdoX)R9vx**c5-vGZwb%kQNI`e ze)vIcSbtY&d%)$%P=_Hco-(R!3{NPo?E0_&Gc4-(z3;^T`bT-f%stQkx8I@o#Jz`~ z@qhgtL+`D=|NSMzhtNbkJo&$TKgE@2W&gi_jg&m!LtLZ7ugK4z8QIvRI1^3VGtTnx z@W{USzc(|L6Ue2_C?KH9nRxjzsfCr*IoTI`xtN%C-VPJ;lQL)>+2 z{@uHeN{MQQ-37v=`)HnQ*+yE_*r+%@+qHcsIo%U6M!)+82#7RB2;xXRH`Ur`Lv^0eU*NK*trS-+`f8RMb zTqkk!ruT$Pdxojy`ijk=jQ`r!vz$D9HrsYFY>E&U7$X}YqpHdhxKVmk`_5=Xl(?ys zzW(WjsqU9eiR%3kFGXIfpZ56wF8F``OC#;u0fz4}Uo>+)a9x+!DXXGh(u>l?D2^~Yh@%Cdw6+ynVE}ALRxzN>o;%w{QN$q?TuCN5_VeX=_`HFl4(vaXfqb6 zsQ%w4>6P7a;@Nng;pRTDzPc!J_wGlHm1hS|ChJ#G2EBV{q-bVn$V|zh7P*l1=@X+y zw$%=NI=@8^NtlRBrNB_@SA&txTs}Oz1GKaWicbIA7f_qVitpm~Dt}2IVcuO(QB&hD zMZRxeX>I7myZGPq1%7`15BLL>bmJqq_>>33(&vpDo;=XFetkD7Ih{9dmL~7ke;0>} zb`Ro2kOunsJrZ$QIl;kki!zaOTTLOY>U|?7DAidh2(pazbKaiuI`6 zdDFJT;m)(WS=re1!}ay_dR}l96m6`p+I(T+;v&~cp&A?su488T=$= zWMm$`etnSSVSc`_hK5F3*Pc3sJ$v@N3JCDPkNE`zimR%~e0_aMlQNT&DfIRAnORxg zU%Vijn3%Zfx?$3<$sf3zsI^_zrKP16wYB1AW}IS-2Rb@BdWMEbA3W$6<_mcD?s;|f zO_YQWj`L&FfB&Apk$>&lHS7yd9R15H>Iw=^yuJ4h3=fwyHXaEJ3ro(-Y<3d%s;sP> zUR!=JFgRG;)kSA!W;Q)Nebzo-@usD;e?R~u2gg3_E{f-37It=?!NI}BW4|&{dGhRM zw&F2cSXy#dI&95zBw;;ya+`pF0Dk$Qr{~W5_wVnOQ@(t8%XgiW>%*}}jvQfTXMd8P zpI5h)h&fdvMv%jm?lj=$|G&Oq%2X|b&crha5 zattmJcl7bor*C?56lj(9j*c5_LkzaIwxk!CHaO-eh>k;b6PJJG$`uNFdal5(&$o_+ z@LO(W^s5l#=wF#GA)o0hCA%3eqRF{l^k^IZgQ2~Si<6160yu)=_wTdo=;~UIH*pBL zu2G?AU%qy2$FR!n+qa3m@uxkr{1wAh3JQu3^`%2jJHRZtce_n64db(lMY z12r>JfAX!rzooB@rRC-RGN1YkQ}rhv9-ky$(xHqu#f0rUcHsl|90{=rvg^+A@bC)m z^dP7A&9NDubqxp!VIP-pb{3)&b@h(E9X=aYrKP21*p|kE0?e&fL18`evp11U?B8+X zKK)$GUANq~_xtlh{0_6H44il7(Ok40Z`y~_QeIB)#=i8eyDi;BpPZcJQiQnAS^Mcb zt*f)MRME%cdFmVGY*=?K*a&ti?(~2Zik5}DNzmwz2`MyJauaO zRgb;(_LI@iQ6cW4BoigftRt&4Y8c{QPiv zspm1dH_gxMIC-!5sw(~21}9z8j2ri$orm;C%CX=HJ@O={N5wooG&E#kYb)NFV}nbL zTzJo~yO-z$j(?e@Lj|9rxIgec@LX)yo;_CzUDwZ=b?|pN@n7LCvzX}v77^>2X#FZF zEnP-*>hSiqwojJ*Wg{&qCv)tkd|$nyNOaeD{rdIvaJXCF(oFxe7ccrdZl|WCTyFlN zdEnqdcXH7+nRw3wef|BGW4~Fl({76_Z^tf7T_mRyDgr36nC`iJ>eQ)<<0dHizQ-@> z3{?1`(ZB90%g&L!eLJw`>ZgbZfjNp}$Bvyh@1jjqOMhK+)!D_RF+g2K5G z!;Vlvo7eene@Fc8(^=tQXY1C~)YL~w9rdOaIEaJu%ea8~ud6Ds)K653y|nxkIJV8> zLIX*K8?S>s^PHFE7P3P_L+{fyL`abT9dEuJ@9<28J)q_)R?2>ufsT&OYK}s8I~5Hr zPRqXEvGSf7;+;*Zp^QHU{+IKqV?fQpXw($=x zj?`a`$Sd(AA09X|YuuW8`l8)rNShe6)Nv)^7s+)xE&(>h)W< z^l{X5?%lgblUeApy1;(+?5j6gs;aRW1yNFgV}CoBS5{I(FWQ}pRfx;@g3~hQydNtz z9UzR;8Cq%j>+`-`i(dDh@S{ZksVJ3X9Nu4D_~SlYbu0LSzCmqBeU2M}DjafI zd`r*V4r5<*5Emy6<~5<5o0*Xn3}a?vlUFT&N_JGmbN}(}KC(hWLMV0?dhCB=*Ka&j$M zmi<9bpFR~got~T1eg6Epd!f9`_1m`%=v|jSewa!e8=RV|%P?*KJe!Amp=RoSeOgK{ z{8pHdYQKXd_OXh!b=rpw4%Jk#XoqL7R`n|-qvR;*=xX0zFlCJ~laP?O;JRjy^3Cy| zdqvqy0O)idGXg!i=(3X5wC*G?Q*?kq?BtcLzQ95bkIm7}%NrURtc^WCZu6xRvM)ky z6Rc5?r6zUQ}IO-QsnGO`%D5il)0IRo5^&*_+hg?kh0*BnvGYUkFWPaw zkvv7ySbo&v@83~*O4KREPHpFzr%LO-US2vtJc9GNcS}5s$Lk0CK4WXH_P-QO8`DOc zFzGJH?;2ttZiAJzZ6x2?1yj?NM1Ey|5mL0cryHuSMye`VL;3U7?dP+FwvmhT@L z8DY-ibaHZ{JaQz;k{%BzJ}|rr-H-SEQ8rd)<{p-xe#V|iQdzb{~C zXSXi+C_&sn=9|=q5vi%?7Qiid00+`_ck4f*my1;xUL^$>FaVHrUr|s~s}xzAz8YHo z5Y<>owGQoc$4=F}jWINYGV-@)KMVC3f{VD*8*V8A47zpD+InNnVQ6eDZmw<^1nWN+ z4A?L76%}&{mhg^>GB@o`)-hT4zSqc#m 
zxEyaicPr7SH;REw^~YPWnb*Efs=U`P&^v2^Qo=M9c;K>Vpr}wv%vXQW-U!(c0ZHX* zBVF+D?wS+as2Gv)Cq=;8^$wBmlyTsgx@L=*4tXp8Tkhi@uwRYS#dHWF1ypE9FNm?6 zFQa1R3uP=pkqSHvF;upg^Jb?ZCK#72qT>n4Fipm5QLGTO>A}q0hbvs3E%R|?h4`dz*r(@CPXs4=*^P4z_p5;vH=&zq z(7Ez*aUqyOXEh={isxK<;5F$CSE)KZ$eMLf5)X|9tnGg!9$rV0xk-2)mAN+CG`x}W zGJ?({#hTn^=a31l9uC7@@ahHdIB2ivpOY=2_iwQ@erI|Sdi{m?emE(>@*NGwOYvUy z-t&(N{U8*l3{DUrapU?dFJ<1i&x36hdz)ej*^-gEI*JPWnbAJm_6`@xm0tCg8=o4r zka~TSq%%QBXZNH9hs$n20x}G7N1KxHshV~WR{JKnCloXT zK!kJ@fi zjNbpffo9|`p?g4aW1}a@u0fWQMZu5Rw?+px(V(Q*p3OaPeAiieFa+!Pj6Vd8!j<~6 z?p96p*NBiz7k>vu3$1}e6)oB|)_@4!jS-kghWMBZI|OWeawk`j8JvcM*crV#H{_tW ztnG}}g!6(J+>-Fd{|f5h`Cj|16tE4C=U5 zxaxA;U+oPhpIMK)bAYasDU@ii>N?TskIr@xYpS%H!Y$6^jV&jTNEbF0dDmLtO9AJq zDC;_)*Xel@g?;*RTl*PFb05q#CRz-o`uYli#P2Si-Pc@s`st;Zt*(j^KSFbP*VEIc~zE&IdX+P%PBao~17DY!tnfJSi}FdcO2%5nl2uBo`-toI0j{HGdFDS-qr*E&}c(_fH@b3S{=` zS)S}n8B2Pgm@c3wqNoy5VL<0o;kcpck3SQ#5kHIry|F@>Pxe?A$x4rzbm@RJ!znx;3^zJj+MoK2ca8o_?i-MLC4F3EeAsTVrZif9T#UbNBH}~)#D}j=tkgwA zIhKm0y99hRcBpL0fJCQPscCfRI8u%IYX8?ptvn_+cI`rriMn9Vys_kNj`qo$MO8u!Q4`W?g}> zQgZ}ub9>w2;uXv3qM-}5HRP)SAxhAZ9%iZ2N7g>%ruu6TMLv@VZpZ-cbxh^SIxG7~ z<@3O&hyP)y>l*gNI!ijFkN4a3+*)}cni5EmRvDJPx2}E(wH6|sC{*k5Pe~89R3I>& zw;z=T1O#mPT8t)-x~H`KD3dQ33ti~+@DnNVWd=ou0WQbixqP2KMGX-e^WL~K zK+zCK|ImTEq^+j7;Uvbng`$(yG|65G7|$VtGllc9j)4!7oSF2Ud<9kM)YX#|)~GDS zCEPm3sp>4sWA1t*b80J7i6L#h@RkE{9{unx|NO>RDKCBuWpO!xMB1Y><|gJ-y?%QO zQtXY=Ag{fswvB6{y9=nmms?YwE)82*?j(L!Ihc$q=rm5myE6}}pn=l^M13ptoSx54 zEMkh6^h?s4GrDDy++^!I9vTF%UJUI-bL2>-k>c1_4JbbsGel$YnT-|{vaqPWjZ`H^ zJBVbJ-FY)LL#oOmmpp@Tv_ysQODmlgsI#ii&Y-l z_z-X^$XPsy^dxwY_Iio^GnSg7@MKgknL%@w+2SH-ixRl2~hEfAK zK(PG1`#Wf1l^ss5d!$+&M~9gbTS1!zK!y=DdHp{G@Svgq{0re4ct5? z!6nnhk@DFDK3)}$jWvRiH8j%nE@p~~JDo(OlG|m!4jXqsX~8G=hp$0PKR2Y2Rv^V9 zZ2psPsMZF`d2wj*sP$Sdf;Ld(Qn+fYyHm8~*mRSKuF>SowUCUE${}>8UbA4@S3rTW5BW7#gWP@M0;p!J94t~zTo6AAtlM#(qdx1hM86TZzb47 zPtBa$$3Qr0a=cOcY&5O;7?KO>C3;tzN#_AcOs1=td*3_cm;XxNmS3<@hmH$&5^o(+ zOHRBQo#j@uyQwCRtO~LiCr7v@wV$ z<)cG(#2M zNOOx6oeJpWc?{AcCL%!$#fxt5g|C)gwe1RMvP*cdOQxhOU}wP&&E&g}%*~w|C z9gq=HbMfc=GPFJ#5Gx0j*+1o+^793n1>Ft^&L4F9S`YsZdh}q}c{J^Enc_GvVmj~Z zmzWTo5O$(nK0@#5S__UG{$x|yYcD_{4k0B*jom@Z1kfpRN%|Q?&7ec>Omz7TqG9;h zvH3Nl-o4nXSFhb8?n?dVoC-fWe({dC-gyW2*Js%i{v-cYW$Pd8aaR1DxNY<^_rWpW zym@oyklFo|!u6u9N9GmstqgcZnA?-v8{-#o_8!T2rDo zYWBc5v6#ywAld?Iu^%WW7D8~{4_Y3wJvG(lB9tv~_Dj30ah#X%8zpqg#!5*3J&?Dc zG0}0DO#;2twHCTX=Kj47@4q;gy=w>q{$96kt{(A|A!!b@N(QQ48K$Gr_@K0)sMhQK@N82XtM!6be)6^9+2XWr+rMDsZ zej%dPbv98nC*)0BiPNNbzc}8UL@0)kTg@*oxxa` zymBHRld&t|=?|Iz2Vs%lSDWhD_sFA0Wtz8E=01EKXh5b6eo)p$w3M4b`trqlKA-y) zY^)spmv68f@vf5guhe5a6*;c6CPdQ!o^-Ltr*i=Xi*yhYQ~`7KZa;4#2}^gOajouWaeNC@NN807y>He_C){i*c+{rZJG)GsId1w37Lw}(Fg`F6uwg4bjmkH&ETv|*ooQuS=T z*z(uq&zWjly=w(oXyhze8`Z1H;wJQ3S?7vro|AGYw`0_li849!6Kda;GepuvQawIV zX5NilTz2+r;&7B4zHz<#;{y*z8^?hTuRq>o=27+2N2Z)Y>)Be_WA=nYf{%XW-=cBZ zX7}(H!zJ2Qcq1|$mIP9V{W;VS_+Y-D*V#kd&{|8Nm-=)lToltcYA9Oo!2T1*UOld-=cBcF@ z1cUMt>G+pJNcZHnL2#5WreQdHnM-(fnof6GA`N-q+U*7Cn9_&;S7~n^mvg?x|2NB- z=FG7ijIm`Kgsh4yWa~@jm`~H01pKE!&Uf1jTB?NZ|E!Wv^cl=-T2Cuifp)ikyOLsuQ zT`lTy{zg2|{lEWREFI|XzT(rxZUioC@y_gn>ghu~S(#x@t((xt1q=T<&*sVog>a7K zvexyKO(q1|ZUO=-C8g7SF}(WUGYypD@Y%CJrfa97=f8wVSb@0&7i6d0M_e$b8y|46 z_QfobG3A5!j?<|lkw9&_EmHpC3xtqIZ~k?$*Pj$-ckbMou;~&{nCfYHwQ;H`Of=#M zh-`_Kl|2g>?umk|_-NyG*)d-u4&nrTXM14zAL3+ljTnfMD8)i9;T#%gfbe6-b80C) zo(IgE65U5+@_}n~zqhckIJ|M`NLI=cOq{->sY3f({eNRrjyU$Okprvbak|FikGsOw zGyUJr)srED}A1vb}uue)g9RFuFeb z3uWc{&%}-+=0XjnC4n?JY7`73D*TWSNh=8Yd( z$9muN&Vx~`%_+17mC6?sablp}ri~I$pXXF8r<)K~)kXmp3}~TDIYXX4lQL5r?-`S9 zzp4u3g7rp)t!*!ToVrZEHC`iL-8fGp?+4?s2g`TvxpS{-o7#bMp$iWL_MRO##KdLR 
z(vBK=c^>&eYRlEaKA+$9d;6P%bL)1s9%kPo>qOa{C;!#A)B5;B@8 z^6HiD;K>(no*$IoZzl7yvi&F~s55oQVh#Ra+Rwn8+6)gwUeo5r(gi^j6kjowBdPRP z!b40^xJeDPcngC(y}a^ywoI8}Z-4V@z5Q!Oh7ZcMwz0`i@mZ;CF2%jWBDa!1Wep)T z?V7-PF;X#M&RW^*my4BiXJ2$0$3U7Lj!#@s0}lBJd3C~`$E}CE5%QF zq1)QimsUUD%NXYVkrguH&Dywv9~GY>=gy$~T2V(+`rO^#ZKxa@;4+q@%_qdm#xPmR zOnaBfT#4n72ZNF3(0}EDnixGHo|tlEf=z%7al@6J9q6wURz8Z?dPHR{78_Sg2hN*4 z&)n`_$5SWoO!(BQymVp#S0ZM_^PG-D&J5N{>Ckf|HHOGLC)KCU02jv31hRL+^N+@1lwsnZrk#$0-LWIHounYlEk?&TQpZrR9N2 z`yIUbIjwkH7j>5GCgvkwm6;Ii^oW{&D=Qd(S zx+?d@Gvu|E$~uZMp%@sKH%r?HYNu?*q3iecmWe8Tr>wV-F&I>aP-GtaT&Sc)y~U45 zLSareUAhUxWkOTBC3Z+;txtx^uPRxLnV7nBEtC1ut~LGfJQ%gKfpE|V+tXpV6k(sYuv*!M& z1YNNxhuGV_FTvlqzP{eR7@zgsXV$D+H!0;8<)h=C`w?U7%m!%FC>38YX#9CqGk)1MTE@m-YC2{!VkbVo~Ak-~+NoF}1Zaz!glx=WM zjdvI?+}XU(&`{}z~Z4JowPZ;$ld!&}?zKVN?7?kz2cd(5qs_LH)xST5lGvcnTS zJtKOiAoeQO-TRq2tJwx7=ylY8+qQSmU<03r#`_-!mzCM^i>*WRH8P5Z=)CsG8s7k! zFYQ9YhKQR1fq|!an3iEdE6jw^GuaBX zd9#xAF{3q#jWJPKjjc0xATvMzuYhV^Pbpn1R=XzXh*4rQ7ZMjcJt_?Lj_`4VjjyMn=9*)^^IM}J!{))2b zMl+d84E!m7^@a}V^9O;fXo%wPKdVVl&%JjKlZ$=jZlTgcQ5MN1^1k6VeDWj#eVNgRv4$8ebNWqLaixf(%6kT)YDLK*a@wxGVEC~muB#`) zF_CJG;1^|k)l414Fq9Gtz)M_n1VVqjXTtKyI0!mDNYoOX?+(cCWmIgj4B_l^4fHy`mX`}owW6q{P8)Pp6<8b=tF#_LkY`9 z(hCzjmL7O0JTV1>ksZZg>>LGjk>lHEJaHGCcJ|!(i#rS@iJe=dw>3{uC~O`T)LBD|mxIh)^{UE@+!+I+Bl1m$k-I^ryXqB>s zK(xVDK_jfwp)QTVb<|d+Y$r zEsAoH+unJE^TnM>*cvL#T!T0Cwt-B)l!6I+;4R z=RNn=Oh6v=6GPO(WJm?X{7jh($w=YH^u%{|Ph`Y(8e^>LlQ)$0 zNBc?cJkWlOKjRn^!4}7a-z5^uZux{}&Woyjj~+eHdKZmi%ueaAv7Tkc2N`c)ht;MR z;=LX9f)lglu7i~TwNyqPuRH0}{GXd%xmZ#}?$B)^!7emsNir3=}j zTk^`eR6P`=w+78Tar`^nMDqS$6NsFAD<*|xTp=eZIa&Jqi|64fJJ`=+L|B|p9CSFB zu~BYTDEn5bK7~SkFhtO8rR%9Xn-Ie;L!xh)BL+n{vcwN`k#az3@#p%w#%(NUYScNp z9;KZ20bI1xu$j6WR*|MASs9q+VW! zOG;l!xO^qaHdq2A!C{kS&`Rng>?x`rhjEkAGtT2SBSruKzG;2FcI| zkT!lmHJ1=jq?J6}IeR@JkD8)dOy?PpQ8kdbF2*M`vN_K#a#L~{Fdm=5c`F1}Ue?e; ze%+mtvSg7ae}D7)x8US}ZxJIa@R&G9x1oLhTX|)rS6k>Xma_(jtQ@3mj85Li`a5FE zaYu^^Bf^Y`g}*@sc%0L{G{K8G?I`dPs|_t=HB!O9QX++H)>-PezE3^pQFyhY-fe4` zeU)ID5~+9=HV^96WY`xI7I}h`Jg+}#7S02_vUr6q@6K2K^(+l}Gl|857tvhq@PvNO zr{}79{F{73DYK6lS2HTUFlYp7cqq#X`A%B8UpuF+>bI_sC-!_IBEF`Lb)NS-0rjfe;8U zk?o-=pf&~b;&z+QovnBb2bjk;DC#K*;WEko2-1Mj@?k?yalp&IWG;>`Lx+uha?Mtz zHU$0?Uo>0-ZKMh&KqQZky*aBWLs{OY7oHD-0Jm`AaYRh;g zz#~Fw8DwZ*WP40Zlz1$h1LeBuypE~Yboze;k)3^<&nfo6DrM%!q@>@Ki8p3@IrL#B zcNuM{nUBNbRDbP@?R!nw*d)U_+l@_*)O|v`*^(k_ZYD zgK(ZYI?b1a4S4xvhAbAWx*r|F$0KK`DVxY+rE^gVk2s??39r6&sRzoNp$1Q&A1pg7 zVTao`-s)KBAog?P(lxR|%m5HswSUvsV^;cIPz6)00tECz?x|U!4MZ-JE~zNdlQ+DN z?>l0;+W|BzRIk!bQQ?ZMS&$9(P@Q)Y1Vo1@G%1cKWi=kV=P;7R&yCGG@Rg_w#jy56 zv$^{{_3&8^t*mpq)`Xs58JfaQQC|`2N;|C7l9(+Xl}zv6b<@6#un-Z^TvDia*7k=Y zMiN=DG(Mm%vUq?;{tgkms)qZxX4pv#)9FcN(W-1#yu5J()5H2QOa+h}oAvW|u50^v z;vyjoz_lcS09pFx9V*2egSxi@ovXxMw5~L@{pohN(UqsW=#=2wzN;pVYa$NF26#Tg z!E6fOvji_;O2BEOkfxpe;rREnv43QopWW1}nN9f>H!|7w(nx_vt)y<*r6=(Cq7C9ZOeRoaAmP2 zBuqdeV4L_ofl0|`r7&yhyJZn1(Jdh7FP^z&D8?ro(@}z95~C7khb(AyWA2|+ZD~1P z0jekT)LQn?;L!DgLJ0^yVY;qVaBa>c_$b$$KemBUu9ht>ouwFnn4&7>r4o5KPH}2V8>bLHq%!-Y`=`j`c=Tl(-9oPy@Hi8w@x>HF zD0%$nzGgbX{nztDT^?mL%NQPNTB{&Y3|d(9)PJD}to_sqrWj*-F`H|2K}#Yx)nq8+ zUf$=ZAku6*;K78`AkWvE%QLKP*cWFPKY7n6scPdlIGFYK7|@uw>gkqelrKPRvj0Sk zcz~AzB30-Lj0svD!yvsL-sXM;ya0+T3L$^kP!F4d3|yc*8=3HL{ZbG5o0j*F+HyQv zDa=K4`LR)4n@%oiU~+)Uo6Z{|@qx53N)E>;eTk~$d+A}>E^t}NtIOCWsXi5zhIdxS z_rxy2O+->g!!TPA92w0lC|Mavb}!12>sPZBe>?DzWx1*RxJ=E9bAV7?f~mPb-W)vQ zm(+)KD{g_yVZjH~f8|LFvdCO!>^p`_k;T!h+ITyUqy*7dKztfm`iNW0s8Q+JSA4rB zWA~O2qydNg*e~l`*i6%Ciiyp*44J>@xw9$U{Nm%(6^C&hqhw-NJq#;eeQolo!9==* 
zq^VpikDx&qYoOM?hvgc3E|xE2wm=y1zF|Lyh$|r^Sd5meuwmYGT-KThrv#l>^Mpr(MDTYN{r#lJ}yTWapMF72wJK0jg6L3qm6ndv$bplvXOo(OJH*V^NesdFLO^ zq%iYg$%cxYzA5K^^pz1>2I_$Of9H(@=r56p$$>7I1^lu5A{C1-POSf)RvIH-&`@il zVP*x>_fmu-wvj@qF>Y~+6cdQw6~%;dhn|ZM*x>cS*P1)HM1-OKwLJReT`c&-EK-q? z(!8bfWS5ge(Aw*Rd^0cl&V8n_#E-`xGZR@)^#dSC9+)Uk_1(WZe+I9%cq!%D;=a^< zG|rFiXfOyRtE*X{G6M>;K&mP>w`; zlJfZch-v;N1k|I9M;uTWttXH4i+kri7Bwxd>m0i3Wm=@$ZMh5 zGkF1@6weBO(S(R`yX^H6%TJqopPd8JQWP{v)I##UJ4H#^2_pX|J*zol$!keaG0V}* zD!!98g(&N>YSnzoctblebp#jL7Z~W|S;{jTPWww8acdjyIF$^+o!1_EOR;u}mCB5f zm(?ubPuD)@j?+c0^6u3+=vMYKYK6Ep!En7FpcDOl3d(V6l=F-6aad=f9sib7srcu` zK3h~mRSl3T<%xzF>7n0!<_KSBjkzovq(zpJp9C6rVeyqufzf8O&t>AFJ``Rc42}uO z1E~m%XsL54pjIRBbw0qwUA%|a7NY|hsLmN(eq3CNsiSWcIVx!y7{1*9wdbSi)>Tbr zE7rD{UQyH`N|R^JZkn?oD8KN`srpa1#~NZ>k6PT*TddAB$4C5`Sy*T;#haj1$w~Mx z$S7(!rP4YeIjL7s;)GLl$$BX;X<>=+LX_729Y}=K$Mgh;BmX2tGoaI_VTjHJ>T%g$ zVRTf-l1w5NSu$WjxjH#z0?5p6*blL85~Ez!mVF1`kTd%Ej}F7i zD;U$8PQT&d<>lo)oPW`_TPm;*sRUG6XbCE<7kgtFB9PUbrqOmYQp(a>WRrtfv%&M3 z%KmNaVG4MbyLajG=kA7Z+=8(H(sm$(xUVD@tQZ4`x0 zaxf>mPvFUy_adg<*rAHA^R&W;7)%_ekJ{{IXKybEYWgeRwQEC3LGp1REJFcJa$Waj zHB)(*5q5%Y6cJZxZH}l9`suEN{o%5txZmHnQxIhI60*fvhCA>bGkb*c!ct_E=o)g_ z{tM~sxcF42Ea8^PbxF-47^lW@tZpn=C~(H^$u$IOl8*OnESsju;w;6FMOr!EKQ?@E zFnPcg@w^m%?bI5)!k+x`;DY-b80*kV6x`uy&y6P^l)%7A4g`c3Pc&RMm`ai0BA=DB z&%}t-aTqUc6DmSBvfg78^pnhSOFo136AxuU;iO%Lyb=}s*3SLo=d^-OOTUJW!utj= zk!$RVV$GCt(Ni-ACQkzKvUt=OzdcZ|z&UKD$t^m?o8mm3!IFuYHxr0k55;-%etKt1 zNQy{P5Zagk@&%iI+F=2csSs}_#I=0Rye!>;f|QYuXM)o$#6t>IhU4|w6c^r%zc0nO zMf>J_3```>BCU`;<3oT@Cz~q!#=O>iMnmzM@uo?~<=TLqb((WK54J&4&Y!;s(&G0N zCiE|N5slJ+oW`NTSe`E^E7`1JEM_LfNt5^heX%Jr#UmiTPSP89RZ)pyp}>(brSMv+ zdGi|0^-?TKWPov~siKMJ4mBN0Msq=ny-&fajrGs<*7_s;+6TK9etb2Y7t{FPODh7y zAER?yD^}`MvB85MS?nIiS>KCL(WzR7LIvi4cUeHr=@|Ca%n`u!GFI37s^Sx7o@O%~ za&Hq3Va{kGds^-NKfml$QC%Gldgnqgo`m`2hvp$a%=fxqFMD?{c>%yiX=oc$qfp)m zYl!eWOrS>6=Y&4x-?dHnB_%PjlF(n)s1QpljVNi^3&|5p8=^7POcA+y`#&}jl@H7Qm%sZqDY1VPSR#z9V83Wmdz$Z!_VZh6{QOZJJv zO6C+5VIELQ8@R)i5U+R?H!oSL^?lT1fI)!mIXaB5Cj8bIN`^rjgiJaJE^v0o?z`CSajzDpWY7+4^LTcwsvMy zeUo;Ij4h=-Uv9E~_Ng8Dys6InsPE7_nbBOfU+N#8m8f`ov;)6=_LokNw!4OYoY+ zd@CoYlz)f)*&RwCF}{LeHH6K%M}wRnzrxD-z!yqs)2`&_o5~~vsHX7PDEIP;hFtjs zNJq39stsg#b)uo#s=}yn^O_#?yv_`cK=DH^iw|9>bXfBkcXOR#hTN^+|K_o zcNX>DKVJjt+Ck30|7CN2hsg_GAaqX<-w~5_-S7ri@0F6Ae0YVWC5Q6^E%ou|buv4+ zXvwjhA6m!0k2xd;p9k+gepmiC?dO}Y1xh(S$ZD~9E*z{0u_N1%47%%Du!bxG^+<{fvKJCg$gPSASHt z;KD<18QL-V=gvwZOtMciwr?PK6^#H&>X9_{sJ|BSfSQnq>aTX$Ci}|J6Byaa%r-G$ zndZ$woRC`^r*H}=7&kMYHhaO1S1!6`h*U!STF2Sn1fP|?E;f15acT@>J3rwe${)vo=HyFmjmHE4IPec;% zjy;diZkZSYZKsF+_?6g0?&0GrvV2T3B%rgIMjr!NQU96}B@8G?z{4xuo#sK_3%e5( z6x79Y)vCKx0YHV11R@+Ed8d%Pv@dd^shhFL0aN zyhOGIA<1|SEpg#_D-sFWlqHiso{sUfV7tZNxhqjgR;AUR{7vclw(Z;Rf>ju{3Qg$m z@$TKbuoA6dm#KGrG6m=rHbjgKxu4f(hgpgnR}ok?a*%r)QMRNMS}gk3#2k} zo3O-AyPJNvqBB;!a?*b%t9&x{Dgxg z*2XP_)3Rg?bnJHVw&DPJFPM?`bGhi_NWg~v%lhC_FIX_Q>-P5j|9s@kTfk#fSw}W# zt=z}`#`UZ0TRI|h5L<(qq>7^{&arn(gmmJMvZPvPkT&8DD-@(NXE~ z>K^GmDsS$o3sO{;?(_a#s8au}7MFV}tVc>R4dlk=m-+z~+(bf0q-!|?Dv>h10v_ma zl_&cg2kXJ9xh_J`xn|SlRbxW3dTC$nJy7rHWR_F-F;Oo=V&$NB)Zv1@FE72AL|zOQDd*&$FM-&7OA#)Sd&Yc@&eoL2Z^)S9npO zNFM>tJKj!SoE%0dtGM_F)z<3CRpckd(R+McxEvseNfg|f#taE`$ex$pWwWpNybJX} ziI6kc773D+iqxj6FR2q`Mp89;^XAR2=c@6fgKiKd9_jn{@B7+Xan(|p2NJIhz>uX4g&vf88{qQAW_Dv!c@Obis`8a(Wzs{+ zegwF}$V+2JNT(0pb&~q4v_6^Q@hrdrRu`N63-_I*?2wi1vZ4u6t9I=Q5_&#wI~AZB z1ll^jKpS*YZ2Y?(K?rk{nO@g&Ak6vnDTa2Wbk-0*y%oibX()k#eL>9r9jQb0S#&B2lDfe=7vtL=KcxOL}lX#i(#tkp0Mw>AyJ56nEZPzvAn>XNsHFQusF0ovg*-|H& zDcl#@18&AxV2y$=evS|qUSWcTJTZ`ci83-2ThdSIzCMqujM2q9g#cbIr-xF*HSOAa 
zF0bP^vct(p!y>s3g5dn{1EXmDWEXCL1@T$fj?8sprpPpj$O}FF$%29)iAm>*5Qej=XA?5sFYy5{Ll*Aib$?)#8FYJn}vdKu^sK(ZRmM#m|mE62`+Ii(|#OlxZl5YjUZ{&4%7# z2JBU#+aUIiI^w-=6|>9$MKYkw`uPRmJ+gO&YK7?(arzB-8gufpPf0HZU&LmeNM;o` zmzG<0q4P9bMI49xkycqGsgP-cKhdtlW;NbHPTqhA$)T2wNr{PP`7xP=<5r#p^T!!? zrgA5hJKW$~hXdeJ#+aL%Q<6@+uF>lXwJQ?dy(huKW6vcZoik+ubF&zFGTS^8 zM1yx9C_N2z>%v>(DNH!BoVyViCy7Wab4;~52*rGZJ4 z<(P|q{lw`A5gwUOYiWA~H3rGMij=CU9fgnN42&_NiW+Urw5dd03S7knlWp+&Y=Mxy z&!bA;V}>hh&~lN33uqpG=~4=Fd&9?$y%W(S=oE^*C~n~LmzS7DZK=>}y9;^);u_ec zGb8u3iYoCTCf={B3&%Y5^`u^sw|%3TS}6pyWm>^Ta1+Bz$7JjU4M|a6n#io$ztS#Q z2ir{bmW%)?o6~cXSS|ean};3s&r`ZqlJXH?a@Vd1J})t@x;)>_uB~S3n_X0Sr9R60 zA}p3`#gxBQv%-3^*a6x^AHL?nhTy!sJioR&8etB!tQwWgP?Y-{@}uX8iR$vnV7yhJF;i`)W3OCKqoJJNWfd9>9R4xw-o)?3=!3$374yY@7Hx- z%|0#l8ee?=Ior=3y7ma?+ecn(bhsgC9TES^U>S*7QIc$m@+T*zXx2O5>yaFDE6#9Y z-K0QSkIOY2x1GXb7DZ+zKe897^)($t70|-I@bKrGHuA#{g}5t|I8^MieSvX0ylpo)%t{B|)4 zQxLSGBCIK_vqfO#R#4XN|JDy)g-@1QrBQ2!dra)07`n0xXvBO=51^98y!yyBnS-Wu zzHhQ44oyJ_E%TA9Tmd0}Ku_<7slZ6&5+&`rrm*o#A>x|U|8s7i#``Ivn+rBks1OvF zVRK4(5~?uJ!oQ1^ahjr_c4Q>^XYWJ+wAE>XP zg5T6Z72hE(B}z*^M?xuJCF6q&C|+YWv~gXtd#g`rD(ePV=s02?fOEsWIb#l8-3STk zS{Tq59ALr@!DAaQI@2SG9g~stuw$3a)VPUONhFP3eQ}vDsmPY+6mjWU(NcTp4c)g0P(5b{T308nEZQpqwJgA)Yd8;ssTd6{)I&RVGMqT zds;64;^y-%4u|S+Fq!DGeBAdm(v}hAzAX`g_|shs%<8+d))|^v^(n=1@x@#jJ2uLY zbpV@2)tsUhc8Z45@GZ3uz41EOr!?Bis4<2IBG$s)-95hkEiU3Tx2n<1VFhnFY&dRW zQ?I5}1zn@K_-X6nHzdDsC@n2jeM<6xL2wFJ|1|B9{_nDy$4OsX&bS`p;C%h*ErXc# z82PN~aO|awTaEMvBQ!MfpFLA?addMXhB+q*XnEz~c=1fQp@mfKnKi#Wc89>$)G_he z&PYYXV@5=6e3oR|(~LH-_xzf7SFc|6eU^2xTBe;>Jm_`v8`-GU3k``4{}4O8kj^R+ zq5q+B*}pt}^>&M1R18LtIH+5?mPw>EHSAODdrHh!1hWw$PnfcO#-W}UmTa6X!- z6!d~qGR|>a{2L2x{S?$XvX`anoVjxgXr*0m z`Ve?xEl8C4^0{RgKqe{0Oo(RuIfL6|5(DlX#ogkrzL@IO1h(0dRB!dR_3*fL^pr2w zD9%T`4cGnP==LhB*%QmcxwnGVZF{t~#4B(lrJ%6stEQM!MQmR2${~IJvl5-bq7&_z~ZVd3c@vbU4E??e^Lh3Q`&+O;*dq44TbX+ba?HE^Zq6#aNf z!THmF4kP_iXA7NaIc>MD<;@j&^L-96ZFA(BYe@;w&1bMn!J31Ptw~VNKk(}eF9n%& zQF{E{f>760rfOCXKM*Q^{h(%hF;UF>6=m}rZ@q4}JrzrJ7sfbjH;52&3Tan>+NZCM z(&W03=2W9ah;xz7zAzbIyE*-dB-h1v}mRCfQ$^?j#~rEqLpk0_K19D%jgq%%?N z$UDsZQ77MBhOc2L4s94v%mQ4B6Rl9S0V}bQ{Y~=JH@jFIMEUHoJ}7`^&>-M0*LOvZ zW9XF-lCx$-mOsLXfN0#_rWIsI)u{M5{UT~Nsn>IQAo4+N_yhEh2}wpODxOng)I;N2 zLs|1kxQ<+Sd%?81a}RHfc}f-F6dk0QCYv@-{+U^H`B>7(W=4wC2K>`qP zrVedbe5@}!eWe!{wnM5qo(aXAOznlcw0uGPzS1!n2>`W#m?+{V%KmU3boS4gf7D47<+#%mB&SJUY>^XB{ty+CwVz9QO+(CKFoW|l;p05T-fnzx`L9<=)p!g7- zy8$%NuvrqEd+whiSdQq((hEQ7Hv!eT%=t1eaI_17XTM@-bI z5E9{b zF#H`auNWHiJ7HBar;X2TrG8&fxaZSS#L4?a3*s=E(JV2&}& z6pKeZ9L;Urv}Ak4`Vvs+N|gSp(ew~YX>GP}yEaX{>F5-yp8aJYyV+3UdtRa!k1aVA zQ|w+=6P)1Zw&rHD=K;tk{~!L<{vP?S$B#P2p}60Umo-CoI>-O0JRiM-3}nhGc`7l5 z888_mz(t%sNFYUd!vxbfe2s43$v85wHg?~mFBM7(eA2TARy&ykh}q@VKdZNF z-FoDjz)lb^J(Jz!TXu-dCalih#EP67HHGhJ4mB%cSJ&C^(hZDM$#MH7>&D&Cg3R?1 zcCuUC=Hv56{PL3098^V=4BWuwfSuAZ#LoHjRS+FFH+h784O9*gbErr8P1QoWh_m!C zc%gnD|0-cwZicJ!-jrP{viyNS4k`9Q^1ElNf)Y1 zSEt`4Tp48u>2tjLpHA+)f4j_j>4DDQ)9MMz#^YoQ2Dk_Kr*GJ2{cI2I`YPZg!uIE^ zq*CaD`t}X(vdhoUgilTVIhW4hR#>yB@~JP3yjK=oSSBk4Wz)Q+jWZdeDB2C?BIf?; zCBNKaVeX^J_vjom239-kljq~ed48*i$O3F*>eegX2^&(&TX2%+8(RMa6C#funb4~* zRxI>2ioMQ@E~C>V2RmnI(P#?Z1Fspge<(A4%+{%@GnnqHxq?Tj7A&o3;l!bW>U=rvqeMU<#0AnsRMzoJKCT5yx)5?EaRTYhEE#s*V1CyrT;ht zIiqSveetzKMl*p9DZCng2=yJZcg^h4F=KdaffT721G;2IcGRaS%41K`erZhJi`y`w zbp{>FhBYEh8QIUq64pOfxCOF(?EXiAHX)OIYF*>Q?~2}+gxKejH@$5GOFf25C$5Rq zLDT@XwFPZMy(n~rB{Ybj>~7TA8->FYo#hW69v(AYVf1Ftn>Vr8w8mDpI5))k$QX`g z4Qg7=@)#M;VAJW6uHjfjEcxH_V0(1Zf{vS`MJq<#Hw^u?>Pw)MYTW2eF?usyOOSUi zP?g3E2Ad{_5K=Ub^A9hvhN42tyQo+pO$E98(*;hq`+&SL8{RN z@k)HgFHE`)WZJ-Pv4hv@3U^oF0ZueOc$IA`k>nJsI5*xfkIfk(=R%w666RE#3QUN@ 
zVXkehmLq74PA1JxZrtPfcMMWe*$SPcs^A@Ml1=czSA9uWAOqvG4GfZ2`pa8@C{m3^ zo(`(==OrV3@f&0j73vzpC!$Z3WpU-^OZo%Hj_=jR!p<%I2RepJOYvfAH-uAMF>64s zaYZeawnBX1O)-^6C~s41{jrgoF1PFQa@&3x31r1`o20tloLMu@pp1P43~G*$9dBpp z`&6R=(8RGwXr0=%bV9xYPdumAFC-|%q{a^xr#`3azL%KCrdNKj;mAr%+Q9Go0M6VP z-1^6V#z^?MT=zi4DVDA{*dU~K2y8??CuYf#tQca7c-JfbDn6tLBQ>{+uaV3y88y{P zKcQ+*k0!0uZrJaSUkgoy z%pkElkz8fq$|a}GpX2{(3UX3W42{pAc)0_??6J@IT}mm9w;J`lYZPe|rKcI`%9M=@ zXrxU5KZKYDV~+D}bQAte%JXHjhA+Bwk!v<8su1%Xv=WD}As7jzqL&0D>@2M*nRJNI zt-zOwb%n?Q7RUCD8aJ6IlcVMsUmb{)8>#215BnPeskA=G$))e-&fa|aV6W(Yi8whU zOR;Ja$(6Xw!V->uB2f>_eM;+F``BaKu??&wQSs818w!ao5in=V3#LBf@MBVUmPD?G z(m>j1ibAr>GA;^Awr9HaoqA8m49hr9j9rG{QdJ$!il=0r$e=`jPm*Cpl^L=#S-0W! z{>_STpC!rI_T(G-ySfy`oRWg(g~Dp9{XW<1+A`?zCxHQ520gE{@x)nV**BZ$BPTFN z_rCNpgRO{dosz0|;BPPN1IUA=>)t(s5tfS)oMePBCtqoJ(OeoH)*ytV`#zykt`YX< ztatAV*K-j3JS~; za4=5cm;_C7yCBCTe!^EI*ZVb@DX=)=B2;wqA7U}Br%LD-1LiQPd;y=u`LdV$o5YZw|J2Qb2sjhcD4wZ+lva;}K$QOM=B&tK6%1%_9 zSHN+qSxG>Hl|W#|{8~(KAxs@{&jesTJC0MG`DFC8f-k@N>gL@Cgejb;4}te~+)F^=b+(#}}kdqFa0Wp`zzj^IMoQ+!#qGojZ|R=a^;%ni$!#6vC%ktQ(N z44pryxUmP%sFduOD_=m^>uXzlajhpAA`8cWs%Dus_V!G|zeb#2@UKbN``rrq{mVZh zaz_m~Y&mS&h?m2+J&sYI6+iaCie878`Q^P-R_{`@WxM~1aWf({?!F!Q)2<20hAKNd zpC4rxyz9J~#douZ$ERM_dt1~L*Q0V&f^}Nqh(CALYgZhtPa52D@P-N%{|*d{nMK&9 zPMRMc7WSYbQsdBtgoNv0LQkLt=jIuQnpm&8)?GLG#o*w%*ZIJGSHSut3*|MNc`%J9#=X5-zvAh*(!M;`!=H5LVp~XjO)7c;T{W2bZa~;o0eywdA7^ z+{j#Xn3qW8taVN-G@LYf@>5_Wf69&3EvYMuVuo<>d%t+`V*bK~y~+1Gjpu3)!6nh2 zg|h0ao?IQqQnRWce}Df(qU2L3=`)A-qy0^+Jh8(LNZ;mBWNd7#)tZQj&iXY? zy)4-Xtb-S0G{Jlg)~8+d#=!RZb3Jr(kf9}(?Va<4f$rPy{0vbF_Sy6{y}uoa^Cc78 z-ub&GRh{jr?crd({^hk*MUDrq!BZta^Jqw&%!fX{wcl?%HRGzPt3yIVhd+4mzzzWi zYroISuk1T;;5x~-Yen5{WVy*D5pT%*WxDG>09(zszx$7hLd#K9%G>lk2;0|Ne$aJ-vU24(dwj;YUDjXzJ$B z#n{WPeLno8fdBPFMonyIV_SgOn651iMxy?=DuSTPd`Kd;IwF-Wjo3k$KXanvGLyPI3@S=B=5 zFFYF&;YZkbeJQI0qf&j4&u%R0tn#%_iI;i8opiR-^u@Ho{^+Ei24yk!((O!;$LtFi zF09U%gGk+@+_Gh4dvIL*D5jH2iFa@jJBfbNr%jud zki<;PBoOPbD}p@dGU+kt=FOYQZX=hwx%p>gWLVJG=H=&KBPNINRoo9)-k(abdH04b z|JL$2nfqL;IwJN7x`CRoQwu`ZKuJ7BkdtHY?Ebd8rpAx*^7{DOIss?(C;_>@d(K3) zzq`$;HztMFg9s1FK!Ja|r{-_l5}TeKN%?i>f>4v@R|hWp5yLh8Sq!k@XhUkRVRPSHpf6{ue(ufDB6)~2G@`f^unOK&Ah1kc-OvafPBPo$>me*5N^!DwK*H8eD+ zvxG>;=gY0<9<|i^l>9LhK}^#5b~gYel@l==L(2A>+Zy;?eqT~o>-D0q&%-@Edwl)% zbR#39hpTgq-goS?;eq+(cln8k&q7Y0R%KgNH_wR=-KRZi>c07s2ZT+AskzPmvodkT z0*OABw%;f>UuNW1Ju2zh;ps2tEPA(Kq)qaKd3`^$x99 zV$wEl?4bEabbY-Ik4@3d`lij9VD+;2QLEbyF&_EyHFcgtcdeVRR~NzgkBef1l&76Ff$~hS_d?C4;P(d(uE8-FK>jJnjMJV z+A4*dPf4jV^n~j-v=fun`;A>Z6%!<9KFE>X{*!AHhYcH+8IyX8bG)x7D{4kj6ZV6a z(=O`2c++XaLtQ09@X4pAMn|htus*G1(%|T%pQk-yKDIY?R+ZT?oHoAh?%1$MruX?8 z&^hK^WND8}fBkwjF(kzPoXge@z4rDzv%u2k^o4ngnr@mW=N~?N_;dfwKUeLYGx~TWJBI|>R4{9Lkb-t7AH_LOMv0sd3oRbDlo z*`7mP+m|33@$1C`G=4cO{Z$-)oeBtoM7vMh@4-1)DZ9#r{Lmu3B|V{rdFj z@_3H~Qg^Z8H4~2PUAG*hXjO?i_(LMZu3D&PUgt9`jCXSSylqeH_~_ z6@UEE&FN-*UkWfApOX9@sjovRZ6v?ZQCFmN|M+8Cu8w`RswykD^>6)a>ng_18)xWk zI1v>!I!Cwh;r(PUmrGV3Uww1>LjK}5-Qyt$_^#dtny9R-%=DV4?R8tXak``9sWW*W z|LOqcb=|ezT_rBQ*J6y>p25MSs-ezIY4+m(?qKTq8g2Khl1?9gL8VKlVeVgdd07@P zFN1361d+Z+OW9SXHNGw{FJA>M^6Y=!^9hH(TJN$~ZtO;?6jQ2 zSq0qN{{n{WM4WM54wGBG-ut54Qq!%il)(?hmw$(UQIiJ0*jiOp^@L=&la%Y`i4IMj zE zlFu~0qT*vKnO0%MdF;}KciH;(x?Tp&_)`jo2i3_jF)=*)zmdYOp@*}lDNdbqj=U?M zgl#$70cEw=r;?o-HHy+BJStf&EId4fsFphEZ@_xiq~Fu-4yF71y3oeQrn!8~rtQR> z4N)(M&VIc$)F`KVVBxW&+gz=I5)&_6nD9mCekyz-x!|-r(csN%I5AJDi+190I|Nl@ z|H#P5_b;58UyWY#@<~r9ZosXjOn z8H+t-wsMQ>$J_H(Cv(~3%5H6Zay!%?^No$WQD5F_c=K;=(JBV6uF3VF-l@%v{rEeN z>!apbi_8Yu6OQ>TUueg6j82O5?BP=dQSSQDtVJYvRt{MGX-H}uwe zcYzk>?74HhQ1lHX-uCF&vEyALA+ussu-N!@9UA2R{d#6@b{Wsp)ySDt9ihd%jobj` 
zkQE>Q)MMjL3m-3CxzNhW9>DeP!-pzR^SBlG_at*N+BY4^LI>PBjKaw#of*uSJ3Z?A#XtQYPvM1Z diff --git a/docs/source/_static/encoded_recon_space.svg b/docs/source/_static/encoded_recon_space.svg new file mode 100644 index 000000000..b90c8775a --- /dev/null +++ b/docs/source/_static/encoded_recon_space.svg @@ -0,0 +1,191 @@ + + + +KData.dataencoded spacerecon spacexk0 (readout)k1 (phase encoding)x diff --git a/docs/source/_static/favicon.svg b/docs/source/_static/favicon.svg new file mode 100644 index 000000000..2176613e0 --- /dev/null +++ b/docs/source/_static/favicon.svg @@ -0,0 +1,108 @@ + + + + + + + diff --git a/docs/source/_static/logo_white.svg b/docs/source/_static/logo_white.svg index d0d5b3527..dd42a7e55 100644 --- a/docs/source/_static/logo_white.svg +++ b/docs/source/_static/logo_white.svg @@ -8,7 +8,7 @@ version="1.1" id="svg1" inkscape:version="1.3.2 (091e20e, 2023-11-25, custom)" - sodipodi:docname="mrpro_logo_text_n_scanner_white.svg" + sodipodi:docname="logo_white.svg" xml:space="preserve" inkscape:export-filename="mrpro_white_text.png" inkscape:export-xdpi="243" @@ -18,7 +18,7 @@ xmlns="http://www.w3.org/2000/svg" xmlns:svg="http://www.w3.org/2000/svg">MRpr + id="tspan1">MRpr diff --git a/docs/source/_templates/class_template.rst b/docs/source/_templates/class_template.rst index aab3811f1..57ea79656 100644 --- a/docs/source/_templates/class_template.rst +++ b/docs/source/_templates/class_template.rst @@ -4,9 +4,8 @@ .. autoclass:: {{ objname }} :members: + :special-members: '__init__, __call__, __matmul__, __add__, __mul__, __or__, __and__, __radd__, __rmul__, __rmatmul__, __ror__, __rand__, __truediv__, __eq__, __pow__' :inherited-members: Module :show-inheritance: - {% block methods %} - .. automethod:: __init__ - {% endblock %} + diff --git a/docs/source/api.rst b/docs/source/api.rst index 63cdb3861..f5eb05f9c 100644 --- a/docs/source/api.rst +++ b/docs/source/api.rst @@ -2,57 +2,21 @@ API === This is the MRpro Application Programming Interface (API) documentation. +It is designed to assist developers and researchers in understanding and utilizing the core functionalities of MRpro effectively. +Each module includes detailed descriptions and function signatures to guide you through their usage. +This documentation is generated from the docstrings, which are also available in your IDE. -MRpro Algorithms ----------------- +Navigate through the sections below for more information about each module and its capabilities. .. autosummary:: :toctree: _autosummary :template: module_template.rst :recursive: - mrpro.algorithms - - -MRpro Data ----------- - -.. autosummary:: - :toctree: _autosummary - :template: module_template.rst - :recursive: + mrpro.algorithms mrpro.data - - -MRpro Operators ---------------- - -.. autosummary:: - :toctree: _autosummary - :template: module_template.rst - :recursive: - mrpro.operators - - -MRpro Phantoms --------------- - -.. autosummary:: - :toctree: _autosummary - :template: module_template.rst - :recursive: - mrpro.phantoms - - -MRpro Utilities ---------------- - -.. autosummary:: - :toctree: _autosummary - :template: module_template.rst - :recursive: - mrpro.utils + diff --git a/docs/source/conf.py b/docs/source/conf.py index c667bb735..705e7fffb 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -1,89 +1,270 @@ # Configuration file for the Sphinx documentation builder. 
-#
-# For the full list of built-in configuration values, see the documentation:
-# https://www.sphinx-doc.org/en/master/usage/configuration.html
-
-# -- Path setup --------------------------------------------------------------
-
-# If extensions (or modules to document with autodoc) are in another directory,
-# add these directories to sys.path here. If the directory is relative to the
-# documentation root, use os.path.abspath to make it absolute, like shown here.
-
+import ast
+import dataclasses
+import inspect
 import os
+import re
 import shutil
 import sys
 from pathlib import Path
+from typing import get_overloads

 import nbformat
+from sphinx.ext.autodoc import AttributeDocumenter, ClassDocumenter, MethodDocumenter, PropertyDocumenter
+from sphinx.util.inspect import isclassmethod, isstaticmethod, signature, stringify_signature
 from sphinx_pyproject import SphinxConfig

-from mrpro import __version__ as project_version
-config = SphinxConfig("../../pyproject.toml", globalns=globals(), config_overrides = {"version": project_version})
+from mrpro import __version__ as project_version
+
 sys.path.insert(0, os.path.abspath('../../src'))  # Source code dir relative to this file

-# -- Project information -----------------------------------------------------
-# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
+config = SphinxConfig('../../pyproject.toml', globalns=globals(), config_overrides={'version': project_version})
+
 project = name
 copyright = '2023, Physikalisch-Technische Bundesanstalt (PTB) Berlin'
 author = author
 version = version

-# -- General configuration ---------------------------------------------------
-# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
-
 extensions = [
     'sphinx.ext.doctest',
     'sphinx.ext.autodoc',
     'sphinx.ext.autosummary',
-    'sphinx.ext.viewcode',
+    'sphinx_github_style',
     'sphinx.ext.napoleon',
     'myst_nb',
     'sphinx.ext.mathjax',
-    'sphinx-mathjax-offline'
+    'sphinx-mathjax-offline',
+    'sphinx.ext.intersphinx',
+    'sphinx_autodoc_typehints',
+    'sphinx.ext.autosectionlabel',
+    'sphinx_copybutton',
+    'sphinx_last_updated_by_git',
 ]
+
+
+intersphinx_mapping = {
+    'torch': ('https://pytorch.org/docs/stable/', None),
+    'numpy': ('https://numpy.org/doc/stable/', None),
+    'ismrmrd': ('https://ismrmrd.readthedocs.io/en/latest/', None),
+    'einops': ('https://einops.rocks/', None),
+    'python': ('https://docs.python.org/3', None),
+    'pydicom': ('https://pydicom.github.io/pydicom/stable/', None),
+    'pypulseq': ('https://pypulseq.readthedocs.io/en/master/', None),
+    'torchkbnufft': ('https://torchkbnufft.readthedocs.io/en/stable/', None),
+    'scipy': ('https://docs.scipy.org/doc/scipy/', None),
+    'ptwt': ('https://pytorch-wavelet-toolbox.readthedocs.io/en/latest/', None),
+    'typing-extensions': ('https://typing-extensions.readthedocs.io/en/latest/', None),
+}
+
+autosectionlabel_prefix_document = True
+napoleon_use_param = True
+napoleon_use_rtype = False
+typehints_defaults = 'comma'
+typehints_use_signature = True
+typehints_use_signature_return = True
+typehints_use_rtype = False
+typehints_document_rtype = False
 autosummary_generate = True
 autosummary_imported_members = False
 autosummary_ignore_module_all = False
 autodoc_member_order = 'groupwise'
-default_role = 'py:obj'
+autodoc_preserve_defaults = True
+autodoc_class_signature = 'separated'
 templates_path = ['_templates']
 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
 source_suffix = {'.rst': 'restructuredtext', '.txt': 'restructuredtext', '.md': 'markdown'}
-
 myst_enable_extensions = [
-    "amsmath",
-    "dollarmath",
+    'amsmath',
+    'dollarmath',
 ]
-nb_execution_mode = "off"
-nb_merge_streams = True
-
-# -- Options for HTML output -------------------------------------------------
-# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
-
+nb_execution_mode = 'auto'
+nb_output_stderr = 'remove'
+nb_output_stdout = 'remove'
+nb_execution_timeout = 120
+html_favicon = '_static/favicon.svg'
 html_theme = 'sphinx_rtd_theme'
 html_title = name
 html_show_sphinx = False
+html_show_sourcelink = False
 html_static_path = ['_static']
 html_css_files = ['custom.css']
 html_logo = '_static/logo_white.svg'
 html_sidebars = {'**': ['search-field', 'sidebar-nav-bs']}
 html_theme_options = {
     'logo_only': True,
-    'pygment_light_style': 'default',
-    'pygment_dark_style': 'github-dark',
-    'show_toc_level': 3,
-    'icon_links': [
-        {
-            # Label for this link
-            'name': 'GitHub',
-            # URL where the link will redirect
-            'url': 'https://github.com/PTB-MR/mrpro',
-            # Icon class (if "type": "fontawesome"), or path to local image (if "type": "local")
-            'icon': 'fa-brands fa-github',
-        },
-    ],
+    'collapse_navigation': False,
+    'navigation_depth': -1
 }
+html_context = {
+    'display_github': False,
+    'github_user': 'PTB-MR',
+    'github_repo': 'mrpro',
+    'github_version': 'main',
+}
+linkcode_blob = html_context['github_version']
+linkcode_link_text = '[source]'
+default_role = 'py:obj'
+pygments_style = 'default'
+
+
+def get_lambda_source(obj):
+    """Convert lambda to source code."""
+    source = inspect.getsource(obj)
+    for node in ast.walk(ast.parse(source.strip())):
+        if isinstance(node, ast.Lambda):
+            return ast.unparse(node.body)
+
+
+class DefaultValue:
+    """Used to store default values of dataclass fields with default factory."""
+
+    def __init__(self, value):
+        self.value = value
+
+    def __repr__(self):
+        """This is called by sphinx when rendering the default value."""
+        return self.value
+
+
+def rewrite_dataclass_init_default_factories(app, obj, bound_method) -> None:
+    """Replace default fields in dataclass.__init__."""
+    if (
+        'init' not in str(obj)
+        or not getattr(obj, '__defaults__', None)
+        or not any(isinstance(d, dataclasses._HAS_DEFAULT_FACTORY_CLASS) for d in obj.__defaults__)
+    ):
+        # not a dataclass.__init__ method with default factory
+        return
+    parameters = inspect.signature(obj).parameters
+    module = sys.modules[obj.__module__]
+    class_ref = getattr(module, obj.__qualname__.split('.')[0])
+    defaults = {}
+    for field in dataclasses.fields(class_ref):
+        if field.default_factory is not dataclasses.MISSING:
+            if field.name not in parameters:
+                continue
+            if hasattr(field.default_factory, '__name__') and field.default_factory.__name__ == '<lambda>':
+                defaults[field.name] = DefaultValue(get_lambda_source(field.default_factory))
+            elif hasattr(field.default_factory, '__name__'):
+                defaults[field.name] = DefaultValue(field.default_factory.__name__ + '()')
+            else:
+                continue
+    new_defaults = tuple(
+        defaults.get(name, param.default) for name, param in parameters.items() if param.default != inspect._empty
+    )
+    obj.__defaults__ = new_defaults
+
+
+def autodoc_inherit_overload(app, what, name, obj, options, sig, ret_ann):
+    """Create overloaded signatures."""
+    if what in ('function', 'method') and callable(obj):
+        try:
+            overloads = get_overloads(obj)
+        except:
+            return (sig, ret_ann)
+        if overloads:
+            kwargs = {}
+            if app.config.autodoc_typehints in ('none', 'description'):
+                kwargs['show_annotation'] = False
+            if app.config.autodoc_typehints_format == 'short':
+                kwargs['unqualified_typehints'] = True
+            type_aliases = app.config.autodoc_type_aliases
+            bound_method = what == 'method'
+            sigs = []
+            for overload in overloads:
+                if hasattr(overload, '__func__'):
+                    overload = overload.__func__  # classmethod or staticmethod
+                overload_sig = signature(overload, bound_method=bound_method, type_aliases=type_aliases)
+                sigs.append(stringify_signature(overload_sig, **kwargs))
+            return '\n'.join(sigs), None
+
+
+class CustomClassDocumenter(ClassDocumenter):
+    """Custom Documenter to reorder class members."""
+
+    def sort_members(self, documenters: list[tuple['Documenter', bool]], order: str) -> list[tuple['Documenter', bool]]:
+        """Sort the given member list with custom logic for `groupwise` ordering."""
+        if order == 'groupwise':
+            if not self.parse_name() or not self.import_object():
+                return documenters
+            # Split members into groups (non-inherited, inherited)
+            static_methods = [], []
+            class_methods = [], []
+            special_methods = [], []
+            instance_methods = [], []
+            attributes = [], []
+            properties = [], []
+            other = [], []
+            others_methods = []
+            init_method = []
+            call_methods = []
+
+            for documenter in documenters:
+                doc = documenter[0]
+                parsed = doc.parse_name() and doc.import_object()
+                inherited = parsed and doc.object_name not in self.object.__dict__
+                if isinstance(doc, AttributeDocumenter):
+                    attributes[inherited].append(documenter)
+                elif isinstance(doc, PropertyDocumenter):
+                    properties[inherited].append(documenter)
+                elif isinstance(doc, MethodDocumenter):
+                    if not parsed:
+                        others_methods.append(documenter)
+                        continue
+                    if doc.object_name == '__init__':
+                        init_method.append(documenter)
+                    elif doc.object_name in ('__call__', 'forward', 'adjoint'):
+                        call_methods.append(documenter)
+                    elif doc.object_name[:2] == '__':
+                        special_methods[inherited].append(documenter)
+                    elif isclassmethod(doc.object):
+                        class_methods[inherited].append(documenter)
+                    elif isstaticmethod(doc.object):
+                        static_methods[inherited].append(documenter)
+                    else:
+                        instance_methods[inherited].append(documenter)
+                else:
+                    other[inherited].append(documenter)
+                    continue
+            # Combine groups in the desired order
+            constructors = init_method + class_methods[0] + class_methods[1]
+            call_methods = sorted(call_methods, key=lambda x: x[0].object_name)
+            methods = (
+                call_methods
+                + instance_methods[0]
+                + instance_methods[1]
+                + others_methods
+                + static_methods[0]
+                + static_methods[1]
+                + special_methods[0]
+                + special_methods[1]
+            )
+            return (
+                constructors
+                + attributes[0]
+                + attributes[1]
+                + properties[0]
+                + properties[1]
+                + methods
+                + other[0]
+                + other[1]
+            )
+        else:
+            return super().sort_members(documenters, order)
+
+
+def replace_patterns_in_markdown(app, docname, source):
+    """Replace patterns like `module.class` with {any}`module.class` in Markdown cells."""
+    if '_notebooks' not in docname:
+        return
+    notebook = nbformat.reads(source[0], as_version=4)
+    for cell in notebook.cells:
+        if cell['cell_type'] == 'markdown':
+            # Replace `text` with {py:obj}`text`; leave ``text`` as is.
+            cell['source'] = re.sub(r'(?<!`)`([^`]+)`(?!`)', r'{py:obj}`\1`', cell['source'])
[... remainder of conf.py and the beginning of the docs/source/contributor_guide.rst diff did not survive extraction ...]
-   using `jupytext `_ . See their documentation for more details.
+   an online repository (e.g. zenodo) such that it can be automatically downloaded.
+   Individual cells should be indicated with ``# %%``. For markdown cells use ``# %% [markdown]``.
+   The translation from python script to jupyter notebook is done in pre-commit (locally and on GitHub)
+   using `jupytext `_ . See its documentation for more details.
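+   For orientation, a percent-format script is a plain Python file in which special comments delimit
+   the cells. A minimal sketch (the file content below is purely illustrative)::
+
+       # %% [markdown]
+       # # My new example
+       # Free text that will become a markdown cell in the notebook.
+
+       # %%
+       # A code cell: plain Python, executed when the documentation is built.
+       import mrpro
+
+       print(mrpro.__version__)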
 After translating the scripts to notebooks, the notebooks are run and their output is converted to html and added
 to this documentation in the *Examples* section.
-We are not using notebooks directly because if contributors forget to clear all cells prior to committing then the
-content of the notebook is also version controlled with git which makes things very messy.
-
-**mrpro/src**
-    Main code for this package
+All output cells in the notebooks are automatically cleared, and only cleared notebooks should be added to the repository.

 **tests**
    Tests which are automatically run by pytest.
    The subfolder structure should follow the same structure as in *mrpro/src*.
-
-src/mrpro structure
-===================
-**algorithms**
+**src/mrpro/algorithms**
    Everything which does something with the data, e.g. prewhiten k-space or remove oversampling.

-**data**
+**src/mrpro/data**
    All the data classes such as ``KData``, ``ImageData`` or ``CsmData``.
    As the name suggests, these should mainly contain data and meta information.
    Any functionality beyond what is absolutely required for the classes should be put as separate functions.

-**operators**
+**src/mrpro/operators**
    Linear and non-linear algorithms describing e.g. the transformation from image to k-space (``FourierOp``),
    the effect of receiver coils (``SensitivityOp``) or MR signal models.

-**phantoms**
+**src/mrpro/phantoms**
    Numerical phantoms useful to evaluate reconstruction algorithms.

-**utils**
+**src/mrpro/utils**
    Utilities such as spatial filters and also more basic functionality such as applying functions serially along
    the batch dimension (``smap``).

@@ -92,14 +84,14 @@
 You can use VSCode's test panel to discover and run tests. All tests must pass before a PR can be merged.

 Building the Documentation
 ==========================
-You can build the documentation locally via running ```make html``` in the docs folder. The documentation will also be build in each PR and can be viewed online.
+You can build the documentation locally by running ``make html`` in the docs folder. The documentation will also be built in each PR and can be viewed online.
 Please check how your new additions render in the documentation before requesting a PR review.

 Adding new Examples
 ===================
-New exciting applications of MRpro can be added in ```examples``` as only ```.py``` files with code-cells. These can, for example, be used in VSCode with the python extension, or in JupyterLab with the `jupytext `_ extension.
-An automatic workflow at github will create notebooks and pages in the documentation based on the python scripts.
+New exciting applications of MRpro can be added in ``examples`` as ``.py`` files with code-cells only. These can, for example, be used in VSCode with the python extension, or in JupyterLab with the `jupytext `_ extension.
+A pre-commit action will convert the scripts to notebooks. Our documentation build will pick up these notebooks, run them, and include them with outputs in the documentation.
 The data to run the examples should be publicly available and hosted externally, for example at zenodo.
 Please be careful not to add any binary files to your commits.
diff --git a/docs/source/examples.rst b/docs/source/examples.rst
index 8d3937d35..f16dd8759 100644
--- a/docs/source/examples.rst
+++ b/docs/source/examples.rst
@@ -1,10 +1,11 @@
+========
 Examples
 ========

 .. |colab-badge| image:: https://colab.research.google.com/assets/colab-badge.svg
    :target: https://colab.research.google.com/github/PTB-MR/mrpro

 Notebooks with examples of how you can use MRpro.
-Each notebook can be launched in Colab |colab-badge|
+Each notebook can be launched in Colab: |colab-badge|

 .. toctree::
    :maxdepth: 1
diff --git a/docs/source/faq.rst b/docs/source/faq.rst
index 1aeb712fa..fb46181a2 100644
--- a/docs/source/faq.rst
+++ b/docs/source/faq.rst
@@ -10,7 +10,8 @@
 The data was acquired with partial Fourier along the readout (k0) direction.
 In addition, the acquired resolution along the phase encoding direction was lower than for the final image.
 I.e. the highest sampled k-space frequency along k1 was lower than required based on the desired image resolution.

-.. image:: _static/encoded_recon_space.png
+.. image:: _static/encoded_recon_space.svg
+   :scale: 150%
    :alt: Relationship between acquired data, encoding-space, recon-space and image
    :align: center

@@ -22,7 +23,7 @@
 ensure the correct voxel size.
 Finally the acquired data was also oversampled by a factor of 2 along the readout. This then needs to be cropped.
 The recon-space is therefore half the encoded-space along k0.

-More information on this can also be found here: `MRD-docu `_
+More information on this can also be found `here `_

 Some parameters in my MRD-file are wrong. What shall I do?
 ==========================================================
@@ -53,7 +54,6 @@
 script such as e.g.:

     for acq in acquisitions:
         ds.append_acquisition(acq)
     ds.close()
-    ...

 the header information of the raw-data file can be adapted. This can also be used to add trajectory information
 to the raw-data or select only a subset of the acquired data.
diff --git a/docs/source/index.rst b/docs/source/index.rst
index 294847210..0cef77c40 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -2,24 +2,50 @@
 .. image:: _static/logo.svg
    :align: center
-   :width: 400
-|
+   :width: 300
+
+.. |colab-badge| image:: https://colab.research.google.com/assets/colab-badge.svg
+   :target: https://colab.research.google.com/github/PTB-MR/mrpro

 Welcome to MRpro's documentation!
 =================================
+MR image reconstruction and processing for PyTorch
+
+| **Source code:** ``_
+| **Bug reports:** ``_
+| **Try it out:** |colab-badge|
+| **See our examples:** :doc:`examples`
+
+Main Features
+-------------
+- **Standard file formats**
+  MRpro supports the ISMRMRD format for MR raw data and DICOM for image data.
+
+- **PyTorch integration**
+  All data containers utilize PyTorch tensors to ensure easy integration with PyTorch-based network schemes.
+
+- **Cartesian and non-Cartesian trajectories**
+  MRpro can reconstruct data obtained with Cartesian and non-Cartesian sampling schemes (e.g., radial, spiral).
+  It automatically detects whether FFT or nuFFT is required to reconstruct the k-space data.
+
+- **Pulseq support**
+  If the data acquisition was carried out using a pulseq-based sequence, the seq-file can be provided to MRpro,
+  which will automatically calculate the used trajectory.
+
+- **Signal models**
+  A range of MR signal models is implemented (e.g., T1 recovery, WASABI).
+
+- **Regularized image reconstruction**
+  Regularized image reconstruction algorithms, including wavelet-based compressed sensing and total variation
+  regularized image reconstruction, are available.
+
+
+Content
+=======
 .. toctree::
-   :maxdepth: 1
+   :maxdepth: 2

-   api
-   examples
    user_guide
+   examples
    contributor_guide
+   api
    faq

-Indices and tables
-==================
-
-* :ref:`genindex`
-* :ref:`modindex`
-* :ref:`search`
diff --git a/docs/source/user_guide.rst b/docs/source/user_guide.rst
index 1c8b81a55..d7a6b58e7 100644
--- a/docs/source/user_guide.rst
+++ b/docs/source/user_guide.rst
@@ -3,9 +3,28 @@
 User Guide
 ==========
 MRpro is an MR image reconstruction and processing framework specifically developed to work well with pytorch.
-The data classes utilize torch tensors for storing data such as MR raw data or reconstructed image data.
+The data classes utilize `torch.Tensor` for storing data such as MR raw data or reconstructed image data,
+operators are implemented as `torch.nn.Module`.
 Where possible batch parallelisation of pytorch is utilized to speed up image reconstruction.

+Installation
+============
+
+MRpro is available on `pypi `_ and can be installed with::
+
+    pip install mrpro
+
+To install additional dependencies used in our example notebooks, use::
+
+    pip install mrpro[notebook]
+
+You can also install the latest development version directly from GitHub using::
+
+    pip install "git+https://github.com/PTB-MR/mrpro"
+
+
+Usage
+=====
 MRpro is designed to work directly from MR raw data using the `MRD `_ data format.
 A basic pipeline would contain the following steps:
@@ -19,51 +38,72 @@ A basic pipeline would contain the following steps:
    :target: https://colab.research.google.com/github/PTB-MR/mrpro

 The following provides some basic information about these steps.
-For more detailed information please have a look at the notebooks in the *examples* folder.
-
-You can easily launch notebooks via the |colab-badge| badge and give the notebooks a try without having to
-install anything.
+For more detailed information please have a look at the :doc:`examples`.
+You can easily launch notebooks via the |colab-badge| badge and give the notebooks a try.

 Reading in raw data
-===================
-Reading in raw data from a MRD file works by creating a ``KData`` object and using the class method ``from_file``.
-``KData`` contains the raw k-space data, the header information obtained from the MRD file and the k-space trajectory.
-To ensure the trajectory is calculated correctly, a ``KTrajectoryCalculator`` needs to be provided.
+-------------------
+Reading in raw data from an MRD file works by creating a `mrpro.data.KData` object and using the class method `~mrpro.data.KData.from_file`.
+`~mrpro.data.KData` contains the raw k-space data, the header information obtained from the MRD file and the k-space trajectory.
+To ensure the trajectory is calculated correctly, a `~mrpro.data.traj_calculators.KTrajectoryCalculator` needs to be provided.
 The trajectory can either be calculated based on MRpro functionality (e.g. for a 2D radial sampling scheme), read out
-from MRD or calculated from a `pulseq `_ file.
+from MRD or calculated from a `pulseq `_ file. See `~mrpro.data.traj_calculators`
+for available trajectory calculators and :doc:`_notebooks/comparison_trajectory_calculators` for an example.
+
+
+.. note::
+   In MRpro, we use the convention ``(z, y, x)`` for spatial dimensions and ``(k2, k1, k0)`` for k-space dimensions.
+   Here, `k0` is the readout direction, `k1` and `k2` are phase encoding directions.
+   The full shape of multi-slice 2D k-space data, for example, is ``(other, coil, 1, k1, k0)``, where `other` covers the different slices.
+   In general, `other` can be any number of additional dimensions.
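+As a minimal sketch of this first step (the file name is hypothetical, and the radial trajectory
+calculator is assumed to use its default angular increment)::
+
+    import mrpro
+
+    # Read the raw data and compute a 2D radial trajectory for each readout.
+    kdata = mrpro.data.KData.from_file(
+        'radial_scan.mrd',
+        mrpro.data.traj_calculators.KTrajectoryRadial2D(),
+    )
+    print(kdata.data.shape)  # (other, coil, k2, k1, k0), see the note above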
 .. note::
-   The trajectory is expected to be defined within the space of the ``encoding_matrix``, e.g. if the
-   ``encoding_matrix`` is defined as (z=1, y=256, x=256), then a fully sampled Cartesian trajectory without partial
-   echo or partial Fourier is expected to be within [-128, 127] along both readout and phase encoding.
+   The trajectory is expected to be defined within the space of the `encoding_matrix`, e.g. if the
+   `encoding_matrix` is defined as ``(z=1, y=256, x=256)``, then a fully sampled Cartesian trajectory without partial
+   echo or partial Fourier is expected to be within ``[-128, 127]`` along both readout and phase encoding.

 Preparation for reconstruction
-==============================
+------------------------------
 MRpro provides a range of functionality to prepare the data for image reconstruction such as:

 * Noise prewhitening
 * Removal of oversampling along readout direction
 * Calculation of the density compensation function
 * Estimation of coil sensitivity maps
-* ...
+* Fourier transformation

 Data reconstruction
-===================
-As a first step for the reconstruction an acquisition model consisting of linear and non-linear operators needs to
-be created. A simply acquisition model could consist of a ``SensitivityOp`` describing the effect of different
-receiver coils and ``FourierOp`` describing the transform from image space to k-space taking the sampling scheme
+-------------------
+MRpro provides a flexible framework for MR image reconstruction. We provide some high-level functions for commonly used
+reconstruction algorithms in `mrpro.algorithms.reconstruction`, such as
+`~mrpro.algorithms.reconstruction.RegularizedIterativeSENSEReconstruction`. We also provide all building blocks to
+create custom reconstruction algorithms and do manual reconstructions.
+
+As a first step for a new reconstruction, an acquisition model consisting of linear and non-linear operators can be created.
+A simple acquisition model could consist of a `~mrpro.operators.SensitivityOp` describing the effect of different
+receiver coils and a `~mrpro.operators.FourierOp` describing the transform from image space to k-space taking the sampling scheme
 (trajectory) into account. Additional operators describing transformations due to physiological motion or
-MR signal models can be added.
+MR signal models can be added. See `~mrpro.operators` for a list of available operators.

 All operators take one or more tensors as input and return a tuple of one or more tensors as output.
-Operators can be chained using ``@`` to form a full acquisition model.
+Operators can be chained using ``@`` to form a full acquisition model; a short sketch follows below.
+We also support addition, multiplication, etc. between operators.

-Based on the acquisition model a suitable minimization function and reconstruction algorithm needs to be selected.
+Based on the acquisition model, either a suitable optimizer from `mrpro.algorithms.optimizers` can be selected
+or a new optimizer using pytorch functions can be created.

-Depending on the choices made above the reconstruction algorithms provides images (``IData``) or quantitative
-parametric maps (``QData``).
+For examples, see :doc:`_notebooks/cartesian_reconstruction`, :doc:`_notebooks/direct_reconstruction`, and :doc:`_notebooks/iterative_sense_reconstruction_radial2D`.
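+Picking up the acquisition model described above, a minimal sketch of a manual reconstruction
+(``kdata`` as read earlier, ``csm`` an assumed, precomputed coil sensitivity map; the constructor
+`~mrpro.operators.FourierOp.from_kdata` and the adjoint property ``H`` are assumed to be available
+in your MRpro version)::
+
+    import mrpro
+
+    # Acquisition model: coil sensitivities first, then the Fourier transform.
+    fourier_op = mrpro.operators.FourierOp.from_kdata(kdata)
+    csm_op = mrpro.operators.SensitivityOp(csm)
+    acquisition_model = fourier_op @ csm_op
+
+    # Applying the adjoint of the model yields a simple coil-combined image.
+    (image,) = acquisition_model.H(kdata.data)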
 Image processing
-================
+----------------
 Further processing of the reconstructed data such as quantitative parameter estimation is available.
-The *examples* folder also contains notebooks which show how to carry out motion estimation from reconstructed dynamic
-images.
+Our examples contain a notebook showing how to read in DICOM images and perform qMRI parameter estimation using
+a non-linear optimizer: :doc:`_notebooks/qmri_sg_challenge_2024_t1`.
+
+
+Citation
+========
+We are currently preparing a manuscript for MRpro. In the meantime, please cite:
+
+Zimmermann, F. F., Schuenke, P., Brahma, S., Guastini, M., Hammacher, J., Kofler, A., Kranich Redshaw, C., Lunin, L., Martin, S., Schote, D., & Kolbitsch, C. (2024).
+MRpro - PyTorch-based MR image reconstruction and processing package.
+`10.5281/zenodo.14509599 <https://doi.org/10.5281/zenodo.14509599>`_
diff --git a/examples/notebooks/cartesian_reconstruction.ipynb b/examples/notebooks/cartesian_reconstruction.ipynb
index 46564398f..c0e6af1c3 100644
--- a/examples/notebooks/cartesian_reconstruction.ipynb
+++ b/examples/notebooks/cartesian_reconstruction.ipynb
@@ -43,26 +43,21 @@
   "metadata": {},
   "source": [
    "## Overview\n",
-    "\n",
-    "In this notebook, we are going to explore the MRpro KData object and the included header parameters. We will then use\n",
-    "a FFT-operator in order to reconstruct data acquired with a Cartesian sampling scheme. We will also reconstruct data\n",
-    "acquired on a Cartesian grid but with partial echo and partial Fourier acceleration. Finally, we will reconstruct a\n",
-    "Cartesian scan with regular undersampling using iterative SENSE."
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "4",
-   "metadata": {},
-   "source": [
-    "## Import MRpro and download data"
+    "In this notebook, we are going to explore the `~mrpro.data.KData` object and the included header parameters.\n",
+    "We will then use an FFT-operator in order to reconstruct data acquired with a Cartesian sampling scheme.\n",
+    "We will also reconstruct data acquired on a Cartesian grid but with partial echo and partial Fourier acceleration.\n",
+    "Finally, we will reconstruct a Cartesian scan with regular undersampling."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
-   "id": "5",
-   "metadata": {},
+   "id": "4",
+   "metadata": {
+    "tags": [
+     "hide-cell"
+    ]
+   },
   "outputs": [],
   "source": [
    "# Get the raw data from zenodo\n",
@@ -71,129 +66,140 @@
    "\n",
    "import zenodo_get\n",
    "\n",
-    "data_folder = Path(tempfile.mkdtemp())\n",
    "dataset = '14173489'\n",
+    "\n",
+    "tmp = tempfile.TemporaryDirectory()  # RAII, automatically cleaned up\n",
+    "data_folder = Path(tmp.name)\n",
    "zenodo_get.zenodo_get([dataset, '-r', 5, '-o', data_folder])  # r: retries"
   ]
  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "6",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# List the downloaded files\n",
-    "for f in data_folder.iterdir():\n",
-    "    print(f.name)"
-   ]
-  },
  {
   "cell_type": "markdown",
-   "id": "7",
-   "metadata": {
-    "lines_to_next_cell": 2
-   },
+   "id": "5",
+   "metadata": {},
   "source": [
-    "We have three different scans obtained from the same object with the same FOV and resolution:\n",
+    "We have three different scans obtained from the same object with the same FOV and resolution, saved as ISMRMRD\n",
+    "raw data files (``*.mrd`` or ``*.h5``):\n",
    "\n",
-    "- cart_t1.mrd is a fully sampled Cartesian acquisition\n",
+    "- ``cart_t1.mrd`` is a fully sampled Cartesian acquisition\n",
    "\n",
-    "- cart_t1_msense_integrated.mrd is accelerated using regular undersampling and self-calibrated SENSE\n",
+    "- ``cart_t1_msense_integrated.mrd`` is accelerated using regular undersampling and self-calibrated SENSE\n",
    "\n",
-    "- cart_t1_partial_echo_partial_fourier.mrd is accelerated using partial echo and partial Fourier"
+    "- ``cart_t1_partial_echo_partial_fourier.mrd`` is accelerated using partial echo and partial Fourier"
   ]
  },
  {
   "cell_type": "markdown",
-   "id": "8",
+   "id": "6",
   "metadata": {},
   "source": [
    "## Read in raw data and explore header\n",
    "\n",
-    "To read in an ISMRMRD raw data file (*.mrd), we can simply pass on the file name to a `KData` object.\n",
+    "To read in an ISMRMRD file, we can simply pass the file name to a `~mrpro.data.KData` object.\n",
    "Additionally, we need to provide information about the trajectory. In MRpro, this is done using trajectory\n",
    "calculators. These are functions that calculate the trajectory based on the acquisition information and additional\n",
    "parameters provided to the calculators (e.g. the angular step for a radial acquisition).\n",
    "\n",
    "In this case, we have a Cartesian acquisition. This means that we only need to provide a Cartesian trajectory\n",
-    "calculator (called `KTrajectoryCartesian` in MRpro) without any further parameters."
+    "calculator `~mrpro.data.traj_calculators.KTrajectoryCartesian` without any further parameters.\n",
+    "\n",
+    "See `~mrpro.data.traj_calculators` for more information about different ways to\n",
+    "define the trajectory."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
-   "id": "9",
+   "id": "7",
   "metadata": {},
   "outputs": [],
   "source": [
-    "from mrpro.data import KData\n",
-    "from mrpro.data.traj_calculators import KTrajectoryCartesian\n",
+    "import mrpro\n",
    "\n",
-    "kdata = KData.from_file(data_folder / 'cart_t1.mrd', KTrajectoryCartesian())"
+    "kdata = mrpro.data.KData.from_file(\n",
+    "    data_folder / 'cart_t1.mrd',\n",
+    "    mrpro.data.traj_calculators.KTrajectoryCartesian(),\n",
+    ")"
   ]
  },
  {
   "cell_type": "markdown",
-   "id": "10",
+   "id": "8",
   "metadata": {},
   "source": [
-    "Now we can explore this data object."
+    "Now we can explore this data object.\n",
+    "Simply printing ``kdata`` gives us a basic overview of the `~mrpro.data.KData` object."
] }, { "cell_type": "code", "execution_count": null, - "id": "11", - "metadata": {}, + "id": "9", + "metadata": { + "tags": [ + "show-output" + ] + }, "outputs": [], "source": [ - "# Start with simply calling print(kdata), whichs gives us a nice overview of the KData object.\n", "print(kdata)" ] }, + { + "cell_type": "markdown", + "id": "10", + "metadata": {}, + "source": [ + "We can also have a look at more specific header information like the 1H Larmor frequency:" + ] + }, { "cell_type": "code", "execution_count": null, - "id": "12", + "id": "11", "metadata": {}, "outputs": [], "source": [ - "# We can also have a look at more specific header information like the 1H Lamor frequency\n", - "print(kdata.header.lamor_frequency_proton)" + "print('Larmor frequency:', kdata.header.lamor_frequency_proton)" ] }, { "cell_type": "markdown", - "id": "13", + "id": "12", "metadata": {}, "source": [ "## Reconstruction of fully sampled acquisition\n", "\n", - "For the reconstruction of a fully sampled Cartesian acquisition, we can use a simple Fast Fourier Transform (FFT).\n", + "For the reconstruction of a fully sampled Cartesian acquisition, we can either use a general\n", + "`~mrpro.operators.FourierOp` or manually set up a Fast Fourier Transform (FFT).\n", + "For demonstration purposes, we first show the manual approach.\n", "\n", - "Let's create an FFT-operator (called `FastFourierOp` in MRpro) and apply it to our `KData` object. Please note that\n", - "all MRpro operators currently only work on PyTorch tensors and not on the MRpro objects directly. Therefore, we have\n", - "to call the operator on kdata.data. One other important feature of MRpro operators is that they always return a\n", - "tuple of PyTorch tensors, even if the output is only a single tensor. This is why we use the `(img,)` syntax below." + "```{note}\n", + "Most of the time, you will use the `~mrpro.operators.FourierOp` operator, which automatically takes care\n", + "of choosing whether to use an FFT or a non-uniform FFT (NUFFT) based on the trajectory.\n", + "It can optionally be created from a `~mrpro.data.KData` object without any further information.\n", + "```\n", + "\n", + "Let's create an FFT-operator `~mrpro.operators.FastFourierOp` and apply it to our `~mrpro.data.KData` object.\n", + "Please note that all MRpro operators work on PyTorch tensors and not on the MRpro objects directly. Therefore, we have\n", + "to call the operator on kdata.data. One other important property of MRpro operators is that they always return a\n", + "tuple of PyTorch tensors, even if the output is only a single tensor. This is why we use the ``(img,)`` syntax below." ] }, { "cell_type": "code", "execution_count": null, - "id": "14", + "id": "13", "metadata": {}, "outputs": [], "source": [ - "from mrpro.operators import FastFourierOp\n", - "\n", - "fft_op = FastFourierOp(dim=(-2, -1))\n", + "fft_op = mrpro.operators.FastFourierOp(dim=(-2, -1))\n", "(img,) = fft_op.adjoint(kdata.data)" ] }, { "cell_type": "markdown", - "id": "15", + "id": "14", "metadata": {}, "source": [ "Let's have a look at the shape of the obtained tensor." @@ -202,16 +208,16 @@ { "cell_type": "code", "execution_count": null, - "id": "16", + "id": "15", "metadata": {}, "outputs": [], "source": [ - "print(img.shape)" + "print('Shape:', img.shape)" ] }, { "cell_type": "markdown", - "id": "17", + "id": "16", "metadata": {}, "source": [ "We can see that the second dimension, which is the coil dimension, is 16. This means we still have a coil resolved\n", @@ -219,30 +225,30 @@ "one. 
Later, we will do something a bit more sophisticated. We can also see that the x-dimension is 512. This is\n", "because in MRI we commonly oversample the readout direction by a factor 2 leading to a FOV twice as large as we\n", "actually need. We can either remove this oversampling along the readout direction or we can simply tell the\n", - "`FastFourierOp` to crop the image by providing the correct output matrix size (recon_matrix)." + "`~mrpro.operators.FastFourierOp` to crop the image by providing the correct output matrix size ``recon_matrix``." ] }, { "cell_type": "code", "execution_count": null, - "id": "18", + "id": "17", "metadata": {}, "outputs": [], "source": [ "# Create FFT-operator with correct output matrix size\n", - "fft_op = FastFourierOp(\n", + "fft_op = mrpro.operators.FastFourierOp(\n", " dim=(-2, -1),\n", " recon_matrix=kdata.header.recon_matrix,\n", " encoding_matrix=kdata.header.encoding_matrix,\n", ")\n", "\n", "(img,) = fft_op.adjoint(kdata.data)\n", - "print(img.shape)" + "print('Shape:', img.shape)" ] }, { "cell_type": "markdown", - "id": "19", + "id": "18", "metadata": {}, "source": [ "Now, we have an image which is 256 x 256 voxel as we would expect. Let's combine the data from the different receiver\n", @@ -253,18 +259,42 @@ { "cell_type": "code", "execution_count": null, - "id": "20", - "metadata": {}, + "id": "19", + "metadata": { + "tags": [ + "hide-cell" + ] + }, "outputs": [], "source": [ "import matplotlib.pyplot as plt\n", "import torch\n", "\n", - "# Combine data from different coils\n", - "img_fully_sampled = torch.sqrt(torch.sum(img**2, dim=-4)).abs().squeeze()\n", "\n", - "# plot the image\n", - "plt.imshow(img_fully_sampled)" + "def show_images(*images: torch.Tensor, titles: list[str] | None = None) -> None:\n", + " \"\"\"Plot images.\"\"\"\n", + " n_images = len(images)\n", + " _, axes = plt.subplots(1, n_images, squeeze=False, figsize=(n_images * 3, 3))\n", + " for i in range(n_images):\n", + " axes[0][i].imshow(images[i], cmap='gray')\n", + " axes[0][i].axis('off')\n", + " if titles:\n", + " axes[0][i].set_title(titles[i])\n", + " plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "20", + "metadata": { + "lines_to_next_cell": 0 + }, + "outputs": [], + "source": [ + "# Combine data from different coils and show magnitude image\n", + "magnitude_fully_sampled = img.abs().square().sum(dim=-4).sqrt().squeeze()\n", + "show_images(magnitude_fully_sampled)" ] }, { @@ -288,15 +318,21 @@ "execution_count": null, "id": "23", "metadata": { - "lines_to_next_cell": 2 + "lines_to_next_cell": 0, + "tags": [ + "remove-output" + ] }, "outputs": [], "source": [ "# Read in the data\n", - "kdata_pe_pf = KData.from_file(data_folder / 'cart_t1_partial_echo_partial_fourier.mrd', KTrajectoryCartesian())\n", + "kdata_pe_pf = mrpro.data.KData.from_file(\n", + " data_folder / 'cart_t1_partial_echo_partial_fourier.mrd',\n", + " mrpro.data.traj_calculators.KTrajectoryCartesian(),\n", + ")\n", "\n", "# Create FFT-operator with correct output matrix size\n", - "fft_op = FastFourierOp(\n", + "fft_op = mrpro.operators.FastFourierOp(\n", " dim=(-2, -1),\n", " recon_matrix=kdata.header.recon_matrix,\n", " encoding_matrix=kdata.header.encoding_matrix,\n", @@ -306,20 +342,16 @@ "(img_pe_pf,) = fft_op.adjoint(kdata_pe_pf.data)\n", "\n", "# Combine data from different coils using root-sum-of-squares\n", - "img_pe_pf = torch.sqrt(torch.sum(img_pe_pf**2, dim=-4)).abs().squeeze()\n", + "magnitude_pe_pf = 
img_pe_pf.abs().square().sum(dim=-4).sqrt().squeeze()\n", "\n", "# Plot both images\n", - "fig, ax = plt.subplots(1, 2, squeeze=False)\n", - "ax[0, 0].imshow(img_fully_sampled)\n", - "ax[0, 1].imshow(img_pe_pf)" + "show_images(magnitude_fully_sampled, magnitude_pe_pf, titles=['fully sampled', 'PF & PE'])" ] }, { "cell_type": "markdown", "id": "24", - "metadata": { - "lines_to_next_cell": 2 - }, + "metadata": {}, "source": [ "Well, we got an image, but when we compare it to the previous result, it seems like the head has shrunk.\n", "Since that's extremely unlikely, there's probably a mistake in our reconstruction.\n", @@ -340,13 +372,15 @@ { "cell_type": "markdown", "id": "26", - "metadata": { - "lines_to_next_cell": 2 - }, + "metadata": {}, "source": [ - "We see that the trajectory has kz, ky, and kx components. Kx and ky only vary along one dimension.\n", - "This is because MRpro saves the trajectory in the most efficient way.\n", - "To get the full trajectory as a tensor, we can just call as_tensor()." + "We see that the trajectory has ``kz``, ``ky``, and ``kx`` components. ``kx`` and ``ky`` only vary along one dimension.\n", + "```{note}\n", + "This is because MRpro saves metadata such as trajectories in an efficient way, where dimensions in which the data\n", + "does not change are often collapsed. The original shape can be obtained by\n", + "[broadcasting](https://numpy.org/doc/stable/user/basics.broadcasting.html).\n", + "```\n", + "To get the full trajectory as a tensor, we can also just call `~mrpro.data.KTrajectory.as_tensor()`:" ] }, { "cell_type": "code", "execution_count": null, "id": "27", "metadata": {}, "outputs": [], "source": [ "# Plot the fully sampled trajectory (in blue)\n", - "plt.plot(kdata.traj.as_tensor()[2, 0, 0, :, :].flatten(), kdata.traj.as_tensor()[1, 0, 0, :, :].flatten(), 'ob')\n", + "full_kz, full_ky, full_kx = kdata.traj.as_tensor()\n", + "plt.plot(full_ky[0, 0].flatten(), full_kx[0, 0].flatten(), 'ob')\n", "\n", "# Plot the partial echo and partial Fourier trajectory (in red)\n", - "plt.plot(\n", - "    kdata_pe_pf.traj.as_tensor()[2, 0, 0, :, :].flatten(), kdata_pe_pf.traj.as_tensor()[1, 0, 0, :, :].flatten(), '+r'\n", - ")" + "pe_pf_kz, pe_pf_ky, pe_pf_kx = kdata_pe_pf.traj.as_tensor()\n", + "plt.plot(pe_pf_ky[0, 0].flatten(), pe_pf_kx[0, 0].flatten(), '+r')\n", + "\n", + "plt.show()" ] }, { @@ -378,8 +414,8 @@ "between encoding and recon matrix needs to be zero-padded symmetrically.\n", "\n", "To take the asymmetric acquisition into account and sort the data correctly into a matrix where we can apply the\n", - "FFT-operator to, we have got the `CartesianSamplingOp` in MRpro. This operator calculates a sorting index based on the\n", - "k-space trajectory and the dimensions of the encoding k-space.\n", + "FFT-operator to, we have got the `~mrpro.operators.CartesianSamplingOp` in MRpro. This operator performs\n", + "sorting based on the k-space trajectory and the dimensions of the encoding k-space.\n", "\n", "Let's try it out!" ] }, { "cell_type": "code", "execution_count": null, "id": "29", "metadata": {}, "outputs": [], "source": [ - "from mrpro.operators import CartesianSamplingOp\n", - "\n", - "cart_sampling_op = CartesianSamplingOp(encoding_matrix=kdata_pe_pf.header.encoding_matrix, traj=kdata_pe_pf.traj)" + "cart_sampling_op = mrpro.operators.CartesianSamplingOp(\n", + "    encoding_matrix=kdata_pe_pf.header.encoding_matrix, traj=kdata_pe_pf.traj\n", + ")" ] }, { "cell_type": "markdown", "id": "30", "metadata": {}, "source": [ - "Now, we first apply the CartesianSamplingOp and then call the FFT-operator." 
+ "Now, we first apply the adjoint CartesianSamplingOp and then call the adjoint FFT-operator." ] }, { @@ -412,44 +448,68 @@ "outputs": [], "source": [ "(img_pe_pf,) = fft_op.adjoint(cart_sampling_op.adjoint(kdata_pe_pf.data)[0])\n", - "img_pe_pf = torch.sqrt(torch.sum(img_pe_pf**2, dim=-4)).abs().squeeze()\n", + "magnitude_pe_pf = img_pe_pf.abs().square().sum(dim=-4).sqrt().squeeze()\n", "\n", - "fig, ax = plt.subplots(1, 2, squeeze=False)\n", - "ax[0, 0].imshow(img_fully_sampled)\n", - "ax[0, 1].imshow(img_pe_pf)" + "show_images(magnitude_fully_sampled, magnitude_pe_pf, titles=['fully sampled', 'PF & PE'])" ] }, { "cell_type": "markdown", "id": "32", - "metadata": { - "lines_to_next_cell": 0 - }, - "source": [] + "metadata": {}, + "source": [ + "Voila! We've got the same brains, and they're the same size!" + ] }, { "cell_type": "markdown", "id": "33", - "metadata": { - "lines_to_next_cell": 2 - }, + "metadata": {}, + "source": [ + "## More about operators\n", + "### The Fourier Operator\n", + "In MRpro, we have a smart `~mrpro.operators.FourierOp` operator, that automatically does the resorting and can\n", + "handle non-cartesian data as well. For cartesian data, it internally does exactly the two steps we just did manually.\n", + "The operator can be also be created from an existing `~mrpro.data.KData` object\n", + "This is the recommended way to transform k-space data." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "34", + "metadata": {}, + "outputs": [], "source": [ - "Voila! We've got the same brains, and they're the same size!\n", "\n", - "But wait a second—something still looks a bit off. In the bottom left corner, it seems like there's a \"hole\"\n", + "fourier_op = mrpro.operators.FourierOp.from_kdata(kdata_pe_pf)\n", + "# no need for and explicit CartesianSamplingOp anymore!\n", + "(img_pe_pf,) = fourier_op.adjoint(kdata_pe_pf.data)\n", + "magnitude_pe_pf = img_pe_pf.abs().square().sum(dim=-4).sqrt().squeeze()\n", + "show_images(magnitude_fully_sampled, magnitude_pe_pf, titles=['fully sampled', 'PF & PE'])" + ] + }, + { + "cell_type": "markdown", + "id": "35", + "metadata": {}, + "source": [ + "That was easy!\n", + "But wait a second — something still looks a bit off. In the bottom left corner, it seems like there's a \"hole\"\n", "in the brain. That definitely shouldn't be there.\n", "\n", "The issue is that we combined the data from the different coils using a root-sum-of-squares approach.\n", "While it's simple, it's not the ideal method. Typically, coil sensitivity maps are calculated to combine the data\n", "from different coils. In MRpro, you can do this by calculating coil sensitivity data and then creating a\n", - "`SensitivityOp` to combine the data after image reconstruction." + "`~mrpro.operators.SensitivityOp` to combine the data after image reconstruction." ] }, { "cell_type": "markdown", - "id": "34", + "id": "36", "metadata": {}, "source": [ + "### Sensitivity Operator\n", "We have different options for calculating coil sensitivity maps from the image data of the various coils.\n", "Here, we're going to use the Walsh method." 
] @@ -457,112 +517,96 @@ { "cell_type": "code", "execution_count": null, - "id": "35", + "id": "37", "metadata": {}, "outputs": [], "source": [ - "from mrpro.algorithms.csm import walsh\n", - "from mrpro.operators import SensitivityOp\n", - "\n", "# Calculate coil sensitivity maps\n", - "(img_pe_pf,) = fft_op.adjoint(cart_sampling_op.adjoint(kdata_pe_pf.data)[0])\n", + "(img_pe_pf,) = fft_op.adjoint(*cart_sampling_op.adjoint(kdata_pe_pf.data))\n", "\n", "# This algorithm is designed to calculate coil sensitivity maps for each of the other dimensions.\n", - "csm_data = walsh(img_pe_pf[0, ...], smoothing_width=5)[None, ...]\n", + "csm_data = mrpro.algorithms.csm.walsh(img_pe_pf[0, ...], smoothing_width=5)[None, ...]\n", "\n", "# Create SensitivityOp\n", - "csm_op = SensitivityOp(csm_data)\n", + "csm_op = mrpro.operators.SensitivityOp(csm_data)\n", "\n", "# Reconstruct coil-combined image\n", - "(img_pe_pf,) = csm_op.adjoint(fft_op.adjoint(cart_sampling_op.adjoint(kdata_pe_pf.data)[0])[0])\n", - "img_pe_pf = img_pe_pf.abs().squeeze()\n", - "\n", - "fig, ax = plt.subplots(1, 2, squeeze=False)\n", - "ax[0, 0].imshow(img_fully_sampled)\n", - "ax[0, 1].imshow(img_pe_pf.squeeze())" + "(img_walsh_combined,) = csm_op.adjoint(*fourier_op.adjoint(kdata_pe_pf.data))\n", + "magnitude_walsh_combined = img_walsh_combined.abs().squeeze()\n", + "show_images(magnitude_pe_pf, magnitude_walsh_combined, titles=['RSS', 'Adaptive Combination'])" ] }, { "cell_type": "markdown", - "id": "36", + "id": "38", "metadata": {}, "source": [ - "Tada! The \"hole\" is gone, and the image looks much better.\n", + "Tada! The \"hole\" is gone, and the image looks much better 🎉.\n", "\n", "When we reconstructed the image, we called the adjoint method of several different operators one after the other. That\n", - "was a bit cumbersome. To make our life easier, MRpro allows to combine the operators first and then call the adjoint\n", - "of the composite operator. We have to keep in mind that we have to put them in the order of the forward method of the\n", - "operators. By calling the adjoint, the order will be automatically reversed." + "was a bit cumbersome. To make our life easier, MRpro allows us to combine the operators first, obtain the adjoint\n", + "of the composite operator, and then call this composite adjoint operator later." ] }, { "cell_type": "code", "execution_count": null, - "id": "37", + "id": "39", "metadata": {}, "outputs": [], "source": [ + "# Operator Composition\n", "# Create composite operator\n", - "acq_op = cart_sampling_op @ fft_op @ csm_op\n", - "(img_pe_pf,) = acq_op.adjoint(kdata_pe_pf.data)\n", - "img_pe_pf = img_pe_pf.abs().squeeze()\n", - "\n", - "fig, ax = plt.subplots(1, 2, squeeze=False)\n", - "ax[0, 0].imshow(img_fully_sampled)\n", - "ax[0, 1].imshow(img_pe_pf)" + "adjoint_operator = (fourier_op @ csm_op).H\n", + "(img_pe_pf,) = adjoint_operator(kdata_pe_pf.data)\n", + "magnitude_pe_pf = img_pe_pf.abs().squeeze()\n", + "show_images(magnitude_pe_pf, titles=['PF & PE'])" ] }, { "cell_type": "markdown", - "id": "38", + "id": "40", "metadata": {}, "source": [ "Although we now have got a nice looking image, it was still a bit cumbersome to create it. We had to define several\n", "different operators and chain them together. Wouldn't it be nice if this could be done automatically?\n", "\n", "That is why we also included some top-level reconstruction algorithms in MRpro. For all of the steps above,\n", - "we can simply call a `DirectReconstruction`. 
A `DirectReconstruction` object can be created from only the information\n", - "in the `KData` object.\n", + "we can simply use a `~mrpro.algorithms.reconstruction.DirectReconstruction`.\n", + "Reconstruction algorithms can be instantiated from only the information in the `~mrpro.data.KData` object.\n", "\n", "In contrast to operators, top-level reconstruction algorithms operate on the data objects of MRpro, i.e. the input is\n", - "a `KData` object and the output is an image data (called `IData` in MRpro) object. To get the tensor content of the\n", - "`IData` object, we can call its `rss` method." + "a `~mrpro.data.KData` object and the output is an `~mrpro.data.IData` object containing\n", + "the reconstructed image data. To get its magnitude, we can call the `~mrpro.data.IData.rss` method." ] }, { "cell_type": "code", "execution_count": null, - "id": "39", + "id": "41", "metadata": {}, "outputs": [], "source": [ - "from mrpro.algorithms.reconstruction import DirectReconstruction\n", "\n", "# Create DirectReconstruction object from KData object\n", - "direct_recon_pe_pf = DirectReconstruction(kdata_pe_pf)\n", + "direct_recon_pe_pf = mrpro.algorithms.reconstruction.DirectReconstruction(kdata_pe_pf)\n", "\n", "# Reconstruct image by calling the DirectReconstruction object\n", - "idat_pe_pf = direct_recon_pe_pf(kdata_pe_pf)\n", - "\n", - "fig, ax = plt.subplots(1, 2, squeeze=False)\n", - "ax[0, 0].imshow(img_fully_sampled)\n", - "ax[0, 1].imshow(idat_pe_pf.rss().squeeze())" + "idat_pe_pf = direct_recon_pe_pf(kdata_pe_pf)" ] }, { "cell_type": "markdown", - "id": "40", - "metadata": { - "lines_to_next_cell": 2 - }, + "id": "42", + "metadata": {}, "source": [ "This is much simpler — everything happens in the background, so we don't have to worry about it.\n", - "Let's now try it on the undersampled dataset." + "Let's now try it on the undersampled dataset." ] }, { "cell_type": "markdown", - "id": "41", + "id": "43", "metadata": {}, "source": [ "## Reconstruction of undersampled data" 
Commonly, we would get them from a fully sampled self-calibration reference lines in the\n", - "center of k-space or a separate coil sensitivity scan.\n", + "We used the same data for coil sensitivity calculation as for image reconstruction (*auto-calibration*).\n", + "Another approach is to acquire a few calibration lines in the center of k-space to create a low-resolution,\n", + "fully sampled image. In our example data from Siemens scanners, these lines are part of the dataset.\n", + "As they aren't meant to be used for image reconstruction, only for calibration, i.e., coil sensitivity calculation,\n", + "and are labeled in the data as such, they are ignored by the default `acquisition_filter_criterion` of\n", + "`~mrpro.data.KData.from_file`.\n", + "However, we can change the filter criterion to `is_coil_calibration_acquisition` to read in only these acquisitions.\n", "\n", - "As a first step, we are going to assume that we have got a nice fully sampled reference scan like our partial echo and\n", - "partial Fourier acquisition. We can get the `CsmData`, which is needed for the `IterativeSENSEReconstruction`, from\n", - "the previous reconstruction." + "```{note}\n", + "There are already some other filter criteria available, see `mrpro.data.acq_filters`. You can also implement your own\n", + "function returning whether to include an acquisition.\n", + "```\n" ] }, { "cell_type": "code", "execution_count": null, - "id": "44", + "id": "46", "metadata": {}, "outputs": [], "source": [ - "from mrpro.algorithms.reconstruction import IterativeSENSEReconstruction\n", - "\n", - "it_sense_recon = IterativeSENSEReconstruction(kdata=kdata_us, csm=direct_recon_pe_pf.csm)\n", - "idat_us = it_sense_recon(kdata_us)\n", + "kdata_calib_lines = mrpro.data.KData.from_file(\n", + "    data_folder / 'cart_t1_msense_integrated.mrd',\n", + "    mrpro.data.traj_calculators.KTrajectoryCartesian(),\n", + "    acquisition_filter_criterion=mrpro.data.acq_filters.is_coil_calibration_acquisition,\n", + ")\n", "\n", - "fig, ax = plt.subplots(1, 2, squeeze=False)\n", - "ax[0, 0].imshow(img_fully_sampled)\n", - "ax[0, 1].imshow(idat_us.rss().squeeze())" + "direct_recon_calib_lines = mrpro.algorithms.reconstruction.DirectReconstruction(kdata_calib_lines)\n", + "idat_calib_lines = direct_recon_calib_lines(kdata_calib_lines)" ] }, { "cell_type": "markdown", - "id": "45", + "id": "47", "metadata": {}, "source": [ - "That worked well, but in practice, we don't want to acquire a fully sampled version of our scan just to\n", - "reconstruct it. A more efficient approach is to get a few self-calibration lines in the center of k-space\n", - "to create a low-resolution, fully sampled image.\n", - "\n", - "In our scan, these lines are part of the dataset, but they aren't used for image reconstruction since\n", - "they're only meant for calibration (i.e., coil sensitivity map calculation). Because they're not labeled\n", - "for imaging, MRpro ignores them by default when reading the data. However, we can set a flag when calling\n", - "`from_file` to read in just those lines for reconstructing the coil sensitivity maps." + "If we look at the reconstructed image, we see that it is low resolution..."
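The note above mentions that you can supply your own filter function. A hedged sketch, assuming only the standard ISMRMRD encoding counters on the acquisition object (`acq.idx.repetition`); note that a custom criterion replaces the default imaging-acquisition filter entirely:

```python
# Hypothetical custom filter (not part of the notebook): keep only
# acquisitions from the first repetition. The criterion receives one
# ISMRMRD acquisition and returns True if it should be included.
def first_repetition_only(acq) -> bool:
    return acq.idx.repetition == 0  # standard ISMRMRD encoding counter

kdata_first_rep = mrpro.data.KData.from_file(
    data_folder / 'cart_t1_msense_integrated.mrd',
    mrpro.data.traj_calculators.KTrajectoryCartesian(),
    acquisition_filter_criterion=first_repetition_only,
)
```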
] }, { "cell_type": "code", "execution_count": null, - "id": "46", + "id": "48", "metadata": {}, "outputs": [], "source": [ - "from mrpro.data.acq_filters import is_coil_calibration_acquisition\n", - "\n", - "kdata_calib_lines = KData.from_file(\n", - "    data_folder / 'cart_t1_msense_integrated.mrd',\n", - "    KTrajectoryCartesian(),\n", - "    acquisition_filter_criterion=lambda acq: is_coil_calibration_acquisition(acq),\n", - ")\n", - "\n", - "direct_recon_calib_lines = DirectReconstruction(kdata_calib_lines)\n", - "im_calib_lines = direct_recon_calib_lines(kdata_calib_lines)\n", - "\n", - "plt.imshow(im_calib_lines.rss().squeeze())" + "show_images(idat_calib_lines.rss().squeeze(), titles=['Calibration Image'])" ] }, { "cell_type": "markdown", - "id": "47", + "id": "49", "metadata": {}, "source": [ - "Although this only yields a low-resolution image, it is good enough to calculate coil sensitivity maps." + "...but it is good enough to calculate coil sensitivity maps, which we can use when creating the reconstruction object:" ] }, { "cell_type": "code", "execution_count": null, - "id": "48", + "id": "50", "metadata": {}, "outputs": [], "source": [ - "# Visualize coil sensitivity maps of all 16 coils\n", - "assert direct_recon_calib_lines.csm is not None # needed for type checking\n", - "fig, ax = plt.subplots(4, 4, squeeze=False)\n", - "for idx, cax in enumerate(ax.flatten()):\n", - "    cax.imshow(direct_recon_calib_lines.csm.data[0, idx, 0, ...].abs())" + "# The coil sensitivity maps\n", + "assert direct_recon_calib_lines.csm is not None\n", + "show_images(\n", + "    *direct_recon_calib_lines.csm.data[0].abs().squeeze(),\n", + "    titles=[f'|CSM {i}|' for i in range(direct_recon_calib_lines.csm.data.size(-4))],\n", + ")\n", + "# Reuse the CSMs\n", + "direct_recon_us_csm = mrpro.algorithms.reconstruction.DirectReconstruction(kdata_us, csm=direct_recon_calib_lines.csm)\n", + "idat_us_csm = direct_recon_us_csm(kdata_us)\n", + "show_images(idat_us.rss().squeeze(), idat_us_csm.rss().squeeze(), titles=['Autocalibration', 'Calibration Lines'])" ] }, { "cell_type": "markdown", - "id": "49", + "id": "51", "metadata": {}, "source": [ - "Now, we can use these coil sensitivity maps to reconstruct our SENSE scan." + "As expected, we still see undersampling artifacts in the image. 
In order to get rid of them,\n", + "we can try a more sophisticated reconstruction method, such as the *iterative SENSE algorithm*.\n", + "As you might have guessed, these are also included in MRpro:\n", + "Instead of the `~mrpro.algorithms.reconstruction.DirectReconstruction`,\n", + "we can use `~mrpro.algorithms.reconstruction.IterativeSENSEReconstruction`:" ] }, { "cell_type": "code", "execution_count": null, - "id": "50", + "id": "52", "metadata": {}, "outputs": [], "source": [ - "it_sense_recon = IterativeSENSEReconstruction(kdata_us, csm=direct_recon_calib_lines.csm)\n", - "idat_us = it_sense_recon(kdata_us)\n", - "\n", - "fig, ax = plt.subplots(1, 2, squeeze=False)\n", - "ax[0, 0].imshow(img_fully_sampled)\n", - "ax[0, 1].imshow(idat_us.rss().squeeze())" + "sense_recon_us = mrpro.algorithms.reconstruction.IterativeSENSEReconstruction(\n", + "    kdata_us,\n", + "    csm=direct_recon_calib_lines.csm,\n", + "    n_iterations=8,\n", + ")\n", + "idat_us_sense = sense_recon_us(kdata_us)\n", + "show_images(idat_us_sense.rss().squeeze(), titles=['Iterative SENSE'])" ] }, { "cell_type": "markdown", - "id": "51", - "metadata": { - "lines_to_next_cell": 0 - }, - "source": [] - }, - { - "cell_type": "markdown", - "id": "52", + "id": "53", "metadata": {}, "source": [ - "The final image is a little worse (nothing beats fully sampled high-resolution scans for coil map\n", - "calculation), but we've managed to get rid of the undersampling artifacts inside the brain. If you want to\n", - "further improve the coil sensitivity map quality, try:\n", - "- using different methods to calculate them, e.g. `mrpro.algorithms.csm.inati`\n", - "- playing around with the parameters of these methods\n", - "- applying a smoothing filter on the images (or ideally directly in k-space) used to calculate the coil\n", - "  sensitivity maps" + "This looks better! More information about the iterative SENSE reconstruction and its implementation in MRpro\n", + "can be found in the examples."
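Since `n_iterations=8` is fixed above, one natural follow-up experiment (hypothetical, not part of the original notebook) is to sweep the iteration count and watch the trade-off between artifact removal and noise amplification; every name below is defined earlier in this notebook:

```python
# Sweep the number of CG iterations of the iterative SENSE reconstruction.
# kdata_us, direct_recon_calib_lines and show_images are defined above.
n_iterations_list = [1, 4, 16]
images = []
for n_iterations in n_iterations_list:
    recon = mrpro.algorithms.reconstruction.IterativeSENSEReconstruction(
        kdata_us,
        csm=direct_recon_calib_lines.csm,
        n_iterations=n_iterations,
    )
    images.append(recon(kdata_us).rss().squeeze())
show_images(*images, titles=[f'{n} iterations' for n in n_iterations_list])
```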
] } ], "metadata": { "accelerator": "GPU", "colab": { "gpuType": "T4", "provenance": [] }, "jupytext": { - "cell_metadata_filter": "-all" + "cell_metadata_filter": "tags,-all" }, "kernelspec": { "display_name": "Python 3 (ipykernel)", diff --git a/examples/notebooks/comparison_trajectory_calculators.ipynb b/examples/notebooks/comparison_trajectory_calculators.ipynb new file mode 100644 index 000000000..cbdc891b3 --- /dev/null +++ b/examples/notebooks/comparison_trajectory_calculators.ipynb @@ -0,0 +1,256 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "0", + "metadata": { + "lines_to_next_cell": 0 + }, + "source": [ + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/PTB-MR/mrpro/blob/main/examples/notebooks/comparison_trajectory_calculators.ipynb)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": { + "tags": [ + "remove-cell" + ] + }, + "outputs": [], + "source": [ + "import importlib\n", + "\n", + "if not importlib.util.find_spec('mrpro'):\n", + "    %pip install mrpro[notebook]" + ] + }, + { + "cell_type": "markdown", + "id": "2", + "metadata": { + "lines_to_next_cell": 2 + }, + "source": [ + "# Different ways to obtain the Trajectory\n", + "This example demonstrates three ways\n", + "to obtain the trajectory information required for image reconstruction:\n", + "- using the trajectory that is stored in the ISMRMRD file\n", + "- calculating the trajectory using the radial 2D trajectory calculator\n", + "- calculating the trajectory from the pulseq sequence file using the PyPulseq trajectory calculator" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3", + "metadata": { + "tags": [ + "hide-cell" + ] + }, + "outputs": [], + "source": [ + "# Download raw data from Zenodo\n", + "import tempfile\n", + "from pathlib import Path\n", + "\n", + "import mrpro\n", + "import torch\n", + "import zenodo_get\n", + "\n", + "dataset = '14617082'\n", + "\n", + "tmp = tempfile.TemporaryDirectory()  # RAII, automatically cleaned up\n", + "data_folder = Path(tmp.name)\n", + "zenodo_get.zenodo_get([dataset, '-r', 5, '-o', data_folder])  # r: retries" + ] + }, + { + "cell_type": "markdown", + "id": "4", + "metadata": {}, + "source": [ + "### Using KTrajectoryIsmrmrd - Trajectory saved in ISMRMRD file\n", + "Passing an instance of `~mrpro.data.traj_calculators.KTrajectoryIsmrmrd` to `~mrpro.data.KData.from_file`\n", + "when loading the data tells the `~mrpro.data.KData` object to use the trajectory\n", + "that is stored in the ISMRMRD file.\n", + "```{note}\n", + "Often the trajectory information has not been stored in the ISMRMRD file,\n", + "in which case loading the trajectory this way will raise an error.\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5", + "metadata": {}, + "outputs": [], + "source": [ + "# Read the raw data and the trajectory from ISMRMRD file\n", + "kdata = mrpro.data.KData.from_file(\n", + "    data_folder / 'radial2D_402spokes_golden_angle_with_traj.h5',\n", + "    mrpro.data.traj_calculators.KTrajectoryIsmrmrd(),\n", + ")\n", + "\n", + "# Reconstruct image\n", + "reconstruction = mrpro.algorithms.reconstruction.DirectReconstruction(kdata)\n", + "img_using_ismrmrd_traj = reconstruction(kdata)" + ] + }, + { + "cell_type": "markdown", + "id": "6", + "metadata": {}, + "source": [ + "### Using KTrajectoryRadial2D - Specific trajectory calculator\n", + "For some common trajectories, we provide specific trajectory calculators.\n", + "These calculators often require only 
a few parameters to be specified,\n", + "such as the angle between spokes in the radial trajectory. Other parameters\n", + "will be taken from the ISMRMRD file.\n", + "This will calculate the trajectory using the radial 2D trajectory calculator.\n", + "```{note}\n", + "You can also implement your own trajectory calculator by subclassing\n", + "`~mrpro.data.traj_calculators.KTrajectoryCalculator`.\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7", + "metadata": {}, + "outputs": [], + "source": [ + "# Read raw data and calculate trajectory using KTrajectoryRadial2D\n", + "golden_angle = torch.pi * 0.618034\n", + "kdata = mrpro.data.KData.from_file(\n", + " data_folder / 'radial2D_402spokes_golden_angle_with_traj.h5',\n", + " mrpro.data.traj_calculators.KTrajectoryRadial2D(golden_angle),\n", + ")\n", + "\n", + "# Reconstruct image\n", + "reconstruction = mrpro.algorithms.reconstruction.DirectReconstruction(kdata)\n", + "img_using_rad2d_traj = reconstruction(kdata)" + ] + }, + { + "cell_type": "markdown", + "id": "8", + "metadata": {}, + "source": [ + "### Using KTrajectoryPulseq - Trajectory from pulseq sequence file\n", + "This will calculate the trajectory from the pulseq sequence file\n", + "using the PyPulseq trajectory calculator. This method\n", + "requires the pulseq sequence file that was used to acquire the data.\n", + "The path to the sequence file is provided as an argument to KTrajectoryPulseq." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9", + "metadata": {}, + "outputs": [], + "source": [ + "# Read raw data and calculate trajectory using KTrajectoryPulseq\n", + "seq_path = data_folder / 'radial2D_402spokes_golden_angle.seq'\n", + "kdata = mrpro.data.KData.from_file(\n", + " data_folder / 'radial2D_402spokes_golden_angle_with_traj.h5',\n", + " mrpro.data.traj_calculators.KTrajectoryPulseq(seq_path),\n", + ")\n", + "\n", + "# Reconstruct image\n", + "reconstruction = mrpro.algorithms.reconstruction.DirectReconstruction(kdata)\n", + "img_using_pulseq_traj = reconstruction(kdata)" + ] + }, + { + "cell_type": "markdown", + "id": "10", + "metadata": { + "lines_to_next_cell": 0 + }, + "source": [ + "### Plot the different reconstructed images\n", + "All three images are reconstructed using the same raw data and should look almost identical." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "11", + "metadata": { + "tags": [ + "hide-cell" + ] + }, + "outputs": [], + "source": [ + "import matplotlib.pyplot as plt\n", + "import torch\n", + "\n", + "\n", + "def show_images(*images: torch.Tensor, titles: list[str] | None = None) -> None:\n", + " \"\"\"Plot images.\"\"\"\n", + " n_images = len(images)\n", + " _, axes = plt.subplots(1, n_images, squeeze=False, figsize=(n_images * 3, 3))\n", + " for i in range(n_images):\n", + " axes[0][i].imshow(images[i], cmap='gray')\n", + " axes[0][i].axis('off')\n", + " if titles:\n", + " axes[0][i].set_title(titles[i])\n", + " plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "12", + "metadata": {}, + "outputs": [], + "source": [ + "show_images(\n", + " img_using_ismrmrd_traj.rss()[0, 0],\n", + " img_using_rad2d_traj.rss()[0, 0],\n", + " img_using_pulseq_traj.rss()[0, 0],\n", + " titles=['KTrajectoryIsmrmrd', 'KTrajectoryRadial2D', 'KTrajectoryPulseq'],\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "13", + "metadata": {}, + "source": [ + "Tada! 
We have successfully reconstructed images using three different trajectory calculators.\n", + "```{note}\n", + "Which of these three methods is the best depends on the specific use case:\n", + "If a trajectory is already stored in the ISMRMRD file, it is the most convenient to use.\n", + "If a pulseq sequence file is available, the trajectory can be calculated using the PyPulseq trajectory calculator.\n", + "Otherwise, a trajectory calculator needs to be implemented for the specific trajectory used.\n", + "```" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + }, + "jupytext": { + "cell_metadata_filter": "tags,-all" + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/notebooks/direct_reconstruction.ipynb b/examples/notebooks/direct_reconstruction.ipynb index 794361ef0..a7620836d 100644 --- a/examples/notebooks/direct_reconstruction.ipynb +++ b/examples/notebooks/direct_reconstruction.ipynb @@ -30,187 +30,338 @@ { "cell_type": "markdown", "id": "2", - "metadata": { - "lines_to_next_cell": 0 - }, + "metadata": {}, "source": [ "# Direct Reconstruction of 2D golden angle radial data\n", - "Here we use the DirectReconstruction class to reconstruct images from ISMRMRD 2D radial data" + "Here we use the `~mrpro.algorithms.reconstruction.DirectReconstruction` class to perform a basic reconstruction of\n", + "2D radial data.\n", + "A *direct* reconstruction uses the density compensated adjoint of the acquisition operator to obtain the images." + ] + }, + { + "cell_type": "markdown", + "id": "3", + "metadata": {}, + "source": [ + "## Using `~mrpro.algorithms.reconstruction.DirectReconstruction`\n", + "We use the `~mrpro.algorithms.reconstruction.DirectReconstruction` class to reconstruct images from 2D radial data.\n", + "`~mrpro.algorithms.reconstruction.DirectReconstruction` estimates sensitivity maps, density compensation factors, etc.\n", + "and performs an adjoint Fourier transform.\n", + "This is the simplest reconstruction method in our high-level interface to the reconstruction pipeline." + ] + }, + { + "cell_type": "markdown", + "id": "4", + "metadata": {}, + "source": [ + "### Load the data\n", + "We load the data from the ISMRMRD file. We want to use the trajectory that is also stored in the ISMRMRD file.\n", + "This can be done by passing a `~mrpro.data.traj_calculators.KTrajectoryIsmrmrd` object to\n", + "`~mrpro.data.KData.from_file` when creating the `~mrpro.data.KData`." 
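To make the phrase "density compensated adjoint of the acquisition operator" concrete, here is the same statement as a worked equation, in the notation introduced in the iterative SENSE notebook further down in this patch: with acquisition operator $A$ (Fourier transform composed with the coil sensitivity operator) and density compensation $W$, the direct reconstruction computes

$ x_{\text{direct}} = A^H W y $

which is exactly the right-hand side $b = A^H W y$ of the normal equations $A^H W A x = A^H W y$ solved by the iterative SENSE reconstruction; the direct method simply skips the iterative solve.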
] }, { "cell_type": "code", "execution_count": null, - "id": "3", + "id": "5", "metadata": { - "lines_to_next_cell": 0 + "tags": [ + "hide-cell" + ] }, "outputs": [], "source": [ - "# define zenodo URL of the example ismrmd data\n", - "zenodo_url = 'https://zenodo.org/records/10854057/files/'\n", - "fname = 'pulseq_radial_2D_402spokes_golden_angle_with_traj.h5'" + "# Download raw data from Zenodo\n", + "import tempfile\n", + "from pathlib import Path\n", + "\n", + "import zenodo_get\n", + "\n", + "dataset = '14617082'\n", + "\n", + "tmp = tempfile.TemporaryDirectory()  # RAII, automatically cleaned up\n", + "data_folder = Path(tmp.name)\n", + "zenodo_get.zenodo_get([dataset, '-r', 5, '-o', data_folder])  # r: retries" ] }, { "cell_type": "code", "execution_count": null, - "id": "4", + "id": "6", "metadata": {}, "outputs": [], "source": [ - "# Download raw data\n", - "import tempfile\n", "\n", - "import requests\n", + "import mrpro\n", + "import torch\n", + "\n", + "trajectory_calculator = mrpro.data.traj_calculators.KTrajectoryIsmrmrd()\n", + "kdata = mrpro.data.KData.from_file(data_folder / 'radial2D_402spokes_golden_angle_with_traj.h5', trajectory_calculator)" + ] + }, + { + "cell_type": "markdown", + "id": "7", + "metadata": {}, + "source": [ + "## Setup the DirectReconstruction instance\n", + "We create a `~mrpro.algorithms.reconstruction.DirectReconstruction` and supply ``kdata``.\n", + "`~mrpro.algorithms.reconstruction.DirectReconstruction` uses the information in ``kdata`` to\n", + "set up a Fourier transform, compute density compensation factors, and estimate coil sensitivity maps.\n", + "(See the *Behind the scenes* section for more details.)\n", "\n", - "data_file = tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.h5')\n", - "response = requests.get(zenodo_url + fname, timeout=30)\n", - "data_file.write(response.content)\n", - "data_file.flush()" + "```{note}\n", + "You can also directly set the Fourier operator, coil sensitivity maps, density compensation factors, etc.\n", + "of the reconstruction instance.\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8", + "metadata": {}, + "outputs": [], + "source": [ + "reconstruction = mrpro.algorithms.reconstruction.DirectReconstruction(kdata)" + ] + }, + { + "cell_type": "markdown", - "id": "5", - "metadata": { - "lines_to_next_cell": 0 - }, + "id": "9", + "metadata": {}, "source": [ - "### Image reconstruction\n", - "We use the DirectReconstruction class to reconstruct images from 2D radial data.\n", - "DirectReconstruction estimates CSMs, DCFs and performs an adjoint Fourier transform.\n", - "This is a high-level interface to the reconstruction pipeline." 
+ "## Perform the reconstruction\n", + "The reconstruction is performed by calling the passing the k-space data.\n", + "```{note}\n", + "Often, the data used to obtain the meta data for constructing the reconstruction instance\n", + "is the same as the data passed to the reconstruction.\n", + "But you can also different to create the coil sensitivity maps, dcf, etc.\n", + "than the data that is passed to the reconstruction.\n", + "```" ] }, { "cell_type": "code", "execution_count": null, - "id": "6", - "metadata": { - "lines_to_next_cell": 0 - }, + "id": "10", + "metadata": {}, "outputs": [], "source": [ - "import mrpro\n", - "\n", - "# Use the trajectory that is stored in the ISMRMRD file\n", - "trajectory = mrpro.data.traj_calculators.KTrajectoryIsmrmrd()\n", - "# Load in the Data from the ISMRMRD file\n", - "kdata = mrpro.data.KData.from_file(data_file.name, trajectory)\n", - "# Perform the reconstruction\n", - "reconstruction = mrpro.algorithms.reconstruction.DirectReconstruction(kdata)\n", - "# Use this to run on gpu: kdata = kdata.cuda()\n", "img = reconstruction(kdata)" ] }, + { + "cell_type": "markdown", + "id": "11", + "metadata": {}, + "source": [ + "### Display the reconstructed image\n", + "We now got in `~mrpro.data.IData` object containing a header and the image tensor.\n", + "We display the reconstructed image using matplotlib." + ] + }, { "cell_type": "code", "execution_count": null, - "id": "7", - "metadata": { - "lines_to_next_cell": 0 - }, + "id": "12", + "metadata": {}, "outputs": [], "source": [ "import matplotlib.pyplot as plt\n", "\n", - "# Display the reconstructed image\n", "# If there are multiple slices, ..., only the first one is selected\n", - "first_img = img.rss().cpu()[0, 0, :, :] # images, z, y, x\n", - "plt.matshow(first_img, cmap='gray')" + "first_img = img.rss()[0, 0] # images, z, y, x\n", + "plt.imshow(first_img, cmap='gray')\n", + "plt.axis('off')\n", + "plt.show()" ] }, { "cell_type": "markdown", - "id": "8", - "metadata": { - "lines_to_next_cell": 0 - }, + "id": "13", + "metadata": {}, "source": [ - "### Behind the scenes\n", - "These steps are done in a direct reconstruction:" + "## Behind the scenes\n", + "We now peek behind the scenes to see what happens in the `~mrpro.algorithms.reconstruction.DirectReconstruction`\n", + "class, and perform all steps manually:\n", + "- Calculate density compensation factors\n", + "- Setup Fourier operator\n", + "- Obtain coil-wise images\n", + "- Calculate coil sensitivity maps\n", + "- Perform direct reconstruction\n", + "\n", + "### Calculate density compensation using the trajectory\n", + "We use a Voronoi tessellation of the trajectory to calculate the `~mrpro.data.DcfData` and obtain\n", + "a `~mrpro.operators.DensityCompensationOp` operator." ] }, { "cell_type": "code", "execution_count": null, - "id": "9", - "metadata": { - "lines_to_next_cell": 0 - }, + "id": "14", + "metadata": {}, + "outputs": [], + "source": [ + "dcf_operator = mrpro.data.DcfData.from_traj_voronoi(kdata.traj).as_operator()" + ] + }, + { + "cell_type": "markdown", + "id": "15", + "metadata": {}, + "source": [ + "### Setup Fourier Operator\n", + "Next, we create the Fourier operator. We can just pass the ``kdata`` object to the constructor of the\n", + "`~mrpro.operators.FourierOp`, and the trajectory and header information is used to create the operator. 
We want\n", + "to use the adjoint of the density compensated Fourier operator, so we perform a composition with ``dcf_operator``\n", + "and use the `~mrpro.operators.FourierOp.H` property of the operator to obtain its adjoint." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "16", + "metadata": {}, "outputs": [], "source": [ - "# Calculate dcf using the trajectory\n", - "dcf_operator = mrpro.data.DcfData.from_traj_voronoi(kdata.traj).as_operator()\n", - "\n", - "# Define Fourier operator using the trajectory\n", - "# and header information in kdata\n", "fourier_operator = dcf_operator @ mrpro.operators.FourierOp.from_kdata(kdata)\n", - "adjoint_operator = fourier_operator.H\n", - "\n", - "# Calculate coil maps\n", - "# Note that operators return a tuple of tensors, so we need to unpack it,\n", - "# even though there is only one tensor returned from adjoint operator.\n", + "adjoint_operator = fourier_operator.H" + ] + }, + { + "cell_type": "markdown", + "id": "17", + "metadata": {}, + "source": [ + "### Calculate coil sensitivity maps\n", + "Coil sensitivity maps are calculated using the Walsh method (see `~mrpro.data.CsmData` for other available methods).\n", + "We first need to calculate the coil-wise images, which are then used to calculate the coil sensitivity maps." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "18", + "metadata": {}, + "outputs": [], + "source": [ "img_coilwise = mrpro.data.IData.from_tensor_and_kheader(*adjoint_operator(kdata.data), kdata.header)\n", - "csm_operator = mrpro.data.CsmData.from_idata_walsh(img_coilwise).as_operator()\n", - "\n", - "# Perform Direct Reconstruction\n", - "forward_operator = fourier_operator @ csm_operator\n", - "adjoint_operator = forward_operator.H\n", - "img_manual = mrpro.data.IData.from_tensor_and_kheader(*adjoint_operator(kdata.data), kdata.header)" + "csm_operator = mrpro.data.CsmData.from_idata_walsh(img_coilwise).as_operator()" ] }, { "cell_type": "markdown", - "id": "10", - "metadata": { - "lines_to_next_cell": 0 - }, + "id": "19", + "metadata": {}, "source": [ - "### Further behind the scenes\n", - "... these steps are equivalent to:" + "### Perform Direct Reconstruction\n", + "Finally, the direct reconstruction is performed and an `~mrpro.data.IData` object with the reconstructed\n", + "image is returned. We update the ``adjoint_operator`` to also include the coil sensitivity maps, thus\n", + "performing the coil combination." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "20", + "metadata": {}, + "outputs": [], + "source": [ + "adjoint_operator = (fourier_operator @ csm_operator).H\n", + "img_manual = mrpro.data.IData.from_tensor_and_kheader(*adjoint_operator(kdata.data), kdata.header)" + ] + }, + { + "cell_type": "markdown", + "id": "21", + "metadata": { + "lines_to_next_cell": 0 + }, + "source": [ + "## Further behind the scenes\n", + "There is also an even more manual way to perform the direct reconstruction. 
We can set up the Fourier operator by\n", +    "passing the trajectory and matrix sizes." +   ] +  }, +  { +   "cell_type": "code", +   "execution_count": null, +   "id": "21a", +   "metadata": {}, +   "outputs": [], +   "source": [ "fourier_operator = mrpro.operators.FourierOp(\n", "    recon_matrix=kdata.header.recon_matrix,\n", "    encoding_matrix=kdata.header.encoding_matrix,\n", "    traj=kdata.traj,\n", - ")\n", - "\n", - "# Calculate dcf using the trajectory\n", - "dcf_data = mrpro.data.DcfData.from_traj_voronoi(kdata.traj)\n", - "\n", - "# Perform adjoint Fourier transform\n", - "# Note that operators return a tuple of tensors, so we need to unpack it.\n", - "(img_tensor_coilwise,) = fourier_operator.adjoint(kdata.data * dcf_data.data.unsqueeze(-4))\n", - "img_coilwise = mrpro.data.IData.from_tensor_and_kheader(img_tensor_coilwise, kdata.header)\n", - "\n", - "\n", - "# Calculate and apply coil maps\n", - "csm_data = mrpro.data.CsmData.from_idata_walsh(img_coilwise)\n", - "csm_operator = mrpro.operators.SensitivityOp(csm_data)\n", + ")" ] }, { "cell_type": "markdown", "id": "22", "metadata": {}, "source": [ "We can call one of the algorithms in `mrpro.algorithms.dcf` to calculate the density compensation factors." ] }, { "cell_type": "code", "execution_count": null, "id": "23", "metadata": {}, "outputs": [], "source": [ "kykx = torch.stack((kdata.traj.ky[0, 0], kdata.traj.kx[0, 0]))\n", "dcf_tensor = mrpro.algorithms.dcf.dcf_2d3d_voronoi(kykx)" ] }, { "cell_type": "markdown", "id": "24", "metadata": {}, "source": [ "We use these DCFs to weight the k-space data before performing the adjoint Fourier transform. We can also call\n", "`~mrpro.operators.FourierOp.adjoint` on the Fourier operator instead of obtaining an adjoint operator." ] }, { "cell_type": "code", "execution_count": null, "id": "25", "metadata": {}, "outputs": [], "source": [ "(img_tensor_coilwise,) = fourier_operator.adjoint(dcf_tensor * kdata.data)" ] }, { "cell_type": "markdown", "id": "26", "metadata": {}, "source": [ "Next, we calculate the coil sensitivity maps by using one of the algorithms in `mrpro.algorithms.csm` and set\n", "up a `~mrpro.operators.SensitivityOp` operator." ] }, { "cell_type": "code", "execution_count": null, "id": "26a", "metadata": {}, "outputs": [], "source": [ "csm_data = mrpro.algorithms.csm.walsh(img_tensor_coilwise[0], smoothing_width=5)\n", "csm_operator = mrpro.operators.SensitivityOp(csm_data)" ] }, { "cell_type": "markdown", "id": "27", "metadata": {}, "source": [ "Finally, we perform the coil combination of the coil-wise images and obtain final images." ] }, { "cell_type": "code", "execution_count": null, "id": "28", "metadata": {}, "outputs": [], "source": [ "(img_tensor_coilcombined,) = csm_operator.adjoint(img_tensor_coilwise)\n", "img_more_manual = mrpro.data.IData.from_tensor_and_kheader(img_tensor_coilcombined, kdata.header)" ] }, { "cell_type": "markdown", - "id": "12", - "metadata": { - "lines_to_next_cell": 0 - }, + "id": "29", + "metadata": {}, "source": [ "### Check for equal results\n", "The 3 versions should result in the same image data." 
@@ -219,16 +370,22 @@ { "cell_type": "code", "execution_count": null, - "id": "13", + "id": "30", "metadata": {}, "outputs": [], "source": [ - "import torch\n", - "\n", "# If the assert statement did not raise an exception, the results are equal.\n", - "assert torch.allclose(img.data, img_manual.data)\n", - "assert torch.allclose(img.data, img_more_manual.data)" + "torch.testing.assert_close(img.data, img_manual.data)\n", + "torch.testing.assert_close(img.data, img_more_manual.data, atol=1e-4, rtol=1e-4)" ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "31", + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { @@ -238,7 +395,7 @@ "provenance": [] }, "jupytext": { - "cell_metadata_filter": "-all" + "cell_metadata_filter": "tags,-all" }, "kernelspec": { "display_name": "Python 3 (ipykernel)", diff --git a/examples/notebooks/iterative_sense_reconstruction.ipynb b/examples/notebooks/iterative_sense_reconstruction.ipynb deleted file mode 100644 index 312d469bd..000000000 --- a/examples/notebooks/iterative_sense_reconstruction.ipynb +++ /dev/null @@ -1,351 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "0", - "metadata": { - "lines_to_next_cell": 0 - }, - "source": [ - "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/PTB-MR/mrpro/blob/main/examples/notebooks/iterative_sense_reconstruction.ipynb)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": { - "tags": [ - "remove-cell" - ] - }, - "outputs": [], - "source": [ - "import importlib\n", - "\n", - "if not importlib.util.find_spec('mrpro'):\n", - " %pip install mrpro[notebook]" - ] - }, - { - "cell_type": "markdown", - "id": "2", - "metadata": { - "lines_to_next_cell": 0 - }, - "source": [ - "# Iterative SENSE Reconstruction of 2D golden angle radial data\n", - "Here we use the IterativeSENSEReconstruction class to reconstruct images from ISMRMRD 2D radial data" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": { - "lines_to_next_cell": 0 - }, - "outputs": [], - "source": [ - "# define zenodo URL of the example ismrmd data\n", - "zenodo_url = 'https://zenodo.org/records/10854057/files/'\n", - "fname = 'pulseq_radial_2D_402spokes_golden_angle_with_traj.h5'" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [ - "# Download raw data\n", - "import tempfile\n", - "\n", - "import requests\n", - "\n", - "data_file = tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.h5')\n", - "response = requests.get(zenodo_url + fname, timeout=30)\n", - "data_file.write(response.content)\n", - "data_file.flush()" - ] - }, - { - "cell_type": "markdown", - "id": "5", - "metadata": { - "lines_to_next_cell": 0 - }, - "source": [ - "### Image reconstruction\n", - "We use the IterativeSENSEReconstruction class to reconstruct images from 2D radial data.\n", - "IterativeSENSEReconstruction solves the following reconstruction problem:\n", - "\n", - "Let's assume we have obtained the k-space data $y$ from an image $x$ with an acquisition model (Fourier transforms,\n", - "coil sensitivity maps...) $A$ then we can formulate the forward problem as:\n", - "\n", - "$ y = Ax + n $\n", - "\n", - "where $n$ describes complex Gaussian noise. 
The image $x$ can be obtained by minimizing the functional $F$\n", - "\n", - "$ F(x) = ||W^{\\frac{1}{2}}(Ax - y)||_2^2 $\n", - "\n", - "where $W^\\frac{1}{2}$ is the square root of the density compensation function (which corresponds to a diagonal\n", - "operator).\n", - "\n", - "Setting the derivative of the functional $F$ to zero and rearranging yields\n", - "\n", - "$ A^H W A x = A^H W y$\n", - "\n", - "which is a linear system $Hx = b$ that needs to be solved for $x$." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6", - "metadata": {}, - "outputs": [], - "source": [ - "import mrpro" - ] - }, - { - "cell_type": "markdown", - "id": "7", - "metadata": {}, - "source": [ - "##### Read-in the raw data" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8", - "metadata": {}, - "outputs": [], - "source": [ - "# Use the trajectory that is stored in the ISMRMRD file\n", - "trajectory = mrpro.data.traj_calculators.KTrajectoryIsmrmrd()\n", - "# Load in the Data from the ISMRMRD file\n", - "kdata = mrpro.data.KData.from_file(data_file.name, trajectory)\n", - "kdata.header.recon_matrix.x = 256\n", - "kdata.header.recon_matrix.y = 256" - ] - }, - { - "cell_type": "markdown", - "id": "9", - "metadata": {}, - "source": [ - "##### Direct reconstruction for comparison" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "10", - "metadata": {}, - "outputs": [], - "source": [ - "# For comparison we can carry out a direct reconstruction\n", - "direct_reconstruction = mrpro.algorithms.reconstruction.DirectReconstruction(kdata)\n", - "img_direct = direct_reconstruction(kdata)" - ] - }, - { - "cell_type": "markdown", - "id": "11", - "metadata": {}, - "source": [ - "##### Iterative SENSE reconstruction" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "12", - "metadata": {}, - "outputs": [], - "source": [ - "# We can use the direct reconstruction to obtain the coil maps.\n", - "iterative_sense_reconstruction = mrpro.algorithms.reconstruction.IterativeSENSEReconstruction(\n", - " kdata, csm=direct_reconstruction.csm, n_iterations=4\n", - ")\n", - "img = iterative_sense_reconstruction(kdata)" - ] - }, - { - "cell_type": "markdown", - "id": "13", - "metadata": {}, - "source": [ - "### Behind the scenes" - ] - }, - { - "cell_type": "markdown", - "id": "14", - "metadata": {}, - "source": [ - "##### Set-up the density compensation operator $W$" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "15", - "metadata": { - "lines_to_next_cell": 2 - }, - "outputs": [], - "source": [ - "# The density compensation operator is calculated based on the k-space locations of the acquired data.\n", - "dcf_operator = mrpro.data.DcfData.from_traj_voronoi(kdata.traj).as_operator()" - ] - }, - { - "cell_type": "markdown", - "id": "16", - "metadata": {}, - "source": [ - "##### Set-up the acquisition model $A$" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "17", - "metadata": {}, - "outputs": [], - "source": [ - "# Define Fourier operator using the trajectory and header information in kdata\n", - "fourier_operator = mrpro.operators.FourierOp.from_kdata(kdata)\n", - "\n", - "# Calculate coil maps\n", - "# Note that operators return a tuple of tensors, so we need to unpack it,\n", - "# even though there is only one tensor returned from adjoint operator.\n", - "img_coilwise = mrpro.data.IData.from_tensor_and_kheader(*fourier_operator.H(*dcf_operator(kdata.data)), kdata.header)\n", - "csm_operator = 
mrpro.data.CsmData.from_idata_walsh(img_coilwise).as_operator()\n", - "\n", - "# Create the acquisition operator A\n", - "acquisition_operator = fourier_operator @ csm_operator" - ] - }, - { - "cell_type": "markdown", - "id": "18", - "metadata": {}, - "source": [ - "##### Calculate the right-hand-side of the linear system $b = A^H W y$" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "19", - "metadata": { - "lines_to_next_cell": 2 - }, - "outputs": [], - "source": [ - "(right_hand_side,) = acquisition_operator.H(dcf_operator(kdata.data)[0])" - ] - }, - { - "cell_type": "markdown", - "id": "20", - "metadata": {}, - "source": [ - "##### Set-up the linear self-adjoint operator $H = A^H W A$" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "21", - "metadata": {}, - "outputs": [], - "source": [ - "operator = acquisition_operator.H @ dcf_operator @ acquisition_operator" - ] - }, - { - "cell_type": "markdown", - "id": "22", - "metadata": {}, - "source": [ - "##### Run conjugate gradient" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "23", - "metadata": {}, - "outputs": [], - "source": [ - "img_manual = mrpro.algorithms.optimizers.cg(\n", - " operator, right_hand_side, initial_value=right_hand_side, max_iterations=4, tolerance=0.0\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "24", - "metadata": {}, - "outputs": [], - "source": [ - "# Display the reconstructed image\n", - "import matplotlib.pyplot as plt\n", - "import torch\n", - "\n", - "fig, ax = plt.subplots(1, 3, squeeze=False)\n", - "ax[0, 0].imshow(img_direct.rss()[0, 0, :, :])\n", - "ax[0, 0].set_title('Direct Reconstruction', fontsize=10)\n", - "ax[0, 1].imshow(img.rss()[0, 0, :, :])\n", - "ax[0, 1].set_title('Iterative SENSE', fontsize=10)\n", - "ax[0, 2].imshow(img_manual.abs()[0, 0, 0, :, :])\n", - "ax[0, 2].set_title('\"Manual\" Iterative SENSE', fontsize=10)" - ] - }, - { - "cell_type": "markdown", - "id": "25", - "metadata": {}, - "source": [ - "### Check for equal results\n", - "The two versions result should in the same image data." 
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "26",
- "metadata": {},
- "outputs": [],
- "source": [
- "# If the assert statement did not raise an exception, the results are equal.\n",
- "assert torch.allclose(img.data, img_manual)"
- ]
- }
- ],
- "metadata": {
- "accelerator": "GPU",
- "colab": {
- "gpuType": "T4",
- "provenance": []
- },
- "jupytext": {
- "cell_metadata_filter": "-all"
- },
- "kernelspec": {
- "display_name": "Python 3 (ipykernel)",
- "language": "python",
- "name": "python3"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 5
-}
diff --git a/examples/notebooks/iterative_sense_reconstruction_radial2D.ipynb b/examples/notebooks/iterative_sense_reconstruction_radial2D.ipynb
new file mode 100644
index 000000000..a7fe85bfb
--- /dev/null
+++ b/examples/notebooks/iterative_sense_reconstruction_radial2D.ipynb
@@ -0,0 +1,470 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "0",
+ "metadata": {
+ "lines_to_next_cell": 0
+ },
+ "source": [
+ "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/PTB-MR/mrpro/blob/main/examples/notebooks/iterative_sense_reconstruction_radial2D.ipynb)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "1",
+ "metadata": {
+ "tags": [
+ "remove-cell"
+ ]
+ },
+ "outputs": [],
+ "source": [
+ "import importlib\n",
+ "\n",
+ "if not importlib.util.find_spec('mrpro'):\n",
+ "    %pip install mrpro[notebook]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "2",
+ "metadata": {},
+ "source": [
+ "# Iterative SENSE Reconstruction of 2D golden angle radial data\n",
+ "Here we use an iterative reconstruction method to reconstruct images from ISMRMRD 2D radial data."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "3",
+ "metadata": {},
+ "source": [
+ "We use the `~mrpro.algorithms.reconstruction.IterativeSENSEReconstruction` class to reconstruct images by solving\n",
+ "the following reconstruction problem:\n",
+ "\n",
+ "Let's assume we have obtained the k-space data $y$ from an image $x$ with an acquisition model (Fourier transforms,\n",
+ "coil sensitivity maps...) $A$, then we can formulate the forward problem as:\n",
+ "\n",
+ "$ y = Ax + n $\n",
+ "\n",
+ "where $n$ describes complex Gaussian noise. The image $x$ can be obtained by minimizing the functional $F$\n",
+ "\n",
+ "$ F(x) = ||W^{\\frac{1}{2}}(Ax - y)||_2^2 $\n",
+ "\n",
+ "where $W^\\frac{1}{2}$ is the square root of the density compensation function (which corresponds to a diagonal\n",
+ "operator) used to weight the loss.\n",
+ "\n",
+ "Setting the derivative of the functional $F$ to zero and rearranging yields\n",
+ "\n",
+ "$ A^H W A x = A^H W y$\n",
+ "\n",
+ "which is a linear system $Hx = b$ that needs to be solved for $x$. This is done using the conjugate gradient method.\n",
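+ "\n",
+ "For completeness: the gradient of $F$ is, up to a constant factor,\n",
+ "\n",
+ "$ \\nabla F(x) = A^H W (Ax - y) $\n",
+ "\n",
+ "so setting it to zero and rearranging directly gives the normal equations above."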
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "4",
+ "metadata": {},
+ "source": [
+ "## Using `~mrpro.algorithms.reconstruction.IterativeSENSEReconstruction`\n",
+ "First, we demonstrate the use of `~mrpro.algorithms.reconstruction.IterativeSENSEReconstruction`, before we\n",
+ "peek behind the scenes and implement the reconstruction manually.\n",
+ "\n",
+ "## Read-in the raw data\n",
+ "We read the raw k-space data and the trajectory from the ISMRMRD file\n",
+ "(see for more information on the trajectory calculation).\n",
+ "Our example data contains three datasets:\n",
+ "- `radial2D_402spokes_golden_angle_with_traj.h5` with 402 spokes\n",
+ "- `radial2D_96spokes_golden_angle_with_traj.h5` with 96 spokes\n",
+ "- `radial2D_24spokes_golden_angle_with_traj.h5` with 24 spokes\n",
+ "\n",
+ "We use the 402 spokes dataset for the reconstruction."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "5",
+ "metadata": {
+ "tags": [
+ "hide-cell"
+ ]
+ },
+ "outputs": [],
+ "source": [
+ "# ### Download raw data from Zenodo\n",
+ "import tempfile\n",
+ "from pathlib import Path\n",
+ "\n",
+ "import zenodo_get\n",
+ "\n",
+ "dataset = '14617082'\n",
+ "\n",
+ "tmp = tempfile.TemporaryDirectory()  # RAII, automatically cleaned up\n",
+ "data_folder = Path(tmp.name)\n",
+ "zenodo_get.zenodo_get([dataset, '-r', 5, '-o', data_folder])  # r: retries"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "6",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import mrpro\n",
+ "\n",
+ "trajectory_calculator = mrpro.data.traj_calculators.KTrajectoryIsmrmrd()\n",
+ "kdata = mrpro.data.KData.from_file(data_folder / 'radial2D_402spokes_golden_angle_with_traj.h5', trajectory_calculator)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "7",
+ "metadata": {},
+ "source": [
+ "## Direct reconstruction for comparison\n",
+ "For comparison, we can first carry out a direct reconstruction using the\n",
+ "`~mrpro.algorithms.reconstruction.DirectReconstruction` class.\n",
+ "See also ."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "8",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "direct_reconstruction = mrpro.algorithms.reconstruction.DirectReconstruction(kdata)\n",
+ "img_direct = direct_reconstruction(kdata)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "9",
+ "metadata": {},
+ "source": [
+ "### Setting up the iterative SENSE reconstruction\n",
+ "Now let's use the `~mrpro.algorithms.reconstruction.IterativeSENSEReconstruction` class to reconstruct the image\n",
+ "using the iterative SENSE algorithm.\n",
+ "\n",
+ "We first set up the reconstruction. Here, we reuse the Fourier operator, the DCF, and the coil sensitivity maps\n",
+ "from ``direct_reconstruction``. We use *early stopping* after 4 iterations by setting `n_iterations`.\n",
+ "\n",
+ "```{note}\n",
+ "When setting up the reconstruction, we can also just provide the `~mrpro.data.KData` and let\n",
+ "`~mrpro.algorithms.reconstruction.IterativeSENSEReconstruction` figure\n",
+ "out the Fourier operator, estimate the coil sensitivity maps, and choose a density weighting.\\\n",
+ "We can also provide `~mrpro.data.KData` and only some of the information, such as the sensitivity maps.\n",
+ "In that case, the reconstruction will fill in the missing information based on the `~mrpro.data.KData` object.\n",
+ "```"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "10",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "iterative_sense_reconstruction = mrpro.algorithms.reconstruction.IterativeSENSEReconstruction(\n",
+ "    fourier_op=direct_reconstruction.fourier_op,\n",
+ "    csm=direct_reconstruction.csm,\n",
+ "    dcf=direct_reconstruction.dcf,\n",
+ "    n_iterations=4,\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "11",
+ "metadata": {
+ "lines_to_next_cell": 0
+ },
+ "source": [
+ "### Run the reconstruction\n",
+ "We now run the reconstruction using the ``iterative_sense_reconstruction`` object. We just need to pass the k-space data\n",
+ "and obtain the reconstructed image as an `~mrpro.data.IData` object."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "12",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "img = iterative_sense_reconstruction(kdata)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "13",
+ "metadata": {},
+ "source": [
+ "## Behind the scenes\n",
+ "We now peek behind the scenes to see how the iterative SENSE reconstruction is implemented. We perform all steps\n",
+ "`~mrpro.algorithms.reconstruction.IterativeSENSEReconstruction` does when initialized with only an `~mrpro.data.KData`\n",
+ "object, i.e., we set up a Fourier operator, estimate the coil sensitivity maps, and set up the density weighting,\n",
+ "without reusing anything from ``direct_reconstruction``."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "14",
+ "metadata": {},
+ "source": [
+ "### Set up density compensation operator $W$\n",
+ "We create a density compensation operator $W$ for weighting the loss. We use\n",
+ "Voronoi tessellation of the trajectory to calculate the `~mrpro.data.DcfData`.\n",
+ "\n",
+ "```{note}\n",
+ "Using a weighted loss in iterative SENSE is not necessary, and there has been some discussion about\n",
+ "the benefits and drawbacks. Currently, the iterative SENSE reconstruction in mrpro uses a weighted loss.\n",
+ "This might change in the future.\n",
+ "```"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "15",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "dcf_operator = mrpro.data.DcfData.from_traj_voronoi(kdata.traj).as_operator()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "16",
+ "metadata": {},
+ "source": [
+ "### Set up the acquisition model $A$\n",
+ "We need `~mrpro.operators.FourierOp` and `~mrpro.operators.SensitivityOp` operators to set up the acquisition model\n",
+ "$A$. The Fourier operator is created from the trajectory and header information in `kdata`:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "17",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fourier_operator = mrpro.operators.FourierOp(\n",
+ "    traj=kdata.traj,\n",
+ "    recon_matrix=kdata.header.recon_matrix,\n",
+ "    encoding_matrix=kdata.header.encoding_matrix,\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "18",
+ "metadata": {},
+ "source": [
+ "To estimate the coil sensitivity maps, we first calculate the coil-wise images from the k-space data and then\n",
+ "estimate the coil sensitivity maps using the Walsh method:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "19",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "img_coilwise = mrpro.data.IData.from_tensor_and_kheader(*fourier_operator.H(*dcf_operator(kdata.data)), kdata.header)\n",
+ "csm_data = mrpro.data.CsmData.from_idata_walsh(img_coilwise)\n",
+ "csm_operator = csm_data.as_operator()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "20",
+ "metadata": {},
+ "source": [
+ "Now we can set up the acquisition operator $A$ by composing the Fourier operator and the coil sensitivity maps\n",
+ "operator using ``@``."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "21",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "acquisition_operator = fourier_operator @ csm_operator"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "22",
+ "metadata": {},
+ "source": [
+ "### Calculate the right-hand-side of the linear system\n",
+ "Next, we need to calculate $b = A^H W y$."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "23",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "(right_hand_side,) = (acquisition_operator.H @ dcf_operator)(kdata.data)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "24",
+ "metadata": {},
+ "source": [
+ "### Set-up the linear self-adjoint operator $H = A^H W A$\n",
+ "We set up $H = A^H W A$, using the ``dcf_operator`` and ``acquisition_operator``."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "25",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "operator = acquisition_operator.H @ dcf_operator @ acquisition_operator"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "26",
+ "metadata": {},
+ "source": [
+ "### Run conjugate gradient\n",
+ "Finally, we solve the linear system $Hx = b$ using the conjugate gradient method.\n",
+ "Again, we use early stopping after 4 iterations. Instead, we could also use a tolerance\n",
+ "to stop the iterations when the residual is below a certain threshold."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "27",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "img_manual = mrpro.algorithms.optimizers.cg(\n",
+ "    operator, right_hand_side, initial_value=right_hand_side, max_iterations=4, tolerance=0.0\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "28",
+ "metadata": {},
+ "source": [
+ "## Display the results\n",
+ "We can now compare the results of the iterative SENSE reconstruction with the direct reconstruction.\n",
+ "Both versions, the one using the `~mrpro.algorithms.reconstruction.IterativeSENSEReconstruction` class\n",
+ "and the manual implementation, should result in identical images."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "29", + "metadata": { + "tags": [ + "hide-cell" + ] + }, + "outputs": [], + "source": [ + "import matplotlib.pyplot as plt\n", + "import torch\n", + "\n", + "\n", + "def show_images(*images: torch.Tensor, titles: list[str] | None = None) -> None:\n", + " \"\"\"Plot images.\"\"\"\n", + " n_images = len(images)\n", + " _, axes = plt.subplots(1, n_images, squeeze=False, figsize=(n_images * 3, 3))\n", + " for i in range(n_images):\n", + " axes[0][i].imshow(images[i], cmap='gray')\n", + " axes[0][i].axis('off')\n", + " if titles:\n", + " axes[0][i].set_title(titles[i])\n", + " plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "30", + "metadata": {}, + "outputs": [], + "source": [ + "show_images(\n", + " img_direct.rss()[0, 0],\n", + " img.rss()[0, 0],\n", + " img_manual.abs()[0, 0, 0],\n", + " titles=['Direct', 'Iterative SENSE', 'Manual Iterative SENSE'],\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "31", + "metadata": {}, + "source": [ + "### Check for equal results\n", + " Finally, we check if two images are really identical." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "32", + "metadata": {}, + "outputs": [], + "source": [ + "# If the assert statement did not raise an exception, the results are equal.\n", + "assert torch.allclose(img.data, img_manual)" + ] + }, + { + "cell_type": "markdown", + "id": "33", + "metadata": {}, + "source": [ + "## Next steps\n", + "We can also reconstruct undersampled data: You can replace the filename above to use a dataset with fewer spokes to\n", + "try it out.\\\n", + "If you want to see how to include a regularization term in the optimization problem,\n", + "see the example in ." + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + }, + "jupytext": { + "cell_metadata_filter": "tags,-all" + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/notebooks/iterative_sense_reconstruction_with_regularization.ipynb b/examples/notebooks/iterative_sense_reconstruction_with_regularization.ipynb new file mode 100644 index 000000000..11ebb134b --- /dev/null +++ b/examples/notebooks/iterative_sense_reconstruction_with_regularization.ipynb @@ -0,0 +1,445 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "0", + "metadata": { + "lines_to_next_cell": 0 + }, + "source": [ + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/PTB-MR/mrpro/blob/main/examples/notebooks/iterative_sense_reconstruction_with_regularization.ipynb)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": { + "tags": [ + "remove-cell" + ] + }, + "outputs": [], + "source": [ + "import importlib\n", + "\n", + "if not importlib.util.find_spec('mrpro'):\n", + " %pip install mrpro[notebook]" + ] + }, + { + "cell_type": "markdown", + "id": "2", + "metadata": {}, + "source": [ + "# Regularized Iterative SENSE Reconstruction of 2D golden angle radial data\n", + "Here we use the `~mrpro.algorithms.reconstruction.RegularizedIterativeSENSEReconstruction` class to reconstruct\n", + "undersampled images from 2D radial data." 
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "3",
+ "metadata": {
+ "tags": [
+ "hide-cell"
+ ]
+ },
+ "outputs": [],
+ "source": [
+ "# Download raw data from Zenodo\n",
+ "import tempfile\n",
+ "from pathlib import Path\n",
+ "\n",
+ "import zenodo_get\n",
+ "\n",
+ "dataset = '14617082'\n",
+ "\n",
+ "tmp = tempfile.TemporaryDirectory()  # RAII, automatically cleaned up\n",
+ "data_folder = Path(tmp.name)\n",
+ "zenodo_get.zenodo_get([dataset, '-r', 5, '-o', data_folder])  # r: retries"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "4",
+ "metadata": {},
+ "source": [
+ "### Image reconstruction\n",
+ "We use the `~mrpro.algorithms.reconstruction.RegularizedIterativeSENSEReconstruction` class to reconstruct images\n",
+ "from 2D radial data. It solves the following reconstruction problem:\n",
+ "\n",
+ "Let's assume we have obtained the k-space data $y$ from an image $x$ with an acquisition model (Fourier transforms,\n",
+ "coil sensitivity maps...) $A$, then we can formulate the forward problem as:\n",
+ "\n",
+ "$ y = Ax + n $\n",
+ "\n",
+ "where $n$ describes complex Gaussian noise. The image $x$ can be obtained by minimizing the functional $F$\n",
+ "\n",
+ "$ F(x) = ||W^{\\frac{1}{2}}(Ax - y)||_2^2 $\n",
+ "\n",
+ "where $W^\\frac{1}{2}$ is the square root of the density compensation function (which corresponds to a diagonal\n",
+ "operator). Because this is an ill-posed problem, we can add a regularization term to stabilize the problem and obtain\n",
+ "a solution with certain properties:\n",
+ "\n",
+ "$ F(x) = ||W^{\\frac{1}{2}}(Ax - y)||_2^2 + l||Bx - x_{reg}||_2^2$\n",
+ "\n",
+ "where $l$ is the strength of the regularization, $B$ is a linear operator and $x_{reg}$ is a regularization image.\n",
+ "With this functional $F$ we obtain a solution which is close to $x_{reg}$ and to the acquired data $y$.\n",
+ "\n",
+ "Setting the derivative of the functional $F$ to zero and rearranging yields\n",
+ "\n",
+ "$ (A^H W A + l B^H B) x = A^H W y + l B^H x_{reg}$\n",
+ "\n",
+ "which is a linear system $Hx = b$ that needs to be solved for $x$.\n",
+ "\n",
+ "One important question of course is what to use for $x_{reg}$. For dynamic images (e.g. cine MRI), low-resolution\n",
+ "dynamic images or high-quality static images have been proposed. In recent years, the output of neural networks\n",
+ "has also been used as an image regularizer.\n",
+ "\n",
+ "In this example we are going to use a high-quality image to regularize the reconstruction of an undersampled image.\n",
+ "Both images are obtained from the same data acquisition (one using all the acquired data ($x_{reg}$) and one using\n",
+ "only parts of it ($x$)). This is of course an unrealistic case, but it allows us to study the effect of the\n",
+ "regularization."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "5",
+ "metadata": {},
+ "source": [
+ "### Reading of both fully sampled and undersampled data\n",
+ "We read the raw data and the trajectory from the ISMRMRD file.\n",
+ "We load both the fully sampled and the undersampled data.\n",
+ "The fully sampled data will be used to estimate the coil sensitivity maps and as a regularization image.\n",
+ "The undersampled data will be used to reconstruct the image."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "6",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Read the raw data and the trajectory from ISMRMRD file\n",
+ "import mrpro\n",
+ "\n",
+ "kdata_fullysampled = mrpro.data.KData.from_file(\n",
+ "    data_folder / 'radial2D_402spokes_golden_angle_with_traj.h5',\n",
+ "    mrpro.data.traj_calculators.KTrajectoryIsmrmrd(),\n",
+ ")\n",
+ "kdata_undersampled = mrpro.data.KData.from_file(\n",
+ "    data_folder / 'radial2D_24spokes_golden_angle_with_traj.h5',\n",
+ "    mrpro.data.traj_calculators.KTrajectoryIsmrmrd(),\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "7",
+ "metadata": {},
+ "source": [
+ "##### Image $x_{reg}$ from fully sampled data\n",
+ "We first reconstruct the fully sampled image to use it as a regularization image.\n",
+ "In a real-world scenario, we would not have this image and would have to use a low-resolution image as a prior, or use\n",
+ "a neural network to estimate the regularization image."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "8",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Estimate coil maps. Here we use the fully sampled data to estimate the coil sensitivity maps.\n",
+ "# In a real-world scenario, we would either use a calibration scan (e.g. a separate fully sampled scan) to estimate\n",
+ "# the coil sensitivity maps, or use ESPIRiT or similar methods to estimate them from the undersampled data.\n",
+ "direct_reconstruction = mrpro.algorithms.reconstruction.DirectReconstruction(kdata_fullysampled)\n",
+ "csm = direct_reconstruction.csm\n",
+ "assert csm is not None\n",
+ "\n",
+ "# Unregularized iterative SENSE reconstruction of the fully sampled data\n",
+ "iterative_sense_reconstruction = mrpro.algorithms.reconstruction.IterativeSENSEReconstruction(\n",
+ "    kdata_fullysampled, csm=csm, n_iterations=3\n",
+ ")\n",
+ "img_iterative_sense = iterative_sense_reconstruction(kdata_fullysampled)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "9",
+ "metadata": {},
+ "source": [
+ "##### Image $x$ from undersampled data\n",
+ "We now reconstruct the undersampled image, first without regularization\n",
+ "and then with the fully sampled image as regularization."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "10",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Unregularized iterative SENSE reconstruction of the undersampled data\n",
+ "iterative_sense_reconstruction = mrpro.algorithms.reconstruction.IterativeSENSEReconstruction(\n",
+ "    kdata_undersampled, csm=csm, n_iterations=6\n",
+ ")\n",
+ "img_us_iterative_sense = iterative_sense_reconstruction(kdata_undersampled)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "11",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Regularized iterative SENSE reconstruction of the undersampled data\n",
+ "\n",
+ "regularized_iterative_sense_reconstruction = mrpro.algorithms.reconstruction.RegularizedIterativeSENSEReconstruction(\n",
+ "    kdata_undersampled,\n",
+ "    csm=csm,\n",
+ "    n_iterations=6,\n",
+ "    regularization_data=img_iterative_sense.data,\n",
+ "    regularization_weight=1.0,\n",
+ ")\n",
+ "img_us_regularized_iterative_sense = regularized_iterative_sense_reconstruction(kdata_undersampled)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "12",
+ "metadata": {},
+ "source": [
+ "##### Display the results\n",
+ "Besides the fully sampled image, we display two undersampled images:\n",
+ "The first one is obtained by unregularized iterative SENSE, the second one using regularization."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "13",
+ "metadata": {
+ "tags": [
+ "hide-cell"
+ ]
+ },
+ "outputs": [],
+ "source": [
+ "import matplotlib.pyplot as plt\n",
+ "import torch\n",
+ "\n",
+ "\n",
+ "def show_images(*images: torch.Tensor, titles: list[str] | None = None) -> None:\n",
+ "    \"\"\"Plot images.\"\"\"\n",
+ "    n_images = len(images)\n",
+ "    _, axes = plt.subplots(1, n_images, squeeze=False, figsize=(n_images * 3, 3))\n",
+ "    for i in range(n_images):\n",
+ "        axes[0][i].imshow(images[i], cmap='gray')\n",
+ "        axes[0][i].axis('off')\n",
+ "        if titles:\n",
+ "            axes[0][i].set_title(titles[i])\n",
+ "    plt.show()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "14",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "show_images(\n",
+ "    img_iterative_sense.rss()[0, 0],\n",
+ "    img_us_iterative_sense.rss()[0, 0],\n",
+ "    img_us_regularized_iterative_sense.rss()[0, 0],\n",
+ "    titles=['Fully sampled', 'Iterative SENSE R=20', 'Regularized Iterative SENSE R=20'],\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "15",
+ "metadata": {},
+ "source": [
+ "### Behind the scenes\n",
+ "We now investigate the steps that are done in the regularized iterative SENSE reconstruction and\n",
+ "perform them manually. This also demonstrates how to use the `~mrpro` operators and algorithms\n",
+ "to build your own reconstruction pipeline."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "16",
+ "metadata": {},
+ "source": [
+ "##### Set-up the density compensation operator $W$ and acquisition model $A$\n",
+ "\n",
+ "This is very similar to .\n",
+ "For more details, please refer to that notebook."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "17",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "dcf_operator = mrpro.data.DcfData.from_traj_voronoi(kdata_undersampled.traj).as_operator()\n",
+ "fourier_operator = mrpro.operators.FourierOp.from_kdata(kdata_undersampled)\n",
+ "csm_operator = csm.as_operator()\n",
+ "acquisition_operator = fourier_operator @ csm_operator"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "18",
+ "metadata": {},
+ "source": [
+ "##### Calculate the right-hand-side of the linear system\n",
+ "We calculate $b = A^H W y + l x_{reg}$.\n",
+ "Here, we make use of operator composition using ``@``."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "19",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "regularization_weight = 1.0\n",
+ "regularization_image = img_iterative_sense.data\n",
+ "\n",
+ "(right_hand_side,) = (acquisition_operator.H @ dcf_operator)(kdata_undersampled.data)\n",
+ "right_hand_side = right_hand_side + regularization_weight * regularization_image"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "20",
+ "metadata": {},
+ "source": [
+ "##### Set-up the linear self-adjoint operator $H$\n",
+ "We define $H = A^H W A + l \\cdot \\mathrm{Id}$. We use the `~mrpro.operators.IdentityOp` and make\n",
+ "use of operator composition using ``@``, addition using ``+`` and multiplication using ``*``.\n",
+ "The resulting operator is a `~mrpro.operators.LinearOperator` object."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "21",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "operator = (\n",
+ "    acquisition_operator.H @ dcf_operator @ acquisition_operator + mrpro.operators.IdentityOp() * regularization_weight\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "22",
+ "metadata": {},
+ "source": [
+ "##### Run conjugate gradient\n",
+ "We solve the linear system $Hx = b$ using the conjugate gradient method.\n",
+ "Here, we use early stopping after 8 iterations. Instead, we could also use a tolerance to stop the iterations when\n",
+ "the residual is small enough."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "23",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "img_manual = mrpro.algorithms.optimizers.cg(\n",
+ "    operator, right_hand_side, initial_value=right_hand_side, max_iterations=8, tolerance=0.0\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "24",
+ "metadata": {},
+ "source": [
+ "##### Display the reconstructed image\n",
+ "We can now compare our 'manual' reconstruction with the regularized iterative SENSE reconstruction\n",
+ "obtained using `~mrpro.algorithms.reconstruction.RegularizedIterativeSENSEReconstruction`."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "25",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "show_images(\n",
+ "    img_us_regularized_iterative_sense.rss()[0, 0],\n",
+ "    img_manual.abs()[0, 0, 0],\n",
+ "    titles=['RegularizedIterativeSense', 'Manual'],\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "26",
+ "metadata": {},
+ "source": [
+ "We can also check if the results are equal by comparing the actual image data.\n",
+ "If the check does not raise an exception, the results are equal.\n",
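+ "\n",
+ "As a sketch: if the two results differed only by numerical noise, we could loosen the comparison by passing\n",
+ "explicit (here hypothetical) tolerances to `torch.testing.assert_close`, as in\n",
+ "\n",
+ "```python\n",
+ "torch.testing.assert_close(a, b, atol=1e-4, rtol=1e-4)\n",
+ "```"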
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "27",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "torch.testing.assert_close(img_us_regularized_iterative_sense.data, img_manual)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "28",
+ "metadata": {},
+ "source": [
+ "### Next steps\n",
+ "\n",
+ "We are cheating here because we used the fully sampled image as the regularization image. In real-world applications\n",
+ "we would not have it. One option is to apply a low-pass filter to the undersampled k-space data to try to reduce the\n",
+ "streaking artifacts and use that as a regularization image. Try that and see if you can also improve the image quality\n",
+ "compared to the unregularized images."
+ ]
+ }
+ ],
+ "metadata": {
+ "accelerator": "GPU",
+ "colab": {
+ "gpuType": "T4",
+ "provenance": []
+ },
+ "jupytext": {
+ "cell_metadata_filter": "tags,-all"
+ },
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/examples/notebooks/pulseq_2d_radial_golden_angle.ipynb b/examples/notebooks/pulseq_2d_radial_golden_angle.ipynb
deleted file mode 100644
index 1faf8d0a3..000000000
--- a/examples/notebooks/pulseq_2d_radial_golden_angle.ipynb
+++ /dev/null
@@ -1,225 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "markdown",
- "id": "0",
- "metadata": {
- "lines_to_next_cell": 0
- },
- "source": [
- "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/PTB-MR/mrpro/blob/main/examples/notebooks/pulseq_2d_radial_golden_angle.ipynb)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "1",
- "metadata": {
- "tags": [
- "remove-cell"
- ]
- },
- "outputs": [],
- "source": [
- "import importlib\n",
- "\n",
- "if not importlib.util.find_spec('mrpro'):\n",
- "    %pip install mrpro[notebook]"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "2",
- "metadata": {},
- "source": [
- "# Reconstruction of 2D golden angle radial data from pulseq sequence\n",
- "Here we manually do all steps of a direction reconstruction, i.e.\n",
- "CSM estimation, density compensation, adjoint fourier transform, and coil combination.\n",
- "See also the example `pulseq_2d_radial_golden_angle_direct_reconstruction.py`\n",
- "for a more high-level example using the `DirectReconstruction` class."
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [ - "# Imports\n", - "import tempfile\n", - "\n", - "import matplotlib.pyplot as plt\n", - "import requests\n", - "from mrpro.algorithms.reconstruction import DirectReconstruction\n", - "from mrpro.data import KData\n", - "from mrpro.data.traj_calculators import KTrajectoryIsmrmrd, KTrajectoryPulseq, KTrajectoryRadial2D" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [ - "# define zenodo records URL and create a temporary directory and h5-file\n", - "zenodo_url = 'https://zenodo.org/records/10854057/files/'\n", - "fname = 'pulseq_radial_2D_402spokes_golden_angle_with_traj.h5'\n", - "data_file = tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.h5')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5", - "metadata": {}, - "outputs": [], - "source": [ - "# Download raw data using requests\n", - "response = requests.get(zenodo_url + fname, timeout=30)\n", - "data_file.write(response.content)\n", - "data_file.flush()" - ] - }, - { - "cell_type": "markdown", - "id": "6", - "metadata": {}, - "source": [ - "### Image reconstruction using KTrajectoryIsmrmrd\n", - "This will use the trajectory that is stored in the ISMRMRD file." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7", - "metadata": {}, - "outputs": [], - "source": [ - "# Read the raw data and the trajectory from ISMRMRD file\n", - "kdata = KData.from_file(data_file.name, KTrajectoryIsmrmrd())\n", - "\n", - "# Reconstruct image\n", - "direct_reconstruction = DirectReconstruction(kdata)\n", - "img_using_ismrmrd_traj = direct_reconstruction(kdata)" - ] - }, - { - "cell_type": "markdown", - "id": "8", - "metadata": {}, - "source": [ - "### Image reconstruction using KTrajectoryRadial2D\n", - "This will calculate the trajectory using the radial 2D trajectory calculator." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9", - "metadata": {}, - "outputs": [], - "source": [ - "# Read raw data and calculate trajectory using KTrajectoryRadial2D\n", - "kdata = KData.from_file(data_file.name, KTrajectoryRadial2D())\n", - "\n", - "# Reconstruct image\n", - "direct_reconstruction = DirectReconstruction(kdata)\n", - "img_using_rad2d_traj = direct_reconstruction(kdata)" - ] - }, - { - "cell_type": "markdown", - "id": "10", - "metadata": {}, - "source": [ - "### Image reconstruction using KTrajectoryPulseq\n", - "This will calculate the trajectory from the pulseq sequence file\n", - "using the PyPulseq trajectory calculator. Please note that this method\n", - "requires the pulseq sequence file that was used to acquire the data.\n", - "The path to the sequence file is provided as an argument to KTrajectoryPulseq." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "11", - "metadata": {}, - "outputs": [], - "source": [ - "# download the sequence file from zenodo\n", - "zenodo_url = 'https://zenodo.org/records/10868061/files/'\n", - "seq_fname = 'pulseq_radial_2D_402spokes_golden_angle.seq'\n", - "seq_file = tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.seq')\n", - "response = requests.get(zenodo_url + seq_fname, timeout=30)\n", - "seq_file.write(response.content)\n", - "seq_file.flush()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "12", - "metadata": {}, - "outputs": [], - "source": [ - "# Read raw data and calculate trajectory using KTrajectoryPulseq\n", - "kdata = KData.from_file(data_file.name, KTrajectoryPulseq(seq_path=seq_file.name))\n", - "\n", - "# Reconstruct image\n", - "direct_reconstruction = DirectReconstruction(kdata)\n", - "img_using_pulseq_traj = direct_reconstruction(kdata)" - ] - }, - { - "cell_type": "markdown", - "id": "13", - "metadata": { - "lines_to_next_cell": 0 - }, - "source": [ - "### Plot the different reconstructed images\n", - "Please note: there is currently a mismatch between the actual trajectory\n", - "that was used to acquire the data and the trajectory calculated with KTrajectoryRadial2D.\n", - "This leads to a deviation between the image reconstructed with KTrajectoryRadial2D\n", - "and the other two methods. In the future, we will upload new measurement data with\n", - "an updated trajectory and adjust this example accordingly." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "14", - "metadata": {}, - "outputs": [], - "source": [ - "titles = ['KTrajectoryIsmrmrd', 'KTrajectoryRadial2D', 'KTrajectoryPulseq']\n", - "plt.subplots(1, len(titles))\n", - "for i, img in enumerate([img_using_ismrmrd_traj.rss(), img_using_rad2d_traj.rss(), img_using_pulseq_traj.rss()]):\n", - " plt.subplot(1, len(titles), i + 1)\n", - " plt.imshow(img[0, 0, :, :])\n", - " plt.title(titles[i])\n", - " plt.axis('off')" - ] - } - ], - "metadata": { - "accelerator": "GPU", - "colab": { - "gpuType": "T4", - "provenance": [] - }, - "jupytext": { - "cell_metadata_filter": "-all" - }, - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/examples/notebooks/qmri_sg_challenge_2024_t1.ipynb b/examples/notebooks/qmri_sg_challenge_2024_t1.ipynb index 040f1a16a..c1a0ce151 100644 --- a/examples/notebooks/qmri_sg_challenge_2024_t1.ipynb +++ b/examples/notebooks/qmri_sg_challenge_2024_t1.ipynb @@ -32,40 +32,8 @@ "id": "2", "metadata": {}, "source": [ - "# QMRI Challenge ISMRM 2024 - $T_1$ mapping" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [ - "# Imports\n", - "import shutil\n", - "import tempfile\n", - "import zipfile\n", - "from pathlib import Path\n", - "\n", - "import matplotlib.pyplot as plt\n", - "import torch\n", - "import zenodo_get\n", - "from einops import rearrange\n", - "from mpl_toolkits.axes_grid1 import make_axes_locatable # type: ignore [import-untyped]\n", - "from mrpro.algorithms.optimizers import adam\n", - "from mrpro.data import IData\n", - "from mrpro.operators import MagnitudeOp\n", - "from mrpro.operators.functionals import MSE\n", - "from mrpro.operators.models import InversionRecovery" - ] - }, - { - "cell_type": "markdown", - "id": "4", - "metadata": {}, - "source": [ - "### Overview\n", + "# QMRI 
Challenge ISMRM 2024 - $T_1$ mapping\n",
+ "In the 2024 ISMRM QMRI Challenge, the goal is to estimate $T_1$ maps from a set of inversion recovery images.\n",
 "The dataset consists of images obtained at 10 different inversion times using a turbo spin echo sequence. Each\n",
 "inversion time is saved in a separate DICOM file. In order to obtain a $T_1$ map, we are going to:\n",
 "- download the data from Zenodo\n",
@@ -77,7 +45,7 @@
 },
 {
 "cell_type": "markdown",
- "id": "5",
+ "id": "3",
 "metadata": {},
 "source": [
 "### Get data from Zenodo"
@@ -86,12 +54,20 @@
 {
 "cell_type": "code",
 "execution_count": null,
- "id": "6",
+ "id": "4",
 "metadata": {},
 "outputs": [],
 "source": [
- "data_folder = Path(tempfile.mkdtemp())\n",
+ "import tempfile\n",
+ "import zipfile\n",
+ "from pathlib import Path\n",
+ "\n",
+ "import zenodo_get\n",
+ "\n",
 "dataset = '10868350'\n",
+ "\n",
+ "tmp = tempfile.TemporaryDirectory()  # RAII, automatically cleaned up\n",
+ "data_folder = Path(tmp.name)\n",
 "zenodo_get.zenodo_get([dataset, '-r', 5, '-o', data_folder])  # r: retries\n",
 "with zipfile.ZipFile(data_folder / Path('T1 IR.zip'), 'r') as zip_ref:\n",
 "    zip_ref.extractall(data_folder)"
@@ -99,23 +75,27 @@
 },
 {
 "cell_type": "markdown",
- "id": "7",
+ "id": "5",
 "metadata": {
 "lines_to_next_cell": 0
 },
 "source": [
- "### Create image data (IData) object with different inversion times"
+ "### Create image data (IData) object with different inversion times\n",
+ "We read in the DICOM files and combine them in an `~mrpro.data.IData` object.\n",
+ "The inversion times stored in the DICOM files are then available in the header of the `~mrpro.data.IData` object."
 ]
 },
 {
 "cell_type": "code",
 "execution_count": null,
- "id": "8",
+ "id": "6",
 "metadata": {},
 "outputs": [],
 "source": [
+ "import mrpro\n",
+ "\n",
 "ti_dicom_files = data_folder.glob('**/*.dcm')\n",
- "idata_multi_ti = IData.from_dicom_files(ti_dicom_files)\n",
+ "idata_multi_ti = mrpro.data.IData.from_dicom_files(ti_dicom_files)\n",
 "\n",
 "if idata_multi_ti.header.ti is None:\n",
 "    raise ValueError('Inversion times need to be defined in the DICOM files.')"
@@ -124,20 +104,47 @@
 {
 "cell_type": "code",
 "execution_count": null,
- "id": "9",
+ "id": "7",
+ "metadata": {
+ "tags": [
+ "hide-cell"
+ ]
+ },
+ "outputs": [],
+ "source": [
+ "import matplotlib.pyplot as plt\n",
+ "import torch\n",
+ "\n",
+ "\n",
+ "def show_images(*images: torch.Tensor, titles: list[str] | None = None) -> None:\n",
+ "    \"\"\"Plot images.\"\"\"\n",
+ "    n_images = len(images)\n",
+ "    _, axes = plt.subplots(1, n_images, squeeze=False, figsize=(n_images * 3, 3))\n",
+ "    for i in range(n_images):\n",
+ "        axes[0][i].imshow(images[i], cmap='gray')\n",
+ "        axes[0][i].axis('off')\n",
+ "        if titles:\n",
+ "            axes[0][i].set_title(titles[i])\n",
+ "    plt.show()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "8",
 "metadata": {},
 "outputs": [],
 "source": [
 "# Let's have a look at some of the images\n",
- "fig, axes = plt.subplots(1, 3, squeeze=False)\n",
- "for idx, ax in enumerate(axes.flatten()):\n",
- "    ax.imshow(torch.abs(idata_multi_ti.data[idx, 0, 0, :, :]))\n",
- "    ax.set_title(f'TI = {idata_multi_ti.header.ti[idx]:.3f}s')"
+ "show_images(\n",
+ "    *idata_multi_ti.data[:, 0, 0].abs(),\n",
+ "    titles=[f'TI = {ti:.3f}s' for ti in idata_multi_ti.header.ti.squeeze()],\n",
+ ")"
 ]
 },
 {
 "cell_type": "markdown",
- "id": "10",
+ "id": "9",
 "metadata": {},
 "source": [
 "### Signal model and loss function\n",
@@ -152,19 +159,17 @@
 {
 "cell_type": "code",
 "execution_count": null,
"id": "11", + "id": "10", "metadata": {}, "outputs": [], "source": [ - "model = MagnitudeOp() @ InversionRecovery(ti=idata_multi_ti.header.ti)" + "model = mrpro.operators.MagnitudeOp() @ mrpro.operators.models.InversionRecovery(ti=idata_multi_ti.header.ti)" ] }, { "cell_type": "markdown", - "id": "12", - "metadata": { - "lines_to_next_cell": 0 - }, + "id": "11", + "metadata": {}, "source": [ "As a loss function for the optimizer, we calculate the mean-squared error between the image data $x$ and our signal\n", "model $q$." @@ -173,19 +178,17 @@ { "cell_type": "code", "execution_count": null, - "id": "13", + "id": "12", "metadata": {}, "outputs": [], "source": [ - "mse = MSE(idata_multi_ti.data.abs())" + "mse = mrpro.operators.functionals.MSE(idata_multi_ti.data.abs())" ] }, { "cell_type": "markdown", - "id": "14", - "metadata": { - "lines_to_next_cell": 0 - }, + "id": "13", + "metadata": {}, "source": [ "Now we can simply combine the two into a functional to solve\n", "\n", @@ -195,7 +198,7 @@ { "cell_type": "code", "execution_count": null, - "id": "15", + "id": "14", "metadata": {}, "outputs": [], "source": [ @@ -204,7 +207,7 @@ }, { "cell_type": "markdown", - "id": "16", + "id": "15", "metadata": {}, "source": [ "### Starting values for the fit\n", @@ -226,7 +229,7 @@ { "cell_type": "code", "execution_count": null, - "id": "17", + "id": "16", "metadata": {}, "outputs": [], "source": [ @@ -237,14 +240,26 @@ "# just a scaling factor and we are going to normalize the signal curves.\n", "(signal_dictionary,) = model(torch.ones(1), t1_dictionary)\n", "signal_dictionary = signal_dictionary.to(dtype=torch.complex64)\n", - "vector_norm = torch.linalg.vector_norm(signal_dictionary, dim=0)\n", - "signal_dictionary /= vector_norm\n", + "signal_dictionary /= torch.linalg.vector_norm(signal_dictionary, dim=0)\n", "\n", "# Calculate the dot-product and select for each voxel the T1 values that correspond to the maximum of the dot-product\n", - "n_y, n_x = idata_multi_ti.data.shape[-2:]\n", - "dot_product = torch.mm(rearrange(idata_multi_ti.data, 'other 1 z y x->(z y x) other'), signal_dictionary)\n", - "idx_best_match = torch.argmax(torch.abs(dot_product), dim=1)\n", - "t1_start = rearrange(t1_dictionary[idx_best_match], '(y x)->1 1 y x', y=n_y, x=n_x)" + "import einops\n", + "\n", + "dot_product = einops.einsum(\n", + " idata_multi_ti.data,\n", + " signal_dictionary,\n", + " 'ti ..., ti t1 -> t1 ...',\n", + ")\n", + "idx_best_match = dot_product.abs().argmax(dim=0)\n", + "t1_start = t1_dictionary[idx_best_match]" + ] + }, + { + "cell_type": "markdown", + "id": "17", + "metadata": {}, + "source": [ + "The maximum absolute value observed is a good approximation for $M_0$" ] }, { @@ -254,102 +269,120 @@ "metadata": {}, "outputs": [], "source": [ - "# The maximum absolute value observed is a good approximation for m0\n", - "m0_start = torch.amax(torch.abs(idata_multi_ti.data), 0)" + "m0_start = idata_multi_ti.data.abs().amax(dim=0)" + ] + }, + { + "cell_type": "markdown", + "id": "19", + "metadata": {}, + "source": [ + "#### Visualize the starting values\n", + "Let's have a look at the starting values for $M_0$ and $T_1$:" ] }, { "cell_type": "code", "execution_count": null, - "id": "19", + "id": "20", "metadata": {}, "outputs": [], "source": [ - "# Visualize the starting values\n", - "fig, axes = plt.subplots(1, 2, figsize=(8, 2), squeeze=False)\n", - "colorbar_ax = [make_axes_locatable(ax).append_axes('right', size='5%', pad=0.05) for ax in axes[0, :]]\n", - "im = axes[0, 0].imshow(m0_start[0, 0, 
...])\n",
+ "fig, axes = plt.subplots(1, 2, figsize=(6, 2), squeeze=False)\n",
+ "\n",
+ "im = axes[0, 0].imshow(m0_start[0, 0])\n",
 "axes[0, 0].set_title('$M_0$ start values')\n",
- "fig.colorbar(im, cax=colorbar_ax[0])\n",
- "im = axes[0, 1].imshow(t1_start[0, 0, ...], vmin=0, vmax=2.5)\n",
+ "axes[0, 0].set_axis_off()\n",
+ "fig.colorbar(im, ax=axes[0, 0], label='a.u.')\n",
+ "\n",
+ "im = axes[0, 1].imshow(t1_start[0, 0], vmin=0, vmax=2.5, cmap='magma')\n",
 "axes[0, 1].set_title('$T_1$ start values')\n",
- "fig.colorbar(im, cax=colorbar_ax[1], label='s')"
+ "axes[0, 1].set_axis_off()\n",
+ "fig.colorbar(im, ax=axes[0, 1], label='s')\n",
+ "\n",
+ "plt.show()"
 ]
 },
 {
 "cell_type": "markdown",
- "id": "20",
+ "id": "21",
 "metadata": {},
 "source": [
- "### Carry out fit"
+ "### Carry out fit\n",
+ "We are now ready to carry out the fit. We are going to use the `~mrpro.algorithms.optimizers.adam` optimizer.\n",
+ "If there is a GPU available, we can use it by moving both the data and the model to the GPU."
 ]
 },
 {
 "cell_type": "code",
 "execution_count": null,
- "id": "21",
+ "id": "22",
 "metadata": {},
 "outputs": [],
 "source": [
+ "# Move initial values and model to GPU if available\n",
+ "if torch.cuda.is_available():\n",
+ "    print('Using GPU')\n",
+ "    functional.cuda()\n",
+ "    m0_start = m0_start.cuda()\n",
+ "    t1_start = t1_start.cuda()\n",
+ "\n",
 "# Hyperparameters for optimizer\n",
 "max_iter = 2000\n",
 "lr = 1e-1\n",
 "\n",
 "# Run optimization\n",
- "params_result = adam(functional, [m0_start, t1_start], max_iter=max_iter, lr=lr)\n",
- "m0, t1 = (p.detach() for p in params_result)\n",
- "m0[torch.isnan(t1)] = 0\n",
- "t1[torch.isnan(t1)] = 0"
+ "result = mrpro.algorithms.optimizers.adam(functional, [m0_start, t1_start], max_iter=max_iter, lr=lr)\n",
+ "m0, t1 = (p.detach().cpu() for p in result)"
 ]
 },
 {
 "cell_type": "markdown",
- "id": "22",
+ "id": "23",
 "metadata": {},
 "source": [
 "### Visualize the final results\n",
+ "\n",
 "To get an impression of how well the fit has worked, we are going to calculate the relative error between\n",
 "\n",
 "$E_{relative} = \\sum_{TI}\\frac{|(q(M_0, T_1, TI) - x)|}{|x|}$\n",
 "\n",
- "on a voxel-by-voxel basis"
+ "on a voxel-by-voxel basis.\n",
+ "We also mask out the background by thresholding on $M_0$."
] }, { "cell_type": "code", "execution_count": null, - "id": "23", - "metadata": { - "lines_to_next_cell": 2 - }, + "id": "24", + "metadata": {}, "outputs": [], "source": [ - "img_mult_te_abs_sum = torch.sum(torch.abs(idata_multi_ti.data), dim=0)\n", - "relative_absolute_error = torch.sum(torch.abs(model(m0, t1)[0] - idata_multi_ti.data), dim=0) / (\n", - " img_mult_te_abs_sum + 1e-9\n", - ")\n", + "error = model(m0, t1)[0] - idata_multi_ti.data\n", + "relative_absolute_error = error.abs().sum(dim=0) / (idata_multi_ti.data.abs().sum(dim=0) + 1e-9)\n", + "\n", + "mask = torch.isnan(t1) | (m0 < 500)\n", + "m0[mask] = 0\n", + "t1[mask] = 0\n", + "relative_absolute_error[mask] = 0\n", + "\n", "fig, axes = plt.subplots(1, 3, figsize=(10, 2), squeeze=False)\n", - "colorbar_ax = [make_axes_locatable(ax).append_axes('right', size='5%', pad=0.05) for ax in axes[0, :]]\n", - "im = axes[0, 0].imshow(m0[0, 0, ...])\n", + "im = axes[0, 0].imshow(m0[0, 0])\n", "axes[0, 0].set_title('$M_0$')\n", - "fig.colorbar(im, cax=colorbar_ax[0])\n", - "im = axes[0, 1].imshow(t1[0, 0, ...], vmin=0, vmax=2.5)\n", + "axes[0, 0].set_axis_off()\n", + "fig.colorbar(im, ax=axes[0, 0], label='a.u.')\n", + "\n", + "im = axes[0, 1].imshow(t1[0, 0], vmin=0, vmax=2.5, cmap='magma')\n", "axes[0, 1].set_title('$T_1$')\n", - "fig.colorbar(im, cax=colorbar_ax[1], label='s')\n", - "im = axes[0, 2].imshow(relative_absolute_error[0, 0, ...], vmin=0, vmax=1.0)\n", + "axes[0, 1].set_axis_off()\n", + "fig.colorbar(im, ax=axes[0, 1], label='s')\n", + "\n", + "im = axes[0, 2].imshow(relative_absolute_error[0, 0], vmin=0, vmax=0.1)\n", "axes[0, 2].set_title('Relative error')\n", - "fig.colorbar(im, cax=colorbar_ax[2])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "24", - "metadata": {}, - "outputs": [], - "source": [ - "# Clean-up by removing temporary directory\n", - "shutil.rmtree(data_folder)" + "axes[0, 2].set_axis_off()\n", + "fig.colorbar(im, ax=axes[0, 2])\n", + "\n", + "plt.show()" ] } ], @@ -360,7 +393,7 @@ "provenance": [] }, "jupytext": { - "cell_metadata_filter": "-all" + "cell_metadata_filter": "tags,-all" }, "kernelspec": { "display_name": "Python 3 (ipykernel)", diff --git a/examples/notebooks/qmri_sg_challenge_2024_t2_star.ipynb b/examples/notebooks/qmri_sg_challenge_2024_t2_star.ipynb deleted file mode 100644 index aab3f04b6..000000000 --- a/examples/notebooks/qmri_sg_challenge_2024_t2_star.ipynb +++ /dev/null @@ -1,330 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "0", - "metadata": { - "lines_to_next_cell": 0 - }, - "source": [ - "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/PTB-MR/mrpro/blob/main/examples/notebooks/qmri_sg_challenge_2024_t2_star.ipynb)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": { - "tags": [ - "remove-cell" - ] - }, - "outputs": [], - "source": [ - "import importlib\n", - "\n", - "if not importlib.util.find_spec('mrpro'):\n", - " %pip install mrpro[notebook]" - ] - }, - { - "cell_type": "markdown", - "id": "2", - "metadata": {}, - "source": [ - "# QMRI Challenge ISMRM 2024 - $T_2^*$ mapping" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [ - "# Imports\n", - "import shutil\n", - "import tempfile\n", - "import time\n", - "import zipfile\n", - "from pathlib import Path\n", - "\n", - "import matplotlib.pyplot as plt\n", - "import torch\n", - "import zenodo_get\n", - 
"from mpl_toolkits.axes_grid1 import make_axes_locatable # type: ignore [import-untyped]\n", - "from mrpro.algorithms.optimizers import adam\n", - "from mrpro.data import IData\n", - "from mrpro.operators.functionals import MSE\n", - "from mrpro.operators.models import MonoExponentialDecay" - ] - }, - { - "cell_type": "markdown", - "id": "4", - "metadata": {}, - "source": [ - "### Overview\n", - "The dataset consists of gradient echo images obtained at 11 different echo times, each saved in a separate DICOM file.\n", - "In order to obtain a $T_2^*$ map, we are going to:\n", - "- download the data from Zenodo\n", - "- read in the DICOM files (one for each echo time) and combine them in an IData object\n", - "- define a signal model (mono-exponential decay) and data loss (mean-squared error) function\n", - "- carry out a fit using ADAM from PyTorch\n", - "\n", - "Everything is based on PyTorch, and therefore we can run the code either on the CPU or GPU. Simply set the flag below\n", - "to True to run the parameter estimation on the GPU." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5", - "metadata": {}, - "outputs": [], - "source": [ - "flag_use_cuda = False" - ] - }, - { - "cell_type": "markdown", - "id": "6", - "metadata": {}, - "source": [ - "### Get data from Zenodo" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7", - "metadata": {}, - "outputs": [], - "source": [ - "data_folder = Path(tempfile.mkdtemp())\n", - "dataset = '10868361'\n", - "zenodo_get.zenodo_get([dataset, '-r', 5, '-o', data_folder]) # r: retries\n", - "with zipfile.ZipFile(data_folder / Path('T2star.zip'), 'r') as zip_ref:\n", - " zip_ref.extractall(data_folder)" - ] - }, - { - "cell_type": "markdown", - "id": "8", - "metadata": { - "lines_to_next_cell": 0 - }, - "source": [ - "### Create image data (IData) object with different echo times" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9", - "metadata": {}, - "outputs": [], - "source": [ - "te_dicom_files = data_folder.glob('**/*.dcm')\n", - "idata_multi_te = IData.from_dicom_files(te_dicom_files)\n", - "# scaling the signal down to make the optimization easier\n", - "idata_multi_te.data[...] 
= idata_multi_te.data / 1500\n", - "\n", - "# Move the data to the GPU\n", - "if flag_use_cuda:\n", - " idata_multi_te = idata_multi_te.cuda()\n", - "\n", - "if idata_multi_te.header.te is None:\n", - " raise ValueError('Echo times need to be defined in the DICOM files.')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "10", - "metadata": {}, - "outputs": [], - "source": [ - "# Let's have a look at some of the images\n", - "fig, axes = plt.subplots(1, 3, squeeze=False)\n", - "for idx, ax in enumerate(axes.flatten()):\n", - " ax.imshow(torch.abs(idata_multi_te.data[idx, 0, 0, :, :]).cpu())\n", - " ax.set_title(f'TE = {idata_multi_te.header.te[idx]:.3f}s')" - ] - }, - { - "cell_type": "markdown", - "id": "11", - "metadata": {}, - "source": [ - "### Signal model and loss function\n", - "We use the model $q$\n", - "\n", - "$q(TE) = M_0 e^{-TE/T_2^*}$\n", - "\n", - "with the equilibrium magnetization $M_0$, the echo time $TE$, and $T_2^*$" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "12", - "metadata": {}, - "outputs": [], - "source": [ - "model = MonoExponentialDecay(decay_time=idata_multi_te.header.te)" - ] - }, - { - "cell_type": "markdown", - "id": "13", - "metadata": { - "lines_to_next_cell": 0 - }, - "source": [ - "As a loss function for the optimizer, we calculate the mean-squared error between the image data $x$ and our signal\n", - "model $q$." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "14", - "metadata": {}, - "outputs": [], - "source": [ - "mse = MSE(idata_multi_te.data)" - ] - }, - { - "cell_type": "markdown", - "id": "15", - "metadata": { - "lines_to_next_cell": 0 - }, - "source": [ - "Now we can simply combine the two into a functional which will then solve\n", - "\n", - "$ \\min_{M_0, T_2^*} ||q(M_0, T_2^*, TE) - x||_2^2$" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "16", - "metadata": {}, - "outputs": [], - "source": [ - "functional = mse @ model" - ] - }, - { - "cell_type": "markdown", - "id": "17", - "metadata": {}, - "source": [ - "### Carry out fit" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "18", - "metadata": {}, - "outputs": [], - "source": [ - "# The shortest echo time is a good approximation of the equilibrium magnetization\n", - "m0_start = torch.abs(idata_multi_te.data[torch.argmin(idata_multi_te.header.te), ...])\n", - "# 20 ms as a starting value for T2*\n", - "t2star_start = torch.ones(m0_start.shape, dtype=torch.float32, device=m0_start.device) * 20e-3\n", - "\n", - "# Hyperparameters for optimizer\n", - "max_iter = 20000\n", - "lr = 1e-3\n", - "\n", - "if flag_use_cuda:\n", - " functional.cuda()\n", - "\n", - "# Run optimization\n", - "start_time = time.time()\n", - "params_result = adam(functional, [m0_start, t2star_start], max_iter=max_iter, lr=lr)\n", - "print(f'Optimization took {time.time() - start_time}s')\n", - "m0, t2star = (p.detach() for p in params_result)\n", - "m0[torch.isnan(t2star)] = 0\n", - "t2star[torch.isnan(t2star)] = 0" - ] - }, - { - "cell_type": "markdown", - "id": "19", - "metadata": { - "lines_to_next_cell": 0 - }, - "source": [ - "### Visualize the final results\n", - "To get an impression of how well the fit has worked, we are going to calculate the relative error between\n", - "\n", - "$E_{relative} = \\sum_{TE}\\frac{|(q(M_0, T_2^*, TE) - x)|}{|x|}$\n", - "\n", - "on a voxel-by-voxel basis." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "20", - "metadata": { - "lines_to_next_cell": 2 - }, - "outputs": [], - "source": [ - "img_mult_te_abs_sum = torch.sum(torch.abs(idata_multi_te.data), dim=0)\n", - "relative_absolute_error = torch.sum(torch.abs(model(m0, t2star)[0] - idata_multi_te.data), dim=0) / (\n", - " img_mult_te_abs_sum + 1e-9\n", - ")\n", - "fig, axes = plt.subplots(1, 3, figsize=(10, 2), squeeze=False)\n", - "colorbar_ax = [make_axes_locatable(ax).append_axes('right', size='5%', pad=0.05) for ax in axes[0, :]]\n", - "\n", - "im = axes[0, 0].imshow(m0[0, 0, ...].cpu())\n", - "axes[0, 0].set_title('$M_0$')\n", - "fig.colorbar(im, cax=colorbar_ax[0])\n", - "\n", - "im = axes[0, 1].imshow(t2star[0, 0, ...].cpu(), vmin=0, vmax=5)\n", - "axes[0, 1].set_title('$T_2^*$')\n", - "fig.colorbar(im, cax=colorbar_ax[1], label='s')\n", - "\n", - "im = axes[0, 2].imshow(relative_absolute_error[0, 0, ...].cpu(), vmin=0, vmax=0.1)\n", - "axes[0, 2].set_title('Relative error')\n", - "fig.colorbar(im, cax=colorbar_ax[2])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "21", - "metadata": {}, - "outputs": [], - "source": [ - "# Clean-up by removing temporary directory\n", - "shutil.rmtree(data_folder)" - ] - } - ], - "metadata": { - "accelerator": "GPU", - "colab": { - "gpuType": "T4", - "provenance": [] - }, - "jupytext": { - "cell_metadata_filter": "-all" - }, - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/examples/notebooks/t1_mapping_with_grad_acq.ipynb b/examples/notebooks/qmri_t1_mapping_with_grad_acq.ipynb similarity index 66% rename from examples/notebooks/t1_mapping_with_grad_acq.ipynb rename to examples/notebooks/qmri_t1_mapping_with_grad_acq.ipynb index 3d98e64f6..1ec1945f2 100644 --- a/examples/notebooks/t1_mapping_with_grad_acq.ipynb +++ b/examples/notebooks/qmri_t1_mapping_with_grad_acq.ipynb @@ -7,7 +7,7 @@ "lines_to_next_cell": 0 }, "source": [ - "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/PTB-MR/mrpro/blob/main/examples/notebooks/t1_mapping_with_grad_acq.ipynb)" + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/PTB-MR/mrpro/blob/main/examples/notebooks/qmri_t1_mapping_with_grad_acq.ipynb)" ] }, { @@ -30,40 +30,16 @@ { "cell_type": "markdown", "id": "2", - "metadata": {}, + "metadata": { + "lines_to_next_cell": 2 + }, "source": [ "# $T_1$ mapping from a continuous Golden radial acquisition" ] }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [ - "# Imports\n", - "import shutil\n", - "import tempfile\n", - "from pathlib import Path\n", - "\n", - "import matplotlib.pyplot as plt\n", - "import torch\n", - "import zenodo_get\n", - "from mpl_toolkits.axes_grid1 import make_axes_locatable # type: ignore [import-untyped]\n", - "from mrpro.algorithms.optimizers import adam\n", - "from mrpro.algorithms.reconstruction import DirectReconstruction\n", - "from mrpro.data import KData\n", - "from mrpro.data.traj_calculators import KTrajectoryIsmrmrd\n", - "from mrpro.operators import ConstraintsOp, MagnitudeOp\n", - "from mrpro.operators.functionals import MSE\n", - "from mrpro.operators.models import TransientSteadyStateWithPreparation\n", - "from mrpro.utils import split_idx" - ] - }, { "cell_type": 
"markdown", - "id": "4", + "id": "3", "metadata": { "lines_to_next_cell": 2 }, @@ -74,14 +50,14 @@ "data can be divided into different dynamic time frames, each corresponding to a different inversion time. A signal\n", "model can then be fitted to this data to obtain a $T_1$ map.\n", "\n", - "More information can be found in:\n", + "More information can be found in:\\\n", "Kerkering KM, Schulz-Menger J, Schaeffter T, Kolbitsch C (2023). Motion-corrected model-based reconstruction for 2D\n", "myocardial $T_1$ mapping. *Magnetic Resonance in Medicine*, 90(3):1086-1100, [10.1002/mrm.29699](https://doi.org/10.1002/mrm.29699)" ] }, { "cell_type": "markdown", - "id": "5", + "id": "4", "metadata": {}, "source": [ "The number of time frames and hence the number of radial lines per time frame, can in principle be chosen arbitrarily.\n", @@ -91,7 +67,7 @@ }, { "cell_type": "markdown", - "id": "6", + "id": "5", "metadata": {}, "source": [ "During data acquisition, the magnetization $M_z(t)$ can be described by the signal model:\n", @@ -103,7 +79,7 @@ }, { "cell_type": "markdown", - "id": "7", + "id": "6", "metadata": {}, "source": [ "where the effective longitudinal relaxation time is given by:\n", @@ -115,7 +91,7 @@ }, { "cell_type": "markdown", - "id": "8", + "id": "7", "metadata": {}, "source": [ "and the steady-state magnetization is\n", @@ -127,7 +103,7 @@ }, { "cell_type": "markdown", - "id": "9", + "id": "8", "metadata": {}, "source": [ "The initial magnetization $M_0^{init}$ after an inversion pulse is $-M_0$. Nevertheless, commonly after an inversion\n", @@ -142,52 +118,71 @@ }, { "cell_type": "markdown", - "id": "10", - "metadata": {}, + "id": "9", + "metadata": { + "lines_to_next_cell": 0 + }, "source": [ "In this example, we are going to:\n", "- Reconstruct a single high quality image using all acquired radial lines.\n", "- Split the data into multiple dynamics and reconstruct these dynamic images\n", - "- Define a signal model and a loss function to obtain the $T_1$ maps" + "- Define a signal model and a loss function to obtain the $T_1$ maps\n" ] }, { "cell_type": "code", "execution_count": null, - "id": "11", + "id": "10", "metadata": { - "lines_to_next_cell": 2 + "lines_to_next_cell": 0, + "tags": [ + "hide-cell" + ] }, "outputs": [], "source": [ "# Download raw data in ISMRMRD format from zenodo into a temporary directory\n", - "data_folder = Path(tempfile.mkdtemp())\n", + "import tempfile\n", + "from pathlib import Path\n", + "\n", + "import zenodo_get\n", + "\n", "dataset = '13207352'\n", + "\n", + "tmp = tempfile.TemporaryDirectory() # RAII, automatically cleaned up\n", + "data_folder = Path(tmp.name)\n", "zenodo_get.zenodo_get([dataset, '-r', 5, '-o', data_folder]) # r: retries" ] }, { "cell_type": "markdown", - "id": "12", - "metadata": {}, + "id": "11", + "metadata": { + "lines_to_next_cell": 0 + }, "source": [ - "## Reconstruct average image\n", - "Reconstruct one image as the average over all radial lines" + "We will use the following libraries:" ] }, { "cell_type": "code", "execution_count": null, - "id": "13", + "id": "12", "metadata": {}, "outputs": [], "source": [ - "# Read raw data and trajectory\n", - "kdata = KData.from_file(data_folder / '2D_GRad_map_t1.h5', KTrajectoryIsmrmrd())\n", - "\n", - "# Perform the reconstruction\n", - "reconstruction = DirectReconstruction(kdata)\n", - "img_average = reconstruction(kdata)" + "import matplotlib.pyplot as plt\n", + "import mrpro\n", + "import torch" + ] + }, + { + "cell_type": "markdown", + "id": "13", + "metadata": {}, + 
"source": [ + "## Reconstruct average image\n", + "Reconstruct one image as the average over all radial lines" ] }, { @@ -197,10 +192,17 @@ "metadata": {}, "outputs": [], "source": [ + "# Read raw data and trajectory\n", + "kdata = mrpro.data.KData.from_file(data_folder / '2D_GRad_map_t1.h5', mrpro.data.traj_calculators.KTrajectoryIsmrmrd())\n", + "\n", + "# Perform the reconstruction\n", + "reconstruction = mrpro.algorithms.reconstruction.DirectReconstruction(kdata)\n", + "img_average = reconstruction(kdata)\n", + "\n", "# Visualize average image\n", - "plt.figure()\n", - "plt.imshow(img_average.rss()[0, 0, :, :], cmap='gray')\n", - "plt.title('Average image')" + "plt.imshow(img_average.rss()[0, 0], cmap='gray')\n", + "plt.title('Average image')\n", + "plt.show()" ] }, { @@ -221,7 +223,8 @@ "metadata": {}, "outputs": [], "source": [ - "idx_dynamic = split_idx(torch.argsort(kdata.header.acq_info.acquisition_time_stamp[0, 0, :, 0]), 30, 0)\n", + "\n", + "idx_dynamic = mrpro.utils.split_idx(kdata.header.acq_info.acquisition_time_stamp[0, 0, :, 0].argsort(), 30, 0)\n", "kdata_dynamic = kdata.split_k1_into_other(idx_dynamic, other_label='repetition')" ] }, @@ -236,7 +239,7 @@ "source": [ "# Perform the reconstruction\n", "# Here we use the same coil sensitivity map for all dynamics\n", - "reconstruction_dynamic = DirectReconstruction(kdata_dynamic, csm=reconstruction.csm)\n", + "reconstruction_dynamic = mrpro.algorithms.reconstruction.DirectReconstruction(kdata_dynamic, csm=reconstruction.csm)\n", "img_dynamic = reconstruction_dynamic(kdata_dynamic)\n", "# Get absolute value of complex image and normalize the images\n", "img_rss_dynamic = img_dynamic.rss()\n", @@ -247,14 +250,17 @@ "cell_type": "code", "execution_count": null, "id": "18", - "metadata": {}, + "metadata": { + "lines_to_next_cell": 0 + }, "outputs": [], "source": [ "# Visualize the first six dynamic images\n", "fig, ax = plt.subplots(2, 3, squeeze=False)\n", "for idx, cax in enumerate(ax.flatten()):\n", " cax.imshow(img_rss_dynamic[idx, 0, :, :], cmap='gray', vmin=0, vmax=0.8)\n", - " cax.set_title(f'Dynamic {idx}')" + " cax.set_title(f'Dynamic {idx}')\n", + "plt.show()" ] }, { @@ -273,13 +279,15 @@ "### Signal model\n", "We use a three parameter signal model $q(M_0, T_1, \\alpha)$.\n", "\n", - "As known input, the model needs information about the time $t$ (`sampling_time`) in Eq. (1) since the inversion pulse.\n", + "The model needs information about the time $t$, `sampling_time`, in Eq. (1) since the inversion pulse.\n", "This can be calculated from the `acquisition_time_stamp`. If we average the `acquisition_time_stamp`-values for each\n", "dynamic image and subtract the first `acquisition_time_stamp`, we get the mean time since the inversion pulse for each\n", "dynamic. Note: The time taken by the spoiler gradient is taken into consideration in the\n", - "`TransientSteadyStateWithPreparation`-model and does not have to be added here. Another important thing to note is\n", - "that the `acquisition_time_stamp` is not given in time units but in vendor-specific time stamp units. For the Siemens\n", - "data used here, one time stamp corresponds to 2.5 ms." + "`~mrpro.operators.models.TransientSteadyStateWithPreparation`-model and does not have to be added here.\n", + "```{note}\n", + "The acquisition_time_stamp is not given in time units but in vendor-specific time stamp units. 
For the Siemens\n", + "data used here, one time stamp corresponds to 2.5 ms.\n", + "```" ] }, { @@ -289,11 +297,11 @@ "metadata": {}, "outputs": [], "source": [ - "sampling_time = torch.mean(kdata_dynamic.header.acq_info.acquisition_time_stamp[:, 0, :, 0].to(torch.float32), dim=-1)\n", - "# Subtract time stamp of first radial line\n", - "sampling_time -= kdata_dynamic.header.acq_info.acquisition_time_stamp[0, 0, 0, 0]\n", - "# Convert to seconds\n", - "sampling_time *= 2.5 / 1000" + "sampling_time = kdata_dynamic.header.acq_info.acquisition_time_stamp.squeeze()\n", + "# Subtract time stamp of first radial line and convert to seconds\n", + "sampling_time = (sampling_time - sampling_time[0, 0]) * 2.5e-3\n", + "# Average over radial lines of each dynamic\n", + "sampling_time = sampling_time.mean(-1)" ] }, { @@ -304,8 +312,9 @@ "We also need the repetition time between two RF-pulses. There is a parameter `tr` in the header, but this describes\n", "the time \"between the beginning of a pulse sequence and the beginning of the succeeding (essentially identical) pulse\n", "sequence\" (see [DICOM Standard Browser](https://dicom.innolitics.com/ciods/mr-image/mr-image/00180080)). We have one\n", - "inversion pulse at the beginning, which is never repeated and hence `tr` is the duration of the entire scan.\n", - "Therefore, we have to use the parameter `echo_spacing`, which describes the time between two gradient echoes." + "inversion pulse at the beginning, which is never repeated and hence ``tr`` is the duration of the entire scan.\n", + "Therefore, we have to use the parameter `~mrpro.data.KHeader.echo_spacing`, which describes the time between\n", + "two gradient echoes." ] }, { @@ -337,7 +346,7 @@ "metadata": {}, "outputs": [], "source": [ - "model_op = TransientSteadyStateWithPreparation(\n", + "model_op = mrpro.operators.models.TransientSteadyStateWithPreparation(\n", " sampling_time, repetition_time, m0_scaling_preparation=-1, delay_after_preparation=0.02\n", ")" ] @@ -349,7 +358,7 @@ "source": [ "The reconstructed image data is complex-valued. We could fit a complex $M_0$ to the data, but in this case it is more\n", "robust to fit $|q(M_0, T_1, \\alpha)|$ to the magnitude of the image data. We therefore combine our model with a\n", - "`MagnitudeOp`." + "`~mrpro.operators.MagnitudeOp`." ] }, { @@ -359,19 +368,23 @@ "metadata": {}, "outputs": [], "source": [ - "magnitude_model_op = MagnitudeOp() @ model_op" + "magnitude_model_op = mrpro.operators.MagnitudeOp() @ model_op" ] }, { "cell_type": "markdown", "id": "28", - "metadata": {}, + "metadata": { + "lines_to_next_cell": 0 + }, "source": [ "### Constraints\n", "$T_1$ and $\\alpha$ need to be positive. Based on the knowledge of the phantom, we can constrain $T_1$ between 50 ms\n", "and 3 s. Further, we can constrain $\\alpha$. Although the effective flip angle can vary, it can only vary by a\n", "certain percentage relative to the nominal flip angle. Here, we chose a maximum deviation from the nominal flip angle\n", - "of 50%." + "of 50%.\n", + "We use a `~mrpro.operators.ConstraintsOp` to define these constraints. 
It maps unconstrained parameters to constrained\n", + "parameters, such that the optimizer can work with unconstrained parameters." ] }, { "cell_type": "code", "execution_count": null, "id": "29", "metadata": {}, "outputs": [], "source": [ "if kdata_dynamic.header.fa is None:\n", " raise ValueError('Nominal flip angle needs to be defined.')\n", - "else:\n", - " nominal_flip_angle = float(kdata_dynamic.header.fa[0])\n", "\n", - "constraints_op = ConstraintsOp(bounds=((None, None), (0.05, 3.0), (nominal_flip_angle * 0.5, nominal_flip_angle * 1.5)))" + "nominal_flip_angle = float(kdata_dynamic.header.fa[0])\n", + "\n", + "constraints_op = mrpro.operators.ConstraintsOp(\n", + " bounds=(\n", + " (None, None), # M0 is not constrained\n", + " (0.05, 3.0), # T1 is constrained between 50 ms and 3 s\n", + " (nominal_flip_angle * 0.5, nominal_flip_angle * 1.5), # alpha is constrained\n", + " )\n", + ")" ] }, { @@ -408,7 +427,7 @@ "metadata": {}, "outputs": [], "source": [ - "mse_loss = MSE(img_rss_dynamic)" + "mse_loss = mrpro.operators.functionals.MSE(img_rss_dynamic)" ] }, { @@ -438,9 +457,15 @@ { "cell_type": "markdown", "id": "34", - "metadata": {}, + "metadata": { + "lines_to_next_cell": 0 + }, "source": [ - "### Carry out fit" + "### Carry out fit\n", + "We use an LBFGS optimizer to minimize the loss function. We start with the following initial values:\n", + "- The intensity at the first time point as a good approximation for the equilibrium magnetization $M_0$,\n", + "- 1 s for $T_1$, and\n", + "- the nominal flip angle for the actual flip angle." ] }, { "cell_type": "code", "execution_count": null, "id": "35", "metadata": { - "lines_to_next_cell": 2 + "lines_to_next_cell": 0 }, "outputs": [], "source": [ - "# The shortest echo time is a good approximation for the equilibrium magnetization\n", - "m0_start = img_rss_dynamic[0, ...]\n", - "# 1 s a good starting value for T1\n", - "t1_start = torch.ones(m0_start.shape, dtype=torch.float32)\n", - "# and the nominal flip angle a good starting value for the actual flip angle\n", - "flip_angle_start = torch.ones(m0_start.shape, dtype=torch.float32) * kdata_dynamic.header.fa" + "m0_start = img_rss_dynamic[0]\n", + "t1_start = torch.ones_like(m0_start)\n", + "flip_angle_start = torch.ones_like(m0_start) * kdata_dynamic.header.fa" ] }, { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "id": "36", - "metadata": {}, - "outputs": [], + "metadata": { + "lines_to_next_cell": 0 + }, "source": [ - "# Hyperparameters for optimizer\n", - "max_iter = 500\n", - "lr = 1e-2\n", - "\n", - "# Run optimization\n", - "params_result = adam(functional, [m0_start, t1_start, flip_angle_start], max_iter=max_iter, lr=lr)\n", - "params_result = constraints_op(*params_result)\n", - "m0, t1, flip_angle = (p.detach() for p in params_result)" + "If we use a `~mrpro.operators.ConstraintsOp`, the start values must be transformed to the\n", + "unconstrained space before the optimization and back to the original space after the optimization."
] }, { "cell_type": "code", "execution_count": null, "id": "37", - "metadata": {}, + "metadata": { + "lines_to_next_cell": 0 + }, "outputs": [], "source": [ - "# Visualize parametric maps\n", - "fig, axes = plt.subplots(1, 3, figsize=(10, 2), squeeze=False)\n", - "colorbar_ax = [make_axes_locatable(ax).append_axes('right', size='5%', pad=0.05) for ax in axes[0, :]]\n", - "im = axes[0, 0].imshow(m0[0, ...].abs(), cmap='gray')\n", - "axes[0, 0].set_title('$M_0$')\n", - "fig.colorbar(im, cax=colorbar_ax[0])\n", - "im = axes[0, 1].imshow(t1[0, ...], vmin=0, vmax=2)\n", - "axes[0, 1].set_title('$T_1$ (s)')\n", - "fig.colorbar(im, cax=colorbar_ax[1])\n", - "im = axes[0, 2].imshow(flip_angle[0, ...] / torch.pi * 180, vmin=0, vmax=8)\n", - "axes[0, 2].set_title('Flip angle (°)')\n", - "fig.colorbar(im, cax=colorbar_ax[2])" + "initial_parameters = constraints_op.inverse(m0_start, t1_start, flip_angle_start)" ] }, { "cell_type": "markdown", "id": "38", "metadata": { - "lines_to_next_cell": 2 + "lines_to_next_cell": 0 }, "source": [ - "### Next steps\n", - "The quality of the final $T_1$ maps depends on the quality of the individual dynamic images. Using more advanced image\n", - "reconstruction methods, we can improve the image quality and hence the quality of the maps.\n", - "\n", - "Try to exchange `DirectReconstruction` above with `IterativeSENSEReconstruction` and compare the quality of the\n", - "$T_1$ maps for different number of iterations (`n_iterations`)." + "Now we can run the optimizer in the unconstrained space." ] }, { "cell_type": "code", "execution_count": null, "id": "39", - "metadata": {}, + "metadata": { + "lines_to_next_cell": 0 + }, + "outputs": [], + "source": [ + "result = mrpro.algorithms.optimizers.lbfgs(functional, initial_parameters=initial_parameters)" + ] + }, + { + "cell_type": "markdown", + "id": "40", + "metadata": { + "lines_to_next_cell": 0 + }, + "source": [ + "Transforming the parameters back to the original space, we get the final $M_0$, $T_1$, and flip angle:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "41", + "metadata": { + "lines_to_next_cell": 0 + }, "outputs": [], "source": [ - "# Clean-up by removing temporary directory\n", - "shutil.rmtree(data_folder)" + "m0, t1, flip_angle = (p.detach().cpu().squeeze() for p in constraints_op(*result))" + ] + }, + { + "cell_type": "markdown", + "id": "42", + "metadata": { + "lines_to_next_cell": 0 + }, + "source": [ + "## Visualize results\n", + "Finally, we can take a look at the estimated $M_0$, $T_1$, and flip angle maps:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "43", + "metadata": { + "lines_to_next_cell": 0 + }, + "outputs": [], + "source": [ + "# Visualize parametric maps\n", + "fig, axes = plt.subplots(1, 3, figsize=(10, 2), squeeze=False)\n", + "\n", + "im = axes[0, 0].imshow(m0.abs(), cmap='gray')\n", + "axes[0, 0].set_title('$|M_0|$')\n", + "axes[0, 0].set_axis_off()\n", + "fig.colorbar(im, ax=axes[0, 0])\n", + "\n", + "im = axes[0, 1].imshow(t1, vmin=0, vmax=2, cmap='magma')\n", + "axes[0, 1].set_title('$T_1$ (s)')\n", + "axes[0, 1].set_axis_off()\n", + "fig.colorbar(im, ax=axes[0, 1])\n", + "\n", + "im = axes[0, 2].imshow(torch.rad2deg(flip_angle), vmin=0, vmax=8)\n", + "axes[0, 2].set_title('Flip angle (°)')\n", + "axes[0, 2].set_axis_off()\n", + "fig.colorbar(im, ax=axes[0, 2])\n", + "\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "id": "44", + "metadata": {}, + "source": [ + "Great! 
We have successfully estimated the $T_1$ map from the dynamic images!\n", + "\n", + "### Next steps\n", + "The quality of the final $T_1$ maps depends on the quality of the individual dynamic images. Using more advanced image\n", + "reconstruction methods, we can improve the image quality and hence the quality of the maps.\n", + "Try to exchange `~mrpro.algorithms.reconstruction.DirectReconstruction` above with\n", + "`~mrpro.algorithms.reconstruction.IterativeSENSEReconstruction`\n", + "or try a different optimizer such as `~mrpro.algorithms.optimizers.adam`." ] } ], @@ -532,7 +613,7 @@ "provenance": [] }, "jupytext": { - "cell_metadata_filter": "-all" + "cell_metadata_filter": "tags,-all" }, "kernelspec": { "display_name": "Python 3 (ipykernel)", diff --git a/examples/notebooks/regularized_iterative_sense_reconstruction.ipynb b/examples/notebooks/regularized_iterative_sense_reconstruction.ipynb deleted file mode 100644 index 44853077d..000000000 --- a/examples/notebooks/regularized_iterative_sense_reconstruction.ipynb +++ /dev/null @@ -1,421 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "0", - "metadata": { - "lines_to_next_cell": 0 - }, - "source": [ - "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/PTB-MR/mrpro/blob/main/examples/notebooks/regularized_iterative_sense_reconstruction.ipynb)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": { - "tags": [ - "remove-cell" - ] - }, - "outputs": [], - "source": [ - "import importlib\n", - "\n", - "if not importlib.util.find_spec('mrpro'):\n", - " %pip install mrpro[notebook]" - ] - }, - { - "cell_type": "markdown", - "id": "2", - "metadata": { - "lines_to_next_cell": 0 - }, - "source": [ - "# Regularized Iterative SENSE Reconstruction of 2D golden angle radial data\n", - "Here we use the RegularizedIterativeSENSEReconstruction class to reconstruct images from ISMRMRD 2D radial data" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": { - "lines_to_next_cell": 0 - }, - "outputs": [], - "source": [ - "# define zenodo URL of the example ismrmd data\n", - "zenodo_url = 'https://zenodo.org/records/10854057/files/'\n", - "fname = 'pulseq_radial_2D_402spokes_golden_angle_with_traj.h5'" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [ - "# Download raw data\n", - "import tempfile\n", - "\n", - "import requests\n", - "\n", - "data_file = tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.h5')\n", - "response = requests.get(zenodo_url + fname, timeout=30)\n", - "data_file.write(response.content)\n", - "data_file.flush()" - ] - }, - { - "cell_type": "markdown", - "id": "5", - "metadata": {}, - "source": [ - "### Image reconstruction\n", - "We use the RegularizedIterativeSENSEReconstruction class to reconstruct images from 2D radial data.\n", - "RegularizedIterativeSENSEReconstruction solves the following reconstruction problem:\n", - "\n", - "Let's assume we have obtained the k-space data $y$ from an image $x$ with an acquisition model (Fourier transforms,\n", - "coil sensitivity maps...) $A$ then we can formulate the forward problem as:\n", - "\n", - "$ y = Ax + n $\n", - "\n", - "where $n$ describes complex Gaussian noise. 
The image $x$ can be obtained by minimizing the functionl $F$\n", - "\n", - "$ F(x) = ||W^{\\frac{1}{2}}(Ax - y)||_2^2 $\n", - "\n", - "where $W^\\frac{1}{2}$ is the square root of the density compensation function (which corresponds to a diagonal\n", - "operator). Because this is an ill-posed problem, we can add a regularization term to stabilize the problem and obtain\n", - "a solution with certain properties:\n", - "\n", - "$ F(x) = ||W^{\\frac{1}{2}}(Ax - y)||_2^2 + l||Bx - x_{reg}||_2^2$\n", - "\n", - "where $l$ is the strength of the regularization, $B$ is a linear operator and $x_{reg}$ is a regularization image.\n", - "With this functional $F$ we obtain a solution which is close to $x_{reg}$ and to the acquired data $y$.\n", - "\n", - "Setting the derivative of the functional $F$ to zero and rearranging yields\n", - "\n", - "$ (A^H W A + l B) x = A^H W y + l x_{reg}$\n", - "\n", - "which is a linear system $Hx = b$ that needs to be solved for $x$.\n", - "\n", - "One important question of course is, what to use for $x_{reg}$. For dynamic images (e.g. cine MRI) low-resolution\n", - "dynamic images or high-quality static images have been proposed. In recent years, also the output of neural-networks\n", - "has been used as an image regulariser.\n", - "\n", - "In this example we are going to use a high-quality image to regularize the reconstruction of an undersampled image.\n", - "Both images are obtained from the same data acquisition (one using all the acquired data ($x_{reg}$) and one using\n", - "only parts of it ($x$)). This of course is an unrealistic case but it will allow us to study the effect of the\n", - "regularization." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6", - "metadata": {}, - "outputs": [], - "source": [ - "import mrpro" - ] - }, - { - "cell_type": "markdown", - "id": "7", - "metadata": { - "lines_to_next_cell": 0 - }, - "source": [ - "##### Read-in the raw data" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8", - "metadata": {}, - "outputs": [], - "source": [ - "from mrpro.data import KData\n", - "from mrpro.data.traj_calculators import KTrajectoryIsmrmrd\n", - "\n", - "# Load in the Data and the trajectory from the ISMRMRD file\n", - "kdata = KData.from_file(data_file.name, KTrajectoryIsmrmrd())\n", - "kdata.header.recon_matrix.x = 256\n", - "kdata.header.recon_matrix.y = 256" - ] - }, - { - "cell_type": "markdown", - "id": "9", - "metadata": {}, - "source": [ - "##### Image $x_{reg}$ from fully sampled data" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "10", - "metadata": {}, - "outputs": [], - "source": [ - "from mrpro.algorithms.reconstruction import DirectReconstruction, IterativeSENSEReconstruction\n", - "from mrpro.data import CsmData\n", - "\n", - "# Estimate coil maps\n", - "direct_reconstruction = DirectReconstruction(kdata, csm=None)\n", - "img_coilwise = direct_reconstruction(kdata)\n", - "csm = CsmData.from_idata_walsh(img_coilwise)\n", - "\n", - "# Iterative SENSE reconstruction\n", - "iterative_sense_reconstruction = IterativeSENSEReconstruction(kdata, csm=csm, n_iterations=3)\n", - "img_iterative_sense = iterative_sense_reconstruction(kdata)" - ] - }, - { - "cell_type": "markdown", - "id": "11", - "metadata": {}, - "source": [ - "##### Image $x$ from undersampled data" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "12", - "metadata": {}, - "outputs": [], - "source": [ - "import torch\n", - "\n", - "# Data undersampling, i.e. 
take only the first 20 radial lines\n", - "idx_us = torch.arange(0, 20)[None, :]\n", - "kdata_us = kdata.split_k1_into_other(idx_us, other_label='repetition')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "13", - "metadata": {}, - "outputs": [], - "source": [ - "# Iterativ SENSE reconstruction\n", - "iterative_sense_reconstruction = IterativeSENSEReconstruction(kdata_us, csm=csm, n_iterations=6)\n", - "img_us_iterative_sense = iterative_sense_reconstruction(kdata_us)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "14", - "metadata": {}, - "outputs": [], - "source": [ - "# Regularized iterativ SENSE reconstruction\n", - "from mrpro.algorithms.reconstruction import RegularizedIterativeSENSEReconstruction\n", - "\n", - "regularization_weight = 1.0\n", - "n_iterations = 6\n", - "regularized_iterative_sense_reconstruction = RegularizedIterativeSENSEReconstruction(\n", - " kdata_us,\n", - " csm=csm,\n", - " n_iterations=n_iterations,\n", - " regularization_data=img_iterative_sense.data,\n", - " regularization_weight=regularization_weight,\n", - ")\n", - "img_us_regularized_iterative_sense = regularized_iterative_sense_reconstruction(kdata_us)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "15", - "metadata": { - "lines_to_next_cell": 2 - }, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\n", - "\n", - "vis_im = [img_iterative_sense.rss(), img_us_iterative_sense.rss(), img_us_regularized_iterative_sense.rss()]\n", - "vis_title = ['Fully sampled', 'Iterative SENSE R=20', 'Regularized Iterative SENSE R=20']\n", - "fig, ax = plt.subplots(1, 3, squeeze=False, figsize=(12, 4))\n", - "for ind in range(3):\n", - " ax[0, ind].imshow(vis_im[ind][0, 0, ...])\n", - " ax[0, ind].set_title(vis_title[ind])" - ] - }, - { - "cell_type": "markdown", - "id": "16", - "metadata": {}, - "source": [ - "### Behind the scenes" - ] - }, - { - "cell_type": "markdown", - "id": "17", - "metadata": { - "lines_to_next_cell": 0 - }, - "source": [ - "##### Set-up the density compensation operator $W$ and acquisition model $A$\n", - "\n", - "This is very similar to the iterative SENSE reconstruction. For more detail please look at the\n", - "iterative_sense_reconstruction notebook." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "18", - "metadata": {}, - "outputs": [], - "source": [ - "dcf_operator = mrpro.data.DcfData.from_traj_voronoi(kdata_us.traj).as_operator()\n", - "fourier_operator = mrpro.operators.FourierOp.from_kdata(kdata_us)\n", - "csm_operator = csm.as_operator()\n", - "acquisition_operator = fourier_operator @ csm_operator" - ] - }, - { - "cell_type": "markdown", - "id": "19", - "metadata": {}, - "source": [ - "##### Calculate the right-hand-side of the linear system $b = A^H W y + l x_{reg}$" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "20", - "metadata": { - "lines_to_next_cell": 2 - }, - "outputs": [], - "source": [ - "right_hand_side = (\n", - " acquisition_operator.H(dcf_operator(kdata_us.data)[0])[0] + regularization_weight * img_iterative_sense.data\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "21", - "metadata": {}, - "source": [ - "##### Set-up the linear self-adjoint operator $H = A^H W A + l$" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "22", - "metadata": {}, - "outputs": [], - "source": [ - "from mrpro.operators import IdentityOp\n", - "\n", - "operator = acquisition_operator.H @ dcf_operator @ acquisition_operator + IdentityOp() * torch.as_tensor(\n", - " regularization_weight\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "23", - "metadata": {}, - "source": [ - "##### Run conjugate gradient" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "24", - "metadata": {}, - "outputs": [], - "source": [ - "img_manual = mrpro.algorithms.optimizers.cg(\n", - " operator, right_hand_side, initial_value=right_hand_side, max_iterations=n_iterations, tolerance=0.0\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "25", - "metadata": {}, - "outputs": [], - "source": [ - "# Display the reconstructed image\n", - "vis_im = [img_us_regularized_iterative_sense.rss(), img_manual.abs()[:, 0, ...]]\n", - "vis_title = ['Regularized Iterative SENSE R=20', '\"Manual\" Regularized Iterative SENSE R=20']\n", - "fig, ax = plt.subplots(1, 2, squeeze=False, figsize=(8, 4))\n", - "for ind in range(2):\n", - " ax[0, ind].imshow(vis_im[ind][0, 0, ...])\n", - " ax[0, ind].set_title(vis_title[ind])" - ] - }, - { - "cell_type": "markdown", - "id": "26", - "metadata": {}, - "source": [ - "### Check for equal results\n", - "The two versions should result in the same image data." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "27", - "metadata": {}, - "outputs": [], - "source": [ - "# If the assert statement did not raise an exception, the results are equal.\n", - "assert torch.allclose(img_us_regularized_iterative_sense.data, img_manual)" - ] - }, - { - "cell_type": "markdown", - "id": "28", - "metadata": {}, - "source": [ - "### Next steps\n", - "Play around with the regularization_weight to see how it effects the final image quality.\n", - "\n", - "Of course we are cheating here because we used the fully sampled image as a regularization. In real world applications\n", - "we would not have that. One option is to apply a low-pass filter to the undersampled k-space data to try to reduce the\n", - "streaking artifacts and use that as a regularization image. Try that and see if you can also improve the image quality\n", - "compared to the unregularised images." 
- ] - } - ], - "metadata": { - "accelerator": "GPU", - "colab": { - "gpuType": "T4", - "provenance": [] - }, - "jupytext": { - "cell_metadata_filter": "-all" - }, - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/examples/scripts/cartesian_reconstruction.py b/examples/scripts/cartesian_reconstruction.py index e6069fc9b..050ec95a1 100644 --- a/examples/scripts/cartesian_reconstruction.py +++ b/examples/scripts/cartesian_reconstruction.py @@ -5,90 +5,96 @@ # %% [markdown] # ## Overview -# -# In this notebook, we are going to explore the MRpro KData object and the included header parameters. We will then use -# a FFT-operator in order to reconstruct data acquired with a Cartesian sampling scheme. We will also reconstruct data -# acquired on a Cartesian grid but with partial echo and partial Fourier acceleration. Finally, we will reconstruct a -# Cartesian scan with regular undersampling using iterative SENSE. - -# %% [markdown] -# ## Import MRpro and download data +# In this notebook, we are going to explore the `~mrpro.data.KData` object and the included header parameters. +# We will then use a FFT-operator in order to reconstruct data acquired with a Cartesian sampling scheme. +# We will also reconstruct data acquired on a Cartesian grid but with partial echo and partial Fourier acceleration. +# Finally, we will reconstruct a Cartesian scan with regular undersampling. -# %% +# %% tags=["hide-cell"] # Get the raw data from zenodo import tempfile from pathlib import Path import zenodo_get -data_folder = Path(tempfile.mkdtemp()) dataset = '14173489' -zenodo_get.zenodo_get([dataset, '-r', 5, '-o', data_folder]) # r: retries -# %% -# List the downloaded files -for f in data_folder.iterdir(): - print(f.name) +tmp = tempfile.TemporaryDirectory() # RAII, automatically cleaned up +data_folder = Path(tmp.name) +zenodo_get.zenodo_get([dataset, '-r', 5, '-o', data_folder]) # r: retries # %% [markdown] -# We have three different scans obtained from the same object with the same FOV and resolution: +# We have three different scans obtained from the same object with the same FOV and resolution, saved as ISMRMRD +# raw data files (``*.mrd`` or ``*.h5``): # -# - cart_t1.mrd is a fully sampled Cartesian acquisition +# - ``cart_t1.mrd`` is a fully sampled Cartesian acquisition # -# - cart_t1_msense_integrated.mrd is accelerated using regular undersampling and self-calibrated SENSE +# - ``cart_t1_msense_integrated.mrd`` is accelerated using regular undersampling and self-calibrated SENSE # -# - cart_t1_partial_echo_partial_fourier.mrd is accelerated using partial echo and partial Fourier - +# - ``cart_t1_partial_echo_partial_fourier.mrd`` is accelerated using partial echo and partial Fourier # %% [markdown] # ## Read in raw data and explore header # -# To read in an ISMRMRD raw data file (*.mrd), we can simply pass on the file name to a `KData` object. +# To read in an ISMRMRD file, we can simply pass on the file name to a `~mrpro.data.KData` object. # Additionally, we need to provide information about the trajectory. In MRpro, this is done using trajectory # calculators. These are functions that calculate the trajectory based on the acquisition information and additional # parameters provided to the calculators (e.g. the angular step for a radial acquisition). # # In this case, we have a Cartesian acquisition. 
This means that we only need to provide a Cartesian trajectory -# calculator (called `KTrajectoryCartesian` in MRpro) without any further parameters. +# calculator `~mrpro.data.traj_calculators.KTrajectoryCartesian` without any further parameters. +# +# See the trajectory calculator comparison example for more information about different ways to +# define the trajectory. # %% -from mrpro.data import KData -from mrpro.data.traj_calculators import KTrajectoryCartesian +import mrpro -kdata = KData.from_file(data_folder / 'cart_t1.mrd', KTrajectoryCartesian()) +kdata = mrpro.data.KData.from_file( + data_folder / 'cart_t1.mrd', + mrpro.data.traj_calculators.KTrajectoryCartesian(), +) # %% [markdown] # Now we can explore this data object. +# Simply printing ``kdata`` gives us a basic overview of the `~mrpro.data.KData` object. -# %% -# Start with simply calling print(kdata), whichs gives us a nice overview of the KData object. +# %% tags=["show-output"] print(kdata) -# %% +# %% [markdown] # We can also have a look at more specific header information like the 1H Larmor frequency -print(kdata.header.lamor_frequency_proton) + +# %% +print('Larmor Frequency:', kdata.header.lamor_frequency_proton) # %% [markdown] # ## Reconstruction of fully sampled acquisition # -# For the reconstruction of a fully sampled Cartesian acquisition, we can use a simple Fast Fourier Transform (FFT). +# For the reconstruction of a fully sampled Cartesian acquisition, we can either use a general +# `~mrpro.operators.FourierOp` or manually set up a Fast Fourier Transform (FFT). +# For demonstration purposes, we first show the manual approach. +# +# ```{note} +# Most of the time, you will use the `~mrpro.operators.FourierOp` operator, which automatically takes care +# of choosing whether to use a FFT or a non-uniform FFT (NUFFT) based on the trajectory. +# It can optionally be created from a `~mrpro.data.KData` object without any further information. +# ``` # -# Let's create an FFT-operator (called `FastFourierOp` in MRpro) and apply it to our `KData` object. Please note that -# all MRpro operators currently only work on PyTorch tensors and not on the MRpro objects directly. Therefore, we have -# to call the operator on kdata.data. One other important feature of MRpro operators is that they always return a -# tuple of PyTorch tensors, even if the output is only a single tensor. This is why we use the `(img,)` syntax below. +# Let's create an FFT-operator `~mrpro.operators.FastFourierOp` and apply it to our `~mrpro.data.KData` object. +# Please note that all MRpro operators work on PyTorch tensors and not on the MRpro objects directly. Therefore, we have +# to call the operator on kdata.data. One other important property of MRpro operators is that they always return a +# tuple of PyTorch tensors, even if the output is only a single tensor. This is why we use the ``(img,)`` syntax below. # %% -from mrpro.operators import FastFourierOp - -fft_op = FastFourierOp(dim=(-2, -1)) +fft_op = mrpro.operators.FastFourierOp(dim=(-2, -1)) (img,) = fft_op.adjoint(kdata.data) # %% [markdown] # Let's have a look at the shape of the obtained tensor. # %% -print(img.shape) +print('Shape:', img.shape) # %% [markdown] # We can see that the second dimension, which is the coil dimension, is 16. This means we still have a coil resolved @@ -96,46 +102,60 @@ # one. Later, we will do something a bit more sophisticated. We can also see that the x-dimension is 512.
This is # because in MRI we commonly oversample the readout direction by a factor 2 leading to a FOV twice as large as we # actually need. We can either remove this oversampling along the readout direction or we can simply tell the -# `FastFourierOp` to crop the image by providing the correct output matrix size (recon_matrix). +# `~mrpro.operators.FastFourierOp` to crop the image by providing the correct output matrix size ``recon_matrix``. # %% # Create FFT-operator with correct output matrix size -fft_op = FastFourierOp( +fft_op = mrpro.operators.FastFourierOp( dim=(-2, -1), recon_matrix=kdata.header.recon_matrix, encoding_matrix=kdata.header.encoding_matrix, ) (img,) = fft_op.adjoint(kdata.data) -print(img.shape) +print('Shape:', img.shape) # %% [markdown] # Now, we have an image which is 256 x 256 voxel as we would expect. Let's combine the data from the different receiver # coils using root-sum-of-squares and then display the image. Note that we usually index from behind in MRpro # (i.e. -1 for the last, -4 for the fourth last (coil) dimension) to allow for more than one 'other' dimension. -# %% +# %% tags=["hide-cell"] import matplotlib.pyplot as plt import torch -# Combine data from different coils -img_fully_sampled = torch.sqrt(torch.sum(img**2, dim=-4)).abs().squeeze() -# plot the image -plt.imshow(img_fully_sampled) +def show_images(*images: torch.Tensor, titles: list[str] | None = None) -> None: + """Plot images.""" + n_images = len(images) + _, axes = plt.subplots(1, n_images, squeeze=False, figsize=(n_images * 3, 3)) + for i in range(n_images): + axes[0][i].imshow(images[i], cmap='gray') + axes[0][i].axis('off') + if titles: + axes[0][i].set_title(titles[i]) + plt.show() + +# %% +# Combine data from different coils and show magnitude image +magnitude_fully_sampled = img.abs().square().sum(dim=-4).sqrt().squeeze() +show_images(magnitude_fully_sampled) # %% [markdown] # Great! That was very easy! Let's try to reconstruct the next dataset. # %% [markdown] # ## Reconstruction of acquisition with partial echo and partial Fourier -# %% +# %% tags=["remove-output"] # Read in the data -kdata_pe_pf = KData.from_file(data_folder / 'cart_t1_partial_echo_partial_fourier.mrd', KTrajectoryCartesian()) +kdata_pe_pf = mrpro.data.KData.from_file( + data_folder / 'cart_t1_partial_echo_partial_fourier.mrd', + mrpro.data.traj_calculators.KTrajectoryCartesian(), +) # Create FFT-operator with correct output matrix size -fft_op = FastFourierOp( +fft_op = mrpro.operators.FastFourierOp( dim=(-2, -1), recon_matrix=kdata.header.recon_matrix, encoding_matrix=kdata.header.encoding_matrix, @@ -145,38 +165,38 @@ (img_pe_pf,) = fft_op.adjoint(kdata_pe_pf.data) # Combine data from different coils using root-sum-of-squares -img_pe_pf = torch.sqrt(torch.sum(img_pe_pf**2, dim=-4)).abs().squeeze() +magnitude_pe_pf = img_pe_pf.abs().square().sum(dim=-4).sqrt().squeeze() # Plot both images -fig, ax = plt.subplots(1, 2, squeeze=False) -ax[0, 0].imshow(img_fully_sampled) -ax[0, 1].imshow(img_pe_pf) - - +show_images(magnitude_fully_sampled, magnitude_pe_pf, titles=['fully sampled', 'PF & PE']) # %% [markdown] # Well, we got an image, but when we compare it to the previous result, it seems like the head has shrunk. # Since that's extremely unlikely, there's probably a mistake in our reconstruction. # # Let's step back and check out the trajectories for both scans. - # %% print(kdata.traj) # %% [markdown] -# We see that the trajectory has kz, ky, and kx components. Kx and ky only vary along one dimension. 
-# This is because MRpro saves the trajectory in the most efficient way. -# To get the full trajectory as a tensor, we can just call as_tensor(). - +# We see that the trajectory has ``kz``, ``ky``, and ``kx`` components. ``kx`` and ``ky`` only vary along one dimension. +# ```{note} +# This is because MRpro saves meta data such as trajectories in an efficient way, where dimensions in which the data +# does not change are often collapsed. The original shape can be obtained by +# [broadcasting](https://numpy.org/doc/stable/user/basics.broadcasting.html). +# ``` +# To get the full trajectory as a tensor, we can also just call `~mrpro.data.KTrajectory.as_tensor()`: # %% # Plot the fully sampled trajectory (in blue) -plt.plot(kdata.traj.as_tensor()[2, 0, 0, :, :].flatten(), kdata.traj.as_tensor()[1, 0, 0, :, :].flatten(), 'ob') +full_kz, full_ky, full_kx = kdata.traj.as_tensor() +plt.plot(full_ky[0, 0].flatten(), full_kx[0, 0].flatten(), 'ob') # Plot the partial echo and partial Fourier trajectory (in red) -plt.plot( - kdata_pe_pf.traj.as_tensor()[2, 0, 0, :, :].flatten(), kdata_pe_pf.traj.as_tensor()[1, 0, 0, :, :].flatten(), '+r' -) +full_kz, full_ky, full_kx = kdata_pe_pf.traj.as_tensor() +plt.plot(full_ky[0, 0].flatten(), full_kx[0, 0].flatten(), '+r') + +plt.show() # %% [markdown] # We see that for the fully sampled acquisition, the k-space is covered symmetrically from -256 to 255 along the @@ -187,201 +207,189 @@ # between encoding and recon matrix needs to be zero-padded symmetrically. # # To take the asymmetric acquisition into account and sort the data correctly into a matrix where we can apply the -# FFT-operator to, we have got the `CartesianSamplingOp` in MRpro. This operator calculates a sorting index based on the -# k-space trajectory and the dimensions of the encoding k-space. +# FFT-operator to, we have got the `~mrpro.operators.CartesianSamplingOp` in MRpro. This operator performs +# sorting based on the k-space trajectory and the dimensions of the encoding k-space. # # Let's try it out! # %% -from mrpro.operators import CartesianSamplingOp - -cart_sampling_op = CartesianSamplingOp(encoding_matrix=kdata_pe_pf.header.encoding_matrix, traj=kdata_pe_pf.traj) +cart_sampling_op = mrpro.operators.CartesianSamplingOp( + encoding_matrix=kdata_pe_pf.header.encoding_matrix, traj=kdata_pe_pf.traj +) # %% [markdown] -# Now, we first apply the CartesianSamplingOp and then call the FFT-operator. +# Now, we first apply the adjoint CartesianSamplingOp and then call the adjoint FFT-operator. # %% (img_pe_pf,) = fft_op.adjoint(cart_sampling_op.adjoint(kdata_pe_pf.data)[0]) -img_pe_pf = torch.sqrt(torch.sum(img_pe_pf**2, dim=-4)).abs().squeeze() +magnitude_pe_pf = img_pe_pf.abs().square().sum(dim=-4).sqrt().squeeze() -fig, ax = plt.subplots(1, 2, squeeze=False) -ax[0, 0].imshow(img_fully_sampled) -ax[0, 1].imshow(img_pe_pf) +show_images(magnitude_fully_sampled, magnitude_pe_pf, titles=['fully sampled', 'PF & PE']) -# %% [markdown] # %% [markdown] # Voila! We've got the same brains, and they're the same size! -# -# But wait a second—something still looks a bit off. In the bottom left corner, it seems like there's a "hole" + +# %% [markdown] +# ## More about operators +# ### The Fourier Operator +# In MRpro, we have a smart `~mrpro.operators.FourierOp` operator that automatically does the resorting and can +# handle non-Cartesian data as well. For Cartesian data, it internally does exactly the two steps we just did manually.
+# The operator can also be created from an existing `~mrpro.data.KData` object. +# This is the recommended way to transform k-space data. + +# %% + +fourier_op = mrpro.operators.FourierOp.from_kdata(kdata_pe_pf) +# no need for an explicit CartesianSamplingOp anymore! +(img_pe_pf,) = fourier_op.adjoint(kdata_pe_pf.data) +magnitude_pe_pf = img_pe_pf.abs().square().sum(dim=-4).sqrt().squeeze() +show_images(magnitude_fully_sampled, magnitude_pe_pf, titles=['fully sampled', 'PF & PE']) + +# %% [markdown] +# That was easy! +# But wait a second — something still looks a bit off. In the bottom left corner, it seems like there's a "hole" # in the brain. That definitely shouldn't be there. # # The issue is that we combined the data from the different coils using a root-sum-of-squares approach. # While it's simple, it's not the ideal method. Typically, coil sensitivity maps are calculated to combine the data # from different coils. In MRpro, you can do this by calculating coil sensitivity data and then creating a -# `SensitivityOp` to combine the data after image reconstruction. - +# `~mrpro.operators.SensitivityOp` to combine the data after image reconstruction. # %% [markdown] +# ### Sensitivity Operator # We have different options for calculating coil sensitivity maps from the image data of the various coils. # Here, we're going to use the Walsh method. # %% -from mrpro.algorithms.csm import walsh -from mrpro.operators import SensitivityOp - # Calculate coil sensitivity maps -(img_pe_pf,) = fft_op.adjoint(cart_sampling_op.adjoint(kdata_pe_pf.data)[0]) +(img_pe_pf,) = fft_op.adjoint(*cart_sampling_op.adjoint(kdata_pe_pf.data)) # This algorithm is designed to calculate coil sensitivity maps for each 'other' dimension. -csm_data = walsh(img_pe_pf[0, ...], smoothing_width=5)[None, ...] +csm_data = mrpro.algorithms.csm.walsh(img_pe_pf[0, ...], smoothing_width=5)[None, ...] # Create SensitivityOp -csm_op = SensitivityOp(csm_data) +csm_op = mrpro.operators.SensitivityOp(csm_data) # Reconstruct coil-combined image -(img_pe_pf,) = csm_op.adjoint(fft_op.adjoint(cart_sampling_op.adjoint(kdata_pe_pf.data)[0])[0]) -img_pe_pf = img_pe_pf.abs().squeeze() - -fig, ax = plt.subplots(1, 2, squeeze=False) -ax[0, 0].imshow(img_fully_sampled) -ax[0, 1].imshow(img_pe_pf.squeeze()) +(img_walsh_combined,) = csm_op.adjoint(*fourier_op.adjoint(kdata_pe_pf.data)) +magnitude_walsh_combined = img_walsh_combined.abs().squeeze() +show_images(magnitude_pe_pf, magnitude_walsh_combined, titles=['RSS', 'Adaptive Combination']) # %% [markdown] -# Tada! The "hole" is gone, and the image looks much better. +# Tada! The "hole" is gone, and the image looks much better 🎉. # # When we reconstructed the image, we called the adjoint method of several different operators one after the other. That -# was a bit cumbersome. To make our life easier, MRpro allows to combine the operators first and then call the adjoint -# of the composite operator. We have to keep in mind that we have to put them in the order of the forward method of the -# operators. By calling the adjoint, the order will be automatically reversed. +# was a bit cumbersome. To make our life easier, MRpro allows us to combine the operators first, get the adjoint +# of the composite operator, and call this adjoint composite operator later.
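+
+# %% [markdown]
+# As a quick sanity check (a minimal sketch, using only the operators defined above), we can verify that the
+# adjoint of a composed operator applies the individual adjoints in reversed order,
+# i.e. ``(A @ B).H`` acts like ``B.H @ A.H``:
+
+# %%
+(img_composed,) = (fourier_op @ csm_op).H(kdata_pe_pf.data)
+(img_stepwise,) = csm_op.H(*fourier_op.H(kdata_pe_pf.data))
+# Both paths should yield the same coil-combined image (up to floating point precision)
+print(torch.allclose(img_composed, img_stepwise))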
# %% +### Operator Composition # Create composite operator -acq_op = cart_sampling_op @ fft_op @ csm_op -(img_pe_pf,) = acq_op.adjoint(kdata_pe_pf.data) -img_pe_pf = img_pe_pf.abs().squeeze() - -fig, ax = plt.subplots(1, 2, squeeze=False) -ax[0, 0].imshow(img_fully_sampled) -ax[0, 1].imshow(img_pe_pf) +adjoint_operator = (fourier_op @ csm_op).H +(magnitude_pe_pf,) = adjoint_operator(kdata_pe_pf.data) +magnitude_pe_pf = magnitude_pe_pf.abs().squeeze() +show_images(magnitude_pe_pf, titles=['PF & PE']) # %% [markdown] # Although we now have got a nice looking image, it was still a bit cumbersome to create it. We had to define several # different operators and chain them together. Wouldn't it be nice if this could be done automatically? # # That is why we also included some top-level reconstruction algorithms in MRpro. For all of the steps above, -# we can simply call a `DirectReconstruction`. A `DirectReconstruction` object can be created from only the information -# in the `KData` object. +# we can simply use a `~mrpro.algorithms.reconstruction.DirectReconstruction`. +# Reconstruction algorithms can be instantiated from only the information in the `~mrpro.data.KData` object. # # In contrast to operators, top-level reconstruction algorithms operate on the data objects of MRpro, i.e. the input is -# a `KData` object and the output is an image data (called `IData` in MRpro) object. To get the tensor content of the -# `IData` object, we can call its `rss` method. +# a `~mrpro.data.KData` object and the output is an `~mrpro.data.IData` object containing +# the reconstructed image data. To get its magnitude, we can call the `~mrpro.data.IData.rss` method. # %% -from mrpro.algorithms.reconstruction import DirectReconstruction # Create DirectReconstruction object from KData object -direct_recon_pe_pf = DirectReconstruction(kdata_pe_pf) +direct_recon_pe_pf = mrpro.algorithms.reconstruction.DirectReconstruction(kdata_pe_pf) # Reconstruct image by calling the DirectReconstruction object idat_pe_pf = direct_recon_pe_pf(kdata_pe_pf) -fig, ax = plt.subplots(1, 2, squeeze=False) -ax[0, 0].imshow(img_fully_sampled) -ax[0, 1].imshow(idat_pe_pf.rss().squeeze()) - # %% [markdown] # This is much simpler — everything happens in the background, so we don't have to worry about it. -# Let's try it on the undersampled dataset now. - +# Let's finally try it on the undersampled dataset. # %% [markdown] # ## Reconstruction of undersampled data # %% -kdata_us = KData.from_file(data_folder / 'cart_t1_msense_integrated.mrd', KTrajectoryCartesian()) -direct_recon_us = DirectReconstruction(kdata_us) +kdata_us = mrpro.data.KData.from_file( + data_folder / 'cart_t1_msense_integrated.mrd', + mrpro.data.traj_calculators.KTrajectoryCartesian(), +) +direct_recon_us = mrpro.algorithms.reconstruction.DirectReconstruction(kdata_us) idat_us = direct_recon_us(kdata_us) -fig, ax = plt.subplots(1, 2, squeeze=False) -ax[0, 0].imshow(img_fully_sampled) -ax[0, 1].imshow(idat_us.rss().squeeze()) +show_images(idat_pe_pf.rss().squeeze(), idat_us.rss().squeeze(), titles=['PE & PF', 'Undersampled']) # %% [markdown] -# As expected, we can see undersampling artifacts in the image. In order to get rid of them, we can use an iterative -# SENSE algorithm. As you might have guessed, this is also included in MRpro. -# Similarly to the `DirectReconstruction`, we can create an `IterativeSENSEReconstruction` and apply it to the -# undersampled data.
+# We used the same data for coil sensitivity calculation as for image reconstruction (*auto-calibration*). +# Another approach is to acquire a few calibration lines in the center of k-space to create a low-resolution, +# fully sampled image. In our example data from Siemens scanners, these lines are part of the dataset. +# As they aren't meant to be used for image reconstruction, only for calibration, i.e., coil sensitivity calculation, +# and are labeled in the data as such, they are ignored by the default `acquisition_filter_criterion` of +# `~mrpro.data.KData.from_file`. +# However, we can change the filter criterion to `is_coil_calibration_acquisition` to read in only these acquisitions. +# +# ```{note} +# There are already some other filter criteria available, see `mrpro.data.acq_filters`. You can also implement your own +# function returning whether to include an acquisition. +# ``` +# # %% -from mrpro.algorithms.reconstruction import IterativeSENSEReconstruction - -it_sense_recon = IterativeSENSEReconstruction(kdata=kdata_us, csm=direct_recon_pe_pf.csm) -idat_us = it_sense_recon(kdata_us) +kdata_calib_lines = mrpro.data.KData.from_file( + data_folder / 'cart_t1_msense_integrated.mrd', + mrpro.data.traj_calculators.KTrajectoryCartesian(), + acquisition_filter_criterion=mrpro.data.acq_filters.is_coil_calibration_acquisition, +) -fig, ax = plt.subplots(1, 2, squeeze=False) -ax[0, 0].imshow(img_fully_sampled) -ax[0, 1].imshow(idat_us.rss().squeeze()) +direct_recon_calib_lines = mrpro.algorithms.reconstruction.DirectReconstruction(kdata_calib_lines) +idat_calib_lines = direct_recon_calib_lines(kdata_calib_lines) # %% [markdown] -# That worked well, but in practice, we don't want to acquire a fully sampled version of our scan just to -# reconstruct it. A more efficient approach is to get a few self-calibration lines in the center of k-space -# to create a low-resolution, fully sampled image. -# -# In our scan, these lines are part of the dataset, but they aren't used for image reconstruction since -# they're only meant for calibration (i.e., coil sensitivity map calculation). Because they're not labeled -# for imaging, MRpro ignores them by default when reading the data. However, we can set a flag when calling -# `from_file` to read in just those lines for reconstructing the coil sensitivity maps. +# If we look at the reconstructed image, we see that it is low resolution...
# %% -from mrpro.data.acq_filters import is_coil_calibration_acquisition - -kdata_calib_lines = KData.from_file( - data_folder / 'cart_t1_msense_integrated.mrd', - KTrajectoryCartesian(), - acquisition_filter_criterion=lambda acq: is_coil_calibration_acquisition(acq), -) - -direct_recon_calib_lines = DirectReconstruction(kdata_calib_lines) -im_calib_lines = direct_recon_calib_lines(kdata_calib_lines) - -plt.imshow(im_calib_lines.rss().squeeze()) +show_images(idat_calib_lines.rss().squeeze(), titles=['Calibration Image']) # %% [markdown] -# Although this only yields a low-resolution image, it is good enough to calculate coil sensitivity maps. +# ...but it is good enough to calculate coil sensitivity maps, which we can use when creating the reconstruction object: # %% -# Visualize coil sensitivity maps of all 16 coils -assert direct_recon_calib_lines.csm is not None # needed for type checking -fig, ax = plt.subplots(4, 4, squeeze=False) -for idx, cax in enumerate(ax.flatten()): - cax.imshow(direct_recon_calib_lines.csm.data[0, idx, 0, ...].abs()) +# Show the coil sensitivity maps +assert direct_recon_calib_lines.csm is not None +show_images( + *direct_recon_calib_lines.csm.data[0].abs().squeeze(), + titles=[f'|CSM {i}|' for i in range(direct_recon_calib_lines.csm.data.size(-4))], +) +# Reuse the CSMs calculated from the calibration lines +direct_recon_us_csm = mrpro.algorithms.reconstruction.DirectReconstruction(kdata_us, csm=direct_recon_calib_lines.csm) +idat_us_csm = direct_recon_us_csm(kdata_us) +show_images(idat_us.rss().squeeze(), idat_us_csm.rss().squeeze(), titles=['Autocalibration', 'Calibration Lines']) # %% [markdown] -# Now, we can use these coil sensitivity maps to reconstruct our SENSE scan. +# As expected, we still see undersampling artifacts in the image. In order to get rid of them, +# we can try a more sophisticated reconstruction method, such as the *iterative SENSE algorithm*. +# As you might have guessed, these are also included in MRpro: +# Instead of the `~mrpro.algorithms.reconstruction.DirectReconstruction`, +# we can use `~mrpro.algorithms.reconstruction.IterativeSENSEReconstruction`: # %% -it_sense_recon = IterativeSENSEReconstruction(kdata_us, csm=direct_recon_calib_lines.csm) -idat_us = it_sense_recon(kdata_us) - -fig, ax = plt.subplots(1, 2, squeeze=False) -ax[0, 0].imshow(img_fully_sampled) -ax[0, 1].imshow(idat_us.rss().squeeze()) +sense_recon_us = mrpro.algorithms.reconstruction.IterativeSENSEReconstruction( + kdata_us, + csm=direct_recon_calib_lines.csm, + n_iterations=8, +) +idat_us_sense = sense_recon_us(kdata_us) +show_images(idat_us_sense.rss().squeeze(), titles=['Iterative SENSE']) # %% [markdown] -# %% [markdown] -# The final image is a little worse (nothing beats fully sampled high-resolution scans for coil map -# calculation), but we've managed to get rid of the undersampling artifacts inside the brain. If you want to -# further improve the coil sensitivity map quality, try: -# - using different methods to calculate them, e.g. `mrpro.algorithms.csm.inati` -# - playing around with the parameters of these methods -# - applying a smoothing filter on the images (or ideally directly in k-space) used to calculate the coil -# sensitivity maps +# This looks better! More information about the iterative SENSE reconstruction and its implementation in MRpro +# can be found in the iterative SENSE reconstruction examples.
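+
+# %% [markdown]
+# As a final comparison (a small sketch; it assumes the variables from the cells above are still defined), we can
+# display the direct reconstruction and the iterative SENSE reconstruction of the undersampled data side by side:
+
+# %%
+show_images(
+    idat_us_csm.rss().squeeze(),
+    idat_us_sense.rss().squeeze(),
+    titles=['Direct', 'Iterative SENSE'],
+)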
diff --git a/examples/scripts/comparison_trajectory_calculators.py b/examples/scripts/comparison_trajectory_calculators.py new file mode 100644 index 000000000..325a00a09 --- /dev/null +++ b/examples/scripts/comparison_trajectory_calculators.py @@ -0,0 +1,124 @@ +# %% [markdown] +# # Different ways to obtain the Trajectory +# This example builds upon the direct reconstruction example and demonstrates three ways +# to obtain the trajectory information required for image reconstruction: +# - using the trajectory that is stored in the ISMRMRD file +# - calculating the trajectory using the radial 2D trajectory calculator +# - calculating the trajectory from the pulseq sequence file using the PyPulseq trajectory calculator + + +# %% tags=["hide-cell"] +# Download raw data from Zenodo +import tempfile +from pathlib import Path + +import mrpro +import torch +import zenodo_get + +dataset = '14617082' + +tmp = tempfile.TemporaryDirectory() # RAII, automatically cleaned up +data_folder = Path(tmp.name) +zenodo_get.zenodo_get([dataset, '-r', 5, '-o', data_folder]) # r: retries + +# %% [markdown] +# ### Using KTrajectoryIsmrmrd - Trajectory saved in ISMRMRD file +# Passing an instance of `~mrpro.data.traj_calculators.KTrajectoryIsmrmrd` to `~mrpro.data.KData.from_file` +# when loading the data tells the `~mrpro.data.KData` object to use the trajectory +# that is stored in the ISMRMRD file. +# ```{note} +# Often the trajectory information has not been stored in the ISMRMRD file, +# in which case loading the trajectory this way will raise an error. +# ``` + +# %% +# Read the raw data and the trajectory from ISMRMRD file +kdata = mrpro.data.KData.from_file( + data_folder / 'radial2D_402spokes_golden_angle_with_traj.h5', + mrpro.data.traj_calculators.KTrajectoryIsmrmrd(), +) + +# Reconstruct image +reconstruction = mrpro.algorithms.reconstruction.DirectReconstruction(kdata) +img_using_ismrmrd_traj = reconstruction(kdata) + +# %% [markdown] +# ### Using KTrajectoryRadial2D - Specific trajectory calculator +# For some common trajectories, we provide specific trajectory calculators. +# These calculators often require only a few parameters to be specified, +# such as the angle between spokes in the radial trajectory. Other parameters +# will be taken from the ISMRMRD file. +# This will calculate the trajectory using the radial 2D trajectory calculator. +# ```{note} +# You can also implement your own trajectory calculator by subclassing +# `~mrpro.data.traj_calculators.KTrajectoryCalculator`. +# ``` + +# %% +# Read raw data and calculate trajectory using KTrajectoryRadial2D +golden_angle = torch.pi * 0.618034 +kdata = mrpro.data.KData.from_file( + data_folder / 'radial2D_402spokes_golden_angle_with_traj.h5', + mrpro.data.traj_calculators.KTrajectoryRadial2D(golden_angle), +) + +# Reconstruct image +reconstruction = mrpro.algorithms.reconstruction.DirectReconstruction(kdata) +img_using_rad2d_traj = reconstruction(kdata) + +# %% [markdown] +# ### Using KTrajectoryPulseq - Trajectory from pulseq sequence file +# This will calculate the trajectory from the pulseq sequence file +# using the PyPulseq trajectory calculator. This method +# requires the pulseq sequence file that was used to acquire the data. +# The path to the sequence file is provided as an argument to KTrajectoryPulseq.
+
+# %%
+# Read raw data and calculate trajectory using KTrajectoryPulseq
+seq_path = data_folder / 'radial2D_402spokes_golden_angle.seq'
+kdata = mrpro.data.KData.from_file(
+    data_folder / 'radial2D_402spokes_golden_angle_with_traj.h5',
+    mrpro.data.traj_calculators.KTrajectoryPulseq(seq_path),
+)
+
+# Reconstruct image
+reconstruction = mrpro.algorithms.reconstruction.DirectReconstruction(kdata)
+img_using_pulseq_traj = reconstruction(kdata)
+
+# %% [markdown]
+# ### Plot the different reconstructed images
+# All three images are reconstructed using the same raw data and should look almost identical.
+# %% tags=["hide-cell"]
+import matplotlib.pyplot as plt
+import torch
+
+
+def show_images(*images: torch.Tensor, titles: list[str] | None = None) -> None:
+    """Plot images."""
+    n_images = len(images)
+    _, axes = plt.subplots(1, n_images, squeeze=False, figsize=(n_images * 3, 3))
+    for i in range(n_images):
+        axes[0][i].imshow(images[i], cmap='gray')
+        axes[0][i].axis('off')
+        if titles:
+            axes[0][i].set_title(titles[i])
+    plt.show()
+
+
+# %%
+show_images(
+    img_using_ismrmrd_traj.rss()[0, 0],
+    img_using_rad2d_traj.rss()[0, 0],
+    img_using_pulseq_traj.rss()[0, 0],
+    titles=['KTrajectoryIsmrmrd', 'KTrajectoryRadial2D', 'KTrajectoryPulseq'],
+)
+
+# %% [markdown]
+# Tada! We have successfully reconstructed images using three different trajectory calculators.
+# ```{note}
+# Which of these three methods is the best depends on the specific use case:
+# If a trajectory is already stored in the ISMRMRD file, it is the most convenient to use.
+# If a pulseq sequence file is available, the trajectory can be calculated using the PyPulseq trajectory calculator.
+# Otherwise, a trajectory calculator needs to be implemented for the specific trajectory used.
+# ```
diff --git a/examples/scripts/direct_reconstruction.py b/examples/scripts/direct_reconstruction.py
index 7672aa7e7..853f01ac7 100644
--- a/examples/scripts/direct_reconstruction.py
+++ b/examples/scripts/direct_reconstruction.py
@@ -1,98 +1,175 @@
 # %% [markdown]
 # # Direct Reconstruction of 2D golden angle radial data
-# Here we use the DirectReconstruction class to reconstruct images from ISMRMRD 2D radial data
-# %%
-# define zenodo URL of the example ismrmd data
-zenodo_url = 'https://zenodo.org/records/10854057/files/'
-fname = 'pulseq_radial_2D_402spokes_golden_angle_with_traj.h5'
-# %%
-# Download raw data
+# Here we use the `~mrpro.algorithms.reconstruction.DirectReconstruction` class to perform a basic reconstruction of
+# 2D radial data.
+# A *direct* reconstruction uses the density compensated adjoint of the acquisition operator to obtain the images,
+# i.e., it computes $x = A^H W y$ from the k-space data $y$, with acquisition operator $A$ and density
+# compensation $W$.
+
+# %% [markdown]
+# ## Using `~mrpro.algorithms.reconstruction.DirectReconstruction`
+# We use the `~mrpro.algorithms.reconstruction.DirectReconstruction` class to reconstruct images from 2D radial data.
+# `~mrpro.algorithms.reconstruction.DirectReconstruction` estimates sensitivity maps, density compensation factors, etc.
+# and performs an adjoint Fourier transform.
+# This is the simplest reconstruction method in our high-level interface to the reconstruction pipeline.
+
+# %% [markdown]
+# ### Load the data
+# We load the data from the ISMRMRD file. We want to use the trajectory that is also stored in the ISMRMRD file.
+# This can be done by passing a `~mrpro.data.traj_calculators.KTrajectoryIsmrmrd` object to
+# `~mrpro.data.KData.from_file` when creating the `~mrpro.data.KData` object.
+
+# %% tags=["hide-cell"]
+# Download raw data from Zenodo
 import tempfile
+from pathlib import Path
 
-import requests
+import zenodo_get
 
-data_file = tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.h5')
-response = requests.get(zenodo_url + fname, timeout=30)
-data_file.write(response.content)
-data_file.flush()
+dataset = '14617082'
+
+tmp = tempfile.TemporaryDirectory()  # RAII, automatically cleaned up
+data_folder = Path(tmp.name)
+zenodo_get.zenodo_get([dataset, '-r', 5, '-o', data_folder])  # r: retries
 
-# %% [markdown]
-# ### Image reconstruction
-# We use the DirectReconstruction class to reconstruct images from 2D radial data.
-# DirectReconstruction estimates CSMs, DCFs and performs an adjoint Fourier transform.
-# This is a high-level interface to the reconstruction pipeline.
 # %%
 import mrpro
+import torch
+
+trajectory_calculator = mrpro.data.traj_calculators.KTrajectoryIsmrmrd()
+kdata = mrpro.data.KData.from_file(data_folder / 'radial2D_402spokes_golden_angle_with_traj.h5', trajectory_calculator)
+
+# %% [markdown]
+# ### Setup the DirectReconstruction instance
+# We create a `~mrpro.algorithms.reconstruction.DirectReconstruction` and supply ``kdata``.
+# `~mrpro.algorithms.reconstruction.DirectReconstruction` uses the information in ``kdata`` to
+# set up a Fourier transform and density compensation factors, and to estimate coil sensitivity maps.
+# (See the *Behind the scenes* section for more details.)
+#
+# ```{note}
+# You can also directly set the Fourier operator, coil sensitivity maps, density compensation factors, etc.
+# of the reconstruction instance.
+# ```
 
-# Use the trajectory that is stored in the ISMRMRD file
-trajectory = mrpro.data.traj_calculators.KTrajectoryIsmrmrd()
-# Load in the Data from the ISMRMRD file
-kdata = mrpro.data.KData.from_file(data_file.name, trajectory)
-# Perform the reconstruction
+# %%
 reconstruction = mrpro.algorithms.reconstruction.DirectReconstruction(kdata)
-# Use this to run on gpu: kdata = kdata.cuda()
+
+# %% [markdown]
+# ### Perform the reconstruction
+# The reconstruction is performed by calling the reconstruction instance with the k-space data.
+# ```{note}
+# Often, the data used to obtain the meta data for constructing the reconstruction instance
+# is the same as the data passed to the reconstruction.
+# But you can also use different data to create the coil sensitivity maps, DCFs, etc.
+# than the data that is passed to the reconstruction.
+# ```
+
+# %%
 img = reconstruction(kdata)
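+
+# %% [markdown]
+# ```{note}
+# The reconstruction also runs on the GPU. The following cell is a minimal sketch (assuming a CUDA device
+# is available): we move the k-space data to the GPU before setting up the reconstruction, so the operators
+# and the resulting image live on the GPU as well.
+# ```
+
+# %%
+if torch.cuda.is_available():
+    kdata_gpu = kdata.cuda()  # move the raw data (including trajectory) to the GPU
+    img_gpu = mrpro.algorithms.reconstruction.DirectReconstruction(kdata_gpu)(kdata_gpu)
+    print(img_gpu.data.device)  # use .cpu() to move results back, e.g. for plotting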
+
+# %% [markdown]
+# ### Display the reconstructed image
+# We now have an `~mrpro.data.IData` object, containing a header and the image tensor.
+# We display the reconstructed image using matplotlib.
+
 # %%
 import matplotlib.pyplot as plt
 
-# Display the reconstructed image
 # If there are multiple slices, ..., only the first one is selected
-first_img = img.rss().cpu()[0, 0, :, :]  # images, z, y, x
-plt.matshow(first_img, cmap='gray')
+first_img = img.rss()[0, 0]  # images, z, y, x
+plt.imshow(first_img, cmap='gray')
+plt.axis('off')
+plt.show()
+
 # %% [markdown]
-# ### Behind the scenes
-# These steps are done in a direct reconstruction:
+# ## Behind the scenes
+# We now peek behind the scenes to see what happens in the `~mrpro.algorithms.reconstruction.DirectReconstruction`
+# class, and perform all steps manually:
+# - Calculate density compensation factors
+# - Setup Fourier operator
+# - Obtain coil-wise images
+# - Calculate coil sensitivity maps
+# - Perform direct reconstruction
+
+# ### Calculate density compensation using the trajectory
+# We use a Voronoi tessellation of the trajectory to calculate the `~mrpro.data.DcfData` and obtain
+# a `~mrpro.operators.DensityCompensationOp` operator.
+
 # %%
-# Calculate dcf using the trajectory
 dcf_operator = mrpro.data.DcfData.from_traj_voronoi(kdata.traj).as_operator()
 
-# Define Fourier operator using the trajectory
-# and header information in kdata
+# %% [markdown]
+# ### Setup Fourier Operator
+# Next, we create the Fourier operator. We can just pass the ``kdata`` object to the constructor of the
+# `~mrpro.operators.FourierOp`, and the trajectory and header information is used to create the operator. We want
+# to use the adjoint of the density compensated Fourier operator, so we perform a composition with ``dcf_operator``
+# and use the `~mrpro.operators.FourierOp.H` property of the operator to obtain its adjoint.
+
+# %%
 fourier_operator = dcf_operator @ mrpro.operators.FourierOp.from_kdata(kdata)
 adjoint_operator = fourier_operator.H
 
-# Calculate coil maps
-# Note that operators return a tuple of tensors, so we need to unpack it,
-# even though there is only one tensor returned from adjoint operator.
+# %% [markdown]
+# ### Calculate coil sensitivity maps
+# Coil sensitivity maps are calculated using the Walsh method (see `~mrpro.data.CsmData` for other available methods).
+# We first need to calculate the coil-wise images, which are then used to calculate the coil sensitivity maps.
+
+# %%
 img_coilwise = mrpro.data.IData.from_tensor_and_kheader(*adjoint_operator(kdata.data), kdata.header)
 csm_operator = mrpro.data.CsmData.from_idata_walsh(img_coilwise).as_operator()
-# Perform Direct Reconstruction
-forward_operator = fourier_operator @ csm_operator
-adjoint_operator = forward_operator.H
-img_manual = mrpro.data.IData.from_tensor_and_kheader(*adjoint_operator(kdata.data), kdata.header)
 
 # %% [markdown]
-# ### Further behind the scenes
-# ... these steps are equivalent to:
+# ### Perform Direct Reconstruction
+# Finally, the direct reconstruction is performed and an `~mrpro.data.IData` object with the reconstructed
+# image is returned. We update the ``adjoint_operator`` to also include the coil sensitivity maps, thus
+# performing the coil combination.
+
 # %%
-# Define Fourier operator
+adjoint_operator = (fourier_operator @ csm_operator).H
+img_manual = mrpro.data.IData.from_tensor_and_kheader(*adjoint_operator(kdata.data), kdata.header)
+
+# %% [markdown]
+# ## Further behind the scenes
+# There is also an even more manual way to perform the direct reconstruction. We can set up the Fourier operator by
+# passing the trajectory and matrix sizes.
+
+# %%
 fourier_operator = mrpro.operators.FourierOp(
     recon_matrix=kdata.header.recon_matrix,
     encoding_matrix=kdata.header.encoding_matrix,
     traj=kdata.traj,
 )
+# %% [markdown]
+# We can call one of the algorithms in `mrpro.algorithms.dcf` to calculate the density compensation factors.
 
-# Calculate dcf using the trajectory
-dcf_data = mrpro.data.DcfData.from_traj_voronoi(kdata.traj)
+# %%
+kykx = torch.stack((kdata.traj.ky[0, 0], kdata.traj.kx[0, 0]))
+dcf_tensor = mrpro.algorithms.dcf.dcf_2d3d_voronoi(kykx)
 
-# Perform adjoint Fourier transform
-# Note that operators return a tuple of tensors, so we need to unpack it.
-(img_tensor_coilwise,) = fourier_operator.adjoint(kdata.data * dcf_data.data.unsqueeze(-4))
-img_coilwise = mrpro.data.IData.from_tensor_and_kheader(img_tensor_coilwise, kdata.header)
+# %% [markdown]
+# We use these DCFs to weight the k-space data before performing the adjoint Fourier transform. We can also call
+# `~mrpro.operators.FourierOp.adjoint` on the Fourier operator instead of obtaining an adjoint operator.
+# %%
+(img_tensor_coilwise,) = fourier_operator.adjoint(dcf_tensor * kdata.data)
 
-# Calculate and apply coil maps
-csm_data = mrpro.data.CsmData.from_idata_walsh(img_coilwise)
+# %% [markdown]
+# Next, we calculate the coil sensitivity maps by using one of the algorithms in `mrpro.algorithms.csm` and set
+# up a `~mrpro.operators.SensitivityOp` operator.
+# %%
+csm_data = mrpro.algorithms.csm.walsh(img_tensor_coilwise[0], smoothing_width=5)
 csm_operator = mrpro.operators.SensitivityOp(csm_data)
 
-(img_tensor_coilcombined,) = csm_operator.adjoint(img_tensor_coilwise)
+# %% [markdown]
+# Finally, we perform the coil combination of the coil-wise images and obtain the final images.
+
+# %%
+(img_tensor_coilcombined,) = csm_operator.adjoint(img_tensor_coilwise)
 img_more_manual = mrpro.data.IData.from_tensor_and_kheader(img_tensor_coilcombined, kdata.header)
 
 # %% [markdown]
 # ### Check for equal results
 # The 3 versions should result in the same image data.
-# %%
-import torch
 
+# %%
 # If the assert statement did not raise an exception, the results are equal.
-assert torch.allclose(img.data, img_manual.data)
-assert torch.allclose(img.data, img_more_manual.data)
+torch.testing.assert_close(img.data, img_manual.data)
+torch.testing.assert_close(img.data, img_more_manual.data, atol=1e-4, rtol=1e-4)
+
+# %%
diff --git a/examples/scripts/iterative_sense_reconstruction.py b/examples/scripts/iterative_sense_reconstruction.py
deleted file mode 100644
index 6d0bc49a5..000000000
--- a/examples/scripts/iterative_sense_reconstruction.py
+++ /dev/null
@@ -1,140 +0,0 @@
-# %% [markdown]
-# # Iterative SENSE Reconstruction of 2D golden angle radial data
-# Here we use the IterativeSENSEReconstruction class to reconstruct images from ISMRMRD 2D radial data
-# %%
-# define zenodo URL of the example ismrmd data
-zenodo_url = 'https://zenodo.org/records/10854057/files/'
-fname = 'pulseq_radial_2D_402spokes_golden_angle_with_traj.h5'
-# %%
-# Download raw data
-import tempfile
-
-import requests
-
-data_file = tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.h5')
-response = requests.get(zenodo_url + fname, timeout=30)
-data_file.write(response.content)
-data_file.flush()
-
-# %% [markdown]
-# ### Image reconstruction
-# We use the IterativeSENSEReconstruction class to reconstruct images from 2D radial data.
-# IterativeSENSEReconstruction solves the following reconstruction problem: -# -# Let's assume we have obtained the k-space data $y$ from an image $x$ with an acquisition model (Fourier transforms, -# coil sensitivity maps...) $A$ then we can formulate the forward problem as: -# -# $ y = Ax + n $ -# -# where $n$ describes complex Gaussian noise. The image $x$ can be obtained by minimizing the functional $F$ -# -# $ F(x) = ||W^{\frac{1}{2}}(Ax - y)||_2^2 $ -# -# where $W^\frac{1}{2}$ is the square root of the density compensation function (which corresponds to a diagonal -# operator). -# -# Setting the derivative of the functional $F$ to zero and rearranging yields -# -# $ A^H W A x = A^H W y$ -# -# which is a linear system $Hx = b$ that needs to be solved for $x$. -# %% -import mrpro - -# %% [markdown] -# ##### Read-in the raw data - -# %% -# Use the trajectory that is stored in the ISMRMRD file -trajectory = mrpro.data.traj_calculators.KTrajectoryIsmrmrd() -# Load in the Data from the ISMRMRD file -kdata = mrpro.data.KData.from_file(data_file.name, trajectory) -kdata.header.recon_matrix.x = 256 -kdata.header.recon_matrix.y = 256 - -# %% [markdown] -# ##### Direct reconstruction for comparison - -# %% -# For comparison we can carry out a direct reconstruction -direct_reconstruction = mrpro.algorithms.reconstruction.DirectReconstruction(kdata) -img_direct = direct_reconstruction(kdata) - -# %% [markdown] -# ##### Iterative SENSE reconstruction - -# %% -# We can use the direct reconstruction to obtain the coil maps. -iterative_sense_reconstruction = mrpro.algorithms.reconstruction.IterativeSENSEReconstruction( - kdata, csm=direct_reconstruction.csm, n_iterations=4 -) -img = iterative_sense_reconstruction(kdata) - -# %% [markdown] -# ### Behind the scenes - -# %% [markdown] -# ##### Set-up the density compensation operator $W$ - -# %% -# The density compensation operator is calculated based on the k-space locations of the acquired data. -dcf_operator = mrpro.data.DcfData.from_traj_voronoi(kdata.traj).as_operator() - - -# %% [markdown] -# ##### Set-up the acquisition model $A$ - -# %% -# Define Fourier operator using the trajectory and header information in kdata -fourier_operator = mrpro.operators.FourierOp.from_kdata(kdata) - -# Calculate coil maps -# Note that operators return a tuple of tensors, so we need to unpack it, -# even though there is only one tensor returned from adjoint operator. 
-img_coilwise = mrpro.data.IData.from_tensor_and_kheader(*fourier_operator.H(*dcf_operator(kdata.data)), kdata.header)
-csm_operator = mrpro.data.CsmData.from_idata_walsh(img_coilwise).as_operator()
-
-# Create the acquisition operator A
-acquisition_operator = fourier_operator @ csm_operator
-
-# %% [markdown]
-# ##### Calculate the right-hand-side of the linear system $b = A^H W y$
-
-# %%
-(right_hand_side,) = acquisition_operator.H(dcf_operator(kdata.data)[0])
-
-
-# %% [markdown]
-# ##### Set-up the linear self-adjoint operator $H = A^H W A$
-
-# %%
-operator = acquisition_operator.H @ dcf_operator @ acquisition_operator
-
-# %% [markdown]
-# ##### Run conjugate gradient
-
-# %%
-img_manual = mrpro.algorithms.optimizers.cg(
-    operator, right_hand_side, initial_value=right_hand_side, max_iterations=4, tolerance=0.0
-)
-
-# %%
-# Display the reconstructed image
-import matplotlib.pyplot as plt
-import torch
-
-fig, ax = plt.subplots(1, 3, squeeze=False)
-ax[0, 0].imshow(img_direct.rss()[0, 0, :, :])
-ax[0, 0].set_title('Direct Reconstruction', fontsize=10)
-ax[0, 1].imshow(img.rss()[0, 0, :, :])
-ax[0, 1].set_title('Iterative SENSE', fontsize=10)
-ax[0, 2].imshow(img_manual.abs()[0, 0, 0, :, :])
-ax[0, 2].set_title('"Manual" Iterative SENSE', fontsize=10)
-
-# %% [markdown]
-# ### Check for equal results
-# The two versions result should in the same image data.
-
-# %%
-# If the assert statement did not raise an exception, the results are equal.
-assert torch.allclose(img.data, img_manual)
diff --git a/examples/scripts/iterative_sense_reconstruction_radial2D.py b/examples/scripts/iterative_sense_reconstruction_radial2D.py
new file mode 100644
index 000000000..2616f7f0e
--- /dev/null
+++ b/examples/scripts/iterative_sense_reconstruction_radial2D.py
@@ -0,0 +1,221 @@
+# %% [markdown]
+# # Iterative SENSE Reconstruction of 2D golden angle radial data
+# Here we use an iterative reconstruction method to reconstruct images from ISMRMRD 2D radial data.
+
+# %% [markdown]
+# We use the `~mrpro.algorithms.reconstruction.IterativeSENSEReconstruction` class to reconstruct images by solving
+# the following reconstruction problem:
+#
+# Let's assume we have obtained the k-space data $y$ from an image $x$ with an acquisition model (Fourier transforms,
+# coil sensitivity maps...) $A$, then we can formulate the forward problem as:
+#
+# $ y = Ax + n $
+#
+# where $n$ describes complex Gaussian noise. The image $x$ can be obtained by minimizing the functional $F$
+#
+# $ F(x) = ||W^{\frac{1}{2}}(Ax - y)||_2^2 $
+#
+# where $W^\frac{1}{2}$ is the square root of the density compensation function (which corresponds to a diagonal
+# operator) used to weight the loss.
+#
+# Setting the derivative of the functional $F$ to zero and rearranging yields
+#
+# $ A^H W A x = A^H W y$
+#
+# which is a linear system $Hx = b$ that needs to be solved for $x$. This is done using the conjugate gradient method.
+
+# %% [markdown]
+# ## Using `~mrpro.algorithms.reconstruction.IterativeSENSEReconstruction`
+# First, we demonstrate the use of `~mrpro.algorithms.reconstruction.IterativeSENSEReconstruction`, before we
+# peek behind the scenes and implement the reconstruction manually.
+#
+# ## Read-in the raw data
+# We read the raw k-space data and the trajectory from the ISMRMRD file
+# (see for more information on the trajectory calculation).
+# Our example data contains three datasets:
+# - `radial2D_402spokes_golden_angle_with_traj.h5` with 402 spokes
+# - `radial2D_96spokes_golden_angle_with_traj.h5` with 96 spokes
+# - `radial2D_24spokes_golden_angle_with_traj.h5` with 24 spokes
+#
+# We use the 402 spokes dataset for the reconstruction.
+
+# %% tags=["hide-cell"]
+# ### Download raw data from Zenodo
+import tempfile
+from pathlib import Path
+
+import zenodo_get
+
+dataset = '14617082'
+
+tmp = tempfile.TemporaryDirectory()  # RAII, automatically cleaned up
+data_folder = Path(tmp.name)
+zenodo_get.zenodo_get([dataset, '-r', 5, '-o', data_folder])  # r: retries
+
+# %%
+import mrpro
+
+trajectory_calculator = mrpro.data.traj_calculators.KTrajectoryIsmrmrd()
+kdata = mrpro.data.KData.from_file(data_folder / 'radial2D_402spokes_golden_angle_with_traj.h5', trajectory_calculator)
+
+# %% [markdown]
+# ## Direct reconstruction for comparison
+# For comparison, we can first carry out a direct reconstruction using the
+# `~mrpro.algorithms.reconstruction.DirectReconstruction` class.
+# See also .
+
+# %%
+direct_reconstruction = mrpro.algorithms.reconstruction.DirectReconstruction(kdata)
+img_direct = direct_reconstruction(kdata)
+
+# %% [markdown]
+# ### Setting up the iterative SENSE reconstruction
+# Now let's use the `~mrpro.algorithms.reconstruction.IterativeSENSEReconstruction` class to reconstruct the image
+# using the iterative SENSE algorithm.
+#
+# We first set up the reconstruction. Here, we reuse the Fourier operator, the DCFs and the coil sensitivity maps
+# from ``direct_reconstruction``. We use *early stopping* after 4 iterations by setting `n_iterations`.
+#
+# ```{note}
+# When setting up the reconstruction, we can also just provide the `~mrpro.data.KData` and let
+# `~mrpro.algorithms.reconstruction.IterativeSENSEReconstruction` figure
+# out the Fourier operator, estimate the coil sensitivity maps, and choose a density weighting.\
+# We can also provide `~mrpro.data.KData` and some information, such as the sensitivity maps.
+# In that case, the reconstruction will fill in the missing information based on the `~mrpro.data.KData` object.
+# ```

+# %%
+iterative_sense_reconstruction = mrpro.algorithms.reconstruction.IterativeSENSEReconstruction(
+    fourier_op=direct_reconstruction.fourier_op,
+    csm=direct_reconstruction.csm,
+    dcf=direct_reconstruction.dcf,
+    n_iterations=4,
+)
+
+# %% [markdown]
+# ### Run the reconstruction
+# We now run the reconstruction using the ``iterative_sense_reconstruction`` object. We just need to pass the k-space
+# data and obtain the reconstructed image as an `~mrpro.data.IData` object.
+# %%
+
+img = iterative_sense_reconstruction(kdata)
+
+# %% [markdown]
+# ## Behind the scenes
+# We now peek behind the scenes to see how the iterative SENSE reconstruction is implemented. We perform all steps
+# `~mrpro.algorithms.reconstruction.IterativeSENSEReconstruction` does when initialized with only an
+# `~mrpro.data.KData` object, i.e., we set up a Fourier operator, estimate coil sensitivity maps, and compute the
+# density weighting, without reusing anything from ``direct_reconstruction``.
+
+# %% [markdown]
+# ### Set up density compensation operator $W$
+# We create a density compensation operator $W$ for weighting the loss. We use a
+# Voronoi tessellation of the trajectory to calculate the `~mrpro.data.DcfData`.
+#
+# ```{note}
+# Using a weighted loss in iterative SENSE is not necessary, and there has been some discussion about
+# the benefits and drawbacks.
Currently, the iterative SENSE reconstruction in MRpro uses a weighted loss.
+# This might change in the future.
+# ```
+
+# %%
+dcf_operator = mrpro.data.DcfData.from_traj_voronoi(kdata.traj).as_operator()
+
+# %% [markdown]
+# ### Set up the acquisition model $A$
+# We need `~mrpro.operators.FourierOp` and `~mrpro.operators.SensitivityOp` operators to set up the acquisition model
+# $A$. The Fourier operator is created from the trajectory and header information in `kdata`:
+
+# %%
+fourier_operator = mrpro.operators.FourierOp(
+    traj=kdata.traj,
+    recon_matrix=kdata.header.recon_matrix,
+    encoding_matrix=kdata.header.encoding_matrix,
+)
+
+# %% [markdown]
+# To estimate the coil sensitivity maps, we first calculate the coil-wise images from the k-space data and then
+# estimate the coil sensitivity maps using the Walsh method:
+
+# %%
+img_coilwise = mrpro.data.IData.from_tensor_and_kheader(*fourier_operator.H(*dcf_operator(kdata.data)), kdata.header)
+csm_data = mrpro.data.CsmData.from_idata_walsh(img_coilwise)
+csm_operator = csm_data.as_operator()
+
+# %% [markdown]
+# Now we can set up the acquisition operator $A$ by composing the Fourier operator and the coil sensitivity maps
+# operator using ``@``.
+
+# %%
+acquisition_operator = fourier_operator @ csm_operator
+
+# %% [markdown]
+# ### Calculate the right-hand-side of the linear system
+# Next, we need to calculate $b = A^H W y$.
+
+# %%
+(right_hand_side,) = (acquisition_operator.H @ dcf_operator)(kdata.data)
+
+# %% [markdown]
+# ### Set up the linear self-adjoint operator $H$
+# We set up $H = A^H W A$, using the ``dcf_operator`` and ``acquisition_operator``.
+
+# %%
+operator = acquisition_operator.H @ dcf_operator @ acquisition_operator
+
+# %% [markdown]
+# ### Run conjugate gradient
+# Finally, we solve the linear system $Hx = b$ using the conjugate gradient method.
+# Again, we use early stopping after 4 iterations. Instead, we could also use a tolerance
+# to stop the iterations when the residual is below a certain threshold.
+
+# %%
+img_manual = mrpro.algorithms.optimizers.cg(
+    operator, right_hand_side, initial_value=right_hand_side, max_iterations=4, tolerance=0.0
+)
+
+# %% [markdown]
+# ## Display the results
+# We can now compare the results of the iterative SENSE reconstruction with the direct reconstruction.
+# Both versions, the one using the `~mrpro.algorithms.reconstruction.IterativeSENSEReconstruction` class
+# and the manual implementation, should result in identical images.
+
+# %% tags=["hide-cell"]
+import matplotlib.pyplot as plt
+import torch
+
+
+def show_images(*images: torch.Tensor, titles: list[str] | None = None) -> None:
+    """Plot images."""
+    n_images = len(images)
+    _, axes = plt.subplots(1, n_images, squeeze=False, figsize=(n_images * 3, 3))
+    for i in range(n_images):
+        axes[0][i].imshow(images[i], cmap='gray')
+        axes[0][i].axis('off')
+        if titles:
+            axes[0][i].set_title(titles[i])
+    plt.show()
+
+
+# %%
+show_images(
+    img_direct.rss()[0, 0],
+    img.rss()[0, 0],
+    img_manual.abs()[0, 0, 0],
+    titles=['Direct', 'Iterative SENSE', 'Manual Iterative SENSE'],
+)
+
+# %% [markdown]
+# ### Check for equal results
+# Finally, we check that the two images are indeed identical.
+
+# %%
+# If the assert statement did not raise an exception, the results are equal.
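+# As an additional quick check (a small sketch), we can also print the maximum absolute deviation:
+print((img.data - img_manual).abs().max())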
+assert torch.allclose(img.data, img_manual)
+
+# %% [markdown]
+# ## Next steps
+# We can also reconstruct undersampled data: replace the filename above with one of the datasets with fewer
+# spokes to try it out.\
+# If you want to see how to include a regularization term in the optimization problem,
+# see the example in .
diff --git a/examples/scripts/iterative_sense_reconstruction_with_regularization.py b/examples/scripts/iterative_sense_reconstruction_with_regularization.py
new file mode 100644
index 000000000..a1b11eaae
--- /dev/null
+++ b/examples/scripts/iterative_sense_reconstruction_with_regularization.py
@@ -0,0 +1,228 @@
+# %% [markdown]
+# # Regularized Iterative SENSE Reconstruction of 2D golden angle radial data
+# Here we use the `~mrpro.algorithms.reconstruction.RegularizedIterativeSENSEReconstruction` class to reconstruct
+# undersampled images from 2D radial data.
+
+# %% tags=["hide-cell"]
+# Download raw data from Zenodo
+import tempfile
+from pathlib import Path
+
+import zenodo_get
+
+dataset = '14617082'
+
+tmp = tempfile.TemporaryDirectory()  # RAII, automatically cleaned up
+data_folder = Path(tmp.name)
+zenodo_get.zenodo_get([dataset, '-r', 5, '-o', data_folder])  # r: retries
+
+# %% [markdown]
+# ### Image reconstruction
+# We use the `~mrpro.algorithms.reconstruction.RegularizedIterativeSENSEReconstruction` class to reconstruct images
+# from 2D radial data. It solves the following reconstruction problem:
+#
+# Let's assume we have obtained the k-space data $y$ from an image $x$ with an acquisition model (Fourier transforms,
+# coil sensitivity maps...) $A$, then we can formulate the forward problem as:
+#
+# $ y = Ax + n $
+#
+# where $n$ describes complex Gaussian noise. The image $x$ can be obtained by minimizing the functional $F$
+#
+# $ F(x) = ||W^{\frac{1}{2}}(Ax - y)||_2^2 $
+#
+# where $W^\frac{1}{2}$ is the square root of the density compensation function (which corresponds to a diagonal
+# operator). Because this is an ill-posed problem, we can add a regularization term to stabilize the problem and obtain
+# a solution with certain properties:
+#
+# $ F(x) = ||W^{\frac{1}{2}}(Ax - y)||_2^2 + l||Bx - x_{reg}||_2^2$
+#
+# where $l$ is the strength of the regularization, $B$ is a linear operator and $x_{reg}$ is a regularization image.
+# With this functional $F$ we obtain a solution which is close to $x_{reg}$ and to the acquired data $y$.
+#
+# Setting the derivative of the functional $F$ to zero and rearranging yields
+#
+# $ (A^H W A + l B^H B) x = A^H W y + l B^H x_{reg}$
+#
+# which is a linear system $Hx = b$ that needs to be solved for $x$.
+#
+# One important question, of course, is what to use for $x_{reg}$. For dynamic images (e.g. cine MRI) low-resolution
+# dynamic images or high-quality static images have been proposed. In recent years, the output of neural networks
+# has also been used as an image regularizer.
+#
+# In this example we are going to use a high-quality image to regularize the reconstruction of an undersampled image.
+# Both images are obtained from the same data acquisition (one using all the acquired data ($x_{reg}$) and one using
+# only parts of it ($x$)). This of course is an unrealistic case but it will allow us to study the effect of the
+# regularization.
+
+# %% [markdown]
+# ### Reading both the fully sampled and undersampled data
+# We read the raw data and the trajectory from the ISMRMRD file.
+# We load both the fully sampled and the undersampled data.
+# The fully sampled data will be used to estimate the coil sensitivity maps and as a regularization image.
+# The undersampled data will be used to reconstruct the image.
+
+# %%
+# Read the raw data and the trajectory from ISMRMRD file
+import mrpro
+
+kdata_fullysampled = mrpro.data.KData.from_file(
+    data_folder / 'radial2D_402spokes_golden_angle_with_traj.h5',
+    mrpro.data.traj_calculators.KTrajectoryIsmrmrd(),
+)
+kdata_undersampled = mrpro.data.KData.from_file(
+    data_folder / 'radial2D_24spokes_golden_angle_with_traj.h5',
+    mrpro.data.traj_calculators.KTrajectoryIsmrmrd(),
+)
+
+# %% [markdown]
+# ##### Image $x_{reg}$ from fully sampled data
+# We first reconstruct the fully sampled image to use it as a regularization image.
+# In a real-world scenario, we would not have this image and would have to use a low-resolution image as a prior, or use
+# a neural network to estimate the regularization image.
+
+# %%
+# Estimate coil maps. Here we use the fully sampled data to estimate the coil sensitivity maps.
+# In a real-world scenario, we would either use a calibration scan (e.g. a separate fully sampled scan) to estimate the
+# coil sensitivity maps or use ESPIRiT or similar methods to estimate them from the undersampled data.
direct_reconstruction = mrpro.algorithms.reconstruction.DirectReconstruction(kdata_fullysampled)
+csm = direct_reconstruction.csm
+assert csm is not None
+
+# unregularized iterative SENSE reconstruction of the fully sampled data
+iterative_sense_reconstruction = mrpro.algorithms.reconstruction.IterativeSENSEReconstruction(
+    kdata_fullysampled, csm=csm, n_iterations=3
+)
+img_iterative_sense = iterative_sense_reconstruction(kdata_fullysampled)
+
+# %% [markdown]
+# ##### Image $x$ from undersampled data
+# We now reconstruct the undersampled image, first without regularization and then with the fully sampled image
+# as the regularization image.
+
+# %%
+# Unregularized iterative SENSE reconstruction of the undersampled data
+iterative_sense_reconstruction = mrpro.algorithms.reconstruction.IterativeSENSEReconstruction(
+    kdata_undersampled, csm=csm, n_iterations=6
+)
+img_us_iterative_sense = iterative_sense_reconstruction(kdata_undersampled)
+
+# %%
+# Regularized iterative SENSE reconstruction of the undersampled data
+
+regularized_iterative_sense_reconstruction = mrpro.algorithms.reconstruction.RegularizedIterativeSENSEReconstruction(
+    kdata_undersampled,
+    csm=csm,
+    n_iterations=6,
+    regularization_data=img_iterative_sense.data,
+    regularization_weight=1.0,
+)
+img_us_regularized_iterative_sense = regularized_iterative_sense_reconstruction(kdata_undersampled)
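+
+# %% [markdown]
+# Before looking at the images, a quick numerical comparison (a small sketch using only tensors computed
+# above): we compute the relative error of both undersampled reconstructions with respect to the fully
+# sampled reference.
+
+# %%
+reference = img_iterative_sense.rss()
+for label, img_test in (
+    ('without regularization', img_us_iterative_sense.rss()),
+    ('with regularization', img_us_regularized_iterative_sense.rss()),
+):
+    print(f'Relative error {label}: {((img_test - reference).norm() / reference.norm()):.3f}')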
+
+# %% [markdown]
+# ##### Display the results
+# Besides the fully sampled image, we display two undersampled images:
+# The first one is obtained by unregularized iterative SENSE, the second one using regularization.
+
+# %% tags=["hide-cell"]
+import matplotlib.pyplot as plt
+import torch
+
+
+def show_images(*images: torch.Tensor, titles: list[str] | None = None) -> None:
+    """Plot images."""
+    n_images = len(images)
+    _, axes = plt.subplots(1, n_images, squeeze=False, figsize=(n_images * 3, 3))
+    for i in range(n_images):
+        axes[0][i].imshow(images[i], cmap='gray')
+        axes[0][i].axis('off')
+        if titles:
+            axes[0][i].set_title(titles[i])
+    plt.show()
+
+
+# %%
+show_images(
+    img_iterative_sense.rss()[0, 0],
+    img_us_iterative_sense.rss()[0, 0],
+    img_us_regularized_iterative_sense.rss()[0, 0],
+    titles=['Fully sampled', 'Iterative SENSE R=20', 'Regularized Iterative SENSE R=20'],
+)
+
+# %% [markdown]
+# ### Behind the scenes
+# We now investigate the steps that are done in the regularized iterative SENSE reconstruction and
+# perform them manually. This also demonstrates how to use the MRpro operators and algorithms
+# to build your own reconstruction pipeline.
+
+# %% [markdown]
+# ##### Set-up the density compensation operator $W$ and acquisition model $A$
+#
+# This is very similar to .
+# For more details, please refer to that notebook.
+
+# %%
+dcf_operator = mrpro.data.DcfData.from_traj_voronoi(kdata_undersampled.traj).as_operator()
+fourier_operator = mrpro.operators.FourierOp.from_kdata(kdata_undersampled)
+csm_operator = csm.as_operator()
+acquisition_operator = fourier_operator @ csm_operator
+
+# %% [markdown]
+# ##### Calculate the right-hand-side of the linear system
+# We calculate $b = A^H W y + l x_{reg}$ (here, $B$ is the identity).
+# Here, we make use of operator composition using ``@``.
+
+# %%
+regularization_weight = 1.0
+regularization_image = img_iterative_sense.data
+
+(right_hand_side,) = (acquisition_operator.H @ dcf_operator)(kdata_undersampled.data)
+right_hand_side = right_hand_side + regularization_weight * regularization_image
+
+# %% [markdown]
+# ##### Set-up the linear self-adjoint operator $H$
+# We define $H = A^H W A + l\,\mathrm{Id}$. We use the `~mrpro.operators.IdentityOp` and make
+# use of operator composition using ``@``, addition using ``+`` and multiplication using ``*``.
+# The resulting operator is a `~mrpro.operators.LinearOperator` object.
+
+# %%
+operator = (
+    acquisition_operator.H @ dcf_operator @ acquisition_operator + mrpro.operators.IdentityOp() * regularization_weight
+)
+
+# %% [markdown]
+# ##### Run conjugate gradient
+# We solve the linear system $Hx = b$ using the conjugate gradient method.
+# Here, we use early stopping after 8 iterations. Instead, we could also use a tolerance to stop the iterations when
+# the residual is small enough.
+
+# %%
+img_manual = mrpro.algorithms.optimizers.cg(
+    operator, right_hand_side, initial_value=right_hand_side, max_iterations=8, tolerance=0.0
+)
+
+# %% [markdown]
+# ##### Display the reconstructed image
+# We can now compare our 'manual' reconstruction with the regularized iterative SENSE reconstruction
+# obtained using `~mrpro.algorithms.reconstruction.RegularizedIterativeSENSEReconstruction`.
+
+# %%
+show_images(
+    img_us_regularized_iterative_sense.rss()[0, 0],
+    img_manual.abs()[0, 0, 0],
+    titles=['RegularizedIterativeSense', 'Manual'],
+)
+
+# %% [markdown]
+# We can also check if the results are equal by comparing the actual image data.
+# If the assert statement does not raise an exception, the results are equal.
+
+# %%
+torch.testing.assert_close(img_us_regularized_iterative_sense.data, img_manual)
+
+# %% [markdown]
+# ### Next steps
+#
+# We are cheating here because we used the fully sampled image as a regularization.
In real-world applications
+# we would not have that. One option is to apply a low-pass filter to the undersampled k-space data to try to reduce the
+# streaking artifacts and use that as a regularization image. Try that and see if you can also improve the image quality
+# compared to the unregularized images.
diff --git a/examples/scripts/pulseq_2d_radial_golden_angle.py b/examples/scripts/pulseq_2d_radial_golden_angle.py
deleted file mode 100644
index 3f857c382..000000000
--- a/examples/scripts/pulseq_2d_radial_golden_angle.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# %% [markdown]
-# # Reconstruction of 2D golden angle radial data from pulseq sequence
-# Here we manually do all steps of a direction reconstruction, i.e.
-# CSM estimation, density compensation, adjoint fourier transform, and coil combination.
-# See also the example `pulseq_2d_radial_golden_angle_direct_reconstruction.py`
-# for a more high-level example using the `DirectReconstruction` class.
-
-# %%
-# Imports
-import tempfile
-
-import matplotlib.pyplot as plt
-import requests
-from mrpro.algorithms.reconstruction import DirectReconstruction
-from mrpro.data import KData
-from mrpro.data.traj_calculators import KTrajectoryIsmrmrd, KTrajectoryPulseq, KTrajectoryRadial2D
-
-# %%
-# define zenodo records URL and create a temporary directory and h5-file
-zenodo_url = 'https://zenodo.org/records/10854057/files/'
-fname = 'pulseq_radial_2D_402spokes_golden_angle_with_traj.h5'
-data_file = tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.h5')
-
-# %%
-# Download raw data using requests
-response = requests.get(zenodo_url + fname, timeout=30)
-data_file.write(response.content)
-data_file.flush()
-
-# %% [markdown]
-# ### Image reconstruction using KTrajectoryIsmrmrd
-# This will use the trajectory that is stored in the ISMRMRD file.
-
-# %%
-# Read the raw data and the trajectory from ISMRMRD file
-kdata = KData.from_file(data_file.name, KTrajectoryIsmrmrd())
-
-# Reconstruct image
-direct_reconstruction = DirectReconstruction(kdata)
-img_using_ismrmrd_traj = direct_reconstruction(kdata)
-
-# %% [markdown]
-# ### Image reconstruction using KTrajectoryRadial2D
-# This will calculate the trajectory using the radial 2D trajectory calculator.
-
-# %%
-# Read raw data and calculate trajectory using KTrajectoryRadial2D
-kdata = KData.from_file(data_file.name, KTrajectoryRadial2D())
-
-# Reconstruct image
-direct_reconstruction = DirectReconstruction(kdata)
-img_using_rad2d_traj = direct_reconstruction(kdata)
-
-# %% [markdown]
-# ### Image reconstruction using KTrajectoryPulseq
-# This will calculate the trajectory from the pulseq sequence file
-# using the PyPulseq trajectory calculator. Please note that this method
-# requires the pulseq sequence file that was used to acquire the data.
-# The path to the sequence file is provided as an argument to KTrajectoryPulseq.
-
-# %%
-# download the sequence file from zenodo
-zenodo_url = 'https://zenodo.org/records/10868061/files/'
-seq_fname = 'pulseq_radial_2D_402spokes_golden_angle.seq'
-seq_file = tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.seq')
-response = requests.get(zenodo_url + seq_fname, timeout=30)
-seq_file.write(response.content)
-seq_file.flush()
-
-# %%
-# Read raw data and calculate trajectory using KTrajectoryPulseq
-kdata = KData.from_file(data_file.name, KTrajectoryPulseq(seq_path=seq_file.name))
-
-# Reconstruct image
-direct_reconstruction = DirectReconstruction(kdata)
-img_using_pulseq_traj = direct_reconstruction(kdata)
-
-# %% [markdown]
-# ### Plot the different reconstructed images
-# Please note: there is currently a mismatch between the actual trajectory
-# that was used to acquire the data and the trajectory calculated with KTrajectoryRadial2D.
-# This leads to a deviation between the image reconstructed with KTrajectoryRadial2D
-# and the other two methods. In the future, we will upload new measurement data with
-# an updated trajectory and adjust this example accordingly.
-# %%
-titles = ['KTrajectoryIsmrmrd', 'KTrajectoryRadial2D', 'KTrajectoryPulseq']
-plt.subplots(1, len(titles))
-for i, img in enumerate([img_using_ismrmrd_traj.rss(), img_using_rad2d_traj.rss(), img_using_pulseq_traj.rss()]):
-    plt.subplot(1, len(titles), i + 1)
-    plt.imshow(img[0, 0, :, :])
-    plt.title(titles[i])
-    plt.axis('off')
diff --git a/examples/scripts/qmri_sg_challenge_2024_t1.py b/examples/scripts/qmri_sg_challenge_2024_t1.py
index d0259f267..1d690d42e 100644
--- a/examples/scripts/qmri_sg_challenge_2024_t1.py
+++ b/examples/scripts/qmri_sg_challenge_2024_t1.py
@@ -1,26 +1,6 @@
 # %% [markdown]
 # # QMRI Challenge ISMRM 2024 - $T_1$ mapping
-
-# %%
-# Imports
-import shutil
-import tempfile
-import zipfile
-from pathlib import Path
-
-import matplotlib.pyplot as plt
-import torch
-import zenodo_get
-from einops import rearrange
-from mpl_toolkits.axes_grid1 import make_axes_locatable  # type: ignore [import-untyped]
-from mrpro.algorithms.optimizers import adam
-from mrpro.data import IData
-from mrpro.operators import MagnitudeOp
-from mrpro.operators.functionals import MSE
-from mrpro.operators.models import InversionRecovery
-
-# %% [markdown]
-# ### Overview
+# In the 2024 ISMRM QMRI Challenge, the goal is to estimate $T_1$ maps from a set of inversion recovery images.
 # The dataset consists of images obtained at 10 different inversion times using a turbo spin echo sequence. Each
 # inversion time is saved in a separate DICOM file. In order to obtain a $T_1$ map, we are going to:
 # - download the data from Zenodo
@@ -33,27 +13,56 @@
 # ### Get data from Zenodo
 
 # %%
-data_folder = Path(tempfile.mkdtemp())
+import tempfile
+import zipfile
+from pathlib import Path
+
+import zenodo_get
+
 dataset = '10868350'
+
+tmp = tempfile.TemporaryDirectory()  # RAII, automatically cleaned up
+data_folder = Path(tmp.name)
 zenodo_get.zenodo_get([dataset, '-r', 5, '-o', data_folder])  # r: retries
 
 with zipfile.ZipFile(data_folder / Path('T1 IR.zip'), 'r') as zip_ref:
     zip_ref.extractall(data_folder)
 
 # %% [markdown]
 # ### Create image data (IData) object with different inversion times
+# We read in the DICOM files and combine them in an `~mrpro.data.IData` object.
+# The inversion times stored in the DICOM files are made available in the header of the `~mrpro.data.IData` object.
# %% +import mrpro + ti_dicom_files = data_folder.glob('**/*.dcm') -idata_multi_ti = IData.from_dicom_files(ti_dicom_files) +idata_multi_ti = mrpro.data.IData.from_dicom_files(ti_dicom_files) if idata_multi_ti.header.ti is None: raise ValueError('Inversion times need to be defined in the DICOM files.') +# %% tags=["hide-cell"] +import matplotlib.pyplot as plt +import torch + + +def show_images(*images: torch.Tensor, titles: list[str] | None = None) -> None: + """Plot images.""" + n_images = len(images) + _, axes = plt.subplots(1, n_images, squeeze=False, figsize=(n_images * 3, 3)) + for i in range(n_images): + axes[0][i].imshow(images[i], cmap='gray') + axes[0][i].axis('off') + if titles: + axes[0][i].set_title(titles[i]) + plt.show() + + # %% # Let's have a look at some of the images -fig, axes = plt.subplots(1, 3, squeeze=False) -for idx, ax in enumerate(axes.flatten()): - ax.imshow(torch.abs(idata_multi_ti.data[idx, 0, 0, :, :])) - ax.set_title(f'TI = {idata_multi_ti.header.ti[idx]:.3f}s') +show_images( + *idata_multi_ti.data[:, 0, 0].abs(), + titles=[f'TI = {ti:.3f}s' for ti in idata_multi_ti.header.ti.squeeze()], +) # %% [markdown] # ### Signal model and loss function @@ -65,18 +74,20 @@ # images only contain the magnitude of the signal. Therefore, we need $|q(TI)|$: # %% -model = MagnitudeOp() @ InversionRecovery(ti=idata_multi_ti.header.ti) +model = mrpro.operators.MagnitudeOp() @ mrpro.operators.models.InversionRecovery(ti=idata_multi_ti.header.ti) # %% [markdown] # As a loss function for the optimizer, we calculate the mean-squared error between the image data $x$ and our signal # model $q$. + # %% -mse = MSE(idata_multi_ti.data.abs()) +mse = mrpro.operators.functionals.MSE(idata_multi_ti.data.abs()) # %% [markdown] # Now we can simply combine the two into a functional to solve # # $ \min_{M_0, T_1} || |q(M_0, T_1, TI)| - x||_2^2$ + # %% functional = mse @ model @@ -104,70 +115,98 @@ # just a scaling factor and we are going to normalize the signal curves. 
 (signal_dictionary,) = model(torch.ones(1), t1_dictionary)
 signal_dictionary = signal_dictionary.to(dtype=torch.complex64)
-vector_norm = torch.linalg.vector_norm(signal_dictionary, dim=0)
-signal_dictionary /= vector_norm
+signal_dictionary /= torch.linalg.vector_norm(signal_dictionary, dim=0)
 
 # Calculate the dot-product and select for each voxel the T1 values that correspond to the maximum of the dot-product
-n_y, n_x = idata_multi_ti.data.shape[-2:]
-dot_product = torch.mm(rearrange(idata_multi_ti.data, 'other 1 z y x->(z y x) other'), signal_dictionary)
-idx_best_match = torch.argmax(torch.abs(dot_product), dim=1)
-t1_start = rearrange(t1_dictionary[idx_best_match], '(y x)->1 1 y x', y=n_y, x=n_x)
+import einops
+
+dot_product = einops.einsum(
+    idata_multi_ti.data,
+    signal_dictionary,
+    'ti ..., ti t1 -> t1 ...',
+)
+idx_best_match = dot_product.abs().argmax(dim=0)
+t1_start = t1_dictionary[idx_best_match]
+
+# %% [markdown]
+# The maximum absolute value observed is a good approximation for $M_0$.
 
 # %%
-# The maximum absolute value observed is a good approximation for m0
-m0_start = torch.amax(torch.abs(idata_multi_ti.data), 0)
+m0_start = idata_multi_ti.data.abs().amax(dim=0)
+
+# %% [markdown]
+# #### Visualize the starting values
+# Let's have a look at the starting values for $M_0$ and $T_1$:
 
 # %%
-# Visualize the starting values
-fig, axes = plt.subplots(1, 2, figsize=(8, 2), squeeze=False)
-colorbar_ax = [make_axes_locatable(ax).append_axes('right', size='5%', pad=0.05) for ax in axes[0, :]]
-im = axes[0, 0].imshow(m0_start[0, 0, ...])
+fig, axes = plt.subplots(1, 2, figsize=(6, 2), squeeze=False)
+
+im = axes[0, 0].imshow(m0_start[0, 0])
 axes[0, 0].set_title('$M_0$ start values')
-fig.colorbar(im, cax=colorbar_ax[0])
-im = axes[0, 1].imshow(t1_start[0, 0, ...], vmin=0, vmax=2.5)
+axes[0, 0].set_axis_off()
+fig.colorbar(im, ax=axes[0, 0], label='a.u.')
+
+im = axes[0, 1].imshow(t1_start[0, 0], vmin=0, vmax=2.5, cmap='magma')
 axes[0, 1].set_title('$T_1$ start values')
-fig.colorbar(im, cax=colorbar_ax[1], label='s')
+axes[0, 1].set_axis_off()
+fig.colorbar(im, ax=axes[0, 1], label='s')
+
+plt.show()
 
 # %% [markdown]
 # ### Carry out fit
+# We are now ready to carry out the fit. We are going to use the `~mrpro.algorithms.optimizers.adam` optimizer.
+# If there is a GPU available, we can use it by moving both the data and the model to the GPU.
 
 # %%
+# Move initial values and model to GPU if available
+if torch.cuda.is_available():
+    print('Using GPU')
+    functional.cuda()
+    m0_start = m0_start.cuda()
+    t1_start = t1_start.cuda()
+
 # Hyperparameters for optimizer
 max_iter = 2000
 lr = 1e-1
 
 # Run optimization
-params_result = adam(functional, [m0_start, t1_start], max_iter=max_iter, lr=lr)
-m0, t1 = (p.detach() for p in params_result)
-m0[torch.isnan(t1)] = 0
-t1[torch.isnan(t1)] = 0
+result = mrpro.algorithms.optimizers.adam(functional, [m0_start, t1_start], max_iter=max_iter, lr=lr)
+m0, t1 = (p.detach().cpu() for p in result)
 
 # %% [markdown]
 # ### Visualize the final results
+#
 # To get an impression of how well the fit has worked, we are going to calculate the relative error
 #
 # $E_{relative} = \sum_{TI}\frac{|(q(M_0, T_1, TI) - x)|}{|x|}$
 #
 # on a voxel-by-voxel basis.
+# We also mask out the background by thresholding on $M_0$.
# %% -img_mult_te_abs_sum = torch.sum(torch.abs(idata_multi_ti.data), dim=0) -relative_absolute_error = torch.sum(torch.abs(model(m0, t1)[0] - idata_multi_ti.data), dim=0) / ( - img_mult_te_abs_sum + 1e-9 -) +error = model(m0, t1)[0] - idata_multi_ti.data +relative_absolute_error = error.abs().sum(dim=0) / (idata_multi_ti.data.abs().sum(dim=0) + 1e-9) + +mask = torch.isnan(t1) | (m0 < 500) +m0[mask] = 0 +t1[mask] = 0 +relative_absolute_error[mask] = 0 + fig, axes = plt.subplots(1, 3, figsize=(10, 2), squeeze=False) -colorbar_ax = [make_axes_locatable(ax).append_axes('right', size='5%', pad=0.05) for ax in axes[0, :]] -im = axes[0, 0].imshow(m0[0, 0, ...]) +im = axes[0, 0].imshow(m0[0, 0]) axes[0, 0].set_title('$M_0$') -fig.colorbar(im, cax=colorbar_ax[0]) -im = axes[0, 1].imshow(t1[0, 0, ...], vmin=0, vmax=2.5) +axes[0, 0].set_axis_off() +fig.colorbar(im, ax=axes[0, 0], label='a.u.') + +im = axes[0, 1].imshow(t1[0, 0], vmin=0, vmax=2.5, cmap='magma') axes[0, 1].set_title('$T_1$') -fig.colorbar(im, cax=colorbar_ax[1], label='s') -im = axes[0, 2].imshow(relative_absolute_error[0, 0, ...], vmin=0, vmax=1.0) -axes[0, 2].set_title('Relative error') -fig.colorbar(im, cax=colorbar_ax[2]) +axes[0, 1].set_axis_off() +fig.colorbar(im, ax=axes[0, 1], label='s') +im = axes[0, 2].imshow(relative_absolute_error[0, 0], vmin=0, vmax=0.1) +axes[0, 2].set_title('Relative error') +axes[0, 2].set_axis_off() +fig.colorbar(im, ax=axes[0, 2]) -# %% -# Clean-up by removing temporary directory -shutil.rmtree(data_folder) +plt.show() diff --git a/examples/scripts/qmri_sg_challenge_2024_t2_star.py b/examples/scripts/qmri_sg_challenge_2024_t2_star.py deleted file mode 100644 index a80f40754..000000000 --- a/examples/scripts/qmri_sg_challenge_2024_t2_star.py +++ /dev/null @@ -1,146 +0,0 @@ -# %% [markdown] -# # QMRI Challenge ISMRM 2024 - $T_2^*$ mapping - -# %% -# Imports -import shutil -import tempfile -import time -import zipfile -from pathlib import Path - -import matplotlib.pyplot as plt -import torch -import zenodo_get -from mpl_toolkits.axes_grid1 import make_axes_locatable # type: ignore [import-untyped] -from mrpro.algorithms.optimizers import adam -from mrpro.data import IData -from mrpro.operators.functionals import MSE -from mrpro.operators.models import MonoExponentialDecay - -# %% [markdown] -# ### Overview -# The dataset consists of gradient echo images obtained at 11 different echo times, each saved in a separate DICOM file. -# In order to obtain a $T_2^*$ map, we are going to: -# - download the data from Zenodo -# - read in the DICOM files (one for each echo time) and combine them in an IData object -# - define a signal model (mono-exponential decay) and data loss (mean-squared error) function -# - carry out a fit using ADAM from PyTorch -# -# Everything is based on PyTorch, and therefore we can run the code either on the CPU or GPU. Simply set the flag below -# to True to run the parameter estimation on the GPU. 
- -# %% -flag_use_cuda = False - -# %% [markdown] -# ### Get data from Zenodo - -# %% -data_folder = Path(tempfile.mkdtemp()) -dataset = '10868361' -zenodo_get.zenodo_get([dataset, '-r', 5, '-o', data_folder]) # r: retries -with zipfile.ZipFile(data_folder / Path('T2star.zip'), 'r') as zip_ref: - zip_ref.extractall(data_folder) - -# %% [markdown] -# ### Create image data (IData) object with different echo times -# %% -te_dicom_files = data_folder.glob('**/*.dcm') -idata_multi_te = IData.from_dicom_files(te_dicom_files) -# scaling the signal down to make the optimization easier -idata_multi_te.data[...] = idata_multi_te.data / 1500 - -# Move the data to the GPU -if flag_use_cuda: - idata_multi_te = idata_multi_te.cuda() - -if idata_multi_te.header.te is None: - raise ValueError('Echo times need to be defined in the DICOM files.') - -# %% -# Let's have a look at some of the images -fig, axes = plt.subplots(1, 3, squeeze=False) -for idx, ax in enumerate(axes.flatten()): - ax.imshow(torch.abs(idata_multi_te.data[idx, 0, 0, :, :]).cpu()) - ax.set_title(f'TE = {idata_multi_te.header.te[idx]:.3f}s') - -# %% [markdown] -# ### Signal model and loss function -# We use the model $q$ -# -# $q(TE) = M_0 e^{-TE/T_2^*}$ -# -# with the equilibrium magnetization $M_0$, the echo time $TE$, and $T_2^*$ - -# %% -model = MonoExponentialDecay(decay_time=idata_multi_te.header.te) - -# %% [markdown] -# As a loss function for the optimizer, we calculate the mean-squared error between the image data $x$ and our signal -# model $q$. -# %% -mse = MSE(idata_multi_te.data) - -# %% [markdown] -# Now we can simply combine the two into a functional which will then solve -# -# $ \min_{M_0, T_2^*} ||q(M_0, T_2^*, TE) - x||_2^2$ -# %% -functional = mse @ model - -# %% [markdown] -# ### Carry out fit - -# %% -# The shortest echo time is a good approximation of the equilibrium magnetization -m0_start = torch.abs(idata_multi_te.data[torch.argmin(idata_multi_te.header.te), ...]) -# 20 ms as a starting value for T2* -t2star_start = torch.ones(m0_start.shape, dtype=torch.float32, device=m0_start.device) * 20e-3 - -# Hyperparameters for optimizer -max_iter = 20000 -lr = 1e-3 - -if flag_use_cuda: - functional.cuda() - -# Run optimization -start_time = time.time() -params_result = adam(functional, [m0_start, t2star_start], max_iter=max_iter, lr=lr) -print(f'Optimization took {time.time() - start_time}s') -m0, t2star = (p.detach() for p in params_result) -m0[torch.isnan(t2star)] = 0 -t2star[torch.isnan(t2star)] = 0 - -# %% [markdown] -# ### Visualize the final results -# To get an impression of how well the fit has worked, we are going to calculate the relative error between -# -# $E_{relative} = \sum_{TE}\frac{|(q(M_0, T_2^*, TE) - x)|}{|x|}$ -# -# on a voxel-by-voxel basis. 
-# %% -img_mult_te_abs_sum = torch.sum(torch.abs(idata_multi_te.data), dim=0) -relative_absolute_error = torch.sum(torch.abs(model(m0, t2star)[0] - idata_multi_te.data), dim=0) / ( - img_mult_te_abs_sum + 1e-9 -) -fig, axes = plt.subplots(1, 3, figsize=(10, 2), squeeze=False) -colorbar_ax = [make_axes_locatable(ax).append_axes('right', size='5%', pad=0.05) for ax in axes[0, :]] - -im = axes[0, 0].imshow(m0[0, 0, ...].cpu()) -axes[0, 0].set_title('$M_0$') -fig.colorbar(im, cax=colorbar_ax[0]) - -im = axes[0, 1].imshow(t2star[0, 0, ...].cpu(), vmin=0, vmax=5) -axes[0, 1].set_title('$T_2^*$') -fig.colorbar(im, cax=colorbar_ax[1], label='s') - -im = axes[0, 2].imshow(relative_absolute_error[0, 0, ...].cpu(), vmin=0, vmax=0.1) -axes[0, 2].set_title('Relative error') -fig.colorbar(im, cax=colorbar_ax[2]) - - -# %% -# Clean-up by removing temporary directory -shutil.rmtree(data_folder) diff --git a/examples/scripts/t1_mapping_with_grad_acq.py b/examples/scripts/qmri_t1_mapping_with_grad_acq.py similarity index 62% rename from examples/scripts/t1_mapping_with_grad_acq.py rename to examples/scripts/qmri_t1_mapping_with_grad_acq.py index de8e31c43..10e9fb440 100644 --- a/examples/scripts/t1_mapping_with_grad_acq.py +++ b/examples/scripts/qmri_t1_mapping_with_grad_acq.py @@ -1,24 +1,6 @@ # %% [markdown] # # $T_1$ mapping from a continuous Golden radial acquisition -# %% -# Imports -import shutil -import tempfile -from pathlib import Path - -import matplotlib.pyplot as plt -import torch -import zenodo_get -from mpl_toolkits.axes_grid1 import make_axes_locatable # type: ignore [import-untyped] -from mrpro.algorithms.optimizers import adam -from mrpro.algorithms.reconstruction import DirectReconstruction -from mrpro.data import KData -from mrpro.data.traj_calculators import KTrajectoryIsmrmrd -from mrpro.operators import ConstraintsOp, MagnitudeOp -from mrpro.operators.functionals import MSE -from mrpro.operators.models import TransientSteadyStateWithPreparation -from mrpro.utils import split_idx # %% [markdown] # ### Overview @@ -27,7 +9,7 @@ # data can be divided into different dynamic time frames, each corresponding to a different inversion time. A signal # model can then be fitted to this data to obtain a $T_1$ map. # -# More information can be found in: +# More information can be found in:\ # Kerkering KM, Schulz-Menger J, Schaeffter T, Kolbitsch C (2023). Motion-corrected model-based reconstruction for 2D # myocardial $T_1$ mapping. *Magnetic Resonance in Medicine*, 90(3):1086-1100, [10.1002/mrm.29699](https://doi.org/10.1002/mrm.29699) @@ -73,13 +55,25 @@ # - Reconstruct a single high quality image using all acquired radial lines. 
# - Split the data into multiple dynamics and reconstruct these dynamic images # - Define a signal model and a loss function to obtain the $T_1$ maps - -# %% +# +# %% tags=["hide-cell"] # Download raw data in ISMRMRD format from zenodo into a temporary directory -data_folder = Path(tempfile.mkdtemp()) +import tempfile +from pathlib import Path + +import zenodo_get + dataset = '13207352' -zenodo_get.zenodo_get([dataset, '-r', 5, '-o', data_folder]) # r: retries +tmp = tempfile.TemporaryDirectory() # RAII, automatically cleaned up +data_folder = Path(tmp.name) +zenodo_get.zenodo_get([dataset, '-r', 5, '-o', data_folder]) # r: retries +# %% [markdown] +# We will use the following libraries: +# %% +import matplotlib.pyplot as plt +import mrpro +import torch # %% [markdown] # ## Reconstruct average image @@ -87,17 +81,16 @@ # %% # Read raw data and trajectory -kdata = KData.from_file(data_folder / '2D_GRad_map_t1.h5', KTrajectoryIsmrmrd()) +kdata = mrpro.data.KData.from_file(data_folder / '2D_GRad_map_t1.h5', mrpro.data.traj_calculators.KTrajectoryIsmrmrd()) # Perform the reconstruction -reconstruction = DirectReconstruction(kdata) +reconstruction = mrpro.algorithms.reconstruction.DirectReconstruction(kdata) img_average = reconstruction(kdata) -# %% # Visualize average image -plt.figure() -plt.imshow(img_average.rss()[0, 0, :, :], cmap='gray') +plt.imshow(img_average.rss()[0, 0], cmap='gray') plt.title('Average image') +plt.show() # %% [markdown] # ## Split the data into dynamics and reconstruct dynamic images @@ -106,13 +99,14 @@ # estimated above) for each dynamic. # %% -idx_dynamic = split_idx(torch.argsort(kdata.header.acq_info.acquisition_time_stamp[0, 0, :, 0]), 30, 0) + +idx_dynamic = mrpro.utils.split_idx(kdata.header.acq_info.acquisition_time_stamp[0, 0, :, 0].argsort(), 30, 0) kdata_dynamic = kdata.split_k1_into_other(idx_dynamic, other_label='repetition') # %% # Perform the reconstruction # Here we use the same coil sensitivity map for all dynamics -reconstruction_dynamic = DirectReconstruction(kdata_dynamic, csm=reconstruction.csm) +reconstruction_dynamic = mrpro.algorithms.reconstruction.DirectReconstruction(kdata_dynamic, csm=reconstruction.csm) img_dynamic = reconstruction_dynamic(kdata_dynamic) # Get absolute value of complex image and normalize the images img_rss_dynamic = img_dynamic.rss() @@ -125,7 +119,7 @@ for idx, cax in enumerate(ax.flatten()): cax.imshow(img_rss_dynamic[idx, 0, :, :], cmap='gray', vmin=0, vmax=0.8) cax.set_title(f'Dynamic {idx}') - +plt.show() # %% [markdown] # ## Estimate $T_1$ map @@ -133,27 +127,30 @@ # ### Signal model # We use a three parameter signal model $q(M_0, T_1, \alpha)$. # -# As known input, the model needs information about the time $t$ (`sampling_time`) in Eq. (1) since the inversion pulse. +# The model needs information about the time $t$, `sampling_time`, in Eq. (1) since the inversion pulse. # This can be calculated from the `acquisition_time_stamp`. If we average the `acquisition_time_stamp`-values for each # dynamic image and subtract the first `acquisition_time_stamp`, we get the mean time since the inversion pulse for each # dynamic. Note: The time taken by the spoiler gradient is taken into consideration in the -# `TransientSteadyStateWithPreparation`-model and does not have to be added here. Another important thing to note is -# that the `acquisition_time_stamp` is not given in time units but in vendor-specific time stamp units. 
For the Siemens +# `~mrpro.operators.models.TransientSteadyStateWithPreparation`-model and does not have to be added here. +# ```{note} +# The acquisition_time_stamp is not given in time units but in vendor-specific time stamp units. For the Siemens # data used here, one time stamp corresponds to 2.5 ms. +# ``` # %% -sampling_time = torch.mean(kdata_dynamic.header.acq_info.acquisition_time_stamp[:, 0, :, 0].to(torch.float32), dim=-1) -# Subtract time stamp of first radial line -sampling_time -= kdata_dynamic.header.acq_info.acquisition_time_stamp[0, 0, 0, 0] -# Convert to seconds -sampling_time *= 2.5 / 1000 +sampling_time = kdata_dynamic.header.acq_info.acquisition_time_stamp.squeeze() +# Subtract time stamp of first radial line and convert to seconds +sampling_time = (sampling_time - sampling_time[0, 0]) * 2.5e-3 +# Average over radial lines of each dynamic +sampling_time = sampling_time.mean(-1) # %% [markdown] # We also need the repetition time between two RF-pulses. There is a parameter `tr` in the header, but this describes # the time "between the beginning of a pulse sequence and the beginning of the succeeding (essentially identical) pulse # sequence" (see [DICOM Standard Browser](https://dicom.innolitics.com/ciods/mr-image/mr-image/00180080)). We have one -# inversion pulse at the beginning, which is never repeated and hence `tr` is the duration of the entire scan. -# Therefore, we have to use the parameter `echo_spacing`, which describes the time between two gradient echoes. +# inversion pulse at the beginning, which is never repeated and hence ``tr`` is the duration of the entire scan. +# Therefore, we have to use the parameter `~mrpro.data.KHeader.echo_spacing`, which describes the time between +# two gradient echoes. # %% if kdata_dynamic.header.echo_spacing is None: @@ -166,17 +163,17 @@ # the acquired data, but we have to know the value and set it by hand to 20 ms. Now we can define the signal model. # %% -model_op = TransientSteadyStateWithPreparation( +model_op = mrpro.operators.models.TransientSteadyStateWithPreparation( sampling_time, repetition_time, m0_scaling_preparation=-1, delay_after_preparation=0.02 ) # %% [markdown] # The reconstructed image data is complex-valued. We could fit a complex $M_0$ to the data, but in this case it is more # robust to fit $|q(M_0, T_1, \alpha)|$ to the magnitude of the image data. We therefore combine our model with a -# `MagnitudeOp`. +# `~mrpro.operators.MagnitudeOp`. # %% -magnitude_model_op = MagnitudeOp() @ model_op +magnitude_model_op = mrpro.operators.MagnitudeOp() @ model_op # %% [markdown] # ### Constraints @@ -184,21 +181,28 @@ # and 3 s. Further, we can constrain $\alpha$. Although the effective flip angle can vary, it can only vary by a # certain percentage relative to the nominal flip angle. Here, we chose a maximum deviation from the nominal flip angle # of 50%. - +# We use a `~mrpro.operators.ConstraintsOp` to define these constraints. 
It maps unconstrained parameters to constrained
+# parameters, such that the optimizer can work with unconstrained parameters.
# %%
if kdata_dynamic.header.fa is None:
    raise ValueError('Nominal flip angle needs to be defined.')
-else:
-    nominal_flip_angle = float(kdata_dynamic.header.fa[0])
-constraints_op = ConstraintsOp(bounds=((None, None), (0.05, 3.0), (nominal_flip_angle * 0.5, nominal_flip_angle * 1.5)))
+nominal_flip_angle = float(kdata_dynamic.header.fa[0])
+
+constraints_op = mrpro.operators.ConstraintsOp(
+    bounds=(
+        (None, None),  # M0 is not constrained
+        (0.05, 3.0),  # T1 is constrained between 50 ms and 3 s
+        (nominal_flip_angle * 0.5, nominal_flip_angle * 1.5),  # alpha is constrained to within 50% of nominal
+    )
+)

# %% [markdown]
# ### Loss function
# As a loss function for the optimizer, we calculate the mean squared error between the image data $x$ and our signal
# model $q$.

# %%
-mse_loss = MSE(img_rss_dynamic)
+mse_loss = mrpro.operators.functionals.MSE(img_rss_dynamic)

# %% [markdown]
# Now we can simply combine the loss function, the signal model and the constraints to solve
@@ -211,49 +215,56 @@

# %% [markdown]
# ### Carry out fit
-
+# We use an LBFGS optimizer to minimize the loss function. We start with the following initial values:
+# - The intensity at the shortest echo time as a good approximation for the equilibrium magnetization $M_0$,
+# - 1 s for $T_1$, and
+# - the nominal flip angle for the actual flip angle.
# %%
-# The shortest echo time is a good approximation for the equilibrium magnetization
-m0_start = img_rss_dynamic[0, ...]
-# 1 s a good starting value for T1
-t1_start = torch.ones(m0_start.shape, dtype=torch.float32)
-# and the nominal flip angle a good starting value for the actual flip angle
-flip_angle_start = torch.ones(m0_start.shape, dtype=torch.float32) * kdata_dynamic.header.fa
-
-
+m0_start = img_rss_dynamic[0]
+t1_start = torch.ones_like(m0_start)
+flip_angle_start = torch.ones_like(m0_start) * kdata_dynamic.header.fa
+# %% [markdown]
+# If we use a `~mrpro.operators.ConstraintsOp`, the start values must be transformed to the
+# unconstrained space before the optimization and back to the original space after the optimization.
# %%
-# Hyperparameters for optimizer
-max_iter = 500
-lr = 1e-2
-
-# Run optimization
-params_result = adam(functional, [m0_start, t1_start, flip_angle_start], max_iter=max_iter, lr=lr)
-params_result = constraints_op(*params_result)
-m0, t1, flip_angle = (p.detach() for p in params_result)
-
+initial_parameters = constraints_op.inverse(m0_start, t1_start, flip_angle_start)
+# %% [markdown]
+# Now we can run the optimizer in the unconstrained space.
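+# ```{note}
+# `~mrpro.algorithms.optimizers.lbfgs` does not modify the tensors in `initial_parameters`: it optimizes
+# copies and returns the optimized parameters as a tuple in the same order as the initial values.
+# ```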
+# %% +result = mrpro.algorithms.optimizers.lbfgs(functional, initial_parameters=initial_parameters) +# %% [markdown] +# Transforming the parameters back to the original space, we get the final $M_0$, $T_1$, and flip angle: +# %% +m0, t1, flip_angle = (p.detach().cpu().squeeze() for p in constraints_op(*result)) +# %% [markdown] +# ## Visualize results +# Finally, we can take a look at the estimated $M_0$, $T_1$, and flip angle maps: # %% # Visualize parametric maps fig, axes = plt.subplots(1, 3, figsize=(10, 2), squeeze=False) -colorbar_ax = [make_axes_locatable(ax).append_axes('right', size='5%', pad=0.05) for ax in axes[0, :]] -im = axes[0, 0].imshow(m0[0, ...].abs(), cmap='gray') -axes[0, 0].set_title('$M_0$') -fig.colorbar(im, cax=colorbar_ax[0]) -im = axes[0, 1].imshow(t1[0, ...], vmin=0, vmax=2) + +im = axes[0, 0].imshow(m0.abs(), cmap='gray') +axes[0, 0].set_title('$|M_0|$') +axes[0, 0].set_axis_off() +fig.colorbar(im, ax=axes[0, 0]) + +im = axes[0, 1].imshow(t1, vmin=0, vmax=2, cmap='magma') axes[0, 1].set_title('$T_1$ (s)') -fig.colorbar(im, cax=colorbar_ax[1]) -im = axes[0, 2].imshow(flip_angle[0, ...] / torch.pi * 180, vmin=0, vmax=8) +axes[0, 1].set_axis_off() +fig.colorbar(im, ax=axes[0, 1]) + +im = axes[0, 2].imshow(torch.rad2deg(flip_angle), vmin=0, vmax=8) axes[0, 2].set_title('Flip angle (°)') -fig.colorbar(im, cax=colorbar_ax[2]) +axes[0, 2].set_axis_off() +fig.colorbar(im, ax=axes[0, 2]) +plt.show() # %% [markdown] +# Great! We have successfully estimated the $T_1$ map from the dynamic images! +# # ### Next steps # The quality of the final $T_1$ maps depends on the quality of the individual dynamic images. Using more advanced image # reconstruction methods, we can improve the image quality and hence the quality of the maps. -# -# Try to exchange `DirectReconstruction` above with `IterativeSENSEReconstruction` and compare the quality of the -# $T_1$ maps for different number of iterations (`n_iterations`). - - -# %% -# Clean-up by removing temporary directory -shutil.rmtree(data_folder) +# Try to exchange `~mrpro.algorithms.reconstruction.DirectReconstruction` above with +# `~mrpro.algorithms.reconstruction.IterativeSENSEReconstruction` +# or try a different optimizer such as `~mrpro.algorithms.optimizers.adam`. diff --git a/examples/scripts/regularized_iterative_sense_reconstruction.py b/examples/scripts/regularized_iterative_sense_reconstruction.py deleted file mode 100644 index e41dc4ac5..000000000 --- a/examples/scripts/regularized_iterative_sense_reconstruction.py +++ /dev/null @@ -1,193 +0,0 @@ -# %% [markdown] -# # Regularized Iterative SENSE Reconstruction of 2D golden angle radial data -# Here we use the RegularizedIterativeSENSEReconstruction class to reconstruct images from ISMRMRD 2D radial data -# %% -# define zenodo URL of the example ismrmd data -zenodo_url = 'https://zenodo.org/records/10854057/files/' -fname = 'pulseq_radial_2D_402spokes_golden_angle_with_traj.h5' -# %% -# Download raw data -import tempfile - -import requests - -data_file = tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.h5') -response = requests.get(zenodo_url + fname, timeout=30) -data_file.write(response.content) -data_file.flush() - -# %% [markdown] -# ### Image reconstruction -# We use the RegularizedIterativeSENSEReconstruction class to reconstruct images from 2D radial data. 
-# RegularizedIterativeSENSEReconstruction solves the following reconstruction problem: -# -# Let's assume we have obtained the k-space data $y$ from an image $x$ with an acquisition model (Fourier transforms, -# coil sensitivity maps...) $A$ then we can formulate the forward problem as: -# -# $ y = Ax + n $ -# -# where $n$ describes complex Gaussian noise. The image $x$ can be obtained by minimizing the functionl $F$ -# -# $ F(x) = ||W^{\frac{1}{2}}(Ax - y)||_2^2 $ -# -# where $W^\frac{1}{2}$ is the square root of the density compensation function (which corresponds to a diagonal -# operator). Because this is an ill-posed problem, we can add a regularization term to stabilize the problem and obtain -# a solution with certain properties: -# -# $ F(x) = ||W^{\frac{1}{2}}(Ax - y)||_2^2 + l||Bx - x_{reg}||_2^2$ -# -# where $l$ is the strength of the regularization, $B$ is a linear operator and $x_{reg}$ is a regularization image. -# With this functional $F$ we obtain a solution which is close to $x_{reg}$ and to the acquired data $y$. -# -# Setting the derivative of the functional $F$ to zero and rearranging yields -# -# $ (A^H W A + l B) x = A^H W y + l x_{reg}$ -# -# which is a linear system $Hx = b$ that needs to be solved for $x$. -# -# One important question of course is, what to use for $x_{reg}$. For dynamic images (e.g. cine MRI) low-resolution -# dynamic images or high-quality static images have been proposed. In recent years, also the output of neural-networks -# has been used as an image regulariser. -# -# In this example we are going to use a high-quality image to regularize the reconstruction of an undersampled image. -# Both images are obtained from the same data acquisition (one using all the acquired data ($x_{reg}$) and one using -# only parts of it ($x$)). This of course is an unrealistic case but it will allow us to study the effect of the -# regularization. - -# %% -import mrpro - -# %% [markdown] -# ##### Read-in the raw data -# %% -from mrpro.data import KData -from mrpro.data.traj_calculators import KTrajectoryIsmrmrd - -# Load in the Data and the trajectory from the ISMRMRD file -kdata = KData.from_file(data_file.name, KTrajectoryIsmrmrd()) -kdata.header.recon_matrix.x = 256 -kdata.header.recon_matrix.y = 256 - -# %% [markdown] -# ##### Image $x_{reg}$ from fully sampled data - -# %% -from mrpro.algorithms.reconstruction import DirectReconstruction, IterativeSENSEReconstruction -from mrpro.data import CsmData - -# Estimate coil maps -direct_reconstruction = DirectReconstruction(kdata, csm=None) -img_coilwise = direct_reconstruction(kdata) -csm = CsmData.from_idata_walsh(img_coilwise) - -# Iterative SENSE reconstruction -iterative_sense_reconstruction = IterativeSENSEReconstruction(kdata, csm=csm, n_iterations=3) -img_iterative_sense = iterative_sense_reconstruction(kdata) - -# %% [markdown] -# ##### Image $x$ from undersampled data - -# %% -import torch - -# Data undersampling, i.e. 
take only the first 20 radial lines -idx_us = torch.arange(0, 20)[None, :] -kdata_us = kdata.split_k1_into_other(idx_us, other_label='repetition') - -# %% -# Iterativ SENSE reconstruction -iterative_sense_reconstruction = IterativeSENSEReconstruction(kdata_us, csm=csm, n_iterations=6) -img_us_iterative_sense = iterative_sense_reconstruction(kdata_us) - -# %% -# Regularized iterativ SENSE reconstruction -from mrpro.algorithms.reconstruction import RegularizedIterativeSENSEReconstruction - -regularization_weight = 1.0 -n_iterations = 6 -regularized_iterative_sense_reconstruction = RegularizedIterativeSENSEReconstruction( - kdata_us, - csm=csm, - n_iterations=n_iterations, - regularization_data=img_iterative_sense.data, - regularization_weight=regularization_weight, -) -img_us_regularized_iterative_sense = regularized_iterative_sense_reconstruction(kdata_us) - -# %% -import matplotlib.pyplot as plt - -vis_im = [img_iterative_sense.rss(), img_us_iterative_sense.rss(), img_us_regularized_iterative_sense.rss()] -vis_title = ['Fully sampled', 'Iterative SENSE R=20', 'Regularized Iterative SENSE R=20'] -fig, ax = plt.subplots(1, 3, squeeze=False, figsize=(12, 4)) -for ind in range(3): - ax[0, ind].imshow(vis_im[ind][0, 0, ...]) - ax[0, ind].set_title(vis_title[ind]) - - -# %% [markdown] -# ### Behind the scenes - -# %% [markdown] -# ##### Set-up the density compensation operator $W$ and acquisition model $A$ -# -# This is very similar to the iterative SENSE reconstruction. For more detail please look at the -# iterative_sense_reconstruction notebook. -# %% -dcf_operator = mrpro.data.DcfData.from_traj_voronoi(kdata_us.traj).as_operator() -fourier_operator = mrpro.operators.FourierOp.from_kdata(kdata_us) -csm_operator = csm.as_operator() -acquisition_operator = fourier_operator @ csm_operator - -# %% [markdown] -# ##### Calculate the right-hand-side of the linear system $b = A^H W y + l x_{reg}$ - -# %% -right_hand_side = ( - acquisition_operator.H(dcf_operator(kdata_us.data)[0])[0] + regularization_weight * img_iterative_sense.data -) - - -# %% [markdown] -# ##### Set-up the linear self-adjoint operator $H = A^H W A + l$ - -# %% -from mrpro.operators import IdentityOp - -operator = acquisition_operator.H @ dcf_operator @ acquisition_operator + IdentityOp() * torch.as_tensor( - regularization_weight -) - -# %% [markdown] -# ##### Run conjugate gradient - -# %% -img_manual = mrpro.algorithms.optimizers.cg( - operator, right_hand_side, initial_value=right_hand_side, max_iterations=n_iterations, tolerance=0.0 -) - -# %% -# Display the reconstructed image -vis_im = [img_us_regularized_iterative_sense.rss(), img_manual.abs()[:, 0, ...]] -vis_title = ['Regularized Iterative SENSE R=20', '"Manual" Regularized Iterative SENSE R=20'] -fig, ax = plt.subplots(1, 2, squeeze=False, figsize=(8, 4)) -for ind in range(2): - ax[0, ind].imshow(vis_im[ind][0, 0, ...]) - ax[0, ind].set_title(vis_title[ind]) - -# %% [markdown] -# ### Check for equal results -# The two versions should result in the same image data. - -# %% -# If the assert statement did not raise an exception, the results are equal. -assert torch.allclose(img_us_regularized_iterative_sense.data, img_manual) - -# %% [markdown] -# ### Next steps -# Play around with the regularization_weight to see how it effects the final image quality. -# -# Of course we are cheating here because we used the fully sampled image as a regularization. In real world applications -# we would not have that. 
One option is to apply a low-pass filter to the undersampled k-space data to try to reduce the -# streaking artifacts and use that as a regularization image. Try that and see if you can also improve the image quality -# compared to the unregularised images. diff --git a/pyproject.toml b/pyproject.toml index 45b428988..9d86ea3d2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -22,19 +22,20 @@ description = "MR image reconstruction and processing package specifically devel readme = "README.md" requires-python = ">=3.10,<3.14" dynamic = ["version"] -keywords = ["MRI", - "qMRI", - "medical imaging", - "physics-informed learning", - "model-based reconstruction", - "quantitative", - "signal models", - "machine learning", - "deep learning", - "reconstruction", - "processing", - "Pulseq", - "PyTorch", +keywords = [ + "MRI", + "qMRI", + "medical imaging", + "physics-informed learning", + "model-based reconstruction", + "quantitative", + "signal models", + "machine learning", + "deep learning", + "reconstruction", + "processing", + "Pulseq", + "PyTorch", ] authors = [ { name = "MRpro Team", email = "info@emerpro.de" }, @@ -79,12 +80,18 @@ test = [ "pytest-cov", "pytest-xdist", ] -docs = ["sphinx", +docs = [ + "sphinx", "sphinx_rtd_theme", "sphinx-pyproject", "myst-nb", "sphinx-mathjax-offline", - ] + "sphinx_github_style", + "sphinx-autodoc-typehints", + "sphinx-copybutton", + "sphinx-last-updated-by-git", + +] notebook = [ "zenodo_get", "ipykernel", @@ -92,6 +99,8 @@ notebook = [ "jupytext", "matplotlib", "pip-tools", + "sigpy==0.1.26", + ] [project.urls] @@ -103,7 +112,7 @@ testpaths = ["tests"] filterwarnings = [ "error", "ignore:'write_like_original':DeprecationWarning:pydicom:", - "ignore:Anomaly Detection has been enabled:UserWarning", #torch.autograd + "ignore:Anomaly Detection has been enabled:UserWarning", #torch.autograd ] addopts = "-n auto" markers = ["cuda : Tests only to be run when cuda device is available"] @@ -204,6 +213,7 @@ iy = "iy" daa = "daa" # required for wavelet operator gaus = "gaus" # required for wavelet operator arange = "arange" # torch.arange +Ba = "Ba" [tool.typos.files] extend-exclude = [ diff --git a/src/mrpro/algorithms/__init__.py b/src/mrpro/algorithms/__init__.py index 3dbc8ed07..bd31bb42f 100644 --- a/src/mrpro/algorithms/__init__.py +++ b/src/mrpro/algorithms/__init__.py @@ -1,3 +1,5 @@ -from mrpro.algorithms import csm, optimizers, reconstruction +"""Algorithms for reconstructions, optimization, density and sensitivity map estimation, etc.""" + +from mrpro.algorithms import csm, optimizers, reconstruction, dcf from mrpro.algorithms.prewhiten_kspace import prewhiten_kspace -__all__ = ["csm", "optimizers", "prewhiten_kspace", "reconstruction"] \ No newline at end of file +__all__ = ["csm", "dcf", "optimizers", "prewhiten_kspace", "reconstruction"] \ No newline at end of file diff --git a/src/mrpro/algorithms/csm/__init__.py b/src/mrpro/algorithms/csm/__init__.py index 255e09a89..ae81bf185 100644 --- a/src/mrpro/algorithms/csm/__init__.py +++ b/src/mrpro/algorithms/csm/__init__.py @@ -1,3 +1,5 @@ +"""Coil Sensitivity Estimation.""" + from mrpro.algorithms.csm.walsh import walsh from mrpro.algorithms.csm.inati import inati -__all__ = ["inati", "walsh"] \ No newline at end of file +__all__ = ["inati", "walsh"] diff --git a/src/mrpro/algorithms/csm/walsh.py b/src/mrpro/algorithms/csm/walsh.py index b1978d604..cc6fb619a 100644 --- a/src/mrpro/algorithms/csm/walsh.py +++ b/src/mrpro/algorithms/csm/walsh.py @@ -9,12 +9,30 @@ def walsh(coil_images: torch.Tensor, 
smoothing_width: SpatialDimension[int] | int) -> torch.Tensor:
    """Calculate a coil sensitivity map (csm) using an iterative version of the Walsh method.

-    This is for a single set of coil images. The input should be a tensor with dimensions
-    (coils, z, y, x). The output will have the same dimensions.
-    Either apply this function individually to each set of coil images,
-    or see CsmData.from_idata_walsh which performs this operation on a whole dataset [WAL2000]_.
+    This function computes CSMs from a set of complex coil images assuming spatially
+    slowly changing sensitivity maps using Walsh's method [WAL2000]_.

-    This function is inspired by https://github.com/ismrmrd/ismrmrd-python-tools.
+    The algorithm follows these steps:
+
+    1. **Compute Pointwise Covariance**:
+       Calculate the covariance matrix of the coil images at each voxel to capture inter-coil signal relationships.
+
+    2. **Apply Smoothing Filter**:
+       Smooth the covariance matrices across spatial dimensions using a uniform filter of specified width
+       to reduce noise and enforce spatial consistency.
+
+    3. **Dominant Eigenvector Estimation via Power Iteration**:
+       Perform power iterations to approximate the dominant eigenvector of the covariance matrix at each voxel,
+       representing the principal component of the signal.
+
+    4. **Normalize Sensitivity Maps**:
+       Normalize the resulting eigenvectors to produce the final CSMs.
+
+    This function works on a single set of coil images. The input should be a tensor with dimensions
+    (coils, z, y, x). The output will have the same dimensions. Either apply this function individually to each set of
+    coil images, or see CsmData.from_idata_walsh which performs this operation on a whole dataset [WAL2000]_.
+
+    This implementation is inspired by `ismrmrd-python-tools <https://github.com/ismrmrd/ismrmrd-python-tools>`_.

    Parameters
    ----------
diff --git a/src/mrpro/algorithms/dcf/__init__.py b/src/mrpro/algorithms/dcf/__init__.py
index d0291f190..3c68f26f9 100644
--- a/src/mrpro/algorithms/dcf/__init__.py
+++ b/src/mrpro/algorithms/dcf/__init__.py
@@ -1,2 +1,4 @@
+"""Density Compensation Calculation."""
+
from mrpro.algorithms.dcf.dcf_voronoi import dcf_1d, dcf_2d3d_voronoi
__all__ = ["dcf_1d", "dcf_2d3d_voronoi"]
diff --git a/src/mrpro/algorithms/dcf/dcf_voronoi.py b/src/mrpro/algorithms/dcf/dcf_voronoi.py
index 9218948ec..9ecdaa5c4 100644
--- a/src/mrpro/algorithms/dcf/dcf_voronoi.py
+++ b/src/mrpro/algorithms/dcf/dcf_voronoi.py
@@ -18,10 +18,17 @@ def _volume(v: ArrayLike):
def dcf_1d(traj: torch.Tensor) -> torch.Tensor:
    """Calculate sample density compensation function for 1D trajectory.

+    This function operates on a single `other` sample.
+    See also `~mrpro.data.DcfData` and `~mrpro.utils.smap`.
+
    Parameters
    ----------
    traj
        k-space positions, 1D tensor
+
+    Returns
+    -------
+        density compensation values
    """
    traj_sorted, inverse, counts = torch.unique(
        torch.round(traj, decimals=UNIQUE_ROUNDING_DECIMALS),
@@ -56,19 +63,25 @@ def dcf_1d(traj: torch.Tensor) -> torch.Tensor:
def dcf_2d3d_voronoi(traj: torch.Tensor) -> torch.Tensor:
-    """Calculate sample density compensation function using voronoi method.
+    """Calculate sample density compensation function using Voronoi method.
+
+    This function computes the DCF by determining the area around each point in k-space using the Voronoi tessellation.
+    Points at the edge of k-space are detected as outliers and are assigned the area of the 1% largest DCF values.
+
+    The Voronoi tessellation assigns each point in k-space a region based on the proximity to its nearest neighbors.
The DCF is then computed based on the inverse of the area of these regions.

-    Points at the edge of k-space are detected as outliers and assigned the
-    area of the 1% largest dcf values.
+    This function operates on a single `other` sample.
+    See also `~mrpro.data.DcfData` and `~mrpro.utils.smap`.

    Parameters
    ----------
    traj
-        k-space positions (2 or 3, k2, k1, k0)
+        k-space positions `(2 or 3, k2, k1, k0)`

    Returns
    -------
-        density compensation values (1, k2, k1, k0)
+        density compensation values `(1, k2, k1, k0)`
    """
    # 2D and 3D trajectories supported
    dim = traj.shape[0]
diff --git a/src/mrpro/algorithms/optimizers/__init__.py b/src/mrpro/algorithms/optimizers/__init__.py
index 701dc59aa..3ba4a03fa 100644
--- a/src/mrpro/algorithms/optimizers/__init__.py
+++ b/src/mrpro/algorithms/optimizers/__init__.py
@@ -1,3 +1,5 @@
+"""Optimizers."""
+
from mrpro.algorithms.optimizers.OptimizerStatus import OptimizerStatus
from mrpro.algorithms.optimizers.adam import adam
from mrpro.algorithms.optimizers.cg import cg
diff --git a/src/mrpro/algorithms/optimizers/adam.py b/src/mrpro/algorithms/optimizers/adam.py
index bbf6eeac3..12a5422dd 100644
--- a/src/mrpro/algorithms/optimizers/adam.py
+++ b/src/mrpro/algorithms/optimizers/adam.py
@@ -21,7 +21,46 @@ def adam(
    decoupled_weight_decay: bool = False,
    callback: Callable[[OptimizerStatus], None] | None = None,
) -> tuple[torch.Tensor, ...]:
-    """Adam for non-linear minimization problems.
+    r"""Adam for non-linear minimization problems.
+
+    Adam [KING2015]_ (Adaptive Moment Estimation) is a first-order optimization algorithm that adapts learning rates
+    for each parameter using estimates of the first and second moments of the gradients.
+
+    The parameter update rule is:
+
+    .. math::
+
+        m_t &= \beta_1 m_{t-1} + (1 - \beta_1) g_t \\
+        v_t &= \beta_2 v_{t-1} + (1 - \beta_2) g_t^2 \\
+        \hat{m}_t &= \frac{m_t}{1 - \beta_1^t}, \quad \hat{v}_t = \frac{v_t}{1 - \beta_2^t} \\
+        \theta_{t+1} &= \theta_t - \frac{\eta}{\sqrt{\hat{v}_t} + \epsilon} \hat{m}_t
+
+    where
+    :math:`g_t` is the gradient at step :math:`t`,
+    :math:`m_t` and :math:`v_t` are biased estimates of the first and second moments,
+    :math:`\hat{m}_t` and :math:`\hat{v}_t` are bias-corrected estimates,
+    :math:`\eta` is the learning rate,
+    :math:`\epsilon` is a small constant for numerical stability,
+    :math:`\beta_1` and :math:`\beta_2` are decay rates for the moment estimates.
+
+    Steps of the Adam algorithm:
+
+    1. Initialize parameters and moment estimates (:math:`m_0`, :math:`v_0`).
+    2. Compute the gradient of the objective function.
+    3. Compute bias-corrected estimates of the moments :math:`\hat{m}_t` and :math:`\hat{v}_t`.
+    4. Update parameters using the adaptive step size.
+
+    This function wraps PyTorch's :class:`torch.optim.Adam` and :class:`torch.optim.AdamW` implementations,
+    supporting both standard Adam and decoupled weight decay regularization (AdamW) [LOS2019]_.
+
+    References
+    ----------
+    .. [KING2015] Kingma DP, Ba J (2015) Adam: A Method for Stochastic Optimization. ICLR.
+       https://doi.org/10.48550/arXiv.1412.6980
+    .. [LOS2019] Loshchilov I, Hutter F (2019) Decoupled Weight Decay Regularization. ICLR.
+       https://doi.org/10.48550/arXiv.1711.05101
+    .. [REDDI2019] Sashank J. Reddi, Satyen Kale, Sanjiv Kumar (2019) On the Convergence of Adam and Beyond. ICLR.
+       https://doi.org/10.48550/arXiv.1904.09237
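+
+    Examples
+    --------
+    A minimal usage sketch with illustrative values, fitting a tensor to a constant target
+    with the `~mrpro.operators.functionals.MSE` functional:
+
+    >>> import torch
+    >>> from mrpro.algorithms.optimizers import adam
+    >>> from mrpro.operators.functionals import MSE
+    >>> functional = MSE(torch.ones(3))  # mean squared error to a target of ones
+    >>> (result,) = adam(functional, [torch.zeros(3)], max_iter=100, lr=1e-2)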
    Parameters
    ----------
@@ -40,24 +79,17 @@
    eps
        term added to the denominator to improve numerical stability
    weight_decay
-        weight decay (L2 penalty if decoupled_weight_decay is False)
+        weight decay (L2 penalty if `decoupled_weight_decay` is False)
    amsgrad
-        whether to use the AMSGrad variant of this algorithm from the paper
-        `On the Convergence of Adam and Beyond`
+        whether to use the AMSGrad variant [REDDI2019]_
    decoupled_weight_decay
-        whether to use Adam (default) or AdamW (if set to true) [LOS2019]_
+        whether to use Adam (default) or AdamW (if set to `True`) [LOS2019]_
    callback
        function to be called after each iteration
-
    Returns
    -------
        list of optimized parameters
-
-    References
-    ----------
-    .. [LOS2019] Loshchilov I, Hutter F (2019) Decoupled Weight Decay Regularization. ICLR
-       https://doi.org/10.48550/arXiv.1711.05101
    """
    parameters = tuple(p.detach().clone().requires_grad_(True) for p in initial_parameters)
diff --git a/src/mrpro/algorithms/optimizers/cg.py b/src/mrpro/algorithms/optimizers/cg.py
index 2d458bfa0..645a1afea 100644
--- a/src/mrpro/algorithms/optimizers/cg.py
+++ b/src/mrpro/algorithms/optimizers/cg.py
@@ -25,42 +25,53 @@ def cg(
) -> torch.Tensor:
    r"""CG for solving a linear system :math:`Hx=b`.

-    Thereby, :math:`H` is a linear self-adjoint operator, :math:`b` is the right-hand-side
-    of the system and :math:`x` is the sought solution.
+    This algorithm solves systems of the form :math:`H x = b`, where :math:`H` is a self-adjoint linear operator
+    and :math:`b` is the right-hand side. The method can solve a batch of :math:`N` systems jointly, treating
+    :math:`H` as block-diagonal with blocks :math:`H_i` and :math:`b = [b_1, ..., b_N]^T`.

-    Note that this implementation allows for simultaneously solving a batch of :math:`N` problems
-    of the form :math:`H_i x_i = b_i` with :math:`i=1,...,N`.
+    The method performs the following steps:

-    Thereby, the underlying assumption is that the considered problem is :math:`Hx=b` with
-    :math:`H:= diag(H_1, ..., H_N)` and :math:`b:= [b_1, ..., b_N]^T`.
+    1. Initialize the residual :math:`r_0 = b - Hx_0` (with :math:`x_0` as the initial guess).
+    2. Set the search direction :math:`p_0 = r_0`.
+    3. For each iteration :math:`k = 0, 1, 2, ...`:

-    Thus, if all :math:`H_i` are self-adjoint, so is :math:`H` and the CG can be applied.
-    Note however, that the accuracy of the obtained solutions might vary among the different
-    problems.
-    Note also that we don't test if the input operator is self-adjoint or not.
+       - Compute :math:`\alpha_k = \frac{r_k^T r_k}{p_k^T H p_k}`.
+       - Update the solution: :math:`x_{k+1} = x_k + \alpha_k p_k`.
+       - Compute the new residual: :math:`r_{k+1} = r_k - \alpha_k H p_k`.
+       - Update the search direction: :math:`\beta_k = \frac{r_{k+1}^T r_{k+1}}{r_k^T r_k}`,
+         then :math:`p_{k+1} = r_{k+1} + \beta_k p_k`.

-    Further, note that if the condition of :math:`H` is very large, a small residual does not necessarily
-    imply that the solution is accurate.
+    This implementation assumes that :math:`H` is self-adjoint and does not verify this condition.
+
+    See [Hestenes1952]_, [Nocedal2006]_, and [WikipediaCG]_ for more information.
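+
+    Examples
+    --------
+    A minimal usage sketch with illustrative values, solving :math:`2x = b` with a scaled
+    identity operator as the self-adjoint :math:`H`:
+
+    >>> import torch
+    >>> from mrpro.algorithms.optimizers import cg
+    >>> from mrpro.operators import IdentityOp
+    >>> operator = IdentityOp() * torch.as_tensor(2.0)  # H = 2 * identity
+    >>> right_hand_side = torch.ones(4)
+    >>> solution = cg(operator, right_hand_side, max_iterations=10, tolerance=1e-6)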
    Parameters
    ----------
    operator
-        self-adjoint operator (named H above)
+        self-adjoint operator (named :math:`H` above)
    right_hand_side
-        right-hand-side of the system (named b above)
+        right-hand-side of the system (named :math:`b` above)
    initial_value
-        initial value of the algorithm; if None, it will be set to right_hand_side
+        initial value of the algorithm; if `None`, it will be set to `right_hand_side`
    max_iterations
-        maximal number of iterations
+        maximal number of iterations. Can be used for early stopping.
    tolerance
        tolerance for the residual; if set to zero, the maximal number of iterations
-        is the only stopping criterion used to stop the cg
+        is the only stopping criterion used to stop the cg.
+        If the condition number of :math:`H` is large, a small residual may not imply a highly accurate solution.
    callback
-        function to be called at each iteration
+        function to be called at each iteration. This can be used to monitor the progress of the algorithm.

    Returns
    -------
-        an approximate solution of the linear system Hx=b
+        an approximate solution of the linear system :math:`Hx=b`
+
+    References
+    ----------
+    .. [Hestenes1952] Hestenes, M. R., & Stiefel, E. (1952). Methods of conjugate gradients for solving linear systems.
+       Journal of Research of the National Bureau of Standards, 49(6), 409-436.
+    .. [Nocedal2006] Nocedal, J. (2006). *Numerical Optimization* (2nd ed.). Springer.
+    .. [WikipediaCG] Wikipedia: Conjugate Gradient https://en.wikipedia.org/wiki/Conjugate_gradient
    """
    if initial_value is not None and (initial_value.shape != right_hand_side.shape):
        raise ValueError(
diff --git a/src/mrpro/algorithms/optimizers/lbfgs.py b/src/mrpro/algorithms/optimizers/lbfgs.py
index 736dcc0b0..6a05cfa80 100644
--- a/src/mrpro/algorithms/optimizers/lbfgs.py
+++ b/src/mrpro/algorithms/optimizers/lbfgs.py
@@ -22,22 +22,57 @@ def lbfgs(
    line_search_fn: None | Literal['strong_wolfe'] = 'strong_wolfe',
    callback: Callable[[OptimizerStatus], None] | None = None,
) -> tuple[torch.Tensor, ...]:
-    """LBFGS for non-linear minimization problems.
+    r"""
+    LBFGS for (non-linear) minimization problems.
+
+    The Limited-memory Broyden-Fletcher-Goldfarb-Shanno (LBFGS) algorithm is a quasi-Newton optimization method
+    that approximates the inverse Hessian matrix using a limited memory of past gradients and updates.
+    It is well-suited for high-dimensional problems and leverages curvature information
+    for faster convergence compared to first-order methods such as `~mrpro.algorithms.optimizers.adam`.
+
+    The parameter update rule is:
+
+    .. math::
+
+        \theta_{k+1} = \theta_k - \alpha_k H_k \nabla f(\theta_k),
+
+    where :math:`H_k` is a limited-memory approximation of the inverse Hessian,
+    and :math:`\alpha_k` is the step size determined via line search (e.g., strong Wolfe conditions).
+
+    The algorithm performs the following steps:
+
+    1. Compute the gradient of the objective function.
+    2. Approximate the inverse Hessian matrix :math:`H_k` using stored gradients and updates.
+    3. Perform a line search to compute the step size :math:`\alpha_k`.
+    4. Update the parameters.
+    5. Store the latest gradient and update information.
+
+    This implementation wraps PyTorch's `torch.optim.LBFGS` class.
+    For more information, see [WIKI]_, [NOC1980]_, and [LIU1989]_.
+
+    References
+    ----------
+    .. [NOC1980] Nocedal, J. (1980). "Updating quasi-Newton matrices with limited storage."
+       *Mathematics of Computation*, 35(151), 773-782. https://doi.org/10.1090/S0025-5718-1980-0572855-7
+    .. [LIU1989] Liu, D. C., & Nocedal, J. (1989). "On the limited memory BFGS method for large scale optimization."
+       *Mathematical Programming*, 45(1-3), 503-528. https://doi.org/10.1007/BF01589116
+    .. [WIKI] Wikipedia: Limited-memory_BFGS https://en.wikipedia.org/wiki/Limited-memory_BFGS
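+
+    Examples
+    --------
+    A minimal usage sketch with illustrative values (with the default strong-Wolfe line search,
+    `lr` can usually stay at its default of ``1.0``):
+
+    >>> import torch
+    >>> from mrpro.algorithms.optimizers import lbfgs
+    >>> from mrpro.operators.functionals import MSE
+    >>> functional = MSE(torch.ones(3))  # mean squared error to a target of ones
+    >>> (result,) = lbfgs(functional, initial_parameters=[torch.zeros(3)])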
+
    Parameters
    ----------
    f
        scalar function to be minimized
    initial_parameters
-        Sequence (for example list) of parameters to be optimized.
+        `Sequence` of parameters to be optimized.
        Note that these parameters will not be changed. Instead, we create a copy and
        leave the initial values untouched.
    lr
-        learning rate
+        learning rate. This should usually be left as ``1.0`` if a line search is used.
    max_iter
        maximal number of iterations
    max_eval
-        maximal number of evaluations of f per optimization step
+        maximal number of evaluations of `f` per optimization step
    tolerance_grad
        termination tolerance on first order optimality
    tolerance_change
@@ -45,10 +80,12 @@ def lbfgs(
    history_size
        update history size
    line_search_fn
-        line search algorithm, either 'strong_wolfe' or None (meaning constant step size)
+        line search algorithm, either ``'strong_wolfe'`` or `None` (meaning constant step size)
    callback
        function to be called after each iteration.
-        N.B. the callback is NOT called within the line search of LBFGS
+        N.B. the callback is not called within the line search of LBFGS.
+        You can use the information from the `~mrpro.algorithms.optimizers.OptimizerStatus`
+        to display a progress bar.

    Returns
    -------
diff --git a/src/mrpro/algorithms/prewhiten_kspace.py b/src/mrpro/algorithms/prewhiten_kspace.py
index 3e1dde12d..cb13a6618 100644
--- a/src/mrpro/algorithms/prewhiten_kspace.py
+++ b/src/mrpro/algorithms/prewhiten_kspace.py
@@ -14,10 +14,10 @@ def prewhiten_kspace(kdata: KData, knoise: KNoise, scale_factor: float | torch.T
    Steps:

-    - Calculate noise correlation matrix N
-    - Carry out Cholesky decomposition L L^H = N
-    - Estimate noise decorrelation matrix D = inv(L)
-    - Apply D to k-space data
+    - Calculate noise correlation matrix :math:`N`
+    - Carry out Cholesky decomposition :math:`L L^H = N`
+    - Estimate noise decorrelation matrix :math:`D = L^{-1}`
+    - Apply :math:`D` to k-space data

    More information can be found in [ISMa]_ [HAN2014]_ [ROE1990]_.

@@ -35,11 +35,11 @@ def prewhiten_kspace(kdata: KData, knoise: KNoise, scale_factor: float | torch.T
    scale_factor
        Square root is applied on the noise covariance matrix. Used to adjust for effective noise bandwidth
        and difference in sampling rate between noise calibration and actual measurement:
-        scale_factor = (T_acq_dwell/T_noise_dwell)*NoiseReceiverBandwidthRatio
+        ``scale_factor = (T_acq_dwell/T_noise_dwell)*NoiseReceiverBandwidthRatio``

    Returns
    -------
-        Prewhitened copy of k-space data
+        Prewhitened *copy* of k-space data

    References
    ----------
diff --git a/src/mrpro/algorithms/reconstruction/DirectReconstruction.py b/src/mrpro/algorithms/reconstruction/DirectReconstruction.py
index a201b86c2..afa318f75 100644
--- a/src/mrpro/algorithms/reconstruction/DirectReconstruction.py
+++ b/src/mrpro/algorithms/reconstruction/DirectReconstruction.py
@@ -25,28 +25,36 @@ def __init__(
    ):
        """Initialize DirectReconstruction.

+        A direct reconstruction uses the adjoint of the acquisition operator and a
+        density compensation to obtain the complex valued images from k-space data.
+
+        If `csm` is not set to `None`, a single coil-combined image will be reconstructed.
+        The method for estimating sensitivity maps can be adjusted using the `csm` argument.
+
        Parameters
        ----------
        kdata
-            KData.
If kdata is provided and fourier_op or dcf are None, then fourier_op and dcf are estimated based on
-            kdata. Otherwise fourier_op and dcf are used as provided.
+            If `kdata` is provided and `fourier_op` or `dcf` are `None`, then `fourier_op` and `dcf` are estimated
+            based on `kdata`. Otherwise `fourier_op` and `dcf` are used as provided.
        fourier_op
-            Instance of the FourierOperator used for reconstruction. If None, set up based on kdata.
+            Instance of the `~mrpro.operators.FourierOperator` used for reconstruction.
+            If `None`, set up based on `kdata`.
        csm
-            Sensitivity maps for coil combination. If None, no coil combination is carried out, i.e. images for each
-            coil are returned. If a callable is provided, coil images are reconstructed using the adjoint of the
-            FourierOperator (including density compensation) and then sensitivity maps are calculated using the
-            callable. For this, kdata needs also to be provided. For examples have a look at the CsmData class
-            e.g. from_idata_walsh or from_idata_inati.
+            Sensitivity maps for coil combination. If `None`, no coil combination is carried out, i.e. images for each
+            coil are returned. If a `Callable` is provided, coil images are reconstructed using the adjoint of the
+            `~mrpro.operators.FourierOperator` (including density compensation) and then sensitivity maps are calculated
+            using the callable. For this, `kdata` also needs to be provided.
+            For examples have a look at the `~mrpro.data.CsmData` class, e.g. `~mrpro.data.CsmData.from_idata_walsh`
+            or `~mrpro.data.CsmData.from_idata_inati`.
        noise
-            KNoise used for prewhitening. If None, no prewhitening is performed
+            Noise used for prewhitening. If `None`, no prewhitening is performed
        dcf
-            K-space sampling density compensation. If None, set up based on kdata.
+            K-space sampling density compensation. If `None`, set up based on `kdata`.

        Raises
        ------
-        ValueError
-            If the kdata and fourier_op are None or if csm is a Callable but kdata is None.
+        `ValueError`
+            If both `kdata` and `fourier_op` are `None`, or if `csm` is a `Callable` but `kdata` is `None`.
        """
        super().__init__()
        if fourier_op is None:
diff --git a/src/mrpro/algorithms/reconstruction/IterativeSENSEReconstruction.py b/src/mrpro/algorithms/reconstruction/IterativeSENSEReconstruction.py
index 444d85712..c4faf4af5 100644
--- a/src/mrpro/algorithms/reconstruction/IterativeSENSEReconstruction.py
+++ b/src/mrpro/algorithms/reconstruction/IterativeSENSEReconstruction.py
@@ -46,31 +46,34 @@
    ) -> None:
        """Initialize IterativeSENSEReconstruction.

-        For a regularized version of the iterative SENSE algorithm please see RegularizedIterativeSENSEReconstruction.
+        For a regularized version of the iterative SENSE algorithm
+        please see `~mrpro.algorithms.reconstruction.RegularizedIterativeSENSEReconstruction`.

        Parameters
        ----------
        kdata
-            KData. If kdata is provided and fourier_op or dcf are None, then fourier_op and dcf are estimated based on
-            kdata. Otherwise fourier_op and dcf are used as provided.
+            If `kdata` is provided and `fourier_op` or `dcf` are `None`, then `fourier_op` and `dcf` are estimated
+            based on `kdata`. Otherwise `fourier_op` and `dcf` are used as provided.
        fourier_op
-            Instance of the FourierOperator used for reconstruction. If None, set up based on kdata.
+            Instance of the `~mrpro.operators.FourierOperator` used for reconstruction.
+            If `None`, set up based on `kdata`.
        csm
-            Sensitivity maps for coil combination. If None, no coil combination is carried out, i.e. images for each
+            Sensitivity maps for coil combination. If `None`, no coil combination is carried out, i.e. images for each
            coil are returned. If a callable is provided, coil images are reconstructed using the adjoint of the
-            FourierOperator (including density compensation) and then sensitivity maps are calculated using the
-            callable. For this, kdata needs also to be provided. For examples have a look at the CsmData class
-            e.g. from_idata_walsh or from_idata_inati.
+            `~mrpro.operators.FourierOperator` (including density compensation) and then sensitivity maps are calculated
+            using the callable. For this, `kdata` also needs to be provided.
+            For examples have a look at the `~mrpro.data.CsmData` class, e.g. `~mrpro.data.CsmData.from_idata_walsh`
+            or `~mrpro.data.CsmData.from_idata_inati`.
        noise
-            KNoise used for prewhitening. If None, no prewhitening is performed
+            Noise used for prewhitening. If `None`, no prewhitening is performed
        dcf
-            K-space sampling density compensation. If None, set up based on kdata.
+            K-space sampling density compensation. If `None`, set up based on `kdata`.
        n_iterations
            Number of CG iterations

        Raises
        ------
-        ValueError
-            If the kdata and fourier_op are None or if csm is a Callable but kdata is None.
+        `ValueError`
+            If both `kdata` and `fourier_op` are `None`, or if `csm` is a `Callable` but `kdata` is `None`.
        """
        super().__init__(kdata, fourier_op, csm, noise, dcf, n_iterations=n_iterations, regularization_weight=0)
diff --git a/src/mrpro/algorithms/reconstruction/Reconstruction.py b/src/mrpro/algorithms/reconstruction/Reconstruction.py
index 54a1f6af2..1bde94969 100644
--- a/src/mrpro/algorithms/reconstruction/Reconstruction.py
+++ b/src/mrpro/algorithms/reconstruction/Reconstruction.py
@@ -49,7 +49,7 @@ def recalculate_fourierop(self, kdata: KData) -> Self:
        Parameters
        ----------
        kdata
-            KData to determine trajectory and recon/encoding matrix from.
+            k-space data to determine trajectory and recon/encoding matrix from.
        """
        self.fourier_op = FourierOp.from_kdata(kdata)
        self.dcf = DcfData.from_traj_voronoi(kdata.traj)
@@ -66,16 +66,16 @@ def recalculate_csm(
        Parameters
        ----------
        kdata
-            KData used for adjoint reconstruction (including DCF-weighting if available), which is then used for
+            k-space data used for adjoint reconstruction (including DCF-weighting if available), which is then used for
            CSM estimation.
        csm_calculation
            Function to calculate csm expecting idata as input and returning csmdata. For examples have a look at the
-            CsmData class e.g. from_idata_walsh or from_idata_inati.
+            `~mrpro.data.CsmData` class.
        noise
            Noise measurement for prewhitening.
-            If None, self.noise (if previously set) is used.
-            If False, no prewithening is performed even if self.noise is set.
-            Use this if the kdata is already prewhitened.
+            If `None`, `self.noise` (if previously set) is used.
+            If `False`, no prewhitening is performed even if `self.noise` is set.
+            Use this if the `kdata` is already prewhitened.
        """
        if noise is False:
            noise = None
@@ -89,8 +89,12 @@ def recalculate_csm(
    def direct_reconstruction(self, kdata: KData) -> IData:
        """Direct reconstruction of the MR acquisition.

-        Here we use S^H F^H W to calculate the image data using the coil sensitivity operator S, the Fourier operator F
-        and the density compensation operator W. S and W are optional.
+        Here we use :math:`S^H F^H W` to calculate the image data using
+        the coil sensitivity operator :math:`S`,
+        the Fourier operator :math:`F`,
+        and the density compensation operator :math:`W`.
+        :math:`S` and :math:`W` are optional: If they have not been set in this instance,
+        no coil combination or density compensation, respectively, will be performed.

        Parameters
        ----------
diff --git a/src/mrpro/algorithms/reconstruction/RegularizedIterativeSENSEReconstruction.py b/src/mrpro/algorithms/reconstruction/RegularizedIterativeSENSEReconstruction.py
index e3c1c49ce..6bf6e3bbe 100644
--- a/src/mrpro/algorithms/reconstruction/RegularizedIterativeSENSEReconstruction.py
+++ b/src/mrpro/algorithms/reconstruction/RegularizedIterativeSENSEReconstruction.py
@@ -57,26 +57,28 @@
    ) -> None:
        """Initialize RegularizedIterativeSENSEReconstruction.

-        For a unregularized version of the iterative SENSE algorithm the regularization_weight can be set to 0 or
-        IterativeSENSEReconstruction algorithm can be used.
+        For an unregularized version of the iterative SENSE algorithm, the `regularization_weight` can be set to ``0``,
+        or the `~mrpro.algorithms.reconstruction.IterativeSENSEReconstruction` algorithm can be used.

        Parameters
        ----------
        kdata
-            KData. If kdata is provided and fourier_op or dcf are None, then fourier_op and dcf are estimated based on
-            kdata. Otherwise fourier_op and dcf are used as provided.
+            If `kdata` is provided and `fourier_op` or `dcf` are `None`, then `fourier_op` and `dcf` are estimated
+            based on `kdata`. Otherwise `fourier_op` and `dcf` are used as provided.
        fourier_op
-            Instance of the FourierOperator used for reconstruction. If None, set up based on kdata.
+            Instance of the `~mrpro.operators.FourierOperator` used for reconstruction.
+            If `None`, set up based on `kdata`.
        csm
-            Sensitivity maps for coil combination. If None, no coil combination is carried out, i.e. images for each
+            Sensitivity maps for coil combination. If `None`, no coil combination is carried out, i.e. images for each
            coil are returned. If a callable is provided, coil images are reconstructed using the adjoint of the
-            FourierOperator (including density compensation) and then sensitivity maps are calculated using the
-            callable. For this, kdata needs also to be provided. For examples have a look at the CsmData class
-            e.g. from_idata_walsh or from_idata_inati.
+            `~mrpro.operators.FourierOperator` (including density compensation) and then sensitivity maps are calculated
+            using the callable. For this, `kdata` also needs to be provided.
+            For examples have a look at the `~mrpro.data.CsmData` class, e.g. `~mrpro.data.CsmData.from_idata_walsh`
+            or `~mrpro.data.CsmData.from_idata_inati`.
        noise
-            KNoise used for prewhitening. If None, no prewhitening is performed
+            Noise used for prewhitening. If `None`, no prewhitening is performed
        dcf
-            K-space sampling density compensation. If None, set up based on kdata.
+            K-space sampling density compensation. If `None`, set up based on `kdata`.
        n_iterations
            Number of CG iterations
        regularization_data
@@ -87,11 +89,10 @@ def __init__(
            Linear operator :math:`B` applied to the current estimate in the regularization term.
            If None, nothing is applied to the current estimate.
-
        Raises
        ------
-        ValueError
-            If the kdata and fourier_op are None or if csm is a Callable but kdata is None.
+        `ValueError`
+            If both `kdata` and `fourier_op` are `None`, or if `csm` is a `Callable` but `kdata` is `None`.
""" super().__init__(kdata, fourier_op, csm, noise, dcf) self.n_iterations = n_iterations diff --git a/src/mrpro/algorithms/reconstruction/__init__.py b/src/mrpro/algorithms/reconstruction/__init__.py index 38b539f8b..33277f07b 100644 --- a/src/mrpro/algorithms/reconstruction/__init__.py +++ b/src/mrpro/algorithms/reconstruction/__init__.py @@ -1,3 +1,5 @@ +"""Pre-built reconstruction algorithms.""" + from mrpro.algorithms.reconstruction.Reconstruction import Reconstruction from mrpro.algorithms.reconstruction.DirectReconstruction import DirectReconstruction from mrpro.algorithms.reconstruction.RegularizedIterativeSENSEReconstruction import RegularizedIterativeSENSEReconstruction diff --git a/src/mrpro/data/CsmData.py b/src/mrpro/data/CsmData.py index 000884f4b..0c3e3cf9f 100644 --- a/src/mrpro/data/CsmData.py +++ b/src/mrpro/data/CsmData.py @@ -27,6 +27,8 @@ def from_idata_walsh( ) -> Self: """Create csm object from image data using iterative Walsh method. + See also `~mrpro.algorithms.csm.walsh`. + Parameters ---------- idata @@ -35,7 +37,7 @@ def from_idata_walsh( width of smoothing filter. chunk_size_otherdim: How many elements of the other dimensions should be processed at once. - Default is None, which means that all elements are processed at once. + Default is `None`, which means that all elements are processed at once. """ from mrpro.algorithms.csm.walsh import walsh diff --git a/src/mrpro/data/Data.py b/src/mrpro/data/Data.py index cb0f3f3f5..ab1bf4921 100644 --- a/src/mrpro/data/Data.py +++ b/src/mrpro/data/Data.py @@ -14,7 +14,7 @@ class Data(MoveDataMixin, ABC): """A general data class with field data and header.""" data: torch.Tensor - """Data. Shape (...other coils k2 k1 k0)""" + """Data. Shape `(...other coils k2 k1 k0)`""" header: Any """Header information for data.""" diff --git a/src/mrpro/data/DcfData.py b/src/mrpro/data/DcfData.py index 3db7f6772..84047bce5 100644 --- a/src/mrpro/data/DcfData.py +++ b/src/mrpro/data/DcfData.py @@ -23,7 +23,7 @@ class DcfData(MoveDataMixin): """Density compensation data (DcfData) class.""" data: torch.Tensor - """Density compensation values. Shape (... other, k2, k1, k0)""" + """Density compensation values. Shape `(... other, k2, k1, k0)`""" @classmethod def from_traj_voronoi(cls, traj: KTrajectory) -> Self: diff --git a/src/mrpro/data/IData.py b/src/mrpro/data/IData.py index 0c3cad043..5a19fab8f 100644 --- a/src/mrpro/data/IData.py +++ b/src/mrpro/data/IData.py @@ -59,12 +59,13 @@ def rss(self, keepdim: bool = False) -> torch.Tensor: Parameters ---------- keepdim - if True, the output tensor has the same number of dimensions as the data tensor, and the coil dimension is - kept as a singleton dimension. If False, the coil dimension is removed. + if `True`, the output tensor has the same number of dimensions as the data tensor, and the coil dimension is + kept as a singleton dimension. If `False`, the coil dimension is removed. Returns ------- - image data tensor with shape (..., 1, z, y, x) if keepdim is True or (..., z, y, x) if keepdim is False. + image data tensor with shape `(..., 1, z, y, x)` if `keepdim` is `True` + or `(..., z, y, x)` if keepdim is `False`. """ coildim = -4 return self.data.abs().square().sum(dim=coildim, keepdim=keepdim).sqrt() @@ -76,9 +77,9 @@ def from_tensor_and_kheader(cls, data: torch.Tensor, kheader: KHeader) -> Self: Parameters ---------- data - torch.Tensor containing image data with dimensions (broadcastable to) (other, coils, z, y, x). 
+            image data with dimensions (broadcastable to) `(other, coils, z, y, x)`.
        kheader
-            MR raw data header (KHeader) containing required meta data for the image header (IHeader).
+            MR raw data header containing required meta data for the image header.
        """
        header = IHeader.from_kheader(kheader)
        return cls(header=header, data=data)
@@ -144,7 +145,7 @@ def from_dicom_folder(cls, foldername: str | Path, suffix: str | None = 'dcm') -
            path to folder with DICOM files.
        suffix
            file extension (without period/full stop) to identify the DICOM files.
-            If None, then all files in the folder are read in.
+            If `None`, then all files in the folder are read in.
        """
        # Get files
        file_paths = list(Path(foldername).glob('*')) if suffix is None else list(Path(foldername).glob('*.' + suffix))
diff --git a/src/mrpro/data/KData.py b/src/mrpro/data/KData.py
index 719141921..3894e9c05 100644
--- a/src/mrpro/data/KData.py
+++ b/src/mrpro/data/KData.py
@@ -72,10 +72,10 @@ class KData(
    """Header information for k-space data"""

    data: torch.Tensor
-    """K-space data. Shape (...other coils k2 k1 k0)"""
+    """K-space data. Shape `(*other coils k2 k1 k0)`"""

    traj: KTrajectory
-    """K-space trajectory along kz, ky and kx. Shape (...other k2 k1 k0)"""
+    """K-space trajectory along kz, ky and kx. Shape `(*other k2 k1 k0)`"""

    @classmethod
    def from_file(
@@ -314,9 +314,9 @@ def compress_coils(
        Raises
        ------
-        ValueError
+        `ValueError`
            If both batch_dims and joint_dims are defined.
-        Valuer Error
+        `ValueError`
            If coil dimension is part of joint_dims or batch_dims.

        References
        ----------
@@ -378,14 +378,16 @@ def compress_coils(
    def rearrange_k2_k1_into_k1(self: Self) -> Self:
        """Rearrange kdata from (... k2 k1 ...) to (... 1 (k2 k1) ...).

+        Note: This function will be deprecated in the future.
+
        Parameters
        ----------
        kdata
-            K-space data (other coils k2 k1 k0)
+            K-space data `(other coils k2 k1 k0)`

        Returns
        -------
-            K-space data (other coils 1 (k2 k1) k0)
+            K-space data `(other coils 1 (k2 k1) k0)`
        """
        # Rearrange data
        kdat = rearrange(self.data, '... coils k2 k1 k0->... coils 1 (k2 k1) k0')
@@ -404,7 +406,10 @@ def rearrange_k2_k1_into_k1(self: Self) -> Self:
        return type(self)(kheader, kdat, type(self.traj).from_tensor(ktraj))

    def remove_readout_os(self: Self) -> Self:
-        """Remove any oversampling along the readout (k0) direction [GAD]_.
+        """Remove any oversampling along the readout direction.
+
+        Removes oversampling along the readout direction by cropping the data
+        to the size of the reconstruction matrix in image space [GAD]_.

        Returns a copy of the data.
@@ -419,7 +424,7 @@
        Raises
        ------
-        ValueError
+        `ValueError`
            If the recon matrix along x is larger than the encoding matrix along x.

        References
        ----------
@@ -472,10 +477,12 @@ def select_other_subset(
    ) -> Self:
        """Select a subset from the other dimension of KData.

+        Note: This function will be deprecated in the future.
+
        Parameters
        ----------
        kdata
-            K-space data (other coils k2 k1 k0)
+            K-space data `(other coils k2 k1 k0)`
        subset_idx
            Index which elements of the other subset to use, e.g.
phase 0,1,2 and 5 subset_label @@ -483,11 +490,11 @@ def select_other_subset( Returns ------- - K-space data (other_subset coils k2 k1 k0) + K-space data `(other_subset coils k2 k1 k0)` Raises ------ - ValueError + `ValueError` If the subset indices are not available in the data """ # Make a copy such that the original kdata.header remains the same @@ -528,7 +535,7 @@ def _split_k2_or_k1_into_other( ---------- split_idx 2D index describing the k2 or k1 points in each block to be moved to the other dimension - (other_split, k1_per_split) or (other_split, k2_per_split) + `(other_split, k1_per_split)` or `(other_split, k2_per_split)` other_label Label of other dimension, e.g. repetition, phase split_dir @@ -537,12 +544,12 @@ def _split_k2_or_k1_into_other( Returns ------- K-space data with new shape - ((other other_split) coils k2 k1_per_split k0) or ((other other_split) coils k2_per_split k1 k0) + `((other other_split) coils k2 k1_per_split k0)` or `((other other_split) coils k2_per_split k1 k0)` Raises ------ - ValueError - Already existing "other_label" can only be of length 1 + `ValueError` + Already existing `other_label` can only be of length 1 """ # Number of other n_other = split_idx.shape[0] @@ -644,17 +651,20 @@ def split_k2_into_other( ) -> Self: """Based on an index tensor, split the data in e.g. phases. + Note: This function will be deprecated in the future. + Parameters ---------- kdata - K-space data (other coils k2 k1 k0) + K-space data `(other coils k2 k1 k0)` split_idx - 2D index describing the k2 points in each block to be moved to other dimension (other_split, k2_per_split) + 2D index describing the k2 points in each block to be moved to *other* dimension + `(other_split, k2_per_split)` other_label - Label of other dimension, e.g. repetition, phase + Label of *other* dimension, e.g. repetition, phase Returns ------- - K-space data with new shape ((other other_split) coils k2_per_split k1 k0) + K-space data with new shape `((other other_split) coils k2_per_split k1 k0)` """ return self._split_k2_or_k1_into_other(split_idx, other_label, split_dir='k2') diff --git a/src/mrpro/data/KNoise.py b/src/mrpro/data/KNoise.py index ec606114d..cdf9ab22c 100644 --- a/src/mrpro/data/KNoise.py +++ b/src/mrpro/data/KNoise.py @@ -18,7 +18,7 @@ class KNoise(MoveDataMixin): """MR raw data / k-space data class for noise measurements.""" data: torch.Tensor - """K-space data of noise measurements. Shape (...other coils k2 k1 k0)""" + """K-space data of noise measurements. Shape `(*other coils k2 k1 k0)`""" @classmethod def from_file( diff --git a/src/mrpro/data/KTrajectory.py b/src/mrpro/data/KTrajectory.py index f8bab4bae..8813af1a9 100644 --- a/src/mrpro/data/KTrajectory.py +++ b/src/mrpro/data/KTrajectory.py @@ -18,29 +18,33 @@ class KTrajectory(MoveDataMixin): """K-space trajectory. - Order of directions is always kz, ky, kx - Shape of each of kx,ky,kz is (other,k2,k1,k0) + Contains the trajectory in k-space along the three dimensions `kz`, `ky`, `kx`, + i.e. describes where in k-space each data point was acquired. - Example for 2D-Cartesian Trajectories: - kx changes along k0 and is Frequency Encoding - ky changes along k2 and is Phase Encoding - kz is zero(1,1,1,1) + The shape of each of `kx`, `ky`, `kz` is `(*other, k2, k1, k0)`, + where `other` can span multiple dimensions. 
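+
+    A minimal construction sketch (illustrative shapes; the 2D-Cartesian example below describes the semantics):
+
+    >>> import torch
+    >>> from mrpro.data import KTrajectory
+    >>> kz = torch.zeros(1, 1, 1, 1)
+    >>> ky = torch.arange(-64, 64, dtype=torch.float32).reshape(1, 1, 128, 1)
+    >>> kx = torch.arange(-128, 128, dtype=torch.float32).reshape(1, 1, 1, 256)
+    >>> traj = KTrajectory(kz, ky, kx)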
+
+    Example for 2D-Cartesian trajectories:
+
+    - `kx` changes along `k0` and is frequency encoding,
+    - `ky` changes along `k1` and is phase encoding, and
+    - `kz` is zero with shape `(1,1,1,1)`.
    """

    kz: torch.Tensor
-    """Trajectory in z direction / phase encoding direction k2 if Cartesian. Shape (other,k2,k1,k0)"""
+    """Trajectory in z direction / phase encoding direction k2 if Cartesian. Shape `(*other, k2, k1, k0)`"""

    ky: torch.Tensor
-    """Trajectory in y direction / phase encoding direction k1 if Cartesian. Shape (other,k2,k1,k0)"""
+    """Trajectory in y direction / phase encoding direction k1 if Cartesian. Shape `(*other, k2, k1, k0)`"""

    kx: torch.Tensor
-    """Trajectory in x direction / phase encoding direction k0 if Cartesian. Shape (other,k2,k1,k0)"""
+    """Trajectory in x direction / phase encoding direction k0 if Cartesian. Shape `(*other, k2, k1, k0)`"""

    grid_detection_tolerance: float = 1e-3
    """tolerance of how close trajectory positions have to be to integer grid points."""

    repeat_detection_tolerance: float | None = 1e-3
-    """tolerance for repeat detection. Set to None to disable."""
+    """tolerance for repeat detection. Set to `None` to disable."""

    def __post_init__(self) -> None:
        """Reduce repeated dimensions to singletons."""
@@ -78,13 +82,13 @@ def from_tensor(
    ) -> Self:
        """Create a KTrajectory from a tensor representation of the trajectory.

-        Reduces repeated dimensions to singletons if repeat_detection_tolerance is not set to None.
+        Reduces repeated dimensions to singletons if repeat_detection_tolerance is not set to `None`.

        Parameters
        ----------
        tensor
            The tensor representation of the trajectory.
-            This should be a 5-dim tensor, with (kz, ky, kx) stacked in this order along `stack_dim`.
+            This should be a 5-dim tensor, with (`kz`, `ky`, `kx`) stacked in this order along `stack_dim`.
        stack_dim
            The dimension in the tensor along which the directions are stacked.
        axes_order
@@ -153,8 +157,8 @@ def _traj_types(
        """Calculate the trajectory type along kzkykx and k2k1k0.

        Checks if the entries of the trajectory along certain dimensions
-            - are of shape 1 -> TrajType.SINGLEVALUE
-            - lie on a Cartesian grid -> TrajType.ONGRID
+            - are of shape 1 -> `TrajType.SINGLEVALUE`
+            - lie on a Cartesian grid -> `TrajType.ONGRID`

        Parameters
        ----------

        Returns
        -------
-            ((types along kz,ky,kx),(types along k2,k1,k0))
+            (`(types along kz,ky,kx)`,`(types along k2,k1,k0)`)

            # TODO: consider non-integer positions that are on a grid, e.g. (0.5, 1, 1.5, ....)
        """
diff --git a/src/mrpro/data/KTrajectoryRawShape.py b/src/mrpro/data/KTrajectoryRawShape.py
index e75ad8c16..798a1e700 100644
--- a/src/mrpro/data/KTrajectoryRawShape.py
+++ b/src/mrpro/data/KTrajectoryRawShape.py
@@ -15,25 +15,25 @@
@dataclass(slots=True, frozen=True)
class KTrajectoryRawShape(MoveDataMixin):
-    """K-space trajectory shaped ((other*k2*k1),k0).
+    """K-space trajectory shaped `((other*k2*k1), k0)`.

-    Order of directions is always kz, ky, kx
-    Shape of each of kx,ky,kz is ((other,k2,k1),k0) this means that e.g. slices, averages... have not yet been
-    separated from the phase and slice encoding dimensions. The trajectory is in the same shape as the raw data in the
-    raw data file.
+    Contains the k-space trajectory, i.e. a description of where each data point was acquired in k-space,
+    in the raw shape as it is read from the data file, before any reshaping or sorting by indices is applied.
+    The shape of each of `kx`, `ky`, `kz` is `((other*k2*k1), k0)`;
+    this means that e.g. slices, averages...
have not yet been separated from the phase and slice encoding dimensions. """ kz: torch.Tensor - """(other,k2,k1,k0), phase encoding direction k2 if Cartesian.""" + """`(other*k2*k1,k0)`, phase encoding direction k2 if Cartesian.""" ky: torch.Tensor - """(other,k2,k1,k0), phase encoding direction k1 if Cartesian.""" + """`(other*k2*k1,k0)`, phase encoding direction k1 if Cartesian.""" kx: torch.Tensor - """(other,k2,k1,k0), frequency encoding direction k0 if Cartesian.""" + """`(other*k2*k1,k0)`, frequency encoding direction k0 if Cartesian.""" repeat_detection_tolerance: None | float = 1e-3 - """tolerance for repeat detection. Set to None to disable.""" + """tolerance for repeat detection. Set to `None` to disable.""" @classmethod def from_tensor( @@ -58,7 +58,7 @@ def from_tensor( repeat_detection_tolerance Tolerance for detecting repeated dimensions (broadcasting). If trajectory points differ by less than this value, they are considered identical. - Set to None to disable this feature. + Set to `None` to disable this feature. scaling_matrix If a scaling matrix is provided, the trajectory is rescaled to fit within the dimensions of the matrix. If not provided, the trajectory remains unchanged. @@ -89,12 +89,14 @@ def sort_and_reshape( ) -> KTrajectory: """Resort and reshape the raw trajectory to KTrajectory. + This function is used to sort the raw trajectory and reshape it to an `mrpro.data.KTrajectory` + by separating the combined dimension `(other k2 k1)` into three separate dimensions. + Parameters ---------- sort_idx - Index which defines how combined dimension (other k2 k1) needs to be sorted such that it can be separated - into three separate dimensions using simple reshape operation. This information needs to be provided from - kheader.acq_info. + Index which defines how combined dimension `(other k2 k1)` needs to be sorted such that it can be separated + into three separate dimensions using a reshape operation. n_k2 number of k2 points. n_k1 @@ -102,7 +104,7 @@ def sort_and_reshape( Returns ------- - KTrajectory with kx, ky and kz each in the shape (other k2 k1 k0). + KTrajectory with kx, ky and kz each in the shape `(other k2 k1 k0)`. """ # Resort and reshape kz = rearrange(self.kz[sort_idx, ...], '(other k2 k1) k0 -> other k2 k1 k0', k1=n_k1, k2=n_k2) diff --git a/src/mrpro/data/MoveDataMixin.py b/src/mrpro/data/MoveDataMixin.py index 99bcb3df5..caa06cab9 100644 --- a/src/mrpro/data/MoveDataMixin.py +++ b/src/mrpro/data/MoveDataMixin.py @@ -10,8 +10,21 @@ from typing_extensions import Any, Protocol, Self, TypeVar, overload, runtime_checkable -class InconsistentDeviceError(ValueError): # noqa: D101 - def __init__(self, *devices): # noqa: D107 +class InconsistentDeviceError(ValueError): + """Raised if the devices of different fields differ. + + There is no single device that all fields are on, thus + the overall device of the object cannot be determined. + """ + + def __init__(self, *devices): + """Initialize. + + Parameters + ---------- + devices + The devices of the fields that differ. + """ super().__init__(f'Inconsistent devices found, found at least {", ".join(str(d) for d in devices)}') @@ -62,21 +75,21 @@ def to( def to(self, *args, **kwargs) -> Self: """Perform dtype and/or device conversion of data. - A torch.dtype and torch.device are inferred from the arguments + A :py:class:`torch.dtype` and :py:class:`torch.device` are inferred from the arguments args and kwargs. Please have a look at the - documentation of torch.Tensor.to() for more details.
+ documentation of :py:class:`torch.Tensor.to()` for more details. A new instance of the dataclass will be returned. The conversion will be applied to all Tensor- or Module fields of the dataclass, and to all fields that implement - the MoveDataMixin. + the :py:class:`MoveDataMixin`. The dtype-type, i.e. float or complex will always be preserved, but the precision of floating point dtypes might be changed. Example: - If called with dtype=torch.float32 OR dtype=torch.complex64: + If called with ``dtype=torch.float32`` OR ``dtype=torch.complex64``: - A complex128 tensor will be converted to complex64 - A float64 tensor will be converted to float32 @@ -156,10 +169,10 @@ def _to( ) -> Self: """Move data to device and convert dtype if necessary. - This method is called by .to(), .cuda(), .cpu(), .double(), and so on. - It should not be called directly. + This method is called by `.to()`, `.cuda()`, `.cpu()`, + `.double()`, and so on. It should not be called directly. - See .to() for more details. + See `MoveDataMixin.to()` for more details. Parameters ---------- @@ -391,13 +404,13 @@ def device(self) -> torch.device | None: Looks at each field of a dataclass implementing a device attribute, such as torch.Tensors or MoveDataMixin instances. If the devices - of the fields differ, an InconsistentDeviceError is raised, otherwise + of the fields differ, an :py:exc:`~mrpro.data.InconsistentDeviceError` is raised, otherwise the device is returned. If no field implements a device attribute, None is returned. Raises ------ - InconsistentDeviceError: + :py:exc:`InconsistentDeviceError` If the devices of different fields differ. Returns diff --git a/src/mrpro/data/QData.py b/src/mrpro/data/QData.py index 04f4ca1f4..562c6f08b 100644 --- a/src/mrpro/data/QData.py +++ b/src/mrpro/data/QData.py @@ -28,7 +28,7 @@ def __init__(self, data: torch.Tensor, header: KHeader | IHeader | QHeader) -> N Parameters ---------- data - quantitative image data tensor with dimensions (other, coils, z, y, x) + quantitative image data tensor with dimensions `(other, coils, z, y, x)` header MRpro header containing required meta data for the QHeader """ diff --git a/src/mrpro/data/Rotation.py b/src/mrpro/data/Rotation.py index 628d93c7e..41baeb53d 100644 --- a/src/mrpro/data/Rotation.py +++ b/src/mrpro/data/Rotation.py @@ -391,7 +391,7 @@ class Rotation(torch.nn.Module): - not all features are implemented. Notably, mrp, davenport, and reduce are missing. - arbitrary number of batching dimensions - support for improper rotations (rotoinversion), i.e., rotations with an coordinate inversion - or a reflection about a plane perpendicular to the rotation axis. + or a reflection about a plane perpendicular to the rotation axis. """ def __init__( @@ -404,7 +404,7 @@ def __init__( ) -> None: """Initialize a new Rotation. - Instead of calling this method, also consider the different ``from_*`` class methods to construct a Rotation. + Instead of calling this method, also consider the different `from_*` class methods to construct a Rotation. Parameters ---------- @@ -516,15 +516,16 @@ def from_quat( i.e. rotations with reflection with respect to the plane perpendicular to the rotation axis or inversion of the coordinate system. - Note: If inversion != reflection, the rotation will be improper and save as a rotation followed by an inversion. - containing an inversion of the coordinate system. + .. 
note:: + If ``inversion != reflection``, the rotation will be improper and saved + as a rotation followed by an inversion of the coordinate system. Parameters ---------- quaternions - shape (..., 4) + shape `(..., 4)` Each row is a (possibly non-unit norm) quaternion representing an - active rotation, in scalar-last (x, y, z, w) format. Each + active rotation, in scalar-last `(x, y, z, w)` format. Each quaternion will be normalized to unit norm. inversion if the rotation should contain an inversion of the coordinate system, i.e. a reflection @@ -558,7 +559,7 @@ def from_matrix(cls, matrix: torch.Tensor | NestedSequence[float], allow_imprope Parameters ---------- matrix - A single matrix or a stack of matrices, shape (..., 3, 3) + A single matrix or a stack of matrices, shape `(..., 3, 3)` allow_improper If true, the rotation is considered as improper if the determinant of the matrix is negative. If false, an ValueError is raised if the determinant is negative. @@ -572,7 +573,7 @@ def from_matrix(cls, matrix: torch.Tensor | NestedSequence[float], allow_imprope References ---------- .. [ROTa] Rotation matrix https://en.wikipedia.org/wiki/Rotation_matrix#In_three_dimensions - .. [ROTb] Rotation matrix https://en.wikipedia.org/wiki/Improper_rotation + .. [ROTb] Improper Rotation https://en.wikipedia.org/wiki/Improper_rotation .. [MAR2008] Landis Markley F (2008) Unit Quaternion from Rotation Matrix, Journal of guidance, control, and dynamics 31(2),440-442. """ @@ -611,7 +612,7 @@ def from_directions( 3 Basis vectors of the new coordinate system, i.e. the columns of the rotation matrix allow_improper If true, the rotation is considered as improper if the determinant of the matrix is negative - and the sign will be preserved. If false, a ValueError is raised if the determinant is negative. + and the sign will be preserved. If false, a `ValueError` is raised if the determinant is negative. Returns @@ -635,7 +636,7 @@ def as_directions( """Represent as the basis vectors of the new coordinate system as SpatialDimensions. Returns the three basis vectors of the new coordinate system after rotation, - i.e. the columns of the rotation matrix, as SpatialDimensions. + i.e. the columns of the rotation matrix, as `~mrpro.data.SpatialDimensions`. Returns ------- @@ -666,7 +667,7 @@ def from_rotvec( Parameters ---------- rotvec - shape (..., 3), the rotation vectors. + shape `(..., 3)`, the rotation vectors. degrees If True, then the given angles are assumed to be in degrees, otherwise radians. @@ -831,7 +832,7 @@ def as_quat( Active rotations in 3 dimensions can be represented using unit norm quaternions [QUAb]_. The mapping from quaternions to rotations is - two-to-one, i.e. quaternions ``q`` and ``-q``, where ``-q`` simply + two-to-one, i.e. quaternions `q` and `-q`, where `-q` simply reverses the sign of each component, represent the same spatial rotation. The returned value is in scalar-last (x, y, z, w) format. @@ -860,9 +861,9 @@ def as_quat( Returns ------- quaternions - shape (..., 4,), depends on shape of inputs used for initialization. + shape `(..., 4,)`, depends on shape of inputs used for initialization. (optional) reflection (if improper is 'reflection') or inversion (if improper is 'inversion') - boolean tensor of shape (...,), indicating if the rotation is improper + boolean tensor of shape `(...,)`, indicating if the rotation is improper and if a reflection or inversion should be performed after the rotation.
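A minimal round-trip sketch of the quaternion interface described above (assuming the scipy-like `mrpro.data.Rotation` API; the value is illustrative)::

    import torch
    from mrpro.data import Rotation

    quaternion = torch.tensor([0.0, 0.0, 0.0, 1.0])  # scalar-last (x, y, z, w): the identity rotation
    rotation = Rotation.from_quat(quaternion)
    print(rotation.as_quat())  # tensor([0., 0., 0., 1.])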
References ---------- @@ -906,17 +907,17 @@ def as_matrix(self) -> torch.Tensor: """Represent as rotation matrix. 3D rotations can be represented using rotation matrices, which - are 3 x 3 real orthogonal matrices with determinant equal to +1 [ROTb]_ + are 3 x 3 real orthogonal matrices with determinant equal to +1 [ROT]_ for proper rotations and -1 for improper rotations. Returns ------- matrix - shape (..., 3, 3), depends on shape of inputs used for initialization. + shape `(..., 3, 3)`, depends on shape of inputs used for initialization. References ---------- - .. [ROTb] Rotation matrix https://en.wikipedia.org/wiki/Rotation_matrix#In_three_dimensions + .. [ROT] Rotation matrix https://en.wikipedia.org/wiki/Rotation_matrix#In_three_dimensions """ quaternions = self._quaternions matrix = _quaternion_to_matrix(quaternions) @@ -967,9 +968,9 @@ def as_rotvec( Returns ------- rotvec - Shape (..., 3), depends on shape of inputs used for initialization. + Shape `(..., 3)`, depends on shape of inputs used for initialization. (optional) reflection (if improper is 'reflection') or inversion (if improper is 'inversion') - boolean tensor of shape (...,), indicating if the rotation is improper + boolean tensor of shape `(...,)`, indicating if the rotation is improper and if a reflection or inversion should be performed after the rotation. @@ -1056,15 +1057,15 @@ def as_euler( Returns ------- angles - shape (3,) or (..., 3), depending on shape of inputs used to initialize object. + shape `(3,)` or `(..., 3)`, depending on shape of inputs used to initialize object. The returned angles are in the range: - - First angle belongs to [-180, 180] degrees (both inclusive) - - Third angle belongs to [-180, 180] degrees (both inclusive) - - Second angle belongs to: - + [-90, 90] degrees if all axes are different (like xyz) - + [0, 180] degrees if first and third axes are the same (like zxz) + - First angle belongs to ``[-180, 180]`` degrees (both inclusive) + - Third angle belongs to ``[-180, 180]`` degrees (both inclusive) + - Second angle belongs to: + + ``[-90, 90]`` degrees if all axes are different (like xyz) + + ``[0, 180]`` degrees if first and third axes are the same (like zxz) References ---------- @@ -1158,7 +1159,8 @@ def apply( This is a hybrid method that matches the signature of both `torch.nn.Module.apply` and `scipy.spatial.transform.Rotation.apply`. If a callable is passed, it is assumed to be a function that will be applied to the Rotation module. - For applying the rotation to a vector, consider using `Rotation(vector)` instead of `Rotation.apply(vector)`. + For applying the rotation to a vector, consider using ``rotation(vector)`` instead of + ``rotation.apply(vector)``. """ if callable(fn): # torch.nn.Module.apply @@ -1221,9 +1223,9 @@ def forward( - If object contains a single rotation (as opposed to a stack with a single rotation) and a single vector is specified with - shape ``(3,)``, then `rotated_vectors` has shape ``(3,)``. - - In all other cases, `rotated_vectors` has shape ``(..., 3)``, - where ``...`` is determined by broadcasting. + shape `(3,)`, then `rotated_vectors` has shape `(3,)`. + - In all other cases, `rotated_vectors` has shape `(..., 3)`, + where `...` is determined by broadcasting. """ matrix = self.as_matrix() if inverse: @@ -1278,23 +1280,23 @@ def random( Parameters ---------- num - Number of random rotations to generate. If None (default), then a + Number of random rotations to generate. If `None`, then a single rotation is generated.
random_state - If `random_state` is None, the `numpy.random.RandomState` + If `random_state` is `None`, the `~numpy.random.RandomState` singleton is used. - If `random_state` is an int, a new ``RandomState`` instance is used, + If `random_state` is an int, a new `RandomState` instance is used, seeded with `random_state`. - If `random_state` is already a ``Generator`` or ``RandomState`` instance + If `random_state` is already a `Generator` or `RandomState` instance then that instance is used. improper - if True, only improper rotations are generated. If False, only proper rotations are generated. + if `True`, only improper rotations are generated. If False, only proper rotations are generated. if "random", then a random mix of proper and improper rotations are generated. Returns ------- random_rotation - Contains a single rotation if `num` is None. Otherwise contains a + Contains a single rotation if `num` is `None`. Otherwise contains a stack of `num` rotations. """ generator: np.random.RandomState = check_random_state(random_state) @@ -1332,7 +1334,7 @@ def random_vmf( Parameters ---------- mean_axis - shape (..., 3,), the mean axis of the von Mises-Fisher distribution. + shape `(..., 3,)`, the mean axis of the von Mises-Fisher distribution. kappa The concentration parameter of the von Mises-Fisher distribution. small kappa results in a uniform distribution, large kappa results in a peak around the mean axis. @@ -1341,7 +1343,7 @@ def random_vmf( Standard deviation (radians) of the 2pi-wrapped Gaussian distribution used to sample the rotation angle. Use `math.inf` if a uniform distribution is desired. num - number of samples to generate. If None, a single rotation is generated. + number of samples to generate. If `None`, a single rotation is generated. Returns ------- @@ -1369,7 +1371,7 @@ def __matmul__(self, other: Rotation) -> Self: """Compose this rotation with the other. If `p` and `q` are two rotations, then the composition of 'q followed - by p' is equivalent to `p * q`. In terms of rotation matrices, + by p' is equivalent to ``p @ q``. In terms of rotation matrices, the composition can be expressed as ``p.as_matrix() @ q.as_matrix()``. @@ -1377,8 +1379,8 @@ def __matmul__(self, other: Rotation) -> Self: ---------- other Object containing the rotations to be composed with this one. Note - that rotation compositions are not commutative, so ``p * q`` is - generally different from ``q * p``. + that rotation compositions are not commutative, so ``p @ q`` is + generally different from ``q @ p``. Returns ------- @@ -1386,12 +1388,12 @@ def __matmul__(self, other: Rotation) -> Self: This function supports composition of multiple rotations at a time. The following cases are possible: - - Either ``p`` or ``q`` contains a single rotation. In this case + - Either `p` or `q` contains a single rotation. In this case `composition` contains the result of composing each rotation in the other object with the single rotation. - - Both ``p`` and ``q`` contain ``N`` rotations. In this case each - rotation ``p[i]`` is composed with the corresponding rotation - ``q[i]`` and `output` contains ``N`` rotations. + - Both `p` and `q` contain `N` rotations. In this case each + rotation `p[i]` is composed with the corresponding rotation + `q[i]` and `output` contains `N` rotations. 
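A short sketch of the composition rule described above (a hypothetical example; both factors rotate by 90 degrees about z)::

    import math

    import torch
    from mrpro.data import Rotation

    p = Rotation.from_rotvec(torch.tensor([0.0, 0.0, math.pi / 2]))
    q = Rotation.from_rotvec(torch.tensor([0.0, 0.0, math.pi / 2]))
    composed = p @ q  # 'q followed by p': 180 degrees about z
    print(composed.magnitude())  # approximately pi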
""" if not isinstance(other, Rotation): return NotImplemented # type: ignore[unreachable] @@ -1410,13 +1412,13 @@ def __matmul__(self, other: Rotation) -> Self: def __pow__(self, n: float, modulus: None = None): """Compose this rotation with itself `n` times. - Composition of a rotation ``p`` with itself can be extended to - non-integer ``n`` by considering the power ``n`` to be a scale factor + Composition of a rotation `p` with itself can be extended to + non-integer `n` by considering the power `n` to be a scale factor applied to the angle of rotation about the rotation's fixed axis. The expression ``q = p ** n`` can also be expressed as ``q = Rotation.from_rotvec(n * p.as_rotvec())``. - If ``n`` is negative, then the rotation is inverted before the power + If `n` is negative, then the rotation is inverted before the power is applied. In other words, ``p ** -abs(n) == p.inv() ** abs(n)``. Parameters @@ -1425,21 +1427,21 @@ def __pow__(self, n: float, modulus: None = None): The number of times to compose the rotation with itself. modulus This overridden argument is not applicable to Rotations and must be - ``None``. + `None`. Returns ------- - power : `Rotation` instance - If the input Rotation ``p`` contains ``N`` multiple rotations, then - the output will contain ``N`` rotations where the ``i`` th rotation - is equal to ``p[i] ** n`` + power + If the input Rotation `p` contains `N` multiple rotations, then + the output will contain `N` rotations where the `i` th rotation + is equal to `p[i] ** n` Notes ----- For example, a power of 2 will double the angle of rotation, and a power of 0.5 will halve the angle. There are three notable cases: if - ``n == 1`` then the original rotation is returned, if ``n == 0`` - then the identity rotation is returned, and if ``n == -1`` then + `n == 1` then the original rotation is returned, if `n == 0` + then the identity rotation is returned, and if `n == -1` then ``p.inv()`` is returned. For improper rotations, the power of a rotation with a reflection is @@ -1449,9 +1451,9 @@ def __pow__(self, n: float, modulus: None = None): This means that, for example a 0.5 power of a rotation with a reflection applied twice will result in a rotation without a reflection. - Note that fractional powers ``n`` which effectively take a root of + Note that fractional powers `n` which effectively take a root of rotation, do so using the shortest path smallest representation of that - angle (the principal root). This means that powers of ``n`` and ``1/n`` + angle (the principal root). This means that powers of `n` and `1/n` are not necessarily inverses of each other. For example, a 0.5 power of a +240 degree rotation will be calculated as the 0.5 power of a -120 degree rotation, with the result being a rotation of -60 rather than @@ -1523,9 +1525,9 @@ def invert_axes(self) -> Self: Converts a proper rotation to an improper one, or vice versa by inversion of the coordinate system. - Note: - This is not the same as the inverse of the rotation. - See `inv` for that. + .. note:: + This is not the same as the inverse of the rotation. + See `inv` an inverse. Returns ------- @@ -1545,7 +1547,7 @@ def magnitude(self) -> torch.Tensor: Returns ------- magnitude - Angles in radians. The magnitude will always be in the range [0, pi]. + Angles in radians. The magnitude will always be in the range ``[0, pi]``. 
""" angles = 2 * torch.atan2( torch.linalg.vector_norm(self._quaternions[..., :3], dim=-1), torch.abs(self._quaternions[..., 3]) @@ -1569,7 +1571,7 @@ def approx_equal(self, other: Rotation, atol: float = 1e-6, degrees: bool = Fals considered equal. degrees If True and `atol` is given, then `atol` is measured in degrees. If - False (default), then atol is measured in radians. + False, then atol is measured in radians. Returns ------- @@ -1596,11 +1598,11 @@ def __getitem__(self, indexer: TorchIndexerType) -> Self: Returns ------- - rotation + The extracted rotation(s). Raises ------ - TypeError if the instance was created as a single rotation. + `TypeError` if the instance was created as a single rotation. """ if self._single: raise TypeError('Single rotation is not subscriptable.') @@ -1678,7 +1680,7 @@ def __setitem__(self, indexer: TorchIndexerType, value: Rotation): Raises ------ - TypeError if the instance was created as a single rotation. + `TypeError` if the instance was created as a single rotation. """ if self._single: raise TypeError('Single rotation is not subscriptable.') @@ -1703,12 +1705,12 @@ def identity(cls, shape: int | None | tuple[int, ...] = None) -> Self: Parameters ---------- shape - Number of identity rotations to generate. If None (default), then a + Number of identity rotations to generate. If `None`, then a single rotation is generated. Returns ------- - identity : Rotation object + identity The identity rotation. """ match shape: @@ -1762,14 +1764,12 @@ def align_vectors( function is minimized to solve for the rotation matrix :math:`R`: .. math:: - - L(R) = \\frac{1}{2} \\sum_{i = 1}^{n} w_i \\lVert \\mathbf{a}_i - - R \\mathbf{b}_i \\rVert^2 , + L(R) = \frac{1}{2} \sum_{i = 1}^{n} w_i \| a_i - R b_i \|^2 , where :math:`w_i`'s are the `weights` corresponding to each vector. - The rotation is estimated with Kabsch algorithm [1]_, and solves what - is known as the "pointing problem", or "Wahba's problem" [2]_. + The rotation is estimated with Kabsch algorithm [KAB]_, and solves what + is known as the "pointing problem", or "Wahba's problem" [WAH]_. There are two special cases. The first is if a single vector is given for `a` and `b`, in which the shortest distance rotation that aligns @@ -1781,7 +1781,7 @@ def align_vectors( of these two rotations. The result via this process is the same as the Kabsch algorithm as the corresponding weight approaches infinity in the limit. For a single secondary vector this is known as the - "align-constrain" algorithm [3]_. + "align-constrain" algorithm [MAG2018]_. For both special cases (single vectors or an infinite weight), the sensitivity matrix does not have physical meaning and an error will be @@ -1799,13 +1799,13 @@ def align_vectors( denotes a vector. weights Weights describing the relative importance of the vector - observations. If None (default), then all values in `weights` are + observations. If `None`, then all values in `weights` are assumed to be 1. One and only one weight may be infinity, and weights must be positive. return_sensitivity Whether to return the sensitivity matrix. allow_improper - If True, allow improper rotations to be returned. If False (default), + If True, allow improper rotations to be returned. If False, then the rotation is restricted to be proper. Returns @@ -1822,12 +1822,10 @@ def align_vectors( References ---------- - .. [1] https://en.wikipedia.org/wiki/Kabsch_algorithm - .. [2] https://en.wikipedia.org/wiki/Wahba%27s_problem - .. 
[3] Magner, Robert, - "Extending target tracking capabilities through trajectory and - momentum setpoint optimization." Small Satellite Conference, - 2018. + .. [KAB] https://en.wikipedia.org/wiki/Kabsch_algorithm + .. [WAH] https://en.wikipedia.org/wiki/Wahba%27s_problem + .. [MAG2018] Magner R (2018), Extending target tracking capabilities through trajectory and momentum setpoint + optimization. Small Satellite Conference. """ a_tensor = torch.stack([torch.as_tensor(el) for el in a]) if isinstance(a, Sequence) else torch.as_tensor(a) b_tensor = torch.stack([torch.as_tensor(el) for el in b]) if isinstance(b, Sequence) else torch.as_tensor(b) @@ -1895,14 +1893,14 @@ def mean( r"""Get the mean of the rotations. The mean used is the chordal L2 mean (also called the projected or - induced arithmetic mean) [HAR2013]_. If ``A`` is a set of rotation matrices, - then the mean ``M`` is the rotation matrix that minimizes the + induced arithmetic mean) [HAR2013]_. If `A` is a set of rotation matrices, + then the mean `M` is the rotation matrix that minimizes the following loss function: - :math:`L(M) = \sum_{i = 1}^{n} w_i \lVert \mathbf{A}_i - \mathbf{M} \rVert^2`, + :math:`L(M) = \sum_{i = 1}^{n} w_i \| A_i - M \|^2`, where :math:`w_i`'s are the `weights` corresponding to each matrix. - Optionally, if A is a set of Rotation matrices with multiple batch dimensions, + Optionally, if `A` is a set of Rotation matrices with multiple batch dimensions, the dimensions to reduce over can be specified. If the rotations contains improper, the mean will be computed without @@ -1914,17 +1912,17 @@ def mean( ---------- weights Weights describing the relative importance of the rotations. If - None (default), then all values in `weights` are assumed to be + `None`, then all values in `weights` are assumed to be equal. dim - Batch Dimensions to reduce over. None will always return a single Rotation. + Batch Dimensions to reduce over. `None` will always return a single Rotation. keepdim Keep reduction dimensions as length-1 dimensions. Returns ------- - mean : `Rotation` instance + mean Object containing the mean of the rotations in the current instance. 
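A minimal sketch of averaging a stack of rotations as described above (assuming the batched `Rotation` API of mrpro; values are illustrative)::

    import torch
    from mrpro.data import Rotation

    rotations = Rotation.from_rotvec(torch.tensor([[0.0, 0.0, 0.1], [0.0, 0.0, 0.3]]))
    mean_rotation = rotations.mean()  # chordal L2 mean over the batch dimension
    print(mean_rotation.as_rotvec())  # approximately tensor([0., 0., 0.2])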
diff --git a/src/mrpro/data/SpatialDimension.py b/src/mrpro/data/SpatialDimension.py index 12f94e8a6..0e3a93b4c 100644 --- a/src/mrpro/data/SpatialDimension.py +++ b/src/mrpro/data/SpatialDimension.py @@ -414,7 +414,7 @@ def shape(self) -> tuple[int, ...]: Raises ------ - ValueError if the shapes are not equal + `ValueError` if the shapes are not equal """ if isinstance(self.x, ScalarTypes) and isinstance(self.y, ScalarTypes) and isinstance(self.z, ScalarTypes): return () diff --git a/src/mrpro/data/__init__.py b/src/mrpro/data/__init__.py index 89cc6695b..b0dd9a27e 100644 --- a/src/mrpro/data/__init__.py +++ b/src/mrpro/data/__init__.py @@ -1,3 +1,5 @@ +"""Data containers, loading and saving data.""" + from mrpro.data import enums, traj_calculators, acq_filters from mrpro.data.AcqInfo import AcqIdx, AcqInfo from mrpro.data.CsmData import CsmData @@ -11,7 +13,7 @@ from mrpro.data.KNoise import KNoise from mrpro.data.KTrajectory import KTrajectory from mrpro.data.KTrajectoryRawShape import KTrajectoryRawShape -from mrpro.data.MoveDataMixin import MoveDataMixin +from mrpro.data.MoveDataMixin import MoveDataMixin, InconsistentDeviceError from mrpro.data.QData import QData from mrpro.data.QHeader import QHeader from mrpro.data.Rotation import Rotation @@ -25,6 +27,7 @@ "EncodingLimits", "IData", "IHeader", + "InconsistentDeviceError", "KData", "KHeader", "KNoise", @@ -39,4 +42,4 @@ "acq_filters", "enums", "traj_calculators" -] +] \ No newline at end of file diff --git a/src/mrpro/data/traj_calculators/KTrajectoryCalculator.py b/src/mrpro/data/traj_calculators/KTrajectoryCalculator.py index 1893d761c..0fb6274bf 100644 --- a/src/mrpro/data/traj_calculators/KTrajectoryCalculator.py +++ b/src/mrpro/data/traj_calculators/KTrajectoryCalculator.py @@ -36,7 +36,7 @@ def _kfreq(self, kheader: KHeader) -> torch.Tensor: Raises ------ - ValueError + `ValueError` Number of samples have to be the same for each readout """ n_samples = torch.unique(kheader.acq_info.number_of_samples) diff --git a/src/mrpro/data/traj_calculators/__init__.py b/src/mrpro/data/traj_calculators/__init__.py index 2fd0e5b5a..f6961e826 100644 --- a/src/mrpro/data/traj_calculators/__init__.py +++ b/src/mrpro/data/traj_calculators/__init__.py @@ -1,3 +1,5 @@ +"""Classes for calculating k-space trajectories.""" + from mrpro.data.traj_calculators.KTrajectoryCalculator import KTrajectoryCalculator from mrpro.data.traj_calculators.KTrajectoryRpe import KTrajectoryRpe from mrpro.data.traj_calculators.KTrajectorySunflowerGoldenRpe import KTrajectorySunflowerGoldenRpe @@ -13,4 +15,4 @@ "KTrajectoryRadial2D", "KTrajectoryRpe", "KTrajectorySunflowerGoldenRpe" -] \ No newline at end of file +] diff --git a/src/mrpro/operators/FastFourierOp.py b/src/mrpro/operators/FastFourierOp.py index 4ffe7e3a6..d515ec7d2 100644 --- a/src/mrpro/operators/FastFourierOp.py +++ b/src/mrpro/operators/FastFourierOp.py @@ -20,14 +20,23 @@ class FastFourierOp(LinearOperator): forward and adjoint [FFT]_. Remark regarding the fftshift/ifftshift: + fftshift shifts the zero-frequency point to the center of the data, ifftshift undoes this operation. - The input to both forward and ajoint assumes that the zero-frequency is in the center of the data. - Torch.fft.fftn and torch.fft.ifftn expect the zero-frequency to be the first entry in the tensor. - Therefore for forward and ajoint first ifftshift needs to be applied, then fftn or ifftn and then ifftshift. 
+ The input to both `~FastFourierOp.forward` and `~FastFourierOp.adjoint` + is assumed to have the zero-frequency in the center of the data. `torch.fft.fftn` + and `torch.fft.ifftn` expect the zero-frequency to be the first entry in the tensor. + Therefore in `~FastFourierOp.forward` and `~FastFourierOp.adjoint`, + first `torch.fft.ifftshift`, then `torch.fft.fftn` or `torch.fft.ifftn`, + finally `torch.fft.fftshift` is applied. + + .. note:: + See also `~mrpro.operators.FourierOp` for a Fourier operator that handles + automatic sorting of the k-space data based on a trajectory. + + References ---------- - .. [FFT] FFT https://numpy.org/doc/stable/reference/routines.fft.html + .. [FFT] `numpy: FFT <https://numpy.org/doc/stable/reference/routines.fft.html>`_ """ @@ -39,30 +48,31 @@ def __init__( ) -> None: """Initialize a Fast Fourier Operator. - If both recon_matrix and encoding_matrix are set, the operator will perform padding/cropping before and - after the transforms to match the shape in image space (recon_matrix) and k-shape (encoding_matrix). + If both `recon_matrix` and `encoding_matrix` are set, the operator will perform padding/cropping before and + after the transforms to match the shape in image space (`recon_matrix`) and k-shape (`encoding_matrix`). If both are set to None, no padding or cropping will be performed. If these are SpatialDimension, the transform dimensions must be within the last three dimensions, - typically corresponding to the (k2,k1,k0) and (z,y,x) axes of KData and IData, respectively. + typically corresponding to the `(k2,k1,k0)` and `(z,y,x)` axes of `~mrpro.data.KData` + and `~mrpro.data.IData`, respectively. Parameters ---------- dim - dim along which FFT and IFFT are applied, by default last three dimensions (-3, -2, -1), - as these correspond to k2, k1, and k0 of KData. + dim along which FFT and IFFT are applied, by default last three dimensions, + as these correspond to k2, k1, and k0 of k-space data. encoding_matrix - shape of encoded k-data along the axes in dim. Must be set if recon_matrix is set. - If encoding_matrix and recon_matrix are None, no padding or cropping will be performed. - If all values in dim are -3, -2 or -1, this can also be a SpatialDimension describing the - k-space shape in all 3 dimensions (k2, k1, k0), but only values in the dimensions in dim will be used. - Otherwise, it should be a Sequence of the same length as dim. + shape of encoded k-data along the axes in `dim`. Must be set if `recon_matrix` is set. + If `encoding_matrix` and `recon_matrix` are `None`, no padding or cropping will be performed. + If all values in dim are -3, -2 or -1, this can also be a `~mrpro.data.SpatialDimension` describing the + k-space shape in all 3 dimensions `(k2, k1, k0)`, but only values in the dimensions in dim will be used. + Otherwise, it should be a `Sequence` of the same length as `dim`. recon_matrix - shape of reconstructed image data. Must be set if encoding_matrix is set. - If encoding_matrix and recon_matrix are None, no padding or cropping will be performed.
+ If all values in dim are -3, -2 or -1, this can also be a `~mrpro.data.SpatialDimension` describing the + image-space shape in all 3 dimensions `(z, y, x)`, but only values in the dimensions in dim will be used. + Otherwise, it should be a `Sequence` of the same length as `dim`. """ super().__init__() self._dim = tuple(dim) diff --git a/src/mrpro/operators/FiniteDifferenceOp.py b/src/mrpro/operators/FiniteDifferenceOp.py index 3fcfd0e58..18f986069 100644 --- a/src/mrpro/operators/FiniteDifferenceOp.py +++ b/src/mrpro/operators/FiniteDifferenceOp.py @@ -13,7 +13,7 @@ class FiniteDifferenceOp(LinearOperator): """Finite Difference Operator.""" @staticmethod - def finite_difference_kernel(mode: str) -> torch.Tensor: + def finite_difference_kernel(mode: Literal['central', 'forward', 'backward']) -> torch.Tensor: """Finite difference kernel. Parameters ---------- @@ -27,7 +27,7 @@ def finite_difference_kernel(mode: str) -> torch.Tensor: Raises ------ - ValueError + `ValueError` If mode is not central, forward, backward or doublecentral """ if mode == 'central': @@ -97,7 +97,7 @@ def adjoint(self, y: torch.Tensor) -> tuple[torch.Tensor,]: Raises ------ - ValueError + `ValueError` If the first dimension of y is to the same as the number of dimensions along which the finite differences are calculated """ diff --git a/src/mrpro/operators/FourierOp.py b/src/mrpro/operators/FourierOp.py index c32adb71d..23b87bc27 100644 --- a/src/mrpro/operators/FourierOp.py +++ b/src/mrpro/operators/FourierOp.py @@ -18,7 +18,18 @@ class FourierOp(LinearOperator, adjoint_as_backward=True): - """Fourier Operator class.""" + """Fourier Operator class. + + This is the recommended operator for all Fourier transformations. + It auto-detects if a non-uniform or regular fast Fourier transformation is required. + For Cartesian data on a regular grid, the data is sorted and an FFT is used. + For non-Cartesian data, a NUFFT with regridding is used. + It also includes padding/cropping to the reconstruction matrix size. + + The operator can directly be constructed from a `~mrpro.data.KData` object to match its + trajectory and header information, see `FourierOp.from_kdata`. + + """ def __init__( self, @@ -29,7 +40,7 @@ def __init__( nufft_numpoints: int = 6, nufft_kbwidth: float = 2.34, ) -> None: - """Fourier Operator class. + """Initialize Fourier Operator. Parameters ---------- @@ -150,11 +161,11 @@ def forward(self, x: torch.Tensor) -> tuple[torch.Tensor,]: Parameters ---------- x - coil image data with shape: (... coils z y x) + coil image data with shape: `(... coils z y x)` Returns ------- - coil k-space data with shape: (... coils k2 k1 k0) + coil k-space data with shape: `(... coils k2 k1 k0)` """ if self._fwd_nufft_op is not None and self._omega is not None: # NUFFT Type 2 @@ -192,11 +203,11 @@ def adjoint(self, x: torch.Tensor) -> tuple[torch.Tensor,]: Parameters ---------- x - coil k-space data with shape: (... coils k2 k1 k0) + coil k-space data with shape: `(... coils k2 k1 k0)` Returns ------- - coil image data with shape: (... coils z y x) + coil image data with shape: `(... coils z y x)` """ if self._fast_fourier_op is not None and self._cart_sampling_op is not None: # IFFT @@ -307,7 +318,7 @@ class FourierGramOp(LinearOperator): the Cartesian FFT operator This Operator is only used internally and should not be used directly. - Instead, consider using the `gram` property of :class: `mrpro.operators.FourierOp`. + Instead, consider using the :py:func:`~FourierOp.gram` property of :py:class:`FourierOp`.
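A plain-`torch` sketch of the centered FFT convention that `FastFourierOp` follows (see the remark above; this is an illustration, not the operator implementation)::

    import torch

    image = torch.randn(8, 8, dtype=torch.complex64)  # zero-frequency assumed in the center
    kspace = torch.fft.fftshift(torch.fft.fftn(torch.fft.ifftshift(image), norm='ortho'))
    restored = torch.fft.fftshift(torch.fft.ifftn(torch.fft.ifftshift(kspace), norm='ortho'))
    torch.testing.assert_close(restored, image)  # the round trip recovers the image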
""" _kernel: torch.Tensor | None @@ -361,7 +372,7 @@ def forward(self, x: torch.Tensor) -> tuple[torch.Tensor,]: Parameters ---------- x - input tensor, shape (..., coils, z, y, x) + input tensor, shape: `(..., coils, z, y, x)` """ if self.nufft_gram is not None: (x,) = self.nufft_gram(x) @@ -376,7 +387,7 @@ def adjoint(self, x: torch.Tensor) -> tuple[torch.Tensor,]: Parameters ---------- x - input tensor, shape (..., coils, k2, k1, k0) + input tensor, shape: `(..., coils, k2, k1, k0)` """ return self.forward(x) diff --git a/src/mrpro/operators/LinearOperator.py b/src/mrpro/operators/LinearOperator.py index 74d3bafdd..6c301e739 100644 --- a/src/mrpro/operators/LinearOperator.py +++ b/src/mrpro/operators/LinearOperator.py @@ -2,10 +2,10 @@ from __future__ import annotations +import functools import operator from abc import abstractmethod from collections.abc import Callable, Sequence -from functools import reduce from typing import cast, no_type_check import torch @@ -58,15 +58,32 @@ def jvp(ctx: Any, *grad_inputs: Any) -> torch.Tensor: # noqa: ANN401 class LinearOperator(Operator[torch.Tensor, tuple[torch.Tensor]]): """General Linear Operator. - LinearOperators have exactly one input and one output, - and fulfill f(a*x + b*y) = a*f(x) + b*f(y) - with a,b scalars and x,y tensors. + LinearOperators have exactly one input tensors and one output tensor, + and fulfill :math:`f(a*x + b*y) = a*f(x) + b*f(y)` + with :math:`a`, :math:`b` scalars and :math:`x`, :math:`y` tensors. + + LinearOperators can be composed, added, multiplied, applied to tensors. + LinearOperators have an `~LinearOperator.H` property that returns the adjoint operator, + and a `~LinearOperator.gram` property that returns the Gram operator. + + Subclasses must implement the forward and adjoint methods. + When subclassing, the `adjoint_as_backward` class attribute can be set to `True`:: + + class MyOperator(LinearOperator, adjoint_as_backward=True): + ... + + This will make pytorch use the adjoint method as the backward method of the forward, + and the forward method as the backward method of the adjoint, avoiding the need to + have differentiable forward and adjoint methods. """ @no_type_check def __init_subclass__(cls, adjoint_as_backward: bool = False, **kwargs: Any) -> None: # noqa: ANN401 """Wrap the forward and adjoint functions for autograd. + This will wrap the forward and adjoint functions for autograd, + and use the adjoint function as the backward function of the forward and vice versa. 
+ Parameters ---------- adjoint_as_backward @@ -77,11 +94,17 @@ def __init_subclass__(cls, adjoint_as_backward: bool = False, **kwargs: Any) -> """ if adjoint_as_backward and not hasattr(cls, '_saved_forward'): cls._saved_forward, cls._saved_adjoint = cls.forward, cls.adjoint - cls.forward = lambda self, x: ( - _AutogradWrapper.apply(lambda x: self._saved_forward(x)[0], lambda x: self._saved_adjoint(x)[0], x), + cls.forward = functools.update_wrapper( + lambda self, x: ( + _AutogradWrapper.apply(lambda x: self._saved_forward(x)[0], lambda x: self._saved_adjoint(x)[0], x), + ), + cls.forward, ) - cls.adjoint = lambda self, x: ( - _AutogradWrapper.apply(lambda x: self._saved_adjoint(x)[0], lambda x: self._saved_forward(x)[0], x), + cls.adjoint = functools.update_wrapper( + lambda self, x: ( + _AutogradWrapper.apply(lambda x: self._saved_adjoint(x)[0], lambda x: self._saved_forward(x)[0], x), + ), + cls.adjoint, ) super().__init_subclass__(**kwargs) @@ -92,7 +115,13 @@ def adjoint(self, x: torch.Tensor) -> tuple[torch.Tensor,]: @property def H(self) -> LinearOperator: # noqa: N802 - """Adjoint operator.""" + """Adjoint operator. + + Obtains the adjoint of an instance of this operator as an `AdjointLinearOperator`, + which itself is a an `LinearOperator` that can be applied to tensors. + + Note: ``linear_operator.H.H == linear_operator`` + """ return AdjointLinearOperator(self) def operator_norm( @@ -104,26 +133,28 @@ def operator_norm( absolute_tolerance: float = 1e-5, callback: Callable[[torch.Tensor], None] | None = None, ) -> torch.Tensor: - """Power iteration for computing the operator norm of the linear operator. + """Power iteration for computing the operator norm of the operator. Parameters ---------- initial_value - initial value to start the iteration; if the initial value contains a zero-vector for - one of the considered problems, the function throws an value error. + initial value to start the iteration; must be element of the domain. + if the initial value contains a zero-vector for one of the considered problems, + the function throws an `ValueError`. dim - the dimensions of the tensors on which the operator operates. - For example, for a matrix-vector multiplication example, a batched matrix tensor with shape (4,30,80,160), - input tensors of shape (4,30,160) to be multiplied, and dim = None, it is understood that the - matrix representation of the operator corresponds to a block diagonal operator (with 4*30 matrices) - and thus the algorithm returns a tensor of shape (1,1,1) containing one single value. - In contrast, if for example, dim=(-1,), the algorithm computes a batched operator - norm and returns a tensor of shape (4,30,1) corresponding to the operator norms of the respective - matrices in the diagonal of the block-diagonal operator (if considered in matrix representation). - In any case, the output of the algorithm has the same number of dimensions as the elements of the - domain of the considered operator (whose dimensionality is implicitly defined by choosing dim), such that - the pointwise multiplication of the operator norm and elements of the domain (to be for example used - in a Landweber iteration) is well-defined. + The dimensions of the tensors on which the operator operates. The choice of `dim` determines how + the operator norm is inperpreted. 
For example, for a matrix-vector multiplication with a batched matrix + tensor of shape `(batch1, batch2, row, column)` and a batched input tensor of shape `(batch1, batch2, row)`: + + * If `dim=None`, the operator is considered as a block diagonal matrix with batch1*batch2 blocks + and the result is a tensor containing a single norm value (shape `(1, 1, 1)`). + + * If `dim=(-1)`, `batch1*batch2` matrices are considered, and for each a separate operator norm is computed. + + * If `dim=(-1,-2)`, `batch1` matrices with `batch2` blocks are considered, and for each matrix a + separate operator norm is computed. + + Thus, the choice of `dim` determines implicitly determines the domain of the operator. max_iterations maximum number of iterations relative_tolerance @@ -139,7 +170,10 @@ def operator_norm( Returns ------- - an estimaton of the operator norm + an estimaton of the operator norm. Shape corresponds to the shape of the input tensor `initial_value` + with the dimensions specified in `dim` reduced to a single value. + The pointwise multiplication of `initial_value` with the result of the operator norm will always + be well-defined. """ if max_iterations < 1: raise ValueError('The number of iterations should be larger than zero.') @@ -204,7 +238,7 @@ def __matmul__( ) -> Operator[Unpack[Tin2], tuple[torch.Tensor,]] | LinearOperator: """Operator composition. - Returns lambda x: self(other(x)) + Returns ``lambda x: self(other(x))`` """ if isinstance(other, mrpro.operators.IdentityOp): # neutral element of composition @@ -222,7 +256,7 @@ def __matmul__( def __radd__(self, other: torch.Tensor) -> LinearOperator: """Operator addition. - Returns lambda self(x) + other*x + Returns ``lambda x: self(x) + other*x`` """ return self + other @@ -239,8 +273,8 @@ def __add__( ) -> Operator[torch.Tensor, tuple[torch.Tensor,]] | LinearOperator: """Operator addition. - Returns lambda x: self(x) + other(x) if other is a operator, - lambda x: self(x) + other if other is a tensor + Returns ``lambda x: self(x) + other(x)`` if other is a operator, + ``lambda x: self(x) + other`` if other is a tensor """ if isinstance(other, torch.Tensor): # tensor addition @@ -263,7 +297,7 @@ def __add__( def __mul__(self, other: torch.Tensor | complex) -> LinearOperator: """Operator elementwise left multiplication with tensor/scalar. - Returns lambda x: self(x*other) + Returns ``lambda x: self(x*other)`` """ if isinstance(other, complex | float | int): if other == 0: @@ -280,7 +314,7 @@ def __mul__(self, other: torch.Tensor | complex) -> LinearOperator: def __rmul__(self, other: torch.Tensor | complex) -> LinearOperator: """Operator elementwise right multiplication with tensor/scalar. - Returns lambda x: other*self(x) + Returns ``lambda x: other*self(x)`` """ if isinstance(other, complex | float | int): if other == 0: @@ -295,14 +329,24 @@ def __rmul__(self, other: torch.Tensor | complex) -> LinearOperator: return NotImplemented # type: ignore[unreachable] def __and__(self, other: LinearOperator) -> mrpro.operators.LinearOperatorMatrix: - """Vertical stacking of two LinearOperators.""" + """Vertical stacking of two LinearOperators. + + ``A&B`` is a `~mrpro.operators.LinearOperatorMatrix` with two rows, + with ``(A&B)(x) == (A(x), B(x))``. + See `mrpro.operators.LinearOperatorMatrix` for more information. 
+ """ if not isinstance(other, LinearOperator): return NotImplemented # type: ignore[unreachable] operators = [[self], [other]] return mrpro.operators.LinearOperatorMatrix(operators) def __or__(self, other: LinearOperator) -> mrpro.operators.LinearOperatorMatrix: - """Horizontal stacking of two LinearOperators.""" + """Horizontal stacking of two LinearOperators. + + ``A|B`` is a `~mrpro.operators.LinearOperatorMatrix` with two columns, + with ``(A|B)(x1,x2) == A(x1)+B(x2)``. + See `mrpro.operators.LinearOperatorMatrix` for more information. + """ if not isinstance(other, LinearOperator): return NotImplemented # type: ignore[unreachable] operators = [[self, other]] @@ -314,8 +358,8 @@ def gram(self) -> LinearOperator: For a LinearOperator :math:`A`, the self-adjoint Gram operator is defined as :math:`A^H A`. - Note: This is a default implementation that can be overwritten by subclasses for more efficient - implementations. + .. note:: + This is the inherited default implementation. """ return self.H @ self @@ -347,7 +391,7 @@ class LinearOperatorSum(LinearOperator, OperatorSum[torch.Tensor, tuple[torch.Te def adjoint(self, x: torch.Tensor) -> tuple[torch.Tensor,]: """Adjoint of the operator addition.""" # (A+B)^H = A^H + B^H - return (reduce(operator.add, (op.adjoint(x)[0] for op in self._operators)),) + return (functools.reduce(operator.add, (op.adjoint(x)[0] for op in self._operators)),) class LinearOperatorElementwiseProductRight( diff --git a/src/mrpro/operators/LinearOperatorMatrix.py b/src/mrpro/operators/LinearOperatorMatrix.py index ab0673398..5523487ea 100644 --- a/src/mrpro/operators/LinearOperatorMatrix.py +++ b/src/mrpro/operators/LinearOperatorMatrix.py @@ -237,7 +237,7 @@ def adjoint(self, *x: torch.Tensor) -> tuple[torch.Tensor, ...]: def from_diagonal(cls, *operators: LinearOperator): """Create a diagonal LinearOperatorMatrix. - Create a square LinearOperatorMatrix with the given Linear Operators on the diagonal, + Construct a square LinearOperatorMatrix with the given Linear Operators on the diagonal, resulting in a block-diagonal linear operator. Parameters diff --git a/src/mrpro/operators/Operator.py b/src/mrpro/operators/Operator.py index d5a7ae83a..440c63aaa 100644 --- a/src/mrpro/operators/Operator.py +++ b/src/mrpro/operators/Operator.py @@ -17,7 +17,13 @@ class Operator(Generic[Unpack[Tin], Tout], ABC, torch.nn.Module): - """The general Operator class.""" + """The general Operator class. + + An operator is a function that maps one or more input tensors to one or more output tensors. + Operators always return a tuple of tensors. + Operators can be composed, added, multiplied, and applied to tensors. + The forward method must be implemented by the subclasses. + """ @abstractmethod def forward(self, *args: Unpack[Tin]) -> Tout: @@ -25,7 +31,14 @@ def forward(self, *args: Unpack[Tin]) -> Tout: ... def __call__(self, *args: Unpack[Tin]) -> Tout: - """Apply the forward operator.""" + """Apply the forward operator. + + For more information, see `forward`. + + .. note:: + Prefer using ``operator_instance(*parameters)``, i.e. using + `__call__` over using `forward`. + """ return super().__call__(*args) def __matmul__( @@ -33,7 +46,7 @@ def __matmul__( ) -> Operator[Unpack[Tin2], Tout]: """Operator composition. - Returns lambda x: self(other(x)) + Returns ``lambda x: self(other(x))`` """ return OperatorComposition(self, other) @@ -42,7 +55,7 @@ def __radd__( ) -> Operator[Unpack[Tin], tuple[Unpack[Tin]]]: """Operator right addition. 
- Returns lambda x: other*x + self(x) + Returns ``lambda x: other*x + self(x)`` """ return self + other @@ -58,8 +71,8 @@ def __add__( ) -> Operator[Unpack[Tin], Tout] | Operator[Unpack[Tin], tuple[Unpack[Tin]]]: """Operator addition. - Returns lambda x: self(x) + other(x) if other is a operator, - lambda x: self(x) + other*x if other is a tensor + Returns ``lambda x: self(x) + other(x)`` if other is a operator, + ``lambda x: self(x) + other*x`` if other is a tensor """ if isinstance(other, torch.Tensor): s = cast(Operator[Unpack[Tin], tuple[Unpack[Tin]]], self) @@ -76,14 +89,14 @@ def __add__( def __mul__(self, other: torch.Tensor | complex) -> Operator[Unpack[Tin], Tout]: """Operator multiplication with tensor. - Returns lambda x: self(x*other) + Returns ``lambda x: self(x*other)`` """ return OperatorElementwiseProductLeft(self, other) def __rmul__(self, other: torch.Tensor | complex) -> Operator[Unpack[Tin], Tout]: """Operator multiplication with tensor. - Returns lambda x: other*self(x) + Returns ``lambda x: other*self(x)`` """ return OperatorElementwiseProductRight(self, other) @@ -94,7 +107,7 @@ class OperatorComposition(Operator[Unpack[Tin2], Tout]): def __init__(self, operator1: Operator[Unpack[Tin], Tout], operator2: Operator[Unpack[Tin2], tuple[Unpack[Tin]]]): """Operator composition initialization. - Returns lambda x: operator1(operator2(x)) + Returns ``lambda x: operator1(operator2(x))`` Parameters ---------- @@ -141,7 +154,7 @@ def _add(a: tuple[torch.Tensor, ...], b: tuple[torch.Tensor, ...]) -> Tout: class OperatorElementwiseProductRight(Operator[Unpack[Tin], Tout]): """Operator elementwise right multiplication with a tensor. - Performs Tensor*Operator(x) + Performs ``Tensor*Operator(x)`` """ def __init__(self, operator: Operator[Unpack[Tin], Tout], scalar: torch.Tensor | complex): @@ -159,7 +172,7 @@ def forward(self, *args: Unpack[Tin]) -> Tout: class OperatorElementwiseProductLeft(Operator[Unpack[Tin], Tout]): """Operator elementwise left multiplication with a tensor. - Performs Operator(x*Tensor) + Performs ``Operator(x*Tensor)`` """ def __init__(self, operator: Operator[Unpack[Tin], Tout], scalar: torch.Tensor | complex): diff --git a/src/mrpro/operators/PCACompressionOp.py b/src/mrpro/operators/PCACompressionOp.py index ace625ea8..5718a9665 100644 --- a/src/mrpro/operators/PCACompressionOp.py +++ b/src/mrpro/operators/PCACompressionOp.py @@ -14,20 +14,20 @@ def __init__( self, data: torch.Tensor, n_components: int, - ): + ) -> None: """Construct a PCA based compression operator. The operator carries out an SVD followed by a threshold of the n_components largest values along the last - dimension of a data with shape (*other, joint_dim, compression_dim). A single SVD is carried out for everything - along joint_dim. Other are batch dimensions. + dimension of a data with shape `(*other, joint_dim, compression_dim)`. + A single SVD is carried out for everything along joint_dim. Other are batch dimensions. - Consider combining this operator with :class:`mrpro.operators.RearrangeOp` to make sure the data is + Consider combining this operator with `~mrpro.operators.RearrangeOp` to make sure the data is in the correct shape before applying. Parameters ---------- data - Data of shape (*other, joint_dim, compression_dim) to be used to find the principal components. + Data of shape `(*other, joint_dim, compression_dim)` to be used to find the principal components. n_components Number of principal components to keep along the compression_dim. 
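A minimal usage sketch of the compression described above (random data; shapes are illustrative)::

    import torch
    from mrpro.operators import PCACompressionOp

    data = torch.randn(10, 32, 8)  # (*other, joint_dim, compression_dim)
    pca_op = PCACompressionOp(data, n_components=4)
    (compressed,) = pca_op(torch.randn(10, 32, 8))
    print(compressed.shape)  # torch.Size([10, 32, 4])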
""" @@ -46,11 +46,11 @@ def forward(self, data: torch.Tensor) -> tuple[torch.Tensor,]: Parameters ---------- data - data to be compressed of shape (*other, joint_dim, compression_dim) + data to be compressed of shape `(*other, joint_dim, compression_dim)` Returns ------- - compressed data of shape (*other, joint_dim, n_components) + compressed data of shape `(*other, joint_dim, n_components)` """ try: result = (self._compression_matrix @ data.unsqueeze(-1)).squeeze(-1) @@ -68,11 +68,11 @@ def adjoint(self, data: torch.Tensor) -> tuple[torch.Tensor,]: Parameters ---------- data - compressed data of shape (*other, joint_dim, n_components) + compressed data of shape `(*other, joint_dim, n_components)` Returns ------- - expanded data of shape (*other, joint_dim, compression_dim) + expanded data of shape `(*other, joint_dim, compression_dim)` """ try: result = (self._compression_matrix.mH @ data.unsqueeze(-1)).squeeze(-1) diff --git a/src/mrpro/operators/SensitivityOp.py b/src/mrpro/operators/SensitivityOp.py index 562948816..5daa20310 100644 --- a/src/mrpro/operators/SensitivityOp.py +++ b/src/mrpro/operators/SensitivityOp.py @@ -7,7 +7,11 @@ class SensitivityOp(LinearOperator): - """Sensitivity operator class.""" + """Sensitivity operator class. + + The forward operator expands an image to multiple coil images according to coil sensitivity maps, + the adjoint operator reduces the coil images to a single image. + """ def __init__(self, csm: CsmData | torch.Tensor) -> None: """Initialize a Sensitivity Operator. @@ -29,11 +33,11 @@ def forward(self, img: torch.Tensor) -> tuple[torch.Tensor,]: Parameters ---------- img - image data tensor with dimensions (other 1 z y x). + image data tensor with dimensions `(other 1 z y x)`. Returns ------- - image data tensor with dimensions (other coils z y x). + image data tensor with dimensions `(other coils z y x)`. """ return (self.csm_tensor * img,) @@ -43,10 +47,10 @@ def adjoint(self, img: torch.Tensor) -> tuple[torch.Tensor,]: Parameters ---------- img - image data tensor with dimensions (other coils z y x). + image data tensor with dimensions `(other coils z y x)`. Returns ------- - image data tensor with dimensions (other 1 z y x). + image data tensor with dimensions `(other 1 z y x)`. """ return ((self.csm_tensor.conj() * img).sum(-4, keepdim=True),) diff --git a/src/mrpro/operators/SliceProjectionOp.py b/src/mrpro/operators/SliceProjectionOp.py index dad2f742c..417a906a6 100644 --- a/src/mrpro/operators/SliceProjectionOp.py +++ b/src/mrpro/operators/SliceProjectionOp.py @@ -8,13 +8,13 @@ import einops import numpy as np import torch -from numpy._typing import _NestedSequence as NestedSequence from torch import Tensor from mrpro.data.Rotation import Rotation from mrpro.data.SpatialDimension import SpatialDimension from mrpro.operators.LinearOperator import LinearOperator from mrpro.utils.slice_profiles import SliceSmoothedRectangular +from mrpro.utils.typing import NestedSequence class _MatrixMultiplicationCtx(torch.autograd.function.FunctionCtx): @@ -77,7 +77,7 @@ class SliceProjectionOp(LinearOperator): """Slice Projection Operator. This operation samples from a 3D Volume a slice with a given rotation and shift - (relative to the center of the volume) according to the slice_profile. + (relative to the center of the volume) according to the `slice_profile`. It can, for example, be used to describe the slice selection of a 2D MRI sequence from the 3D Volume. 
@@ -118,9 +118,9 @@ def __init__( Parameters ---------- input_shape - Shape of the 3D volume to sample from (z, y, x) + Shape of the 3D volume to sample from. `(z, y, x)` slice_rotation - Rotation that describes the orientation of the plane. If None, + Rotation that describes the orientation of the plane. If `None`, an identity rotation is used. slice_shift Offset of the plane in the volume perpendicular plane from the center of the volume. @@ -128,7 +128,7 @@ def __init__( slice_profile A function returning the relative intensity of the slice profile at a position x (relative to the nominal profile center). This can also be a nested Sequence or an - numpy array of functions. + numpy array of functions. See `mrpro.utils.slice_profiles` for examples. If it is a single float, it will be interpreted as the FWHM of a rectangular profile. optimize_for Whether to optimize for forward or adjoint operation or both. @@ -214,12 +214,12 @@ def forward(self, x: Tensor) -> tuple[Tensor]: Parameters ---------- x - 3D Volume with shape (..., z, y, x) + 3D Volume with shape `(..., z, y, x)` with z, y, x matching the input_shape Returns ------- - A 2D slice with shape (..., 1, max(z, y, x), (max(z, y, x))) + A 2D slice with shape `(..., 1, max(z, y, x), (max(z, y, x)))` """ match (self.matrix, self.matrix_adjoint): # selection based on the optimize_for setting @@ -246,13 +246,13 @@ def adjoint(self, x: Tensor) -> tuple[Tensor,]: Parameters ---------- x - 2D Slice with shape (..., 1, max(z, y, x), (max(z, y, x))) - with z, y, x matching the input_shape + 2D Slice with shape `(..., 1, max(z, y, x), (max(z, y, x)))` + with `z, y, x` matching the input_shape Returns ------- - A 3D Volume with shape (..., z, y, x) - with z, y, x matching the input_shape + A 3D Volume with shape `(..., z, y, x)` + with `z, y, x` matching the input_shape """ match (self.matrix, self.matrix_adjoint): # selection based on the optimize_for setting @@ -340,12 +340,12 @@ def projection_matrix( Rotation that describes the orientation of the plane offset: Tensor Shift of the plane from the center of the volume in the rotated coordinate system - in units of the 3D volume, order z, y, x + in units of the 3D volume, order `z, y, x` w: int Factor that determines the number of pixels that are considered in the projection along the slice profile direction. slice_function - Function that describes the slice profile. + Function that describes the slice profile. See `mrpro.utils.slice_profiles` for examples. rotation_center Center of rotation, if None the center of the volume is used, i.e. for 4 pixels 0 1 2 3 it is between 1 and 2 diff --git a/src/mrpro/operators/WaveletOp.py b/src/mrpro/operators/WaveletOp.py index 3fa6390ce..222d4c8a1 100644 --- a/src/mrpro/operators/WaveletOp.py +++ b/src/mrpro/operators/WaveletOp.py @@ -68,11 +68,11 @@ def __init__( Raises ------ - ValueError + `ValueError` If wavelets are calculated for more than three dimensions. - ValueError + `ValueError` If wavelet dimensions and domain shape do not match. - NotImplementedError + `NotImplementedError` If any dimension of the domain shape is odd. Adjoint will lead to the wrong domain shape.
""" super().__init__() diff --git a/src/mrpro/operators/__init__.py b/src/mrpro/operators/__init__.py index c22f386cd..6db3244ee 100644 --- a/src/mrpro/operators/__init__.py +++ b/src/mrpro/operators/__init__.py @@ -1,3 +1,5 @@ +"""Linear operators (such as FourierOp), functionals/loss functions, and qMRI signal models.""" + from mrpro.operators.Operator import Operator from mrpro.operators.LinearOperator import LinearOperator from mrpro.operators.Functional import Functional, ProximableFunctional, ElementaryFunctional, ElementaryProximableFunctional, ScaledFunctional, ScaledProximableFunctional @@ -17,6 +19,7 @@ from mrpro.operators.PCACompressionOp import PCACompressionOp from mrpro.operators.PhaseOp import PhaseOp from mrpro.operators.ProximableFunctionalSeparableSum import ProximableFunctionalSeparableSum +from mrpro.operators.RearrangeOp import RearrangeOp from mrpro.operators.SensitivityOp import SensitivityOp from mrpro.operators.SignalModel import SignalModel from mrpro.operators.SliceProjectionOp import SliceProjectionOp @@ -46,6 +49,7 @@ "PhaseOp", "ProximableFunctional", "ProximableFunctionalSeparableSum", + "RearrangeOp", "ScaledFunctional", "ScaledProximableFunctional", "SensitivityOp", @@ -56,4 +60,4 @@ "ZeroPadOp", "functionals", "models" -] \ No newline at end of file +] diff --git a/src/mrpro/operators/functionals/L1Norm.py b/src/mrpro/operators/functionals/L1Norm.py index 29f7b753c..9fc22d407 100644 --- a/src/mrpro/operators/functionals/L1Norm.py +++ b/src/mrpro/operators/functionals/L1Norm.py @@ -24,7 +24,7 @@ def forward( ) -> tuple[torch.Tensor]: """Forward method. - Compute the l1-norm of the input. + Compute the L1 norm of the input. Parameters ---------- @@ -33,7 +33,7 @@ def forward( Returns ------- - l1 norm of the input tensor + L1 norm of the input tensor """ value = (self.weight * (x - self.target)).abs() @@ -45,7 +45,7 @@ def forward( def prox(self, x: torch.Tensor, sigma: torch.Tensor | float = 1.0) -> tuple[torch.Tensor]: """Proximal Mapping of the L1 Norm. - Compute the proximal mapping of the L1-norm. + Compute the proximal mapping of the L1 norm. Parameters ---------- @@ -73,7 +73,7 @@ def prox_convex_conj( ) -> tuple[torch.Tensor]: """Convex conjugate of the L1 Norm. - Compute the proximal mapping of the convex conjugate of the L1-norm. + Compute the proximal mapping of the convex conjugate of the L1 norm. Parameters ---------- diff --git a/src/mrpro/operators/functionals/L1NormViewAsReal.py b/src/mrpro/operators/functionals/L1NormViewAsReal.py index e4227c70b..aeb12d2aa 100644 --- a/src/mrpro/operators/functionals/L1NormViewAsReal.py +++ b/src/mrpro/operators/functionals/L1NormViewAsReal.py @@ -1,4 +1,4 @@ -"""L1 Norm.""" +"""L1 Norm with :math:`C` as :math:`R^2`.""" import torch @@ -26,7 +26,7 @@ def forward( ) -> tuple[torch.Tensor]: """Forward method. - Compute the L1-norm of the input with C identified as R^2. + Compute the L1 norm of the input with :math:`C` identified as :math:`R^2` Parameters ---------- @@ -35,7 +35,7 @@ def forward( Returns ------- - L1 norm of the input tensor, where C is identified as R^2 + L1 norm of the input tensor, where :math:`C` is identified as :math:`R^2` """ dtype = torch.promote_types(self.target.dtype, x.dtype) x = x.to(dtype) @@ -56,7 +56,7 @@ def forward( def prox(self, x: torch.Tensor, sigma: torch.Tensor | float = 1.0) -> tuple[torch.Tensor]: """Proximal Mapping of the L1 Norm. - Apply the proximal mapping of the L1-norm with C identified as R^2. 
+        Apply the proximal mapping of the L1 norm with :math:`C` identified as :math:`R^2`.

         Parameters
         ----------
diff --git a/src/mrpro/operators/functionals/L2NormSquared.py b/src/mrpro/operators/functionals/L2NormSquared.py
index c8d001f97..fc825b835 100644
--- a/src/mrpro/operators/functionals/L2NormSquared.py
+++ b/src/mrpro/operators/functionals/L2NormSquared.py
@@ -27,7 +27,7 @@ def forward(
     ) -> tuple[torch.Tensor]:
         """Forward method.

-        Compute the squared L2-norm of the input.
+        Compute the squared L2 norm of the input.

         Parameters
         ----------
@@ -36,7 +36,7 @@ def forward(

         Returns
         -------
-            squared l2 norm of the input tensor
+            squared L2 norm of the input tensor
         """
         value = (self.weight * (x - self.target)).abs().square()

@@ -52,7 +52,7 @@ def prox(
     ) -> tuple[torch.Tensor]:
         """Proximal Mapping of the squared L2 Norm.

-        Apply the proximal mapping of the squared L2-norm.
+        Apply the proximal mapping of the squared L2 norm.

         Parameters
         ----------
@@ -81,7 +81,7 @@ def prox_convex_conj(
     ) -> tuple[torch.Tensor]:
         """Convex conjugate of squared L2 Norm.

-        Apply the proximal mapping of the convex conjugate of the squared L2-norm.
+        Apply the proximal mapping of the convex conjugate of the squared L2 norm.

         Parameters
         ----------
diff --git a/src/mrpro/operators/models/InversionRecovery.py b/src/mrpro/operators/models/InversionRecovery.py
index eb691606c..8573e3c96 100644
--- a/src/mrpro/operators/models/InversionRecovery.py
+++ b/src/mrpro/operators/models/InversionRecovery.py
@@ -29,14 +29,14 @@ def forward(self, m0: torch.Tensor, t1: torch.Tensor) -> tuple[torch.Tensor,]:
         ----------
         m0
             equilibrium signal / proton density
-            with shape (... other, coils, z, y, x)
+            with shape `(*other, coils, z, y, x)`
         t1
             longitudinal relaxation time T1
-            with shape (... other, coils, z, y, x)
+            with shape `(*other, coils, z, y, x)`

         Returns
         -------
-            signal with shape (time ... other, coils, z, y, x)
+            signal with shape `(time, *other, coils, z, y, x)`
         """
         ti = unsqueeze_right(self.ti, m0.ndim - (self.ti.ndim - 1))  # -1 for time
         signal = m0 * (1 - 2 * torch.exp(-(ti / t1)))
diff --git a/src/mrpro/operators/models/MOLLI.py b/src/mrpro/operators/models/MOLLI.py
index 9313e2b4a..783cecdba 100644
--- a/src/mrpro/operators/models/MOLLI.py
+++ b/src/mrpro/operators/models/MOLLI.py
@@ -40,17 +40,17 @@ def forward(self, a: torch.Tensor, c: torch.Tensor, t1: torch.Tensor) -> tuple[t
         ----------
         a
             parameter a in MOLLI signal model
-            with shape (... other, coils, z, y, x)
+            with shape `(*other, coils, z, y, x)`
         c
             parameter c = b/a in MOLLI signal model
-            with shape (... other, coils, z, y, x)
+            with shape `(*other, coils, z, y, x)`
         t1
             longitudinal relaxation time T1
-            with shape (... other, coils, z, y, x)
+            with shape `(*other, coils, z, y, x)`

         Returns
         -------
-            signal with shape (time ... other, coils, z, y, x)
+            signal with shape `(time, *other, coils, z, y, x)`
         """
         ti = unsqueeze_right(self.ti, a.ndim - (self.ti.ndim - 1))  # -1 for time
         signal = a * (1 - c * torch.exp(ti / t1 * (1 - c)))
diff --git a/src/mrpro/operators/models/MonoExponentialDecay.py b/src/mrpro/operators/models/MonoExponentialDecay.py
index a899d84c2..cdc9530b4 100644
--- a/src/mrpro/operators/models/MonoExponentialDecay.py
+++ b/src/mrpro/operators/models/MonoExponentialDecay.py
@@ -29,14 +29,14 @@ def forward(self, m0: torch.Tensor, decay_constant: torch.Tensor) -> tuple[torch
         ----------
         m0
             equilibrium signal / proton density
-            with shape (... other, coils, z, y, x)
+            with shape `(*other, coils, z, y, x)`
         decay_constant
             exponential decay constant (e.g. T2, T2* or T1rho)
-            with shape (... other, coils, z, y, x)
+            with shape `(*other, coils, z, y, x)`

         Returns
         -------
-            signal with shape (time ... other, coils, z, y, x)
+            signal with shape `(time, *other, coils, z, y, x)`
         """
         decay_time = unsqueeze_right(self.decay_time, m0.ndim - (self.decay_time.ndim - 1))  # -1 for time
         signal = m0 * torch.exp(-(decay_time / decay_constant))
diff --git a/src/mrpro/operators/models/SaturationRecovery.py b/src/mrpro/operators/models/SaturationRecovery.py
index 86ecb0750..8db18f74c 100644
--- a/src/mrpro/operators/models/SaturationRecovery.py
+++ b/src/mrpro/operators/models/SaturationRecovery.py
@@ -29,14 +29,14 @@ def forward(self, m0: torch.Tensor, t1: torch.Tensor) -> tuple[torch.Tensor,]:
         ----------
         m0
             equilibrium signal / proton density
-            with shape (... other, coils, z, y, x)
+            with shape `(*other, coils, z, y, x)`
         t1
             longitudinal relaxation time T1
-            with shape (... other, coils, z, y, x)
+            with shape `(*other, coils, z, y, x)`

         Returns
         -------
-            signal with shape (time ... other, coils, z, y, x)
+            signal with shape `(time, *other, coils, z, y, x)`
         """
         ti = unsqueeze_right(self.ti, m0.ndim - (self.ti.ndim - 1))  # -1 for time
         signal = m0 * (1 - torch.exp(-(ti / t1)))
diff --git a/src/mrpro/operators/models/TransientSteadyStateWithPreparation.py b/src/mrpro/operators/models/TransientSteadyStateWithPreparation.py
index e08cefa0c..fe3d3c128 100644
--- a/src/mrpro/operators/models/TransientSteadyStateWithPreparation.py
+++ b/src/mrpro/operators/models/TransientSteadyStateWithPreparation.py
@@ -20,18 +20,18 @@ class TransientSteadyStateWithPreparation(SignalModel[torch.Tensor, torch.Tensor
    [Part A: 180° inversion pulse][Part B: spoiler gradient][Part C: Continuous data acquisition]

    - Part A: The 180° pulse leads to an inversion of the equilibrium magnetization (:math:`M_0`) to :math:`-M_0`.
-      This can be described by setting the scaling factor ``m0_scaling_preparation`` to -1.
+      This can be described by setting the scaling factor `m0_scaling_preparation` to `-1`.

    - Part B: Commonly after an inversion pulse a strong spoiler gradient is played out to compensate for
      non-perfect inversion. During this time the magnetization :math:`M_z(t)` follows the signal model:
-      :math:`M_z(t) = M_0 + (s * M_0 - M_0)e^{(-t / T1)}` where :math:`s` is ``m0_scaling_preparation``.
+      :math:`M_z(t) = M_0 + (s * M_0 - M_0)e^{(-t / T1)}` where :math:`s` is `m0_scaling_preparation`.

    - Part C: After the spoiler gradient the data acquisition starts and the magnetization :math:`M_z(t)` can be
-      described by the signal model: :math:`M_z(t) = M_0^* + (M_{init} - M_0^*)e^{(-t / T1^*)}` where the initial
-      magnetization is :math:`M_{init} = M_0 + (s*M_0 - M_0)e^{(-\Delta t / T1)}` where :math:`s` is
-      ``m0_scaling_preparation`` and :math:`\Delta t` is ``delay_after_preparation``. The effective longitudinal
-      relaxation time is :math:`T1^* = 1/(1/T1 - ln(cos(\alpha)/TR)`
-      where :math:`TR` is ``repetition_time`` and :math:`\alpha` is ``flip_angle``.
+      described by the signal model: :math:`M_z(t) = M_0^* + (M_{init} - M_0^*)e^{(-t / T1^*)}`
+      where the initial magnetization is :math:`M_{init} = M_0 + (s*M_0 - M_0)e^{(-\Delta t / T1)}`,
+      where :math:`s` is `m0_scaling_preparation` and :math:`\Delta t` is `delay_after_preparation`.
+      The effective longitudinal relaxation time is :math:`T1^* = 1/(1/T1 - ln(cos(\alpha))/TR)`
+      where :math:`TR` is `repetition_time` and :math:`\alpha` is `flip_angle`.

    The steady-state magnetization is :math:`M_0^* = M_0 T1^* / T1`.
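As a quick numerical check of the Part C relations (hypothetical values, flip angle in rad; `math.log` is the natural logarithm written as `ln` above):

    import math

    t1 = 1.0                      # s, longitudinal relaxation time
    tr = 5e-3                     # s, repetition_time
    flip_angle = math.radians(5)  # rad

    t1_star = 1 / (1 / t1 - math.log(math.cos(flip_angle)) / tr)
    m0_star_over_m0 = t1_star / t1  # M0* / M0 = T1* / T1

    print(f'{t1_star:.3f} s')  # ~0.567 s: the continuous readout shortens the apparent T1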

    References
    ----------
@@ -57,10 +57,10 @@ def __init__(
         Parameters
         ----------
         sampling_time
-            Time points when model is evaluated. A sampling_time of 0 describes the first acquired data point after the
-            inversion pulse and spoiler gradients. To take the T1 relaxation during the delay between inversion pulse
-            and start of data acquisition into account, set the delay_after_preparation > 0.
-            with shape (time, ...)
+            Time points when model is evaluated. A `sampling_time` of 0 describes the first acquired data point
+            after the inversion pulse and spoiler gradients. To take the T1 relaxation during the delay between
+            inversion pulse and start of data acquisition into account, set the `delay_after_preparation` > 0.
+            with shape `(time, ...)`
         repetition_time
             repetition time
         m0_scaling_preparation
@@ -91,17 +91,17 @@ def forward(self, m0: torch.Tensor, t1: torch.Tensor, flip_angle: torch.Tensor)
         ----------
         m0
             equilibrium signal / proton density
-            with shape (... other, coils, z, y, x)
+            with shape `(*other, coils, z, y, x)`
         t1
             longitudinal relaxation time T1
-            with shape (... other, coils, z, y, x)
+            with shape `(*other, coils, z, y, x)`
         flip_angle
             flip angle of data acquisition in rad
-            with shape (... other, coils, z, y, x)
+            with shape `(*other, coils, z, y, x)`

         Returns
         -------
-            signal with shape (time ... other, coils, z, y, x)
+            signal with shape `(time, *other, coils, z, y, x)`
         """
         m0_ndim = m0.ndim
diff --git a/src/mrpro/operators/models/WASABI.py b/src/mrpro/operators/models/WASABI.py
index a9c51e723..5709b0a89 100644
--- a/src/mrpro/operators/models/WASABI.py
+++ b/src/mrpro/operators/models/WASABI.py
@@ -79,7 +79,7 @@ def forward(

         Returns
         -------
-            signal with shape `(offsets *other, coils, z, y, x)`
+            signal with shape `(offsets, *other, coils, z, y, x)`
         """
         offsets = unsqueeze_right(self.offsets, b0_shift.ndim - (self.offsets.ndim - 1))  # -1 for offset
         delta_b0 = offsets - b0_shift
diff --git a/src/mrpro/operators/models/WASABITI.py b/src/mrpro/operators/models/WASABITI.py
index 5a7a59a04..f9a61104d 100644
--- a/src/mrpro/operators/models/WASABITI.py
+++ b/src/mrpro/operators/models/WASABITI.py
@@ -79,7 +79,7 @@ def forward(self, b0_shift: torch.Tensor, relative_b1: torch.Tensor, t1: torch.T

         Returns
         -------
-            signal with shape `(offsets *other, coils, z, y, x)`
+            signal with shape `(offsets, *other, coils, z, y, x)`
         """
         delta_ndim = b0_shift.ndim - (self.offsets.ndim - 1)  # -1 for offset
         offsets = unsqueeze_right(self.offsets, delta_ndim)
diff --git a/src/mrpro/operators/models/__init__.py b/src/mrpro/operators/models/__init__.py
index 629a560d3..ec540b247 100644
--- a/src/mrpro/operators/models/__init__.py
+++ b/src/mrpro/operators/models/__init__.py
@@ -1,3 +1,5 @@
+"""qMRI signal models."""
+
 from mrpro.operators.models.SaturationRecovery import SaturationRecovery
 from mrpro.operators.models.InversionRecovery import InversionRecovery
 from mrpro.operators.models.MOLLI import MOLLI
@@ -13,4 +15,4 @@
     "TransientSteadyStateWithPreparation",
     "WASABI",
     "WASABITI"
-]
\ No newline at end of file
+]
diff --git a/src/mrpro/phantoms/EllipsePhantom.py b/src/mrpro/phantoms/EllipsePhantom.py
index 277e1bb18..3355ea8c4 100644
--- a/src/mrpro/phantoms/EllipsePhantom.py
+++ b/src/mrpro/phantoms/EllipsePhantom.py
@@ -11,14 +11,7 @@


 class EllipsePhantom:
-    """Numerical phantom as the sum of different ellipses.
-
-    Parameters
-    ----------
-    ellipses
-        ellipses defined by their center, radii and intensity.
-        if None, defaults to three ellipses
-    """
+    """Numerical phantom as the sum of different ellipses."""

     def __init__(self, ellipses: Sequence[EllipseParameters] | None = None):
         """Initialize ellipse phantom.

         Parameters
         ----------
         ellipses
-            Sequence of EllipseParameters defining the ellipses.
-            if None, defaults to three ellipses with different parameters.
+            Parameters defining the ellipses.
+            If `None`, defaults to three ellipses with different parameters.
         """
         if ellipses is None:
             self.ellipses = [
@@ -41,7 +34,7 @@ def kspace(self, ky: torch.Tensor, kx: torch.Tensor) -> torch.Tensor:
         """Create 2D analytic kspace data based on given k-space locations.

-        For a corresponding image with 256 x 256 voxel, the k-space locations should be defined within [-128, 127]
+        For a corresponding image with 256 x 256 voxels, the k-space locations should be defined within ``[-128, 127]``

         The Fourier representation of ellipses can be analytically described by Bessel functions [KOA2007]_.

@@ -86,7 +79,7 @@ def image_space(self, image_dimensions: SpatialDimension[int]) -> torch.Tensor:
         ----------
         image_dimensions
             number of voxels in the image
-            This is a 2D simulation so the output will be (1 1 1 image_dimensions.y image_dimensions.x)
+            This is a 2D simulation so the output will be `(1 1 1 image_dimensions.y image_dimensions.x)`
         """
         # Calculate image representation of phantom
         ny, nx = image_dimensions.y, image_dimensions.x
diff --git a/src/mrpro/phantoms/__init__.py b/src/mrpro/phantoms/__init__.py
index 081d2e465..570411948 100644
--- a/src/mrpro/phantoms/__init__.py
+++ b/src/mrpro/phantoms/__init__.py
@@ -1,3 +1,5 @@
+"""Numerical phantoms."""
+
 from mrpro.phantoms.EllipsePhantom import EllipsePhantom
 from mrpro.phantoms.phantom_elements import EllipseParameters

-__all__ = ["EllipseParameters", "EllipsePhantom"]
\ No newline at end of file
+__all__ = ["EllipseParameters", "EllipsePhantom"]
diff --git a/src/mrpro/utils/__init__.py b/src/mrpro/utils/__init__.py
index ca1dc740a..e075dd229 100644
--- a/src/mrpro/utils/__init__.py
+++ b/src/mrpro/utils/__init__.py
@@ -1,3 +1,5 @@
+"""Functions for tensor shaping, unit conversion, typing, etc."""
+
 from mrpro.utils import slice_profiles
 from mrpro.utils import typing
 from mrpro.utils import unit_conversion
@@ -22,4 +24,4 @@
     "unsqueeze_left",
     "unsqueeze_right",
     "zero_pad_or_crop"
-]
\ No newline at end of file
+]
diff --git a/src/mrpro/utils/reshape.py b/src/mrpro/utils/reshape.py
index 0b208381b..14c3cab38 100644
--- a/src/mrpro/utils/reshape.py
+++ b/src/mrpro/utils/reshape.py
@@ -11,7 +11,7 @@ def unsqueeze_right(x: torch.Tensor, n: int) -> torch.Tensor:
     """Unsqueeze multiple times in the rightmost dimension.

     Example:
-        tensor with shape (1,2,3) and n=2 would result in tensor with shape (1,2,3,1,1)
+        tensor with shape `(1,2,3)` and `n=2` would result in tensor with shape `(1,2,3,1,1)`

     Parameters
     ----------
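`unsqueeze_right` (above) and its left-hand counterpart in the next hunk are equivalent to indexing with `None`; a minimal plain-torch sketch of the documented shapes:

    import torch

    x = torch.ones(1, 2, 3)
    right = x[(...,) + (None,) * 2]  # unsqueeze_right(x, 2) -> shape (1, 2, 3, 1, 1)
    left = x[(None,) * 2 + (...,)]   # unsqueeze_left(x, 2)  -> shape (1, 1, 1, 2, 3)
    print(right.shape, left.shape)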
@@ -31,7 +31,7 @@ def unsqueeze_left(x: torch.Tensor, n: int) -> torch.Tensor:
     """Unsqueze multiple times in the leftmost dimension.

     Example:
-        tensor with shape (1,2,3) and n=2 would result in tensor with shape (1,1,1,2,3)
+        tensor with shape `(1,2,3)` and `n=2` would result in tensor with shape `(1,1,1,2,3)`

     Parameters
     ----------
@@ -53,11 +53,11 @@ def broadcast_right(*x: torch.Tensor) -> tuple[torch.Tensor, ...]:

     Given multiple tensors, apply broadcasting with unsqueezed on the right.
     First, tensors are unsqueezed on the right to the same number of dimensions.
-    Then, torch.broadcasting is used.
+    Then, `torch.broadcast_tensors` is used.

     Example:
-        tensors with shapes (1,2,3), (1,2), (2)
-        results in tensors with shape (2,2,3)
+        tensors with shapes `(1,2,3), (1,2), (2)`
+        results in tensors with shape `(2,2,3)`

     Parameters
     ----------
@@ -78,7 +78,7 @@ def reduce_view(x: torch.Tensor, dim: int | Sequence[int] | None = None) -> torc

     Reduce either all or specific dimensions to a singleton if it
     points to the same memory address.
-    This undoes expand.
+    This undoes `torch.Tensor.expand`.

     Parameters
     ----------
@@ -86,7 +86,7 @@ def reduce_view(x: torch.Tensor, dim: int | Sequence[int] | None = None) -> torc
         input tensor
     dim
         only reduce expanded dimensions in the specified dimensions.
-        If None, reduce all expanded dimensions.
+        If `None`, reduce all expanded dimensions.
     """
     if dim is None:
         dim_: Sequence[int] = range(x.ndim)
@@ -105,7 +105,7 @@ def reduce_view(x: torch.Tensor, dim: int | Sequence[int] | None = None) -> torc

 @lru_cache
 def _reshape_idx(old_shape: tuple[int, ...], new_shape: tuple[int, ...], old_stride: tuple[int, ...]) -> list[slice]:
-    """Get reshape reduce index (Cached helper function for reshape_broadcasted).
+    """Get reshape reduce index (Cached helper function for `reshape_broadcasted`).

     This function tries to group axes from new_shape and old_shape into the smallest groups that have
     the same number of elements, starting from the right.
@@ -113,7 +113,7 @@ def _reshape_idx(old_shape: tuple[int, ...], new_shape: tuple[int, ...], old_str
     Example:
         old_shape = (30, 2, 2, 3)
-        new_shape = (6, 5, 4, 3)
+        new_shape = `(6, 5, 4, 3)`
     Will results in the groups (starting from the right):
         - old: 3 new: 3
        - old: 2, 2 new: 4
        - old: 30 new: 6, 5
     Only the "old" groups are important.
     If all axes that are grouped together in an "old" group are stride 0 (=broadcasted)
     we can collapse them to singleton dimensions.
+
     This function returns the indexer that either collapses dimensions to singleton or keeps all elements,
     i.e. the slices in the returned list are all either slice(1) or slice(None).
     """
@@ -160,7 +161,7 @@ def reshape_broadcasted(tensor: torch.Tensor, *shape: int) -> torch.Tensor:
     tensor
         The input tensor to reshape.
     shape
-        The target shape for the tensor. One of the values can be `-1` and its size will be inferred.
+        The target shape for the tensor. One of the values can be ``-1`` and its size will be inferred.

     Returns
     -------
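The fact exploited by `reduce_view` and `_reshape_idx` above is that `torch.Tensor.expand` creates broadcast dimensions with stride 0, which can therefore be collapsed back to singletons without copying. A small sketch of that detection:

    import torch

    t = torch.arange(3.0).reshape(1, 3).expand(4, 3)  # expanded view, no copy
    print(t.stride())  # (0, 1): dimension 0 is broadcast

    # collapse every expanded (stride-0, size > 1) dimension back to a singleton
    idx = tuple(
        slice(1) if stride == 0 and size > 1 else slice(None)
        for stride, size in zip(t.stride(), t.shape)
    )
    print(t[idx].shape)  # torch.Size([1, 3])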
diff --git a/src/mrpro/utils/slice_profiles.py b/src/mrpro/utils/slice_profiles.py
index 8eaf95311..44e9d761a 100644
--- a/src/mrpro/utils/slice_profiles.py
+++ b/src/mrpro/utils/slice_profiles.py
@@ -16,11 +16,11 @@ class SliceProfileBase(abc.ABC, torch.nn.Module):

     @abc.abstractmethod
     def forward(self, x: Tensor) -> Tensor:
-        """Evaluate the slice profile at a position x."""
+        """Evaluate the slice profile at a position."""
         raise NotImplementedError

     def random_sample(self, size: Sequence[int]) -> Tensor:
-        """Sample n random positions from the profile.
+        """Sample `n` random positions from the profile.

         Use the profile as a probability density function to sample positions.

diff --git a/src/mrpro/utils/smap.py b/src/mrpro/utils/smap.py
index b05e4921f..68016029e 100644
--- a/src/mrpro/utils/smap.py
+++ b/src/mrpro/utils/smap.py
@@ -1,4 +1,4 @@
-"""Smap utility function."""
+"""Serial mapping function."""

 from collections.abc import Callable, Sequence

@@ -13,13 +13,13 @@ def smap(
     """Apply a function to a tensor serially along multiple dimensions.

     The function is applied serially without a batch dimensions.
-    Compared to torch.vmap, it works with arbitrary functions, but is slower.
+    Compared to `torch.vmap`, it works with arbitrary functions, but is slower.

     Parameters
     ----------
     function
         Function to apply to the tensor.
-        Should handle len(fun_dims) dimensions and not change the number of dimensions.
+        Should handle ``len(fun_dims)`` dimensions and not change the number of dimensions.
     tensor
         Tensor to apply the function to.
     passed_dimensions
diff --git a/src/mrpro/utils/split_idx.py b/src/mrpro/utils/split_idx.py
index 5d2dd535c..edec8c7c9 100644
--- a/src/mrpro/utils/split_idx.py
+++ b/src/mrpro/utils/split_idx.py
@@ -33,9 +33,9 @@ def split_idx(idx: torch.Tensor, np_per_block: int, np_overlap: int = 0, cyclic:

     Raises
     ------
-    ValueError
+    `ValueError`
         If the provided idx is not 1D
-    ValueError
+    `ValueError`
         If the overlap is smaller than the number of points per block
     """
     # Make sure idx is 1D
diff --git a/src/mrpro/utils/typing.py b/src/mrpro/utils/typing.py
index 96ad34d07..9b2b26b5b 100644
--- a/src/mrpro/utils/typing.py
+++ b/src/mrpro/utils/typing.py
@@ -2,7 +2,7 @@

 from typing import TYPE_CHECKING, TypeAlias

-from typing_extensions import Any
+from typing_extensions import Any, Protocol, TypeVar

 if TYPE_CHECKING:
     from types import EllipsisType
@@ -10,7 +10,7 @@
     import torch
     from numpy import ndarray
-    from torch._C import _NestedSequence as NestedSequence
+    from numpy._typing import _NestedSequence as NestedSequence
     from typing_extensions import SupportsIndex

     # This matches the torch.Tensor indexer typehint
@@ -25,7 +25,16 @@
 else:
     TorchIndexerType: TypeAlias = Any
-    NestedSequence: TypeAlias = Any
+    """Torch indexer type."""
+
+    _T = TypeVar('_T')
+
+    class NestedSequence(Protocol[_T]):
+        """A nested sequence type."""
+
+        ...
+
     NumpyIndexerType: TypeAlias = Any
+    """Numpy indexer type."""

 __all__ = ['NestedSequence', 'NumpyIndexerType', 'TorchIndexerType']
diff --git a/src/mrpro/utils/unit_conversion.py b/src/mrpro/utils/unit_conversion.py
index 5a1b5aaae..f3d4b84c6 100644
--- a/src/mrpro/utils/unit_conversion.py
+++ b/src/mrpro/utils/unit_conversion.py
@@ -18,7 +18,7 @@
 ]

 GYROMAGNETIC_RATIO_PROTON = 42.58 * 1e6
-r"""The gyromagnetic ratio :math:`\frac{\gamma}{2\pi}` of 1H in H20 in Hz/T"""
+r"""The gyromagnetic ratio :math:`\frac{\gamma}{2\pi}` of 1H in H2O in Hz/T"""

 # Conversion functions for units
 T = TypeVar('T', float, torch.Tensor)
diff --git a/src/mrpro/utils/zero_pad_or_crop.py b/src/mrpro/utils/zero_pad_or_crop.py
index 23fb39599..d46bdd954 100644
--- a/src/mrpro/utils/zero_pad_or_crop.py
+++ b/src/mrpro/utils/zero_pad_or_crop.py
@@ -19,8 +19,8 @@ def normalize_index(ndim: int, index: int) -> int:

     Raises
     ------
-    IndexError
-        if index is outside [-ndim,ndim)
+    `IndexError`
+        if index is outside ``[-ndim, ndim)``
     """
     if 0 < index < ndim:
         return index
@@ -43,8 +43,9 @@ def zero_pad_or_crop(
     data
     new_shape
         desired shape of data
-    dim:
-        dimensions the new_shape corresponds to. None (default) is interpreted as last len(new_shape) dimensions.
+    dim
+        dimensions the `new_shape` corresponds to.
+        `None` (default) is interpreted as last ``len(new_shape)`` dimensions.

     Returns
     -------
diff --git a/tests/data/test_kheader.py b/tests/data/test_kheader.py
index 55cbfdf67..b41be16c2 100644
--- a/tests/data/test_kheader.py
+++ b/tests/data/test_kheader.py
@@ -27,7 +27,7 @@ def test_kheader_set_missing_defaults(random_mandatory_ismrmrd_header, random_ac


 def test_kheader_verify_None(random_mandatory_ismrmrd_header, random_acq_info):
-    """Correct handling of None and missing values in KHeader creation."""
+    """Correct handling of `None` and missing values in `KHeader` creation."""
     tr_default = None
     fa_default = torch.as_tensor([0.1])
     defaults = {'trajectory': DummyTrajectory(), 'tr': tr_default, 'fa': fa_default}
diff --git a/tests/helper.py b/tests/helper.py
index 7e11826a7..2092b73ee 100644
--- a/tests/helper.py
+++ b/tests/helper.py
@@ -59,9 +59,9 @@ def dotproduct_adjointness_test(

     Raises
     ------
-    AssertionError
+    `AssertionError`
         if the adjointness property does not hold
-    AssertionError
+    `AssertionError`
         if the shape of operator(u) and v does not match
         if the shape of u and operator.H(v) does not match
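The property that `dotproduct_adjointness_test` checks is `<operator(u), v> == <u, operator.H(v)>`; for a plain matrix it reduces to the following sketch (a random complex matrix standing in for a linear operator, not the test helper itself):

    import torch

    a = torch.randn(5, 3, dtype=torch.complex64)  # stand-in for a linear operator
    u = torch.randn(3, dtype=torch.complex64)
    v = torch.randn(5, dtype=torch.complex64)

    lhs = torch.vdot(a @ u, v)     # <A u, v>
    rhs = torch.vdot(u, a.mH @ v)  # <u, A^H v>
    assert torch.isclose(lhs, rhs, atol=1e-4)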