From e77b00dce854b51d0ced8e58d11eefec388c4952 Mon Sep 17 00:00:00 2001 From: Ivo Vellekoop Date: Fri, 4 Oct 2024 22:32:49 +0200 Subject: [PATCH] grammar and typo fixes --- .readthedocs.yaml | 2 +- README.md | 227 +++++++++++++++++++------- STYLEGUIDE.md | 8 +- docs/source/conf.py | 9 +- docs/source/core.rst | 4 +- docs/source/development.rst | 4 +- docs/source/readme.rst | 4 +- docs/source/slms.rst | 2 +- examples/slm_demo.py | 2 +- openwfs/algorithms/basic_fourier.py | 13 +- openwfs/algorithms/dual_reference.py | 37 +++-- openwfs/algorithms/genetic.py | 2 +- openwfs/algorithms/troubleshoot.py | 17 +- openwfs/algorithms/utilities.py | 24 +-- openwfs/core.py | 27 +-- openwfs/devices/camera.py | 2 +- openwfs/devices/galvo_scanner.py | 8 +- openwfs/devices/slm/geometry.py | 2 +- openwfs/devices/slm/patch.py | 2 +- openwfs/devices/slm/shaders.py | 2 +- openwfs/devices/slm/slm.py | 4 +- openwfs/plot_utilities.py | 8 +- openwfs/simulation/microscope.py | 4 +- openwfs/simulation/slm.py | 2 +- openwfs/simulation/transmission.py | 4 +- openwfs/utilities/patterns.py | 4 +- openwfs/utilities/utilities.py | 2 +- tests/test_algorithms_troubleshoot.py | 4 +- tests/test_scanning_microscope.py | 28 +--- tests/test_simulation.py | 2 +- 30 files changed, 278 insertions(+), 182 deletions(-) diff --git a/.readthedocs.yaml b/.readthedocs.yaml index 97bb35f..d0d32a3 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -1,4 +1,4 @@ -version: "2" +version: 2 build: os: "ubuntu-22.04" diff --git a/README.md b/README.md index d7a4e36..4109a04 100644 --- a/README.md +++ b/README.md @@ -4,34 +4,80 @@ # What is wavefront shaping? -Wavefront shaping (WFS) is a technique for controlling the propagation of light in arbitrarily complex structures, including strongly scattering materials [[1](#id76)]. In WFS, a spatial light modulator (SLM) is used to shape the phase and/or amplitude of the incident light. With a properly constructed wavefront, light can be made to focus through [[2](#id57)], or inside [[3](#id46)] scattering materials; or light can be shaped to have other desired properties, such as optimal sensitivity for specific measurements [[4](#id47)], specialized point-spread functions [[5](#id33)], spectral filtering [[6](#id69)],, or for functions like optical trapping [[7](#id36)]. - -It stands out that an important driving force in WFS is the development of new algorithms, for example to account for sample movement [[8](#id35)], experimental conditions [[9](#id64)], to be optimally resilient to noise [[10](#id34)], or to use digital twin models to compute the required correction patterns [[11](#id56), [12](#id55), [13](#id72), [14](#id38)]. Much progress has been made towards developing fast and noise-resilient algorithms, or algorithms designed for specific towards the methodology of wavefront shaping, such as using algorithms based on Hadamard patterns, or Fourier-based approaches [[15](#id51)]. Fast techniques that enable wavefront shaping in dynamic samples [[16](#id59), [17](#id60)], and many potential applications have been developed and prototyped, including endoscopy [[12](#id55)], optical trapping [[18](#id61)], Raman scattering, [[19](#id45)], and deep-tissue imaging [[20](#id62)]. Applications extend beyond that of microscope imaging such as optimizing photoelectrochemical absorption [[21](#id63)] and tuning random lasers [[22](#id68)]. 
-
-With the development of these advanced algorithms, however, the complexity of WFS software is steadily increasing as the field matures, which hinders cooperation as well as end-user adoption. Code for controlling wavefront shaping tends to be complex and setup-specific, and developing this code typically requires detailed technical knowledge and low-level programming. A recent c++ based contribution [[23](#id67)], highlights the growing need for software based tools that enable use and development. Moreover, since many labs use their own in-house programs to control the experiments, sharing and re-using code between different research groups is troublesome.
+Wavefront shaping (WFS) is a technique for controlling the propagation of light in arbitrarily complex structures,
+including strongly scattering materials [[1](#id76)]. In WFS, a spatial light modulator (SLM) is used to shape the phase
+and/or amplitude of the incident light. With a properly constructed wavefront, light can be made to focus
+through [[2](#id57)], or inside [[3](#id46)] scattering materials; or light can be shaped to have other desired
+properties, such as optimal sensitivity for specific measurements [[4](#id47)], specialized point-spread
+functions [[5](#id33)], spectral filtering [[6](#id69)], or for functions like optical trapping [[7](#id36)].
+
+An important driving force in WFS is the development of new algorithms, for example to account for
+sample movement [[8](#id35)] or experimental conditions [[9](#id64)], to be optimally resilient to noise [[10](#id34)], or
+to use digital twin models to compute the required correction
+patterns [[11](#id56), [12](#id55), [13](#id72), [14](#id38)]. Much progress has been made towards developing fast and
+noise-resilient algorithms, or algorithms designed specifically for the methodology of wavefront shaping, such as
+algorithms based on Hadamard patterns or Fourier-based approaches [[15](#id51)]. Fast techniques that enable
+wavefront shaping in dynamic samples have been developed [[16](#id59), [17](#id60)], and many potential applications have
+been prototyped, including endoscopy [[12](#id55)], optical trapping [[18](#id61)], Raman scattering [[19](#id45)], and
+deep-tissue imaging [[20](#id62)]. Applications extend beyond microscope imaging, such as optimizing
+photoelectrochemical absorption [[21](#id63)] and tuning random lasers [[22](#id68)].
+
+With the development of these advanced algorithms, however, the complexity of WFS software is steadily increasing as the
+field matures, which hinders cooperation as well as end-user adoption. Code for controlling wavefront shaping tends to
+be complex and setup-specific, and developing this code typically requires detailed technical knowledge and low-level
+programming. A recent C++-based contribution [[23](#id67)] highlights the growing need for software-based tools that
+enable use and development. Moreover, since many labs use their own in-house programs to control the experiments,
+sharing and re-using code between different research groups is troublesome.

# What is OpenWFS?

-OpenWFS is a Python package for performing and for simulating wavefront shaping experiments. It aims to accelerate wavefront shaping research by providing:
-
-* **Hardware control**. Modular code for controlling spatial light modulators, cameras, and other hardware typically encountered in wavefront shaping experiments. Highlights include:
-  > * **Spatial light modulator**. 
The `SLM` object provides a versatile way to control spatial light modulators, allowing for software lookup tables, synchronization, texture warping, and multi-texture functionality accelerated by OpenGL. - > * **Scanning microscope**. The `ScanningMicroscope` object uses a National Instruments data acquisition card to control a laser-scanning microscope. - > * **GenICam cameras**. The `Camera` object uses the harvesters backend [[24](#id39)] to access any camera supporting the GenICam standard [[25](#id42)]. - > * **Automatic synchronization**. OpenWFS provides tools for automatic synchronization of actuators (e. g. an SLM) and detectors (e. g. a camera). The automatic synchronization makes it trivial to perform pipelined measurements that avoid the delay normally caused by the latency of the video card and SLM. -* **Wavefront shaping algorithms**. A (growing) collection of wavefront shaping algorithms. OpenWFS abstracts the hardware control, synchronization, and signal processing so that the user can focus on the algorithm itself. As a result, most algorithms can be implemented cleanly without hardware-specific programming. -* **Simulation**. OpenWFS provides an extensive framework for testing and simulating wavefront shaping algorithms, including the effect of measurement noise, stage drift, and user-defined aberrations. This allows for rapid prototyping and testing of new algorithms, without the need for physical hardware. -* **Platform for exchange and joint collaboration**. OpenWFS can be used as a platform for sharing and exchanging wavefront shaping algorithms. The package is designed to be modular and easy to expand, and it is our hope that the community will contribute to the package by adding new algorithms, hardware control modules, and simulation tools. Python was specifically chosen for this purpose for its active community, high level of abstraction and the ease of sharing tools. Further expansion of the supported hardware is of high priority, especially wrapping c-based software support with tools like ctypes and the Micro-Manager based device adapters. -* **Platform for simplifying use of wavefront shaping**. OpenWFS is compatible with the recently developed PyDevice [], and can therefore be controlled from Micro-Manager [[26](#id65)], a commonly used microscopy control platform. -* **Automated troubleshooting**. OpenWFS provides tools for automated troubleshooting of wavefront shaping experiments. This includes tools for measuring the performance of wavefront shaping algorithms, and for identifying common problems such as incorrect SLM calibration, drift, measurement noise, and other experimental imperfections. +OpenWFS is a Python package for performing and for simulating wavefront shaping experiments. It aims to accelerate +wavefront shaping research by providing: + +* **Hardware control**. Modular code for controlling spatial light modulators, cameras, and other hardware typically + encountered in wavefront shaping experiments. Highlights include: + > * **Spatial light modulator**. The `SLM` object provides a versatile way to control spatial light modulators, + allowing for software lookup tables, synchronization, texture warping, and multi-texture functionality accelerated + by OpenGL. + > * **Scanning microscope**. The `ScanningMicroscope` object uses a National Instruments data acquisition card to + control a laser-scanning microscope. + > * **GenICam cameras**. 
The `Camera` object uses the harvesters backend [[24](#id39)] to access any camera supporting
+      the GenICam standard [[25](#id42)].
+    > * **Automatic synchronization**. OpenWFS provides tools for automatic synchronization of actuators (e.g. an SLM) and
+      detectors (e.g. a camera). The automatic synchronization makes it trivial to perform pipelined measurements that
+      avoid the delay normally caused by the latency of the video card and SLM.
+* **Wavefront shaping algorithms**. A (growing) collection of wavefront shaping algorithms. OpenWFS abstracts the
+  hardware control, synchronization, and signal processing so that the user can focus on the algorithm itself. As a
+  result, most algorithms can be implemented cleanly without hardware-specific programming.
+* **Simulation**. OpenWFS provides an extensive framework for testing and simulating wavefront shaping algorithms,
+  including the effect of measurement noise, stage drift, and user-defined aberrations. This allows for rapid
+  prototyping and testing of new algorithms, without the need for physical hardware.
+* **Platform for exchange and joint collaboration**. OpenWFS can be used as a platform for sharing and exchanging
+  wavefront shaping algorithms. The package is designed to be modular and easy to expand, and it is our hope that the
+  community will contribute to the package by adding new algorithms, hardware control modules, and simulation tools.
+  Python was specifically chosen for this purpose for its active community, high level of abstraction, and the ease of
+  sharing tools. Further expansion of the supported hardware is of high priority, especially wrapping C-based software
+  support with tools like ctypes and the Micro-Manager-based device adapters.
+* **Platform for simplifying use of wavefront shaping**. OpenWFS is compatible with the recently developed PyDevice [],
+  and can therefore be controlled from Micro-Manager [[26](#id65)], a commonly used microscopy control platform.
+* **Automated troubleshooting**. OpenWFS provides tools for automated troubleshooting of wavefront shaping experiments.
+  This includes tools for measuring the performance of wavefront shaping algorithms, and for identifying common problems
+  such as incorrect SLM calibration, drift, measurement noise, and other experimental imperfections.

# Getting started

-OpenWFS is available on the PyPI repository, and it can be installed with the command `pip install openwfs`. The latest documentation and the example code can be found on the [Read the Docs](https://openwfs.readthedocs.io/en/latest/) website [[27](#id66)]. To use OpenWFS, you need to have Python 3.9 or later installed. At the time of writing, OpenWFS is tested up to Python version 3.11 (not all dependencies were available for Python 3.12 yet). OpenWFS is developed and tested on Windows 11 and Manjaro Linux.
+OpenWFS is available on the PyPI repository, and it can be installed with the command `pip install openwfs`. The latest
+documentation and the example code can be found on the [Read the Docs](https://openwfs.readthedocs.io/en/latest/)
+website [[27](#id66)]. To use OpenWFS, you need to have Python 3.9 or later installed. At the time of writing, OpenWFS
+is tested up to Python version 3.11 (not all dependencies were available for Python 3.12 yet). OpenWFS is developed and
+tested on Windows 11 and Manjaro Linux.

-[Listing 3.1](#hello-wfs) shows an example of how to use OpenWFS to run a simple wavefront shaping experiment. This example illustrates several of the main concepts of OpenWFS. 
First, the code initializes objects to control a spatial light modulator (SLM) connected to a video port, and a camera that provides feedback to the wavefront shaping algorithm.
+[Listing 3.1](#hello-wfs) shows an example of how to use OpenWFS to run a simple wavefront shaping experiment. This
+example illustrates several of the main concepts of OpenWFS. First, the code initializes objects to control a spatial
+light modulator (SLM) connected to a video port, and a camera that provides feedback to the wavefront shaping algorithm.
+

```python
"""
Hello wavefront shaping
@@ -68,34 +114,65 @@ after = feedback.read()
print(f"Intensity in the target increased from {before} to {after}")
```

-This example uses the StepwiseSequential wavefront shaping algorithm [[28](#id58)]. The algorithm needs access to the SLM for controlling the wavefront. This feedback is obtained from a `SingleRoi` object, which takes images from the camera, and averages them over the specified circular region of interest. The algorithm returns the measured transmission matrix in the field results.t, which is used to compute the optimal phase pattern to compensate the aberrations. Finally, the code measures the intensity at the detector before and after applying the optimized phase pattern.
+This example uses the StepwiseSequential wavefront shaping algorithm [[28](#id58)]. The algorithm needs access to the
+SLM for controlling the wavefront. This feedback is obtained from a `SingleRoi` object, which takes images from the
+camera and averages them over the specified circular region of interest. The algorithm returns the measured
+transmission matrix in the field `result.t`, which is used to compute the optimal phase pattern to compensate for the
+aberrations. Finally, the code measures the intensity at the detector before and after applying the optimized phase
+pattern.

-This code illustrates how OpenWFS separates the concerns of the hardware control (SLM and Camera), signal processing (SingleROIProcessor) and the algorithm itself (StepwiseSequential). A large variety of wavefront shaping experiments can be performed by using different types of feedback signals (such as optimizing multiple foci simultaneously using a `MultiRoiProcessor` object), using different algorithms, or different image sources, such as a `ScanningMicroscope`. Notably, these objects can be replaced by *mock* objects, that simulate the hardware and allow for rapid prototyping and testing of new algorithms without direct access to wavefront shaping hardware (see `section-simulations`).
+This code illustrates how OpenWFS separates the concerns of the hardware control (SLM and Camera), signal processing
+(SingleROIProcessor) and the algorithm itself (StepwiseSequential). A large variety of wavefront shaping experiments can
+be performed by using different types of feedback signals (such as optimizing multiple foci simultaneously using
+a `MultiRoiProcessor` object), using different algorithms, or different image sources, such as a `ScanningMicroscope`.
+Notably, these objects can be replaced by *mock* objects that simulate the hardware and allow for rapid prototyping and
+testing of new algorithms without direct access to wavefront shaping hardware (see `section-simulations`), as sketched
+below.
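For instance, the same algorithm can be run entirely in simulation. The following is a minimal sketch, not code from
the repository: the `SimulatedWFS` name, its constructor arguments, and the exact `StepwiseSequential` parameters are
assumptions based on the description above, and the actual API in `openwfs.simulation` may differ.

```python
import numpy as np

from openwfs.algorithms import StepwiseSequential
from openwfs.simulation import SimulatedWFS  # assumed name of a mock SLM + feedback pair

# A random phase screen plays the role of the scattering sample.
aberrations = np.random.uniform(0.0, 2 * np.pi, size=(25, 25))
sim = SimulatedWFS(aberrations=aberrations)

# The algorithm code is unchanged; only the hardware objects are mocks.
alg = StepwiseSequential(feedback=sim, slm=sim.slm, n_x=25, n_y=25, phase_steps=4)
result = alg.execute()

# Display the correction pattern; the simulated focus intensity should increase.
sim.slm.set_phases(-np.angle(result.t))
```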
# Analysis and troubleshooting

-The principles of wavefront shaping are well established, and under close-to-ideal experimental conditions, it is possible to accurately predict the signal enhancement. In practice, however, there exist many practical issues that can negatively affect the outcome of the experiment. OpenWFS has built-in functions to analyze and troubleshoot the measurements from a wavefront shaping experiment.
+The principles of wavefront shaping are well established, and under close-to-ideal experimental conditions, it is
+possible to accurately predict the signal enhancement. In practice, however, many practical issues can
+negatively affect the outcome of the experiment. OpenWFS has built-in functions to analyze and troubleshoot the
+measurements from a wavefront shaping experiment.

-The `result` structure in [Listing 3.1](#hello-wfs), as returned by the wavefront shaping algorithm, was computed with the utility function `analyze_phase_stepping()`. This function extracts the transmission matrix from phase stepping measurements, and additionally computes a series of troubleshooting statistics in the form of a *fidelity*, which is a number that ranges from 0 (no sensible measurement possible) to 1 (perfect situation, optimal focus expected). These fidelities are:
+The `result` structure in [Listing 3.1](#hello-wfs), as returned by the wavefront shaping algorithm, was computed with
+the utility function `analyze_phase_stepping()`. This function extracts the transmission matrix from phase stepping
+measurements, and additionally computes a series of troubleshooting statistics in the form of a *fidelity*, which is a
+number that ranges from 0 (no sensible measurement possible) to 1 (perfect situation, optimal focus expected). These
+fidelities are:

* `fidelity_noise`: The fidelity reduction due to noise in the measurements.
* `fidelity_amplitude`: The fidelity reduction due to unequal illumination of the SLM.
* `fidelity_calibration`: The fidelity reduction due to imperfect phase response of the SLM.

-If these fidelities are much lower than 1, this indicates a problem in the experiment, or a bug in the wavefront shaping experiment. For a comprehensive overview of the practical considerations in wavefront shaping and their effects on the fidelity, please see [[29](#id31)].
+If these fidelities are much lower than 1, this indicates a problem in the experiment, or a bug in the wavefront shaping
+code. For a comprehensive overview of the practical considerations in wavefront shaping and their effects on the
+fidelity, please see [[29](#id31)].

Further troubleshooting can be performed with the `troubleshoot()` function, which estimates the following fidelities:

-* `fidelity_non_modulated`: The fidelity reduction due to non-modulated light., e. g. due to reflection from the front surface of the SLM.
+* `fidelity_non_modulated`: The fidelity reduction due to non-modulated light, e.g. due to reflection from the front
+  surface of the SLM.
* `fidelity_decorrelation`: The fidelity reduction due to decorrelation of the field during the measurement.

-All fidelity estimations are combined to make an order of magnitude estimation of the expected enhancement. `troubleshoot()` returns a `WFSTroubleshootResult` object containing the outcome of the different tests and analyses, which can be printed to the console as a comprehensive troubleshooting report with the method `report()`. See `examples/troubleshooter_demo.py` for an example of how to use the automatic troubleshooter.
+All fidelity estimations are combined to make an order-of-magnitude estimate of the expected
+enhancement. 
`troubleshoot()` returns a `WFSTroubleshootResult` object containing the outcome of the different tests and +analyses, which can be printed to the console as a comprehensive troubleshooting report with the method `report()`. +See `examples/troubleshooter_demo.py` for an example of how to use the automatic troubleshooter. -Lastly, the `troubleshoot()` function computes several image frame metrics such as the *unbiased contrast to noise ratio* and *unbiased contrast enhancement*. These metrics are especially useful for scenarios where the contrast is expected to improve due to wavefront shaping, such as in multi-photon excitation fluorescence (multi-PEF) microscopy. Furthermore, `troubleshoot()` tests the image capturing repeatability and runs a stability test by capturing and comparing many frames over a longer period of time. +Lastly, the `troubleshoot()` function computes several image frame metrics such as the *unbiased contrast to noise +ratio* and *unbiased contrast enhancement*. These metrics are especially useful for scenarios where the contrast is +expected to improve due to wavefront shaping, such as in multi-photon excitation fluorescence (multi-PEF) microscopy. +Furthermore, `troubleshoot()` tests the image capturing repeatability and runs a stability test by capturing and +comparing many frames over a longer period of time. # Acknowledgements -We would like to thank Gerwin Osnabrugge, Bahareh Mastiani, Giulia Sereni, Siebe Meijer, Gijs Hannink, Merle van Gorsel, Michele Gintoli, Karina van Beek, Abhilash Thendiyammal, Lyuba Amitonova, and Tzu-Lun Wang for their contributions to earlier revisions of our wavefront shaping code. This work was supported by the European Research Council under the European Union’s Horizon 2020 Programme / ERC Grant Agreement n° [678919], and the Dutch Research Council (NWO) through Vidi grant number 14879. +We would like to thank Gerwin Osnabrugge, Bahareh Mastiani, Giulia Sereni, Siebe Meijer, Gijs Hannink, Merle van Gorsel, +Michele Gintoli, Karina van Beek, Abhilash Thendiyammal, Lyuba Amitonova, and Tzu-Lun Wang for their contributions to +earlier revisions of our wavefront shaping code. This work was supported by the European Research Council under the +European Union’s Horizon 2020 Programme / ERC Grant Agreement n° [678919], and the Dutch Research Council (NWO) through +Vidi grant number 14879. # Conflict of interest statement @@ -103,107 +180,142 @@ The authors declare no conflict of interest. 1 -Joel Kubby, Sylvain Gigan, and Meng Cui, editors. *Wavefront Shaping for Biomedical Imaging*. Advances in Microscopy and Microanalysis. Cambridge University Press, 2019. [doi:10.1017/9781316403938](https://doi.org/10.1017/9781316403938). +Joel Kubby, Sylvain Gigan, and Meng Cui, editors. *Wavefront Shaping for Biomedical Imaging*. Advances in Microscopy and +Microanalysis. Cambridge University Press, 2019. [doi:10.1017/9781316403938](https://doi.org/10.1017/9781316403938). 2 -Ivo M. Vellekoop and A. P. Mosk. Focusing coherent light through opaque strongly scattering media. *Opt. Lett.*, 32(16):2309–2311, Aug 2007. [doi:10.1364/OL.32.002309](https://doi.org/10.1364/OL.32.002309). +Ivo M. Vellekoop and A. P. Mosk. Focusing coherent light through opaque strongly scattering media. *Opt. Lett.*, 32(16): +2309–2311, Aug 2007. [doi:10.1364/OL.32.002309](https://doi.org/10.1364/OL.32.002309). 3 -Ivo M. Vellekoop, EG Van Putten, A Lagendijk, and AP Mosk. Demixing light paths inside disordered metamaterials. *Optics express*, 16(1):67–80, 2008. +Ivo M. 
Vellekoop, EG Van Putten, A Lagendijk, and AP Mosk. Demixing light paths inside disordered metamaterials. *Optics
+express*, 16(1):67–80, 2008.

4

-Dorian Bouchet, Stefan Rotter, and Allard P Mosk. Maximum information states for coherent scattering measurements. *Nature Physics*, 17(5):564–568, 2021.
+Dorian Bouchet, Stefan Rotter, and Allard P Mosk. Maximum information states for coherent scattering measurements.
+*Nature Physics*, 17(5):564–568, 2021.

5

-Antoine Boniface et al. Transmission-matrix-based point-spread-function engineering through a complex medium. *Optica*, 4(1):54–59, 2017.
+Antoine Boniface et al. Transmission-matrix-based point-spread-function engineering through a complex medium. *Optica*,
+4(1):54–59, 2017.

6

-Jung-Hoon Park, ChungHyun Park, YongKeun Park, Hyunseung Yu, and Yong-Hoon Cho. Active spectral filtering through turbid media. *Optics Letters, Vol. 37, Issue 15, pp. 3261-3263*, 37:3261–3263, 8 2012. URL: [https://opg.optica.org/viewmedia.cfm?uri=ol-37-15-3261&seq=0&html=true https://opg.optica.org/abstract.cfm?uri=ol-37-15-3261 https://opg.optica.org/ol/abstract.cfm?uri=ol-37-15-3261](https://opg.optica.org/viewmedia.cfm?uri=ol-37-15-3261&seq=0&html=true https://opg.optica.org/abstract.cfm?uri=ol-37-15-3261 https://opg.optica.org/ol/abstract.cfm?uri=ol-37-15-3261), [doi:10.1364/OL.37.003261](https://doi.org/10.1364/OL.37.003261).
+Jung-Hoon Park, ChungHyun Park, YongKeun Park, Hyunseung Yu, and Yong-Hoon Cho. Active spectral filtering through turbid
+media. *Optics Letters*, 37(15):3261–3263, 8 2012.
+URL: [https://opg.optica.org/ol/abstract.cfm?uri=ol-37-15-3261](https://opg.optica.org/ol/abstract.cfm?uri=ol-37-15-3261), [doi:10.1364/OL.37.003261](https://doi.org/10.1364/OL.37.003261).

7

-Tomáš Čižmár, Michael Mazilu, and Kishan Dholakia. In situ wavefront correction and its application to micromanipulation. *Nature Photonics*, 4(6):388–394, 2010.
+Tomáš Čižmár, Michael Mazilu, and Kishan Dholakia. In situ wavefront correction and its application to
+micromanipulation. *Nature Photonics*, 4(6):388–394, 2010.

8

-Lorenzo Valzania and Sylvain Gigan. Online learning of the transmission matrix of dynamic scattering media. *Optica*, 10(6):708–716, 2023.
+Lorenzo Valzania and Sylvain Gigan. Online learning of the transmission matrix of dynamic scattering media. *Optica*,
+10(6):708–716, 2023.

9

-Benjamin R. Anderson, Ray Gunawidjaja, and Hergen Eilers. Effect of experimental parameters on optimal reflection of light from opaque media. *Physical Review A*, 93:013813, 1 2016. URL: [https://journals.aps.org/pra/abstract/10.1103/PhysRevA.93.013813](https://journals.aps.org/pra/abstract/10.1103/PhysRevA.93.013813), [doi:10.1103/PHYSREVA.93.013813/FIGURES/12/MEDIUM](https://doi.org/10.1103/PHYSREVA.93.013813/FIGURES/12/MEDIUM).
+Benjamin R. Anderson, Ray Gunawidjaja, and Hergen Eilers. Effect of experimental parameters on optimal reflection of
+light from opaque media. *Physical Review A*, 93:013813, 1 2016.
+URL: [https://journals.aps.org/pra/abstract/10.1103/PhysRevA.93.013813](https://journals.aps.org/pra/abstract/10.1103/PhysRevA.93.013813), [doi:10.1103/PHYSREVA.93.013813/FIGURES/12/MEDIUM](https://doi.org/10.1103/PHYSREVA.93.013813/FIGURES/12/MEDIUM). 
10

-Bahareh Mastiani and Ivo M Vellekoop. Noise-tolerant wavefront shaping in a hadamard basis. *Optics express*, 29(11):17534–17541, 2021.
+Bahareh Mastiani and Ivo M Vellekoop. Noise-tolerant wavefront shaping in a Hadamard basis. *Optics express*, 29(11):
+17534–17541, 2021.

11

-PS Salter, M Baum, I Alexeev, M Schmidt, and MJ Booth. Exploring the depth range for three-dimensional laser machining with aberration correction. *Optics express*, 22(15):17644–17656, 2014.
+PS Salter, M Baum, I Alexeev, M Schmidt, and MJ Booth. Exploring the depth range for three-dimensional laser machining
+with aberration correction. *Optics express*, 22(15):17644–17656, 2014.

12

-Martin Plöschner, Tomáš Tyc, and Tomáš Čižmár. Seeing through chaos in multimode fibres. *Nature Photonics*, 9(8):529–535, 2015.
+Martin Plöschner, Tomáš Tyc, and Tomáš Čižmár. Seeing through chaos in multimode fibres. *Nature Photonics*, 9(8):
+529–535, 2015.

13

-Abhilash Thendiyammal, Gerwin Osnabrugge, Tom Knop, and Ivo M. Vellekoop. Model-based wavefront shaping microscopy. *Opt. Lett.*, 45(18):5101–5104, Sep 2020. [doi:10.1364/OL.400985](https://doi.org/10.1364/OL.400985).
+Abhilash Thendiyammal, Gerwin Osnabrugge, Tom Knop, and Ivo M. Vellekoop. Model-based wavefront shaping microscopy.
+*Opt. Lett.*, 45(18):5101–5104, Sep 2020. [doi:10.1364/OL.400985](https://doi.org/10.1364/OL.400985).

14

-DWS Cox, T Knop, and Ivo M. Vellekoop. Model-based aberration corrected microscopy inside a glass tube. *arXiv preprint arXiv:2311.13363*, 2023.
+DWS Cox, T Knop, and Ivo M. Vellekoop. Model-based aberration corrected microscopy inside a glass tube. *arXiv preprint
+arXiv:2311.13363*, 2023.

15

-Bahareh Mastiani, Gerwin Osnabrugge, and Ivo M. Vellekoop. Wavefront shaping for forward scattering. *Optics Express*, 30:37436, 10 2022. [doi:10.1364/oe.470194](https://doi.org/10.1364/oe.470194).
+Bahareh Mastiani, Gerwin Osnabrugge, and Ivo M. Vellekoop. Wavefront shaping for forward scattering. *Optics Express*,
+30:37436, 10 2022. [doi:10.1364/oe.470194](https://doi.org/10.1364/oe.470194).

16

-Yan Liu et al. Focusing light inside dynamic scattering media with millisecond digital optical phase conjugation. *Optica*, 4(2):280–288, Feb 2017. [doi:10.1364/OPTICA.4.000280](https://doi.org/10.1364/OPTICA.4.000280).
+Yan Liu et al. Focusing light inside dynamic scattering media with millisecond digital optical phase conjugation.
+*Optica*, 4(2):280–288, Feb 2017. [doi:10.1364/OPTICA.4.000280](https://doi.org/10.1364/OPTICA.4.000280).

17

-Omer Tzang et al. Wavefront shaping in complex media with a 350 khz modulator via a 1d-to-2d transform. *Nature Photonics*, 2019. [doi:10.1038/s41566-019-0503-6](https://doi.org/10.1038/s41566-019-0503-6).
+Omer Tzang et al. Wavefront shaping in complex media with a 350 kHz modulator via a 1D-to-2D transform. *Nature
+Photonics*, 2019. [doi:10.1038/s41566-019-0503-6](https://doi.org/10.1038/s41566-019-0503-6).

18

-Tomáš Čižmár, Michael Mazilu, and Kishan Dholakia. In situ wavefront correction and its application to micromanipulation. *Nature Photonics*, 4:388–394, 05 2010. [doi:10.1038/nphoton.2010.85](https://doi.org/10.1038/nphoton.2010.85).
+Tomáš Čižmár, Michael Mazilu, and Kishan Dholakia. In situ wavefront correction and its application to
+micromanipulation. *Nature Photonics*, 4:388–394, 05
+2010. [doi:10.1038/nphoton.2010.85](https://doi.org/10.1038/nphoton.2010.85).

19

-Jonathan V. Thompson, Graham A. Throckmorton, Brett H. Hokr, and Vladislav V. Yakovlev. 
Wavefront shaping enhanced raman scattering in a turbid medium. *Optics letters*, 41:1769, 4 2016. URL: [https://pubmed.ncbi.nlm.nih.gov/27082341/](https://pubmed.ncbi.nlm.nih.gov/27082341/), [doi:10.1364/OL.41.001769](https://doi.org/10.1364/OL.41.001769).
+Jonathan V. Thompson, Graham A. Throckmorton, Brett H. Hokr, and Vladislav V. Yakovlev. Wavefront shaping enhanced Raman
+scattering in a turbid medium. *Optics letters*, 41:1769, 4 2016.
+URL: [https://pubmed.ncbi.nlm.nih.gov/27082341/](https://pubmed.ncbi.nlm.nih.gov/27082341/), [doi:10.1364/OL.41.001769](https://doi.org/10.1364/OL.41.001769).

20

-Lina Streich et al. High-resolution structural and functional deep brain imaging using adaptive optics three-photon microscopy. *Nature Methods 2021 18:10*, 18:1253–1258, 9 2021. [doi:10.1038/s41592-021-01257-6](https://doi.org/10.1038/s41592-021-01257-6).
+Lina Streich et al. High-resolution structural and functional deep brain imaging using adaptive optics three-photon
+microscopy. *Nature Methods 2021 18:10*, 18:1253–1258, 9
+2021. [doi:10.1038/s41592-021-01257-6](https://doi.org/10.1038/s41592-021-01257-6).

21

-Seng Fatt Liew, Sébastien M. Popoff, Stafford W. Sheehan, Arthur Goetschy, Charles A. Schmuttenmaer, A. Douglas Stone, and Hui Cao. Coherent control of photocurrent in a strongly scattering photoelectrochemical system. *ACS Photonics*, 3:449–455, 3 2016. URL: [https://technion-staging.elsevierpure.com/en/publications/coherent-control-of-photocurrent-in-a-strongly-scattering-photoel](https://technion-staging.elsevierpure.com/en/publications/coherent-control-of-photocurrent-in-a-strongly-scattering-photoel), [doi:10.1021/ACSPHOTONICS.5B00642](https://doi.org/10.1021/ACSPHOTONICS.5B00642).
+Seng Fatt Liew, Sébastien M. Popoff, Stafford W. Sheehan, Arthur Goetschy, Charles A. Schmuttenmaer, A. Douglas Stone,
+and Hui Cao. Coherent control of photocurrent in a strongly scattering photoelectrochemical system. *ACS Photonics*, 3:
+449–455, 3 2016.
+URL: [https://technion-staging.elsevierpure.com/en/publications/coherent-control-of-photocurrent-in-a-strongly-scattering-photoel](https://technion-staging.elsevierpure.com/en/publications/coherent-control-of-photocurrent-in-a-strongly-scattering-photoel), [doi:10.1021/ACSPHOTONICS.5B00642](https://doi.org/10.1021/ACSPHOTONICS.5B00642).

22

-Nicolas Bachelard, Sylvain Gigan, Xavier Noblin, and Patrick Sebbah. Adaptive pumping for spectral control of random lasers. *Nature Physics*, 10:426–431, 2014. URL: [https://ui.adsabs.harvard.edu/abs/2014NatPh..10..426B/abstract](https://ui.adsabs.harvard.edu/abs/2014NatPh..10..426B/abstract), [doi:10.1038/nphys2939](https://doi.org/10.1038/nphys2939).
+Nicolas Bachelard, Sylvain Gigan, Xavier Noblin, and Patrick Sebbah. Adaptive pumping for spectral control of random
+lasers. *Nature Physics*, 10:426–431, 2014.
+URL: [https://ui.adsabs.harvard.edu/abs/2014NatPh..10..426B/abstract](https://ui.adsabs.harvard.edu/abs/2014NatPh..10..426B/abstract), [doi:10.1038/nphys2939](https://doi.org/10.1038/nphys2939).

23

-Benjamin R. Anderson, Andrew O’Kins, Kostiantyn Makrasnov, Rebecca Udby, Patrick Price, and Hergen Eilers. A modular gui-based program for genetic algorithm-based feedback-assisted wavefront shaping. *Journal of Physics: Photonics*, 6:045008, 8 2024. 
URL: [https://iopscience.iop.org/article/10.1088/2515-7647/ad6ed3 https://iopscience.iop.org/article/10.1088/2515-7647/ad6ed3/meta](https://iopscience.iop.org/article/10.1088/2515-7647/ad6ed3 https://iopscience.iop.org/article/10.1088/2515-7647/ad6ed3/meta), [doi:10.1088/2515-7647/AD6ED3](https://doi.org/10.1088/2515-7647/AD6ED3).
+Benjamin R. Anderson, Andrew O’Kins, Kostiantyn Makrasnov, Rebecca Udby, Patrick Price, and Hergen Eilers. A modular
+GUI-based program for genetic algorithm-based feedback-assisted wavefront shaping. *Journal of Physics: Photonics*, 6:
+045008, 8 2024.
+URL: [https://iopscience.iop.org/article/10.1088/2515-7647/ad6ed3](https://iopscience.iop.org/article/10.1088/2515-7647/ad6ed3), [doi:10.1088/2515-7647/AD6ED3](https://doi.org/10.1088/2515-7647/AD6ED3).

24

-Rod Barman et al. Harvesters. URL: [https://github.com/genicam/harvesters](https://github.com/genicam/harvesters).
+Rod Barman et al. Harvesters. URL: [https://github.com/genicam/harvesters](https://github.com/genicam/harvesters).

25

-GenICam - generic interface for cameras. URL: [https://www.emva.org/standards-technology/genicam/](https://www.emva.org/standards-technology/genicam/).
+GenICam - generic interface for cameras.
+URL: [https://www.emva.org/standards-technology/genicam/](https://www.emva.org/standards-technology/genicam/).

26

-Mark Tsuchida and Sam Griffin. Micro-manager project overview. URL: [https://micro-manager.org/Micro-Manager_Project_Overview](https://micro-manager.org/Micro-Manager_Project_Overview).
+Mark Tsuchida and Sam Griffin. Micro-Manager project overview.
+URL: [https://micro-manager.org/Micro-Manager_Project_Overview](https://micro-manager.org/Micro-Manager_Project_Overview).

27

@@ -211,8 +323,11 @@ OpenWFS documentation. URL: [https://openwfs.readthedocs.io/en/latest/](https://

28

-Ivo M. Vellekoop and AP Mosk. Phase control algorithms for focusing light through turbid media. *Optics communications*, 281(11):3071–3080, 2008.
+Ivo M. Vellekoop and AP Mosk. Phase control algorithms for focusing light through turbid media. *Optics communications*,
+281(11):3071–3080, 2008.

29

-Bahareh Mastiani, Daniël W. S. Cox, and Ivo M. Vellekoop. Practical considerations for high-fidelity wavefront shaping experiments. http://arxiv.org/abs/2403.15265, March 2024. [arXiv:2403.15265](https://arxiv.org/abs/2403.15265), [doi:10.48550/arXiv.2403.15265](https://doi.org/10.48550/arXiv.2403.15265).
+Bahareh Mastiani, Daniël W. S. Cox, and Ivo M. Vellekoop. Practical considerations for high-fidelity wavefront shaping
+experiments. http://arxiv.org/abs/2403.15265, March
+2024. [arXiv:2403.15265](https://arxiv.org/abs/2403.15265), [doi:10.48550/arXiv.2403.15265](https://doi.org/10.48550/arXiv.2403.15265).
diff --git a/STYLEGUIDE.md b/STYLEGUIDE.md
index 62ef254..f5a9eeb 100644
--- a/STYLEGUIDE.md
+++ b/STYLEGUIDE.md
@@ -6,8 +6,12 @@

# General

-- The package `black` is used to ensure correct formatting. Install with `pip install black` and run in the terminal
-  using `black .` when located at the root of the repository.
+- The package `black` is used to ensure correct formatting.
+  When using PyCharm, just install black through the settings dialog.
+- PyCharm warnings and errors should be fixed. Exceptions:
+  - PEP 8: E501 line too long. May be disabled. This is already checked by black. For docstrings, keeping a strict
+    line limit can be very cumbersome. 
+  - PEP 8: E203 whitespace before ':'. May be disabled. This is already checked by (and conflicts with) black.

# Tests

diff --git a/docs/source/conf.py b/docs/source/conf.py
index 9c927d7..7b7e635 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -49,7 +49,8 @@
     \author[1]{Tom~Knop}
     \author[1,2]{Harish~Sasikumar}
     \author[1]{Ivo~M.~Vellekoop}
-    \affil[1]{University of Twente, Biomedical Photonic Imaging, TechMed Institute, P. O. Box 217, 7500 AE Enschede, The Netherlands}
+    \affil[1]{University of Twente, Biomedical Photonic Imaging, TechMed Institute, P. O. Box 217,
+    7500 AE Enschede, The Netherlands}
     \affil[2]{Imec (Netherlands), Holst Centre (HTC-31), 5656 AE, Eindhoven, The Netherlands}
     \publishers{%
         \normalfont\normalsize%
@@ -128,11 +129,11 @@
autodoc_mock_imports = ["PyOpenGL", "OpenGL"]

-## Hide some classes that are not production ready yet
-def skip(app, what, name, obj, skip, options):
+# Hide some classes that are not production ready yet
+def skip(app, what, name, obj, do_skip, options):
     if name in ("WFSController", "Gain"):
         return True
-    return skip
+    return do_skip

def visit_citation(self, node):
diff --git a/docs/source/core.rst b/docs/source/core.rst
index 3e585c2..7b9f956 100644
--- a/docs/source/core.rst
+++ b/docs/source/core.rst
@@ -27,7 +27,7 @@ Detectors in OpenWFS are objects that capture, generate, or process data. All de

    def coordinates(dimension: int) -> Quantity

-The :meth:`~.Detector.read()` method of a detector starts a measurement and returns the captured data. It triggers the detector and blocks until the data is available. Data is always returned as `numpy` array :cite:`numpy`. Subclasses of :class:`~.Detector` typically add properties specific to that detector (e. g. shutter time, gain, etc.). In the simplest case, setting these properties and calling :meth:`.~Detector.read()` is all that is needed to capture data. The :meth:`~.Detector.trigger()` method is used for asynchronous measurements as described below. All other properties and methods are used for metadata and units, as described in :numref:`Units and metadata`.
+The :meth:`~.Detector.read()` method of a detector starts a measurement and returns the captured data. It triggers the detector and blocks until the data is available. Data is always returned as a `numpy` array :cite:`numpy`. Subclasses of :class:`~.Detector` typically add properties specific to that detector (e.g. shutter time, gain, etc.). In the simplest case, setting these properties and calling :meth:`~.Detector.read()` is all that is needed to capture data. The :meth:`~.Detector.trigger()` method is used for asynchronous measurements as described below. All other properties and methods are used for metadata and units, as described in :numref:`Units and metadata`.

The detector object inherits some properties and methods from the base class :class:`~.Device`. These are used by the synchronization mechanism to determine when it is safe to start a measurement, as described in :numref:`device-synchronization`.

@@ -88,7 +88,7 @@ OpenWFS consistently uses `astropy.units` :cite:`astropy` for quantities with ph
    c.shutter_time = 0.01 * u.s  # equivalent to the previous line
    c.shutter_time = 10  # raises an error, since the unit is missing

-In addition, OpenWFS allows attaching pixel-size metadata to data arrays using the functions :func:`~.set_pixel_size()`. Pixel sizes can represent a physical length (e. g. as in the size pixels on an image sensor), or other units such as time (e. g. as the sampling period in a time series). OpenWFS fully supports anisotropic pixels, where the pixel sizes in the x and y directions are different.
+In addition, OpenWFS allows attaching pixel-size metadata to data arrays using the function :func:`~.set_pixel_size()`. Pixel sizes can represent a physical length (e.g. the size of pixels on an image sensor), or other units such as time (e.g. the sampling period in a time series). OpenWFS fully supports anisotropic pixels, where the pixel sizes in the x and y directions are different.
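To make the metadata round trip concrete, a small sketch follows. The functions themselves are documented above, but
the import path shown here is an assumption and should be checked against the package:

```python
import astropy.units as u
import numpy as np

from openwfs.utilities import set_pixel_size, get_pixel_size  # assumed import path

# Attach a physical pixel size to a plain numpy array, as a camera detector would.
frame = set_pixel_size(np.zeros((480, 640)), 5.2 * u.um)

print(get_pixel_size(frame))                     # 5.2 um
print(get_pixel_size(frame) * frame.shape[-1])   # extent along the last axis
```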
The data arrays returned by the :meth:`~.Detector.read()` function of a detector have `pixel_size` metadata attached whenever appropriate. The pixel size can be retrieved from the array using :func:`~.get_pixel_size()`, or obtained from the :attr:`~.Detector.pixel_size` attribute directly. As an alternative to accessing the pixel size directly, :func:`~get_extent()` and :class:`~.Detector.extent` provide access to the extent of the array, which is always equal to the pixel size times the shape of the array. Finally, the convenience function :meth:`~.Detector.coordinates` returns a vector of coordinates with appropriate units along a specified dimension of the array.
diff --git a/docs/source/development.rst b/docs/source/development.rst
index 7b57fcb..69115c3 100644
--- a/docs/source/development.rst
+++ b/docs/source/development.rst
@@ -70,11 +70,11 @@ If the detector is created with the flag ``multi_threaded = True``, then `_fetch
Implementing a processor
++++++++++++++++++++++++++++++++++

-To implement a data processing step that dynamically processes date from one or more input detectors, implement a custom processor. This is done by deriving from the `Processor` base class and implementing the `__init__` function. This function should pass a list of all upstream nodes, i. e. all detectors which provide the input signals to the processor, the base class constructor. In addition, the :meth"`~Detector._fetch()` method should be implemented to process the data. The framework will wait until the data from all sources is available, and calls `_fetch()` with this data as input. See the implementation of :class:`~.Shutter` or any other processor for an example of how to implement this function.
+To implement a data processing step that dynamically processes data from one or more input detectors, implement a custom processor. This is done by deriving from the `Processor` base class and implementing the `__init__` function. This function should pass a list of all upstream nodes, i.e. all detectors which provide the input signals to the processor, to the base class constructor. In addition, the :meth:`~Detector._fetch()` method should be implemented to process the data. The framework will wait until the data from all sources is available, and calls `_fetch()` with this data as input. See the implementation of :class:`~.Shutter` or any other processor for an example of how to implement this function. A minimal sketch of such a processor is shown below.
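For illustration, a toy processor that subtracts a fixed dark frame from the output of a single source detector might
look as follows. This is a sketch under assumptions: the exact `Processor` constructor signature and the way
`_fetch()` receives its inputs should be verified against `openwfs.core` before use.

```python
import numpy as np

from openwfs.core import Processor  # assumed location of the Processor base class


class DarkFrameSubtractor(Processor):
    """Toy processor: subtracts a fixed dark frame from each frame of `source`."""

    def __init__(self, source, dark_frame: np.ndarray):
        # Pass the upstream node(s) to the base class constructor.
        super().__init__(source, multi_threaded=False)
        self._dark_frame = dark_frame

    def _fetch(self, data):
        # Called by the framework once the data from all sources is available.
        return data - self._dark_frame
```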
Implementing an actuator
+++++++++++++++++++++++++++++++

-To implement an actuator, the user should subclass the `Actuator` base class, and implement whatever properties and logic appropriate to the device. All methods that start the actuator (e. g. `update()` or `move()`), should first call `self._start()` to request a state switch to the `moving` state. As for detectors, actuators should either specify a static `duration` and `latency` if known, or override these properties to return run-time values for the duration and latency. Similarly, if the duration of an action of the actuator is not known in advance, the class should override `busy` to poll for the action to complete.
+To implement an actuator, the user should subclass the `Actuator` base class, and implement whatever properties and logic are appropriate to the device. All methods that start the actuator (e.g. `update()` or `move()`) should first call `self._start()` to request a state switch to the `moving` state. As for detectors, actuators should either specify a static `duration` and `latency` if known, or override these properties to return run-time values for the duration and latency. Similarly, if the duration of an action of the actuator is not known in advance, the class should override `busy` to poll for the action to complete.
diff --git a/docs/source/readme.rst b/docs/source/readme.rst
index 833bfeb..5b8431b 100644
--- a/docs/source/readme.rst
+++ b/docs/source/readme.rst
@@ -29,7 +29,7 @@ OpenWFS is a Python package for performing and for simulating wavefront shaping
    * **Spatial light modulator**. The :class:`~.slm.SLM` object provides a versatile way to control spatial light modulators, allowing for software lookup tables, synchronization, texture warping, and multi-texture functionality accelerated by OpenGL.
    * **Scanning microscope**. The :class:`~.devices.ScanningMicroscope` object uses a National Instruments data acquisition card to control a laser-scanning microscope.
    * **GenICam cameras**. The :class:`~.devices.Camera` object uses the `harvesters` backend :cite:`harvesters` to access any camera supporting the GenICam standard :cite:`genicam`.
-    * **Automatic synchronization**. OpenWFS provides tools for automatic synchronization of actuators (e. g. an SLM) and detectors (e. g. a camera). The automatic synchronization makes it trivial to perform pipelined measurements that avoid the delay normally caused by the latency of the video card and SLM.
+    * **Automatic synchronization**. OpenWFS provides tools for automatic synchronization of actuators (e.g. an SLM) and detectors (e.g. a camera). The automatic synchronization makes it trivial to perform pipelined measurements that avoid the delay normally caused by the latency of the video card and SLM.

* **Wavefront shaping algorithms**. A (growing) collection of wavefront shaping algorithms. OpenWFS abstracts the hardware control, synchronization, and signal processing so that the user can focus on the algorithm itself. As a result, most algorithms can be implemented cleanly without hardware-specific programming.

@@ -78,7 +78,7 @@ If these fidelities are much lower than 1, this indicates a problem in the exper

Further troubleshooting can be performed with the :func:`~.troubleshoot` function, which estimates the following fidelities:

-* :attr:`~.WFSTroubleshootResult.fidelity_non_modulated`: The fidelity reduction due to non-modulated light., e. g. due to reflection from the front surface of the SLM.
+* :attr:`~.WFSTroubleshootResult.fidelity_non_modulated`: The fidelity reduction due to non-modulated light, e.g. due to reflection from the front surface of the SLM.
* :attr:`~.WFSTroubleshootResult.fidelity_decorrelation`: The fidelity reduction due to decorrelation of the field during the measurement.

All fidelity estimations are combined to make an order of magnitude estimation of the expected enhancement. :func:`~.troubleshoot` returns a ``WFSTroubleshootResult`` object containing the outcome of the different tests and analyses, which can be printed to the console as a comprehensive troubleshooting report with the method :meth:`~.WFSTroubleshootResult.report()`. See ``examples/troubleshooter_demo.py`` for an example of how to use the automatic troubleshooter.
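In practice, running the troubleshooter amounts to a call along the following lines. This is a hedged sketch: the
keyword names follow the signature documented in ``openwfs/algorithms/troubleshoot.py`` further below, but the import
path is an assumption, and `alg`, `roi_background`, `cam`, and `shutter` are placeholders that must come from your own
setup (e.g. the objects created in Listing 3.1 plus a shutter device).

```python
from openwfs.algorithms import troubleshoot  # assumed import path

trouble = troubleshoot(
    algorithm=alg,                        # e.g. a StepwiseSequential instance
    background_feedback=roi_background,   # detector measuring background speckle intensity
    frame_source=cam,                     # camera or other frame source
    shutter=shutter,                      # device that can block/unblock the light source
)
trouble.report()  # print the comprehensive troubleshooting report to the console
```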
diff --git a/docs/source/slms.rst b/docs/source/slms.rst
index a3043f1..b229bc5 100644
--- a/docs/source/slms.rst
+++ b/docs/source/slms.rst
@@ -14,7 +14,7 @@ The :meth:`~.PhaseSLM.set_phases()` method takes a scalar or a 2-D array of phas

Currently, there are two implementations of the `PhaseSLM` interface. The :class:`simulation.SLM` is used for simulating experiments and for testing algorithms (see :numref:`section-simulations`). The :class:`hardware.SLM` is an OpenGL-accelerated controller for using a phase-only SLM that is connected to the video output of a computer. The SLM can be created in windowed mode (useful for debugging), or full screen. It is possible to have multiple windowed SLMs on the same monitor, but only one full-screen SLM per monitor. In addition, the SLM implements some advanced features that are discussed below.

-At the time of writing, SLMs that are controlled through other interfaces than the video output are not supported. However, the interface of the `PhaseSLM` class is designed to accommodate these devices in the future. Through this interface, support for intensity-only light modulators (e. g. Digital Mirror Devices) operating in phase-modulation mode (e. g. :cite:`conkey2012high`) may also be added.
+At the time of writing, SLMs that are controlled through interfaces other than the video output are not supported. However, the interface of the `PhaseSLM` class is designed to accommodate these devices in the future. Through this interface, support for intensity-only light modulators (e.g. Digital Mirror Devices) operating in phase-modulation mode (e.g. :cite:`conkey2012high`) may also be added.

Texture mapping and blending
-----------------------------------
diff --git a/examples/slm_demo.py b/examples/slm_demo.py
index bbb64c9..7231e3b 100644
--- a/examples/slm_demo.py
+++ b/examples/slm_demo.py
@@ -1,7 +1,7 @@
"""
SLM Demo
========
-Example on how different geometries and patches work for an SLM. Currently uses SLM number 0, which is the left
+Example on how different geometries and patches work for an SLM. Currently, it uses SLM number 0, which is the left
upper corner of the primary monitor.

EPILEPSY WARNING: YOUR PRIMARY SCREEN MAY QUICKLY FLASH DURING RUNNING THIS FILE
diff --git a/openwfs/algorithms/basic_fourier.py b/openwfs/algorithms/basic_fourier.py
index 822bc0e..d7084fc 100644
--- a/openwfs/algorithms/basic_fourier.py
+++ b/openwfs/algorithms/basic_fourier.py
@@ -14,11 +14,12 @@ class FourierDualReference(DualReference):

    Improvements over [1]:

    - The set of plane waves is taken from a disk in k-space instead of a square.
-    - No overlap between the two halves is needed, instead the final stitching step is done using measurements already in the data set.
-    - When only a single target is optimized, the algorithm can be used in an iterative version to increase SNR during the measurument,
-      similar to [2].
+    - No overlap between the two halves is needed; instead, the final stitching step is done
+      using measurements already in the data set.
+    - When only a single target is optimized, the algorithm can be used in an iterative version
+      to increase SNR during the measurement, similar to [2].
-    [1]: Bahareh Mastiani, Gerwin Osnabrugge, and Ivo M. Vellekoop,
+    [1]: Bahareh Mastiani, Gerwin Osnabrugge, and Ivo M. Vellekoop,
      "Wavefront shaping for forward scattering," Opt. Express 30, 37436-37445 (2022)

    [2]: X. Tao, T. Lam, B. Zhu, et al., “Three-dimensional focusing through scattering media using conjugate adaptive
@@ -89,8 +90,8 @@ def _update_modes(self):
        modes = np.zeros((*self._slm_shape, len(k)), dtype=np.float32)
        for i, k_i in enumerate(k):
            # tilt generates a pattern from -2.0 to 2.0 (The convention for Zernike modes normalized to an RMS of 1).
-            # The natural step to take is the Abbe diffraction limit of the modulated part, which corresponds to a gradient
-            # from -π to π over the modulated part.
+            # The natural step to take is the Abbe diffraction limit of the modulated part,
+            # which corresponds to a gradient from -π to π over the modulated part.
            modes[..., i] = tilt(self._slm_shape, g=k_i * 0.5 * np.pi)

        self.phase_patterns = (modes, modes)
diff --git a/openwfs/algorithms/dual_reference.py b/openwfs/algorithms/dual_reference.py
index bffcff9..19c788e 100644
--- a/openwfs/algorithms/dual_reference.py
+++ b/openwfs/algorithms/dual_reference.py
@@ -17,8 +17,8 @@ class DualReference:
    A and B, as indicated by the boolean group_mask argument. The algorithm first keeps the pixels in group B fixed,
    and displays a sequence of patterns on the pixels of group A. It uses these measurements to construct an optimized
    wavefront that is displayed on the pixels of group A.
-    This process is then repeated for the pixels of group B, now using the *optimized* wavefront on group A as reference.
-    Optionally, the process can be repeated for a number of iterations, which each iteration using the current correction
+    This process is then repeated for the pixels of group B, now using the *optimized* wavefront on group A as
+    reference. Optionally, the process can be repeated for a number of iterations, with each iteration using the current correction
    pattern as a reference. This makes this algorithm suitable for non-linear feedback, such as multi-photon
    excitation fluorescence [Osnabrugge2019]_.

@@ -27,12 +27,12 @@ class DualReference:
    References
    ----------
-    .. [Tao2017] X. Tao, T. Lam, B. Zhu, et al., “Three-dimensional focusing through scattering media using conjugate adaptive
-       optics with remote focusing (CAORF),” Opt. Express 25, 10368–10383 (2017).
+    .. [Tao2017] X. Tao, T. Lam, B. Zhu, et al., “Three-dimensional focusing through scattering media using conjugate
+       adaptive optics with remote focusing (CAORF),” Opt. Express 25, 10368–10383 (2017).

-    .. [Osnabrugge2019] Gerwin Osnabrugge, Lyubov V. Amitonova, and Ivo M. Vellekoop. "Blind focusing through strongly scattering media
-       using wavefront shaping with nonlinear feedback", Optics Express, 27(8):11673–11688, 2019.
-       https://opg.optica.org/oe/ abstract.cfm?uri=oe-27-8-1167
+    .. [Osnabrugge2019] Gerwin Osnabrugge, Lyubov V. Amitonova, and Ivo M. Vellekoop. "Blind focusing through strongly
+       scattering media using wavefront shaping with nonlinear feedback", Optics Express, 27(8):11673–11688, 2019.
+       https://opg.optica.org/oe/abstract.cfm?uri=oe-27-8-1167
    """

@@ -53,9 +53,11 @@ def __init__(
        Args:
            feedback: The feedback source, usually a detector that provides measurement data.
            slm: Spatial light modulator object.
-            phase_patterns: A tuple of two 3D arrays, containing the phase patterns for group A and group B, respectively. 
+            phase_patterns:
+                A tuple of two 3D arrays, containing the phase patterns for group A and group B, respectively.
                The first two dimensions are the spatial dimensions, and should match the size of group_mask.
-                The 3rd dimension in the array is index of the phase pattern. The number of phase patterns in A and B may be different.
+                The 3rd dimension in the array is the index of the phase pattern.
+                The number of phase patterns in A and B may be different.
                When None, the phase_patterns attribute must be set before executing the algorithm.
            amplitude: Tuple of 2D arrays, one array for each group. The arrays have shape equal to the shape of
                group_mask. When None, the amplitude attribute must be set before executing the algorithm. When
@@ -68,14 +70,19 @@ def __init__(
                non-linear feedback and the SNR, more might be required.
            iterations: Number of times to optimize a mode set, e.g. when iterations = 5, the measurements are
                A, B, A, B, A.
-            optimized_reference: When `True`, during each iteration the other half of the SLM displays the optimized pattern so far (as in [1]).
-                When `False`, the algorithm optimizes A with a flat wavefront on B, and then optimizes B with a flat wavefront on A.
-                This mode also allows for multi-target optimization, where the algorithm optimizes multiple targets in parallel.
+            optimized_reference:
+                When `True`, during each iteration the other half of the SLM displays the optimized pattern so far (as in [1]).
+                When `False`, the algorithm optimizes A with a flat wavefront on B,
+                and then optimizes B with a flat wavefront on A.
+                This mode also allows for multi-target optimization,
+                where the algorithm optimizes multiple targets in parallel.
                The two halves are then combined (stitched) to form the full transmission matrix.
-                In this mode, it is essential that both A and B include a flat wavefront as mode 0. The measurement for
-                mode A0 and for B0 both give contain relative phase between group A and B, so there is a slight redundancy.
+                In this mode, it is essential that both A and B include a flat wavefront as mode 0.
+                The measurements for mode A0 and for B0 both contain the relative phase between group A and B,
+                so there is a slight redundancy.
                These two measurements are combined to find the final phase for stitching.
-                When set to `None` (default), the algorithm uses True if there is a single target, and False if there are multiple targets.
+                When set to `None` (default), the algorithm uses True if there is a single target,
+                and False if there are multiple targets.
            analyzer: The function used to analyze the phase stepping data.
                Must return a WFSResult object. Defaults to `analyze_phase_stepping`
diff --git a/openwfs/algorithms/genetic.py b/openwfs/algorithms/genetic.py
index c59b677..10426bb 100644
--- a/openwfs/algorithms/genetic.py
+++ b/openwfs/algorithms/genetic.py
@@ -33,7 +33,7 @@ class SimpleGenetic:
    ----------
    [^1]: Conkey D B, Brown A N, Caravaca-Aguirre A M and Piestun R 'Genetic algorithm optimization for focusing
        through turbid media in noisy environments' Opt. Express 20 4840–9 (2012).
-    [^2]: Benjamin R Anderson et al 'A modular GUI-based program for genetic algorithm-based
+    [^2]: Benjamin R Anderson et al. 'A modular GUI-based program for genetic algorithm-based
        feedback-assisted wavefront shaping', J. Phys. Photonics 6 045008 (2024). 
""" diff --git a/openwfs/algorithms/troubleshoot.py b/openwfs/algorithms/troubleshoot.py index 4a82a15..4c8c5fc 100644 --- a/openwfs/algorithms/troubleshoot.py +++ b/openwfs/algorithms/troubleshoot.py @@ -8,6 +8,7 @@ from ..core import Detector, PhaseSLM +# TODO: review, replace by numpy/scipy functions where possible, remove or hide functions that are too specific def signal_std(signal_with_noise: np.ndarray, noise: np.ndarray) -> float: """ Compute noise corrected standard deviation of signal measurement. @@ -39,7 +40,7 @@ def cnr(signal_with_noise: np.ndarray, noise: np.ndarray) -> np.float64: ND array containing the measured signal including noise. The noise is assumed to be uncorrelated with the signal, such that var(measured) = var(signal) + var(noise). noise: - ND array containing only noise, e. g. a dark frame. + ND array containing only noise, e.g. a dark frame. Returns: Standard deviation of the signal, corrected for the variance due to given noise. @@ -54,9 +55,9 @@ def contrast_enhancement(signal_with_noise: np.ndarray, reference_with_noise: np Args: signal_with_noise: - ND array containing the measured signal including noise, e. g. image signal with shaped wavefront. + ND array containing the measured signal including noise, e.g. image signal with shaped wavefront. reference_with_noise: - ND array containing a reference signal including noise, e. g. image signal with a flat wavefront. + ND array containing a reference signal including noise, e.g. image signal with a flat wavefront. noise: ND array containing only noise. @@ -95,7 +96,7 @@ def find_pixel_shift(f: np.ndarray, g: np.ndarray) -> tuple[int, ...]: def field_correlation(a: np.ndarray, b: np.ndarray) -> float: """ - Compute field correlation, i. e. inner product of two fields, normalized by the product of the L2 norms, + Compute field correlation, i.e. inner product of two fields, normalized by the product of the L2 norms, such that field_correlation(f, s*f) == 1, where s is a scalar value. Also known as normalized first order correlation :math:`g_1`. @@ -137,8 +138,8 @@ def pearson_correlation(a: np.ndarray, b: np.ndarray, noise_var: np.ndarray = 0. a_dev = a - a.mean() # Deviations from mean a b_dev = b - b.mean() # Deviations from mean b covar = (a_dev * b_dev).mean() # Covariance - a_var_signal = a.var() - noise_var # Variance of signal in a, excluding noise - b_var_signal = b.var() - noise_var # Variance of signal in b, excluding noise + a_var_signal = a.var() - noise_var # Variance of signal in ``a``, excluding noise + b_var_signal = b.var() - noise_var # Variance of signal in ``b``, excluding noise return covar / np.sqrt(a_var_signal * b_var_signal) @@ -542,9 +543,9 @@ def troubleshoot( Args: measure_non_modulated_phase_steps: - algorithm: Wavefront Shaping algorithm object, e. g. StepwiseSequential. + algorithm: Wavefront Shaping algorithm object, e.g. StepwiseSequential. background_feedback: Feedback source that determines average background speckle intensity. - frame_source: Source object for reading frames, e. g. Camera. + frame_source: Source object for reading frames, e.g. Camera. shutter: Device object that can block/unblock light source. do_frame_capture: Boolean. If False, skip frame capture before and after running the WFS algorithm. Also skips computation of corresponding metrics. Also skips stability test. 
diff --git a/openwfs/algorithms/utilities.py b/openwfs/algorithms/utilities.py index 3fa72e3..235cd35 100644 --- a/openwfs/algorithms/utilities.py +++ b/openwfs/algorithms/utilities.py @@ -12,9 +12,9 @@ class WFSResult: Attributes: t (ndarray): Measured transmission matrix. If multiple targets were used, the first dimension(s) of `t` denote the columns of the transmission matrix (`a` indices), and the last dimensions(s) denote the targets, - i. e., the rows of the transmission matrix (`b` indices). + i.e., the rows of the transmission matrix (`b` indices). axis (int): Number of dimensions used for denoting a single column of the transmission matrix - (e. g., 2 dimensions representing the x and y coordinates of the SLM pixels). + (e.g., 2 dimensions representing the x and y coordinates of the SLM pixels). fidelity_noise (ndarray): The estimated loss in fidelity caused by the limited SNR (for each target). fidelity_amplitude (ndarray): Estimated reduction of the fidelity due to phase-only modulation (for each target) (≈ π/4 for fully developed speckle). @@ -42,7 +42,7 @@ def __init__( t(ndarray): measured transmission matrix. axis(int): number of dimensions used for denoting a single columns of the transmission matrix - (e. g. 2 dimensions representing the x and y coordinates of the SLM pixels) + (e.g. 2 dimensions representing the x and y coordinates of the SLM pixels) fidelity_noise(ArrayLike): the estimated loss in fidelity caused by the the limited snr (for each target). fidelity_amplitude(ArrayLike): @@ -82,7 +82,7 @@ def __str__(self) -> str: noise_warning = "OK" if self.fidelity_noise > 0.5 else "WARNING low signal quality." amplitude_warning = "OK" if self.fidelity_amplitude > 0.5 else "WARNING uneven contribution of optical modes." calibration_fidelity_warning = ( - "OK" if self.fidelity_calibration > 0.5 else ("WARNING non-linear phase response, check " "lookup table.") + "OK" if self.fidelity_calibration > 0.5 else "WARNING non-linear phase response, check " "lookup table." ) return f""" Wavefront shaping results: @@ -144,7 +144,7 @@ def weighted_average(attribute): ) -def analyze_phase_stepping(measurements: np.ndarray, axis: int, A: Optional[float] = None): +def analyze_phase_stepping(measurements: np.ndarray, axis: int): """Analyzes the result of phase stepping measurements, returning matrix `t` and noise statistics This function assumes that all measurements were made using the same reference field `A` @@ -159,9 +159,6 @@ def analyze_phase_stepping(measurements: np.ndarray, axis: int, A: Optional[floa and the last zero or more dimensions corresponding to the individual targets where the feedback was measured. axis(int): indicates which axis holds the phase steps. - A(Optional[float]): magnitude of the reference field. - This value is used to correctly normalize the returned transmission matrix. - When missing, the value of `A` is estimated from the measurements. 
With `phase_steps` phase steps, the measurements are given by @@ -187,16 +184,7 @@ def analyze_phase_stepping(measurements: np.ndarray, axis: int, A: Optional[floa segments = tuple(range(axis)) # Fourier transform the phase stepping measurements - t_f_raw = np.fft.fft(measurements, axis=axis) / phase_steps - - if A is None: # reference field strength not known: estimate from data - t_abs = np.abs(np.take(t_f_raw, 1, axis=axis)) - offset = np.take(t_f_raw, 0, axis=axis) - a_plus_b = np.sqrt(offset + 2.0 * t_abs) - a_minus_b = np.sqrt(offset - 2.0 * t_abs) - A = 0.5 * np.mean(a_plus_b + a_minus_b) - - t_f = t_f_raw / A + t_f = np.fft.fft(measurements, axis=axis) / phase_steps t = np.take(t_f, 1, axis=axis) # compute the effect of amplitude variations. diff --git a/openwfs/core.py b/openwfs/core.py index 570622b..36ef6f5 100644 --- a/openwfs/core.py +++ b/openwfs/core.py @@ -58,8 +58,8 @@ def _start(self): This function changes the global state to 'moving' or 'measuring' if needed, and it may block until this state switch is completed. - After switching, stores the time at which the operation will have ended in the `_end_time_ns` - field (i. e., `time.time_ns() + self.latency + self.duration`). + After switching, stores the time at which the operation will have ended in the ``_end_time_ns`` + field (i.e., ``time.time_ns() + self.latency + self.duration``). """ # acquire a global lock, to prevent multiple threads to switch moving/measuring state simultaneously @@ -132,7 +132,7 @@ def duration(self) -> Quantity[u.ms]: )` and the stabilization of the device. If the duration of an operation is not known in advance, - (e. g., when waiting for a hardware trigger), this function should return `np.inf * u.ms`. + (e.g., when waiting for a hardware trigger), this function should return `np.inf * u.ms`. Note: A device may update the duration dynamically. For example, a stage may compute the required time to @@ -144,7 +144,7 @@ def duration(self) -> Quantity[u.ms]: return self._duration def wait(self, up_to: Optional[Quantity[u.ms]] = None) -> None: - """Waits until the device is (almost) in the `ready` state, i. e., has finished measuring or moving. + """Waits until the device is (almost) in the `ready` state, i.e., has finished measuring or moving. This function is called by `_start` automatically to ensure proper synchronization between detectors and actuators, and it is called by `__del__` to ensure the device is not active when it is destroyed. @@ -163,7 +163,7 @@ def wait(self, up_to: Optional[Quantity[u.ms]] = None) -> None: *before* the device is finished. Raises: - Any other exception raised by the device in another thread (e. g., during `_fetch`). + Any other exception raised by the device in another thread (e.g., during `_fetch`). TimeoutError: if the device has `duration = ∞`, and `busy` does not return `True` within `self.timeout` RuntimeError: if `wait` is called from inside a setter or from inside `_fetch`. @@ -260,7 +260,7 @@ def __init__( Subclassed can override the `pixel_size` property to return the actual pixel size. duration: The maximum amount of time that elapses between returning from `trigger()` and the end of the measurement. If the duration of an operation is not known in advance, - (e. g., when waiting for a hardware trigger), this value should be `np.inf * u.ms` + (e.g., when waiting for a hardware trigger), this value should be `np.inf * u.ms` and the `busy` method should be overridden to return `False` when the measurement is finished. 
If None is passed, the subclass should override the `duration` property to return the actual duration. latency: The minimum amount of time between sending a command or trigger to the device @@ -299,7 +299,8 @@ def wait(self, up_to: Quantity[u.ms] = None) -> None: explicitly when waiting for data to be stored in the `out` argument of :meth:`~.Detector.trigger()`. Args: - up_to: if specified, this function may return `up_to` milliseconds *before* the hardware has finished measurements. + up_to: if specified, this function may return `up_to` milliseconds *before* the hardware + has finished measurements. If None, this function waits until the hardware has finished all measurements *and* all data is fetched, and stored in the `out` array if that was passed to trigger(). @@ -315,8 +316,8 @@ def trigger(self, *args, out=None, immediate=False, **kwargs) -> Future: """Triggers the detector to start acquisition of the data. This function does not wait for the measurement to complete. - Instead, it returns a `concurrent.futures.Future`.. - Call `.result()` on the returned object to wait for the data. + Instead, it returns a ``concurrent.futures.Future``. + Call ``.result()`` on the returned object to wait for the data. Here is a typical usage pattern: .. code-block:: python @@ -486,13 +487,13 @@ def coordinates(self, dimension: int) -> Quantity: """Returns an array with the coordinate values along the d-th axis. The coordinates represent the _centers_ of the grid points. For example, - for an array of shape `(2,)` the coordinates are `[0.5, 1.5] * pixel_size` + for an array of shape ``(2,)`` the coordinates are `[0.5, 1.5] * pixel_size` and not `[0, 1] * pixel_size`. If `self.pixel_size is None`, a pixel size of 1.0 is used. The coordinates are returned as an array with the same number of dimensions as `data_shape`, with the d-th dimension holding the coordinates. - This facilitates meshgrid-like computations, e. g. + This facilitates meshgrid-like computations, e.g. `cam.coordinates(0) + cam.coordinates(1)` gives a 2-dimensional array of coordinates. Args: @@ -521,7 +522,7 @@ class Processor(Detector, ABC): """Base class for all Processors. Processors can be used to build data processing graphs, where each Processor takes input from one or - more input Detectors and processes that data (e. g., cropping an image, averaging over an ROI, etc.). + more input Detectors and processes that data (e.g., cropping an image, averaging over an ROI, etc.). A processor, itself, is a Detector to allow chaining multiple processors together to combine functionality. To implement a processor, implement `_fetch`, and optionally override `data_shape`, `pixel_size`, and `__init__`. @@ -569,7 +570,7 @@ def latency(self) -> Quantity[u.ms]: @property def duration(self) -> Quantity[u.ms]: """Returns the last end time minus the first start time for all detectors - i. e., max (duration + latency) - min(latency). + i.e., max (duration + latency) - min(latency). Note that `latency` is allowed to vary over time for devices that can only be triggered periodically, so this `duration` may also vary over time. diff --git a/openwfs/devices/camera.py b/openwfs/devices/camera.py index 0030e90..bbc6c6b 100644 --- a/openwfs/devices/camera.py +++ b/openwfs/devices/camera.py @@ -32,7 +32,7 @@ class Camera(Detector): The node map should not be used to set properties that are available as properties in the Camera object, such as `duration` (exposure time), `width`, `height`, `binning`, etc. 
- Also, the node map should not be used to set properties while the camera is fetching a frame (i. e., + Also, the node map should not be used to set properties while the camera is fetching a frame (i.e., between `trigger()` and calling `result()` on the returned concurrent.futures.Future object). Note: diff --git a/openwfs/devices/galvo_scanner.py b/openwfs/devices/galvo_scanner.py index 1fe513f..23f8811 100644 --- a/openwfs/devices/galvo_scanner.py +++ b/openwfs/devices/galvo_scanner.py @@ -163,7 +163,7 @@ def scan(self, start: float, stop: float, sample_count: int, sample_rate: Quanti The launch point and landing point are returned along with the scan sequence. This function also returns a slice object, which represents the part of the sequence - that corresponds to a linear movement from start to stop. `slice.stop - slice.start = sample_count`. + that corresponds to a linear movement from start to stop. ``slice.stop - slice.start = sample_count``. The scan follows the coordinate convention used throughout OpenWFS and Astropy, where the coordinates correspond to the centers of the pixels. @@ -221,7 +221,7 @@ def compute_scale( Args: optical_deflection (Quantity[u.deg/u.V]): - The optical deflection (i. e. twice the mechanical angle) of the mirror + The optical deflection (i.e. twice the mechanical angle) of the mirror as a function of applied voltage. galvo_to_pupil_magnification (float): The magnification of the relay system between the galvo mirrors and the pupil. @@ -256,7 +256,7 @@ def compute_acceleration( Args: optical_deflection (Quantity[u.deg/u.V]): - The optical deflection (i. e. twice the mechanical angle) of the mirror + The optical deflection (i.e. twice the mechanical angle) of the mirror as a function of applied voltage. torque_constant (Quantity[u.N*u.m/u.A]): The torque constant of the galvo mirror driving coil. @@ -676,7 +676,7 @@ def pixel_size(self) -> Quantity: @property def duration(self) -> Quantity[u.ms]: """Total duration of scanning for one frame.""" - self._ensure_valid() # make sure _scan_pattern is up to date + self._ensure_valid() # make sure _scan_pattern is up-to-date return (self._scan_pattern.shape[1] / self._sample_rate).to(u.ms) @property diff --git a/openwfs/devices/slm/geometry.py b/openwfs/devices/slm/geometry.py index 560257d..3e492b7 100644 --- a/openwfs/devices/slm/geometry.py +++ b/openwfs/devices/slm/geometry.py @@ -23,7 +23,7 @@ class Geometry: To start a new triangle strip, insert the special index 0xFFFF into the index array. (tx, ty) are the texture coordinates that determine which pixel of the texture - (e. g. the array passed to `set_phases`) is drawn at each vertex. + (e.g. the array passed to `set_phases`) is drawn at each vertex. For each triangle, the screen coordinates (x,y) define a triangle on the screen, whereas the texture coordinates (tx, ty) define a triangle in the texture. OpenGL maps the texture triangle onto the screen triangle, using linear interpolation of the coordinates between diff --git a/openwfs/devices/slm/patch.py b/openwfs/devices/slm/patch.py index eb27dc9..66d2c8e 100644 --- a/openwfs/devices/slm/patch.py +++ b/openwfs/devices/slm/patch.py @@ -185,7 +185,7 @@ def __init__(self, slm, lookup_table: Optional[Sequence[int]], bit_depth: int): slm: SLM object that this patch belongs to lookup_table: 1-D array of gray values that will be used to map the phase values to the gray-scale output. see :attr:`~SLM.lookup_table` for details. - bit_depth: bit depth of the SLM. 
The maximum value in the lookup table can be 2**bit_depth - 1.
+        bit_depth: The bit depth of the SLM. The maximum value in the lookup table can be 2**bit_depth - 1.
             Note: this maximum value is mapped to 1.0 in the opengl shader,
             and converted back to 2**bit_depth by the opengl hardware.
         """
diff --git a/openwfs/devices/slm/shaders.py b/openwfs/devices/slm/shaders.py
index 966f22d..02fb628 100644
--- a/openwfs/devices/slm/shaders.py
+++ b/openwfs/devices/slm/shaders.py
@@ -33,7 +33,7 @@
 # the range -δ to δ maps to a gray value 0 instead of
 # negative values mapping to 255 and positive values mapping to 0.
 # Since the lookup table texture is configured to use GL_WRAP,
-# only the fractional part of texCoord is used (i. e., texCoord - floor(texCoord)).
+# only the fractional part of texCoord is used (i.e., texCoord - floor(texCoord)).
 #
 post_process_fragment_shader = """
 #version 440 core
diff --git a/openwfs/devices/slm/slm.py b/openwfs/devices/slm/slm.py
index 5ab67b5..e7bdb62 100644
--- a/openwfs/devices/slm/slm.py
+++ b/openwfs/devices/slm/slm.py
@@ -206,7 +206,7 @@ def _on_resize(self):
         """Updates shape and refresh rate to the actual values of the window.
 
         Note that these values are in pixels, which may be different from the window size because the window size is
-        in screen coordinates, which may not always the same as pixels (e. g. on a retina display).
+        in screen coordinates, which may not always be the same as pixels (e.g. on a retina display).
 
         For windowed SLMs, the refresh rate property is set to the refresh rate of the primary monitor.
 
@@ -407,7 +407,7 @@ def update(self):
 
         Note:
             At the moment, :meth:`~.SLM.update` blocks until all OpenGL commands are processed,
-            and a vertical retrace occurs (i. e., the hardware signals the start of a new frame).
+            and a vertical retrace occurs (i.e., the hardware signals the start of a new frame).
             This behavior may change in the future and should not be relied on.
             Instead, use the automatic synchronization mechanism to synchronize detectors with the SLM hardware.
 
diff --git a/openwfs/plot_utilities.py b/openwfs/plot_utilities.py
index 690f274..fcfd450 100644
--- a/openwfs/plot_utilities.py
+++ b/openwfs/plot_utilities.py
@@ -1,16 +1,19 @@
 from typing import Tuple, Union, Optional, Dict
 
 import numpy as np
-from numpy import ndarray as nd
 from astropy import units as u
 from matplotlib import pyplot as plt
-from matplotlib.colors import hsv_to_rgb
 from matplotlib.axes import Axes
+from matplotlib.colors import hsv_to_rgb
+from numpy import ndarray as nd
 
 from .core import Detector
 from .utilities import get_extent
 
+# TODO: needs review and documentation. Remove single-use functions, simplify code.
+
+
 def grab_and_show(cam: Detector, axis=None):
     return imshow(cam.read(), axis=axis)
 
@@ -169,6 +172,7 @@ def complex_colorwheel(
 ):
     """
     Create an rgb image for a colorwheel representing the complex unit circle.
+    TODO: needs review
 
     Args:
         ax: Matplotlib Axes.
diff --git a/openwfs/simulation/microscope.py b/openwfs/simulation/microscope.py
index 15f7274..4b62746 100644
--- a/openwfs/simulation/microscope.py
+++ b/openwfs/simulation/microscope.py
@@ -76,7 +76,7 @@ def __init__(
             incident_field: Produces 2-D complex images containing the field output of the SLM.
                 If no `slm_transform` is specified, the `pixel_size` attribute should correspond to normalized pupil coordinates
-                (e. g. with a disk of radius 1.0, i. e. an extent of 2.0, corresponding to an NA of 1.0)
+                (e.g. with a disk of radius 1.0, i.e.
an extent of 2.0, corresponding to an NA of 1.0) incident_transform (Optional[Transform]): Optional Transform that transforms the phase pattern from the slm object (in slm.pixel_size units) to normalized pupil coordinates. @@ -91,7 +91,7 @@ def __init__( Optional Transform that transforms the phase pattern from the aberration object (in slm.pixel_size units) to normalized pupil coordinates. Typically, the slm image is already in normalized pupil coordinates, - but this transform may e. g., be used to scale an aberration pattern + but this transform may e.g., be used to scale an aberration pattern from extent 2.0 to 2.0 * NA. Note: diff --git a/openwfs/simulation/slm.py b/openwfs/simulation/slm.py index 9cf682e..97ebd59 100644 --- a/openwfs/simulation/slm.py +++ b/openwfs/simulation/slm.py @@ -27,7 +27,7 @@ def __init__( Args: slm_phases: The `Detector` that returns the phases of the slm pixels. field_amplitude: Field amplitude of the modulated pixels. - non_modulated_field_fraction: Non-modulated field (e. g. a front reflection). + non_modulated_field_fraction: Non-modulated field (e.g. a front reflection). """ super().__init__(slm_phases, multi_threaded=False) self.modulated_field_amplitude = field_amplitude diff --git a/openwfs/simulation/transmission.py b/openwfs/simulation/transmission.py index f0c7a12..99eeaa1 100644 --- a/openwfs/simulation/transmission.py +++ b/openwfs/simulation/transmission.py @@ -38,11 +38,11 @@ def __init__( Args: t: Transmission matrix. aberrations: An array containing the aberrations in radians. Can be used instead of a transmission matrix, - equivalent to specifying t = np.exp(1j * aberrations) / (aberrations.shape[0] * aberrations.shape[1]). + equivalent to specifying ``t = np.exp(1j * aberrations) / (aberrations.shape[0] * aberrations.shape[1])``. slm: multi_threaded (bool, optional): If True, the simulation will use multiple threads to compute the intensity in the focus. If False, the simulation will use a single thread. Defaults to True. - beam_amplitude (ScalarType, optional): The beam profile amplitude. Can be an np.ndarray. Defaults to 1.0. + beam_amplitude (ScalarType, optional): The amplitude profile of the incident beam. Defaults to 1.0. The constructor creates a MockSLM instance based on the shape of the aberrations, calculates the electric field at the SLM considering the aberrations and optionally the Gaussian beam profile, and initializes the diff --git a/openwfs/utilities/patterns.py b/openwfs/utilities/patterns.py index ca650fa..548a8a2 100644 --- a/openwfs/utilities/patterns.py +++ b/openwfs/utilities/patterns.py @@ -103,7 +103,7 @@ def tilt( corresponds to having a ramp from -2 to +2 over the height of the pattern When this pattern is used as a phase in a pupil-conjugate configuration, this corresponds to a displacement of -2/π times the Abbe diffraction limit - (e. g. a positive x-gradient g causes the focal point to move in the _negative_ x-direction) + (e.g. a positive x-gradient g causes the focal point to move in the _negative_ x-direction) extent: see module documentation phase_offset: optional additional phase offset to be added to the pattern """ @@ -183,7 +183,7 @@ def gaussian( Args: shape: see module documentation waist (ScalarType): location of the beam waist (1/e value) - relative to half of the size of the pattern (i. e. relative to the `radius` of the square) + relative to half of the size of the pattern (i.e. 
relative to the `radius` of the square) truncation_radius (ScalarType): when not None, specifies the radius of a disk that is used to truncate the Gaussian. All values outside the disk are set to 0. extent: see module documentation diff --git a/openwfs/utilities/utilities.py b/openwfs/utilities/utilities.py index 5102792..a81caf8 100644 --- a/openwfs/utilities/utilities.py +++ b/openwfs/utilities/utilities.py @@ -293,7 +293,7 @@ def project( The input image is scaled so that the pixel sizes match those of the output, and cropped/zero-padded so that the data shape matches that of the output. - Optionally, an additional transformation can be specified, e. g., to scale or translate the source image. + Optionally, an additional transformation can be specified, e.g., to scale or translate the source image. This transformation is specified as a 2x3 transformation matrix in homogeneous coordinates. Args: diff --git a/tests/test_algorithms_troubleshoot.py b/tests/test_algorithms_troubleshoot.py index daae1a1..515018d 100644 --- a/tests/test_algorithms_troubleshoot.py +++ b/tests/test_algorithms_troubleshoot.py @@ -70,7 +70,7 @@ def test_find_pixel_shift(): def test_field_correlation(): """ - Test the field correlation, i. e. g_1 normalized first order correlation function. + Test the field correlation, i.e. g_1 normalized first order correlation function. """ a = np.zeros(shape=(2, 3)) a[1, 2] = 2.0 @@ -93,7 +93,7 @@ def test_field_correlation(): def test_frame_correlation(): """ - Test the frame correlation, i. e. g_2 normalized second order correlation function. + Test the frame correlation, i.e. g_2 normalized second order correlation function. Test the following: g_2 correlation with self == 1/3 for distribution from `random.rand` g_2 correlation with other == 0 diff --git a/tests/test_scanning_microscope.py b/tests/test_scanning_microscope.py index fadee2d..83e8e5f 100644 --- a/tests/test_scanning_microscope.py +++ b/tests/test_scanning_microscope.py @@ -186,30 +186,4 @@ def test_park_beam(bidirectional): assert np.allclose(voltages[1, :], voltages[1, 0]) # all voltages should be the same assert np.allclose(voltages[0, :], voltages[0, 0]) # all voltages should be the same - -# test zooming -# ps = scanner.pixel_size -# scanner.zoom = 2.0 -# assert np.allclose(scanner.pixel_size, ps * 0.5) -# assert scanner.width == width -# assert scanner.height == height -# assert scanner.data_shape == (height, width) -# assert scanner.left == np.floor(2 * left + 0.5 * width) -# assert scanner.top == np.floor(2 * top + 0.5 * height) - -# zoomed = scanner.read().astype('float32') - 0x8000 -# scaled = place(zoomed.shape, 0.5 * ps, set_pixel_size(roi, ps)) -# assert np.allclose(get_pixel_size(scaled), 0.5 * ps) -# step = zoomed[1, 1] - zoomed[0, 0] -# assert np.allclose(zoomed, scaled - step / 2, atol=0.5 * step) - -# scanner.zoom = 1.0 -# reset_zoom = scanner.read().astype('float32') - 0x8000 -# assert np.allclose(reset_zoom, roi) - -# test setting dwell time -# original_duration = scanner.duration -# scanner.delay = 1.0 -# scanner.dwell_time = scanner.dwell_time * 2.0 -# assert scanner.duration == original_duration * 2.0 -# assert scanner.delay == 0.5 + # TODO: add test for zooming diff --git a/tests/test_simulation.py b/tests/test_simulation.py index 87daf5a..4fdfd4d 100644 --- a/tests/test_simulation.py +++ b/tests/test_simulation.py @@ -194,7 +194,7 @@ def inverse_phase_response_test_function(f, b, c, gamma): def lookup_table_test_function(f, b, c, gamma): """ - Compute the lookup indices (i. e. 
a lookup table) + Compute the lookup indices (i.e. a lookup table) for countering the synthetic phase response test function: 2π*(b + c*(phi/2π)^gamma). """ phase = inverse_phase_response_test_function(f, b, c, gamma)
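
For reference, the synthetic phase response used in this test maps a phase φ to 2π·(b + c·(φ/2π)^γ), and the
lookup table is obtained by inverting that map. A minimal sketch of the inversion under those assumptions, with
illustrative helper names and arbitrary example values for b, c and gamma (not the test's own functions):

    import numpy as np

    def inverse_phase_response(f, b, c, gamma):
        # Solve 2*pi*(b + c*(phi/(2*pi))**gamma) = f for phi.
        x = np.clip(f / (2 * np.pi) - b, 0.0, None) / c  # clip keeps the fractional power real
        return 2 * np.pi * x ** (1 / gamma)

    # Gray values 0..255 mapped to the phases that linearize the response of an 8-bit SLM.
    f = np.arange(256) / 256 * 2 * np.pi
    phi = inverse_phase_response(f, b=0.05, c=0.9, gamma=1.5)
    lut = np.clip(np.rint(phi / (2 * np.pi) * 255), 0, 255).astype(np.uint8)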