diff --git a/poetry.lock b/poetry.lock index 274e5942..f4a243ad 100644 --- a/poetry.lock +++ b/poetry.lock @@ -184,6 +184,17 @@ files = [ {file = "blinker-1.8.2.tar.gz", hash = "sha256:8f77b09d3bf7c795e969e9486f39c2c5e9c39d4ee07424be2bc594ece9642d83"}, ] +[[package]] +name = "cachelib" +version = "0.9.0" +description = "A collection of cache libraries in the same API interface." +optional = true +python-versions = ">=3.7" +files = [ + {file = "cachelib-0.9.0-py3-none-any.whl", hash = "sha256:811ceeb1209d2fe51cd2b62810bd1eccf70feba5c52641532498be5c675493b3"}, + {file = "cachelib-0.9.0.tar.gz", hash = "sha256:38222cc7c1b79a23606de5c2607f4925779e37cdcea1c2ad21b8bae94b5425a5"}, +] + [[package]] name = "certifi" version = "2024.8.30" @@ -195,6 +206,54 @@ files = [ {file = "certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"}, ] +[[package]] +name = "cftime" +version = "1.6.4.post1" +description = "Time-handling functionality from netcdf4-python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "cftime-1.6.4.post1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0baa9bc4850929da9f92c25329aa1f651e2d6f23e237504f337ee9e12a769f5d"}, + {file = "cftime-1.6.4.post1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6bb6b087f4b2513c37670bccd457e2a666ca489c5f2aad6e2c0e94604dc1b5b9"}, + {file = "cftime-1.6.4.post1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7d9bdeb9174962c9ca00015190bfd693de6b0ec3ec0b3dbc35c693a4f48efdcc"}, + {file = "cftime-1.6.4.post1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e735cfd544878eb94d0108ff5a093bd1a332dba90f979a31a357756d609a90d5"}, + {file = "cftime-1.6.4.post1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:1dcd1b140bf50da6775c56bd7ca179e84bd258b2f159b53eefd5c514b341f2bf"}, + {file = "cftime-1.6.4.post1-cp310-cp310-win_amd64.whl", hash = "sha256:e60b8f24b20753f7548f410f7510e28b941f336f84bd34e3cfd7874af6e70281"}, + {file = "cftime-1.6.4.post1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1bf7be0a0afc87628cb8c8483412aac6e48e83877004faa0936afb5bf8a877ba"}, + {file = "cftime-1.6.4.post1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0f64ca83acc4e3029f737bf3a32530ffa1fbf53124f5bee70b47548bc58671a7"}, + {file = "cftime-1.6.4.post1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7ebdfd81726b0cfb8b524309224fa952898dfa177c13d5f6af5b18cefbf497d"}, + {file = "cftime-1.6.4.post1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c9ea0965a4c87739aebd84fe8eed966e5809d10065eeffd35c99c274b6f8da15"}, + {file = "cftime-1.6.4.post1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:800a18aea4e8cb2b206450397cb8a53b154798738af3cdd3c922ce1ca198b0e6"}, + {file = "cftime-1.6.4.post1-cp311-cp311-win_amd64.whl", hash = "sha256:5dcfc872f455db1f12eabe3c3ba98e93757cd60ed3526a53246e966ccde46c8a"}, + {file = "cftime-1.6.4.post1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a590f73506f4704ba5e154ef55bfbaed5e1b4ac170f3caeb8c58e4f2c619ee4e"}, + {file = "cftime-1.6.4.post1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:933cb10e1af4e362e77f513e3eb92b34a688729ddbf938bbdfa5ac20a7f44ba0"}, + {file = "cftime-1.6.4.post1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf17a1b36f62e9e73c4c9363dd811e1bbf1170f5ac26d343fb26012ccf482908"}, + {file = "cftime-1.6.4.post1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:8e18021f421aa26527bad8688c1acf0c85fa72730beb6efce969c316743294f2"}, + {file = "cftime-1.6.4.post1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:5835b9d622f9304d1c23a35603a0f068739f428d902860f25e6e7e5a1b7cd8ea"}, + {file = "cftime-1.6.4.post1-cp312-cp312-win_amd64.whl", hash = "sha256:7f50bf0d1b664924aaee636eb2933746b942417d1f8b82ab6c1f6e8ba0da6885"}, + {file = "cftime-1.6.4.post1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5c89766ebf088c097832ea618c24ed5075331f0b7bf8e9c2d4144aefbf2f1850"}, + {file = "cftime-1.6.4.post1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7f27113f7ccd1ca32881fdcb9a4bec806a5f54ae621fc1c374f1171f3ed98ef2"}, + {file = "cftime-1.6.4.post1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da367b23eea7cf4df071c88e014a1600d6c5bbf22e3393a4af409903fa397e28"}, + {file = "cftime-1.6.4.post1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6579c5c83cdf09d73aa94c7bc34925edd93c5f2c7dd28e074f568f7e376271a0"}, + {file = "cftime-1.6.4.post1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6b731c7133d17b479ca0c3c46a7a04f96197f0a4d753f4c2284c3ff0447279b4"}, + {file = "cftime-1.6.4.post1-cp313-cp313-win_amd64.whl", hash = "sha256:d2a8c223faea7f1248ab469cc0d7795dd46f2a423789038f439fee7190bae259"}, + {file = "cftime-1.6.4.post1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9df3e2d49e548c62d1939e923800b08d2ab732d3ac8d75b857edd7982c878552"}, + {file = "cftime-1.6.4.post1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2892b7e7654142d825655f60eb66c3e1af745901890316907071d44cf9a18d8a"}, + {file = "cftime-1.6.4.post1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4ab54e6c04e68395d454cd4001188fc4ade2fe48035589ed65af80c4527ef08"}, + {file = "cftime-1.6.4.post1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:568b69fc52f406e361db62a4d7a219c6fb0ced138937144c3b3a511648dd6c50"}, + {file = "cftime-1.6.4.post1-cp38-cp38-win_amd64.whl", hash = "sha256:640911d2629f4a8f81f6bc0163a983b6b94f86d1007449b8cbfd926136cda253"}, + {file = "cftime-1.6.4.post1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:44e9f8052600803b55f8cb6bcac2be49405c21efa900ec77a9fb7f692db2f7a6"}, + {file = "cftime-1.6.4.post1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a90b6ef4a3fc65322c212a2c99cec75d1886f1ebaf0ff6189f7b327566762222"}, + {file = "cftime-1.6.4.post1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:652700130dbcca3ae36dbb5b61ff360e62aa09fabcabc42ec521091a14389901"}, + {file = "cftime-1.6.4.post1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24a7fb6cc541a027dab37fdeb695f8a2b21cd7d200be606f81b5abc38f2391e2"}, + {file = "cftime-1.6.4.post1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:fc2c0abe2dbd147e1b1e6d0f3de19a5ea8b04956acc204830fd8418066090989"}, + {file = "cftime-1.6.4.post1-cp39-cp39-win_amd64.whl", hash = "sha256:0ee2f5af8643aa1b47b7e388763a1a6e0dc05558cd2902cffb9cbcf954397648"}, + {file = "cftime-1.6.4.post1.tar.gz", hash = "sha256:50ac76cc9f10ab7bd46e44a71c51a6927051b499b4407df4f29ab13d741b942f"}, +] + +[package.dependencies] +numpy = {version = ">1.13.3", markers = "python_version < \"3.12.0.rc1\""} + [[package]] name = "charset-normalizer" version = "3.3.2" @@ -490,6 +549,27 @@ files = [ {file = "dash_dangerously_set_inner_html-0.0.2.tar.gz", hash = "sha256:d7fe990755851fc4d2e22c8f10b7aea055cabf380bbceefba589779b269fea64"}, ] +[[package]] +name = "dash-extensions" +version = "1.0.15" +description = 
"Extensions for Plotly Dash." +optional = true +python-versions = "<4,>=3.8" +files = [ + {file = "dash_extensions-1.0.15-py3-none-any.whl", hash = "sha256:4cb04dfeb20d95498a780dc16109bdb283ff98855d412973748e108360f12fbe"}, + {file = "dash_extensions-1.0.15.tar.gz", hash = "sha256:01401ae67a00407a458827149549b549cea2c58db77dc5d5bc144e275aed3217"}, +] + +[package.dependencies] +dash = ">=2.15.0" +dataclass-wizard = ">=0.22.2,<0.23.0" +Flask-Caching = ">=2.1.0,<3.0.0" +jsbeautifier = ">=1.14.3,<2.0.0" +more-itertools = ">=9.0.0,<10.0.0" + +[package.extras] +mantine = ["dash-mantine-components (>=0.14.3,<0.15.0)"] + [[package]] name = "dash-html-components" version = "2.0.0" @@ -512,6 +592,25 @@ files = [ {file = "dash_table-5.0.0.tar.gz", hash = "sha256:18624d693d4c8ef2ddec99a6f167593437a7ea0bf153aa20f318c170c5bc7308"}, ] +[[package]] +name = "dataclass-wizard" +version = "0.22.3" +description = "Marshal dataclasses to/from JSON. Use field properties with initial values. Construct a dataclass schema with JSON input." +optional = true +python-versions = "*" +files = [ + {file = "dataclass-wizard-0.22.3.tar.gz", hash = "sha256:4c46591782265058f1148cfd1f54a3a91221e63986fdd04c9d59f4ced61f4424"}, + {file = "dataclass_wizard-0.22.3-py2.py3-none-any.whl", hash = "sha256:63751203e54b9b9349212cc185331da73c1adc99c51312575eb73bb5c00c1962"}, +] + +[package.dependencies] +typing-extensions = {version = ">=3.7.4.2", markers = "python_version <= \"3.9\""} + +[package.extras] +dev = ["Sphinx (==5.3.0)", "bump2version (==1.0.1)", "coverage (>=6.2)", "dataclass-factory (==2.12)", "dataclasses-json (==0.5.6)", "flake8 (>=3)", "jsons (==1.6.1)", "pip (>=21.3.1)", "pytest (==7.0.1)", "pytest-cov (==3.0.0)", "pytest-mock (>=3.6.1)", "pytimeparse (==1.1.8)", "sphinx-issues (==3.0.1)", "sphinx-issues (==4.0.0)", "tox (==3.24.5)", "twine (==3.8.0)", "watchdog[watchmedo] (==2.1.6)", "wheel (==0.37.1)", "wheel (==0.42.0)"] +timedelta = ["pytimeparse (>=1.1.7)"] +yaml = ["PyYAML (>=5.3)"] + [[package]] name = "datadog-api-client" version = "2.28.0" @@ -676,11 +775,24 @@ profile = ["gprof2dot (>=2022.7.29)"] [[package]] name = "docutils" -version = "0.21" +version = "0.20.1" description = "Docutils -- Python Documentation Utilities" optional = true +python-versions = ">=3.7" +files = [ + {file = "docutils-0.20.1-py3-none-any.whl", hash = "sha256:96f387a2c5562db4476f09f13bbab2192e764cac08ebbf3a34a95d9b1e4a59d6"}, + {file = "docutils-0.20.1.tar.gz", hash = "sha256:f08a4e276c3a1583a86dce3e34aba3fe04d02bba2dd51ed16106244e8a923e3b"}, +] + +[[package]] +name = "editorconfig" +version = "0.12.4" +description = "EditorConfig File Locator and Interpreter for Python" +optional = true python-versions = "*" -files = [] +files = [ + {file = "EditorConfig-0.12.4.tar.gz", hash = "sha256:24857fa1793917dd9ccf0c7810a07e05404ce9b823521c7dce22a4fb5d125f80"}, +] [[package]] name = "exceptiongroup" @@ -749,6 +861,21 @@ Werkzeug = ">=3.0.0" async = ["asgiref (>=3.2)"] dotenv = ["python-dotenv"] +[[package]] +name = "flask-caching" +version = "2.3.0" +description = "Adds caching support to Flask applications." 
+optional = true +python-versions = ">=3.8" +files = [ + {file = "Flask_Caching-2.3.0-py3-none-any.whl", hash = "sha256:51771c75682e5abc1483b78b96d9131d7941dc669b073852edfa319dd4e29b6e"}, + {file = "flask_caching-2.3.0.tar.gz", hash = "sha256:d7e4ca64a33b49feb339fcdd17e6ba25f5e01168cf885e53790e885f83a4d2cf"}, +] + +[package.dependencies] +cachelib = ">=0.9.0,<0.10.0" +Flask = "*" + [[package]] name = "fonttools" version = "4.54.0" @@ -1155,6 +1282,30 @@ files = [ {file = "joblib-1.4.2.tar.gz", hash = "sha256:2382c5816b2636fbd20a09e0f4e9dad4736765fdfb7dca582943b9c1366b3f0e"}, ] +[[package]] +name = "jsbeautifier" +version = "1.15.1" +description = "JavaScript unobfuscator and beautifier." +optional = true +python-versions = "*" +files = [ + {file = "jsbeautifier-1.15.1.tar.gz", hash = "sha256:ebd733b560704c602d744eafc839db60a1ee9326e30a2a80c4adb8718adc1b24"}, +] + +[package.dependencies] +editorconfig = ">=0.12.2" +six = ">=1.13.0" + +[[package]] +name = "kaleido" +version = "0.2.1.post1" +description = "Static image export for web-based visualization libraries with zero dependencies" +optional = true +python-versions = "*" +files = [ + {file = "kaleido-0.2.1.post1-py2.py3-none-manylinux2014_armv7l.whl", hash = "sha256:d313940896c24447fc12c74f60d46ea826195fc991f58569a6e73864d53e5c20"}, +] + [[package]] name = "kiwisolver" version = "1.4.7" @@ -1472,6 +1623,17 @@ files = [ {file = "mccabe-0.6.1.tar.gz", hash = "sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"}, ] +[[package]] +name = "more-itertools" +version = "9.1.0" +description = "More routines for operating on iterables, beyond itertools" +optional = true +python-versions = ">=3.7" +files = [ + {file = "more-itertools-9.1.0.tar.gz", hash = "sha256:cabaa341ad0389ea83c17a94566a53ae4c9d07349861ecb14dc6d0345cf9ac5d"}, + {file = "more_itertools-9.1.0-py3-none-any.whl", hash = "sha256:d2bc7f02446e86a68911e58ded76d6561eea00cddfb2a91e7019bbb586c799f3"}, +] + [[package]] name = "multidict" version = "6.1.0" @@ -1598,6 +1760,53 @@ files = [ {file = "nest_asyncio-1.6.0.tar.gz", hash = "sha256:6f172d5449aca15afd6c646851f4e31e02c598d553a667e38cafa997cfec55fe"}, ] +[[package]] +name = "netcdf4" +version = "1.7.2" +description = "Provides an object-oriented python interface to the netCDF version 4 library" +optional = false +python-versions = ">=3.8" +files = [ + {file = "netCDF4-1.7.2-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:5e9b485e3bd9294d25ff7dc9addefce42b3d23c1ee7e3627605277d159819392"}, + {file = "netCDF4-1.7.2-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:118b476fd00d7e3ab9aa7771186d547da645ae3b49c0c7bdab866793ebf22f07"}, + {file = "netCDF4-1.7.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:abe5b1837ff209185ecfe50bd71884c866b3ee69691051833e410e57f177e059"}, + {file = "netCDF4-1.7.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28021c7e886e5bccf9a8ce504c032d1d7f98d86f67495fb7cf2c9564eba04510"}, + {file = "netCDF4-1.7.2-cp310-cp310-win_amd64.whl", hash = "sha256:7460b638e41c8ce4179d082a81cb6456f0ce083d4d959f4d9e87a95cd86f64cb"}, + {file = "netCDF4-1.7.2-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:09d61c2ddb6011afb51e77ea0f25cd0bdc28887fb426ffbbc661d920f20c9749"}, + {file = "netCDF4-1.7.2-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:fd2a16dbddeb8fa7cf48c37bfc1967290332f2862bb82f984eec2007bb120aeb"}, + {file = "netCDF4-1.7.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:f54f5d39ffbcf1726a1e6fd90cb5fa74277ecea739a5fa0f424636d71beafe24"}, + {file = "netCDF4-1.7.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:902aa50d70f49d002d896212a171d344c38f7b8ca520837c56c922ac1535c4a3"}, + {file = "netCDF4-1.7.2-cp311-cp311-win_amd64.whl", hash = "sha256:3291f9ad0c98c49a4dd16aefad1a9abd3a1b884171db6c81bdcee94671cfabe3"}, + {file = "netCDF4-1.7.2-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:e73e3baa0b74afc414e53ff5095748fdbec7fb346eda351e567c23f2f0d247f1"}, + {file = "netCDF4-1.7.2-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:a51da09258b31776f474c1d47e484fc7214914cdc59edf4cee789ba632184591"}, + {file = "netCDF4-1.7.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb95b11804fe051897d1f2044b05d82a1847bc2549631cdd2f655dde7de77a9c"}, + {file = "netCDF4-1.7.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9d8a848373723f41ef662590b4f5e1832227501c9fd4513e8ad8da58c269977"}, + {file = "netCDF4-1.7.2-cp312-cp312-win_amd64.whl", hash = "sha256:568ea369e00b581302d77fc5fd0b8f78e520c7e08d0b5af5219ba51f3f1cd694"}, + {file = "netCDF4-1.7.2-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:205a5f1de3ddb993c7c97fb204a923a22408cc2e5facf08d75a8eb89b3e7e1a8"}, + {file = "netCDF4-1.7.2-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:96653fc75057df196010818367c63ba6d7e9af603df0a7fe43fcdad3fe0e9e56"}, + {file = "netCDF4-1.7.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:30d20e56b9ba2c48884eb89c91b63e6c0612b4927881707e34402719153ef17f"}, + {file = "netCDF4-1.7.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d6bfd38ba0bde04d56f06c1554714a2ea9dab75811c89450dc3ec57a9d36b80"}, + {file = "netCDF4-1.7.2-cp313-cp313-win_amd64.whl", hash = "sha256:5c5fbee6134ee1246c397e1508e5297d825aa19221fdf3fa8dc9727ad824d7a5"}, + {file = "netCDF4-1.7.2-cp38-cp38-macosx_12_0_x86_64.whl", hash = "sha256:6bf402c2c7c063474576e5cf89af877d0b0cd097d9316d5bc4fcb22b62f12567"}, + {file = "netCDF4-1.7.2-cp38-cp38-macosx_14_0_arm64.whl", hash = "sha256:5bdf3b34e6fd4210e34fdc5d1a669a22c4863d96f8a20a3928366acae7b3cbbb"}, + {file = "netCDF4-1.7.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:657774404b9f78a5e4d26506ac9bfe106e4a37238282a70803cc7ce679c5a6cc"}, + {file = "netCDF4-1.7.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e896d92f01fbf365e33e2513d5a8c4cfe16ff406aae9b6034e5ba1538c8c7a8"}, + {file = "netCDF4-1.7.2-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:eb87c08d1700fe67c301898cf5ba3a3e1f8f2fbb417fcd0e2ac784846b60b058"}, + {file = "netCDF4-1.7.2-cp39-cp39-macosx_14_0_arm64.whl", hash = "sha256:59b403032774c723ee749d7f2135be311bad7d00d1db284bebfab58b9d5cdb92"}, + {file = "netCDF4-1.7.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:572f71459ef4b30e8554dcc4e1e6f55de515acc82a50968b48fe622244a64548"}, + {file = "netCDF4-1.7.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f77e72281acc5f331f82271e5f7f014d46f5ca9bcaa5aafe3e46d66cee21320"}, + {file = "netCDF4-1.7.2-cp39-cp39-win_amd64.whl", hash = "sha256:d0fa7a9674fae8ae4877e813173c3ff7a6beee166b8730bdc847f517b282ed31"}, + {file = "netcdf4-1.7.2.tar.gz", hash = "sha256:a4c6375540b19989896136943abb6d44850ff6f1fa7d3f063253b1ad3f8b7fce"}, +] + +[package.dependencies] +certifi = "*" +cftime = "*" +numpy = "*" + +[package.extras] +tests = ["Cython", "packaging", "pytest"] + [[package]] 
name = "numpy" version = "1.24.4" @@ -1683,8 +1892,8 @@ files = [ [package.dependencies] numpy = [ {version = ">=1.20.3", markers = "python_version < \"3.10\""}, - {version = ">=1.21.0", markers = "python_version >= \"3.10\" and python_version < \"3.11\""}, {version = ">=1.23.2", markers = "python_version >= \"3.11\""}, + {version = ">=1.21.0", markers = "python_version >= \"3.10\" and python_version < \"3.11\""}, ] python-dateutil = ">=2.8.2" pytz = ">=2020.1" @@ -2627,8 +2836,9 @@ type = ["pytest-mypy"] configbuilder = ["dash", "dash-bootstrap-components", "dash-core-components", "dash-cytoscape", "dash-dangerously-set-inner-html", "dash-html-components", "dash-table", "docutils", "pandas", "waitress"] datahandler = ["xarray"] interplot = ["dill", "ipython", "pypiwin32"] +videomode = ["dash", "dash-bootstrap-components", "dash-extensions", "kaleido", "xarray"] [metadata] lock-version = "2.0" python-versions = ">=3.8,<3.12" -content-hash = "ed127d93cdab63deccb2817c147b36cebee04abe96393d0e1a73f51da0d4f962" +content-hash = "7f34f1ba5a91d99de288444c4b8cbc1615c2f171d77aec3e98529a8767d69546" diff --git a/pyproject.toml b/pyproject.toml index d2c3918a..8351f0bc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,14 +4,9 @@ version = "v0.17.7" description = "The qualang_tools package includes various tools related to QUA programs in Python" authors = ["Quantum Machines "] license = "BSD-3-Clause" -packages = [ - { include = "qualang_tools" } -] -include = [ -] -exclude = [ - "**/tests/**", -] +packages = [{ include = "qualang_tools" }] +include = [] +exclude = ["**/tests/**"] readme = "README.md" homepage = "https://github.com/qua-platform/py-qua-tools" @@ -27,16 +22,19 @@ dash-html-components = { version = "^2.0.0", optional = true } dash-core-components = { version = "^2.0.0", optional = true } dash-bootstrap-components = { version = "^1.0.0", optional = true } dash-cytoscape = { version = "^0.3.0", optional = true } +dash-extensions = { version = "^1.0.0", optional = true } dash-table = { version = "^5.0.0", optional = true } dash-dangerously-set-inner-html = { version = "^0.0.2", optional = true } -docutils = { version = ">=0.14.0", optional = true } +docutils = { version = ">=0.14.0,<0.21", optional = true } waitress = { version = "^2.0.0", optional = true } dill = { version = "^0.3.4", optional = true } pypiwin32 = { version = "^223", optional = true } ipython = { version = "^8.10.0", optional = true } xarray = { version = "^2023.0.0", optional = true } +kaleido = { version = "^0.2.1", optional = true } scikit-learn = "^1.0.2" grpclib = "0.4.5" +netcdf4 = "^1.7.2" [tool.poetry.dev-dependencies] pytest = "^6.2.5" @@ -47,8 +45,27 @@ setuptools = "^69.0.2" [tool.poetry.extras] interplot = ["dill", "pypiwin32", "ipython"] -configbuilder = ["pandas", "dash", "dash-html-components", "dash-core-components", "dash-bootstrap-components", "dash-cytoscape", "dash-table", "dash-dangerously-set-inner-html", "docutils", "waitress"] +configbuilder = [ + "pandas", + "dash", + "dash-html-components", + "dash-core-components", + "dash-bootstrap-components", + "dash-cytoscape", + "dash-table", + "dash-dangerously-set-inner-html", + "docutils", + "waitress", +] datahandler = ["xarray", "netcdf4"] +videomode = [ + "dash", + "dash-extensions", + "xarray", + "kaleido", + "h5netcdf", + "dash-bootstrap-components", +] [tool.black] line-length = 120 diff --git a/qualang_tools/control_panel/video_mode/README.md b/qualang_tools/control_panel/video_mode/README.md new file mode 100644 index 00000000..f73ae7d8 --- 
/dev/null +++ b/qualang_tools/control_panel/video_mode/README.md @@ -0,0 +1,183 @@ +# Video Mode + +This module performs a continuous, rapid 2D scan over two sweep axes and measures a signal at each point. The results are shown through an interactive web frontend with a live plot and controls for the sweep parameters. + +The video mode has been designed as a modular tool composed of four parts: + +1. The `OPXDataAcquirer` class, which is responsible for the data acquisition. +2. The `ScanMode` class, which is responsible for how the 2D grid is traversed. +3. The `InnerLoopAction` class, which is responsible for the QUA code performed during each step of the scan. +4. The `VideoMode` class, which handles interaction with the frontend. + +The `ScanMode` and `InnerLoopAction` classes are highly flexible and can be selected or modified to suit the specific needs of the user. For example, three different scan modes (`RasterScan`, `SpiralScan`, and `SwitchRasterScan`) are provided, which can be used to acquire data in different ways. Similarly, the `InnerLoopAction` class can be modified to perform additional actions, such as adding specific pulses prior to each measurement. + +## Installation + +First, install the `qualang_tools` package with the `videomode` extra: + +```bash +pip install qualang-tools[videomode] +``` + +## Basic Usage +To use the video mode, initialize the relevant classes and pass them to the `VideoMode` class. +We will go through a simple example to demonstrate the video mode. Most of the classes and functions described here have additional options, which can be found in the docstrings and in the source code. + +If you don't have access to an OPX but still want to try the video mode, see the `Simulated Video Mode` section under `Advanced Usage`. + +First, we assume that a `QuantumMachinesManager` instance is already connected and available as the variable `qmm`, together with a corresponding `qua_config` dictionary. + + +### Scan mode + +Next, we define the scan mode, which in this case is a raster scan. +```python +from qualang_tools.control_panel.video_mode import scan_modes +scan_mode = scan_modes.RasterScan() +``` + +This scan can be visualized by calling +```python +scan_mode.plot_scan(x_points, y_points) +``` +where `x_points` and `y_points` are the number of sweep points along each axis. + +### Inner loop action + +The user has full freedom in defining the innermost loop sequence performed by the OPX, which is implemented in the `__call__()` method of an `InnerLoopAction` subclass; a minimal custom example is sketched below.
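+As a minimal sketch of that flexibility, a custom action only needs to subclass `InnerLoopAction` and implement `__call__()`, returning the two QUA variables to stream. The element and pulse names below (`output_ch1`, `output_ch2`, `measure_ch`, `init_pulse`, `readout`) are placeholders and must exist in your own configuration:
+
+```python
+from typing import Tuple
+from qm.qua import QuaVariableType, declare, fixed, set_dc_offset, align, play, measure, demod
+from qualang_tools.control_panel.video_mode.inner_loop_actions import InnerLoopAction
+
+
+class PulsedInnerLoopAction(InnerLoopAction):
+    """Example action: play an extra pulse before each measurement (element/pulse names are placeholders)."""
+
+    def __call__(self, x: QuaVariableType, y: QuaVariableType) -> Tuple[QuaVariableType, QuaVariableType]:
+        I, Q = declare(fixed), declare(fixed)
+        set_dc_offset("output_ch1", "single", x)  # move to the current (x, y) point
+        set_dc_offset("output_ch2", "single", y)
+        align()
+        play("init_pulse", "output_ch1")  # additional pulse prior to the measurement
+        align()
+        measure("readout", "measure_ch", None, demod.full("cos", I), demod.full("sin", Q))
+        return I, Q
+```
+
+Such a custom action can then be passed to the data acquirer in exactly the same way as the built-in actions shown next.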
+ + For example, the `BasicInnerLoopAction` performs a reflectometry measurement after updating the offsets of the x and y elements and waiting for a pre-measurement delay: + +```python +def __call__(self, x: QuaVariableType, y: QuaVariableType) -> Tuple[QuaVariableType, QuaVariableType]: + outputs = {"I": declare(fixed), "Q": declare(fixed)} + + set_dc_offset(self.x_elem, "single", x) + set_dc_offset(self.y_elem, "single", y) + align() + pre_measurement_delay_cycles = int(self.pre_measurement_delay * 1e9 // 4) + if pre_measurement_delay_cycles >= 4: + wait(pre_measurement_delay_cycles) + measure( + self.readout_pulse, + self.readout_elem, + None, + demod.full("cos", outputs["I"]), + demod.full("sin", outputs["Q"]), + ) + + return outputs["I"], outputs["Q"] +``` + +For this tutorial, we will instantiate the `BasicInnerLoopAction` class: + +```python +# Define the inner loop action +from qualang_tools.control_panel.video_mode.inner_loop_actions import BasicInnerLoopAction +inner_loop_action = BasicInnerLoopAction( + x_element="output_ch1", # Must be a valid QUA element + y_element="output_ch2", # Must be a valid QUA element + pre_measurement_delay=1e-6, # Delay before each measurement in seconds + readout_element="measure_ch", # Must be a valid QUA element + readout_pulse="readout", # Name of the readout pulse registered in the readout_element +) +``` + + + +Note that this `BasicInnerLoopAction` assumes that the `readout_pulse` has two integration weights called `cos` and `sin`. + +Next, we define the sweep axes, which define the coordinate values of the 2D scan. + +```python +from qualang_tools.control_panel.video_mode.sweep_axis import SweepAxis +x_axis = SweepAxis( + name="voltage_gate1", # Sweep axis name, used among others for plotting + span=0.03, # Span of the sweep in volts + points=51, # Number of points to sweep +) +y_axis = SweepAxis(name="voltage_gate2", span=0.03, points=51) +``` +The `SweepAxis` class contains additional attributes, such as attenuation and a voltage offset, the latter of which is described in `Advanced Usage`. + +Next, we define the data acquirer, which is the object that handles the data acquisition. +```python +from qualang_tools.control_panel.video_mode.data_acquirers import OPXDataAcquirer +data_acquirer = OPXDataAcquirer( + qmm=qmm, + qua_config=qua_config, + qua_inner_loop_action=inner_loop_action, + scan_mode=scan_mode, + x_axis=x_axis, + y_axis=y_axis, +) +``` + +You can now test the data acquirer before using it in video mode. +```python +data_acquirer.run_program() +results = data_acquirer.acquire_data() +``` + +Finally, we can start the video mode. +```python +from qualang_tools.control_panel.video_mode.video_mode import VideoMode +video_mode = VideoMode(data_acquirer=data_acquirer) +video_mode.run() +``` + +Note that if you want to run this code in an interactive environment such as a Jupyter notebook, you should use `video_mode.run(use_reloader=False)`. + +You can now access the video mode from your browser at `http://localhost:8050/` (the port may differ; see the output logs for details). + + +## Advanced Usage +### Voltage offsets + +The `SweepAxis` class has an `offset_parameter` attribute, an optional parameter that defines the sweep offset. This can be a QCoDeS DC voltage source parameter or a `VoltageParameter` object. + +As an example, let us assume that we have a QCoDeS parameter `x_offset` for the DC voltage of a gate: + +```python +x_offset() # Returns the DC voltage, e.g.
0.62 +``` + +In this case, we can pass this parameter to the `SweepAxis` class to define the sweep offset. +```python +x_axis = SweepAxis(name="gate", span=0.03, points=51, offset_parameter=x_offset) +``` +The video mode plot should now correctly show the sweep axes with the correct offset. + +Note that if the offset voltage is changed, it will need to be changed in the same kernel where the video mode is running. One solution for this is using the `VoltageControl` module in py-qua-tools. + + +### Simulated Video Mode +Below is an example of how to run the video mode without an actual OPX. +In this case, we will use the `RandomDataAcquirer` class, which simply displays uniformly-sampled random data. +```python +from qualang_tools.control_panel.video_mode import * + +x_axis = SweepAxis(name="X", span=0.1, points=101) +y_axis = SweepAxis(name="Y", span=0.1, points=101) + +data_acquirer = RandomDataAcquirer( + x_axis=x_axis, + y_axis=y_axis, + num_averages=5, +) + +live_plotter = VideoMode(data_acquirer=data_acquirer) +live_plotter.run() +``` + +# Debugging + +To see the logs which include useful debug information, you can update the logging configuration. + +```python +import logging + +logging.basicConfig(level=logging.DEBUG) +logging.getLogger("hpack.hpack").setLevel(logging.WARNING) +logging.getLogger("matplotlib").setLevel(logging.WARNING) +``` \ No newline at end of file diff --git a/qualang_tools/control_panel/video_mode/__init__.py b/qualang_tools/control_panel/video_mode/__init__.py new file mode 100644 index 00000000..a961ea48 --- /dev/null +++ b/qualang_tools/control_panel/video_mode/__init__.py @@ -0,0 +1,28 @@ +from qualang_tools.control_panel.video_mode.dash_tools import * +from qualang_tools.control_panel.video_mode.sweep_axis import SweepAxis +from qualang_tools.control_panel.video_mode.voltage_parameters import * +from qualang_tools.control_panel.video_mode.inner_loop_actions import * +from qualang_tools.control_panel.video_mode.scan_modes import * +from qualang_tools.control_panel.video_mode.data_acquirers import * +from qualang_tools.control_panel.video_mode.video_mode import * + + +if __name__ == "__main__": + import logging + + # Update the logging configuration + logging.basicConfig(level=logging.DEBUG) + logging.getLogger("hpack.hpack").setLevel(logging.WARNING) + logging.getLogger("matplotlib").setLevel(logging.WARNING) + x_axis = SweepAxis(name="X", span=0.1, points=51) + y_axis = SweepAxis(name="Y", span=0.1, points=101) + + data_acquirer = RandomDataAcquirer( + x_axis=x_axis, + y_axis=y_axis, + num_averages=5, + acquire_time=0.1, + ) + + live_plotter = VideoMode(data_acquirer=data_acquirer, update_interval=0.1) + live_plotter.run() diff --git a/qualang_tools/control_panel/video_mode/dash_tools.py b/qualang_tools/control_panel/video_mode/dash_tools.py new file mode 100644 index 00000000..02147677 --- /dev/null +++ b/qualang_tools/control_panel/video_mode/dash_tools.py @@ -0,0 +1,183 @@ +from abc import ABC, abstractmethod +from enum import Flag, auto +from typing import Any, Dict, List, Literal, Optional + +from dash import html +import plotly.graph_objects as go +import xarray as xr +import dash_bootstrap_components as dbc + + +__all__ = ["xarray_to_plotly", "BaseDashComponent", "ModifiedFlags"] + + +class ModifiedFlags(Flag): + """Flags indicating what needs to be modified after parameter changes.""" + + NONE = 0 + PARAMETERS_MODIFIED = auto() + PROGRAM_MODIFIED = auto() + CONFIG_MODIFIED = auto() + + +class BaseDashComponent(ABC): + def __init__(self, *args, 
component_id: str, **kwargs): + assert not args, "BaseDashComponent does not accept any positional arguments" + assert not kwargs, "BaseDashComponent does not accept any keyword arguments" + + self.component_id = component_id + + def update_parameters(self, parameters: Dict[str, Dict[str, Any]]) -> ModifiedFlags: + """Update the component's attributes based on the input values.""" + return ModifiedFlags.NONE + + def get_dash_components(self, include_subcomponents: bool = True) -> List[html.Div]: + """Return a list of Dash components. + + Args: + include_subcomponents (bool, optional): Whether to include subcomponents. Defaults to True. + + Returns: + List[html.Div]: A list of Dash components. + """ + return [] + + def get_component_ids(self) -> List[str]: + """Return a list of component IDs for this component including subcomponents.""" + return [self.component_id] + + +def xarray_to_plotly(da: xr.DataArray): + """Convert an xarray DataArray to a Plotly figure. + + Args: + da (xr.DataArray): The data array to convert. + + Returns: + plotly.graph_objects.Figure: A Plotly figure with the data. + """ + if len(da.coords) != 2: + raise ValueError("DataArray must have exactly 2 coordinates.") + + coords_iter = iter(da.coords.items()) + + y_label, y_coord = next(coords_iter) + y_label = y_coord.attrs.get("long_name", y_label) + y_unit = y_coord.attrs.get("units", "") + + x_label, x_coord = next(coords_iter) + x_label = x_coord.attrs.get("long_name", x_label) + x_unit = x_coord.attrs.get("units", "") + + z_label = da.attrs.get("long_name", da.name or "Value") + z_unit = da.attrs.get("units", "") + + xaxis_label = f"{x_label} ({x_unit})" if x_unit else x_label + yaxis_label = f"{y_label} ({y_unit})" if y_unit else y_label + zaxis_label = f"{z_label} ({z_unit})" if z_unit else z_label + + fig = go.Figure( + go.Heatmap( + z=da.values, + x=x_coord.values, + y=y_coord.values, + colorscale="plasma", + colorbar=dict(title=zaxis_label), + ) + ) + fig.update_layout(xaxis_title=xaxis_label, yaxis_title=yaxis_label) + return fig + + +def create_input_field( + id, + label, + value, + debounce=True, + input_style=None, + div_style=None, + units=None, + **kwargs, +): + if input_style is None: + input_style = {"width": "80px"} + + elements = [ + dbc.Col( + dbc.Label( + f"{label}:", + html_for=id, + className="mr-2", + style={"white-space": "nowrap"}, + ), + width="auto", + ), + dbc.Col( + dbc.Input( + id=id, + type="number", + value=value, + debounce=debounce, + style=input_style, + **kwargs, + ), + width="auto", + ), + ] + if units is not None: + elements.append(dbc.Col(dbc.Label(units, className="ml-2"), width="auto")) + + return dbc.Row( + elements, + className="align-items-center mb-2", + style=div_style, + ) + + +def create_axis_layout( + axis: Literal["x", "y"], + span: float, + points: int, + min_span: float, + max_span: Optional[float] = None, + units: Optional[str] = None, + component_id: Optional[str] = None, +): + if component_id is None: + ids = {"span": f"{axis.lower()}-span", "points": f"{axis.lower()}-points"} + else: + ids = { + "span": {"type": component_id, "index": f"{axis.lower()}-span"}, + "points": {"type": component_id, "index": f"{axis.lower()}-points"}, + } + return dbc.Col( + dbc.Card( + [ + dbc.CardHeader(axis.upper()), + dbc.CardBody( + [ + create_input_field( + id=ids["span"], + label="Span", + value=span, + min=min_span, + max=max_span, + input_style={"width": "100px"}, + units=units, + ), + create_input_field( + id=ids["points"], + label="Points", + value=points, + min=1, + 
max=501, + step=1, + ), + ] + ), + ], + className="h-100", + ), + md=6, + className="mb-3", + ) diff --git a/qualang_tools/control_panel/video_mode/data_acquirers/__init__.py b/qualang_tools/control_panel/video_mode/data_acquirers/__init__.py new file mode 100644 index 00000000..624643e6 --- /dev/null +++ b/qualang_tools/control_panel/video_mode/data_acquirers/__init__.py @@ -0,0 +1,7 @@ +from qualang_tools.control_panel.video_mode.data_acquirers.base_data_aqcuirer import BaseDataAcquirer +from qualang_tools.control_panel.video_mode.data_acquirers.random_data_acquirer import RandomDataAcquirer +from qualang_tools.control_panel.video_mode.data_acquirers.opx_data_acquirer import OPXDataAcquirer +from qualang_tools.control_panel.video_mode.data_acquirers.opx_quam_data_acquirer import OPXQuamDataAcquirer + + +__all__ = ["BaseDataAcquirer", "RandomDataAcquirer", "OPXDataAcquirer", "OPXQuamDataAcquirer"] diff --git a/qualang_tools/control_panel/video_mode/data_acquirers/base_data_aqcuirer.py b/qualang_tools/control_panel/video_mode/data_acquirers/base_data_aqcuirer.py new file mode 100644 index 00000000..a5ecf19e --- /dev/null +++ b/qualang_tools/control_panel/video_mode/data_acquirers/base_data_aqcuirer.py @@ -0,0 +1,174 @@ +from abc import ABC, abstractmethod +from typing import List, Dict, Any +import xarray as xr +import logging +import numpy as np +from dash import html +import dash_bootstrap_components as dbc + +from qualang_tools.control_panel.video_mode.sweep_axis import SweepAxis +from qualang_tools.control_panel.video_mode.dash_tools import create_axis_layout, create_input_field +from qualang_tools.control_panel.video_mode.dash_tools import BaseDashComponent, ModifiedFlags + + +__all__ = ["BaseDataAcquirer"] + + +class BaseDataAcquirer(BaseDashComponent, ABC): + """Base class for data acquirers. + + This class defines the interface for data acquirers, which are responsible for acquiring data from a device. + Subclasses must implement the `acquire_data` method to provide the actual data acquisition logic. + + Args: + x_axis: The x-axis of the data acquirer. + y_axis: The y-axis of the data acquirer. + num_averages: The number of averages to take as a rolling average. + """ + + def __init__( + self, + *, + x_axis: SweepAxis, + y_axis: SweepAxis, + num_averages: int = 1, + component_id: str = "data-acquirer", + **kwargs, + ): + assert not kwargs, f"Unexpected keyword arguments: {kwargs}" + super().__init__(component_id=component_id) + + self.x_axis = x_axis + self.y_axis = y_axis + self.num_averages = num_averages + self.data_history = [] + + logging.debug("Initializing DataGenerator") + + self.num_acquisitions = 0 + + self.data_array = xr.DataArray( + np.zeros((self.y_axis.points, self.x_axis.points)), + coords=[ + (self.y_axis.name, self.y_axis.sweep_values_with_offset), + (self.x_axis.name, self.x_axis.sweep_values_with_offset), + ], + attrs={"long_name": "Signal"}, + ) + for axis in [self.x_axis, self.y_axis]: + attrs = {"label": axis.label or axis.name} + if axis.units is not None: + attrs["units"] = axis.units + self.data_array.coords[axis.name].attrs.update(attrs) + logging.debug("DataGenerator initialized with initial data") + + @abstractmethod + def acquire_data(self) -> np.ndarray: + """Acquire data from the device. + + This method must be implemented by subclasses to provide the actual data acquisition logic. + """ + pass + + def update_data(self) -> xr.DataArray: + """Update the data array with the new data. 
+ + This method acquires new data from the device and updates the data array. + It also performs a rolling average of the data to reduce noise. + """ + new_data = self.acquire_data() + self.num_acquisitions += 1 + + if new_data.shape != self.data_array.values.shape: + self.data_history.clear() + + self.data_history.append(new_data) + + if len(self.data_history) > self.num_averages: + self.data_history.pop(0) + + averaged_data = np.mean(self.data_history, axis=0) + + self.data_array = xr.DataArray( + averaged_data, + coords=[ + (self.y_axis.name, self.y_axis.sweep_values_with_offset), + (self.x_axis.name, self.x_axis.sweep_values_with_offset), + ], + attrs=self.data_array.attrs, # Preserve original attributes like units + ) + for axis in [self.x_axis, self.y_axis]: + attrs = {"label": axis.label or axis.name} + if axis.units is not None: + attrs["units"] = axis.units + self.data_array.coords[axis.name].attrs.update(attrs) + + mean_abs_data = np.mean(np.abs(averaged_data)) + logging.debug(f"Data acquired with shape: {self.data_array.shape}, mean(abs(data)) = {mean_abs_data}") + return self.data_array + + def get_dash_components(self, include_subcomponents: bool = True) -> List[html.Div]: + """Return the x and y axis components in a single row.""" + return [ + html.Div( + [ + dbc.Row( + [ + create_axis_layout( + axis="x", + component_id=self.component_id, + span=self.x_axis.span, + points=self.x_axis.points, + min_span=0.01, + max_span=None, + units=self.x_axis.units, + ), + create_axis_layout( + axis="y", + component_id=self.component_id, + span=self.y_axis.span, + points=self.y_axis.points, + min_span=0.01, + max_span=None, + units=self.y_axis.units, + ), + ], + className="g-0", + ), # g-0 removes gutters between columns + ] + ), + create_input_field( + id={"type": self.component_id, "index": "num-averages"}, + label="Averages", + value=self.num_averages, + min=1, + step=1, + debounce=True, + ), + ] + + def update_parameters(self, parameters: Dict[str, Dict[str, Any]]) -> ModifiedFlags: + """Update the data acquirer's attributes based on the input values.""" + params = parameters[self.component_id] + flags = ModifiedFlags.NONE + if self.num_averages != params["num-averages"]: + self.num_averages = params["num-averages"] + flags |= ModifiedFlags.PARAMETERS_MODIFIED + if self.x_axis.span != params["x-span"]: + self.x_axis.span = params["x-span"] + flags |= ModifiedFlags.PARAMETERS_MODIFIED + if self.x_axis.points != params["x-points"]: + self.x_axis.points = params["x-points"] + flags |= ModifiedFlags.PARAMETERS_MODIFIED + if self.y_axis.span != params["y-span"]: + self.y_axis.span = params["y-span"] + flags |= ModifiedFlags.PARAMETERS_MODIFIED + if self.y_axis.points != params["y-points"]: + self.y_axis.points = params["y-points"] + flags |= ModifiedFlags.PARAMETERS_MODIFIED + + return flags + + def get_component_ids(self) -> List[str]: + """Return a list of component IDs for this data acquirer.""" + return [self.component_id] diff --git a/qualang_tools/control_panel/video_mode/data_acquirers/opx_data_acquirer.py b/qualang_tools/control_panel/video_mode/data_acquirers/opx_data_acquirer.py new file mode 100644 index 00000000..065ff98c --- /dev/null +++ b/qualang_tools/control_panel/video_mode/data_acquirers/opx_data_acquirer.py @@ -0,0 +1,222 @@ +import numpy as np +import logging +from typing import Any, Dict, List, Literal, Optional, Callable +from time import perf_counter + +from dash import html +import dash_bootstrap_components as dbc + +from qm import QuantumMachinesManager, Program 
+from qm.jobs.running_qm_job import RunningQmJob +from qm.qua import program, declare_stream, infinite_loop_, save, stream_processing, wait + +from qualang_tools.control_panel.video_mode.dash_tools import ModifiedFlags +from qualang_tools.control_panel.video_mode.data_acquirers.base_data_aqcuirer import BaseDataAcquirer +from qualang_tools.control_panel.video_mode.sweep_axis import SweepAxis +from qualang_tools.control_panel.video_mode.scan_modes import ScanMode + + +__all__ = ["OPXDataAcquirer"] + + +class OPXDataAcquirer(BaseDataAcquirer): + """Data acquirer for OPX devices. + + This class is responsible for acquiring data from OPX devices. + + Args: + qmm: The QuantumMachinesManager instance. + qua_config: The QUAM configuration to use. + qua_inner_loop_action: The inner loop action to execute. + scan_mode: The scan mode to use. + x_axis: The x-axis of the data acquirer. + y_axis: The y-axis of the data acquirer. + num_averages: The number of averages to take as a rolling average. + result_type: The type of result to acquire. + initial_delay: The initial delay before starting each scan. + """ + + stream_vars = ["I", "Q"] + result_types = ["I", "Q", "amplitude", "phase"] + + def __init__( + self, + *, + qmm: QuantumMachinesManager, + qua_config: Dict[str, Any], + qua_inner_loop_action: Callable, + scan_mode: ScanMode, + x_axis: SweepAxis, + y_axis: SweepAxis, + num_averages=1, + result_type: Literal["I", "Q", "amplitude", "phase"] = "I", + initial_delay: Optional[float] = None, + **kwargs, + ): + self.qmm = qmm + self.qua_config = qua_config + self.qm = self.qmm.open_qm(self.qua_config) # type: ignore + + self.scan_mode = scan_mode + self.qua_inner_loop_action = qua_inner_loop_action + self.initial_delay = initial_delay + self.program: Optional[Program] = None + self.job: Optional[RunningQmJob] = None + self.result_type = result_type + self.results: Dict[str, Any] = {} + + super().__init__( + x_axis=x_axis, + y_axis=y_axis, + num_averages=num_averages, + **kwargs, + ) + + def generate_program(self) -> Program: + """Generate a QUA program to acquire data from the device.""" + x_vals = self.x_axis.sweep_values_unattenuated + y_vals = self.y_axis.sweep_values_unattenuated + + with program() as prog: + IQ_streams = {"I": declare_stream(), "Q": declare_stream()} + + with infinite_loop_(): + self.qua_inner_loop_action.initial_action() + if self.initial_delay is not None: + wait(int(self.initial_delay * 1e9) // 4) + + for x, y in self.scan_mode.scan(x_vals=x_vals, y_vals=y_vals): + I, Q = self.qua_inner_loop_action(x, y) + save(I, IQ_streams["I"]) + save(Q, IQ_streams["Q"]) + + self.qua_inner_loop_action.final_action() + + with stream_processing(): + streams = { + "I": IQ_streams["I"].buffer(self.x_axis.points * self.y_axis.points), + "Q": IQ_streams["Q"].buffer(self.x_axis.points * self.y_axis.points), + } + combined_stream = None + for var in self.stream_vars: + if combined_stream is None: + combined_stream = streams[var] + else: + combined_stream = combined_stream.zip(streams[var]) + combined_stream.save("combined") # type: ignore + return prog + + def process_results(self, results: Dict[str, Any]) -> np.ndarray: + """Process the results from the device. + + This method processes the results from the device and returns a 2D array. + The class variable `result_type` determines the type of result to acquire. 
+ The `scan_mode` determines the order in which the data is acquired and sorted + """ + if self.result_type in ["I", "Q"]: + result = results[self.result_type] + elif self.result_type == "amplitude": + result = np.abs(results["I"] + 1j * results["Q"]) + elif self.result_type == "phase": + result = np.angle(results["I"] + 1j * results["Q"]) + else: + raise ValueError(f"Invalid result type: {self.result_type}") + + x_idxs, y_idxs = self.scan_mode.get_idxs(x_points=self.x_axis.points, y_points=self.y_axis.points) + results_2D = np.zeros((self.y_axis.points, self.x_axis.points), dtype=float) + results_2D[y_idxs, x_idxs] = result + + return results_2D + + def acquire_data(self) -> np.ndarray: + """Acquire data from the device. + + This method acquires data from the device and returns a 2D array. + """ + if self.program is None: + self.run_program() + + t0 = perf_counter() + results_tuple = self.job.result_handles.get("combined").fetch_all() # type: ignore + self.results = dict(zip(self.stream_vars, results_tuple)) # type: ignore + result_array = self.process_results(self.results) + logging.info(f"Time to acquire data: {(perf_counter() - t0) * 1e3:.2f} ms") + + return result_array + + def run_program(self, verify: bool = True) -> None: + """Run the QUA program. + + This method runs the QUA program and returns the results. + + Args: + verify: Whether to verify that data can be acquired once started. + """ + if self.program is None: + self.program = self.generate_program() + + self.job = self.qm.execute(self.program) + + if not verify: + return + + # Wait until one buffer is filled{ + self.job.result_handles.get("combined").wait_for_values(1) # type: ignore + + def get_dash_components(self, include_subcomponents: bool = True) -> List[html.Div]: + components = super().get_dash_components() + + components.append( + dbc.Row( + [ + dbc.Label("Result Type", style={"max-width": "150px"}), + dbc.Select( + id={"type": self.component_id, "index": "result-type"}, + options=[{"label": rt, "value": rt} for rt in self.result_types], + value=self.result_type, + style={"max-width": "150px"}, + ), + ] + ) + ) + + if include_subcomponents: + components.extend(self.scan_mode.get_dash_components()) + components.extend(self.qua_inner_loop_action.get_dash_components()) + + return components + + def generate_config(self) -> None: + raise NotImplementedError("OPXDataAcquirer does not implement generate_config") + + def update_parameters(self, parameters: Dict[str, Dict[str, Any]]) -> ModifiedFlags: + flags = super().update_parameters(parameters) + # Update program if any sweep axes have been modified + if flags & ModifiedFlags.PARAMETERS_MODIFIED: + flags |= ModifiedFlags.PROGRAM_MODIFIED + + params = parameters[self.component_id] + if self.result_type != params["result-type"]: + self.result_type = params["result-type"] + flags |= ModifiedFlags.PARAMETERS_MODIFIED + + flags |= self.scan_mode.update_parameters(parameters) + flags |= self.qua_inner_loop_action.update_parameters(parameters) + + if flags & ModifiedFlags.PARAMETERS_MODIFIED: + self.data_history.clear() + + if flags & ModifiedFlags.CONFIG_MODIFIED: + self.generate_config() + + if flags & (ModifiedFlags.CONFIG_MODIFIED | ModifiedFlags.PROGRAM_MODIFIED): + self.program = self.generate_program() + self.run_program() + + return flags + + def get_component_ids(self) -> List[str]: + component_ids = super().get_component_ids() + component_ids.append(self.scan_mode.component_id) + component_ids.append(self.qua_inner_loop_action.component_id) + return component_ids 
diff --git a/qualang_tools/control_panel/video_mode/data_acquirers/opx_quam_data_acquirer.py b/qualang_tools/control_panel/video_mode/data_acquirers/opx_quam_data_acquirer.py new file mode 100644 index 00000000..3d8f653e --- /dev/null +++ b/qualang_tools/control_panel/video_mode/data_acquirers/opx_quam_data_acquirer.py @@ -0,0 +1,62 @@ +from typing import Any, Callable, Literal, Optional + +from qm import QuantumMachinesManager + +from qualang_tools.control_panel.video_mode.data_acquirers.opx_data_acquirer import OPXDataAcquirer +from qualang_tools.control_panel.video_mode.scan_modes import ScanMode +from qualang_tools.control_panel.video_mode.sweep_axis import SweepAxis + + +__all__ = ["OPXQuamDataAcquirer"] + + +class OPXQuamDataAcquirer(OPXDataAcquirer): + """Data acquirer for OPX devices using QUAM. + + This class is responsible for acquiring data from OPX devices using QUAM. + + Args: + qmm: The QuantumMachinesManager instance. + machine: The QUAM machine instance to use. + qua_inner_loop_action: The inner loop action to execute. + scan_mode: The scan mode to use. + x_axis: The x-axis of the data acquirer. + y_axis: The y-axis of the data acquirer. + num_averages: The number of averages to take as a rolling average. + result_type: The type of result to acquire. + initial_delay: The initial delay before starting each scan. + """ + + def __init__( + self, + *, + qmm: QuantumMachinesManager, + machine: Any, + qua_inner_loop_action: Callable, + scan_mode: ScanMode, + x_axis: SweepAxis, + y_axis: SweepAxis, + num_averages=1, + result_type: Literal["I", "Q", "amplitude", "phase"] = "I", + initial_delay: Optional[float] = None, + **kwargs, + ): + self.machine = machine + qua_config = machine.generate_config() + + super().__init__( + qmm=qmm, + qua_config=qua_config, + qua_inner_loop_action=qua_inner_loop_action, + scan_mode=scan_mode, + x_axis=x_axis, + y_axis=y_axis, + num_averages=num_averages, + result_type=result_type, + initial_delay=initial_delay, + **kwargs, + ) + + def generate_config(self): + self.qua_config = self.machine.generate_config() + self.qm = self.qmm.open_qm(self.qua_config) diff --git a/qualang_tools/control_panel/video_mode/data_acquirers/random_data_acquirer.py b/qualang_tools/control_panel/video_mode/data_acquirers/random_data_acquirer.py new file mode 100644 index 00000000..60cf24d2 --- /dev/null +++ b/qualang_tools/control_panel/video_mode/data_acquirers/random_data_acquirer.py @@ -0,0 +1,66 @@ +import numpy as np +from time import sleep +from typing import Any, Dict, List + +from dash import html + +from qualang_tools.control_panel.video_mode.sweep_axis import SweepAxis +from qualang_tools.control_panel.video_mode.data_acquirers.base_data_aqcuirer import BaseDataAcquirer +from qualang_tools.control_panel.video_mode.dash_tools import create_input_field, ModifiedFlags + + +__all__ = ["RandomDataAcquirer"] + + +class RandomDataAcquirer(BaseDataAcquirer): + """Data acquirer that acquires random data.""" + + def __init__( + self, + *, + x_axis: SweepAxis, + y_axis: SweepAxis, + num_averages: int = 1, + acquire_time: float = 1, + **kwargs, + ): + self.acquire_time = acquire_time + super().__init__(x_axis=x_axis, y_axis=y_axis, num_averages=num_averages, **kwargs) + + def acquire_data(self) -> np.ndarray: + """Acquire random data. + + This method acquires random data from the simulated device. 
+ """ + sleep(self.acquire_time) + results = np.random.rand(self.y_axis.points, self.x_axis.points) + return results + + def get_dash_components(self, include_subcomponents: bool = True) -> List[html.Div]: + dash_components = super().get_dash_components(include_subcomponents=include_subcomponents) + dash_components.extend( + [ + html.Div( + create_input_field( + id={"type": self.component_id, "index": "acquire-time"}, + label="Acquire time", + value=self.acquire_time, + min=0.1, + max=10, + step=0.1, + units="s", + ) + ) + ] + ) + return dash_components + + def update_parameters(self, parameters: Dict[str, Dict[str, Any]]) -> ModifiedFlags: + flags = super().update_parameters(parameters) + + params = parameters[self.component_id] + if self.acquire_time != params["acquire-time"]: + self.acquire_time = params["acquire-time"] + flags |= ModifiedFlags.PARAMETERS_MODIFIED + + return flags diff --git a/qualang_tools/control_panel/video_mode/inner_loop_actions.py b/qualang_tools/control_panel/video_mode/inner_loop_actions.py new file mode 100644 index 00000000..fd72625b --- /dev/null +++ b/qualang_tools/control_panel/video_mode/inner_loop_actions.py @@ -0,0 +1,286 @@ +from abc import ABC, abstractmethod +from typing import Tuple, List, Dict, Any + +from dash import html + +from qm.qua import ( + declare, + fixed, + demod, + set_dc_offset, + align, + wait, + measure, + QuaVariableType, + play, + ramp, + assign, + else_, + if_, + ramp_to_zero, +) +from qualang_tools.control_panel.video_mode.dash_tools import BaseDashComponent, ModifiedFlags, create_input_field +from qualang_tools.units.units import unit +from qm.qua.lib import Cast, Math + + +class InnerLoopAction(BaseDashComponent, ABC): + def __init__(self, component_id: str = "inner-loop"): + super().__init__(component_id=component_id) + + @abstractmethod + def __call__(self, x: QuaVariableType, y: QuaVariableType) -> Tuple[QuaVariableType, QuaVariableType]: + pass + + def initial_action(self): + pass + + def final_action(self): + pass + + +class BasicInnerLoopAction(InnerLoopAction): + """Inner loop action for the video mode: set voltages and measure. + + This class is responsible for performing the inner loop action for the video mode. + It is used to set the voltages and measure the readout pulse. + + Args: + x_element: The name of the element along the x-axis to set the voltage. + y_element: The name of the element along the y-axis to set the voltage. + readout_element: The name of the element to measure. + readout_pulse: The name of the pulse to measure. + pre_measurement_delay: The delay before the measurement in ns. 
+ """ + + def __init__( + self, + x_element: str, + y_element: str, + readout_element: str, + readout_pulse: str = "readout", + pre_measurement_delay: float = 1e-6, + ): + super().__init__() + self.x_elem = x_element + self.y_elem = y_element + self.readout_elem = readout_element + self.readout_pulse = readout_pulse + self.pre_measurement_delay = pre_measurement_delay + + def set_dc_offsets(self, x: QuaVariableType, y: QuaVariableType): + set_dc_offset(self.x_elem, "single", x) + set_dc_offset(self.y_elem, "single", y) + + def __call__(self, x: QuaVariableType, y: QuaVariableType) -> Tuple[QuaVariableType, QuaVariableType]: + outputs = {"I": declare(fixed), "Q": declare(fixed)} + + self.set_dc_offsets(x, y) + align() + pre_measurement_delay_cycles = int(self.pre_measurement_delay * 1e9 // 4) + if pre_measurement_delay_cycles >= 4: + wait(pre_measurement_delay_cycles) + measure( + self.readout_pulse, + self.readout_elem, + None, + demod.full("cos", outputs["I"]), + demod.full("sin", outputs["Q"]), + ) + + return outputs["I"], outputs["Q"] + + def initial_action(self): + set_dc_offset(self.x_elem, "single", 0) + set_dc_offset(self.y_elem, "single", 0) + align() + + +class BasicInnerLoopActionQuam(InnerLoopAction): + """Inner loop action for the video mode: set voltages and measure. + + This class is responsible for performing the inner loop action for the video mode. + It is used to set the voltages and measure the readout pulse. + + Args: + x_element: The QUAM Channel object along the x-axis. + y_element: The QUAM Channel object along the y-axis. + readout_pulse: The QUAM Pulse object to measure. + pre_measurement_delay: The optional delay before the measurement. + """ + + def __init__( + self, + x_element, + y_element, + readout_pulse, + pre_measurement_delay: float = 0.0, + ramp_rate: float = 0.0, + use_dBm=False, + ): + super().__init__() + self.x_elem = x_element + self.y_elem = y_element + self.readout_pulse = readout_pulse + self.pre_measurement_delay = pre_measurement_delay + self.ramp_rate = ramp_rate + self.use_dBm = use_dBm + + self._last_x_voltage = None + self._last_y_voltage = None + self.reached_voltage = None + + def perform_ramp(self, element, previous_voltage, new_voltage): + ramp_cycles_ns_V = declare(int, int(1e9 / self.ramp_rate / 4)) + qua_ramp = declare(fixed, self.ramp_rate / 1e9) + dV = declare(fixed) + duration = declare(int) + self.reached_voltage = declare(fixed) + assign(dV, new_voltage - previous_voltage) + # duration = Math.abs(Cast.mul_int_by_fixed(ramp_cycles_ns_V, dV)) + assign(duration, Math.abs(Cast.mul_int_by_fixed(ramp_cycles_ns_V, dV))) + + with if_(duration > 4): + with if_(dV > 0): + assign(self.reached_voltage, previous_voltage + Cast.mul_fixed_by_int(qua_ramp, duration << 2)) + play(ramp(self.ramp_rate / 1e9), element.name, duration=duration) + with else_(): + assign(self.reached_voltage, previous_voltage - Cast.mul_fixed_by_int(qua_ramp, duration << 2)) + play(ramp(-self.ramp_rate / 1e9), element.name, duration=duration) + with else_(): + ramp_rate = dV * (1 / 16e-9) + play(ramp(ramp_rate), element.name, duration=4) + # element.play("step", amplitude_scale=dV << 2) + assign(self.reached_voltage, new_voltage) + + def set_dc_offsets(self, x: QuaVariableType, y: QuaVariableType): + if self.ramp_rate > 0: + if getattr(self.x_elem, "sticky", None) is None: + raise RuntimeError("Ramp rate is not supported for non-sticky elements") + if getattr(self.y_elem, "sticky", None) is None: + raise RuntimeError("Ramp rate is not supported for non-sticky elements") 
+ + self.perform_ramp(self.x_elem, self._last_x_voltage, x) + assign(self._last_x_voltage, self.reached_voltage) + self.perform_ramp(self.y_elem, self._last_y_voltage, y) + assign(self._last_y_voltage, self.reached_voltage) + else: + self.x_elem.set_dc_offset(x) + self.y_elem.set_dc_offset(y) + + assign(self._last_x_voltage, x) + assign(self._last_y_voltage, y) + + def __call__(self, x: QuaVariableType, y: QuaVariableType) -> Tuple[QuaVariableType, QuaVariableType]: + self.set_dc_offsets(x, y) + align() + + pre_measurement_delay_cycles = int(self.pre_measurement_delay * 1e9 // 4) + if pre_measurement_delay_cycles >= 4: + wait(pre_measurement_delay_cycles) + + I, Q = self.readout_pulse.channel.measure(self.readout_pulse.id) + align() + + return I, Q + + def initial_action(self): + self._last_x_voltage = declare(fixed, 0.0) + self._last_y_voltage = declare(fixed, 0.0) + self.set_dc_offsets(0, 0) + align() + + def final_action(self): + if self.ramp_rate > 0: + if getattr(self.x_elem, "sticky", None) is None: + raise RuntimeError("Ramp rate is not supported for non-sticky elements") + if getattr(self.y_elem, "sticky", None) is None: + raise RuntimeError("Ramp rate is not supported for non-sticky elements") + + ramp_to_zero(self.x_elem.name) + ramp_to_zero(self.y_elem.name) + assign(self._last_x_voltage, 0.0) + assign(self._last_y_voltage, 0.0) + else: + self.set_dc_offsets(0, 0) + align() + + def get_dash_components(self, include_subcomponents: bool = True) -> List[html.Div]: + components = super().get_dash_components(include_subcomponents) + + additional_components = [ + create_input_field( + id={"type": self.component_id, "index": "readout_frequency"}, + label="Readout frequency", + value=self.readout_pulse.channel.intermediate_frequency, + units="Hz", + step=20e3, + ), + create_input_field( + id={"type": self.component_id, "index": "readout_duration"}, + label="Readout duration", + value=self.readout_pulse.length, + units="ns", + input_style={"width": "200px"}, + step=10, + ), + ] + + if self.use_dBm: + additional_components.append( + create_input_field( + id={"type": self.component_id, "index": "readout_power"}, + label="Readout power", + value=unit.volts2dBm(self.readout_pulse.amplitude), + units="dBm", + ), + ) + else: + additional_components.append( + create_input_field( + id={"type": self.component_id, "index": "readout_amplitude"}, + label="Readout amplitude", + value=self.readout_pulse.amplitude, + units="V", + ), + ) + + components.append(html.Div(additional_components)) + + return components + + def update_parameters(self, parameters: Dict[str, Dict[str, Any]]) -> ModifiedFlags: + """Update the data acquirer's attributes based on the input values.""" + try: + params = parameters[self.component_id] + except KeyError: + print(f"Inner loop action parameters: {list(parameters.keys())}") + raise + + flags = ModifiedFlags.NONE + if self.readout_pulse.channel.intermediate_frequency != params["readout_frequency"]: + self.readout_pulse.channel.intermediate_frequency = params["readout_frequency"] + flags |= ModifiedFlags.PARAMETERS_MODIFIED + flags |= ModifiedFlags.PROGRAM_MODIFIED + flags |= ModifiedFlags.CONFIG_MODIFIED + + if self.readout_pulse.length != params["readout_duration"]: + self.readout_pulse.length = params["readout_duration"] + flags |= ModifiedFlags.PARAMETERS_MODIFIED + flags |= ModifiedFlags.PROGRAM_MODIFIED + flags |= ModifiedFlags.CONFIG_MODIFIED + + if self.use_dBm: + if unit.volts2dBm(self.readout_pulse.amplitude) != params["readout_power"]: + 
self.readout_pulse.amplitude = unit.dBm2volts(params["readout_power"]) + flags |= ModifiedFlags.PARAMETERS_MODIFIED + flags |= ModifiedFlags.PROGRAM_MODIFIED + flags |= ModifiedFlags.CONFIG_MODIFIED + else: + if self.readout_pulse.amplitude != params["readout_amplitude"]: + self.readout_pulse.amplitude = params["readout_amplitude"] + flags |= ModifiedFlags.PARAMETERS_MODIFIED + flags |= ModifiedFlags.PROGRAM_MODIFIED + flags |= ModifiedFlags.CONFIG_MODIFIED + + return flags diff --git a/qualang_tools/control_panel/video_mode/scan_modes.py b/qualang_tools/control_panel/video_mode/scan_modes.py new file mode 100644 index 00000000..f12503ba --- /dev/null +++ b/qualang_tools/control_panel/video_mode/scan_modes.py @@ -0,0 +1,201 @@ +from abc import ABC, abstractmethod +from typing import Any, Dict, Sequence, Tuple, Generator +import numpy as np +from matplotlib import figure, axes, pyplot as plt +from matplotlib.ticker import MultipleLocator + +from qm.qua import declare, fixed, if_, assign, for_, for_each_, QuaVariableType + +from qualang_tools.loops import from_array +from qualang_tools.control_panel.video_mode.dash_tools import BaseDashComponent + + +class ScanMode(BaseDashComponent, ABC): + """Abstract base class for scan modes, e.g. raster scan, spiral scan, etc. + + The scan mode is used to generate the scan pattern for the video mode. + """ + + def __init__(self, component_id: str = "scan-mode"): + super().__init__(component_id=component_id) + + @abstractmethod + def get_idxs(self, x_points: int, y_points: int) -> Tuple[np.ndarray, np.ndarray]: + pass + + def plot_scan(self, x_points: int, y_points: int) -> Tuple[figure.Figure, axes.Axes]: + idxs_x, idxs_y = self.get_idxs(x_points, y_points) + + u = np.diff(idxs_x) + v = np.diff(idxs_y) + pos_x = idxs_x[:-1] + u / 2 + pos_y = idxs_y[:-1] + v / 2 + norm = np.sqrt(u**2 + v**2) + + fig, ax = plt.subplots() + ax.plot(idxs_x, idxs_y, marker="o") + ax.quiver(pos_x, pos_y, u / norm, v / norm, angles="xy", zorder=5, pivot="mid") + + ax.xaxis.grid(True, which="both") + ax.xaxis.set_minor_locator(MultipleLocator(abs(np.max(u)))) + ax.yaxis.grid(True, which="both") + ax.yaxis.set_minor_locator(MultipleLocator(abs(np.max(v)))) + plt.show() + + return fig, ax + + @abstractmethod + def scan( + self, x_vals: Sequence[float], y_vals: Sequence[float] + ) -> Generator[Tuple[QuaVariableType, QuaVariableType], None, None]: + pass + + +class RasterScan(ScanMode): + """Raster scan mode. + + The raster scan mode is a simple scan mode that scans the grid in a raster pattern. + """ + + def get_idxs(self, x_points: int, y_points: int) -> Tuple[np.ndarray, np.ndarray]: + x_idxs = np.tile(np.arange(x_points), y_points) + y_idxs = np.repeat(np.arange(y_points), x_points) + return x_idxs, y_idxs + + def scan( + self, x_vals: Sequence[float], y_vals: Sequence[float] + ) -> Generator[Tuple[QuaVariableType, QuaVariableType], None, None]: + voltages = {"x": declare(fixed), "y": declare(fixed)} + + with for_(*from_array(voltages["y"], y_vals)): # type: ignore + with for_(*from_array(voltages["x"], x_vals)): # type: ignore + yield voltages["x"], voltages["y"] + + +class SwitchRasterScan(ScanMode): + """Switch raster scan mode. + + The switch raster scan mode is a scan mode that scans the grid in a raster pattern, + but the direction of the scan is switched after each row or column. + This is useful when the scan length is similar to the bias tee frequency. + + Args: + start_from_middle: Whether to start the scan from the middle of the array. 
+ For an array centered around 0, the scan will start with 0 and progressively increase in amplitude. + """ + + def __init__(self, component_id: str = "switch-raster-scan", start_from_middle: bool = True): + super().__init__(component_id=component_id) + self.start_from_middle = start_from_middle + + @staticmethod + def interleave_arr(arr: np.ndarray, start_from_middle: bool = True) -> np.ndarray: + mid_idx = len(arr) // 2 + if len(arr) % 2: + interleaved = [arr[mid_idx]] + arr1 = arr[mid_idx + 1 :] + arr2 = arr[mid_idx - 1 :: -1] + interleaved += [elem for pair in zip(arr1, arr2) for elem in pair] + else: + arr1 = arr[mid_idx:] + arr2 = arr[mid_idx - 1 :: -1] + interleaved = [elem for pair in zip(arr1, arr2) for elem in pair] + + if not start_from_middle: + interleaved = interleaved[::-1] + return np.array(interleaved) + + def get_idxs(self, x_points: int, y_points: int) -> Tuple[np.ndarray, np.ndarray]: + y_idxs = self.interleave_arr(np.arange(y_points), start_from_middle=self.start_from_middle) + x_idxs = np.tile(np.arange(x_points), y_points) + y_idxs = np.repeat(y_idxs, x_points) + return x_idxs, y_idxs + + def scan( + self, x_vals: Sequence[float], y_vals: Sequence[float] + ) -> Generator[Tuple[QuaVariableType, QuaVariableType], None, None]: + voltages = {"x": declare(fixed), "y": declare(fixed)} + + with for_each_(voltages["y"], self.interleave_arr(y_vals, start_from_middle=self.start_from_middle)): # type: ignore + with for_(*from_array(voltages["x"], x_vals)): # type: ignore + yield voltages["x"], voltages["y"] + + +class SpiralScan(ScanMode): + """Spiral scan mode. + + The spiral scan mode is a scan mode that scans the grid in a spiral pattern. + """ + + def get_idxs(self, x_points: int, y_points: int) -> Tuple[np.ndarray, np.ndarray]: + assert x_points == y_points, "Spiral only works for square grids" + + num_half_spirals = x_points + x_idx = x_points // 2 + y_idx = y_points // 2 + + idxs_x = [x_idx] + idxs_y = [y_idx] + + for half_spiral_idx in range(num_half_spirals): + initial_direction_RL = "L" if half_spiral_idx % 2 else "R" + direction_UD = "U" if half_spiral_idx % 2 else "D" + direction_LR = "R" if half_spiral_idx % 2 else "L" + + if half_spiral_idx: + x_idx += 1 if initial_direction_RL == "R" else -1 + idxs_x.append(x_idx) + idxs_y.append(y_idx) + + for _ in range(half_spiral_idx): + y_idx += 1 if direction_UD == "U" else -1 + idxs_x.append(x_idx) + idxs_y.append(y_idx) + + for _ in range(half_spiral_idx): + x_idx += 1 if direction_LR == "R" else -1 + idxs_x.append(x_idx) + idxs_y.append(y_idx) + + return np.array(idxs_x), np.array(idxs_y) + + def scan( + self, x_vals: Sequence[float], y_vals: Sequence[float] + ) -> Generator[Tuple[QuaVariableType, QuaVariableType], None, None]: + movement_direction = declare(fixed) + half_spiral_idx = declare(int) + k = declare(int) + x = declare(fixed) + y = declare(fixed) + voltages = {"x": x, "y": y} + + assert len(x_vals) == len( + y_vals + ), f"x_vals and y_vals must have the same length ({len(x_vals)} != {len(y_vals)})" + num_half_spirals = len(x_vals) + x_step = x_vals[1] - x_vals[0] + y_step = y_vals[1] - y_vals[0] + + assign(movement_direction, -1.0) + assign(x, 0.0) + assign(y, 0.0) + yield voltages["x"], voltages["y"] + + with for_(half_spiral_idx, 0, half_spiral_idx < num_half_spirals, half_spiral_idx + 1): # type: ignore + # First take one step in the opposite XY direction + with if_(half_spiral_idx > 0): # type: ignore + assign(x, x - x_step * movement_direction) # type: ignore + yield voltages["x"], voltages["y"] + + with 
for_(k, 0, k < half_spiral_idx, k + 1): # type: ignore + assign(y, y + y_step * movement_direction) # type: ignore + yield voltages["x"], voltages["y"] + + with for_(k, 0, k < half_spiral_idx, k + 1): # type: ignore + assign(x, x + x_step * movement_direction) # type: ignore + yield voltages["x"], voltages["y"] + + assign(movement_direction, -movement_direction) # type: ignore + + assign(x, 0) + assign(y, 0) diff --git a/qualang_tools/control_panel/video_mode/sweep_axis.py b/qualang_tools/control_panel/video_mode/sweep_axis.py new file mode 100644 index 00000000..f8a40a30 --- /dev/null +++ b/qualang_tools/control_panel/video_mode/sweep_axis.py @@ -0,0 +1,54 @@ +from dataclasses import dataclass +from typing import Optional + +import numpy as np + +from qualang_tools.control_panel.video_mode.voltage_parameters import VoltageParameter + + +__all__ = ["SweepAxis"] + + +@dataclass +class SweepAxis: + """Class representing a sweep axis. + + Attributes: + name: Name of the axis. + span: Span of the axis. + points: Number of points in the sweep. + label: Label of the axis. + units: Units of the axis. + offset_parameter: Offset parameter of the axis. + attenuation: Attenuation of the axis (0 by default) + """ + + name: str + span: float + points: int + label: Optional[str] = None + units: Optional[str] = None + offset_parameter: Optional[VoltageParameter] = None + attenuation: float = 0 + + @property + def sweep_values(self): + """Returns axis sweep values using span and points.""" + return np.linspace(-self.span / 2, self.span / 2, self.points) + + @property + def sweep_values_unattenuated(self): + """Returns axis sweep values without attenuation.""" + return self.sweep_values * 10 ** (self.attenuation / 20) + + @property + def sweep_values_with_offset(self): + """Returns axis sweep values with offset.""" + if self.offset_parameter is None: + return self.sweep_values_unattenuated + return self.sweep_values_unattenuated + self.offset_parameter.get_latest() + + @property + def scale(self): + """Returns axis scale factor, calculated from attenuation.""" + return 10 ** (-self.attenuation / 20) diff --git a/qualang_tools/control_panel/video_mode/utils.py b/qualang_tools/control_panel/video_mode/utils.py new file mode 100644 index 00000000..7b1b6c61 --- /dev/null +++ b/qualang_tools/control_panel/video_mode/utils.py @@ -0,0 +1,29 @@ +from typing import Any, Dict + + +__all__ = ["dicts_equal"] + + +def dicts_equal(d1: Dict[Any, Any], d2: Dict[Any, Any]) -> bool: + """Check if two dictionaries are equal. + + This method checks if two dictionaries are equal by comparing their keys and values recursively. 
+ """ + if d1.keys() != d2.keys(): + return False + for key, value in d1.items(): + if isinstance(value, dict): + if not dicts_equal(value, d2[key]): + return False + elif isinstance(value, list): + if not isinstance(d2[key], list) or len(value) != len(d2[key]): + return False + for v1, v2 in zip(value, d2[key]): + if isinstance(v1, dict): + if not dicts_equal(v1, v2): + return False + elif v1 != v2: + return False + elif value != d2[key]: + return False + return True diff --git a/qualang_tools/control_panel/video_mode/video_mode.py b/qualang_tools/control_panel/video_mode/video_mode.py new file mode 100644 index 00000000..2631c847 --- /dev/null +++ b/qualang_tools/control_panel/video_mode/video_mode.py @@ -0,0 +1,336 @@ +from datetime import datetime +from pathlib import Path +from typing import Optional, Union +from dash import dcc, html, ALL, MATCH +from dash_extensions.enrich import DashProxy, Output, Input, State, BlockingCallbackTransform +import dash_bootstrap_components as dbc # Add this import + +import logging + +from qualang_tools.control_panel.video_mode.data_acquirers import BaseDataAcquirer +from .dash_tools import create_axis_layout, create_input_field, xarray_to_plotly + + +__all__ = ["VideoMode"] + + +class VideoMode: + """ + A class for visualizing and controlling data acquisition in video mode. + + This class provides a dashboard interface for visualizing and controlling data acquisition in video mode. + It uses Dash for the web interface and Plotly for the heatmap visualization. + + Attributes: + data_acquirer (BaseDataAcquirer): The data acquirer object that provides the data to be visualized. + save_path (Union[str, Path]): The path where data and images will be saved. + update_interval (float): The interval at which the data is updated in the dashboard (in seconds). + If the previous update was not finished in the given interval, the update will be skipped. + """ + + def __init__( + self, + data_acquirer: BaseDataAcquirer, + save_path: Union[str, Path] = "./video_mode_output", + update_interval: float = 0.1, + ): + self.data_acquirer = data_acquirer + self.save_path = Path(save_path) + self.paused = False + self._last_update_clicks = 0 + self._last_save_clicks = 0 + self.update_interval = update_interval + self._is_updating = False + + self.app = DashProxy( + __name__, + title="Video Mode", + transforms=[BlockingCallbackTransform(timeout=10)], + external_stylesheets=[dbc.themes.BOOTSTRAP], + ) # Add Bootstrap theme + self.create_layout() + + def create_layout(self): + """ + Create the layout for the video mode dashboard. + + This method sets up the Dash layout for the video mode control panel. It includes: + - A graph to display the heatmap of acquired data + - Controls for X and Y parameters (offset, span, points) + - Buttons for pausing/resuming data acquisition and saving data + - Display for the current iteration count + - Input for setting the number of averages + + The layout is designed to be responsive and user-friendly, with aligned input fields + and clear labeling. It uses a combination of Dash core components and HTML elements + to create an intuitive interface for controlling and visualizing the data acquisition + process. + + Returns: + None: The method sets up the `self.app.layout` attribute but doesn't return anything. 
+ """ + self.fig = xarray_to_plotly(self.data_acquirer.data_array) + + self.app.layout = dbc.Container( + [ + dbc.Row( + [ + dbc.Col( # Settings + [ + html.H1("Video mode", className="mb-4"), + dbc.Row( + [ + dbc.Col( + dbc.Button( + "Pause", + id="pause-button", + n_clicks=0, + className="mb-3", + ), + width="auto", + ), + dbc.Col( + html.Div( + id="iteration-output", + children="Iteration: 0", + className="mb-3 ml-3 d-flex align-items-center", + ), + width="auto", + ), + ], + className="mb-4", + ), + html.Div(self.data_acquirer.get_dash_components(include_subcomponents=True)), + dbc.Row( + [ + dbc.Col( + dbc.Button( + "Update", + id="update-button", + n_clicks=0, + className="mt-3 mr-2", + ), + width="auto", + ), + dbc.Col( + dbc.Button( + "Save", + id="save-button", + n_clicks=0, + className="mt-3", + ), + width="auto", + ), + ], + ), + ], + width=5, + ), + dbc.Col( + dcc.Graph( + id="live-heatmap", + figure=self.fig, + style={"aspect-ratio": "1 / 1"}, + ), + width=7, + ), + ] + ), + dcc.Interval(id="interval-component", interval=self.update_interval * 1000, n_intervals=0), + ], + fluid=True, + style={"height": "100vh"}, + ) + logging.debug(f"Dash layout created, update interval: {self.update_interval*1000} ms") + self.add_callbacks() + + def add_callbacks(self): + @self.app.callback( + Output("pause-button", "children"), + [Input("pause-button", "n_clicks")], + ) + def toggle_pause(n_clicks): + self.paused = not self.paused + logging.debug(f"Paused: {self.paused}") + return "Resume" if self.paused else "Pause" + + @self.app.callback( + [ + Output("live-heatmap", "figure"), + Output("iteration-output", "children"), + ], + [ + Input("interval-component", "n_intervals"), + ], + blocking=True, + ) + def update_heatmap(n_intervals): + logging.debug(f"*** Dash callback {n_intervals} called at {datetime.now().strftime('%H:%M:%S.%f')[:-3]}") + + if self.paused or self._is_updating: + logging.debug(f"Updates paused at iteration {self.data_acquirer.num_acquisitions}") + return self.fig, f"Iteration: {self.data_acquirer.num_acquisitions}" + + # Increment iteration counter and update frontend + updated_xarr = self.data_acquirer.update_data() + self.fig = xarray_to_plotly(updated_xarr) + logging.debug(f"Updating heatmap, num_acquisitions: {self.data_acquirer.num_acquisitions}") + return self.fig, f"Iteration: {self.data_acquirer.num_acquisitions}" + + # Create states for all input components + component_states = [] + for component_id in self.data_acquirer.get_component_ids(): + component_states += [ + State({"type": component_id, "index": ALL}, "id"), + State({"type": component_id, "index": ALL}, "value"), + ] + + @self.app.callback( + [], + [Input("update-button", "n_clicks")], + component_states, + blocking=True, + ) + def update_params(n_update_clicks, *component_inputs): + if n_update_clicks <= self._last_update_clicks: + return + + params = {} + component_inputs_iterator = iter(component_inputs) + for component_id in self.data_acquirer.get_component_ids(): + ids, values = next(component_inputs_iterator), next(component_inputs_iterator) + params[component_id] = {id["index"]: value for id, value in zip(ids, values)} + + logging.debug(f"Updating params: {params}") + self.data_acquirer.update_parameters(params) + + @self.app.callback( + Output("save-button", "children"), + [Input("save-button", "n_clicks")], + ) + def save(n_clicks): + if n_clicks > self._last_save_clicks: + self._last_save_clicks = n_clicks + self.save() + return "Saved!" 
+ return "Save" + + def run(self, debug: bool = True, use_reloader: bool = False): + logging.debug("Starting Dash server") + self.app.run_server(debug=debug, use_reloader=use_reloader) + + def save_data(self, idx: Optional[int] = None): + """ + Save the current data to an HDF5 file. + + This method saves the current data from the data acquirer to an HDF5 file in the specified data save path. + It automatically generates a unique filename by incrementing an index if not provided. + + Args: + idx (Optional[int]): The index to use for the filename. If None, an available index is automatically determined. + + Returns: + int: The index of the saved data file. + + Raises: + ValueError: If the maximum number of data files (9999) has been reached. + FileExistsError: If a file with the generated name already exists. + + Note: + - The data save path is created if it doesn't exist. + - The filename format is 'data_XXXX.h5', where XXXX is a four-digit index. + """ + data_save_path = self.save_path / "data" + logging.info(f"Attempting to save data to folder: {data_save_path}") + + if not data_save_path.exists(): + data_save_path.mkdir(parents=True) + logging.info(f"Created directory: {data_save_path}") + + if idx is None: + idx = 1 + while idx <= 9999 and (data_save_path / f"data_{idx}.h5").exists(): + idx += 1 + + if idx > 9999: + raise ValueError("Maximum number of data files (9999) reached. Cannot save more.") + + filename = f"data_{idx}.h5" + filepath = data_save_path / filename + + if filepath.exists(): + raise FileExistsError(f"File {filepath} already exists.") + self.data_acquirer.data_array.to_netcdf(filepath) # , engine="h5netcdf", format="NETCDF4") + logging.info(f"Data saved successfully: {filepath}") + logging.info("Data save operation completed.") + return idx + + def save_image(self): + """ + Save the current image to a file. + + This method saves the current figure as a PNG image in the specified image save path. + It automatically generates a unique filename by incrementing an index. + + Returns: + int: The index of the saved image file. + + Raises: + ValueError: If the maximum number of screenshots (9999) has been reached. + + Note: + - The image save path is created if it doesn't exist. + - The filename format is 'data_image_XXXX.png', where XXXX is a four-digit index. + """ + image_save_path = self.save_path / "images" + logging.info(f"Attempting to save image to folder: {image_save_path}") + if not image_save_path.exists(): + image_save_path.mkdir(parents=True) + logging.info(f"Created directory: {image_save_path}") + + idx = 1 + while idx <= 9999 and (image_save_path / f"data_image_{idx}.png").exists(): + idx += 1 + if idx <= 9999: + filename = f"data_image_{idx}.png" + filepath = image_save_path / filename + self.fig.write_image(filepath) + logging.info(f"Image saved successfully: {filepath}") + else: + raise ValueError("Maximum number of screenshots (9999) reached. Cannot save more.") + logging.info("Image save operation completed.") + + return idx + + def save(self): + """ + Save both the current image and data. + + This method saves the current figure as a PNG image and the current data as an HDF5 file. + It uses the same index for both files to maintain consistency. + + Returns: + int: The index of the saved files. + + Raises: + ValueError: If the maximum number of files (9999) has been reached. + + Note: + - The image is saved first, followed by the data. + - If data saving fails due to a FileExistsError, a warning is logged instead of raising an exception. 
+ """ + if not self.save_path.exists(): + self.save_path.mkdir(parents=True) + logging.info(f"Created directory: {self.save_path}") + + # Save image first + idx = self.save_image() + + # Attempt to save data with the same index + try: + self.save_data(idx) + except FileExistsError: + logging.warning(f"Data file with index {idx} already exists. Image saved, but data was not overwritten.") + + logging.info(f"Save operation completed with index: {idx}") + return idx diff --git a/qualang_tools/control_panel/video_mode/voltage_parameters.py b/qualang_tools/control_panel/video_mode/voltage_parameters.py new file mode 100644 index 00000000..728c2d82 --- /dev/null +++ b/qualang_tools/control_panel/video_mode/voltage_parameters.py @@ -0,0 +1,35 @@ +import time +import logging + + +__all__ = ["VoltageParameter"] + + +# VoltageParameter Class remains unchanged +class VoltageParameter: + def __init__(self, name, label=None, initial_value=0.0, units="V"): + self.name = name + self.label = label + self.latest_value = initial_value + self._value = initial_value + self.units = units + logging.debug( + f"{self.name} initialized with value {self.latest_value} {self.units}" + ) + + def get(self): + time.sleep(0.2) # Simulate a 200ms delay + self.latest_value = self._value + logging.debug(f"Getting {self.name}: {self.latest_value} {self.units}") + return self.latest_value + + def set(self, new_value): + self._value = new_value + updated_value = self.get() # Return the value after setting + logging.debug( + f"Setting {self.name} to {new_value}: Actual value is {updated_value} {self.units}" + ) + return updated_value + + def get_latest(self): + return self.latest_value