diff --git a/.github/codecov.yml b/.github/codecov.yml
index 8b7d887b..cd070538 100644
--- a/.github/codecov.yml
+++ b/.github/codecov.yml
@@ -3,6 +3,8 @@ ignore:
- "**/python"
- "test/**/*"
- "src/mqt/qecc/cc_decoder/plots.py"
+ - "src/mqt/qecc/analog_information_decoding/utils/data_utils.py"
+ - "src/mqt/qecc/analog_information_decoding/code_construction/*"
coverage:
range: 60..90
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
index a09f067e..e494f41f 100644
--- a/.github/workflows/codeql-analysis.yml
+++ b/.github/workflows/codeql-analysis.yml
@@ -62,7 +62,7 @@ jobs:
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
- uses: github/codeql-action/init@v2
+ uses: github/codeql-action/init@v3
with:
languages: ${{ matrix.language }}
setup-python-dependencies: false
@@ -79,7 +79,7 @@ jobs:
run: cmake --build build
- name: Perform CodeQL Analysis
- uses: github/codeql-action/analyze@v2
+ uses: github/codeql-action/analyze@v3
with:
upload: False
output: sarif-results
@@ -93,6 +93,6 @@ jobs:
output: sarif-results/${{ matrix.language }}.sarif
- name: Upload SARIF
- uses: github/codeql-action/upload-sarif@v2
+ uses: github/codeql-action/upload-sarif@v3
with:
sarif_file: sarif-results/${{ matrix.language }}.sarif
diff --git a/.github/workflows/cpp-linter.yml b/.github/workflows/cpp-linter.yml
index 3a67f697..3e8756a9 100644
--- a/.github/workflows/cpp-linter.yml
+++ b/.github/workflows/cpp-linter.yml
@@ -47,7 +47,7 @@ jobs:
cmake -S . -B build -DCMAKE_BUILD_TYPE=Debug
- name: Run cpp-linter
- uses: cpp-linter/cpp-linter-action@v2
+ uses: cpp-linter/cpp-linter-action@v2.7.2
id: linter
env:
GITHUB_TOKEN: ${{ github.token }}
diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml
index 17e8872b..e642cffa 100644
--- a/.github/workflows/deploy.yml
+++ b/.github/workflows/deploy.yml
@@ -43,8 +43,9 @@ jobs:
- name: Verify clean directory
run: git diff --exit-code
shell: bash
- - uses: actions/upload-artifact@v3
+ - uses: actions/upload-artifact@v4
with:
+ name: cibw-wheels-${{ matrix.runs-on }}-${{ strategy.job-index }}
path: ./wheelhouse/*.whl
build_sdist:
@@ -61,8 +62,9 @@ jobs:
run: pipx run build --sdist
- name: Check metadata
run: pipx run twine check dist/*
- - uses: actions/upload-artifact@v3
+ - uses: actions/upload-artifact@v4
with:
+ name: cibw-sdist
path: dist/*.tar.gz
upload_pypi:
@@ -76,8 +78,9 @@ jobs:
permissions:
id-token: write
steps:
- - uses: actions/download-artifact@v3
+ - uses: actions/download-artifact@v4
with:
- name: artifact
+ pattern: cibw-*
path: dist
+ merge-multiple: true
- uses: pypa/gh-action-pypi-publish@release/v1
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index a65735db..19ca4fc8 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -30,13 +30,13 @@ repos:
# Clean jupyter notebooks
- repo: https://github.com/srstevenson/nb-clean
- rev: 3.1.0
+ rev: 3.2.0
hooks:
- id: nb-clean
# Handling unwanted unicode characters
- repo: https://github.com/sirosen/texthooks
- rev: 0.6.2
+ rev: 0.6.3
hooks:
- id: fix-ligatures
- id: fix-smartquotes
@@ -51,7 +51,7 @@ repos:
# Python linting and formatting using ruff
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.1.4
+ rev: v0.1.9
hooks:
- id: ruff
args: ["--fix", "--show-fixes"]
@@ -68,27 +68,31 @@ repos:
# Check static types with mypy
- repo: https://github.com/pre-commit/mirrors-mypy
- rev: v1.6.1
+ rev: v1.8.0
hooks:
- id: mypy
files: ^(src/mqt|test/python)
+        exclude: "code_construction*|^data_utils\\.py$"
args: []
additional_dependencies:
- importlib_resources
- numpy
- pytest
- pytest-mock
+ - ldpc
+ - bposd
+ - numba
# Check for spelling
- repo: https://github.com/codespell-project/codespell
rev: v2.2.6
hooks:
- id: codespell
- args: ["-L", "wille,linz,applys", "--skip", "*.ipynb"]
+ args: ["-L", "wille,linz,applys,ser", "--skip", "*.ipynb"]
# Clang-format the C++ part of the code base automatically
- repo: https://github.com/pre-commit/mirrors-clang-format
- rev: v17.0.4
+ rev: v17.0.6
hooks:
- id: clang-format
types_or: [c++, c, cuda]
@@ -104,7 +108,7 @@ repos:
# Format configuration files with prettier
- repo: https://github.com/pre-commit/mirrors-prettier
- rev: v3.0.3
+ rev: v3.1.0
hooks:
- id: prettier
types_or: [yaml, markdown, html, css, scss, javascript, json]
@@ -117,14 +121,3 @@ repos:
language: pygrep
entry: PyBind|Numpy|Cmake|CCache|Github|PyTest|Mqt|Tum
exclude: .pre-commit-config.yaml
-
- # Checking sdist validity
- - repo: https://github.com/henryiii/check-sdist
- rev: v0.1.3
- hooks:
- - id: check-sdist
- args: [--inject-junk]
- additional_dependencies:
- - scikit-build-core[pyproject]>=0.5.0,<0.6 # TODO: remove upper cap once scikit-build-core is updated
- - setuptools-scm>=7
- - pybind11>=2.11
diff --git a/README.md b/README.md
index 04618424..77b1a925 100644
--- a/README.md
+++ b/README.md
@@ -34,6 +34,8 @@ The tool can be used to:
- The framework allows to apply different QECC schemes to quantum circuits and either exports the resulting
circuits or simulates them using Qiskit [[4]](https://qiskit.org/). Currently, six different ECCs are supported
with varying extent of functionality.
+- WIP: Decode bosonic quantum LDPC codes and conduct numerical simulations for analog information decoding under phenomenological
+ (cat qubit) noise.
@@ -132,6 +134,9 @@ Windows support is currently experimental.
If you use our tool for your research, we will be thankful if you refer to it by citing the appropriate publication:
+- [![a](https://img.shields.io/static/v1?label=arXiv&message=2311.01328&color=inactive&style=flat-square)](https://arxiv.org/abs/2311.01328)
+ L. Berent, T. Hillmann, J. Eisert, R. Wille, and J. Roffe, "Analog information decoding of bosonic quantum LDPC codes".
+
- [![a](https://img.shields.io/static/v1?label=arXiv&message=2303.14237&color=inactive&style=flat-square)](https://arxiv.org/abs/2303.14237)
L. Berent, L. Burgholzer, P.J. Derks, J. Eisert, and R. Wille, "Decoding quantum color codes with MaxSAT".
@@ -144,3 +149,13 @@ If you use our tool for your research, we will be thankful if you refer to it by
L. Berent, L. Burgholzer, and R.
Wille, "[Software Tools for Decoding Quantum Low-Density Parity Check Codes](https://arxiv.org/abs/2209.01180),"
in Asia and South Pacific Design Automation Conference (ASP-DAC), 2023
+
+## Credits
+
+The authors of this software are:
+
+- Lucas Berent
+- Lukas Burgholzer
+- Thomas Grurl
+- Peter-Jan H.S. Derks
+- Timo Hillmann
diff --git a/docs/source/AnalogInfo.rst b/docs/source/AnalogInfo.rst
new file mode 100644
index 00000000..85cbcf7d
--- /dev/null
+++ b/docs/source/AnalogInfo.rst
@@ -0,0 +1,56 @@
+Analog Information Decoding
+===========================
+This submodule provides means to conduct decoding simulations for quantum CSS codes with
+analog syndrome information as proposed in the corresponding paper :cite:labelpar:`berent2023analog`.
+Proper integration and setup are still a work in progress.
+The main functionality is provided in the :code:`simulators` module, which contains the main classes
+that can be used to conduct several types of simulations:
+
+- :code:`ATD_Simulator`: Analog Tanner graph decoding,
+- :code:`Single-Shot Simulator`: Analog single-shot decoding with meta checks, and
+- :code:`QSS_Simulator`: Quasi-single-shot decoding.
+
+Moreover, :code:`memory_experiment` contains methods for analog overlapping window decoding, in
+particular the :code:`decode_multiround` method.
+
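+As a minimal illustration (a sketch, not a definitive interface), a decoding simulation with the
+:code:`AtdSimulator` might be set up as follows, where the check matrices :code:`Hx`, :code:`Hz`, the
+logical operators :code:`Lx`, :code:`Lz`, and the output file name are placeholders:
+
+.. code-block:: python
+
+    from mqt.qecc.analog_information_decoding.simulators.analog_tannergraph_decoding import AtdSimulator
+    from mqt.qecc.analog_information_decoding.utils.data_utils import BpParams
+
+    sim = AtdSimulator(
+        hx=Hx,
+        lx=Lx,
+        hz=Hz,
+        lz=Lz,
+        codename="example",
+        seed=42,
+        bp_params=BpParams(max_bp_iter=100),
+        data_err_rate=0.05,
+        code_params={"n": Hx.shape[1], "k": 1},  # [[n, k]] parameters of the code (k = 1 is a placeholder)
+        sigma=0.3,  # standard deviation of the analog syndrome noise
+        decoding_method="atd",
+        output_path="results.json",
+    )
+    results = sim.run(samples=1000)
+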
+Results
+-------
+The :code:`results` directory contains the results used in the paper :cite:labelpar:`berent2023analog`.
+
+Codes
+-----
+
+The :code:`codes` directory contains the parity-check matrices of the codes used in the paper :cite:labelpar:`berent2023analog`.
+Three-dimensional toric codes can be constructed either with the hypergraph product construction
+or with a library, e.g., PanQEC :cite:labelpar:`huang2023panqec`.
+
+Code construction
+-----------------
+
+The :code:`code_construction` directory contains the code used to construct higher-dimensional hypergraph
+product codes and the :code:`compute_distances.sh` script, which automatically computes bounds on the
+distances of the constructed codes using the GAP package QDistRnd :cite:labelpar:`pryadko2023qdistrnd`.
+
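+Assuming the generated matrices have been written to :code:`generated_codes/<codename>`, the distance
+computation can also be triggered from Python (it invokes :code:`compute_distances.sh` on that directory):
+
+.. code-block:: python
+
+    from mqt.qecc.analog_information_decoding.code_construction.code_constructor import run_compute_distances
+
+    run_compute_distances("4D_toric_3")  # runs compute_distances.sh on generated_codes/4D_toric_3
+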
+Utils
+-----
+Here we present an overview of the functionality of the utils package.
+
+Plotting
+++++++++
+The :code:`plotting` directory contains the code used to plot the results in the paper :cite:labelpar:`berent2023analog`.
+
+Data Utils
+++++++++++
+
+We have implemented several utility functions as part of this package that might be of independent
+interest for decoding simulations and data analysis. These are provided in the :code:`data_utils` module.
+
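+For example, the :code:`BpParams` class used throughout the simulators bundles the decoder configuration
+(such as :code:`max_bp_iter`, :code:`osd_order`, :code:`osd_method`, and :code:`ms_scaling_factor`)
+into a single object.
+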
+Simulation Utils
+++++++++++++++++
+This module contains functionality needed throughout different simulation types.
+
+Dependencies
+++++++++++++
+
+The BP+OSD implementation we use, as well as our implementation of the soft-syndrome minimum-sum decoder, is provided
+in the ldpc2 package, a preliminary beta version of which is available on GitHub :cite:labelpar:`roffe2023ldpc`.
diff --git a/docs/source/conf.py b/docs/source/conf.py
index d0dcb6c2..b8f96c20 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -116,7 +116,7 @@
class CDAStyle(UnsrtStyle):
"""Custom style for including PDF links."""
- def format_url(self, _e: Entry) -> HRef:
+ def format_url(self, _e: Entry) -> HRef: # noqa: PLR6301
"""Format URL field as a link to the PDF."""
url = field("url", raw=True)
return href()[url, "[PDF]"]
diff --git a/docs/source/refs.bib b/docs/source/refs.bib
index f664a5a7..2165bbc4 100644
--- a/docs/source/refs.bib
+++ b/docs/source/refs.bib
@@ -108,3 +108,35 @@ @article{FowlerSurfaceCodes
doi = {10.1103/PhysRevA.80.052312},
url = {https://link.aps.org/doi/10.1103/PhysRevA.80.052312}
}
+@article{berent2023analog,
+ title={Analog information decoding of bosonic quantum LDPC codes},
+ author={Berent, Lucas and Hillmann, Timo and Eisert, Jens and Wille, Robert and Roffe, Joschka},
+ journal={arXiv preprint arXiv:2311.01328},
+ year={2023}
+}
+@misc{huang2023panqec,
+  author = {Huang, Eric and Pesah, Arthur},
+ title = {PanQEC},
+ year = {2023},
+ publisher = {GitHub},
+ journal = {GitHub repository},
+ howpublished = {\url{https://github.com/panqec/panqec}},
+ commit = {a9dd27e}
+}
+
+@misc{roffe2023ldpc,
+  author = {Roffe, Joschka},
+ title = {LDPC},
+ year = {2023},
+ publisher = {GitHub},
+ journal = {GitHub repository},
+ howpublished = {\url{https://github.com/quantumgizmos/ldpc/tree/ldpc_v2}},
+ commit = {9fe6d13}
+}
+
+@article{pryadko2023qdistrnd,
+ title={QDistRnd: A GAP package for computing the distance of quantum error-correcting codes},
+ author={Pryadko, Leonid P and Shabashov, Vadim A and Kozin, Valerii K},
+ journal={arXiv preprint arXiv:2308.15140},
+ year={2023}
+}
diff --git a/extern/mqt-core b/extern/mqt-core
index cb348652..562cb3fe 160000
--- a/extern/mqt-core
+++ b/extern/mqt-core
@@ -1 +1 @@
-Subproject commit cb348652d170978e5688959016ea15aa1157945b
+Subproject commit 562cb3fe64e77af7a62a92112ccb500d20a12fa0
diff --git a/noxfile.py b/noxfile.py
index bee7c72f..3fc8c661 100644
--- a/noxfile.py
+++ b/noxfile.py
@@ -16,6 +16,12 @@
PYTHON_ALL_VERSIONS = ["3.8", "3.9", "3.10", "3.11", "3.12"]
+BUILD_REQUIREMENTS = [
+ "scikit-build-core[pyproject]>=0.6.1",
+ "setuptools_scm>=7",
+ "pybind11>=2.11",
+]
+
if os.environ.get("CI", None):
nox.options.error_on_missing_interpreters = True
@@ -27,19 +33,6 @@ def lint(session: nox.Session) -> None:
session.run("pre-commit", "run", "--all-files", *session.posargs)
-@nox.session(reuse_venv=True)
-def pylint(session: nox.Session) -> None:
- """Run PyLint.
-
- Simply execute `nox -rs pylint` to run PyLint.
- """
- session.install(
- "scikit-build-core[pyproject]<0.6", "setuptools_scm", "pybind11"
- ) # TODO: remove upper cap once scikit-build-core is updated
- session.install("--no-build-isolation", "-ve.", "pylint")
- session.run("pylint", "mqt.qecc", *session.posargs)
-
-
def _run_tests(
session: nox.Session,
*,
@@ -51,21 +44,15 @@ def _run_tests(
env = {"PIP_DISABLE_PIP_VERSION_CHECK": "1"}
if os.environ.get("CI", None) and sys.platform == "win32":
- env["SKBUILD_CMAKE_ARGS"] = "-T ClangCL"
+ env["CMAKE_GENERATOR"] = "Ninja"
_extras = ["test", *extras]
if "--cov" in posargs:
_extras.append("coverage")
posargs.append("--cov-config=pyproject.toml")
- session.install(
- "scikit-build-core[pyproject]<0.6",
- "setuptools_scm",
- "pybind11",
- *install_args,
- env=env,
- ) # TODO: remove upper cap once scikit-build-core is updated
- install_arg = f"-ve.[{','.join(_extras)}]"
+ session.install(*BUILD_REQUIREMENTS, *install_args, env=env)
+ install_arg = f".[{','.join(_extras)}]"
session.install("--no-build-isolation", install_arg, *install_args, env=env)
session.run("pytest", *run_args, *posargs, env=env)
@@ -98,13 +85,8 @@ def docs(session: nox.Session) -> None:
if args.builder != "html" and args.serve:
session.error("Must not specify non-HTML builder with --serve")
- build_requirements = [
- "scikit-build-core[pyproject]<0.6",
- "setuptools_scm",
- "pybind11",
- ] # TODO: remove upper cap once scikit-build-core is updated
extra_installs = ["sphinx-autobuild"] if args.serve else []
- session.install(*build_requirements, *extra_installs)
+ session.install(*BUILD_REQUIREMENTS, *extra_installs)
session.install("--no-build-isolation", "-ve.[docs]")
session.chdir("docs")
diff --git a/pyproject.toml b/pyproject.toml
index 49167b8f..29925af0 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,5 @@
[build-system]
-# TODO: remove upper cap once scikit-build-core is updated
-requires = ["scikit-build-core>=0.5.0,<0.6.0", "setuptools-scm>=7", "pybind11>=2.11"]
+requires = ["scikit-build-core>=0.6.1", "setuptools-scm>=7", "pybind11>=2.11"]
build-backend = "scikit_build_core.build"
[project]
@@ -10,7 +9,8 @@ readme = "README.md"
authors = [
{ name = "Lucas Berent", email = "lucas.berent@tum.de" },
{ name = "Lukas Burgholzer", email = "lukas.burgholzer@tum.de" },
- { name = "Peter-Jan H.S. Derks", email = "peter-janderks@hotmail.com" }
+ { name = "Peter-Jan H.S. Derks", email = "peter-janderks@hotmail.com" },
+ { name = "Timo Hillmann", email = "timo.hillmann@rwth-aachen.de"}
]
keywords = ["MQT", "quantum-computing", "error-correction", "MaxSAT", "QLDPC"]
license = { file = "LICENSE" }
@@ -43,6 +43,9 @@ dependencies = [
"numpy",
"qiskit-terra>=0.23.0",
"qiskit-aer>=0.12.1",
+ "bposd",
+ "numba",
+ "pymatching",
]
dynamic = ["version"]
@@ -80,7 +83,7 @@ Discussions = "https://github.com/cda-tum/mqt-qecc/discussions"
[tool.scikit-build]
# Protect the configuration against future changes in scikit-build-core
-minimum-version = "0.5.0"
+minimum-version = "0.6.1"
# Set the target to build
cmake.targets = ["pyqecc"]
@@ -146,7 +149,7 @@ log_cli_level = "INFO"
xfail_strict = true
filterwarnings = [
"error",
- "ignore:pkg_resources.*:DeprecationWarning:",
+ "ignore:.*pkg_resources.*:DeprecationWarning:",
"ignore:.*qiskit.__qiskit_version__.*:DeprecationWarning:qiskit:",
"ignore:.*qiskit.utils.algorithm_globals.QiskitAlgorithmGlobals*:DeprecationWarning:qiskit",
"ignore:.*Building a flow controller with keyword arguments is going to be deprecated*:PendingDeprecationWarning:qiskit",
@@ -170,9 +173,14 @@ enable_error_code = ["ignore-without-code", "redundant-expr", "truthy-bool"]
warn_unreachable = true
explicit_package_bases = true
pretty = true
+exclude = [
+ "code_construction*",
+ "^data_utils\\.py$"
+]
[[tool.mypy.overrides]]
-module = ["qiskit.*", "qiskit_aer.*", "qecsim.*" , "matplotlib.*", "scipy.*", "ldpc.*", "pytest_console_scripts.*", "z3.*"]
+module = ["qiskit.*", "qiskit_aer.*", "qecsim.*", "matplotlib.*", "scipy.*", "ldpc.*", "pytest_console_scripts.*",
+ "z3.*", "bposd.*", "numba.*", "pymatching.*"]
ignore_missing_imports = true
@@ -184,8 +192,11 @@ preview = true
unsafe-fixes = true
[tool.ruff.lint]
+exclude = [
+ "scripts/*",
+ "*/cc_decoder/plots.py"
+]
extend-select = [
- "E", "F", "W", # flake8
"A", # flake8-builtins
"ANN", # flake8-annotations
"ARG", # flake8-unused-arguments
@@ -197,9 +208,11 @@ extend-select = [
"EXE", # flake8-executable
"FA", # flake8-future-annotations
"FLY", # flynt
+ "FURB", # refurb
"I", # isort
"ICN", # flake8-import-conventions
"ISC", # flake8-implicit-str-concat
+ "LOG", # flake8-logging-format
"N", # flake8-naming
"NPY", # numpy
"PERF", # perflint
@@ -213,22 +226,45 @@ extend-select = [
"RET", # flake8-return
"RSE", # flake8-raise
"RUF", # Ruff-specific
+ "S", # flake8-bandit
"SLF", # flake8-self
"SLOT", # flake8-slots
"SIM", # flake8-simplify
+ "T20", # flake8-print
"TCH", # flake8-type-checking
- "TID", # flake8-tidy-imports
+ "TID251", # flake8-tidy-imports.banned-api
"TRY", # tryceratops
"UP", # pyupgrade
"YTT", # flake8-2020
]
-extend-ignore = [
- "ANN101", # Missing type annotation for self in method
- "PLR", # Design related pylint codes
+ignore = [
+ "ANN101", # Missing type annotation for `self` in method
+ "ANN102", # Missing type annotation for `cls` in classmethod
+ "ISC001", # Conflicts with formatter
+ "PLR09", # Too many <...>
+ "PLR2004", # Magic value used in comparison
+ "PLC0415", # Import should be at top of file
+ "PT004", # Incorrect, just usefixtures instead.
+ "S101", # Use of assert detected
]
isort.required-imports = ["from __future__ import annotations"]
+[tool.ruff.lint.flake8-tidy-imports.banned-api]
+"typing.Callable".msg = "Use collections.abc.Callable instead."
+"typing.Iterator".msg = "Use collections.abc.Iterator instead."
+"typing.Mapping".msg = "Use collections.abc.Mapping instead."
+"typing.Sequence".msg = "Use collections.abc.Sequence instead."
+"typing.Set".msg = "Use collections.abc.Set instead."
+"typing.Self".msg = "Use scikit_build_core._compat.typing.Self instead."
+"typing_extensions.Self".msg = "Use scikit_build_core._compat.typing.Self instead."
+"typing.assert_never".msg = "Use scikit_build_core._compat.typing.assert_never instead."
+"importlib.resources".msg = "Use scikit_build_core._compat.importlib.resources instead."
+"importlib_resources".msg = "Use scikit_build_core._compat.importlib.resources instead."
+
[tool.ruff.lint.per-file-ignores]
+"test/python/**" = ["T20", "ANN"]
+"docs/**" = ["T20"]
+"noxfile.py" = ["T20", "TID251"]
"*.pyi" = ["D"] # pydocstyle
"*.ipynb" = [
"D", # pydocstyle
@@ -236,7 +272,7 @@ isort.required-imports = ["from __future__ import annotations"]
"I002", # Allow missing `from __future__ import annotations` import
]
-[tool.ruff.pydocstyle]
+[tool.ruff.lint.pydocstyle]
convention = "google"
@@ -254,7 +290,7 @@ before-all = [
"git clone --branch v2.9.0 --depth 1 https://github.com/flintlib/flint2.git",
"cd flint2 && ./configure && make -j 3 && make install"
]
-environment = { DEPLOY="ON" }
+environment = { DEPLOY = "ON" }
[tool.cibuildwheel.macos]
archs = "x86_64"
diff --git a/scripts/examples/atd_example.py b/scripts/examples/atd_example.py
new file mode 100644
index 00000000..15a4d19c
--- /dev/null
+++ b/scripts/examples/atd_example.py
@@ -0,0 +1,69 @@
+""" example simulation script """
+
+import numpy as np
+import scipy
+from ldpc import mod2
+from matplotlib import pyplot as plt
+
+from mqt.qecc.analog_information_decoding.simulators.analog_tannergraph_decoding import AtdSimulator
+from mqt.qecc.analog_information_decoding.utils.data_utils import BpParams
+
+code_path = "src/mqt/qecc/analog_information_decoding/codes/lifted_product/lp_l="
+s = np.linspace(0.10, 0.4, 11)
+p = 0.05
+for bp_method in ["msl"]:
+ for decoder in ["atd"]:
+ for c in [16, 21, 30]:
+            Hx = scipy.sparse.load_npz(code_path + str(c) + "_hx.npz").toarray()
+            Hz = scipy.sparse.load_npz(code_path + str(c) + "_hz.npz").toarray()
+            Lx = scipy.sparse.load_npz(code_path + str(c) + "_lx.npz").toarray()
+            Lz = scipy.sparse.load_npz(code_path + str(c) + "_lz.npz").toarray()
+            # [[n, k]] parameters required by AtdSimulator; k is computed as in the code constructor
+            code_params = {"n": Hx.shape[1], "k": Hx.shape[1] - mod2.rank(Hx) - mod2.rank(Hz)}
+ lers = []
+ ebs = []
+ for sigma in s:
+ print(sigma) # noqa: T201
+ sim = AtdSimulator(
+ hx=Hx,
+ lx=Lx,
+ hz=Hz,
+ lz=Lz,
+ codename=str(c),
+ data_err_rate=p,
+ sigma=sigma,
+                seed=666,
+                code_params=code_params,
+                output_path=f"lp_{c}_atd_results.json",  # output file for (intermediate) results
+ bp_params=BpParams(
+ max_bp_iter=100,
+ bp_method=bp_method,
+ osd_order=10,
+ osd_method="osd_cs",
+ ms_scaling_factor=0.75,
+ cutoff=5.0,
+ ),
+ decoding_method=decoder,
+ )
+ out = sim.run(samples=1500)
+
+ ler = (
+ out["z_ler"] * (1 - out["x_ler"]) + out["x_ler"] * (1 - out["z_ler"]) + out["x_ler"] * out["z_ler"]
+ )
+ ler_eb = (
+ out["z_ler_eb"] * (1 - out["x_ler_eb"])
+ + out["x_ler_eb"] * (1 - out["z_ler_eb"])
+ + out["x_ler_eb"] * out["z_ler_eb"]
+ )
+ lers.append(ler)
+ ebs.append(ler_eb)
+
+ plt.errorbar(
+ s,
+ lers,
+ ebs,
+ label=f"l={c}, {decoder} {bp_method}",
+ marker="o",
+ linestyle="solid",
+ )
+
+plt.legend()
+plt.xlabel("sigma")
+plt.ylabel("LER")
+plt.yscale("log")
+plt.show()
diff --git a/scripts/numerics/code_construct/constructHGPcodes.py b/scripts/numerics/code_construct/constructHGPcodes.py
index d5fb69c4..7b84f5bc 100644
--- a/scripts/numerics/code_construct/constructHGPcodes.py
+++ b/scripts/numerics/code_construct/constructHGPcodes.py
@@ -21,14 +21,12 @@
np.savetxt(f"./hgp_{qcode.code_params}_hx.txt", qcode.hx, fmt="%d", newline="\n")
# larger code
-a1 = pt.array(
- [
- [(0), (11), (7), (12)],
- [(1), (8), (1), (8)],
- [(11), (0), (4), (8)],
- [(6), (2), (4), (12)],
- ]
-)
+a1 = pt.array([
+ [(0), (11), (7), (12)],
+ [(1), (8), (1), (8)],
+ [(11), (0), (4), (8)],
+ [(6), (2), (4), (12)],
+])
H = a1.to_binary(lift_parameter=10)
qcode = hgp(H, H, compute_distance=True)
qcode.test()
diff --git a/scripts/numerics/code_construct/constructLPcodes.py b/scripts/numerics/code_construct/constructLPcodes.py
index 603a6890..db6ea22c 100644
--- a/scripts/numerics/code_construct/constructLPcodes.py
+++ b/scripts/numerics/code_construct/constructLPcodes.py
@@ -7,14 +7,12 @@
import numpy as np
from lifted_hgp import LiftedHgp
-a1 = pt.array(
- [
- [(0), (11), (7), (12)],
- [(1), (8), (1), (8)],
- [(11), (0), (4), (8)],
- [(6), (2), (4), (12)],
- ]
-)
+a1 = pt.array([
+ [(0), (11), (7), (12)],
+ [(1), (8), (1), (8)],
+ [(11), (0), (4), (8)],
+ [(6), (2), (4), (12)],
+])
qcode = LiftedHgp(lift_parameter=32, a=a1, b=a1)
if qcode.test:
diff --git a/scripts/numerics/visualize/visualizeDecodingRuntime.py b/scripts/numerics/visualize/visualizeDecodingRuntime.py
index 904674cb..5b9c8f01 100644
--- a/scripts/numerics/visualize/visualizeDecodingRuntime.py
+++ b/scripts/numerics/visualize/visualizeDecodingRuntime.py
@@ -67,9 +67,9 @@ def runtime() -> None:
for i in range(len(x_data)):
col, val = colors.popitem()
- if col in ("w", "k"):
+ if col in {"w", "k"}:
col, val = colors.popitem()
- if col in ("w", "k"):
+ if col in {"w", "k"}:
col, val = colors.popitem()
cols.append(col)
label = "% 6.3f" % pers[i]
@@ -127,9 +127,9 @@ def runtime_comparison() -> None:
for i in range(len(x_data)):
col, val = colors.popitem()
- if col in ("w", "k"):
+ if col in {"w", "k"}:
col, val = colors.popitem()
- if col in ("w", "k"):
+ if col in {"w", "k"}:
col, val = colors.popitem()
cols.append(col)
label = "%2.2f" % pers[i]
diff --git a/src/mqt/qecc/__init__.py b/src/mqt/qecc/__init__.py
index ade83b1f..0eaf92cd 100644
--- a/src/mqt/qecc/__init__.py
+++ b/src/mqt/qecc/__init__.py
@@ -7,6 +7,8 @@
from __future__ import annotations
from ._version import version as __version__
+from .analog_information_decoding.simulators.analog_tannergraph_decoding import AnalogTannergraphDecoder, AtdSimulator
+from .analog_information_decoding.simulators.quasi_single_shot_v2 import QssSimulator
from .pyqecc import (
Code,
Decoder,
@@ -32,4 +34,8 @@
"DecodingRunInformation",
"sample_iid_pauli_err",
"apply_ecc",
+ "AnalogTannergraphDecoder",
+ "AtdSimulator",
+ # "SoftInfoDecoder",
+ "QssSimulator",
]
diff --git a/src/mqt/qecc/analog_information_decoding/code_construction/code_constructor.py b/src/mqt/qecc/analog_information_decoding/code_construction/code_constructor.py
new file mode 100644
index 00000000..cd603aa7
--- /dev/null
+++ b/src/mqt/qecc/analog_information_decoding/code_construction/code_constructor.py
@@ -0,0 +1,246 @@
+"""Package for code construction."""
+
+from __future__ import annotations
+
+import json
+import subprocess
+from pathlib import Path
+from typing import TYPE_CHECKING, Any
+
+import ldpc.code_util
+import numpy as np
+import scipy.io as sio
+import scipy.sparse as scs
+from bposd.hgp import hgp
+from ldpc import mod2
+from scipy import sparse
+
+if TYPE_CHECKING:
+ from numpy.typing import NDArray
+
+
+def is_all_zeros(array: NDArray[np.int32]) -> bool:
+ """Check if array is all zeros."""
+ return not np.any(array)
+
+
+def run_checks_scipy(
+ d_1: NDArray[np.int32], d_2: NDArray[np.int32], d_3: NDArray[np.int32], d_4: NDArray[np.int32]
+) -> None:
+ """Run checks on the boundary maps."""
+ sd_1 = scs.csr_matrix(d_1)
+ sd_2 = scs.csr_matrix(d_2)
+ sd_3 = scs.csr_matrix(d_3)
+ sd_4 = scs.csr_matrix(d_4)
+
+ if not (
+ is_all_zeros((sd_1 * sd_2).todense() % 2)
+ and is_all_zeros((sd_2 * sd_3).todense() % 2)
+ and is_all_zeros((sd_3 * sd_4).todense() % 2)
+ ):
+ msg = "Error generating 4D code, boundary maps do not square to zero"
+ raise RuntimeError(msg)
+
+
+def generate_4d_product_code(
+ a_1: NDArray[np.int32], a_2: NDArray[np.int32], a_3: NDArray[np.int32], p: NDArray[np.int32], checks: bool = True
+) -> tuple[NDArray[np.int32], NDArray[np.int32], NDArray[np.int32], NDArray[np.int32]]:
+ """Generate 4D product code."""
+ r, c = p.shape
+ id_r: NDArray[np.int32] = np.identity(r, dtype=np.int32)
+ id_c: NDArray[np.int32] = np.identity(c, dtype=np.int32)
+ id_n0: NDArray[np.int32] = np.identity(a_1.shape[0], dtype=np.int32)
+ id_n1: NDArray[np.int32] = np.identity(a_2.shape[0], dtype=np.int32)
+ id_n2: NDArray[np.int32] = np.identity(a_3.shape[0], dtype=np.int32)
+ id_n3: NDArray[np.int32] = np.identity(a_3.shape[1], dtype=np.int32)
+
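+    # The boundary maps are assembled from Kronecker products in block form,
+    # e.g., d_1 = (a_1 (x) I_r | I_n0 (x) p), where (x) denotes the Kronecker product.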
+ d_1: NDArray[np.int32] = np.hstack((np.kron(a_1, id_r), np.kron(id_n0, p))).astype(np.int32)
+
+ x = np.hstack((np.kron(a_2, id_r), np.kron(id_n1, p)))
+ y = np.kron(a_1, id_c)
+ dims = (y.shape[0], x.shape[1] - y.shape[1])
+ z = np.hstack((np.zeros(dims, dtype=np.int32), y))
+ d_2 = np.vstack((x, z))
+
+ x = np.hstack((np.kron(a_3, id_r), np.kron(id_n2, p)))
+ y = np.kron(a_2, id_c)
+ dims = (y.shape[0], x.shape[1] - y.shape[1])
+ z = np.hstack((np.zeros(dims, dtype=np.int32), y))
+ d_3: NDArray[np.int32] = np.vstack((x, z)).astype(np.int32)
+
+ d_4: NDArray[np.int32] = np.vstack((np.kron(id_n3, p), np.kron(a_3, id_c)))
+
+ if checks:
+ run_checks_scipy(d_1, d_2, d_3, d_4)
+
+ return d_1, d_2, d_3, d_4
+
+
+def generate_3d_product_code(
+ a_1: NDArray[np.int32], a_2: NDArray[np.int32], p: NDArray[np.int32]
+) -> tuple[NDArray[np.int32], NDArray[np.int32], NDArray[np.int32]]:
+ """Generate 3D product code."""
+ r, c = p.shape
+ id_r: NDArray[np.int32] = np.identity(r, dtype=np.int32)
+ id_c: NDArray[np.int32] = np.identity(c, dtype=np.int32)
+ id_n0: NDArray[np.int32] = np.identity(a_1.shape[0], dtype=np.int32)
+ id_n1: NDArray[np.int32] = np.identity(a_2.shape[0], dtype=np.int32)
+ id_n2: NDArray[np.int32] = np.identity(a_2.shape[1], dtype=np.int32)
+
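+    # Same block structure as in the 4D case: d_1 = (a_1 (x) I_r | I_n0 (x) p)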
+ d_1: NDArray[np.int32] = np.hstack((np.kron(a_1, id_r), np.kron(id_n0, p)))
+
+ x = np.hstack((np.kron(a_2, id_r), np.kron(id_n1, p)))
+ y = np.kron(a_1, id_c)
+ dims = (y.shape[0], x.shape[1] - y.shape[1])
+ z = np.hstack((np.zeros(dims, dtype=np.int32), y))
+ d_2: NDArray[np.int32] = np.vstack((x, z))
+
+ d_3: NDArray[np.int32] = np.vstack((np.kron(id_n2, p), np.kron(a_2, id_c)))
+
+ if not (is_all_zeros(d_1 @ d_2 % 2) and is_all_zeros(d_2 @ d_3 % 2)):
+ msg = "Error generating 3D code, boundary maps do not square to zero"
+ raise RuntimeError(msg)
+
+ return d_1, d_2, d_3
+
+
+def create_outpath(codename: str) -> str:
+ """Create output path for code files."""
+ path = f"generated_codes/{codename}/"
+ Path(path).mkdir(parents=True, exist_ok=True)
+ return path
+
+
+def save_code(
+ hx: NDArray[np.int32],
+ hz: NDArray[np.int32],
+ mx: NDArray[np.int32],
+ mz: NDArray[np.int32],
+ codename: str,
+ lx: NDArray[np.int32] | None,
+ lz: NDArray[np.int32] | None,
+) -> None:
+ """Save code to files."""
+ path = create_outpath(codename)
+ ms = [hx, hz, mx, mz, lx, lz] if lx is not None and lz is not None else [hx, hz, mx, mz]
+ names: list[str] = ["hx", "hz", "mx", "mz", "lx", "lz"]
+ for mat, name in zip(ms, names):
+ if mat is not None:
+ np.savetxt(path + name + ".txt", mat, fmt="%i")
+ sio.mmwrite(
+ path + name + ".mtx",
+ sparse.coo_matrix(mat),
+ comment="Field: GF(2)",
+ )
+
+
+def run_compute_distances(codename: str) -> None:
+ """Run compute distances bash script."""
+ path = "generated_codes/" + codename
+ subprocess.run(["bash", "compute_distances.sh", path], check=False) # noqa: S603, S607
+
+
+def _compute_distances(hx: NDArray[np.int32], hz: NDArray[np.int32], codename: str) -> None:
+ run_compute_distances(codename)
+ code_dict: Any = {}
+ _m, n = hx.shape
+ code_k = n - mod2.rank(hx) - mod2.rank(hz)
+ with Path(f"generated_codes/{codename}/info.txt").open() as f:
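+        # info.txt contains lines of the form "<key> = <value>  # comment" written by the GAP script;
+        # strip the trailing comment and parse each line into a key-value pair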
+ code_dict = dict(
+ line[: line.rfind("#")].split(" = ") for line in f if not line.startswith("#") and line.strip()
+ )
+
+ code_dict["n"] = n
+ code_dict["k"] = code_k
+ code_dict["dX"] = int(code_dict["dX"])
+ code_dict["dZ"] = int(code_dict["dZ"])
+ code_dict["dMX"] = int(code_dict["dMX"])
+ code_dict["dMZ"] = int(code_dict["dMZ"])
+
+ with Path(f"generated_codes/{codename}/code_params.txt").open("w") as file:
+ file.write(json.dumps(code_dict))
+
+
+def _compute_logicals(hx: NDArray[np.int32], hz: NDArray[np.int32]) -> tuple[NDArray[np.int32], NDArray[np.int32]]:
+ def compute_lz(hx: NDArray[np.int32], hz: NDArray[np.int32]) -> NDArray[np.int32]:
+        # lz logical operators:
+        # lz in ker(hx) and lz not in im(hz.T)
+
+ ker_hx = mod2.nullspace(hx) # compute the kernel basis of hx
+ im_hz_t = mod2.row_basis(hz) # compute the image basis of hz.T
+
+        # below we row reduce to find vectors in ker(hx) that are not in the image of hz.T
+ log_stack = np.vstack([im_hz_t, ker_hx])
+ pivots = mod2.row_echelon(log_stack.T)[3]
+ log_op_indices = [i for i in range(im_hz_t.shape[0], log_stack.shape[0]) if i in pivots]
+ return log_stack[log_op_indices]
+
+ lx = compute_lz(hz, hx)
+ lz = compute_lz(hx, hz)
+ return lx, lz
+
+
+def create_code(
+ constructor: str,
+ seed_codes: list[NDArray[np.int32]],
+ codename: str,
+ compute_distance: bool = False,
+ compute_logicals: bool = False,
+ checks: bool = False,
+) -> None:
+ """Create 4D code."""
+ # Construct initial 2 dim code
+ if constructor == "hgp":
+ code = hgp(seed_codes[0], seed_codes[1])
+ else:
+ msg = f"No constructor specified or the specified constructor {constructor} not implemented."
+ raise ValueError(msg)
+
+ # Extend to 3D HGP
+ a1 = code.hx
+ a2 = code.hz.T
+ res = generate_3d_product_code(a1, a2, seed_codes[2])
+
+ # Build 4D HGP code
+ mx, hx, hz_t, mz_t = generate_4d_product_code(*res, seed_codes[3], checks=checks)
+
+ hz = hz_t.T
+ mz = mz_t.T
+
+ # Perform checks
+ if np.any(hz_t @ mz_t % 2) or np.any(hx @ hz_t % 2) or np.any(mx @ hx % 2):
+ msg = "err"
+ raise RuntimeError(msg)
+ save_code(hx, hz, mx, mz, codename, lx=None, lz=None)
+
+ if compute_logicals:
+ lx, lz = _compute_logicals(hx, hz)
+ save_code(hx, hz, mx, mz, codename, lx=lx, lz=lz)
+
+ else:
+ save_code(hx, hz, mx, mz, codename, lx=None, lz=None)
+
+ if compute_distance:
+ _compute_distances(hx, hz, codename)
+
+
+if __name__ == "__main__":
+ for d in range(3, 8):
+ seed_codes = [
+ ldpc.codes.ring_code(d),
+ ldpc.codes.ring_code(d),
+ ldpc.codes.ring_code(d),
+ ldpc.codes.ring_code(d),
+ ]
+
+ constructor = "hgp"
+ codename = f"4D_toric_{d:d}"
+ compute_distance = False
+ compute_logicals = True
+ create_code(
+ constructor,
+ seed_codes,
+ codename,
+ compute_distance,
+ compute_logicals,
+ )
diff --git a/src/mqt/qecc/analog_information_decoding/code_construction/compute_distances.sh b/src/mqt/qecc/analog_information_decoding/code_construction/compute_distances.sh
new file mode 100644
index 00000000..2b6e4b5a
--- /dev/null
+++ b/src/mqt/qecc/analog_information_decoding/code_construction/compute_distances.sh
@@ -0,0 +1,53 @@
+#!/bin/bash
+path=$1
+
+gap -q << EOF > "$path/info.txt"
+LoadPackage("QDistRnd");;
+hx:=ReadMTXE("$path/hx.mtx");;
+
+hz:=ReadMTXE("$path/hz.mtx");;
+
+Print("dZ = ", DistRandCSS(hx[3], hz[3], 150, 0,2), "\n");
+Print("dX = ", DistRandCSS(hz[3], hx[3], 150, 0,2), "\n");
+
+file := IO_File("$path/mx.txt");
+lines := IO_ReadLines(file);
+parityCheckMatrix := [];
+
+for l in lines do
+ row := ReplacedString(l, "\n", "");
+ r := SplitString(row, " ");
+ newr := [];
+ for b in r do
+ c := Int(b);
+ Add(newr,c);
+ od;
+ Add(parityCheckMatrix, newr);
+od;
+
+xcode := CheckMatCode(parityCheckMatrix, GF(2));
+
+file := IO_File("$path/mz.txt");
+lines := IO_ReadLines(file);
+parityCheckMatrix := [];
+
+for l in lines do
+ row := ReplacedString(l, "\n", "");
+ r := SplitString(row, " ");
+ newr := [];
+ for b in r do
+ c := Int(b);
+ Add(newr,c);
+ od;
+ Add(parityCheckMatrix, newr);
+od;
+
+zcode := CheckMatCode(parityCheckMatrix, GF(2));
+
+
+
+# Print("dMx = ", MinimumDistance(xcode), "\n"); only works for small codes quickly
+Print("dMx = ", MinimumDistanceLeon(xcode), "\n"); #https://gap-packages.github.io/guava/doc/chap4_mj.html#X8170B52D7C154247:~:text=4.8%2D4%20MinimumDistanceLeon
+Print("dMz = ", MinimumDistanceLeon(zcode), "\n");
+
+EOF
diff --git a/src/mqt/qecc/analog_information_decoding/code_construction/compute_distances_3D.sh b/src/mqt/qecc/analog_information_decoding/code_construction/compute_distances_3D.sh
new file mode 100644
index 00000000..45fe4d5a
--- /dev/null
+++ b/src/mqt/qecc/analog_information_decoding/code_construction/compute_distances_3D.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+path=$1
+
+gap -q << EOF > "$path/info.txt"
+LoadPackage("QDistRnd");;
+hx:=ReadMTXE("$path/hx.mtx");;
+
+hz:=ReadMTXE("$path/hz.mtx");;
+
+Print("dZ = ", DistRandCSS(hx[3], hz[3], 150, 0,2), "\n");
+Print("dX = ", DistRandCSS(hz[3], hx[3], 150, 0,2), "\n");
+
+EOF
diff --git a/src/mqt/qecc/analog_information_decoding/code_construction/sparse_code_constructor.py b/src/mqt/qecc/analog_information_decoding/code_construction/sparse_code_constructor.py
new file mode 100644
index 00000000..f0893a16
--- /dev/null
+++ b/src/mqt/qecc/analog_information_decoding/code_construction/sparse_code_constructor.py
@@ -0,0 +1,213 @@
+"""Sparse code constructor for 3D and 4D HGP codes."""
+
+from __future__ import annotations
+
+import json
+import subprocess
+from pathlib import Path
+from typing import TYPE_CHECKING, Any
+
+import numpy as np
+import scipy.io as sio
+from bposd.hgp import hgp
+from ldpc import mod2
+from scipy import sparse
+from scipy.sparse import coo_matrix, csr_matrix
+
+from . import code_constructor
+
+if TYPE_CHECKING:
+ from numpy.typing import NDArray
+
+
+def is_all_zeros(array: NDArray[np.int_]) -> bool:
+ """Check if array is all zeros."""
+ return not np.any(array)
+
+
+def sparse_all_zeros(mat: csr_matrix) -> bool:
+ """Check if sparse matrix is all zeros."""
+ mat.data %= 2
+ return bool(mat.sum() == 0)
+
+
+def run_checks_scipy(d_1: csr_matrix, d_2: csr_matrix, d_3: csr_matrix, d_4: csr_matrix) -> None:
+ """Run checks on the boundary maps."""
+ if not (sparse_all_zeros(d_1 @ d_2) and sparse_all_zeros(d_2 @ d_3) and sparse_all_zeros(d_3 @ d_4)):
+ msg = "Error generating 4D code, boundary maps do not square to zero"
+ raise RuntimeError(msg)
+
+
+def generate_4d_product_code(
+ a_1: csr_matrix,
+ a_2: csr_matrix,
+ a_3: csr_matrix,
+ p: csr_matrix,
+ checks: bool = True,
+) -> tuple[csr_matrix, csr_matrix, csr_matrix, csr_matrix]:
+ """Generate 4D HGP code."""
+ r, c = p.shape
+
+ id_r = sparse.identity(r, dtype=int)
+ id_c = sparse.identity(c, dtype=int)
+ id_n0 = sparse.identity(a_1.shape[0], dtype=int)
+ id_n1 = sparse.identity(a_2.shape[0], dtype=int)
+ id_n2 = sparse.identity(a_3.shape[0], dtype=int)
+ id_n3 = sparse.identity(a_3.shape[1], dtype=int)
+
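+    # Same construction as in code_constructor, but with sparse Kronecker products throughout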
+ d_1 = sparse.hstack((sparse.kron(a_1, id_r), sparse.kron(id_n0, p)))
+
+ x = sparse.hstack((sparse.kron(a_2, id_r), sparse.kron(id_n1, p)))
+ y = sparse.kron(a_1, id_c)
+ dims = (y.shape[0], x.shape[1] - y.shape[1])
+    nmat = csr_matrix(dims, dtype=int)
+ z = sparse.hstack((nmat, y))
+ d_2 = sparse.vstack((x, z))
+
+ x = sparse.hstack((sparse.kron(a_3, id_r), sparse.kron(id_n2, p)))
+ y = sparse.kron(a_2, id_c)
+ dims = (y.shape[0], x.shape[1] - y.shape[1])
+    mat = csr_matrix(dims, dtype=int)
+ z = sparse.hstack([mat, y])
+ d_3 = sparse.vstack((x, z))
+
+ d_4 = sparse.vstack((sparse.kron(id_n3, p), sparse.kron(a_3, id_c)))
+
+ if checks:
+ run_checks_scipy(d_1, d_2, d_3, d_4)
+
+ return d_1, d_2, d_3, d_4
+
+
+def generate_3d_product_code(
+ a_1: csr_matrix, a_2: csr_matrix, p: csr_matrix
+) -> tuple[csr_matrix, csr_matrix, csr_matrix]:
+ """Generate 3D HGP code."""
+ r, c = p.shape
+
+ id_r = sparse.identity(r, dtype=int)
+ id_c = sparse.identity(c, dtype=int)
+ id_n0 = sparse.identity(a_1.shape[0], dtype=int)
+ id_n1 = sparse.identity(a_2.shape[0], dtype=int)
+ id_n2 = sparse.identity(a_2.shape[1], dtype=int)
+
+ d_1 = sparse.hstack((sparse.kron(a_1, id_r), sparse.kron(id_n0, p)))
+
+ x = sparse.hstack((sparse.kron(a_2, id_r), sparse.kron(id_n1, p)))
+ y = sparse.kron(a_1, id_c)
+ dims = (y.shape[0], x.shape[1] - y.shape[1])
+    z = sparse.hstack((csr_matrix(dims, dtype=int), y))
+ d_2 = sparse.vstack((x, z))
+
+ d_3 = sparse.vstack((sparse.kron(id_n2, p), sparse.kron(a_2, id_c)))
+
+ if not (sparse_all_zeros(d_1 @ d_2) and sparse_all_zeros(d_2 @ d_3)):
+ msg = "Error generating 3D code, boundary maps do not square to zero"
+ raise RuntimeError(msg)
+
+    return d_1, d_2, d_3  # interpreted as (hx, hz.T, mz.T) when used as a 3D code
+
+
+def create_outpath(codename: str) -> str:
+ """Create output path for code."""
+ path = f"/codes/generated_codes/{codename}/"
+ Path(path).mkdir(parents=True, exist_ok=True)
+ return path
+
+
+def save_code(
+ hx: csr_matrix,
+ hz: csr_matrix,
+ mz: csr_matrix,
+ codename: str,
+    lx: csr_matrix | None = None,
+    lz: csr_matrix | None = None,
+) -> None:
+ """Save code to file."""
+ path = create_outpath(codename)
+
+ matrices: list[csr_matrix] = [hx, hz, mz, lx, lz]
+ names = ["hx", "hz", "mz", "lx", "lz"]
+ for mat, name in zip(matrices, names):
+ if mat is not None:
+ path_str = path + name
+ try:
+ np.savetxt(path_str + ".txt", mat.todense(), fmt="%i")
+ except Exception:
+ np.savetxt(path_str + ".txt", mat, fmt="%i")
+ sio.mmwrite(
+ path_str + ".mtx",
+ coo_matrix(mat),
+ comment="Field: GF(2)",
+ field="integer",
+ )
+
+
+def run_compute_distances(codename: str) -> None:
+ """Run compute distances bash script."""
+ path = "/codes/generated_codes/" + codename
+ subprocess.run(["bash", "compute_distances_3D.sh", path], check=False) # noqa: S603, S607
+
+
+def _compute_distances(hx: NDArray[np.int32], hz: NDArray[np.int32], codename: str) -> None:
+ run_compute_distances(codename)
+ _, n = hx.shape
+ code_k = n - mod2.rank(hx) - mod2.rank(hz)
+ with Path(f"/codes/generated_codes/{codename}/info.txt").open() as f:
+ code_dict: dict[str, Any] = dict(
+ line[: line.rfind("#")].split(" = ") for line in f if not line.startswith("#") and line.strip()
+ )
+
+ code_dict["n"] = n
+ code_dict["k"] = code_k
+ code_dict["dX"] = int(code_dict["dX"])
+ code_dict["dZ"] = int(code_dict["dZ"])
+
+ with Path(f"/codes/generated_codes/{codename}/code_params.txt").open("w") as file:
+ file.write(json.dumps(code_dict))
+
+
+def _store_code_params(hx: csr_matrix, hz: csr_matrix, codename: str) -> None:
+ """Store code parameters in file."""
+ code_dict = {}
+ hx, hz = hx.todense(), hz.todense()
+ _m, n = hx.shape
+ code_k = n - mod2.rank(hx) - mod2.rank(hz)
+ code_dict["n"] = n
+ code_dict["k"] = code_k
+ with Path(f"/codes/generated_codes/{codename}/code_params.txt").open("w") as file:
+ file.write(json.dumps(code_dict))
+
+
+def create_code(
+ constructor: str,
+ seed_codes: list[csr_matrix],
+ codename: str,
+ compute_distance: bool = False,
+ compute_logicals: bool = False,
+) -> None:
+ """Create code."""
+ # Construct initial 2 dim code
+ if constructor == "hgp":
+ code = hgp(seed_codes[0], seed_codes[1])
+ else:
+ msg = f"No constructor specified or the specified constructor {constructor} not implemented."
+ raise ValueError(msg)
+
+ # Extend to 3D HGP
+ a1 = sparse.csr_matrix(code.hx)
+ a2 = sparse.csr_matrix(code.hz.T)
+ res = generate_3d_product_code(a1, a2, sparse.csr_matrix(seed_codes[2]))
+ hx, hz_t, mz_t = res
+
+ hz = hz_t.transpose()
+ mz = mz_t.transpose()
+ if compute_logicals:
+ lx, lz = code_constructor._compute_logicals(hx.todense(), hz.todense()) # noqa: SLF001
+ save_code(hx=hx, hz=hz, mz=mz, codename=codename, lx=lx, lz=lz)
+
+ if compute_distance:
+ _compute_distances(hx.todense(), hz.todense(), codename)
+
+ else:
+ _store_code_params(hx.todense(), hz.todense(), codename)
diff --git a/src/mqt/qecc/analog_information_decoding/codes/lifted_product/lp_l=16_hx.npz b/src/mqt/qecc/analog_information_decoding/codes/lifted_product/lp_l=16_hx.npz
new file mode 100644
index 00000000..403d058d
Binary files /dev/null and b/src/mqt/qecc/analog_information_decoding/codes/lifted_product/lp_l=16_hx.npz differ
diff --git a/src/mqt/qecc/analog_information_decoding/codes/lifted_product/lp_l=16_hz.npz b/src/mqt/qecc/analog_information_decoding/codes/lifted_product/lp_l=16_hz.npz
new file mode 100644
index 00000000..846160d1
Binary files /dev/null and b/src/mqt/qecc/analog_information_decoding/codes/lifted_product/lp_l=16_hz.npz differ
diff --git a/src/mqt/qecc/analog_information_decoding/codes/lifted_product/lp_l=21_hx.npz b/src/mqt/qecc/analog_information_decoding/codes/lifted_product/lp_l=21_hx.npz
new file mode 100644
index 00000000..bd126d89
Binary files /dev/null and b/src/mqt/qecc/analog_information_decoding/codes/lifted_product/lp_l=21_hx.npz differ
diff --git a/src/mqt/qecc/analog_information_decoding/codes/lifted_product/lp_l=21_hz.npz b/src/mqt/qecc/analog_information_decoding/codes/lifted_product/lp_l=21_hz.npz
new file mode 100644
index 00000000..6da6ca54
Binary files /dev/null and b/src/mqt/qecc/analog_information_decoding/codes/lifted_product/lp_l=21_hz.npz differ
diff --git a/src/mqt/qecc/analog_information_decoding/codes/lifted_product/lp_l=30_hx.npz b/src/mqt/qecc/analog_information_decoding/codes/lifted_product/lp_l=30_hx.npz
new file mode 100644
index 00000000..52a957a5
Binary files /dev/null and b/src/mqt/qecc/analog_information_decoding/codes/lifted_product/lp_l=30_hx.npz differ
diff --git a/src/mqt/qecc/analog_information_decoding/codes/lifted_product/lp_l=30_hz.npz b/src/mqt/qecc/analog_information_decoding/codes/lifted_product/lp_l=30_hz.npz
new file mode 100644
index 00000000..b48dce9e
Binary files /dev/null and b/src/mqt/qecc/analog_information_decoding/codes/lifted_product/lp_l=30_hz.npz differ
diff --git a/src/mqt/qecc/analog_information_decoding/results/bp-parameter-comparison/A comparison of BPOSD Settings.pdf b/src/mqt/qecc/analog_information_decoding/results/bp-parameter-comparison/A comparison of BPOSD Settings.pdf
new file mode 100644
index 00000000..8f6934a9
Binary files /dev/null and b/src/mqt/qecc/analog_information_decoding/results/bp-parameter-comparison/A comparison of BPOSD Settings.pdf differ
diff --git a/src/mqt/qecc/analog_information_decoding/simulators/analog_tannergraph_decoding.py b/src/mqt/qecc/analog_information_decoding/simulators/analog_tannergraph_decoding.py
new file mode 100644
index 00000000..00b02783
--- /dev/null
+++ b/src/mqt/qecc/analog_information_decoding/simulators/analog_tannergraph_decoding.py
@@ -0,0 +1,314 @@
+"""Analog Tannergraph Decoding Simulator."""
+
+from __future__ import annotations
+
+import json
+from pathlib import Path
+from typing import TYPE_CHECKING, Any
+
+import numpy as np
+from ldpc import bposd_decoder
+
+from ..utils import simulation_utils
+from ..utils.data_utils import (
+ BpParams,
+ calculate_error_rates,
+ is_converged,
+)
+
+if TYPE_CHECKING:
+ from numpy.typing import NDArray
+
+
+class AnalogTannergraphDecoder:
+ """Analog Tannergraph decoder.
+
+    Builds the analog Tanner graph and uses it as the decoding graph.
+ """
+
+ def __init__(
+ self,
+ pcm: NDArray[np.int32],
+ bp_params: BpParams,
+ error_channel: NDArray[np.float64],
+ sigma: float = 0.0,
+ ser: float | None = None,
+ ) -> None:
+ """Initialize the decoder."""
+ self.m, self.n = pcm.shape
+ self.sigma = sigma
+ self.H = pcm
+ self.bp_params = bp_params
+ self.atg = get_analog_pcm(pcm)
+ self.syndr_err_rate = ser
+ self.error_channel = error_channel
+
+ if self.sigma <= 0.0:
+ if self.syndr_err_rate is None:
+ msg = "Either sigma or ser must be specified"
+ raise ValueError(msg)
+ self.sigma = simulation_utils.get_sigma_from_syndr_er(self.syndr_err_rate)
+ elif self.syndr_err_rate is not None:
+ msg = "Only one of sigma or ser must be specified"
+ raise ValueError(msg)
+
+ self.bposd_decoder = bposd_decoder(
+ parity_check_matrix=self.atg,
+            channel_probs=np.hstack((self.error_channel, np.zeros(self.m))),  # virtual checks start as dummies; set per syndrome
+ max_iter=self.bp_params.max_bp_iter,
+ bp_method=self.bp_params.bp_method,
+ osd_order=self.bp_params.osd_order,
+ osd_method=self.bp_params.osd_method,
+ ms_scaling_factor=self.bp_params.ms_scaling_factor,
+ schedule=self.bp_params.schedule,
+ omp_thread_count=self.bp_params.omp_thread_count,
+ random_serial_schedule=self.bp_params.random_serial_schedule,
+ serial_schedule_order=self.bp_params.serial_schedule_order,
+ )
+
+ def _set_analog_syndrome(self, analog_syndrome: NDArray[np.float64]) -> None:
+ """Initializes the error channel of the BP decoder.
+
+ Ensures that the virtual nodes are initialized with the analog syndrome LLRs on decoding initialization.
+ :param analog_syndrome: the analog syndrome values to initialize the virtual nodes with.
+ """
+ new_channel = np.hstack((
+ self.bposd_decoder.channel_probs[: self.n],
+ simulation_utils.get_virtual_check_init_vals(analog_syndrome, self.sigma),
+ ))
+ self.bposd_decoder.update_channel_probs(new_channel)
+
+ def decode(self, analog_syndrome: NDArray[np.float64]) -> NDArray[np.int32]:
+ """Decode a given analog syndrome."""
+ self._set_analog_syndrome(analog_syndrome)
+ return self.bposd_decoder.decode(simulation_utils.get_binary_from_analog(analog_syndrome)) # type: ignore[no-any-return]
+
+
+class AtdSimulator:
+ """Analog Tanner graph Decoding Simulator."""
+
+ def __init__(
+ self,
+ hx: NDArray[np.int32],
+ lx: NDArray[np.int32],
+ hz: NDArray[np.int32],
+ lz: NDArray[np.int32],
+ codename: str,
+ seed: int,
+ bp_params: BpParams,
+ data_err_rate: float,
+ code_params: dict[str, Any],
+ syndr_err_rate: float | None = None,
+ sigma: float | None = None,
+ bias: NDArray[np.float64] | None = None,
+ experiment: str = "atd",
+ decoding_method: str = "atd",
+ output_path: str = "./",
+ **kwargs: Any, # noqa: ANN401
+ ) -> None:
+ """Initialize the simulator."""
+ if bias is None:
+ bias = np.array([1.0, 1.0, 1.0])
+ simulation_utils.set_seed(seed)
+ self.Hx = hx
+ self.Lx = lx
+ self.Hz = hz
+ self.Lz = lz
+ self.codename = codename
+ self.data_err_rate = data_err_rate
+ self.bias = bias
+ self.experiment = experiment
+ self.decoding_method = decoding_method
+ self.sigma = sigma
+ self.outfile = output_path
+ if sigma is None:
+ if syndr_err_rate is None:
+ msg = "Either sigma or ser must be specified"
+ raise ValueError(msg)
+
+ self.syndr_err_rate = syndr_err_rate
+ synd_err_channel = simulation_utils.error_channel_setup(
+ error_rate=self.syndr_err_rate,
+ xyz_error_bias=self.bias,
+ nr_qubits=1,
+ )
+
+ else:
+ if syndr_err_rate is not None:
+ msg = "Only one of sigma or ser must be specified"
+ raise ValueError(msg)
+
+ self.syndr_err_rate = simulation_utils.get_error_rate_from_sigma(sigma)
+ synd_err_channel = simulation_utils.error_channel_setup(
+ error_rate=self.syndr_err_rate,
+ xyz_error_bias=self.bias,
+ nr_qubits=1,
+ )
+
+ x_synd_err_rate = synd_err_channel[0][0] + synd_err_channel[1][0] # x + y errors, 1st bit only
+ z_synd_err_rate = synd_err_channel[2][0] + synd_err_channel[1][0] # z + y errors, 1st bit only
+ self.x_sigma = simulation_utils.get_sigma_from_syndr_er(x_synd_err_rate)
+ self.z_sigma = simulation_utils.get_sigma_from_syndr_er(z_synd_err_rate)
+
+ self.bp_params = bp_params
+ self.save_interval = kwargs.get("save_interval", 1_000)
+        self.eb_precision = kwargs.get("eb_precision", 1e-1)
+ self.input_values = self.__dict__.copy()
+
+ self.n = hx.shape[1]
+ self.code_params = code_params
+ del self.input_values["Hx"]
+ del self.input_values["Lx"]
+ del self.input_values["Hz"]
+ del self.input_values["Lz"]
+
+        # setup decoders
+        if self.decoding_method == "atd":
+            Decoder = AnalogTannergraphDecoder  # noqa: N806
+        else:
+            msg = f"Decoding method {self.decoding_method} not implemented."
+            raise ValueError(msg)
+
+ # single-sided error only, no bias
+ self.full_error_channel = simulation_utils.error_channel_setup(
+ error_rate=self.data_err_rate,
+ xyz_error_bias=self.bias,
+ nr_qubits=self.n,
+ )
+ self.x_decoder = Decoder(
+ error_channel=self.full_error_channel[0] + self.full_error_channel[1], # x + y errors
+ pcm=self.Hz,
+ sigma=self.x_sigma,
+ bp_params=self.bp_params,
+ )
+ self.z_decoder = Decoder(
+ error_channel=self.full_error_channel[2] + self.full_error_channel[1], # z + y errors
+ pcm=self.Hx,
+ sigma=self.z_sigma,
+ bp_params=self.bp_params,
+ )
+ self.x_bp_iterations = 0
+ self.z_bp_iterations = 0
+
+ def single_sample(self) -> tuple[bool, bool]:
+ """Samples and error and decodes once. Returns if the decoding round was successful separately for each side."""
+ residual_err: list[NDArray[np.int32]] = [
+ np.zeros(self.n).astype(np.int32),
+ np.zeros(self.n).astype(np.int32),
+ ] # no residual error
+ (
+ x_err,
+ z_err,
+ ) = simulation_utils.generate_err( # no residual error, only one side needed
+ nr_qubits=self.n,
+ channel_probs=self.full_error_channel,
+ residual_err=residual_err,
+ )
+
+ x_perf_syndr = (self.Hz @ x_err) % 2
+ x_noisy_syndr = simulation_utils.get_noisy_analog_syndrome(sigma=self.x_sigma, perfect_syndr=x_perf_syndr)
+ x_decoding = self.x_decoder.decode(x_noisy_syndr)[: self.n]
+ self.x_bp_iterations += self.x_decoder.bposd_decoder.iter
+ x_residual = (x_err + x_decoding) % 2
+
+ z_perf_syndr = (self.Hx @ z_err) % 2
+ z_noisy_syndr = simulation_utils.get_noisy_analog_syndrome(sigma=self.z_sigma, perfect_syndr=z_perf_syndr)
+ z_decoding = self.z_decoder.decode(z_noisy_syndr)[: self.n]
+ self.z_bp_iterations += self.z_decoder.bposd_decoder.iter
+ z_residual = (z_err + z_decoding) % 2
+
+ return not simulation_utils.is_logical_err(self.Lz, x_residual), not simulation_utils.is_logical_err(
+ self.Lx, z_residual
+ )
+
+ def run(self, samples: int) -> dict[str, Any]:
+ """Run the simulation."""
+ x_success_cnt = 0
+ z_success_cnt = 0
+ for runs in range(1, samples + 1):
+ out = self.single_sample()
+
+ x_success_cnt += int(out[0])
+ z_success_cnt += int(out[1])
+ if runs % self.save_interval == 1:
+ self.save_results(x_success_cnt, z_success_cnt, runs)
+
+ # check convergence only once during each save interval
+ if is_converged(
+ x_success_cnt,
+ z_success_cnt,
+ runs,
+ self.code_params,
+                self.eb_precision,
+ ):
+ print("Result has converged.") # noqa: T201
+ break
+
+        # compute the final error rates, save them, and return the output dictionary
+        return self.save_results(x_success_cnt, z_success_cnt, runs)
+
+ def save_results(self, x_success_cnt: int, z_success_cnt: int, runs: int) -> dict[str, Any]:
+ """Compute error rates and error bars and save output dict."""
+ x_ler, x_ler_eb, x_wer, x_wer_eb = calculate_error_rates(x_success_cnt, runs, self.code_params)
+ z_ler, z_ler_eb, z_wer, z_wer_eb = calculate_error_rates(z_success_cnt, runs, self.code_params)
+ output = {
+ "code_K": self.code_params["k"],
+ "code_N": self.code_params["n"],
+ "nr_runs": runs,
+ "pers": self.data_err_rate,
+ "x_sigma": self.x_sigma,
+ "z_sigma": self.z_sigma,
+ "x_ler": x_ler,
+ "x_ler_eb": x_ler_eb,
+ "x_wer": x_wer,
+ "x_wer_eb": x_wer_eb,
+ "x_success_cnt": x_success_cnt,
+ "z_ler": z_ler,
+ "z_ler_eb": z_ler_eb,
+ "z_wer": z_wer,
+ "z_wer_eb": z_wer_eb,
+ "z_success_cnt": z_success_cnt,
+ "bp_params": self.bp_params,
+ "x_bp_iterations": self.x_bp_iterations / runs,
+ "z_bp_iterations": self.z_bp_iterations / runs,
+ }
+
+ output.update(self.input_values)
+ with Path(self.outfile).open(mode="w") as f:
+ json.dump(output, f, ensure_ascii=False, indent=4, default=lambda o: o.__dict__)
+ return output
+
+
+def get_analog_pcm(pcm: NDArray[np.int32]) -> NDArray[np.int32]:
+ """Constructs apcm = [H | I_m] where I_m is the m x m identity matrix."""
+ return np.hstack((pcm, np.identity(pcm.shape[0], dtype=np.int32)))
diff --git a/src/mqt/qecc/analog_information_decoding/simulators/memory_experiment_v2.py b/src/mqt/qecc/analog_information_decoding/simulators/memory_experiment_v2.py
new file mode 100644
index 00000000..1ded3773
--- /dev/null
+++ b/src/mqt/qecc/analog_information_decoding/simulators/memory_experiment_v2.py
@@ -0,0 +1,161 @@
+"""This module contains the functions for the multiround decoding simulation."""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any
+
+import numpy as np
+from pymatching import Matching
+from scipy.sparse import block_diag, csr_matrix, eye, hstack
+
+from ..utils.simulation_utils import (
+ get_virtual_check_init_vals,
+)
+
+if TYPE_CHECKING:
+ from numpy.typing import NDArray
+
+
+def build_multiround_pcm(pcm: NDArray[np.int32], repetitions: int, matrix_format: str = "csr") -> csr_matrix:
+ """Builds the multiround parity-check matrix as described in the paper.
+
+    Each block row corresponds to a round of measurements; for r repetitions the matrix has the form
+    H3D := (H_diag | ID), where H_diag is the block-diagonal matrix containing r + 1 copies of the
+    parity-check matrix H, and ID is a staircase block matrix containing two m x m identity blocks
+    in each block row and column except the first.
+ """
+ if not isinstance(pcm, csr_matrix):
+ pcm = csr_matrix(pcm)
+
+ pcm_rows, _ = pcm.shape
+
+ # Construct the block of PCMs
+ h_3dpcm = block_diag([pcm] * (repetitions + 1), format=matrix_format)
+
+    # Construct the diagonal block of identity matrices
+    h_3did_diag = block_diag([eye(pcm_rows, format=matrix_format)] * (repetitions + 1), format=matrix_format)
+
+    # Construct the off-diagonal (staircase) block of identity matrices
+    h_3did_offdiag = eye(pcm_rows * (repetitions + 1), k=-pcm_rows, format=matrix_format)
+
+    # Combine the two into the full identity part of the check matrix
+    h_3did = h_3did_diag + h_3did_offdiag
+
+    # hstack the two blocks
+ return hstack([h_3dpcm, h_3did], format=matrix_format)
+
+
+def move_syndrome(syndrome: NDArray[Any], data_type: Any = np.int32) -> NDArray[Any]: # noqa: ANN401
+ """Slides the window one region up, i.e., the syndrome of the first half is overwritten by the second half."""
+ region_size = int(syndrome.shape[1] / 2) # number of rounds in each region, T
+
+ # zero likes syndrome
+ new_syndrome = np.zeros(syndrome.shape, dtype=data_type)
+
+ # move the syndromes from the Tth round to the 0th round
+ new_syndrome[:, :region_size] = syndrome[:, region_size:]
+ return new_syndrome
+
+
+def get_updated_decoder(
+ decoding_method: str,
+ decoder: Any, # noqa: ANN401
+ new_channel: NDArray[np.float64],
+ h3d: Any | None = None, # noqa: ANN401
+) -> Any: # noqa: ANN401
+ """Updates the decoder with the new channel information and returns the updated decoder object."""
+ if decoding_method == "bposd":
+ decoder.update_channel_probs(new_channel)
+ return decoder
+ if decoding_method == "matching":
+ weights = np.clip(
+ np.log((1 - new_channel) / new_channel),
+ a_min=-16777215,
+ a_max=16777215,
+ )
+ return Matching(h3d, weights=weights)
+ msg = "Unknown decoding method"
+ raise ValueError(msg, decoding_method)
+
+
+def decode_multiround(
+ syndrome: NDArray[np.int32],
+ pcm: NDArray[np.int32],
+ decoder: Any, # noqa: ANN401
+    channel_probs: NDArray[np.float64],  # needed because the matching decoder has no update-weights method
+ repetitions: int,
+ analog_syndr: NDArray[np.float64] | None,
+ last_round: bool = False,
+ check_block_size: int = 0,
+ sigma: float = 0.0,
+ h3d: NDArray[np.int32] | None = None, # needed for matching decoder
+ decoding_method: str = "bposd", # bposd or matching
+) -> tuple[Any, NDArray[np.int32], NDArray[np.float64], int]:
+ """Overlapping window decoding.
+
+ First, we compute the difference syndrome from the recorded syndrome of each measurement round for all measurement
+ rounds of the current window (consisting of two regions with equal size).
+ Then, we apply the correction returned from the decoder on the first region (commit region).
+ We then propagate the syndrome through the whole window (i.e., to the end of the second region).
+ """
+ analog_tg = analog_syndr is not None
+ # convert syndrome to difference syndrome
+ diff_syndrome = syndrome.copy()
+ diff_syndrome[:, 1:] = (syndrome[:, 1:] - syndrome[:, :-1]) % 2
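+    # e.g., recorded syndromes [s0, s1, s2] become [s0, s0 XOR s1, s1 XOR s2]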
+ bp_iter = 0
+
+ region_size = repetitions // 2 # assumes repetitions is even
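+    # the window consists of two equally sized regions:
+    # the commit region (first half) and the tentative region (second half)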
+
+ if analog_tg:
+ # If we have analog information, we use it to initialize the time-like syndrome nodes, which are defined
+ # in the block of the H3D matrix after the diagonal H block.
+ analog_init_vals = get_virtual_check_init_vals(analog_syndr.flatten("F"), sigma) # type: ignore[union-attr]
+
+ new_channel = np.hstack((channel_probs[:check_block_size], analog_init_vals))
+
+ # in the last round, we have a perfect syndrome round to make sure we're in the codespace
+ if last_round:
+ new_channel[-pcm.shape[0] :] = 1e-15
+
+ decoder = get_updated_decoder(decoding_method, decoder, new_channel, h3d)
+
+ elif last_round:
+ new_channel = np.copy(channel_probs)
+ new_channel[-pcm.shape[0] :] = 1e-15
+
+ decoder = get_updated_decoder(decoding_method, decoder, new_channel, h3d)
+
+ decoded = decoder.decode(diff_syndrome.flatten("F"))
+
+ if decoding_method == "bposd":
+ bp_iter = decoder.iter
+
+    # extract the space correction: the first repetitions * n entries
+    space_correction = decoded[: pcm.shape[1] * repetitions].reshape((repetitions, pcm.shape[1])).T
+
+    if last_round is False:
+        # extract the time correction; this corresponds to the decoding on the second block of the H3D matrix
+        time_correction = decoded[pcm.shape[1] * repetitions :].reshape((repetitions, pcm.shape[0])).T
+
+ # append time correction with zeros
+ time_correction = np.hstack((time_correction, np.zeros((pcm.shape[0], 1), dtype=np.int32)))
+
+ # correct only in the commit region
+ decoded = (np.cumsum(space_correction, 1) % 2)[:, region_size - 1]
+
+ # get the syndrome according to the correction
+ corr_syndrome = (pcm @ decoded) % 2
+
+ # propagate the syndrome correction through the tentative region
+ syndrome[:, region_size:] = ((syndrome[:, region_size:] + corr_syndrome[:, None]) % 2).astype(np.int32)
+
+ # apply the time correction of round region_size - 1 to the syndrome at the beginning of the tentative region
+ syndrome[:, region_size] = ((syndrome[:, region_size] + time_correction[:, region_size - 1]) % 2).astype(
+ np.int32
+ )
+
+ else:
+ # correct in the commit and tentative region as the last round stabilizer is perfect
+ decoded = (np.cumsum(space_correction, 1) % 2)[:, -1]
+
+ return decoded.astype(np.int32), syndrome, analog_syndr, bp_iter # type: ignore[return-value]
diff --git a/src/mqt/qecc/analog_information_decoding/simulators/quasi_single_shot_v2.py b/src/mqt/qecc/analog_information_decoding/simulators/quasi_single_shot_v2.py
new file mode 100644
index 00000000..c82e5cb2
--- /dev/null
+++ b/src/mqt/qecc/analog_information_decoding/simulators/quasi_single_shot_v2.py
@@ -0,0 +1,288 @@
+"""Quasi single shot simulator."""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any
+
+import numpy as np
+from bposd import bposd_decoder
+from pymatching import Matching
+
+from ..utils.data_utils import BpParams, _check_convergence
+from ..utils.simulation_utils import (
+ error_channel_setup,
+ generate_err,
+ generate_syndr_err,
+ get_binary_from_analog,
+ get_noisy_analog_syndrome,
+ get_sigma_from_syndr_er,
+ is_logical_err,
+ save_results,
+ set_seed,
+)
+from .memory_experiment_v2 import (
+ build_multiround_pcm,
+ decode_multiround,
+ move_syndrome,
+)
+
+if TYPE_CHECKING:
+ from numpy.typing import NDArray
+
+
+class QssSimulator:
+ """Quasi single shot simulator."""
+
+ def __init__(
+ self,
+ pcm: NDArray[np.int_],
+ per: float,
+ ser: float,
+ logicals: NDArray[np.int_],
+ bias: NDArray[np.float64],
+ codename: str,
+ bp_params: BpParams,
+ code_params: dict[str, int],
+ decoding_method: str = "bposd", # bposd or matching
+ check_side: str = "X",
+ seed: int = 666,
+ analog_tg: bool = False,
+ repetitions: int = 0,
+ rounds: int = 0,
+ experiment: str = "qss",
+ outpath: str = "/",
+ **kwargs: Any, # noqa: ANN401
+ ) -> None:
+ """Initialize QSS Simulator.
+
+        :param pcm: parity-check matrix of code
+        :param per: physical data error rate
+ :param ser: syndrome error rate
+ :param logicals: logical matrix
+ :param bias: bias array
+ :param codename: name of the code
+ :param bp_params: BP decoder parameters
+ :param check_side: side of the check (X or Z)
+ :param seed: random seed
+ :param analog_tg: switch analog decoding on/off
+ :param repetitions: number of total syndrome measurements, i.e., total time steps. Must be even.
+ :param rounds: number of decoding runs, i.e., number of times we slide the window - 1
+ :param experiment: name of experiment, for outpath creation
+ :param kwargs:
+ """
+ self.H = pcm
+ self.data_err_rate = per
+ self.syndr_err_rate = ser
+ self.check_side = check_side
+ self.L = logicals
+ self.bias = bias
+ self.codename = codename
+ self.bp_params = bp_params
+ self.decoding_method = decoding_method
+ self.save_interval = kwargs.get("save_interval", 50)
+ self.eb_precission = kwargs.get("eb_precission", 1e-2)
+ self.analog_tg = analog_tg
+ self.repetitions = repetitions
+ if repetitions % 2 != 0:
+ msg = "repetitions must be even"
+ raise ValueError(msg)
+
+ if self.decoding_method not in {"bposd", "matching"}:
+ msg = "Decoding method must be either bposd or matching"
+ raise ValueError(msg)
+
+ self.rounds = rounds
+ self.experiment = experiment
+ set_seed(seed)
+ self.code_params = code_params
+ self.input_values = self.__dict__.copy()
+
+ self.outfile = outpath
+
+ # Remove Arrays
+ del self.input_values["H"]
+ del self.input_values["L"]
+
+ self.num_checks, self.num_qubits = self.H.shape
+
+ self.x_bit_chnl, self.y_bit_chnl, self.z_bit_chnl = error_channel_setup(
+ error_rate=self.data_err_rate,
+ xyz_error_bias=bias,
+ nr_qubits=self.num_qubits,
+ )
+ self.x_syndr_err_chnl, self.y_syndr_err_chnl, self.z_syndr_err_chnl = error_channel_setup(
+ error_rate=self.syndr_err_rate,
+ xyz_error_bias=bias,
+ nr_qubits=self.num_checks,
+ )
+ if self.check_side == "X":
+ self.err_idx = 1
+ # Z bit/syndrome errors
+ self.data_err_channel = self.y_bit_chnl + self.z_bit_chnl
+ self.syndr_err_channel = 1.0 * (self.z_syndr_err_chnl + self.y_syndr_err_chnl)
+ else:
+ # we have X errors on qubits
+ self.err_idx = 0
+ # X bit/syndrome errors
+ self.data_err_channel = self.x_bit_chnl + self.y_bit_chnl
+ self.syndr_err_channel = 1.0 * (self.x_syndr_err_chnl + self.y_syndr_err_chnl)
+
+ # initialize the multiround parity-check matrix as described in the paper
+ self.H3D = build_multiround_pcm(
+ self.H,
+ self.repetitions - 1,
+ )
+
+        # the number of columns of the diagonal check matrix of the H3D matrix
+        self.check_block_size = self.num_qubits * self.repetitions
+
+        channel_probs: NDArray[np.float64] = np.zeros(self.H3D.shape[1]).astype(np.float64)
+        # The bits corresponding to the columns of the diagonal H-block of H3D are initialized with the bit channel
+        channel_probs[: self.check_block_size] = np.array(self.data_err_channel.tolist() * self.repetitions)
+
+        # The remaining bits (corresponding to the identity block of H3D)
+        # are initialized with the syndrome error channel
+        channel_probs[self.check_block_size :] = np.array(self.syndr_err_channel.tolist() * self.repetitions)
+
+ # If we do ATG decoding, initialize sigma (syndrome noise strength)
+ if self.analog_tg:
+ self.sigma = get_sigma_from_syndr_er(
+ self.syndr_err_channel[0] # x/z + y
+ ) # assumes all sigmas are the same
+ self.bp_iterations = 0
+ if self.decoding_method == "bposd":
+ self.decoder = bposd_decoder(
+ parity_check_matrix=self.H3D,
+ channel_probs=channel_probs,
+ max_iter=self.bp_params.max_bp_iter,
+ bp_method=self.bp_params.bp_method,
+ osd_order=self.bp_params.osd_order,
+ osd_method=self.bp_params.osd_method,
+ ms_scaling_factor=self.bp_params.ms_scaling_factor,
+ )
+ elif self.decoding_method == "matching":
+ weights = np.log((1 - channel_probs) / channel_probs)
+ self.decoder = Matching(self.H3D, weights=weights)
+ self.channel_probs = channel_probs
+
+ def _decode_multiround(
+ self,
+ syndrome_mat: NDArray[np.int_],
+ analog_syndr_mat: NDArray[np.float64],
+ last_round: bool = False,
+ ) -> tuple[Any, NDArray[np.int32], NDArray[np.float64], int]:
+ return decode_multiround(
+ syndrome=syndrome_mat,
+ pcm=self.H,
+ decoder=self.decoder,
+ repetitions=self.repetitions,
+ last_round=last_round,
+ analog_syndr=analog_syndr_mat,
+ check_block_size=self.check_block_size,
+ sigma=self.sigma,
+ h3d=self.H3D if self.decoding_method == "matching" else None, # avoid passing matrix in case not needed
+ channel_probs=self.channel_probs,
+ decoding_method=self.decoding_method,
+ )
+
+ def _single_sample(self) -> int:
+ # prepare fresh syndrome matrix and error vector
+ # each column == measurement result of a single timestep
+ syndrome_mat: NDArray[np.int32] = np.zeros((self.num_checks, self.repetitions), dtype=np.int32)
+
+        analog_syndr_mat: NDArray[np.float64] | None = None  # stays None if analog decoding is off
+        if self.analog_tg:
+            analog_syndr_mat = np.zeros((self.num_checks, self.repetitions), dtype=np.float64)
+
+ err: NDArray[np.int32] = np.zeros(self.num_qubits, dtype=np.int32)
+ cnt = 0 # counter for syndrome_mat
+
+ for rnd in range(self.rounds):
+ residual_err = [np.copy(err), np.copy(err)]
+ err = generate_err(
+ nr_qubits=self.num_qubits,
+ channel_probs=(
+ self.x_bit_chnl,
+ self.y_bit_chnl,
+ self.z_bit_chnl,
+ ),
+ residual_err=residual_err,
+ )[self.err_idx] # only first or last vector needed, depending on side (X or Z)
+ noiseless_syndrome = (self.H @ err) % 2
+
+ # add syndrome error
+ if rnd != (self.rounds - 1):
+ if self.analog_tg:
+ analog_syndrome = get_noisy_analog_syndrome(noiseless_syndrome, self.sigma)
+ syndrome = get_binary_from_analog(analog_syndrome)
+ else:
+ syndrome_error = generate_syndr_err(self.syndr_err_channel)
+ syndrome = (noiseless_syndrome + syndrome_error) % 2
+ else: # last round is perfect
+ syndrome = np.copy(noiseless_syndrome)
+ analog_syndrome = get_noisy_analog_syndrome(noiseless_syndrome, 0.0) # no noise
+
+ # fill the corresponding column of the syndrome/analog syndrome matrix
+ syndrome_mat[:, cnt] += syndrome
+ if self.analog_tg:
+ analog_syndr_mat[:, cnt] += analog_syndrome
+
+ cnt += 1 # move to next column of syndrome matrix
+
+ if cnt == self.repetitions: # if we have filled the syndrome matrix, decode
+ if rnd != (self.rounds - 1): # if not last round, decode and move syndrome
+ cnt = self.repetitions // 2 # reset counter to start of tentative region
+
+ # the correction is only the correction of the commit region
+ (corr, syndrome_mat, analog_syndr_mat, bp_iters) = self._decode_multiround(
+ syndrome_mat,
+ analog_syndr_mat,
+ last_round=False,
+ )
+ # we compute the average for all rounds since this equals a single sample
+ self.bp_iterations += int(bp_iters / self.rounds)
+ err = (err + corr) % 2
+ syndrome_mat = move_syndrome(syndrome_mat)
+ if self.analog_tg:
+ analog_syndr_mat = move_syndrome(analog_syndr_mat, data_type=np.float64)
+
+ else: # if we are in the last round, decode and stop
+ # the correction is the correction of the commit and tentative region
+ (corr, syndrome_mat, analog_syndr_mat, bp_iters) = self._decode_multiround(
+ syndrome_mat,
+ analog_syndr_mat,
+ last_round=True,
+ )
+ self.bp_iterations += int(bp_iters / self.rounds)
+ err = (err + corr) % 2
+ return int(not is_logical_err(self.L, err))
+
+ def _save_results(self, success_cnt: int, samples: int) -> dict[str, Any]:
+ return save_results(
+ success_cnt=success_cnt,
+ nr_runs=samples,
+ p=self.data_err_rate,
+ s=self.syndr_err_rate,
+ input_vals=self.input_values,
+ outfile=self.outfile,
+ code_params=self.code_params,
+ err_side="z" if self.check_side == "X" else "x",
+ bp_iterations=self.bp_iterations,
+ bp_params=self.bp_params,
+ )
+
+ def run(self, samples: int = 1) -> dict[str, Any]:
+ """Returns single data point."""
+ success_cnt = 0
+ for run in range(1, samples + 1):
+ success_cnt += self._single_sample()
+ if run % self.save_interval == 1:
+ self._save_results(success_cnt, run)
+ if _check_convergence(success_cnt, run, self.code_params, self.eb_precission):
+ print("Converged") # noqa: T201
+ break
+ return self._save_results(success_cnt, run)
diff --git a/src/mqt/qecc/analog_information_decoding/simulators/simulation.py b/src/mqt/qecc/analog_information_decoding/simulators/simulation.py
new file mode 100644
index 00000000..c4b7933a
--- /dev/null
+++ b/src/mqt/qecc/analog_information_decoding/simulators/simulation.py
@@ -0,0 +1,768 @@
+"""Single shot simulations."""
+
+from __future__ import annotations
+
+import json
+from pathlib import Path
+from timeit import default_timer as timer
+from typing import TYPE_CHECKING, Any
+
+import numpy as np
+from bposd import bposd_decoder
+
+from ..utils.data_utils import (
+ BpParams,
+ calculate_error_rates,
+ is_converged,
+ replace_inf,
+)
+from ..utils.data_utils import create_outpath as get_outpath
+from ..utils.simulation_utils import (
+ build_single_stage_pcm,
+ check_logical_err_h,
+ error_channel_setup,
+ generate_err,
+ generate_syndr_err,
+ get_binary_from_analog,
+ get_noisy_analog_syndrome,
+ get_sigma_from_syndr_er,
+ get_signed_from_binary,
+ get_virtual_check_init_vals,
+ is_logical_err,
+ set_seed,
+)
+
+if TYPE_CHECKING:
+ from numpy.typing import NDArray
+
+
+class SingleShotSimulator:
+ """Single shot decoding simulator."""
+
+ def __init__(
+ self,
+ codename: str,
+ per: float,
+ ser: float,
+ single_stage: bool,
+ seed: int,
+ bias: NDArray[np.float64],
+ x_meta: bool,
+ z_meta: bool,
+ sus_th_depth: int,
+ bp_params: BpParams,
+ code_params: dict[str, int],
+ hx: NDArray[np.int32],
+ hz: NDArray[np.int32],
+ mx: NDArray[np.int32] | None,
+ mz: NDArray[np.int32] | None,
+ lz: NDArray[np.int32] | None,
+ lx: NDArray[np.int32] | None,
+ analog_info: bool = False,
+ cutoff: int = 0,
+ analog_tg: bool = False,
+ **kwargs: Any, # noqa: ANN401
+ ) -> None:
+ """Initialize simulator."""
+ set_seed(seed)
+ self.codename = codename
+ self.data_err_rate = per
+ self.syndr_err_rate = ser
+ self.bias = bias
+ self.x_meta = x_meta
+ self.z_meta = z_meta
+ self.sus_th_depth = sus_th_depth
+ self.bp_params = bp_params
+ self.seed = seed
+ self.single_stage = single_stage
+ self.analog_info = analog_info
+ self.cutoff = cutoff
+ self.save_interval = kwargs.get("save_interval", 50)
+ self.eb_precission = kwargs.get("eb_precission", 1e-1)
+ self.analog_tg = analog_tg
+ self.x_bp_iters = 0
+ self.z_bp_iters = 0
+ # self.code_path = f"generated_codes/{codename}"
+ self.code_path = f"/codes/generated_codes/{codename}"
+ # Load code params
+
+ self.code_params = code_params
+
+ self.input_values = self.__dict__.copy()
+ self.outfile = get_outpath(**self.input_values)
+
+ # Set parity check matrices
+ self.Hx = hx
+ self.Hz = hz
+ if self.x_meta:
+ self.Mx = mx
+ if self.z_meta:
+ self.Mz = mz
+
+        self.n = self.Hx.shape[1]  # m == shape[0] is ambiguous between Hx and Hz; use their shapes directly
+ self.check_input()
+ # load logicals if possible
+ if lx is not None and lz is not None:
+ self.lx = lx
+ self.lz = lz
+ self._check_logicals = True
+ else:
+ self._check_logicals = False
+
+ (
+ self.x_bit_err_channel,
+ self.y_bit_err_channel,
+ self.z_bit_err_channel,
+ ) = error_channel_setup(self.data_err_rate, self.bias, self.n)
+ # encapsulates all bit error channels
+ self.data_error_channel = (
+ self.x_bit_err_channel,
+ self.y_bit_err_channel,
+ self.z_bit_err_channel,
+ )
+
+ # now setup syndrome error channels.
+ # This needs two calls since the syndromes may have different lengths
+ # first vector ([0]) is x channel probability vector
+ # by our convention the syndrome is named after the error.
+ full_x_syndr_err_chnl = error_channel_setup(self.syndr_err_rate, self.bias, self.Hz.shape[0])
+ full_z_syndr_err_chnl = error_channel_setup(self.syndr_err_rate, self.bias, self.Hx.shape[0])
+ self.x_syndr_error_channel = full_x_syndr_err_chnl[0] + full_x_syndr_err_chnl[1] # x+y
+ self.z_syndr_error_channel = full_z_syndr_err_chnl[2] + full_z_syndr_err_chnl[1] # z+y
+
+ # if we want to decode with analog syndrome noise and an analog decoder
+ if self.analog_info or self.analog_tg:
+ self.sigma_x = get_sigma_from_syndr_er(self.x_syndr_error_channel[0])
+ self.sigma_z = get_sigma_from_syndr_er(self.z_syndr_error_channel[0])
+
+        # if we want to decode with the analog Tanner graph method, construct the respective matrices.
+        # These are assumed to exist in the *_setup() methods.
+ if self.analog_tg:
+ self.x_apcm, self.z_apcm = self.construct_analog_pcms()
+
+ if self.single_stage:
+ self._single_stage_setup()
+ else:
+ self._two_stage_setup()
+
+ self._total_decoding_time = 0.0
+
+ def check_input(self) -> None:
+ """Check initialization parameters for consistency."""
+ if self.analog_tg is True and self.analog_info is True:
+ msg = "analog_tg and analog_info cannot be both True"
+ raise ValueError(msg)
+
+ def _single_sample(
+ self,
+ ) -> tuple[bool, bool]:
+ """Simulates a single sample for a given sustainable threshold depth."""
+ residual_err: list[NDArray[np.int32]] = [
+ np.zeros(self.n).astype(np.int32), # X-residual error part
+ np.zeros(self.n).astype(np.int32), # Z-residual error part
+ ]
+
+ # for single shot simulation we have sus_th_depth number of 'noisy' simulations (residual error carried over)
+ # followed by a single round of perfect syndrome extraction after the sustainable threshold loop
+ for _round in range(self.sus_th_depth):
+ x_err, z_err = generate_err(
+ nr_qubits=self.n,
+ channel_probs=self.data_error_channel,
+ residual_err=residual_err,
+ )
+            # by our convention, we name the syndrome after the error that occurred;
+            # the check error rate, however, depends on which check matrix is affected,
+            # hence it is named after the check matrix.
+ x_syndrome = self.Hz @ x_err % 2
+ z_syndrome = self.Hx @ z_err % 2
+
+ x_syndrome_w_err, z_syndrome_w_err = self._get_noisy_syndrome(x_syndrome, z_syndrome)
+
+ # collect total decoding time
+ start = timer()
+ if self.single_stage:
+ x_decoded, z_decoded = self._single_stage_decoding(x_syndrome_w_err, z_syndrome_w_err)
+ else:
+ x_decoded, z_decoded = self._two_stage_decoding(x_syndrome_w_err, z_syndrome_w_err)
+ end = timer()
+ self._total_decoding_time += end - start
+
+ residual_err = [
+ np.array((x_err + x_decoded) % 2, dtype=np.int32), # np conversion needed to avoid rt error
+ np.array((z_err + z_decoded) % 2, dtype=np.int32),
+ ]
+
+ # perfect measurement round at the end
+ x_err, z_err = generate_err(
+ nr_qubits=self.n,
+ channel_probs=self.data_error_channel,
+ residual_err=residual_err,
+ )
+
+ # X-syndrome: sx = Hz * ex
+ # Z-syndrome: sz = Hx * ez
+ x_syndrome = self.Hz @ x_err % 2
+ z_syndrome = self.Hx @ z_err % 2
+
+ start = timer()
+ x_decoded = self.x_bpd.decode(x_syndrome)
+ z_decoded = self.z_bpd.decode(z_syndrome)
+ end = timer()
+ self._total_decoding_time += end - start
+
+ # residual[0]: X-residual error, residual[1]: Z-residual error
+ residual_err = [(x_err + x_decoded) % 2, (z_err + z_decoded) % 2]
+
+ x_residual_err = residual_err[0]
+ z_residual_err = residual_err[1]
+
+ # check for logical errors
+ # check if residual X-error commutes with Z-logical and vice versa
+ # equivalently, check if residual X-error is in rowspace of Hz and vice versa
+ if self._check_logicals:
+ is_x_logical_error = is_logical_err(self.lz, x_residual_err)
+ is_z_logical_error = is_logical_err(self.lx, z_residual_err)
+ else:
+ is_x_logical_error = check_logical_err_h(self.Hz, x_err, x_residual_err)
+ is_z_logical_error = check_logical_err_h(self.Hx, z_err, z_residual_err)
+
+ return is_x_logical_error, is_z_logical_error
+
+ def _get_noisy_syndrome(
+ self, x_syndrome: NDArray[np.int32], z_syndrome: NDArray[np.int32]
+ ) -> tuple[NDArray[Any], NDArray[Any]]:
+ if self.syndr_err_rate != 0.0:
+ if self.analog_info or self.analog_tg: # analog syndrome error with converted sigma
+ x_syndrome_w_err = get_noisy_analog_syndrome(perfect_syndr=x_syndrome, sigma=self.sigma_x)
+ z_syndrome_w_err = get_noisy_analog_syndrome(perfect_syndr=z_syndrome, sigma=self.sigma_z)
+ else: # usual pauli error channel syndrome error
+ x_syndrome_err = generate_syndr_err(channel_probs=self.x_syndr_error_channel)
+ x_syndrome_w_err = (x_syndrome + x_syndrome_err) % 2 # type: ignore[assignment] # only occurs due to reused var name
+ z_syndrome_err = generate_syndr_err(channel_probs=self.z_syndr_error_channel)
+ z_syndrome_w_err = (z_syndrome + z_syndrome_err) % 2 # type: ignore[assignment] # only occurs due to reused var name
+ else:
+ x_syndrome_w_err = np.copy(x_syndrome)
+ z_syndrome_w_err = np.copy(z_syndrome)
+
+ return x_syndrome_w_err, z_syndrome_w_err
+
+ def _single_stage_decoding(
+ self, x_syndrome_w_err: NDArray[np.float64], z_syndrome_w_err: NDArray[np.float64]
+ ) -> tuple[NDArray[np.int32], NDArray[np.int32]]:
+ """Single stage decoding of the given syndromes.
+
+ If meta checks are activated, we apply the single-stage decoding method.
+ This is complemented by either analog_tg decoding or analog_info decoding (or standard decoding) of the
+ single stage matrix.
+
+ Otherwise, we apply the decoding methods to the standard check matrices, hence the meta code is not used.
+ """
+ # for convenience
+ x_err_channel = self.x_bit_err_channel + self.y_bit_err_channel
+ z_err_channel = self.z_bit_err_channel + self.y_bit_err_channel
+
+ if self.x_meta and self.Mx is not None:
+ z_decoded, self.z_bp_iters = self._decode_ss_with_meta(
+ syndrome_w_err=z_syndrome_w_err,
+ meta_pcm=self.Mx,
+ ss_bpd=self.ss_z_bpd,
+ bit_err_channel=z_err_channel,
+ sigma=self.sigma_z,
+ )
+ else: # do not use x meta checks, just decode with noisy syndrome on standard check matrix
+ z_decoded, self.z_bp_iters = self._decode_ss_no_meta(
+ syndrome_w_err=z_syndrome_w_err,
+ analog_tg_decoder=self.z_abpd,
+ standard_decoder=self.z_bpd,
+ bit_err_channel=z_err_channel,
+ sigma=self.sigma_z,
+ )
+
+ if self.z_meta and self.Mz is not None:
+ x_decoded, self.x_bp_iters = self._decode_ss_with_meta(
+ syndrome_w_err=x_syndrome_w_err,
+ meta_pcm=self.Mz,
+ ss_bpd=self.ss_x_bpd,
+ bit_err_channel=x_err_channel,
+ sigma=self.sigma_x,
+ )
+ else: # do not use x meta checks, just decode with noisy syndrome on check matrix or analog_tg method
+ x_decoded, self.x_bp_iters = self._decode_ss_no_meta(
+ syndrome_w_err=x_syndrome_w_err,
+ analog_tg_decoder=self.x_abpd,
+ standard_decoder=self.x_bpd,
+ bit_err_channel=x_err_channel,
+ sigma=self.sigma_x,
+ )
+ return x_decoded, z_decoded
+
+ def _decode_ss_no_meta(
+ self,
+ syndrome_w_err: NDArray[np.float64],
+ analog_tg_decoder: Any, # noqa: ANN401
+ standard_decoder: Any, # noqa: ANN401
+ bit_err_channel: NDArray[np.float64],
+ sigma: float,
+ ) -> tuple[NDArray[np.int32], int]:
+ """Decoding of syndrome without meta checks.
+
+        In case analog_tg is active, we use the analog Tanner graph decoding method, which uses the analog syndrome.
+ Otherwise, we decode the standard check matrix with BPOSD, or the AI decoder in case analog_info is active.
+ """
+ if self.analog_tg:
+ decoded = self._analog_tg_decoding(
+ decoder=analog_tg_decoder,
+ hard_syndrome=get_binary_from_analog(syndrome_w_err),
+ analog_syndrome=syndrome_w_err,
+ bit_err_channel=bit_err_channel,
+ sigma=sigma,
+ )
+ it = analog_tg_decoder.iter
+ else:
+ decoded = standard_decoder.decode(syndrome_w_err)
+ it = standard_decoder.iter
+ return decoded, it
+
+ def _decode_ss_with_meta(
+ self,
+ syndrome_w_err: NDArray[np.float64],
+ ss_bpd: Any, # noqa: ANN401
+ meta_pcm: NDArray[np.int32],
+ bit_err_channel: NDArray[np.float64],
+ sigma: float,
+ ) -> tuple[NDArray[np.int32], int]:
+ """Single-Stage decoding for given syndrome.
+
+        If analog_tg is active, we use the analog Tanner graph decoding method, which uses the analog syndrome to
+ initialize the virtual nodes s.t. they contain the soft info.
+
+ If analog_info is active, we use the analog_info decoder on the single-stage matrix.
+
+ Otherwise, standard single-stage decoding is applied
+ """
+ if self.analog_tg:
+ decoded = self._ss_analog_tg_decoding(
+ decoder=ss_bpd,
+ analog_syndrome=syndrome_w_err,
+ meta_pcm=meta_pcm,
+ bit_err_channel=bit_err_channel,
+ sigma=sigma,
+ )
+ else:
+ if self.analog_info:
+ meta_bin = (meta_pcm @ get_binary_from_analog(syndrome_w_err)) % 2
+ meta_syndr = get_signed_from_binary(meta_bin) # for AI decoder we need {-1,+1} syndrome as input
+ else:
+ meta_syndr = (meta_pcm @ syndrome_w_err) % 2 # type: ignore[assignment] # only occurs due to reused var name
+
+ ss_syndr = np.hstack((syndrome_w_err, meta_syndr))
+ # only first n bit are data, the other are virtual nodes and can be discarded for estimate
+ decoded = ss_bpd.decode(ss_syndr)[: self.n]
+ return decoded, ss_bpd.iter
+
+ def _ss_analog_tg_decoding(
+ self,
+ decoder: Any, # noqa: ANN401
+ analog_syndrome: NDArray[np.float64],
+ meta_pcm: NDArray[np.int32],
+ bit_err_channel: NDArray[np.float64],
+ sigma: float,
+ ) -> NDArray[np.int32]:
+ """Decodes the noisy analog syndrome using the single stage analog tanner graph and BPOSD.
+
+ That is, combines single-stage and analog tanner graph method.
+ In the standard single-stage method, BP is initialized with the bit channel + the syndrome channel.
+ In order for the virtual nodes to contain the analog info, we adapt the syndrome channel by computing
+ the 'analog channel' given the analog syndrome.
+ """
+ analog_channel = get_virtual_check_init_vals(analog_syndrome, sigma)
+ decoder.update_channel_probs(np.hstack((bit_err_channel, analog_channel)))
+
+ bin_syndr = get_binary_from_analog(analog_syndrome) # here we need to threshold since syndrome is analog
+ meta_syndr = (meta_pcm @ bin_syndr) % 2
+ ss_syndr = np.hstack((bin_syndr, meta_syndr))
+ # only first n bit are data, return them
+ return decoder.decode(ss_syndr)[: self.n] # type: ignore[no-any-return]
+
+ def _two_stage_decoding(
+ self, x_syndrome_w_err: NDArray[np.float64], z_syndrome_w_err: NDArray[np.float64]
+ ) -> tuple[NDArray[np.int32], NDArray[np.int32]]:
+ """Two stage decoding of single shot code.
+
+ Meta checks on either side (or both) can be deactivated.
+
+ If the meta checks for a side are activated the syndrome is thresholded and repaired using the meta code.
+
+ If analog_tg is used for decoding, the analog syndrome is used to initialize the BP decoder and the
+ thresholded (repaired) syndrome is used as input.
+
+        In case analog_info is activated, x_bpd/z_bpd have been set accordingly in the setup() method.
+ """
+ if self.x_meta and self.Mx is not None:
+            # Mx corrects failures of Hx and Mz corrects failures of Hz;
+            # thus, the meta syndrome of z_syndrome is Mx*z_syndrome, where z_syndrome = Hx*z_error
+ z_syndrome_repaired = self._meta_code_decoding(
+ syndrome_w_err=z_syndrome_w_err,
+ meta_pcm=self.Mx,
+ m_bp=self.mz_bp,
+ )
+ else:
+ z_syndrome_repaired = np.copy(z_syndrome_w_err).astype(np.int32)
+
+ if self.z_meta and self.Mz is not None:
+ x_syndrome_repaired = self._meta_code_decoding(
+ syndrome_w_err=x_syndrome_w_err,
+ meta_pcm=self.Mz,
+ m_bp=self.mx_bp,
+ )
+ else:
+ x_syndrome_repaired = np.copy(x_syndrome_w_err).astype(np.int32)
+
+ if self.analog_tg: # decode with analog tg method - update channel probs to init analog nodes
+ x_decoded = self._analog_tg_decoding(
+ decoder=self.x_abpd,
+ hard_syndrome=x_syndrome_repaired,
+ analog_syndrome=x_syndrome_w_err,
+ bit_err_channel=self.z_bit_err_channel + self.y_bit_err_channel,
+ sigma=self.sigma_x,
+ )
+ z_decoded = self._analog_tg_decoding(
+ decoder=self.z_abpd,
+ hard_syndrome=z_syndrome_repaired,
+ analog_syndrome=z_syndrome_w_err,
+ bit_err_channel=self.x_bit_err_channel + self.y_bit_err_channel,
+ sigma=self.sigma_z,
+ )
+ else:
+ x_decoded = self.x_bpd.decode(x_syndrome_repaired)
+ z_decoded = self.z_bpd.decode(z_syndrome_repaired)
+
+ return x_decoded, z_decoded
+
+ def _meta_code_decoding(
+ self,
+ syndrome_w_err: NDArray[np.float64],
+ meta_pcm: NDArray[np.int32],
+ m_bp: Any, # noqa: ANN401
+ ) -> NDArray[np.int32]:
+ """Decodes the noisy syndrome using the meta code.
+
+ If analog_info or analog_tg is activated the analog syndrome is thresholded to binary to be able to use
+ the meta code. The binary syndrome is repaired according to the meta code and a binary,
+ repaired syndrome is returned.
+ """
+ if self.analog_info or self.analog_tg:
+ syndrome_w_err_binary = get_binary_from_analog(syndrome_w_err)
+ else:
+ syndrome_w_err_binary = np.copy(syndrome_w_err)
+
+ meta_syndrome = (meta_pcm @ syndrome_w_err_binary) % 2
+ meta_decoded = m_bp.decode(meta_syndrome)
+ syndrome_w_err_binary += meta_decoded # repair syndrome
+ syndrome_w_err_binary %= 2
+
+ return syndrome_w_err_binary
+
+ def _analog_tg_decoding(
+ self,
+ decoder: Any, # noqa: ANN401
+ analog_syndrome: NDArray[np.float64],
+ hard_syndrome: NDArray[np.int32],
+ bit_err_channel: NDArray[np.float64],
+ sigma: float,
+ ) -> NDArray[np.int32]:
+ """Decodes the noisy analog syndrome using the analog tanner graph and BPOSD.
+
+ First, the channel probabilities need to be set according to the analog syndrome s.t. the decoder is
+ initialized properly.
+ Only the first n bits of the decoding estimate returned by the decoder are true estimates,
+ the other bits are acting as 'virtual checks' and thus excluded from the result.
+ The bit_err_channel is supposed to be the error channel for the n physical bits as usual.
+ """
+ analog_channel = get_virtual_check_init_vals(analog_syndrome, sigma)
+ decoder.update_channel_probs(np.hstack((bit_err_channel, analog_channel)))
+ # only first n bit are data so only return those
+ return decoder.decode(hard_syndrome)[: self.n] # type: ignore[no-any-return]
+
+ def _single_stage_setup(
+ self,
+ ) -> None:
+ """Sets up the single stage decoding.
+
+        * BPOSD decoders for the single-stage check matrices (cf. Higgott & Breuckmann) are set up in case
+          there is a meta code for the respective side.
+        * BPOSD decoders for the check matrices Hx/Hz are set up for the last, perfect round.
+        * In case analog_tg is activated, BPOSD decoders for the analog Tanner graph are set up.
+        * If analog_info is active, the SI decoder is used instead of BPOSD to decode the single-stage
+          matrices and the standard check matrices.
+ """
+        # X-syndrome: sx = Hz * ex
+        # x_syndr_error == error on Hz => corrected with Mz
+
+        # set up single-stage matrices (Higgott & Breuckmann method):
+        # H_ss = [[H, I_m],    with H ~ m x n, I_m ~ m x m, M ~ l x m
+        #         [0, M  ]]
+ if self.z_meta and self.Mz is not None:
+ self.ss_z_pcm = build_single_stage_pcm(self.Hz, self.Mz)
+ else:
+ self.ss_z_pcm = None # type: ignore[assignment]
+ if self.x_meta and self.Mx is not None:
+ self.ss_x_pcm = build_single_stage_pcm(self.Hx, self.Mx)
+ else:
+ self.ss_x_pcm = None # type: ignore[assignment]
+
+ # X-checks := (Hx|Mx) => Influenced by Z-syndrome error rate
+ # ss_x_bpd used to decode X bit errors using Z-side check matrices
+ # single shot bp operates on the single shot pcm (Hz|Mz)
+ # hence we need x bit error rate and x syndrome error rate
+ x_err_channel = self.x_bit_err_channel + self.y_bit_err_channel
+ if self.ss_z_pcm is not None:
+ self.ss_x_bpd = self.get_decoder(
+ pcm=self.ss_z_pcm,
+ channel_probs=np.hstack((x_err_channel, self.x_syndr_error_channel)),
+ # sigma=self.sigma_x,
+ # analog_info=self.analog_info,
+ )
+
+ if self.analog_tg:
+ self.x_abpd = self.get_decoder(
+ pcm=self.z_apcm,
+ # second part dummy, needs to be updated for each syndrome
+ channel_probs=np.hstack((x_err_channel, np.zeros(self.Hz.shape[0]))),
+ # cutoff=self.cutoff,
+ # analog_info=False,
+ # sigma not needed, since we apply BPOSD
+ )
+ else:
+ self.x_abpd = None
+ self.x_bpd = self.get_decoder(
+ pcm=self.Hz,
+ channel_probs=x_err_channel,
+ # cutoff=self.cutoff,
+ # analog_info=self.analog_info,
+ )
+
+ z_err_channel = self.z_bit_err_channel + self.y_bit_err_channel
+ if self.ss_x_pcm is not None:
+ self.ss_z_bpd = self.get_decoder(
+ pcm=self.ss_x_pcm,
+ channel_probs=np.hstack((z_err_channel, self.z_syndr_error_channel)),
+ # cutoff=self.cutoff,
+ # analog_info=self.analog_info,
+ # sigma=self.sigma_z,
+ )
+
+ if self.analog_tg:
+ self.z_abpd = self.get_decoder(
+ pcm=self.x_apcm,
+ # second part dummy, needs to be updated for each syndrome
+ channel_probs=np.hstack((z_err_channel, np.zeros(self.Hx.shape[0]))),
+ # cutoff=self.cutoff,
+ # analog_info=self.analog_info,
+ # sigma not needed, since we apply BPOSD
+ )
+ else:
+ self.z_abpd = None
+ self.z_bpd = self.get_decoder(
+ pcm=self.Hx,
+ channel_probs=z_err_channel,
+ # cutoff=self.cutoff,
+ # analog_info=self.analog_info,
+ # sigma=self.sigma_z,
+ )
+
+ def _two_stage_setup(
+ self,
+ ) -> None:
+ """Sets up the two stage decoding.
+
+        * In case meta codes are present, BPOSD decoders for the meta codes are set up.
+        * In case analog_tg is active, BPOSD decoders for the analog Tanner graph are set up.
+        * Additionally, BPOSD decoders for Hx, Hz are set up for the final round.
+ """
+ # used to decode errors in Hz == x-syndrome errors with x_synd_error rate
+ if self.z_meta and self.Mz is not None:
+ self.mx_bp = self.get_decoder(
+ pcm=self.Mz,
+ channel_probs=self.x_syndr_error_channel,
+ # cutoff=self.cutoff,
+ # analog_info=False, # =False and sigma not needed, since we don't have soft info for meta code
+ )
+
+ # used to decode errors in Hx
+ if self.x_meta and self.Mx is not None:
+ self.mz_bp = self.get_decoder(
+ pcm=self.Mx,
+ channel_probs=self.z_syndr_error_channel,
+ # cutoff=self.cutoff,
+ # analog_info=False, # =False and sigma not needed, since we don't have soft info for meta code
+ )
+
+ x_err_channel = self.x_bit_err_channel + self.y_bit_err_channel
+ if self.analog_tg:
+ self.x_abpd = self.get_decoder(
+ pcm=self.z_apcm,
+ channel_probs=np.hstack((x_err_channel, np.zeros(self.Hz.shape[0]))),
+ # second part dummy, needs to be updated for each syndrome
+ # cutoff=self.cutoff,
+ # analog_info=self.analog_info,
+ # sigma not needed, since we apply BPOSD
+ )
+ self.x_bpd = self.get_decoder(
+ pcm=self.Hz,
+ channel_probs=x_err_channel,
+ # cutoff=self.cutoff,
+ # analog_info=self.analog_info,
+ )
+
+ z_err_channel = self.z_bit_err_channel + self.y_bit_err_channel
+ if self.analog_tg:
+ self.z_abpd = self.get_decoder(
+ pcm=self.x_apcm,
+ # second part dummy, needs to be updated for each syndrome
+ channel_probs=np.hstack((z_err_channel, np.zeros(self.Hx.shape[0]))),
+ # cutoff=self.cutoff,
+ # analog_info=self.analog_info,
+ # sigma not needed, since we apply BPOSD
+ )
+ self.z_bpd = self.get_decoder(
+ pcm=self.Hx,
+ channel_probs=z_err_channel,
+ # cutoff=self.cutoff,
+ # analog_info=self.analog_info,
+ # sigma not needed, since we don't have analog info for meta code
+ )
+
+ def get_decoder(
+ self,
+ pcm: NDArray[np.int32],
+ channel_probs: NDArray[np.float64],
+ # cutoff: int = 0,
+ # sigma: float = 0.0,
+ # analog_info: bool = False,
+ ) -> Any: # noqa: ANN401
+ """Initialize decoder objects.
+
+ If analog_info is activated, the SoftInfoBpDecoder is used instead of the BPOSD decoder.
+ Note that analog_info and analog_tg cannot be used simultaneously.
+ """
+ return bposd_decoder(
+ parity_check_matrix=pcm,
+ channel_probs=channel_probs,
+ max_iter=self.bp_params.max_bp_iter,
+ bp_method=self.bp_params.bp_method,
+ osd_order=self.bp_params.osd_order,
+ osd_method=self.bp_params.osd_method,
+ ms_scaling_factor=self.bp_params.ms_scaling_factor,
+ )
+
+ def construct_analog_pcms(self) -> tuple[NDArray[np.int32], NDArray[np.int32]]:
+ """Constructs apcm = [H | I_m] where I_m is the m x m identity matrix."""
+ return np.hstack([self.Hx, np.identity(self.Hx.shape[0], dtype=np.int32)]), np.hstack([
+ self.Hz,
+ np.identity(self.Hz.shape[0], dtype=np.int32),
+ ])
+
+ def save_results(
+ self,
+ x_success_cnt: int,
+ z_success_cnt: int,
+ runs: int,
+ x_bp_iters: int,
+ z_bp_iters: int,
+ ) -> dict[str, Any]:
+ """Saves the results of the simulation to a json file."""
+ x_ler, x_ler_eb, x_wer, x_wer_eb = calculate_error_rates(x_success_cnt, runs, self.code_params)
+ z_ler, z_ler_eb, z_wer, z_wer_eb = calculate_error_rates(z_success_cnt, runs, self.code_params)
+
+ output = {
+ "code_K": self.code_params["k"],
+ "code_N": self.code_params["n"],
+ "nr_runs": runs,
+ "pers": self.data_err_rate,
+ "sers": self.syndr_err_rate,
+ "x_ler": x_ler,
+ "x_ler_eb": x_ler_eb,
+ "x_wer": x_wer,
+ "x_wer_eb": x_wer_eb,
+ "x_success_cnt": x_success_cnt,
+ "z_ler": z_ler,
+ "z_ler_eb": z_ler_eb,
+ "z_wer": z_wer,
+ "z_wer_eb": z_wer_eb,
+ "z_success_cnt": z_success_cnt,
+ "avg_x_bp_iter": x_bp_iters / runs,
+ "avg_z_bp_iter": z_bp_iters / runs,
+ "decoding_time": self._total_decoding_time,
+ }
+
+ output.update(self.input_values)
+ output["bias"] = replace_inf(output["bias"]) # type: ignore[assignment, arg-type]
+
+        with Path(self.outfile).open("w") as f:
+ f.write(
+ json.dumps(
+ output,
+ ensure_ascii=False,
+ indent=4,
+ default=lambda o: o.__dict__,
+ )
+ )
+ return output
+
+ def run(self, samples: int) -> dict[str, Any]:
+ """Run the simulation."""
+ x_success_cnt = 0
+ z_success_cnt = 0
+ x_bp_iters = 0
+ z_bp_iters = 0
+ for runs in range(1, samples + 1):
+ out = self._single_sample()
+ if not out[0]:
+ x_success_cnt += 1
+ if not out[1]:
+ z_success_cnt += 1
+ x_bp_iters += self.x_bp_iters
+ z_bp_iters += self.z_bp_iters
+
+ if runs % self.save_interval == 0:
+ self.save_results(x_success_cnt, z_success_cnt, runs, x_bp_iters=x_bp_iters, z_bp_iters=z_bp_iters)
+ if is_converged(
+ x_success_cnt,
+ z_success_cnt,
+ runs,
+ self.code_params,
+ self.eb_precission,
+ ):
+ print("Result has converged.") # noqa: T201
+ break
+
+ x_ler, x_ler_eb, x_wer, x_wer_eb = calculate_error_rates(x_success_cnt, runs, self.code_params)
+ z_ler, z_ler_eb, z_wer, z_wer_eb = calculate_error_rates(z_success_cnt, runs, self.code_params)
+ avg_x_bp_iter = x_bp_iters / runs
+ avg_z_bp_iter = z_bp_iters / runs
+ output = {
+ "code_K": self.code_params["k"],
+ "code_N": self.code_params["n"],
+ "nr_runs": runs,
+ "pers": self.data_err_rate,
+ "sers": self.syndr_err_rate,
+ "x_ler": x_ler,
+ "x_ler_eb": x_ler_eb,
+ "x_wer": x_wer,
+ "x_wer_eb": x_wer_eb,
+ "x_success_cnt": x_success_cnt,
+ "z_ler": z_ler,
+ "z_ler_eb": z_ler_eb,
+ "z_wer": z_wer,
+ "z_wer_eb": z_wer_eb,
+ "z_success_cnt": z_success_cnt,
+ "avg_x_bp_iters": avg_x_bp_iter,
+ "avg_z_bp_iters": avg_z_bp_iter,
+ "decoding_time": self._total_decoding_time,
+ }
+
+ output.update(self.input_values)
+
+ self.save_results(x_success_cnt, z_success_cnt, runs, x_bp_iters=x_bp_iters, z_bp_iters=z_bp_iters)
+ return output
diff --git a/src/mqt/qecc/analog_information_decoding/utils/data_utils.py b/src/mqt/qecc/analog_information_decoding/utils/data_utils.py
new file mode 100644
index 00000000..026bf484
--- /dev/null
+++ b/src/mqt/qecc/analog_information_decoding/utils/data_utils.py
@@ -0,0 +1,535 @@
+"""This module contains utility functions for loading and processing raw simulation data."""
+
+from __future__ import annotations
+
+import itertools
+import json
+import os
+from dataclasses import dataclass, fields
+from json.decoder import JSONDecodeError
+from pathlib import Path
+from typing import Any
+
+import numpy as np
+
+
+@dataclass
+class BpParams:
+ """Class to store parameters for BP decoding."""
+
+ bp_method: str = "msl"
+ max_bp_iter: int = 30
+ osd_order: int = 10
+ osd_method: str = "osd_cs"
+ ms_scaling_factor: float = 0.75
+ schedule: str = "parallel"
+ omp_thread_count: int = 1
+ random_serial_schedule: int = 0
+ serial_schedule_order: list[int] | None = None
+ cutoff: float = np.inf
+
+ @classmethod
+ def from_dict(cls, dict_: dict[str, Any]) -> BpParams:
+ """Creates a BpParams object from a dictionary."""
+ class_fields = {f.name for f in fields(cls)}
+ return BpParams(**{k: v for k, v in dict_.items() if k in class_fields})
+
+
+def extract_settings(filename: str) -> dict[str, list[str]]:
+ """Extracts all settings from a parameter file and returns them as a dictionary."""
+ keyword_lists: dict[str, list[str]] = {}
+
+ with Path(filename).open() as file:
+ for line in file:
+ json_data = json.loads(line.strip())
+ for keyword, value in json_data.items():
+ if keyword not in keyword_lists:
+ keyword_lists[keyword] = []
+ if value not in keyword_lists[keyword]:
+ keyword_lists[keyword].append(value)
+
+ return keyword_lists
+
+
+def load_data(
+ input_filenames: list[str],
+) -> list[dict[str, str]]:
+ """Loads data from a list of JSON files and returns it as a list of dictionaries."""
+ data = []
+ for file in input_filenames:
+ path = Path(file)
+
+ try:
+ ldata = json.load(path.open())
+ data.append(ldata)
+ except json.decoder.JSONDecodeError:
+ merge_json_files(str(path.with_suffix("")))
+ ldata = json.load(path.open())
+ data.append(ldata)
+ return data
+
+
+def calculate_error_rates(
+ success_cnt: int, runs: int, code_params: dict[str, int]
+) -> tuple[float, float, float, float]:
+ """Calculates logical error rates."""
+ logical_err_rate = 1.0 - (success_cnt / runs)
+ logical_err_rate_eb = np.sqrt((1 - logical_err_rate) * logical_err_rate / runs)
+ word_error_rate = 1.0 - (1 - logical_err_rate) ** (1 / code_params["k"])
+    # error propagation: d(wer)/d(ler) = (1 - ler)^(1/k - 1) / k
+    word_error_rate_eb = (
+        logical_err_rate_eb * ((1 - logical_err_rate) ** (1 / code_params["k"] - 1)) / code_params["k"]
+    )
+ return (
+ logical_err_rate,
+ logical_err_rate_eb,
+ word_error_rate,
+ word_error_rate_eb,
+ )
+
+
+def is_converged(x_success: int, z_success: int, runs: int, code_params: dict[str, int], precision: float) -> bool:
+ """Checks if the logical error rates for X and Z are converged."""
+ x_cond = _check_convergence(x_success, runs, code_params, precission_cutoff=precision)
+ z_cond = _check_convergence(z_success, runs, code_params, precission_cutoff=precision)
+    return x_cond is True and z_cond is True
+
+
+def _check_convergence(
+ success_cnt: int, runs: int, code_params: dict[str, int], precission_cutoff: float
+) -> bool | None:
+ _, _, ler, ler_eb = calculate_error_rates(success_cnt, runs, code_params)
+ if ler_eb != 0.0:
+ if ler_eb / ler < precission_cutoff:
+ return True
+ return None
+ return False
+
+
+def create_outpath(
+ x_meta: bool = False,
+ z_meta: bool = False,
+ bias: list[float] | None = None,
+ codename: str | None = None,
+ single_stage: bool = True,
+ sus_th_depth: int | None = None,
+ rounds: int | None = None,
+ identifier: int = 0,
+ analog_info: bool = False,
+ analog_tg: bool = False,
+ repetitions: int | None = None,
+ experiment: str = "wer_per_round",
+ **kwargs: dict[str, Any],
+) -> str:
+ """Creates a path for storing simulation results."""
+ path = f"results/{experiment:s}/"
+ if analog_info:
+ path += "analog_info/" # ranvendraan et al analog info decoder
+ elif analog_tg:
+ path += "analog_tg/" # analog tannergraph bp
+ else:
+ path += "hard_syndrome/" # standard hard syndrome decoding only
+ if bias is not None:
+ path += f"single_stage={single_stage}/bias={bias[0]}_{bias[1]}_{bias[2]}/"
+
+ if sus_th_depth:
+ path += f"sus_th_depth={sus_th_depth}/"
+ elif rounds:
+ path += f"rounds={rounds}/"
+
+ if repetitions:
+ path += f"repetitions={repetitions}/"
+
+ if x_meta:
+ path += "x-meta=true/"
+ else:
+ path += "x-meta=false/"
+
+ if z_meta:
+ path += "z-meta=true/"
+ else:
+ path += "z-meta=false/"
+
+ path += f"{codename:s}/"
+
+ if "syndr_err_rate" not in kwargs or kwargs["syndr_err_rate"] is None:
+ if "sigma" in kwargs:
+ path += f"per_{kwargs['data_err_rate']:.3e}_sigma_{kwargs['sigma']:.3e}/"
+ if "z_sigma" in kwargs:
+ path += f"per_{kwargs['data_err_rate']:.3e}_x_sigma_{kwargs['x_sigma']:.3e}_z_sigma_{kwargs['z_sigma']:.3e}"
+ else:
+ path += f"per_{kwargs['data_err_rate']:.3e}_ser_{kwargs['syndr_err_rate']:.3e}/"
+
+ Path(path).mkdir(parents=True, exist_ok=True)
+
+ f_loc = path + f"id_{identifier}.json"
+ while Path(f_loc).exists():
+ identifier += 1
+ f_loc = path + f"id_{identifier}.json"
+
+ Path(f_loc).touch()
+ return f_loc
+
+
+def replace_inf(lst: list[float]) -> list[float | str]:
+    """Replaces all occurrences of np.inf in a list with the string "i"."""
+ new_lst = []
+ for item in lst:
+ if np.isinf(item):
+ new_lst.append("i")
+ else:
+ new_lst.append(item)
+ return new_lst
+
+
+def product_dict(**kwargs: Any) -> Any: # noqa: ANN401
+ """Generate a iterator of dictionaries where each dictionary is a cartesian product.
+
+ of the values associated with each key in the input dictionary.
+ """
+ keys = kwargs.keys()
+ vals = kwargs.values()
+ for instance in itertools.product(*vals):
+ yield dict(zip(keys, instance))
+
+
+def zip_dict(**kwargs: dict[str, Any]) -> Any: # noqa: ANN401
+ """Create a iterator of dictionaries where each dictionary contains the zip() of the values associated with each key in the input dictionary."""
+ return (dict(zip(kwargs.keys(), values)) for values in zip(*kwargs.values()))
+
+
+def _update_error_rates(success_cnt: int, runs: int, code_k: int) -> tuple[float, float, float, float]:
+ """Calculates logical error rate, logical error rate error bar, word error rate, and word error rate error bar."""
+ logical_err_rate = 1.0 - (success_cnt / runs)
+ logical_err_rate_eb = np.sqrt((1 - logical_err_rate) * logical_err_rate / runs)
+ word_error_rate = 1.0 - (1 - logical_err_rate) ** (1 / code_k)
+    # error propagation: d(wer)/d(ler) = (1 - ler)^(1/k - 1) / k
+    word_error_rate_eb = logical_err_rate_eb * ((1 - logical_err_rate) ** (1 / code_k - 1)) / code_k
+ return (
+ logical_err_rate,
+ logical_err_rate_eb,
+ word_error_rate,
+ word_error_rate_eb,
+ )
+
+
+def merge_datasets(datasets: list[dict[str, Any]]) -> dict[str, Any]:
+ """Merges a list of dictionaries into a single dictionary.
+
+ The values for the fields "nr_runs", "x_success_cnt" and "z_success_cnt" are extracted from each dictionary and added together.
+
+ Args:
+ datasets (List[Dict[str, Any]]): A list of dictionaries to be merged.
+
+ Returns:
+ Dict[str, Any]: A dictionary containing the merged data.
+ """
+ if not datasets:
+ return {}
+
+ # Start with a copy of the first dictionary in the list
+ merged_data = dict(datasets[0])
+
+ # Extract and add up the values for "nr_runs", "x_success_cnt", and "z_success_cnt"
+ for data in datasets[1:]:
+ merged_data["nr_runs"] += data.get("nr_runs", 0)
+ merged_data["x_success_cnt"] += data.get("x_success_cnt", 0)
+ merged_data["z_success_cnt"] += data.get("z_success_cnt", 0)
+
+ # Update logical and word error rates based on accumulated data.
+ runs = merged_data["nr_runs"]
+ x_success_cnt = merged_data["x_success_cnt"]
+ z_success_cnt = merged_data["z_success_cnt"]
+
+ x_ler, x_ler_eb, x_wer, x_wer_eb = _update_error_rates(x_success_cnt, runs, merged_data["code_K"])
+ z_ler, z_ler_eb, z_wer, z_wer_eb = _update_error_rates(z_success_cnt, runs, merged_data["code_K"])
+
+ update_dict = {
+ "x_ler": x_ler,
+ "x_ler_eb": x_ler_eb,
+ "x_wer": x_wer,
+ "x_wer_eb": x_wer_eb,
+ "x_success_cnt": x_success_cnt,
+ "z_ler": z_ler,
+ "z_ler_eb": z_ler_eb,
+ "z_wer": z_wer,
+ "z_wer_eb": z_wer_eb,
+ "z_success_cnt": z_success_cnt,
+ }
+ merged_data.update(update_dict)
+ return merged_data
+
+
+def _merge_datasets_x(_datasets: list[dict[str, Any]]) -> dict[str, Any]:
+ """Merges a list of dictionaries into a single dictionary.
+
+ The values for the fields "nr_runs", "x_success_cnt" and "z_success_cnt" are extracted from each dictionary and added together.
+
+ Args:
+ datasets (List[Dict[str, Any]]): A list of dictionaries to be merged.
+
+ Returns:
+ Dict[str, Any]: A dictionary containing the merged data.
+ """
+ datasets = _datasets.copy()
+
+ if not datasets:
+ return {}
+
+    # keep only datasets that contain x_success_cnt
+    # (build a new list instead of popping while iterating, which would skip entries)
+    datasets = [data for data in datasets if "x_success_cnt" in data]
+
+    # Start with a copy of the first dictionary in the list that contains x_success_cnt
+ # and remove that dict from the list
+ merged_data = {}
+ for i, data in enumerate(datasets):
+ if "x_success_cnt" in data:
+ merged_data = dict(datasets.pop(i))
+ break
+
+    # Extract and add up the values for "nr_runs" and "x_success_cnt"
+ for data in datasets:
+ merged_data["nr_runs"] += data.get("nr_runs", 0)
+ merged_data["x_success_cnt"] += data.get("x_success_cnt", 0)
+
+ # Update logical and word error rates based on accumulated data.
+ runs = merged_data["nr_runs"]
+ x_success_cnt = merged_data["x_success_cnt"]
+
+ x_ler, x_ler_eb, x_wer, x_wer_eb = _update_error_rates(x_success_cnt, runs, merged_data["code_K"])
+
+ update_dict = {
+ "x_ler": x_ler,
+ "x_ler_eb": x_ler_eb,
+ "x_wer": x_wer,
+ "x_wer_eb": x_wer_eb,
+ "x_success_cnt": x_success_cnt,
+ }
+ merged_data.update(update_dict)
+ return merged_data
+
+
+def _merge_datasets_z(_datasets: list[dict[str, Any]]) -> dict[str, Any]:
+ """Merges a list of dictionaries into a single dictionary. The values for the fields "nr_runs".
+
+ "x_success_cnt" and "z_success_cnt" are extracted from each dictionary and added together.
+
+ Args:
+ datasets (List[Dict[str, Any]]): A list of dictionaries to be merged.
+
+ Returns:
+ Dict[str, Any]: A dictionary containing the merged data.
+ """
+ datasets = _datasets.copy()
+ # print("datasets", datasets)
+ if not datasets:
+ return {}
+
+    # keep only datasets that contain z_success_cnt
+    # (build a new list instead of popping while iterating, which would skip entries)
+    datasets = [data for data in datasets if "z_success_cnt" in data]
+
+ # Start with a copy of the first dictionary in the list that contains z_success_cnt
+ # and remove that dict from the list
+ merged_data = {}
+ for i, data in enumerate(datasets):
+ if "z_success_cnt" in data:
+ merged_data = dict(datasets.pop(i))
+ break
+
+    # Extract and add up the values for "nr_runs" and "z_success_cnt"
+ for data in datasets:
+ merged_data["nr_runs"] += data.get("nr_runs", 0)
+ merged_data["z_success_cnt"] += data.get("z_success_cnt", 0)
+
+ # Update logical and word error rates based on accumulated data.
+ runs = merged_data["nr_runs"]
+ z_success_cnt = merged_data["z_success_cnt"]
+
+ z_ler, z_ler_eb, z_wer, z_wer_eb = _update_error_rates(z_success_cnt, runs, merged_data["code_K"])
+
+ update_dict = {
+ "z_ler": z_ler,
+ "z_ler_eb": z_ler_eb,
+ "z_wer": z_wer,
+ "z_wer_eb": z_wer_eb,
+ "z_success_cnt": z_success_cnt,
+ }
+ merged_data.update(update_dict)
+ return merged_data
+
+
+def merge_json_files(input_path: str) -> None:
+ """Iterates through all subfolders in the input folder, loads all JSON files in each subfolder.
+
+ and merges them using the `merge_datasets` function. The resulting dictionaries are
+ stored in a list and then dumped as a JSON file in the input folder.
+
+ Args:
+ input_path (str): The path to the input folder.
+
+ Returns:
+ None
+ """
+ output_data: list[dict[str, Any]] = []
+    for folder_path in Path(input_path).iterdir():  # iterdir() already yields full paths
+        if folder_path.is_dir():
+            data: list[dict[str, Any]] = []
+            for file_path in folder_path.iterdir():
+                if file_path.suffix == ".json":
+                    with file_path.open() as file:
+ try:
+ json_data = json.load(file)
+ data.append(json_data)
+ except JSONDecodeError:
+ # don't care about json decode error here
+ pass
+ merged_data = merge_datasets(data)
+ if merged_data:
+ output_data.append(merged_data)
+
+ # save output to parent directory
+ code_name = Path(input_path).name
+ parent_dir = (Path(input_path) / os.pardir).resolve()
+ with (parent_dir / f"{code_name:s}.json").open("w") as output_file:
+ json.dump(output_data, output_file, ensure_ascii=False, indent=4)
+
+
+def merge_json_files_x(input_path: str) -> None:
+ """Iterates through all subfolders in the input folder, loads all JSON files in each subfolder.
+
+ and merges them using the `merge_datasets` function. The resulting dictionaries are
+ stored in a list and then dumped as a JSON file in the input folder.
+
+ Args:
+ input_path (str): The path to the input folder.
+
+ Returns:
+ None
+ """
+ output_data: list[dict[str, Any]] = []
+    for folder_path in Path(input_path).iterdir():  # iterdir() already yields full paths
+        if folder_path.is_dir():
+            data: list[dict[str, Any]] = []
+            for file_path in folder_path.iterdir():
+                if file_path.suffix == ".json":
+                    with file_path.open() as file:
+ try:
+ json_data = json.load(file)
+ data.append(json_data)
+ except JSONDecodeError:
+ # don't care about json decode error here
+ pass
+ merged_data = _merge_datasets_x(data)
+ if merged_data:
+ output_data.append(merged_data)
+
+ # save output to parent directory
+ code_name = Path(input_path).name
+ parent_dir = (Path(input_path) / os.pardir).resolve()
+ with (parent_dir / f"{code_name:s}.json").open("w") as output_file:
+ json.dump(output_data, output_file, ensure_ascii=False, indent=4)
+
+
+def merge_json_files_z(input_path: str) -> None:
+ """Iterates through all subfolders in the input folder, loads all JSON files in each subfolder.
+
+ and merges them using the `merge_datasets` function. The resulting dictionaries are
+ stored in a list and then dumped as a JSON file in the input folder.
+
+ Args:
+ input_path (str): The path to the input folder.
+
+ Returns:
+ None
+ """
+ output_data: list[dict[str, Any]] = []
+    for folder_path in Path(input_path).iterdir():  # iterdir() already yields full paths
+        if folder_path.is_dir():
+            data: list[dict[str, Any]] = []
+            for file_path in folder_path.iterdir():
+                if file_path.suffix == ".json":
+                    with file_path.open() as file:
+ try:
+ json_data = json.load(file)
+ data.append(json_data)
+ except JSONDecodeError:
+ # don't care about json decode error here
+ pass
+ merged_data = _merge_datasets_z(data)
+ if merged_data:
+ output_data.append(merged_data)
+
+ # save output to parent directory
+ code_name = Path(input_path).name
+ parent_dir = (Path(input_path) / os.pardir).resolve()
+ with (parent_dir / f"{code_name:s}.json").open("w") as output_file:
+ json.dump(output_data, output_file, ensure_ascii=False, indent=4)
+
+
+def merge_json_files_xz(input_path: str) -> None:
+ """Iterates through all subfolders in the input folder, loads all JSON files in each subfolder.
+
+ and merges them using the `merge_datasets` function. The resulting dictionaries are
+ stored in a list and then dumped as a JSON file in the input folder.
+
+ Args:
+ input_path (str): The path to the input folder.
+
+ Returns:
+ None
+ """
+ output_data: list[dict[str, Any]] = []
+    for folder_path in Path(input_path).iterdir():  # iterdir() already yields full paths
+        if folder_path.is_dir():
+            data: list[dict[str, Any]] = []
+            for file_path in folder_path.iterdir():
+                if file_path.suffix == ".json":
+                    with file_path.open() as file:
+ try:
+ json_data = json.load(file)
+ data.append(json_data)
+ except JSONDecodeError:
+ # don't care about json decode error here
+ pass
+ merged_data_x = _merge_datasets_x(data)
+ merged_data_z = _merge_datasets_z(data)
+ merged_data = _combine_xz_data(merged_data_x, merged_data_z)
+ if merged_data:
+ output_data.append(merged_data)
+
+ # save output to parent directory
+ code_name = Path(input_path).name
+ parent_dir = (Path(input_path) / os.pardir).resolve()
+ with (parent_dir / f"{code_name:s}.json").open("w") as output_file:
+ json.dump(output_data, output_file, ensure_ascii=False, indent=4)
+
+
+def _combine_xz_data(xdata: dict[str, Any] | None, zdata: dict[str, Any] | None) -> dict[str, Any]:
+ """Combine the x and z data into a single dictionary.
+
+ Before doing that, rename "runs" in each dictionary to "x_runs" and "z_runs" respectively.
+
+ """
+ if xdata and zdata:
+ xdata["x_runs"] = xdata.pop("nr_runs")
+ zdata["z_runs"] = zdata.pop("nr_runs")
+ xdata.update(zdata)
+ return xdata
+ if xdata:
+ xdata["x_runs"] = xdata.pop("nr_runs")
+ return xdata
+ if zdata:
+ zdata["z_runs"] = zdata.pop("nr_runs")
+ return zdata
+
+ return {}
diff --git a/src/mqt/qecc/analog_information_decoding/utils/simulation_utils.py b/src/mqt/qecc/analog_information_decoding/utils/simulation_utils.py
new file mode 100644
index 00000000..a34ea8ff
--- /dev/null
+++ b/src/mqt/qecc/analog_information_decoding/utils/simulation_utils.py
@@ -0,0 +1,279 @@
+"""Simulation utilities for analog information decoding."""
+
+from __future__ import annotations
+
+import json
+from pathlib import Path
+from typing import TYPE_CHECKING, Any
+
+import numpy as np
+from ldpc.mod2 import rank
+from scipy.special import erfc, erfcinv
+
+from .data_utils import BpParams, calculate_error_rates, replace_inf
+
+if TYPE_CHECKING:
+ from numpy.typing import NDArray
+
+
+def set_seed(value: int) -> None:
+    """Set the numpy random seed; this is the appropriate way to set seeds when numba is used."""
+ np.random.seed(value) # noqa: NPY002
+
+
+def alist2numpy(fname: str) -> NDArray[np.int32]: # current original implementation is buggy
+ """Converts an alist file to a numpy array."""
+ alist_file: NDArray[np.str_] = np.loadtxt(fname, delimiter=",", dtype=str)
+ matrix_dimensions = alist_file[0].split()
+ m = int(matrix_dimensions[0])
+ n = int(matrix_dimensions[1])
+
+ mat: NDArray[np.int32] = np.zeros((m, n), dtype=np.int32)
+
+ for i in range(m):
+ columns = [item for item in alist_file[i + 4].split() if item.isdigit()]
+ columns_two: NDArray[np.int32] = np.array(columns, dtype=np.int32)
+ columns_two = columns_two - 1 # convert to zero indexing
+ mat[i, columns_two] = 1
+
+ return mat
+
+
+# Rewrite such that call signatures of check_logical_err_h
+# and check_logical_err_l are identical
+def check_logical_err_h(
+ check_matrix: NDArray[np.int_], original_err: NDArray[np.int_], decoded_estimate: NDArray[np.int_]
+) -> bool:
+ """Checks if the residual error is a logical error."""
+ _, n = check_matrix.shape
+
+ # compute residual err given original err
+ residual_err: NDArray[np.int32] = np.zeros((n, 1), dtype=np.int32)
+ for i in range(n):
+ residual_err[i][0] = original_err[i] ^ decoded_estimate[i]
+
+ ht = np.transpose(check_matrix)
+
+ htr = np.append(ht, residual_err, axis=1)
+
+ rank_ht = rank(check_matrix) # rank A = rank A.T
+
+ rank_htr = rank(htr)
+
+    return rank_ht < rank_htr
+
+
+# logicals is a numpy array, residual_err is a vector s.t. dimensions match.
+# residual_err is a logical iff it anticommutes with a logical of the other side,
+# i.e., an X residual is a logical iff it anticommutes with at least one Z logical and
+# a Z residual is a logical iff it anticommutes with at least one X logical.
+# Hence, logicals must be of the same type as H and of a different type than residual_err.
+def is_logical_err(logicals: NDArray[np.int_], residual_err: NDArray[np.int_]) -> bool:
+ """Checks if the residual error is a logical error.
+
+    :returns: True if the residual is a logical error, False otherwise (i.e., it is a stabilizer).
+ """
+ l_check = (logicals @ residual_err) % 2
+ return bool(l_check.any()) # check all zeros
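+
+# Illustrative example (hypothetical values): with logicals = np.array([[1, 1, 0]]),
+# the residual [1, 0, 0] yields (logicals @ residual) % 2 == [1], so it is flagged as
+# a logical error, whereas the residual [1, 1, 0] yields [0] and is not.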
+
+
+# adapted from https://github.com/quantumgizmos/bp_osd/blob/a179e6e86237f4b9cc2c952103fce919da2777c8/src/bposd/css_decode_sim.py#L430
+# and https://github.com/MikeVasmer/single_shot_3D_HGP/blob/master/sim_scripts/single_shot_hgp3d.cpp#L207
+# channel_probs = [x,y,z], residual_err = [x,z]
+def generate_err(
+ nr_qubits: int,
+ channel_probs: tuple[NDArray[np.float64], NDArray[np.float64], NDArray[np.float64]],
+ residual_err: list[NDArray[np.int_]],
+) -> tuple[NDArray[np.int_], NDArray[np.int_]]:
+ """Computes error vector with X and Z part given channel probabilities and residual error.
+
+ Assumes that residual error has two equally sized parts.
+ """
+    error_x = residual_err[0]
+    error_z = residual_err[1]
+    channel_probs_x = channel_probs[0]
+    channel_probs_y = channel_probs[1]
+    channel_probs_z = channel_probs[2]
+
+ for i in range(nr_qubits):
+ rand = np.random.random() # this returns a random float in [0,1)
+ # e.g. if err channel is p = 0.3, then an error will be applied if rand < p
+ if rand < channel_probs_z[i]: # if probability for z error high enough, rand < p, apply
+            # if there is a z error on the i-th bit, flip the bit, taking the residual error into account
+            error_z[i] = (error_z[i] + 1) % 2
+ elif ( # if p = 0.3 then 0.3 <= rand < 0.6 is the same sized interval as rand < 0.3
+ channel_probs_z[i] <= rand < (channel_probs_z[i] + channel_probs_x[i])
+ ):
+ # X error
+            error_x[i] = (error_x[i] + 1) % 2
+ elif ( # 0.6 <= rand < 0.9
+ (channel_probs_z[i] + channel_probs_x[i])
+ <= rand
+ < (channel_probs_x[i] + channel_probs_y[i] + channel_probs_z[i])
+ ):
+ # y error == both x and z error
+            error_z[i] = (error_z[i] + 1) % 2
+            error_x[i] = (error_x[i] + 1) % 2
+
+ return error_x, error_z
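+
+# Minimal usage sketch (hypothetical values): a depolarizing channel of strength
+# p = 0.3 on 4 qubits, split evenly over X, Y, and Z, with no residual error:
+#   px = py = pz = np.full(4, 0.1)
+#   residual = [np.zeros(4, dtype=np.int32), np.zeros(4, dtype=np.int32)]
+#   err_x, err_z = generate_err(4, (px, py, pz), residual)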
+
+
+def get_analog_llr(analog_syndrome: NDArray[np.float64], sigma: float) -> NDArray[np.float64]:
+ """Computes analog LLRs given analog syndrome and sigma."""
+ if sigma <= 0.0:
+ return np.zeros_like(analog_syndrome).astype(np.float64)
+ return (2 * analog_syndrome) / (sigma**2)
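+
+# For reference: with sigma = 0.8, a syndrome value of 0.5 yields an LLR of
+# 2 * 0.5 / 0.8**2 = 1.5625 (cf. the corresponding unit test).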
+
+
+def get_sigma_from_syndr_er(ser: float) -> float:
+ """For analog Cat syndrome noise we need to convert the syndrome error model as described in the paper.
+
+ :return: sigma.
+ """
+ if ser == 0.0:
+ return 0.0
+ return float(1 / np.sqrt(2) / (erfcinv(2 * ser))) # see Eq. cref{eq:perr-to-sigma} in our paper
+
+
+def get_error_rate_from_sigma(sigma: float) -> float:
+ """For analog Cat syndrome noise we need to convert the syndrome error model as described in the paper.
+
+ :return: sigma.
+ """
+ if sigma == 0.0:
+ return 0.0
+ return float(0.5 * erfc(1 / np.sqrt(2 * sigma**2))) # see Eq. cref{eq:perr-to-sigma} in our paper
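+
+# Note: the two conversions are inverses of each other, e.g.,
+# get_error_rate_from_sigma(get_sigma_from_syndr_er(0.1)) == 0.1 up to floating point error.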
+
+
+def get_virtual_check_init_vals(noisy_syndr: NDArray[np.float64], sigma: float) -> NDArray[np.float64]:
+ """Computes a vector of values v_i from the noisy syndrome bits y_i s.t.
+
+ BP initializes the LLRs l_i of the analog nodes with the
+ analog info values (see paper section). v_i := 1/(e^{y_i}+1).
+ """
+ if sigma <= 0.0:
+ return np.zeros_like(noisy_syndr).astype(np.float64)
+ llrs = get_analog_llr(noisy_syndr, sigma)
+ return np.array(1 / (np.exp(np.abs(llrs)) + 1))
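+
+# Sanity check: a completely uninformative syndrome value y_i = 0 has LLR l_i = 0
+# and therefore v_i = 1 / (e^0 + 1) = 0.5 (cf. the corresponding unit test).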
+
+
+def generate_syndr_err(channel_probs: NDArray[np.float64]) -> NDArray[np.int32]:
+ """Generates a random error vector given the error channel probabilities."""
+ error: NDArray[np.int32] = np.zeros_like(channel_probs, dtype=np.int32)
+
+ for i, p in np.ndenumerate(channel_probs):
+ rand = np.random.random()
+
+ if rand < p:
+ error[i] = 1
+
+ return error
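+
+# Note: entries with p = 0.0 never flip and entries with p = 1.0 always flip, so,
+# e.g., generate_syndr_err(np.array([0.0, 1.0])) deterministically returns [0, 1].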
+
+
+def get_noisy_analog_syndrome(perfect_syndr: NDArray[np.int_], sigma: float) -> NDArray[np.float64]:
+ """Generate noisy analog syndrome vector given the perfect syndrome and standard deviation sigma (~ noise strength).
+
+ Assumes perfect_syndr has entries in {0,1}.
+ """
+ # compute signed syndrome: 1 = check satisfied, -1 = check violated. float needed for Gaussian sampling call
+ sgns: NDArray[np.float64] = np.where(
+ perfect_syndr == 0.0,
+ np.ones_like(perfect_syndr),
+ np.full_like(perfect_syndr, -1.0),
+ ).astype(float)
+
+    # sample from a Gaussian centered at the signed syndrome with std. dev. sigma: ~N(sgn, sigma**2)
+ return np.array(np.random.default_rng().normal(loc=sgns, scale=sigma, size=perfect_syndr.shape)).astype(np.float64)
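+
+# Note: for sigma = 0 the signed syndrome is returned exactly, e.g., the perfect
+# syndrome [1, 0] maps to [-1.0, 1.0].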
+
+
+def error_channel_setup(
+ error_rate: float, xyz_error_bias: NDArray[np.float64], nr_qubits: int
+) -> tuple[NDArray[np.float64], NDArray[np.float64], NDArray[np.float64]]:
+ """Set up an error_channel given the physical error rate, bias, and number of bits."""
+ xyz_error_bias = np.array(xyz_error_bias)
+ if xyz_error_bias[0] == np.inf:
+ px = error_rate
+ py = 0.0
+ pz = 0.0
+ elif xyz_error_bias[1] == np.inf:
+ px = 0.0
+ py = error_rate
+ pz = 0.0
+ elif xyz_error_bias[2] == np.inf:
+ px = 0.0
+ py = 0.0
+ pz = error_rate
+ else:
+ px, py, pz = (
+ error_rate * xyz_error_bias / np.sum(xyz_error_bias)
+ ) # Oscar only considers X or Z errors. For reproducibility remove normalization
+
+ channel_probs_x = np.ones(nr_qubits) * px
+ channel_probs_z = np.ones(nr_qubits) * pz
+ channel_probs_y = np.ones(nr_qubits) * py
+
+ return channel_probs_x, channel_probs_y, channel_probs_z
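+
+# Illustrative example: error_rate = 0.1 with bias (1, 1, 1) yields
+# px = py = pz = 0.1 / 3 per qubit, while bias (inf, 0, 0) puts the full error
+# rate on X, i.e., px = 0.1 and py = pz = 0.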
+
+
+def build_single_stage_pcm(pcm: NDArray[np.int_], meta: NDArray[np.int_]) -> NDArray[np.int_]:
+ """Build the single statge parity check matrix."""
+ id_r = np.identity(meta.shape[1])
+ zeros = np.zeros((meta.shape[0], pcm.shape[1]))
+ return np.block([[pcm, id_r], [zeros, meta]])
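+
+# The resulting block matrix has the form [[H, I], [0, M]], where H is the parity
+# check matrix, M the meta check matrix, and I an identity block connecting the
+# syndrome error variables to the meta checks.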
+
+
+def get_signed_from_binary(binary_syndrome: NDArray[np.int_]) -> NDArray[np.int_]:
+ """Maps the binary vector with {0,1} entries to a vector with {-1,1} entries."""
+ return np.where(
+ binary_syndrome == 0,
+ np.full(shape=binary_syndrome.shape, fill_value=1),
+ np.full(shape=binary_syndrome.shape, fill_value=-1),
+ )
+
+
+def get_binary_from_analog(analog_syndrome: NDArray[np.float64]) -> NDArray[np.int32]:
+ """Returns the thresholded binary vector.
+
+ Since in {-1,+1} notation -1 indicates a check violation, we map values <= 0 to 1 and values > 0 to 0.
+ """
+ return np.where(analog_syndrome <= 0.0, 1, 0).astype(np.int32)
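+
+# Example: the analog syndrome [-1.0, 3.0, 0.0] is thresholded to [1, 0, 1].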
+
+
+def save_results(
+ success_cnt: int,
+ nr_runs: int,
+ p: float,
+ s: float,
+ input_vals: dict[str, Any],
+ outfile: str,
+ code_params: dict[str, int],
+ bp_params: BpParams | None,
+ err_side: str = "X",
+ bp_iterations: int | None = None,
+) -> dict[str, Any]:
+ """Save results of a simulation run to a json file."""
+ ler, ler_eb, wer, wer_eb = calculate_error_rates(success_cnt, nr_runs, code_params)
+
+ output: dict[str, Any] = {
+ "code_K": code_params["k"],
+ "code_N": code_params["n"],
+ "nr_runs": nr_runs,
+ "pers": p,
+ "sers": s,
+ f"{err_side}_ler": ler,
+ f"{err_side}_ler_eb": ler_eb,
+ f"{err_side}_wer": wer,
+ f"{err_side}_wer_eb": wer_eb,
+ f"{err_side}_success_cnt": success_cnt,
+ "avg_bp_iterations": bp_iterations / nr_runs if bp_iterations is not None else 0,
+ "bp_params": bp_params,
+ }
+
+ output.update(input_vals)
+ output["bias"] = replace_inf(output["bias"])
+ with Path(outfile).open(mode="w") as out:
+ json.dump(output, out, ensure_ascii=False, indent=4, default=lambda o: o.__dict__)
+ return output
diff --git a/src/mqt/qecc/cc_decoder/decoder.py b/src/mqt/qecc/cc_decoder/decoder.py
index 48e6b029..f14cd9c6 100644
--- a/src/mqt/qecc/cc_decoder/decoder.py
+++ b/src/mqt/qecc/cc_decoder/decoder.py
@@ -124,7 +124,7 @@ def solve(
# This is just to measure the time it takes to solve the problem.
with Path("./solver-out_" + solver_path.split("/")[-1] + ".txt").open("a+") as out:
start = datetime.datetime.now()
- subprocess.run([solver_path, wcnf], stdout=out, check=False)
+ subprocess.run([solver_path, wcnf], stdout=out, check=False) # noqa: S603
solve_time = datetime.datetime.now() - start
# pop the context from the optimizer
diff --git a/src/mqt/qecc/cc_decoder/hexagonal_color_code.py b/src/mqt/qecc/cc_decoder/hexagonal_color_code.py
index 47313388..888e72dd 100644
--- a/src/mqt/qecc/cc_decoder/hexagonal_color_code.py
+++ b/src/mqt/qecc/cc_decoder/hexagonal_color_code.py
@@ -38,7 +38,7 @@ def red_row(self, x_max: int, y: int) -> None:
x_row = y
while i < x_max:
data_or_ancilla = i % 3
- if data_or_ancilla in (0, 2):
+ if data_or_ancilla in {0, 2}:
self.data_qubits.add((x_row, y))
else:
self.ancilla_qubits.add((x_row, y))
@@ -51,7 +51,7 @@ def blue_row(self, x_max: int, y: int) -> None:
x_row = y
while i < x_max:
data_or_ancilla = i % 3
- if data_or_ancilla in (0, 1):
+ if data_or_ancilla in {0, 1}:
self.data_qubits.add((x_row, y))
else:
self.ancilla_qubits.add((x_row, y))
@@ -64,7 +64,7 @@ def green_row(self, x_max: int, y: int) -> None:
x_row = y
while i < x_max:
data_or_ancilla = i % 3
- if data_or_ancilla in (1, 2):
+ if data_or_ancilla in {1, 2}:
self.data_qubits.add((x_row, y))
else:
self.ancilla_qubits.add((x_row, y))
diff --git a/src/mqt/qecc/cc_decoder/plots.py b/src/mqt/qecc/cc_decoder/plots.py
index 6803e907..28a87fcb 100644
--- a/src/mqt/qecc/cc_decoder/plots.py
+++ b/src/mqt/qecc/cc_decoder/plots.py
@@ -77,7 +77,7 @@ def calculate_threshold(
ler_eb_data.extend(ler_eb)
distance_data.extend([int(distance) for _ in range(len(per_array))])
- popt, pcov = curve_fit(threshold_fit, (per_data, distance_data), ler_data, maxfev=10000)
+ popt, _ = curve_fit(threshold_fit, (per_data, distance_data), ler_data, maxfev=10000)
if ax is not None:
ax.axvline(x=popt[-1], color="black", linestyle="dashed")
print("threshold: ", popt[-1])
@@ -116,7 +116,7 @@ def generate_plots(results_dir: Path, results_file: Path) -> None:
with file.open() as f:
data.append(json.loads(f.read()))
- fig, ax = plt.subplots(4, 4, figsize=(12, 12))
+ _, ax = plt.subplots(4, 4, figsize=(12, 12))
metrics: dict[int, Any] = {}
per_metrics: dict[float, Any] = {}
@@ -212,7 +212,7 @@ def generate_plots_tn(results_dir: Path, results_file: Path) -> None:
for xys in code_to_xys.values():
xys.sort(key=lambda xy: xy[0])
- fig, ax = plt.subplots(2, 2, figsize=(12, 10))
+ _, ax = plt.subplots(2, 2, figsize=(12, 10))
# add data
for code, xys in sorted(code_to_xys.items()):
ax[0][0].plot(*zip(*xys), "x-", label=f"d={code}")
@@ -242,7 +242,7 @@ def generate_plots_tn(results_dir: Path, results_file: Path) -> None:
pers = [0.001, 0.021, 0.051, 0.081, 0.111]
for d, cdata in sorted(code_to_xys.items()):
ds.append(d)
- for _, (p, t) in enumerate(cdata):
+ for p, t in cdata:
if p in pers:
if p not in p_data:
p_data[p] = {"d": [], "t": []}
@@ -263,7 +263,7 @@ def generate_plots_tn(results_dir: Path, results_file: Path) -> None:
def generate_plots_comp(results_dir: Path, results_file: Path) -> None:
"""Generate plots for the comparison of the different solvers."""
- fig, ax = plt.subplots(2, figsize=(12, 12))
+ _, ax = plt.subplots(2, figsize=(12, 12))
cols = [
"blue",
"orange",
@@ -292,7 +292,7 @@ def generate_plots_comp(results_dir: Path, results_file: Path) -> None:
with Path(fp).open() as ff:
data.append(json.loads(ff.read()))
- metrics: dict[int, dict[str, list[Any]]] = {}
+ metrics: dict[int, dict[str, Any]] = {}
for result in data:
d = result["distance"]
p = result["p"]
@@ -303,7 +303,7 @@ def generate_plots_comp(results_dir: Path, results_file: Path) -> None:
metrics[d]["avg_total_time"].append(result["avg_total_time"])
for d, mdata in sorted(metrics.items()):
- if d in (17, 21):
+ if d in {17, 21}:
(
mdata["p"],
mdata["avg_total_time"],
diff --git a/src/mqt/qecc/ecc_qiskit_wrapper.py b/src/mqt/qecc/ecc_qiskit_wrapper.py
index 056a1d7a..c8bf206e 100644
--- a/src/mqt/qecc/ecc_qiskit_wrapper.py
+++ b/src/mqt/qecc/ecc_qiskit_wrapper.py
@@ -79,7 +79,7 @@ def print_simulation_results(result: Result, n_shots: int, threshold_probability
# Print all results > threshold_probability
if summarized_counts[result_id] / n_shots > threshold_probability or printed_results == 0:
result_string = str(result_id)
- print("State |" + result_string + "> probability " + str(summarized_counts[result_id] / n_shots))
+ print("State |" + result_string + "> probability " + str(summarized_counts[result_id] / n_shots)) # noqa: T201
printed_results += 1
if printed_results == 1000:
break
@@ -152,7 +152,7 @@ def main() -> None:
ecc_frequency = args.fq
ecc_export_filename = args.e
if forced_simulator is not None and "stabilizer" in forced_simulator and "A" in error_channels:
- print(
+ print( # noqa: T201
'Warning: Non-unitary errors (such as for example amplitude damping ("A")) are not suitable for simulation '
"with a stabilizer based simulator and may cause an error during the simulation."
)
@@ -166,7 +166,7 @@ def main() -> None:
circ = QuantumCircuit.from_qasm_file(open_qasm_file)
if not any(gate[0].name == "measure" for gate in circ.data):
- print("Warning: No measurement gates found in the circuit. Adding measurement gates to all qubits.")
+ print("Warning: No measurement gates found in the circuit. Adding measurement gates to all qubits.") # noqa: T201
circ.measure_all()
# Initializing the quantum circuit
@@ -176,12 +176,12 @@ def main() -> None:
circ = QuantumCircuit().from_qasm_str(result["circ"])
if ecc_export_filename is not None:
- print("Exporting circuit to: " + str(ecc_export_filename))
+ print("Exporting circuit to: " + str(ecc_export_filename)) # noqa: T201
circ.qasm(filename=ecc_export_filename)
return
size = circ.num_qubits
- print(
+ print( # noqa: T201
"_____Trying to simulate with "
+ str(error_channels)
+ " (prob="
diff --git a/src/python/bindings.cpp b/src/python/bindings.cpp
index f91134e5..d2095184 100644
--- a/src/python/bindings.cpp
+++ b/src/python/bindings.cpp
@@ -20,7 +20,6 @@
#include "nlohmann/json.hpp"
#include "pybind11/pybind11.h"
#include "pybind11_json/pybind11_json.hpp"
-#include "python/qiskit/QasmQobjExperiment.hpp"
#include "python/qiskit/QuantumCircuit.hpp"
#include
@@ -39,12 +38,9 @@ py::dict applyEcc(const py::object& circ, const std::string& eccName, const size
auto&& file = circ.cast();
qc->import(file);
} else {
- py::object const quantumCircuit = py::module::import("qiskit").attr("QuantumCircuit");
- py::object const pyQasmQobjExperiment = py::module::import("qiskit.qobj").attr("QasmQobjExperiment");
+ py::object const quantumCircuit = py::module::import("qiskit").attr("QuantumCircuit");
if (py::isinstance(circ, quantumCircuit)) {
qc::qiskit::QuantumCircuit::import(*qc, circ);
- } else if (py::isinstance(circ, pyQasmQobjExperiment)) {
- qc::qiskit::QasmQobjExperiment::import(*qc, circ);
}
}
diff --git a/test/python/analog_info/test_analog_tannergraph_decoding.py b/test/python/analog_info/test_analog_tannergraph_decoding.py
new file mode 100644
index 00000000..f8ab4abf
--- /dev/null
+++ b/test/python/analog_info/test_analog_tannergraph_decoding.py
@@ -0,0 +1,177 @@
+"""Tests for analog Tannergraph decoder and simulator."""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+from unittest import mock
+
+import numpy as np
+import pytest
+
+from mqt.qecc.analog_information_decoding.simulators.analog_tannergraph_decoding import (
+ AnalogTannergraphDecoder,
+ AtdSimulator,
+)
+from mqt.qecc.analog_information_decoding.utils import simulation_utils
+from mqt.qecc.analog_information_decoding.utils.data_utils import BpParams
+
+if TYPE_CHECKING:
+ from numpy.typing import NDArray
+
+
+@pytest.fixture()
+def code_params() -> dict[str, int]:
+ """Return code parameters."""
+ return {"n": 7, "k": 3, "d": 2}
+
+
+@pytest.fixture()
+def error_channel() -> NDArray[np.float64]:
+    """Return error channel."""
+    return np.array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]).astype(np.float64)
+
+
+@pytest.fixture()
+def pcm() -> NDArray[np.int32]:
+ """Return parity check matrix."""
+ return np.array([[1, 0, 0, 1, 0, 1, 1], [0, 1, 0, 1, 1, 1, 1], [0, 0, 1, 0, 1, 0, 1]]).astype(np.int32)
+
+
+@pytest.fixture()
+def atd(error_channel: NDArray[np.float64], pcm: NDArray[np.int32]) -> AnalogTannergraphDecoder:
+ """Return distance of the hexagonal color code."""
+ return AnalogTannergraphDecoder(
+ pcm=pcm,
+ bp_params=BpParams(osd_method="osd0"),
+ error_channel=error_channel,
+ sigma=0.1,
+ ser=None,
+ )
+
+
+@pytest.fixture()
+def atd_simulator_sigma(pcm: NDArray[np.int32], code_params: dict[str, int]) -> AtdSimulator:
+ """Return AtdSimulator using sigma to initialize syndrome channel."""
+ return AtdSimulator(
+ hx=pcm,
+ hz=pcm,
+ lx=np.array([1, 1, 1, 1, 1, 1, 1]),
+ lz=np.array([1, 1, 1, 1, 1, 1, 1]),
+ codename="test",
+ data_err_rate=0.1,
+ sigma=0.1,
+ seed=666,
+ bp_params=BpParams(osd_method="osd0"),
+ decoding_method="atd",
+ code_params=code_params,
+ )
+
+
+@pytest.fixture()
+def atd_simulator_ser(pcm: NDArray[np.int32], code_params: dict[str, int]) -> AtdSimulator:
+ """Return AtdSimulator using error rate to initialize syndrome channel."""
+ per = 0.1
+ ser = 0.1
+ return AtdSimulator(
+ hx=pcm,
+ hz=pcm,
+ lx=np.array([1, 1, 1, 1, 1, 1, 1]),
+ lz=np.array([1, 1, 1, 1, 1, 1, 1]),
+ codename="test",
+ data_err_rate=per,
+ syndr_err_rate=ser,
+ seed=666,
+ bp_params=BpParams(osd_method="osd0"),
+ decoding_method="atd",
+ output_path="./res",
+ code_params=code_params,
+ )
+
+
+def test_set_analog_syndrome(atd: AnalogTannergraphDecoder, error_channel: NDArray[np.float64]) -> None:
+ """Test initialization of decoder channel probs with analog syndrome."""
+ analog_syndr = np.array([0.1, 0.1, 0.1])
+ expected_virtual_inits = np.array([2.0611536181902108e-09, 2.0611536181902108e-09, 2.0611536181902108e-09])
+ expected = np.hstack([error_channel, expected_virtual_inits])
+
+ res = atd.decode(analog_syndr)
+ assert np.array_equal(atd.bposd_decoder.channel_probs, expected)
+ assert res is not None
+ assert len(res) == 10
+
+
+def test_atd_simulator_data_error_channels_setup(pcm: NDArray[np.int32], code_params: dict[str, int]) -> None:
+ """Test simulator data error channel computation and initialization."""
+ per = 0.1
+ sigma = 0.1
+ sim = AtdSimulator(
+ hx=pcm,
+ hz=pcm,
+ lx=np.array([]),
+ lz=np.array([]),
+ codename="test",
+ data_err_rate=per,
+ sigma=sigma,
+ seed=666,
+ bp_params=BpParams(osd_method="osd0"),
+ decoding_method="atd",
+ code_params=code_params,
+ )
+ expected_err_chnl = simulation_utils.error_channel_setup(
+ error_rate=per,
+ xyz_error_bias=np.array([0.1, 0.1, 0.1]).astype(np.float64),
+ nr_qubits=pcm.shape[1],
+ )
+ assert np.array_equal(sim.x_decoder.error_channel, expected_err_chnl[0] + expected_err_chnl[1])
+ assert np.array_equal(sim.z_decoder.error_channel, expected_err_chnl[2] + expected_err_chnl[1])
+
+
+def test_atd_simulator_syndrome_error_channels_setup(atd_simulator_sigma: AtdSimulator) -> None:
+ """Test AtdSimulator syndrome channel computation and initialization."""
+ sigma = 0.1
+ ser = simulation_utils.get_error_rate_from_sigma(sigma)
+ expec_chnl = simulation_utils.error_channel_setup(
+ error_rate=ser,
+ xyz_error_bias=np.array([0.1, 0.1, 0.1]).astype(np.float64),
+ nr_qubits=1,
+ )
+ assert atd_simulator_sigma.syndr_err_rate == simulation_utils.get_error_rate_from_sigma(sigma=sigma)
+ assert atd_simulator_sigma.x_sigma == simulation_utils.get_sigma_from_syndr_er(expec_chnl[0][0] + expec_chnl[1][0])
+ assert atd_simulator_sigma.z_sigma == simulation_utils.get_sigma_from_syndr_er(expec_chnl[2][0] + expec_chnl[1][0])
+
+
+def test_atd_simulator_syndrome_error_channels_setup_ser(atd_simulator_ser: AtdSimulator) -> None:
+ """Test AtdSimulator syndrome error computattion and initialization using error rate."""
+ ser = 0.1
+ expec_chnl = simulation_utils.error_channel_setup(
+ error_rate=ser,
+ xyz_error_bias=np.array([0.1, 0.1, 0.1]).astype(np.float64),
+ nr_qubits=1,
+ )
+ assert atd_simulator_ser.x_sigma == simulation_utils.get_sigma_from_syndr_er(expec_chnl[0][0] + expec_chnl[1][0])
+ assert atd_simulator_ser.z_sigma == simulation_utils.get_sigma_from_syndr_er(expec_chnl[2][0] + expec_chnl[1][0])
+
+
+def test_single_sample(atd_simulator_ser: AtdSimulator) -> None:
+ """Test single sample overall."""
+ assert atd_simulator_ser.single_sample() is not None
+ assert atd_simulator_ser.x_bp_iterations is not None
+ assert atd_simulator_ser.z_bp_iterations is not None
+
+
+def test_save_results(atd_simulator_ser: AtdSimulator) -> None:
+    """Test result saving."""
+ with mock.patch("json.dump", return_value=True):
+ res = atd_simulator_ser.save_results(1, 1, 1)
+ assert res is not None
+ assert res["code_K"] == 3
+ assert res["code_N"] == 7
+
+
+def test_run(atd_simulator_ser: AtdSimulator) -> None:
+ """Test run method."""
+ with mock.patch("json.dump", return_value=True):
+ res = atd_simulator_ser.run(1)
+ assert res is not None
+ assert res["code_K"] == 3
+ assert res["code_N"] == 7
diff --git a/test/python/analog_info/test_memory_experiment.py b/test/python/analog_info/test_memory_experiment.py
new file mode 100644
index 00000000..6ba4625b
--- /dev/null
+++ b/test/python/analog_info/test_memory_experiment.py
@@ -0,0 +1,139 @@
+"""Tests for the memory experiment simulator."""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+import numpy as np
+import pytest
+from bposd import bposd_decoder
+
+from mqt.qecc.analog_information_decoding.simulators.memory_experiment_v2 import (
+ build_multiround_pcm,
+ decode_multiround,
+ move_syndrome,
+)
+
+if TYPE_CHECKING:
+ from numpy.typing import NDArray
+ from scipy.sparse import csr_matrix
+
+
+@pytest.fixture()
+def pcm() -> NDArray[np.int32]:
+ """Fixture for parity check matrix of a rep code."""
+ return np.array([[1, 1, 0], [0, 1, 1]])
+
+
+@pytest.fixture()
+def repetitions() -> int:
+ """Fixture for number of repetitions for multiround decoding."""
+ return 3
+
+
+@pytest.fixture()
+def h3d(pcm: NDArray[np.int32], repetitions: int) -> csr_matrix:
+ """Fixture for multiround parity check matrix."""
+ return build_multiround_pcm(pcm, repetitions - 1)
+
+
+@pytest.fixture()
+def channel_probs(repetitions: int, pcm: NDArray[np.int32]) -> NDArray[np.float64]:
+ """Fixture for error channel."""
+ return np.full(shape=repetitions * pcm.shape[1] + repetitions * pcm.shape[0], fill_value=0.1)
+
+
+@pytest.fixture()
+def decoder(channel_probs: NDArray[np.float64], h3d: NDArray[np.int32]) -> bposd_decoder:
+ """Fixture for decoder."""
+ return bposd_decoder(
+ parity_check_matrix=h3d,
+ channel_probs=channel_probs,
+ max_iter=15,
+ bp_method="msl",
+ osd_order=0,
+ osd_method="osd0",
+ ms_scaling_factor=0.5,
+ )
+
+
+def test_build_mr_pcm() -> None:
+ """Test build_multiround_pcm function."""
+ pcm: NDArray[np.int32] = np.array([[1, 1, 0], [0, 1, 1]]).astype(np.int32)
+ mr_pcm = build_multiround_pcm(pcm, 1)
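+    # expected structure for two rounds: [[H, 0, I, 0], [0, H, I, I]], i.e., one
+    # copy of the code's pcm per round plus identity blocks for the syndrome errors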
+ r1 = np.hstack([pcm, np.zeros(pcm.shape), np.identity(2), np.zeros((2, 2))])
+ r2 = np.hstack([np.zeros(pcm.shape), pcm, np.identity(2), np.identity(2)])
+ expected = np.vstack((r1, r2))
+
+ assert np.array_equal(mr_pcm.toarray(), expected)
+
+
+def test_move_syndrome() -> None:
+ """Test move_syndrome function."""
+ # three bit syndrome over 4 rounds
+ syndr = np.array([[0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
+ res = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [1, 0, 0, 0]])
+ assert np.array_equal(res, move_syndrome(syndr))
+
+ syndr = np.array([[0, 0, 1, 1], [0, 0, 1, 1], [0, 0, 1, 1]])
+ res = np.array([[1, 1, 0, 0], [1, 1, 0, 0], [1, 1, 0, 0]])
+ assert np.array_equal(res, move_syndrome(syndr))
+
+ syndr = np.array([[0, 0, 1, 1, 0, 0], [0, 0, 1, 1, 0, 0], [0, 0, 1, 1, 0, 0]])
+ res = np.array([[1, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0]])
+ assert np.array_equal(res, move_syndrome(syndr))
+
+
+def test_decode_multiround_syndr_err(
+ pcm: NDArray[np.int32], channel_probs: NDArray[np.float64], repetitions: int, decoder: bposd_decoder
+) -> None:
+ """Test decoding of multiround syndrome for three bit repetition code."""
+ check_block_size = pcm.shape[1] * repetitions
+
+    analog_syndr = np.array([[0.0, -1.0, 0.0], [0.0, 0.0, 0.0]])
+ sigma = 0.3
+ decoding_method = "bposd"
+ syndrome = np.array([[0, 1, 0], [0, 0, 0]])
+ res = decode_multiround(
+ pcm=pcm,
+ channel_probs=channel_probs,
+        analog_syndr=analog_syndr,
+ decoder=decoder,
+ syndrome=syndrome,
+ repetitions=repetitions,
+ last_round=True,
+ check_block_size=check_block_size,
+ sigma=sigma,
+ decoding_method=decoding_method,
+ )
+ assert np.array_equal(res[0], np.array([0, 0, 0])) # estimate is all zeros
+ assert np.array_equal(res[1], syndrome)
+    assert np.array_equal(res[2], analog_syndr)
+
+
+def test_decode_multiround_data_err(
+ pcm: NDArray[np.int32], channel_probs: NDArray[np.float64], repetitions: int, decoder: bposd_decoder
+) -> None:
+ """Test decoding of multiround syndrome for three bit repetition code."""
+ check_block_size = pcm.shape[1] * repetitions
+    analog_syndr = np.array([[0.0, -1.0, -1.0], [0.0, 0.0, 0.0]])
+ sigma = 0.3
+ decoding_method = "bposd"
+ syndrome = np.array([[0, 1, 1], [0, 0, 0]])
+ res = decode_multiround(
+ pcm=pcm,
+ channel_probs=channel_probs,
+        analog_syndr=analog_syndr,
+ decoder=decoder,
+ syndrome=syndrome,
+ repetitions=repetitions,
+ last_round=False,
+ check_block_size=check_block_size,
+ sigma=sigma,
+ decoding_method=decoding_method,
+ )
+    assert np.array_equal(res[0], np.array([1, 0, 0]))  # estimate detects the data error on the first bit
+ assert np.array_equal(res[1], syndrome)
+    assert np.array_equal(res[2], analog_syndr)
diff --git a/test/python/analog_info/test_qss.py b/test/python/analog_info/test_qss.py
new file mode 100644
index 00000000..44b562f1
--- /dev/null
+++ b/test/python/analog_info/test_qss.py
@@ -0,0 +1,76 @@
+"""Test quasi singleshot simulator."""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+from unittest import mock
+
+import numpy as np
+import pytest
+
+from mqt.qecc import QssSimulator
+from mqt.qecc.analog_information_decoding.utils import simulation_utils
+from mqt.qecc.analog_information_decoding.utils.data_utils import BpParams
+
+if TYPE_CHECKING:
+ from numpy.typing import NDArray
+
+
+@pytest.fixture()
+def pcm() -> NDArray[np.int32]:
+ """Return parity check matrix."""
+ return np.array([[1, 0, 0, 1, 0, 1, 1], [0, 1, 0, 1, 1, 1, 1], [0, 0, 1, 0, 1, 0, 1]]).astype(np.int32)
+
+
+@pytest.fixture()
+def code_params() -> dict[str, int]:
+ """Return code parameters."""
+ return {"n": 7, "k": 3, "d": 2}
+
+
+@pytest.fixture()
+def qss_simulator(pcm: NDArray[np.int32], code_params: dict[str, int]) -> QssSimulator:
+ """Return QSS simulator object."""
+ return QssSimulator(
+ pcm=pcm,
+ per=0.1,
+ ser=0.1,
+ logicals=np.array([1, 1, 1, 1, 1, 1, 1]),
+ bias=np.array([1.0, 1.0, 1.0]),
+ codename="test",
+ bp_params=BpParams(osd_method="osd0"),
+ outpath="./results",
+ repetitions=2,
+ code_params=code_params,
+ rounds=1,
+ )
+
+
+def test_err_channel_setup(qss_simulator: QssSimulator) -> None:
+ """Test computation of error channels."""
+ data_chnl = simulation_utils.error_channel_setup(
+ error_rate=0.1, xyz_error_bias=np.array([1.0, 1.0, 1.0]), nr_qubits=7
+ )
+ syndr_chnl = simulation_utils.error_channel_setup(
+ error_rate=0.1, xyz_error_bias=np.array([1.0, 1.0, 1.0]), nr_qubits=3
+ )
+ expected_syndr_chnl: NDArray[np.float64] = np.array(1.0 * (syndr_chnl[2] + syndr_chnl[1])).astype(np.float64)
+ expected_data_chnl: NDArray[np.float64] = np.array(1.0 * (data_chnl[2] + data_chnl[1])).astype(np.float64)
+ assert qss_simulator.err_idx == 1
+ assert np.allclose(qss_simulator.data_err_channel, expected_data_chnl)
+ assert np.allclose(qss_simulator.syndr_err_channel, expected_syndr_chnl)
+
+
+def test_decoder_error_channel_setup(qss_simulator: QssSimulator) -> None:
+ """Test simulator decoder error channel initialization."""
+ expected = np.full((1, 20), 0.06666667)
+ assert np.allclose(qss_simulator.decoder.channel_probs, expected)
+
+
+def test_single_sample(qss_simulator: QssSimulator) -> None:
+ """Test single sample overall."""
+ with mock.patch("pathlib.Path.open"), mock.patch("json.dump"):
+ res = qss_simulator.run(1)
+ assert res is not None
+ assert res["pers"] == 0.1
+ assert res["code_N"] == 7
diff --git a/test/python/analog_info/test_simulation_utils.py b/test/python/analog_info/test_simulation_utils.py
new file mode 100644
index 00000000..c08b82c5
--- /dev/null
+++ b/test/python/analog_info/test_simulation_utils.py
@@ -0,0 +1,457 @@
+"""Tests for the simulation_utils module."""
+
+from __future__ import annotations
+
+import math
+from typing import TYPE_CHECKING
+
+import numpy as np
+
+from mqt.qecc.analog_information_decoding.utils.simulation_utils import (
+ build_single_stage_pcm,
+ check_logical_err_h,
+ error_channel_setup,
+ generate_err,
+ generate_syndr_err,
+ get_analog_llr,
+ get_binary_from_analog,
+ get_error_rate_from_sigma,
+ get_noisy_analog_syndrome,
+ get_sigma_from_syndr_er,
+ get_signed_from_binary,
+ get_virtual_check_init_vals,
+ is_logical_err,
+)
+
+if TYPE_CHECKING:
+ from numpy.typing import NDArray
+
+
+def test_check_logical_err_h() -> None:
+ """Test check_logical_err_h function."""
+ h = np.array([[1, 0, 0, 1, 0, 1, 1], [0, 1, 0, 1, 1, 0, 1], [0, 0, 1, 0, 1, 1, 1]])
+ # check with logical
+ estimate = np.array([1, 0, 0, 0, 0, 0, 1])
+ assert check_logical_err_h(h, np.array([0, 0, 0, 0, 1, 1, 0]), estimate) is True
+ # check with stabilizer
+ estimate2 = np.array([0, 0, 0, 0, 0, 0, 1])
+ assert check_logical_err_h(h, np.array([1, 1, 1, 0, 0, 0, 0]), estimate2) is False
+
+ # check with all zeros
+ estimate3 = np.array([0, 0, 0, 0, 0, 0, 0])
+ assert check_logical_err_h(h, np.array([0, 0, 0, 0, 0, 0, 0]), estimate3) is False
+
+
+def test_is_logical_err() -> None:
+ """Test is_logical_err function."""
+ # check with logical
+    l_sc = np.array([
+        [1, 0, 0, 1, 0, 0, 1] + [0] * 29,
+        [0] * 9 + [1, 1, 1] + [0] * 24,
+    ])
+    residual = np.array([0] * 9 + [1, 1, 1] + [0] * 24)
+
+ assert is_logical_err(l_sc, residual) is True
+
+ # check with stabilizer
+    residual2 = np.array([0] * 10 + [1, 1] + [0] * 24)
+ assert is_logical_err(l_sc, residual2) is False
+
+ # check with all zeros
+    residual2 = np.array([0] * 36)
+ assert is_logical_err(l_sc, residual2) is False
+
+ # check with non-min weight logical
+    residual3 = np.array([0] * 9 + [1] * 4 + [0] * 23)
+ assert is_logical_err(l_sc, residual3) is True
+
+
+def test_get_analog_llr() -> None:
+ """Test get_analog_llr function."""
+ analog_syndr = np.array([0.5, 0, 0, -1, 0, 1])
+ sigma = 0.8
+
+ assert np.allclose(get_analog_llr(analog_syndr, sigma), np.array([1.5625, 0.0, 0.0, -3.125, 0.0, 3.125]))
+
+ sigma = 0.1
+ assert np.allclose(get_analog_llr(analog_syndr, sigma), np.array([100, 0.0, 0.0, -200.0, 0.0, 200.0]))
+
+
+def test_generate_err() -> None:
+ """Test generate_err function."""
+ # no errors
+ p = 0.0
+ n = 10
+ ch = np.ones(n) * p
+ channel = np.copy(ch), np.copy(ch), np.copy(ch)
+ residual: list[NDArray[np.int32]] = [np.zeros(n).astype(np.int32), np.zeros(n).astype(np.int32)]
+
+ expected = np.array([np.zeros(n).astype(np.float64), np.zeros(n).astype(np.float64)])
+ assert np.array_equal(generate_err(n, channel, residual), expected)
+
+ residual[0][0] = 1
+ residual[1][0] = 1
+
+ expected = np.array([np.copy(residual[0]), np.copy(residual[1])])
+ res = generate_err(n, channel, residual)
+ assert np.array_equal(res[0], expected[0])
+ assert np.array_equal(res[1], expected[1])
+
+
+def test_get_sigma_from_syndr_er() -> None:
+ """Test get_sigma_from_syndr_er function."""
+ ser = 0.1
+
+ assert math.ceil(get_sigma_from_syndr_er(ser)) == math.ceil(0.780304146072379)
+ ser = 1.0
+ assert math.ceil(get_sigma_from_syndr_er(ser)) == -0.0
+ ser = 0.0
+ assert math.ceil(get_sigma_from_syndr_er(ser)) == 0.0
+
+
+def test_get_error_rate_from_sigma() -> None:
+ """Test get_error_rate_from_sigma function."""
+ sigma = 0.3
+ assert np.isclose([get_error_rate_from_sigma(sigma)], [0.00042906])
+ sigma = 0.5
+ assert np.isclose([get_error_rate_from_sigma(sigma)], [0.02275])
+ sigma = 0.0
+ assert get_error_rate_from_sigma(sigma) == 0.0
+
+
+def test_get_virtual_check_init_vals() -> None:
+ """Test get_virtual_check_init_vals function."""
+ noisy_syndr = np.array([0.5, 0, 0, -1, 0, 10])
+ sigma = 0.8
+
+ assert np.allclose(
+ get_virtual_check_init_vals(noisy_syndr, sigma),
+ np.array([
+ 1.73288206e-001,
+ 5.00000000e-001,
+ 5.00000000e-001,
+ 4.20877279e-002,
+ 5.00000000e-001,
+ 1.91855567e-136,
+ ]),
+ )
+
+ sigma = 0.2
+ assert np.allclose(
+ get_virtual_check_init_vals(noisy_syndr, sigma),
+ np.array([3.72007598e-44, 5.00000000e-01, 5.00000000e-01, 1.38389653e-87, 5.00000000e-01, 0.00000000e00]),
+ )
+ sigma = 0.0
+ res = get_virtual_check_init_vals(noisy_syndr, sigma)
+
+ assert res[0] == 0.0
+
+
+def test_generate_syndr_err() -> None:
+ """Test generate_syndr_err function."""
+ channel = np.array([0.0, 1.0])
+
+ assert np.array_equal(generate_syndr_err(channel), np.array([0.0, 1.0]))
+
+ channel = np.array([0.0, 0.0, 0.0])
+ assert np.array_equal(generate_syndr_err(channel), np.zeros_like(channel).astype(float))
+
+ channel = np.array([1.0, 1.0, 1.0])
+ assert np.array_equal(generate_syndr_err(channel), np.ones_like(channel).astype(float))
+
+
+def test_get_noisy_analog_syndr() -> None:
+ """Test get_noisy_analog_syndr function."""
+ perfect_s = np.array([1, 0])
+ sigma = 0.0
+
+ assert np.array_equal(get_noisy_analog_syndrome(perfect_s, sigma), np.array([-1.0, 1.0]))
+
+
+def test_err_chnl_setup() -> None:
+ """Test error_channel_setup function."""
+ p = 0.1
+ bias = np.array([1.0, 1.0, 1.0])
+ n = 10
+ ar = np.ones(n) * p / 3
+ exp = np.array([np.copy(ar), np.copy(ar), np.copy(ar)])
+ res = error_channel_setup(p, bias, n)
+
+ assert np.array_equal(res, exp)
+
+ bias = np.array([1.0, 0.0, 0.0])
+ ar = np.ones(n) * p
+ exp = np.array([np.copy(ar), np.zeros(n), np.zeros(n)])
+ res = error_channel_setup(p, bias, n)
+
+ assert np.array_equal(res, exp)
+
+ bias = np.array([1.0, 1.0, 0.0])
+ ar = np.ones(n) * p / 2
+ exp = np.array([np.copy(ar), np.copy(ar), np.zeros(n)])
+ res = error_channel_setup(p, bias, n)
+
+ assert np.array_equal(res, exp)
+
+ bias = np.array([np.inf, 0.0, 0.0])
+ ar = np.ones(n) * p
+ exp = np.array([np.copy(ar), np.zeros(n), np.zeros(n)])
+ res = error_channel_setup(p, bias, n)
+
+ assert np.array_equal(res, exp)
+
+ bias = np.array([0.0, np.inf, 0.0])
+ ar = np.ones(n) * p
+ exp = np.array([np.zeros(n), np.copy(ar), np.zeros(n)])
+ res = error_channel_setup(p, bias, n)
+
+ assert np.array_equal(res, exp)
+
+
+def test_build_ss_pcm() -> None:
+ """Test build_single_stage_pcm function."""
+ h = np.array([[1, 1, 0, 0], [0, 1, 1, 0], [0, 0, 1, 1], [1, 0, 0, 1]])
+ m = np.array([[1, 1, 0, 0], [0, 1, 1, 0], [0, 0, 1, 1], [1, 0, 0, 1]])
+ id_r = np.identity(m.shape[1])
+ zeros = np.zeros((m.shape[0], h.shape[1]))
+ exp = np.block([[h, id_r], [zeros, m]])
+ assert np.array_equal(build_single_stage_pcm(h, m), exp)
+
+
+def test_get_signed_from_binary() -> None:
+ """Test get_signed_from_binary function."""
+ binary = np.array([1, 0, 0, 1, 0, 1])
+ exp = np.array([-1, 1, 1, -1, 1, -1])
+
+ assert np.array_equal(get_signed_from_binary(binary), exp)
+
+
+def test_get_binary_from_analog() -> None:
+ """Test get_binary_from_analog function."""
+ exp = np.array([1, 0, 0, 1, 0, 1])
+ analog = np.array([-1.0, 3.0, 1.0, -1.0, 1.0, -2])
+
+ assert np.array_equal(get_binary_from_analog(analog), exp)
diff --git a/test/python/analog_info/test_ss_simulation.py b/test/python/analog_info/test_ss_simulation.py
new file mode 100644
index 00000000..f7a8e699
--- /dev/null
+++ b/test/python/analog_info/test_ss_simulation.py
@@ -0,0 +1,168 @@
+"""Test single shot simulator."""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+from unittest import mock
+
+import numpy as np
+import pytest
+
+from mqt.qecc.analog_information_decoding.simulators.simulation import SingleShotSimulator
+from mqt.qecc.analog_information_decoding.utils.simulation_utils import (
+ build_single_stage_pcm,
+ error_channel_setup,
+ get_sigma_from_syndr_er,
+)
+
+from mqt.qecc.analog_information_decoding.utils.data_utils import BpParams
+
+if TYPE_CHECKING:
+    from numpy.typing import NDArray
+
+
+@pytest.fixture()
+def code_params() -> dict[str, int]:
+ """Fixture for code params."""
+ return {"n": 7, "k": 1, "d": 3}
+
+
+@pytest.fixture()
+def mcm() -> NDArray[np.int32]:
+ """Fixture for meta check matrix."""
+ return np.array([[1, 1, 0], [0, 1, 1], [1, 0, 1]]).astype(np.int32)
+
+
+@pytest.fixture()
+def pcm() -> NDArray[np.int32]:
+ """Fixture for parity check matrix."""
+ return np.array([[1, 0, 0, 1, 0, 1, 1], [0, 1, 0, 1, 1, 0, 1], [0, 0, 1, 0, 1, 1, 1]]).astype(np.int32)
+
+
+@pytest.fixture()
+def bias() -> NDArray[np.float64]:
+ """Fixture for x,y,z bias vector."""
+ return np.array([1.0, 1.0, 1.0])
+
+
+@pytest.fixture()
+def error_rate() -> float:
+ """Fixture for error rate."""
+ return 0.1
+
+
+@pytest.fixture()
+def single_stage_analog_simulator(
+ error_rate: float,
+ pcm: NDArray[np.int32],
+ mcm: NDArray[np.int32],
+ code_params: dict[str, int],
+ bias: NDArray[np.float64],
+) -> SingleShotSimulator:
+ """Fixture for single stage analog single shot simulator."""
+ return SingleShotSimulator(
+ codename="test",
+ per=error_rate,
+ ser=error_rate,
+ single_stage=True,
+ seed=666,
+ bias=bias,
+ x_meta=True,
+ z_meta=False,
+ sus_th_depth=1,
+ bp_params=BpParams(osd_method="osd0"),
+ analog_info=False,
+ analog_tg=True,
+ hx=pcm,
+ hz=pcm,
+ mx=mcm,
+ mz=None,
+ lx=None,
+ lz=None,
+ code_params=code_params,
+ )
+
+
+@pytest.fixture()
+def two_stage_simulator(
+ error_rate: float,
+ pcm: NDArray[np.int32],
+ mcm: NDArray[np.int32],
+ code_params: dict[str, int],
+ bias: NDArray[np.float64],
+) -> SingleShotSimulator:
+ """Fixture for hard syndrome two stage simulator."""
+ return SingleShotSimulator(
+ codename="test",
+ per=error_rate,
+ ser=error_rate,
+ single_stage=False,
+ seed=666,
+ bias=bias,
+ x_meta=True,
+ z_meta=False,
+ sus_th_depth=1,
+ bp_params=BpParams(osd_method="osd0"),
+ analog_info=False,
+ analog_tg=False,
+ hx=pcm,
+ hz=pcm,
+ mx=mcm,
+ mz=None,
+ lx=None,
+ lz=None,
+ code_params=code_params,
+ )
+
+
+def test_analog_ss_simulator_setup(
+ error_rate: float,
+ bias: NDArray[np.float64],
+ pcm: NDArray[np.int32],
+ mcm: NDArray[np.int32],
+ single_stage_analog_simulator: SingleShotSimulator,
+) -> None:
+ """Test the initialization of error channels the simulator."""
+ chnl = error_channel_setup(error_rate, bias, pcm.shape[0])
+ expected_x_syndr_chnl = chnl[0] + chnl[1]
+ expected_z_syndr_chnl = chnl[2] + chnl[1]
+ assert np.allclose(expected_x_syndr_chnl, single_stage_analog_simulator.x_syndr_error_channel)
+ assert np.allclose(expected_z_syndr_chnl, single_stage_analog_simulator.z_syndr_error_channel)
+ assert single_stage_analog_simulator.sigma_x == get_sigma_from_syndr_er(expected_x_syndr_chnl[0])
+ assert single_stage_analog_simulator.sigma_z == get_sigma_from_syndr_er(expected_z_syndr_chnl[0])
+ assert np.array_equal(
+ single_stage_analog_simulator.x_apcm, np.hstack([pcm, np.identity(pcm.shape[0], dtype=np.int32)])
+ )
+ assert single_stage_analog_simulator.x_meta is True
+ assert np.array_equal(single_stage_analog_simulator.ss_x_pcm, build_single_stage_pcm(pcm, mcm))
+
+
+def test_single_stage_initialization(single_stage_analog_simulator: SingleShotSimulator) -> None:
+ """Test single stage single shot simulation initialization."""
+ res = single_stage_analog_simulator._single_sample() # noqa: SLF001
+ assert res[0] is not None
+ assert res[1] is not None
+
+
+def test_two_stage_initialization(two_stage_simulator: SingleShotSimulator) -> None:
+    """Test two-stage single shot simulation initialization."""
+ res = two_stage_simulator._two_stage_decoding( # noqa: SLF001
+ z_syndrome_w_err=np.array([0, 0, 0]), x_syndrome_w_err=np.array([0, 0, 0])
+ )
+ assert np.array_equal(res[0], np.array([0, 0, 0, 0, 0, 0, 0]))
+ assert np.array_equal(res[1], np.array([0, 0, 0, 0, 0, 0, 0]))
+
+
+def test_single_sample(single_stage_analog_simulator: SingleShotSimulator) -> None:
+ """Test single sample overall."""
+ with mock.patch("pathlib.Path.open"), mock.patch("json.dump"):
+ res = single_stage_analog_simulator.run(1)
+ assert res is not None
+ assert res["pers"] == 0.1
+ assert res["code_N"] == 7
+ assert res["x_ler"] is not None
+ assert res["x_wer"] is not None
+ assert res["z_wer"] is not None
+ assert res["z_ler"] is not None
+ assert res["x_success_cnt"] is not None
+ assert res["z_success_cnt"] is not None
diff --git a/test/python/cc_decoder/__init__.py b/test/python/cc_decoder/__init__.py
new file mode 100644
index 00000000..712fc5de
--- /dev/null
+++ b/test/python/cc_decoder/__init__.py
@@ -0,0 +1 @@
+"""Tests for the color code decoder module."""
diff --git a/test/python/test_cc_decoder.py b/test/python/cc_decoder/test_cc_decoder.py
similarity index 83%
rename from test/python/test_cc_decoder.py
rename to test/python/cc_decoder/test_cc_decoder.py
index 3ff5b6bd..ba90ead3 100644
--- a/test/python/test_cc_decoder.py
+++ b/test/python/cc_decoder/test_cc_decoder.py
@@ -37,7 +37,7 @@ def nr_sims() -> int:
@pytest.fixture()
def results_dir() -> str:
"""Return directory to store results."""
- return "./results"
+ return "./results/test/"
@pytest.fixture()
@@ -96,17 +96,15 @@ def test_z3_solver(
results_dir: str,
) -> None:
"""Test the Z3 solver."""
- ret = script_runner.run(
- [
- "mqt.qecc.cc-decoder",
- str(d3_hexcode.distance),
- str(p),
- "--nr_sims",
- str(nr_sims),
- "--results_dir",
- results_dir,
- ]
- )
+ ret = script_runner.run([
+ "mqt.qecc.cc-decoder",
+ str(d3_hexcode.distance),
+ str(p),
+ "--nr_sims",
+ str(nr_sims),
+ "--results_dir",
+ results_dir,
+ ])
assert ret.success
assert not ret.stderr
@@ -140,21 +138,19 @@ def test_external_solver(
# mock the subprocess.run call in the `solve` method of the `LightsOut` class
mocker.patch("subprocess.run", return_value=None)
- ret = script_runner.run(
- [
- "mqt.qecc.cc-decoder",
- str(d3_hexcode.distance),
- str(p),
- "--nr_sims",
- str(nr_sims),
- "--results_dir",
- results_dir,
- "--solver",
- solver,
- "--decoder",
- decoder,
- ]
- )
+ ret = script_runner.run([
+ "mqt.qecc.cc-decoder",
+ str(d3_hexcode.distance),
+ str(p),
+ "--nr_sims",
+ str(nr_sims),
+ "--results_dir",
+ results_dir,
+ "--solver",
+ solver,
+ "--decoder",
+ decoder,
+ ])
assert ret.success
assert not ret.stderr
@@ -183,19 +179,17 @@ def test_tn_decoder(script_runner: ScriptRunner, distance: int, p: float, nr_sim
"""Test the TN decoder."""
decoder = "tn"
- ret = script_runner.run(
- [
- "mqt.qecc.cc-decoder",
- str(distance),
- str(p),
- "--nr_sims",
- str(nr_sims),
- "--results_dir",
- results_dir,
- "--decoder",
- decoder,
- ]
- )
+ ret = script_runner.run([
+ "mqt.qecc.cc-decoder",
+ str(distance),
+ str(p),
+ "--nr_sims",
+ str(nr_sims),
+ "--results_dir",
+ results_dir,
+ "--decoder",
+ decoder,
+ ])
assert ret.success
assert not ret.stderr
@@ -216,17 +210,15 @@ def test_scenario_with_logical_errors(
script_runner: ScriptRunner, d3_hexcode: HexagonalColorCode, results_dir: str
) -> None:
"""Test the Z3 solver."""
- ret = script_runner.run(
- [
- "mqt.qecc.cc-decoder",
- str(d3_hexcode.distance),
- "0.2",
- "--nr_sims",
- "50",
- "--results_dir",
- results_dir,
- ]
- )
+ ret = script_runner.run([
+ "mqt.qecc.cc-decoder",
+ str(d3_hexcode.distance),
+ "0.2",
+ "--nr_sims",
+ "50",
+ "--results_dir",
+ results_dir,
+ ])
assert ret.success
assert not ret.stderr
diff --git a/test/python/test_square_octagon_color_code.py b/test/python/cc_decoder/test_square_octagon_color_code.py
similarity index 89%
rename from test/python/test_square_octagon_color_code.py
rename to test/python/cc_decoder/test_square_octagon_color_code.py
index e12d79b6..f2a5636c 100644
--- a/test/python/test_square_octagon_color_code.py
+++ b/test/python/cc_decoder/test_square_octagon_color_code.py
@@ -36,7 +36,7 @@ def nr_sims() -> int:
@pytest.fixture()
def results_dir() -> str:
"""Return directory to store results."""
- return "./results"
+ return "./results/test/"
@pytest.mark.parametrize("code", [SquareOctagonColorCode(distance=d) for d in range(3, 23, 2)])
@@ -77,19 +77,17 @@ def test_z3_solver(
) -> None:
"""Test the Z3 solver."""
d = d3_so_code.distance
- ret = script_runner.run(
- [
- "mqt.qecc.cc-decoder",
- "--type",
- "square_octagon",
- str(d),
- str(p),
- "--nr_sims",
- str(nr_sims),
- "--results_dir",
- results_dir,
- ]
- )
+ ret = script_runner.run([
+ "mqt.qecc.cc-decoder",
+ "--type",
+ "square_octagon",
+ str(d),
+ str(p),
+ "--nr_sims",
+ str(nr_sims),
+ "--results_dir",
+ results_dir,
+ ])
assert ret.success
assert not ret.stderr
diff --git a/test/python/test_utils.py b/test/python/cc_decoder/test_utils.py
similarity index 100%
rename from test/python/test_utils.py
rename to test/python/cc_decoder/test_utils.py
diff --git a/test/python/test_ecc_framework.py b/test/python/ecc_fw/test_ecc_framework.py
similarity index 75%
rename from test/python/test_ecc_framework.py
rename to test/python/ecc_fw/test_ecc_framework.py
index 9572a4cb..90244b50 100644
--- a/test/python/test_ecc_framework.py
+++ b/test/python/ecc_fw/test_ecc_framework.py
@@ -33,21 +33,19 @@ def test_with_stab_simulator(
) -> None:
"""Testing the script with different parameters."""
circ.qasm(filename="dummyCircuit.qasm")
- ret = script_runner.run(
- [
- "ecc_qiskit_wrapper",
- "-m",
- noise_models,
- "-p",
- "0.001",
- "-n",
- "1000",
- "-ecc",
- simulator,
- "-f",
- "dummyCircuit.qasm",
- ]
- )
+ ret = script_runner.run([
+ "ecc_qiskit_wrapper",
+ "-m",
+ noise_models,
+ "-p",
+ "0.001",
+ "-n",
+ "1000",
+ "-ecc",
+ simulator,
+ "-f",
+ "dummyCircuit.qasm",
+ ])
file_to_remove = pathlib.Path("dummyCircuit.qasm")
file_to_remove.unlink()
assert ret.success
@@ -56,25 +54,23 @@ def test_with_stab_simulator(
def test_failing_simulators(circ: QuantumCircuit, script_runner: ScriptRunner) -> None:
"""Testing the script with unsupported ecc."""
circ.qasm(filename="dummyCircuit.qasm")
- ret = script_runner.run(
- [
- "ecc_qiskit_wrapper",
- "-m",
- "D",
- "-p",
- "0.001",
- "-n",
- "1000",
- "-s",
- "1",
- "-fs",
- "extended_stabilizer",
- "-ecc",
- "UnsupportedEcc",
- "-f",
- "dummyCircuit.qasm",
- ]
- )
+ ret = script_runner.run([
+ "ecc_qiskit_wrapper",
+ "-m",
+ "D",
+ "-p",
+ "0.001",
+ "-n",
+ "1000",
+ "-s",
+ "1",
+ "-fs",
+ "extended_stabilizer",
+ "-ecc",
+ "UnsupportedEcc",
+ "-f",
+ "dummyCircuit.qasm",
+ ])
file_to_remove = pathlib.Path("dummyCircuit.qasm")
file_to_remove.unlink()
assert not ret.success
@@ -104,25 +100,23 @@ def test_unavailable_error_type(circ: QuantumCircuit, script_runner: ScriptRunne
def test_statevector_simulators(circ: QuantumCircuit, script_runner: ScriptRunner) -> None:
"""Testing the simulator with a different simulator."""
circ.qasm(filename="dummyCircuit.qasm")
- ret = script_runner.run(
- [
- "ecc_qiskit_wrapper",
- "-m",
- "BAPD",
- "-p",
- "0.001",
- "-n",
- "50",
- "-s",
- "1",
- "-fs",
- "statevector",
- "-ecc",
- "Id",
- "-f",
- "dummyCircuit.qasm",
- ]
- )
+ ret = script_runner.run([
+ "ecc_qiskit_wrapper",
+ "-m",
+ "BAPD",
+ "-p",
+ "0.001",
+ "-n",
+ "50",
+ "-s",
+ "1",
+ "-fs",
+ "statevector",
+ "-ecc",
+ "Id",
+ "-f",
+ "dummyCircuit.qasm",
+ ])
file_to_remove = pathlib.Path("dummyCircuit.qasm")
file_to_remove.unlink()
assert ret.success
@@ -131,15 +125,13 @@ def test_statevector_simulators(circ: QuantumCircuit, script_runner: ScriptRunne
def test_save_circuit(circ: QuantumCircuit, script_runner: ScriptRunner) -> None:
"""Saving a circuit after applying an ECC."""
circ.qasm(filename="dummyCircuit.qasm")
- ret = script_runner.run(
- [
- "ecc_qiskit_wrapper",
- "-e",
- "dummyCircuitWithEcc.qasm",
- "-f",
- "dummyCircuit.qasm",
- ]
- )
+ ret = script_runner.run([
+ "ecc_qiskit_wrapper",
+ "-e",
+ "dummyCircuitWithEcc.qasm",
+ "-f",
+ "dummyCircuit.qasm",
+ ])
for circuit_to_delete in ["dummyCircuit.qasm", "dummyCircuitWithEcc.qasm"]:
file_to_remove = pathlib.Path(circuit_to_delete)
file_to_remove.unlink()
diff --git a/test/python/test_simple.py b/test/python/uf_decoder/test_simple.py
similarity index 100%
rename from test/python/test_simple.py
rename to test/python/uf_decoder/test_simple.py