From e33d02285a4a7961b4eb1be47faf2623778ec9da Mon Sep 17 00:00:00 2001 From: Benjamin Gros Date: Fri, 17 May 2024 12:46:02 -0400 Subject: [PATCH 01/18] Add script to produce test subsets of data --- testing/create_test_dataset.py | 151 +++++++++++++++++++++++++++++++++ 1 file changed, 151 insertions(+) create mode 100644 testing/create_test_dataset.py diff --git a/testing/create_test_dataset.py b/testing/create_test_dataset.py new file mode 100644 index 0000000..5953b89 --- /dev/null +++ b/testing/create_test_dataset.py @@ -0,0 +1,151 @@ +import os +import tarfile +import tifffile +import xmltodict +import typer +from typing_extensions import Annotated +app = typer.Typer() + +def get_members_hybrid(source_dir, step, start_tile_x, start_tile_y, num_tiles_x, num_tiles_y): + """ + Opens the given dataset tarfile and extracts only the needed members + based on the slice step and tiles chosen by the user. + """ + members_wanted = [] + pairs_wanted = [] + slices_wanted = [] + + # get list of all wanted pairs + for numx in range(start_tile_x, start_tile_x+num_tiles_x): + numx = "0"+str(numx) + for numy in range(start_tile_y, start_tile_y+num_tiles_y): + numy = "0"+str(numy) + pair = (numx,numy) + pairs_wanted.append(pair) + + with tarfile.open(source_dir, 'r') as tar: + members = tar.getmembers() + slice = 0 + + # add potential slice numbers to a list + # (this overshoots the real slice count and could use fixing) + while(slice < len(members)): + slices_wanted.append(slice) + slice+=step + max_slice = 0 + for member in members: + # get the pair and the slice of each member in the tarfile + grid = member.name.split("[")[1][:7] + x_num =grid.split(" x ")[0] + y_num = grid.split(" x ")[1] + pair = (x_num,y_num) + slice = int(member.name.split("Z")[1][:4]) + + # find max slice + if(slice>max_slice): + max_slice = slice + + # extract the correct tiff files from tar file + if(pair in pairs_wanted and slice in slices_wanted): + members_wanted.append(member) + tar.extract(member) + + # Remove slices that are greater than the max slice + slices_wanted = (slice for slice in slices_wanted if slice<= max_slice) + slices_wanted = list(slices_wanted) + return members_wanted, slices_wanted, pairs_wanted + + +def correct_metadata_tile(members, slices_wanted, pairs_wanted): + """ + Goes through each member in the given tar file and, if it is the 0th slice + (which contains all the data and metadata),
adjusts it to the configuration set, such as + what slices should be in the new dataset as well as what tiles to include + """ + for member in members: + # get the slice and the channel for each member + member_slice = int(member.name.split("Z")[1][:4]) + channel = int(member.name.split("C")[1][:2]) + if(member_slice == 0): + # load tiff file and access metadata + with tifffile.TiffFile(member.name) as tif: + # update data to only include the file's own data + data = tif.series[0].asarray() + data = data[channel][member_slice][:][:] + meta_dict = xmltodict.parse(tif.ome_metadata) + + # Set to the correct number of slices + meta_dict['OME']['Image']['Pixels']['@SizeZ'] = len(list(slices_wanted)) + + # Remove excess tile configurations from metadata based on tiles + tile_files = meta_dict['OME']['Image']['ca:CustomAttributes']['TileConfiguration']['@TileConfiguration'].split(" ")[1:] + proper_files = [] + for file in tile_files: + grid = file.split("[")[1][:7] + x_num = grid.split(" x ")[0] + y_num = grid.split(" x ")[1] + pair = (x_num, y_num) + if(pair in pairs_wanted): + proper_files.append(file) + new_tile_config = "4" + for file in proper_files: + new_tile_config += " " + file + meta_dict['OME']['Image']['ca:CustomAttributes']['TileConfiguration']['@TileConfiguration'] = new_tile_config + + # Remove excess tile configurations from metadata based on slice + tile_files = meta_dict['OME']['Image']['ca:CustomAttributes']['TileConfiguration']['@TileConfiguration'].split(" ")[1:] + proper_files = [] + for file in tile_files: + if(int(file.split("Z")[1][:4]) in slices_wanted): + proper_files.append(file) + new_tile_config = "4" + for file in proper_files: + new_tile_config += " " + file + meta_dict['OME']['Image']['ca:CustomAttributes']['TileConfiguration']['@TileConfiguration'] = new_tile_config + + # Remove excess TiffData from metadata + tiff_data = meta_dict['OME']['Image']['Pixels']['TiffData'] + new_tiff_data = [] + for single_data in tiff_data: + filename = single_data['UUID']['@FileName'] + grid = filename.split("[")[1][:7] + x_num = grid.split(" x ")[0] + y_num = grid.split(" x ")[1] + pair = (x_num, y_num) + if(pair in pairs_wanted): + new_tiff_data.append(single_data) + meta_dict['OME']['Image']['Pixels']['TiffData'] = new_tiff_data + tiff_data = meta_dict['OME']['Image']['Pixels']['TiffData'] + new_tiff_data = [] + for single_data in tiff_data: + if(int(single_data['@FirstZ']) in list(slices_wanted)): + new_tiff_data.append(single_data) + meta_dict['OME']['Image']['Pixels']['TiffData'] = new_tiff_data + + # write out new metadata and adjusted data + new_description = xmltodict.unparse(meta_dict) + new_description = new_description.encode("UTF-8") + with tifffile.TiffWriter(member.name) as tw: + tw.write(data, description=new_description, metadata=None, planarconfig="CONTIG") + +def make_tar(members, output): + """ + Opens a new tarfile at the specified output, adds all the members that + were selected from the main dataset, and removes the local tiff files produced + """ + with tarfile.open(output, 'w') as tar: + for member in members: + tar.add(member.name, arcname=member.name) + os.remove(member.name) + +@app.command() +def create_test_subset_hybrid(path_to_source_tar:Annotated[str, typer.Argument(help="ex: dir1/dir2/dataset.tar")], + path_to_output_tar:Annotated[str, typer.Argument(help="ex: dir1/dir2/test_dataset.tar")], + slice_step: int=20, x_start: int=0,num_x: int=3,y_start: int=0,num_y: int=3): + members, wanted_slices, wanted_pairs = 
get_members_hybrid(path_to_source_tar,slice_step, x_start,y_start, num_x, num_y) + correct_metadata_tile(members, wanted_slices, wanted_pairs) + make_tar(members, path_to_output_tar) + + +if __name__ == "__main__": + app() \ No newline at end of file From b7e7cd885436d282a664cdbfbdb5f14c1f608f22 Mon Sep 17 00:00:00 2001 From: Benjamin Gros Date: Fri, 17 May 2024 12:47:24 -0400 Subject: [PATCH 02/18] Update dependencies to run the dataset creation script --- poetry.lock | 231 ++++++++++++++++++++++++------------------------- pyproject.toml | 6 ++ 2 files changed, 119 insertions(+), 118 deletions(-) diff --git a/poetry.lock b/poetry.lock index a08873a..9c69cb3 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,10 +1,9 @@ -# This file is automatically @generated by Poetry 1.4.2 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. [[package]] name = "annotated-types" version = "0.6.0" description = "Reusable constraint types to use with typing.Annotated" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -16,7 +15,6 @@ files = [ name = "appdirs" version = "1.4.4" description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." -category = "main" optional = false python-versions = "*" files = [ @@ -28,7 +26,6 @@ files = [ name = "arrow" version = "1.3.0" description = "Better dates & times for Python" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -42,13 +39,12 @@ types-python-dateutil = ">=2.8.10" [package.extras] doc = ["doc8", "sphinx (>=7.0.0)", "sphinx-autobuild", "sphinx-autodoc-typehints", "sphinx_rtd_theme (>=1.3.0)"] -test = ["dateparser (>=1.0.0,<2.0.0)", "pre-commit", "pytest", "pytest-cov", "pytest-mock", "pytz (==2021.1)", "simplejson (>=3.0.0,<4.0.0)"] +test = ["dateparser (==1.*)", "pre-commit", "pytest", "pytest-cov", "pytest-mock", "pytz (==2021.1)", "simplejson (==3.*)"] [[package]] name = "astor" version = "0.8.1" description = "Read/rewrite/write Python ASTs" -category = "main" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" files = [ @@ -60,7 +56,6 @@ files = [ name = "attrs" version = "23.2.0" description = "Classes Without Boilerplate" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -80,7 +75,6 @@ tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "p name = "babel" version = "2.14.0" description = "Internationalization utilities" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -95,7 +89,6 @@ dev = ["freezegun (>=1.0,<2.0)", "pytest (>=6.0)", "pytest-cov"] name = "bids-validator" version = "1.14.0" description = "Validator for the Brain Imaging Data Structure" -category = "main" optional = false python-versions = "*" files = [ @@ -107,7 +100,6 @@ files = [ name = "black" version = "24.3.0" description = "The uncompromising code formatter." -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -152,7 +144,6 @@ uvloop = ["uvloop (>=0.15.2)"] name = "boutiques" version = "0.5.26" description = "Schema for describing bash command-line tools" -category = "main" optional = false python-versions = "*" files = [ @@ -172,7 +163,6 @@ all = ["coverage", "docopt", "mock", "nexus-sdk", "oyaml", "pytest", "pytest-run name = "bracex" version = "2.4" description = "Bash style brace expander." 
-category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -184,7 +174,6 @@ files = [ name = "certifi" version = "2024.2.2" description = "Python package for providing Mozilla's CA Bundle." -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -196,7 +185,6 @@ files = [ name = "charset-normalizer" version = "3.3.2" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." -category = "main" optional = false python-versions = ">=3.7.0" files = [ @@ -296,7 +284,6 @@ files = [ name = "click" version = "8.1.7" description = "Composable command line interface toolkit" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -311,7 +298,6 @@ colorama = {version = "*", markers = "platform_system == \"Windows\""} name = "colorama" version = "0.4.6" description = "Cross-platform colored terminal text." -category = "main" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" files = [ @@ -323,7 +309,6 @@ files = [ name = "configargparse" version = "1.7" description = "A drop-in replacement for argparse that allows options to also be set via config files and/or environment variables." -category = "main" optional = false python-versions = ">=3.5" files = [ @@ -339,7 +324,6 @@ yaml = ["PyYAML"] name = "connection-pool" version = "0.0.3" description = "thread safe connection pool" -category = "main" optional = false python-versions = "*" files = [ @@ -350,7 +334,6 @@ files = [ name = "copier" version = "9.1.1" description = "A library for rendering project templates." -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -378,7 +361,6 @@ questionary = ">=1.8.1" name = "datrie" version = "0.8.2" description = "Super-fast, efficiently stored Trie for Python." -category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ @@ -413,7 +395,6 @@ files = [ name = "decorator" version = "5.1.1" description = "Decorators for Humans" -category = "main" optional = false python-versions = ">=3.5" files = [ @@ -425,7 +406,6 @@ files = [ name = "docopt" version = "0.6.2" description = "Pythonic argument parser, that will make you smile" -category = "main" optional = false python-versions = "*" files = [ @@ -436,7 +416,6 @@ files = [ name = "docutils" version = "0.20.1" description = "Docutils -- Python Documentation Utilities" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -448,7 +427,6 @@ files = [ name = "dpath" version = "2.1.6" description = "Filesystem-like pathing and searching for dictionaries" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -460,7 +438,6 @@ files = [ name = "dunamai" version = "1.19.1" description = "Dynamic version generation" -category = "main" optional = false python-versions = ">=3.5" files = [ @@ -475,7 +452,6 @@ packaging = ">=20.9" name = "fastjsonschema" version = "2.19.1" description = "Fastest Python implementation of JSON schema" -category = "main" optional = false python-versions = "*" files = [ @@ -490,7 +466,6 @@ devel = ["colorama", "json-spec", "jsonschema", "pylint", "pytest", "pytest-benc name = "formulaic" version = "0.5.2" description = "An implementation of Wilkinson formulas." 
-category = "main" optional = false python-versions = ">=3.7.2" files = [ @@ -515,7 +490,6 @@ calculus = ["sympy (>=1.3,<1.10)"] name = "funcy" version = "2.0" description = "A fancy and practical functional tools" -category = "main" optional = false python-versions = "*" files = [ @@ -527,7 +501,6 @@ files = [ name = "ghp-import" version = "2.1.0" description = "Copy your docs directly to the gh-pages branch." -category = "dev" optional = false python-versions = "*" files = [ @@ -545,7 +518,6 @@ dev = ["flake8", "markdown", "twine", "wheel"] name = "gitdb" version = "4.0.11" description = "Git Object Database" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -560,7 +532,6 @@ smmap = ">=3.0.1,<6" name = "gitpython" version = "3.1.41" description = "GitPython is a Python library used to interact with Git repositories" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -578,7 +549,6 @@ test = ["black", "coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock", "mypy", "pre name = "greenlet" version = "3.0.3" description = "Lightweight in-process concurrent programming" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -650,7 +620,6 @@ test = ["objgraph", "psutil"] name = "humanfriendly" version = "10.0" description = "Human friendly output for text interfaces using Python" -category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" files = [ @@ -665,7 +634,6 @@ pyreadline3 = {version = "*", markers = "sys_platform == \"win32\" and python_ve name = "idna" version = "3.6" description = "Internationalized Domain Names in Applications (IDNA)" -category = "main" optional = false python-versions = ">=3.5" files = [ @@ -677,7 +645,6 @@ files = [ name = "importlib-resources" version = "6.1.1" description = "Read resources from Python packages" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -693,7 +660,6 @@ testing = ["pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", name = "interface-meta" version = "1.3.0" description = "`interface_meta` provides a convenient way to expose an extensible API with enforced method signatures and consistent documentation." -category = "main" optional = false python-versions = ">=3.7,<4.0" files = [ @@ -705,7 +671,6 @@ files = [ name = "jinja2" version = "3.1.3" description = "A very fast and expressive template engine." -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -723,7 +688,6 @@ i18n = ["Babel (>=2.7)"] name = "jinja2-ansible-filters" version = "1.3.2" description = "A port of Ansible's jinja2 filters without requiring ansible core." 
-category = "main" optional = false python-versions = "*" files = [ @@ -742,7 +706,6 @@ test = ["pytest", "pytest-cov"] name = "jinja2-time" version = "0.2.0" description = "Jinja2 Extension for Dates and Times" -category = "main" optional = false python-versions = "*" files = [ @@ -758,7 +721,6 @@ jinja2 = "*" name = "jsonschema" version = "4.21.1" description = "An implementation of JSON Schema validation for Python" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -780,7 +742,6 @@ format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339- name = "jsonschema-specifications" version = "2023.12.1" description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -795,7 +756,6 @@ referencing = ">=0.31.0" name = "jupyter-core" version = "5.7.1" description = "Jupyter core package. A base package on which Jupyter projects rely." -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -816,7 +776,6 @@ test = ["ipykernel", "pre-commit", "pytest", "pytest-cov", "pytest-timeout"] name = "lazy-loader" version = "0.3" description = "lazy_loader" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -832,7 +791,6 @@ test = ["pytest (>=7.4)", "pytest-cov (>=4.1)"] name = "markdown" version = "3.6" description = "Python implementation of John Gruber's Markdown." -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -844,11 +802,34 @@ files = [ docs = ["mdx-gh-links (>=0.2)", "mkdocs (>=1.5)", "mkdocs-gen-files", "mkdocs-literate-nav", "mkdocs-nature (>=0.6)", "mkdocs-section-index", "mkdocstrings[python]"] testing = ["coverage", "pyyaml"] +[[package]] +name = "markdown-it-py" +version = "3.0.0" +description = "Python port of markdown-it. Markdown parsing, done right!" +optional = false +python-versions = ">=3.8" +files = [ + {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"}, + {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"}, +] + +[package.dependencies] +mdurl = ">=0.1,<1.0" + +[package.extras] +benchmarking = ["psutil", "pytest", "pytest-benchmark"] +code-style = ["pre-commit (>=3.0,<4.0)"] +compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] +linkify = ["linkify-it-py (>=1,<3)"] +plugins = ["mdit-py-plugins"] +profiling = ["gprof2dot"] +rtd = ["jupyter_sphinx", "mdit-py-plugins", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] +testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] + [[package]] name = "markupsafe" version = "2.1.5" description = "Safely add untrusted strings to HTML/XML markup." 
-category = "main" optional = false python-versions = ">=3.7" files = [ @@ -914,11 +895,21 @@ files = [ {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"}, ] +[[package]] +name = "mdurl" +version = "0.1.2" +description = "Markdown URL utilities" +optional = false +python-versions = ">=3.7" +files = [ + {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, + {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, +] + [[package]] name = "mergedeep" version = "1.3.4" description = "A deep merge function for 🐍." -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -930,7 +921,6 @@ files = [ name = "mkdocs" version = "1.6.0" description = "Project documentation with Markdown." -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -961,7 +951,6 @@ min-versions = ["babel (==2.9.0)", "click (==7.0)", "colorama (==0.4)", "ghp-imp name = "mkdocs-get-deps" version = "0.2.0" description = "MkDocs extension that lists all dependencies according to a mkdocs.yml file" -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -978,7 +967,6 @@ pyyaml = ">=5.1" name = "mkdocs-include-markdown-plugin" version = "6.0.6" description = "Mkdocs Markdown includer plugin." -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -997,7 +985,6 @@ cache = ["platformdirs"] name = "mkdocs-material" version = "9.5.20" description = "Documentation that simply works" -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -1027,7 +1014,6 @@ recommended = ["mkdocs-minify-plugin (>=0.7,<1.0)", "mkdocs-redirects (>=1.2,<2. name = "mkdocs-material-extensions" version = "1.3.1" description = "Extension pack for Python Markdown and MkDocs Material." -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -1039,7 +1025,6 @@ files = [ name = "more-itertools" version = "10.2.0" description = "More routines for operating on iterables, beyond itertools" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -1051,7 +1036,6 @@ files = [ name = "mypy-extensions" version = "1.0.0" description = "Type system extensions for programs checked with the mypy type checker." -category = "dev" optional = false python-versions = ">=3.5" files = [ @@ -1063,7 +1047,6 @@ files = [ name = "nbformat" version = "5.9.2" description = "The Jupyter Notebook format" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -1085,7 +1068,6 @@ test = ["pep440", "pre-commit", "pytest", "testpath"] name = "nibabel" version = "5.2.0" description = "Access a multitude of neuroimaging data formats" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -1115,7 +1097,6 @@ zstd = ["pyzstd (>=0.14.3)"] name = "num2words" version = "0.5.13" description = "Modules to convert numbers to words. Easily extensible." 
-category = "main" optional = false python-versions = "*" files = [ @@ -1130,7 +1111,6 @@ docopt = ">=0.6.2" name = "numpy" version = "1.26.4" description = "Fundamental package for array computing in Python" -category = "main" optional = false python-versions = ">=3.9" files = [ @@ -1176,7 +1156,6 @@ files = [ name = "packaging" version = "23.2" description = "Core utilities for Python packages" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -1188,7 +1167,6 @@ files = [ name = "paginate" version = "0.5.6" description = "Divides large result sets into pages for easier browsing" -category = "dev" optional = false python-versions = "*" files = [ @@ -1199,7 +1177,6 @@ files = [ name = "pandas" version = "2.2.0" description = "Powerful data structures for data analysis, time series, and statistics" -category = "main" optional = false python-versions = ">=3.9" files = [ @@ -1268,7 +1245,6 @@ xml = ["lxml (>=4.9.2)"] name = "pathspec" version = "0.12.1" description = "Utility library for gitignore style pattern matching of file paths." -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -1280,7 +1256,6 @@ files = [ name = "plac" version = "1.4.2" description = "The smartest command line arguments parser in the world" -category = "main" optional = false python-versions = "*" files = [ @@ -1292,7 +1267,6 @@ files = [ name = "platformdirs" version = "4.2.0" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -1308,7 +1282,6 @@ test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest- name = "plumbum" version = "1.8.2" description = "Plumbum: shell combinators library" -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -1328,7 +1301,6 @@ ssh = ["paramiko"] name = "prompt-toolkit" version = "3.0.36" description = "Library for building powerful interactive command lines in Python" -category = "main" optional = false python-versions = ">=3.6.2" files = [ @@ -1343,7 +1315,6 @@ wcwidth = "*" name = "psutil" version = "5.9.8" description = "Cross-platform lib for process and system monitoring in Python." -category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" files = [ @@ -1372,7 +1343,6 @@ test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"] name = "pulp" version = "2.7.0" description = "PuLP is an LP modeler written in python. PuLP can generate MPS or LP files and call GLPK, COIN CLP/CBC, CPLEX, and GUROBI to solve linear problems." 
-category = "main" optional = false python-versions = "*" files = [ @@ -1384,7 +1354,6 @@ files = [ name = "pvandyken-deprecated" version = "0.0.4" description = "Wrapper to manage deprecations" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -1400,7 +1369,6 @@ typing-extensions = ">=3.10.0" name = "pybids" version = "0.16.4" description = "bids: interface with datasets conforming to BIDS" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -1434,7 +1402,6 @@ tutorial = ["ipykernel", "jinja2", "jupyter-client", "markupsafe", "nbconvert"] name = "pydantic" version = "2.6.1" description = "Data validation using Python type hints" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -1454,7 +1421,6 @@ email = ["email-validator (>=2.0.0)"] name = "pydantic-core" version = "2.16.2" description = "" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -1546,7 +1512,6 @@ typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" name = "pygments" version = "2.17.2" description = "Pygments is a syntax highlighting package written in Python." -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -1562,7 +1527,6 @@ windows-terminal = ["colorama (>=0.4.6)"] name = "pymdown-extensions" version = "10.8.1" description = "Extension pack for Python Markdown." -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -1581,7 +1545,6 @@ extra = ["pygments (>=2.12)"] name = "pyreadline3" version = "3.4.1" description = "A python implementation of GNU readline." -category = "main" optional = false python-versions = "*" files = [ @@ -1593,7 +1556,6 @@ files = [ name = "python-dateutil" version = "2.8.2" description = "Extensions to the standard Python datetime module" -category = "main" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" files = [ @@ -1608,7 +1570,6 @@ six = ">=1.5" name = "pytz" version = "2024.1" description = "World timezone definitions, modern and historical" -category = "main" optional = false python-versions = "*" files = [ @@ -1620,7 +1581,6 @@ files = [ name = "pywin32" version = "306" description = "Python for Window Extensions" -category = "main" optional = false python-versions = "*" files = [ @@ -1644,7 +1604,6 @@ files = [ name = "pyyaml" version = "6.0.1" description = "YAML parser and emitter for Python" -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -1705,7 +1664,6 @@ files = [ name = "pyyaml-env-tag" version = "0.1" description = "A custom YAML tag for referencing environment variables in YAML files. " -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -1720,7 +1678,6 @@ pyyaml = "*" name = "pyyaml-include" version = "1.3.2" description = "Extending PyYAML with a custom constructor for including YAML files within YAML files" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -1738,7 +1695,6 @@ toml = ["toml"] name = "questionary" version = "2.0.1" description = "Python library to build pretty command line user prompts ⭐️" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -1753,7 +1709,6 @@ prompt_toolkit = ">=2.0,<=3.0.36" name = "referencing" version = "0.33.0" description = "JSON Referencing + Python" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -1769,7 +1724,6 @@ rpds-py = ">=0.7.0" name = "regex" version = "2024.4.28" description = "Alternative regular expression module, to replace re." 
-category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -1858,7 +1812,6 @@ files = [ name = "requests" version = "2.31.0" description = "Python HTTP for Humans." -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -1880,7 +1833,6 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] name = "reretry" version = "0.11.8" description = "An easy to use, but functional decorator for retrying on exceptions." -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -1888,11 +1840,28 @@ files = [ {file = "reretry-0.11.8.tar.gz", hash = "sha256:f2791fcebe512ea2f1d153a2874778523a8064860b591cd90afc21a8bed432e3"}, ] +[[package]] +name = "rich" +version = "13.7.1" +description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "rich-13.7.1-py3-none-any.whl", hash = "sha256:4edbae314f59eb482f54e9e30bf00d33350aaa94f4bfcd4e9e3110e64d0d7222"}, + {file = "rich-13.7.1.tar.gz", hash = "sha256:9be308cb1fe2f1f57d67ce99e95af38a1e2bc71ad9813b0e247cf7ffbcc3a432"}, +] + +[package.dependencies] +markdown-it-py = ">=2.2.0" +pygments = ">=2.13.0,<3.0.0" + +[package.extras] +jupyter = ["ipywidgets (>=7.5.1,<9)"] + [[package]] name = "rpds-py" version = "0.17.1" description = "Python bindings to Rust's persistent data structures (rpds)" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -2001,7 +1970,6 @@ files = [ name = "ruamel-yaml" version = "0.18.6" description = "ruamel.yaml is a YAML parser/emitter that supports roundtrip preservation of comments, seq/map flow style, and map key order" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -2020,7 +1988,6 @@ jinja2 = ["ruamel.yaml.jinja2 (>=0.2)"] name = "ruamel-yaml-clib" version = "0.2.8" description = "C version of reader, parser and emitter for ruamel.yaml derived from libyaml" -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -2080,7 +2047,6 @@ files = [ name = "scipy" version = "1.12.0" description = "Fundamental algorithms for scientific computing in Python" -category = "main" optional = false python-versions = ">=3.9" files = [ @@ -2119,11 +2085,21 @@ dev = ["click", "cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy", "pycodestyl doc = ["jupytext", "matplotlib (>2)", "myst-nb", "numpydoc", "pooch", "pydata-sphinx-theme (==0.9.0)", "sphinx (!=4.1.0)", "sphinx-design (>=0.2.0)"] test = ["asv", "gmpy2", "hypothesis", "mpmath", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] +[[package]] +name = "shellingham" +version = "1.5.4" +description = "Tool to Detect Surrounding Shell" +optional = false +python-versions = ">=3.7" +files = [ + {file = "shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686"}, + {file = "shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de"}, +] + [[package]] name = "simplejson" version = "3.19.2" description = "Simple, fast, extensible JSON encoder/decoder for Python" -category = "main" optional = false python-versions = ">=2.5, !=3.0.*, !=3.1.*, !=3.2.*" files = [ @@ -2231,7 +2207,6 @@ files = [ name = "six" version = "1.16.0" description = "Python 2 and 3 compatibility utilities" -category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" files = [ @@ -2243,7 +2218,6 @@ files = [ name = "smart-open" 
version = "6.4.0" description = "Utils for streaming large files (S3, HDFS, GCS, Azure Blob Storage, gzip, bz2...)" -category = "main" optional = false python-versions = ">=3.6,<4.0" files = [ @@ -2265,7 +2239,6 @@ webhdfs = ["requests"] name = "smmap" version = "5.0.1" description = "A pure Python implementation of a sliding window memory map manager" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -2277,7 +2250,6 @@ files = [ name = "snakebids" version = "0.11.0" description = "BIDS integration into snakemake workflows" -category = "main" optional = false python-versions = ">=3.8,<4.0" files = [ @@ -2299,17 +2271,13 @@ pybids = ">=0.16.0,<0.17" requests = ">=2.31.0" ruamel-yaml = ">=0.17.2" scipy = {version = ">=1.10.0", markers = "python_version >= \"3.9\""} -snakemake = [ - {version = ">=5.28.0,<8", markers = "python_version >= \"3.8\""}, - {version = ">=7.18.2,<8", markers = "python_version >= \"3.11\""}, -] +snakemake = {version = ">=7.18.2,<8", markers = "python_version >= \"3.11\""} typing-extensions = ">=3.10.0" [[package]] name = "snakefmt" version = "0.10.0" description = "The uncompromising Snakemake code formatter" -category = "dev" optional = false python-versions = ">=3.8.1,<4.0.0" files = [ @@ -2326,7 +2294,6 @@ toml = ">=0.10.2,<0.11.0" name = "snakemake" version = "7.32.4" description = "Workflow management system to create reproducible and scalable data analyses" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -2369,7 +2336,6 @@ reports = ["pygments"] name = "sqlalchemy" version = "2.0.27" description = "Database Abstraction Library" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -2457,7 +2423,6 @@ sqlcipher = ["sqlcipher3_binary"] name = "stopit" version = "1.1.2" description = "Timeout control decorator and context managers, raise any exception in another thread" -category = "main" optional = false python-versions = "*" files = [ @@ -2468,7 +2433,6 @@ files = [ name = "tabulate" version = "0.9.0" description = "Pretty-print tabular data" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -2483,7 +2447,6 @@ widechars = ["wcwidth"] name = "termcolor" version = "2.4.0" description = "ANSI color formatting for output in terminal" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -2498,7 +2461,6 @@ tests = ["pytest", "pytest-cov"] name = "throttler" version = "1.2.2" description = "Zero-dependency Python package for easy throttling with asyncio support" -category = "main" optional = false python-versions = "*" files = [ @@ -2509,11 +2471,27 @@ files = [ [package.extras] dev = ["aiohttp (>=3.8)", "codecov (>=2.1)", "flake8 (>=4.0)", "pytest (>=7.0)", "pytest-asyncio (>=0.16)", "pytest-cov (>=3.0)"] +[[package]] +name = "tifffile" +version = "2024.5.10" +description = "Read and write TIFF files" +optional = false +python-versions = ">=3.9" +files = [ + {file = "tifffile-2024.5.10-py3-none-any.whl", hash = "sha256:4154f091aa24d4e75bfad9ab2d5424a68c70e67b8220188066dc61946d4551bd"}, + {file = "tifffile-2024.5.10.tar.gz", hash = "sha256:aa1e1b12be952ab20717d6848bd6d4a5ee88d2aa319f1152bff4354ad728ec86"}, +] + +[package.dependencies] +numpy = "*" + +[package.extras] +all = ["defusedxml", "fsspec", "imagecodecs (>=2023.8.12)", "lxml", "matplotlib", "zarr"] + [[package]] name = "toml" version = "0.10.2" description = "Python Library for Tom's Obvious, Minimal Language" -category = "dev" optional = false python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" 
files = [ @@ -2525,7 +2503,6 @@ files = [ name = "toposort" version = "1.10" description = "Implements a topological sort algorithm." -category = "main" optional = false python-versions = "*" files = [ @@ -2537,7 +2514,6 @@ files = [ name = "traitlets" version = "5.14.1" description = "Traitlets Python configuration system" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -2549,11 +2525,27 @@ files = [ docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0,<7.5)", "pytest-mock", "pytest-mypy-testing"] +[[package]] +name = "typer" +version = "0.12.3" +description = "Typer, build great CLIs. Easy to code. Based on Python type hints." +optional = false +python-versions = ">=3.7" +files = [ + {file = "typer-0.12.3-py3-none-any.whl", hash = "sha256:070d7ca53f785acbccba8e7d28b08dcd88f79f1fbda035ade0aecec71ca5c914"}, + {file = "typer-0.12.3.tar.gz", hash = "sha256:49e73131481d804288ef62598d97a1ceef3058905aa536a1134f90891ba35482"}, +] + +[package.dependencies] +click = ">=8.0.0" +rich = ">=10.11.0" +shellingham = ">=1.3.0" +typing-extensions = ">=3.7.4.3" + [[package]] name = "types-python-dateutil" version = "2.8.19.20240106" description = "Typing stubs for python-dateutil" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -2565,7 +2557,6 @@ files = [ name = "typing-extensions" version = "4.9.0" description = "Backported and Experimental Type Hints for Python 3.8+" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -2577,7 +2568,6 @@ files = [ name = "tzdata" version = "2024.1" description = "Provider of IANA time zone data" -category = "main" optional = false python-versions = ">=2" files = [ @@ -2589,7 +2579,6 @@ files = [ name = "urllib3" version = "2.2.0" description = "HTTP library with thread-safe connection pooling, file post, and more." -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -2607,7 +2596,6 @@ zstd = ["zstandard (>=0.18.0)"] name = "watchdog" version = "4.0.0" description = "Filesystem events monitoring" -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -2649,7 +2637,6 @@ watchmedo = ["PyYAML (>=3.10)"] name = "wcmatch" version = "8.5.1" description = "Wildcard/glob file name matcher." -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -2664,7 +2651,6 @@ bracex = ">=2.1.1" name = "wcwidth" version = "0.2.13" description = "Measures the displayed width of unicode strings in a terminal" -category = "main" optional = false python-versions = "*" files = [ @@ -2676,7 +2662,6 @@ files = [ name = "wrapt" version = "1.16.0" description = "Module for decorators, wrappers and monkey patching." 
-category = "main" optional = false python-versions = ">=3.6" files = [ @@ -2752,11 +2737,21 @@ files = [ {file = "wrapt-1.16.0.tar.gz", hash = "sha256:5f370f952971e7d17c7d1ead40e49f32345a7f7a5373571ef44d800d06b1899d"}, ] +[[package]] +name = "xmltodict" +version = "0.13.0" +description = "Makes working with XML feel like you are working with JSON" +optional = false +python-versions = ">=3.4" +files = [ + {file = "xmltodict-0.13.0-py2.py3-none-any.whl", hash = "sha256:aa89e8fd76320154a40d19a0df04a4695fb9dc5ba977cbb68ab3e4eb225e7852"}, + {file = "xmltodict-0.13.0.tar.gz", hash = "sha256:341595a488e3e01a85a9d8911d8912fd922ede5fecc4dce437eb4b6c8d037e56"}, +] + [[package]] name = "yte" version = "1.5.4" description = "A YAML template engine with Python expressions" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -2772,4 +2767,4 @@ pyyaml = ">=6.0,<7.0" [metadata] lock-version = "2.0" python-versions = ">=3.11,<3.12" -content-hash = "b522ee97794ae60c046f7e9fd4c367575087fb748af7460fb66c963306b14669" +content-hash = "34ff8ffe758ebe806dd56f8c9525fceb7c62a762b899cd153bb1475867154173" diff --git a/pyproject.toml b/pyproject.toml index 2c3a889..d80f0a8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -17,6 +17,12 @@ snakefmt = "^0.10.0" mkdocs-material = "^9.5.20" mkdocs-include-markdown-plugin = "^6.0.6" + +[tool.poetry.group.dataset_creation.dependencies] +typer = "^0.12.3" +xmltodict = "^0.13.0" +tifffile = "^2024.5.10" + [build-system] requires = ["poetry-core"] build-backend = "poetry.core.masonry.api" From 9019a7bcecacf06d12cf1a3eb5f589f030ae943c Mon Sep 17 00:00:00 2001 From: Benjamin Gros Date: Mon, 3 Jun 2024 15:26:24 -0400 Subject: [PATCH 03/18] Build in qc reports into the workflow --- config/config.yml | 11 +- qc_viewer/resources/sliceViewer/ff_corr.html | 72 +++++ .../resources/sliceViewer/image_expand.js | 42 +++ qc_viewer/resources/sliceViewer/style.css | 14 + .../resources/sliceViewer/whole_slices.html | 56 ++++ qc_viewer/resources/volumeViewer/cm_gray.png | Bin 0 -> 75 bytes .../resources/volumeViewer/cm_viridis.png | Bin 0 -> 238 bytes .../resources/volumeViewer/volRender.html | 22 ++ .../resources/volumeViewer/volRenderScript.js | 172 +++++++++++ workflow/Snakefile | 2 + workflow/rules/common.smk | 9 + workflow/rules/flatfield_corr.smk | 3 + workflow/rules/import.smk | 2 + workflow/rules/ome_zarr.smk | 22 ++ .../apply_basic_flatfield_corr_zarr.py | 1 + workflow/scripts/create_viewer.py | 288 ++++++++++++++++++ workflow/scripts/tif_to_zarr.py | 1 + 17 files changed, 716 insertions(+), 1 deletion(-) create mode 100644 qc_viewer/resources/sliceViewer/ff_corr.html create mode 100644 qc_viewer/resources/sliceViewer/image_expand.js create mode 100644 qc_viewer/resources/sliceViewer/style.css create mode 100644 qc_viewer/resources/sliceViewer/whole_slices.html create mode 100644 qc_viewer/resources/volumeViewer/cm_gray.png create mode 100644 qc_viewer/resources/volumeViewer/cm_viridis.png create mode 100644 qc_viewer/resources/volumeViewer/volRender.html create mode 100644 qc_viewer/resources/volumeViewer/volRenderScript.js create mode 100644 workflow/scripts/create_viewer.py diff --git a/config/config.yml b/config/config.yml index 7b09a08..abd2409 100644 --- a/config/config.yml +++ b/config/config.yml @@ -126,8 +126,17 @@ bids: readme_md: resources/bids_template_files/README.md samples_json: resources/bids_template_files/samples.json +report: + create_report: True + flatfield_corrected: + slice_start: 0 + slice_step: 1 + colour_map: gray + 
whole_slice_viewer: + slice_start: 0 + slice_step: 1 + colour_map: gray - containers: spimprep: 'docker://khanlab/spimprep-deps:main' diff --git a/qc_viewer/resources/sliceViewer/ff_corr.html b/qc_viewer/resources/sliceViewer/ff_corr.html new file mode 100644 index 0000000..450693e --- /dev/null +++ b/qc_viewer/resources/sliceViewer/ff_corr.html @@ -0,0 +1,72 @@ + + + + FlatField Correction Check + + + + Back + + + +

Before and After Flatfield Correction

+ + + + + + + + + + + + + + + + + +
+

Chunk - 0 Channel - 0

+
+ +

Corrected

+

Slice-1

+
+ +

Uncorrected

+

Slice-1

+
+ +

Corrected

+

Slice-2

+
+ +

Uncorrected

+

Slice-2

+
+

Chunk - 0 Channel - 1

+
+ +

Corrected

+

Slice-1

+
+ +

Uncorrected

+

Slice-1

+
+ +

Corrected

+

Slice-2

+
+ +

Uncorrected

+

Slice-2

+
+ + \ No newline at end of file diff --git a/qc_viewer/resources/sliceViewer/image_expand.js b/qc_viewer/resources/sliceViewer/image_expand.js new file mode 100644 index 0000000..a7777d0 --- /dev/null +++ b/qc_viewer/resources/sliceViewer/image_expand.js @@ -0,0 +1,42 @@ +// get every image, expandButtonToggle and the expand factor +const images = document.querySelectorAll('img'); +const expandButton = document.getElementById("expand") +const expand_factor = document.getElementById("expand_scale") +let expansion_scale = 1 +let expanded = false; + +// create a function to exxpand images when clicked on +function handleImageClick(event){ + const image = event.target + expansion_scale = expand_factor.value; + console.log(expansion_scale) + if(!expanded){ + image.style.transform = `scale(${expansion_scale})` + image.style.zIndex = 1 + const leftDistance = image.getBoundingClientRect().left; + if(leftDistance < 0){ + image.style.transform = `translateX(${Math.abs(leftDistance)+10}px) scale(2)`; + } + expanded=true + } else { + image.style.transform = "scale(1)" + image.style.position = 'relative' + image.style.zIndex=0 + expanded=false + } +} + +expandButton.addEventListener('change', ()=>{ + if(expandButton.checked){ + images.forEach(image => { + image.addEventListener('click', handleImageClick) + }) + expand_factor.style.display='inline'; + } else { + images.forEach(image => { + image.removeEventListener('click', handleImageClick) + image.style.transform = 'scale(1)' + }) + expand_factor.style.display = 'none' + } +}) diff --git a/qc_viewer/resources/sliceViewer/style.css b/qc_viewer/resources/sliceViewer/style.css new file mode 100644 index 0000000..c5680b4 --- /dev/null +++ b/qc_viewer/resources/sliceViewer/style.css @@ -0,0 +1,14 @@ +img { + height: auto; + width:200px; + z-index: 0; + position: relative; +} + +.expand-options { + float: right; +} + +#expand_scale { + display: none; +} \ No newline at end of file diff --git a/qc_viewer/resources/sliceViewer/whole_slices.html b/qc_viewer/resources/sliceViewer/whole_slices.html new file mode 100644 index 0000000..ad44965 --- /dev/null +++ b/qc_viewer/resources/sliceViewer/whole_slices.html @@ -0,0 +1,56 @@ + + + + Processed Slices + + + + Back + + + +

Processed Image Slices

+ + + + + + + + + + + + + + + + + + + + + +
+

Channel - 0

+
+ +

Slice-2

+
+ +

Slice-3

+
+

Channel - 1

+
+ +

Slice-2

+
+ +

Slice-3

+
+ + \ No newline at end of file diff --git a/qc_viewer/resources/volumeViewer/cm_gray.png b/qc_viewer/resources/volumeViewer/cm_gray.png new file mode 100644 index 0000000000000000000000000000000000000000..8ac33886701247b22d8bf45c08e531336351ec4c GIT binary patch literal 75 zcmeAS@N?(olHy`uVBq!ia0y~yU<5K57&(}LLPCNJ>*9nZ aObm4gnf{A0MQ#AfGI+ZBxvXEp} z{b|~AuoC)9WX2AAba=u`zD(}kv-7+k>&KLT%17MLPtUXPUOC@c5qp09FoL+CJ@5S^ z`-+)z9WsICV^6by1s{1cJB~VI?7F}X6UL{^Tp6=gyKqECIcBpM)305|y}V(+uvR*B o*#)6X0)-{P!f0WzOt47IA706c4o3)EGynhq07*qoM6N<$f(1=v^8f$< literal 0 HcmV?d00001 diff --git a/qc_viewer/resources/volumeViewer/volRender.html b/qc_viewer/resources/volumeViewer/volRender.html new file mode 100644 index 0000000..40fc65a --- /dev/null +++ b/qc_viewer/resources/volumeViewer/volRender.html @@ -0,0 +1,22 @@ + + + + 3D Brain + + + + + + + Back +
+ + + \ No newline at end of file diff --git a/qc_viewer/resources/volumeViewer/volRenderScript.js b/qc_viewer/resources/volumeViewer/volRenderScript.js new file mode 100644 index 0000000..efb2168 --- /dev/null +++ b/qc_viewer/resources/volumeViewer/volRenderScript.js @@ -0,0 +1,172 @@ +import * as THREE from 'three'; + +import { GUI } from 'three/addons/libs/lil-gui.module.min.js'; +import { OrbitControls } from 'three/addons/controls/OrbitControls.js'; +import { VolumeRenderShader1 } from 'three/addons/shaders/VolumeShader.js'; +import { VRButton } from 'three/addons/webxr/VRButton.js'; + +let renderer, + scene, + camera, + controls, + material, + volconfig, + cmtextures, + gui; +let channel = 0 +let isScene = false; + +init(); + +function init() { + + if(!isScene){ + scene = new THREE.Scene(); + } + + // Create renderer + renderer = new THREE.WebGLRenderer(); + renderer.setPixelRatio( window.devicePixelRatio ); + renderer.setSize( window.innerWidth, window.innerHeight ); + renderer.xr.enabled = true; + document.body.appendChild( renderer.domElement ); + document.body.appendChild(VRButton.createButton(renderer)) + + // Create camera (The volume renderer does not work very well with perspective yet) + const h = 512; // frustum height + const aspect = window.innerWidth / window.innerHeight; + camera = new THREE.OrthographicCamera( - h * aspect / 2, h * aspect / 2, h / 2, - h / 2, 1, 1000 ); + camera.position.set( - 64, - 64, 128 ); + camera.up.set( 0, 0, 1 ); // In our data, z is up + + // Create controls + controls = new OrbitControls( camera, renderer.domElement ); + controls.addEventListener( 'change', render ); + controls.target.set( 64, 64, 128 ); + controls.minZoom = 0.5; + controls.maxZoom = 4; + controls.enablePan = false; + controls.update(); + + + + // The gui for interaction + volconfig = { clim1: 0, clim2: 1, renderstyle: 'iso', isothreshold: 0.05, colormap: 'viridis', channel: 0 }; + gui = new GUI(); + gui.add( volconfig, 'clim1', 0, 1, 0.01 ).onChange( updateUniforms ); + gui.add( volconfig, 'clim2', 0, 1, 0.01 ).onChange( updateUniforms ); + gui.add( volconfig, 'colormap', { gray: 'gray', viridis: 'viridis' } ).onChange( updateUniforms ); + gui.add( volconfig, 'renderstyle', { mip: 'mip', iso: 'iso' } ).onChange( updateUniforms ); + gui.add( volconfig, 'isothreshold', 0, 1, 0.01 ).onChange( updateUniforms ); + + // Load the 4D array from the json file produced + new THREE.FileLoader().load( 'volumeData.json', function ( volume ) { + volume = JSON.parse(volume)[channel] + // get the length for each axes x,y,z; will only process one channel + console.log(volume) + let z_length = volume.length + let y_length = volume[0].length + let x_length = volume[0][0].length + // create a new array to transform the array to 1D + let newData = new Float32Array(x_length*y_length*z_length) + + // loop through every data point in the array + for(let z=0; z + + FlatField Correction Check + + + + Back + + + +

Before and After Flatfield Correction

+''') + + for chunk,(tile_corr, tile_uncorr) in enumerate(zip(proc_data[0], unproc_data[0])): + for chan_num, (channel_corr, channel_uncorr) in enumerate(zip(tile_corr, tile_uncorr)): + slice = slice_start + fw.write(f""" + + + """) + while(slice + +

Corrected

+

Slice-{slice}

+ +
''') + slice += slice_step + fw.write(" ") + fw.write(""" + + +
+

Chunk - {chunk} Channel - {chan_num}

+
+ +

Uncorrected

+

Slice-{slice}

+
+ + """) + + +def produce_whole_slice_images(proc_path, colour="gray", slice_start=0, slice_step=1): + ''' + This function produces an html page containing images of each slice for both channels. + The slice step can be chosen as well as the contrast for each channel to ensure the best quality images + ''' + proc_reader= Reader(parse_url(proc_path)) + proc_data=list(proc_reader())[0].data + produce_json(proc_data) + + root = "root" + os.makedirs(f"qc_viewer/{out_dir}/sliceViewer/{root}/images", exist_ok=True) + with open(f"qc_viewer/{out_dir}/sliceViewer/whole_slices.html", 'w') as fw: + fw.write(''' + + + Processed Slices + + + + Back + + + +

Processed Image Slices

+''') + for chan_num, channel in enumerate(proc_data[5]): + slice = slice_start + fw.write(f""" + + + """) + num_images = 0 + while(slice + + +""" + else: + new_html_text = f""" + +""" + fw.write(new_html_text) + slice += slice_step + num_images+=1 + fw.write(""" + + +
+

Channel - {chan_num}

+
+ +

Slice-{slice}

+
+ +

Slice-{slice}

+
+ + + """) + +def produce_json(data): + ''' + Produces a json format containing the most downsampled + image data to be rendered into a 3D image. + ''' + with open(f"qc_viewer/{out_dir}/volumeViewer/volumeData.json", 'w') as f: + data = np.array(data[-1]).tolist() + data = json.dumps(data) + f.write(data) + +def combine_sample_htmls(ffcorr_html, proc_html): + ''' + Produces and index.html page connecting the two image reports as well as + the 3D volume rendering page + ''' + with open(f'qc_viewer/{out_dir}/index.html', 'w') as f: + f.write(f""" + + + Processed Slices + + + + Back +
+ Flatfield Correction Before and After +
+ Full Processed Slices +
+ 3D Image + + """) + +def create_main_html(): + """ + This function creates an html file connecting all the sample viewers together. + If the file is empty it will produce the header; if not, it just adds + another sample link + """ + file="qc_viewer/index.html" + with open(file, 'a') as f: + if(os.path.getsize(file) <= 20): + f.write(f""" + + + Sample Check + + + +

Subject Reports

+ {out_dir.split('-')[0]} +
+ """) + else: + f.write(f""" + {out_dir.split('-')[0]} +
+ """) + + +if(generate_report): + """ + Runs entire script if is configured to in the config.yml file + If not it will just produce the target file for rule to pass + """ + produce_ff_images(ff_corr, ff_uncorr, slice_start=ff_s_start, slice_step=ff_s_start, + colour=ff_cmap) + produce_whole_slice_images(ome_zarr, slice_start=ws_s_start, slice_step=ws_s_step, + colour=ws_cmap) + combine_sample_htmls("ff_corr.html", "whole_slices.html") + create_main_html() +else: + directories=f"qc_viewer/{out_dir}/volumeViewer" + os.makedirs(directories, exist_ok=True) + with open(directories+"/volumeData.json", 'w') as f: + f.write("") + + diff --git a/workflow/scripts/tif_to_zarr.py b/workflow/scripts/tif_to_zarr.py index bfd4a86..49485f4 100644 --- a/workflow/scripts/tif_to_zarr.py +++ b/workflow/scripts/tif_to_zarr.py @@ -65,5 +65,6 @@ def single_imread(*args): print('writing images to zarr with dask') with ProgressBar(): da.to_zarr(darr,snakemake.output.zarr,overwrite=True,dimension_separator='/') + da.to_zarr(darr,snakemake.output.backup_zarr,overwrite=True,dimension_separator='/') From 6ab0ac61ea4dc7fe49dea19ffa7be932506de2a9 Mon Sep 17 00:00:00 2001 From: Benjamin Gros Date: Tue, 4 Jun 2024 11:21:09 -0400 Subject: [PATCH 04/18] Fix hard coded report targets and configuration errors --- config/config.yml | 4 +- qc_viewer/resources/sliceViewer/ff_corr.html | 2 + qc_viewer/resources/sliceViewer/style.css | 5 ++ workflow/Snakefile | 2 - workflow/rules/common.smk | 17 +++-- workflow/rules/ome_zarr.smk | 4 +- workflow/scripts/create_viewer.py | 72 +++++++------------- 7 files changed, 45 insertions(+), 61 deletions(-) diff --git a/config/config.yml b/config/config.yml index abd2409..3211680 100644 --- a/config/config.yml +++ b/config/config.yml @@ -130,11 +130,11 @@ report: create_report: True flatfield_corrected: slice_start: 0 - slice_step: 1 + slice_step: 10 # Shows every nth slice colour_map: gray whole_slice_viewer: slice_start: 0 - slice_step: 1 + slice_step: 10 colour_map: gray diff --git a/qc_viewer/resources/sliceViewer/ff_corr.html b/qc_viewer/resources/sliceViewer/ff_corr.html index 450693e..5a477f7 100644 --- a/qc_viewer/resources/sliceViewer/ff_corr.html +++ b/qc_viewer/resources/sliceViewer/ff_corr.html @@ -11,6 +11,7 @@ +

Before and After Flatfield Correction

@@ -68,5 +69,6 @@

Uncorrected

+
\ No newline at end of file diff --git a/qc_viewer/resources/sliceViewer/style.css b/qc_viewer/resources/sliceViewer/style.css index c5680b4..f12e3d6 100644 --- a/qc_viewer/resources/sliceViewer/style.css +++ b/qc_viewer/resources/sliceViewer/style.css @@ -5,6 +5,11 @@ img { position: relative; } +table { + margin-left: auto; + margin-right: auto; +} + .expand-options { float: right; } diff --git a/workflow/Snakefile b/workflow/Snakefile index 68a78b2..c6dbd76 100644 --- a/workflow/Snakefile +++ b/workflow/Snakefile @@ -32,8 +32,6 @@ rule all: input: get_all_targets(), get_bids_toplevel_targets(), - "qc_viewer/demo1-brain-blaze1x/volumeViewer/volumeData.json", - "qc_viewer/demo2-brain-blaze1x/volumeViewer/volumeData.json", localrule: True diff --git a/workflow/rules/common.smk b/workflow/rules/common.smk index 7c5acde..e2ba251 100644 --- a/workflow/rules/common.smk +++ b/workflow/rules/common.smk @@ -42,16 +42,15 @@ def get_all_targets(): stain=get_stains_by_row(i), ) ) - targets.extend( - expand( - "qc_viewer/{subject}-{sample}-{acq}/volumeViewer/volumeData.json", - subject=datasets.loc[i, "subject"], - sample=datasets.loc[i, "sample"], - acq=datasets.loc[i, "acq"], + if config['report']['create_report']: + targets.extend( + expand( + "qc_viewer/{subject}-{sample}-{acq}/volumeViewer/volumeData.json", + subject=datasets.loc[i, "subject"], + sample=datasets.loc[i, "sample"], + acq=datasets.loc[i, "acq"], + ) ) - ) - - return targets diff --git a/workflow/rules/ome_zarr.smk b/workflow/rules/ome_zarr.smk index 54497ce..3613c35 100644 --- a/workflow/rules/ome_zarr.smk +++ b/workflow/rules/ome_zarr.smk @@ -158,13 +158,13 @@ rule ome_zarr_to_nii: rule generate_report: + """Generate QC reports for workflow""" input: uncorr="bids/{subject}-{sample}-{acq}-uncorr.zarr", corr="bids/{subject}-{sample}-{acq}-corr.zarr", ome="bids/sub-{subject}/micr/sub-{subject}_sample-{sample}_acq-{acq}_SPIM.ome.zarr", params: - create_report=config['report']['create_report'], - ff_slice_start=config['report']['flatfield_corrected']['slice_start'], + ff_s_start=config['report']['flatfield_corrected']['slice_start'], ws_s_start=config['report']['whole_slice_viewer']['slice_start'], ff_s_step=config['report']['flatfield_corrected']['slice_step'], ff_cmap=config['report']['flatfield_corrected']['colour_map'], diff --git a/workflow/scripts/create_viewer.py b/workflow/scripts/create_viewer.py index e5b8a2b..dd6c597 100644 --- a/workflow/scripts/create_viewer.py +++ b/workflow/scripts/create_viewer.py @@ -1,26 +1,34 @@ -import matplotlib.pyplot as plt -import numpy as np -from ome_zarr.io import parse_url -from ome_zarr.reader import Reader +import subprocess import os import json import math from distutils.dir_util import copy_tree +from ome_zarr.io import parse_url +from ome_zarr.reader import Reader +try: + import matplotlib.pyplot as plt +except: + subprocess.run(['pip','install','matplotlib']) + import matplotlib.pyplot as plt +try: + import numpy as np +except: + subprocess.run(['pip','install', 'numpy']) -generate_report = snakemake.params.create_report - -ff_s_start=snakemake.params.ff_slice_start -ff_s_step=snakemake.params.ff_slice_step +# arguments for creating the flatfield correction comparisons +ff_s_start=snakemake.params.ff_s_start +ff_s_step=snakemake.params.ff_s_step ff_cmap=snakemake.params.ff_cmap ff_corr = snakemake.input.corr ff_uncorr = snakemake.input.uncorr +# arguments for creating whole slice images ws_s_step=snakemake.params.ws_s_step ws_s_start=snakemake.params.ws_s_start 
ws_cmap=snakemake.params.ws_cmap - ome_zarr= snakemake.input.ome +# Get output files output = snakemake.output.out out_dir = output.split("/")[1] @@ -85,20 +93,9 @@ def produce_ff_images(corrected_path, uncorrected_path, colour="gray", slice_sta """) while(slice""") num_images = 0 while(slice Back -
+

{out_dir.split("-")[0]}

Flatfield Correction Before and After
Full Processed Slices @@ -268,21 +256,13 @@ def create_main_html(): """) -if(generate_report): - """ - Runs entire script if is configured to in the config.yml file - If not it will just produce the target file for rule to pass - """ - produce_ff_images(ff_corr, ff_uncorr, slice_start=ff_s_start, slice_step=ff_s_start, - colour=ff_cmap) - produce_whole_slice_images(ome_zarr, slice_start=ws_s_start, slice_step=ws_s_step, - colour=ws_cmap) - combine_sample_htmls("ff_corr.html", "whole_slices.html") - create_main_html() -else: - directories=f"qc_viewer/{out_dir}/volumeViewer" - os.makedirs(directories, exist_ok=True) - with open(directories+"/volumeData.json", 'w') as f: - f.write("") + +produce_ff_images(ff_corr, ff_uncorr, slice_start=ff_s_start, slice_step=ff_s_step, + colour=ff_cmap) +produce_whole_slice_images(ome_zarr, slice_start=ws_s_start, slice_step=ws_s_step, + colour=ws_cmap) +combine_sample_htmls("ff_corr.html", "whole_slices.html") +create_main_html() + From 78b6180591b7a4165344cb0eef6818e98592e371 Mon Sep 17 00:00:00 2001 From: Benjamin Gros Date: Thu, 13 Jun 2024 12:31:07 -0400 Subject: [PATCH 05/18] Make minor changes to script and add some more volume viewing features --- qc_viewer/resources/sliceViewer/ff_corr.html | 74 ------------------- .../resources/sliceViewer/image_expand.js | 22 +++++- .../resources/sliceViewer/whole_slices.html | 56 -------------- .../resources/volumeViewer/volRenderScript.js | 51 +++++++++---- workflow/scripts/create_viewer.py | 57 +++++++++++--- 5 files changed, 102 insertions(+), 158 deletions(-) delete mode 100644 qc_viewer/resources/sliceViewer/ff_corr.html delete mode 100644 qc_viewer/resources/sliceViewer/whole_slices.html diff --git a/qc_viewer/resources/sliceViewer/ff_corr.html b/qc_viewer/resources/sliceViewer/ff_corr.html deleted file mode 100644 index 5a477f7..0000000 --- a/qc_viewer/resources/sliceViewer/ff_corr.html +++ /dev/null @@ -1,74 +0,0 @@ - - - - FlatField Correction Check - - - - Back - -
- - -

Before and After Flatfield Correction

- - - - - - - - - - - - - - - - - -
-

Chunk - 0 Channel - 0

-
- -

Corrected

-

Slice-1

-
- -

Uncorrected

-

Slice-1

-
- -

Corrected

-

Slice-2

-
- -

Uncorrected

-

Slice-2

-
-

Chunk - 0 Channel - 1

-
- -

Corrected

-

Slice-1

-
- -

Uncorrected

-

Slice-1

-
- -

Corrected

-

Slice-2

-
- -

Uncorrected

-

Slice-2

-
-
- - \ No newline at end of file diff --git a/qc_viewer/resources/sliceViewer/image_expand.js b/qc_viewer/resources/sliceViewer/image_expand.js index a7777d0..7453926 100644 --- a/qc_viewer/resources/sliceViewer/image_expand.js +++ b/qc_viewer/resources/sliceViewer/image_expand.js @@ -5,20 +5,27 @@ const expand_factor = document.getElementById("expand_scale") let expansion_scale = 1 let expanded = false; -// create a function to exxpand images when clicked on +// create a function to expand images when clicked on function handleImageClick(event){ + // get target image and the scaling factor const image = event.target expansion_scale = expand_factor.value; - console.log(expansion_scale) + + // expand image by the scaling factor if not already expanded if(!expanded){ image.style.transform = `scale(${expansion_scale})` image.style.zIndex = 1 + + // if it is going to expand off screen shift to the right const leftDistance = image.getBoundingClientRect().left; if(leftDistance < 0){ - image.style.transform = `translateX(${Math.abs(leftDistance)+10}px) scale(2)`; + image.style.transform = `translateX(${Math.abs(leftDistance)+10}px) scale(${expansion_scale})`; } + expanded=true + } else { + // scale images back to original size image.style.transform = "scale(1)" image.style.position = 'relative' image.style.zIndex=0 @@ -26,17 +33,26 @@ function handleImageClick(event){ } } +// Enables or disables the ability to expand images on click expandButton.addEventListener('change', ()=>{ + + // add listener to enable expansion if(expandButton.checked){ images.forEach(image => { image.addEventListener('click', handleImageClick) }) + + // ensure images expand properly expand_factor.style.display='inline'; + } else { + + //remove listener to disable expansion images.forEach(image => { image.removeEventListener('click', handleImageClick) image.style.transform = 'scale(1)' }) + expand_factor.style.display = 'none' } }) diff --git a/qc_viewer/resources/sliceViewer/whole_slices.html b/qc_viewer/resources/sliceViewer/whole_slices.html deleted file mode 100644 index ad44965..0000000 --- a/qc_viewer/resources/sliceViewer/whole_slices.html +++ /dev/null @@ -1,56 +0,0 @@ - - - - Processed Slices - - - - Back - - - -

Processed Image Slices

- - - - - - - - - - - - - - - - - - - - - -
-

Channel - 0

-
- -

Slice-2

-
- -

Slice-3

-
-

Channel - 1

-
- -

Slice-2

-
- -

Slice-3

-
- - \ No newline at end of file diff --git a/qc_viewer/resources/volumeViewer/volRenderScript.js b/qc_viewer/resources/volumeViewer/volRenderScript.js index efb2168..ecd4020 100644 --- a/qc_viewer/resources/volumeViewer/volRenderScript.js +++ b/qc_viewer/resources/volumeViewer/volRenderScript.js @@ -13,8 +13,8 @@ let renderer, volconfig, cmtextures, gui; -let channel = 0 let isScene = false; +let mesh; init(); @@ -51,40 +51,64 @@ function init() { // The gui for interaction - volconfig = { clim1: 0, clim2: 1, renderstyle: 'iso', isothreshold: 0.05, colormap: 'viridis', channel: 0 }; + volconfig = {channel: 0, cmax:30000, cmin: 500, clim1: 0, clim2: 1, renderstyle: 'iso', isothreshold: 0.05, colormap: 'viridis', channel: 0, zmax: 0,zmin: 0, ymax: 0, ymin: 0, xmax:0, xmin: 0 }; gui = new GUI(); - gui.add( volconfig, 'clim1', 0, 1, 0.01 ).onChange( updateUniforms ); + gui.add( volconfig, 'clim1', 0, 1, 0.01 ).onChange(); gui.add( volconfig, 'clim2', 0, 1, 0.01 ).onChange( updateUniforms ); gui.add( volconfig, 'colormap', { gray: 'gray', viridis: 'viridis' } ).onChange( updateUniforms ); gui.add( volconfig, 'renderstyle', { mip: 'mip', iso: 'iso' } ).onChange( updateUniforms ); gui.add( volconfig, 'isothreshold', 0, 1, 0.01 ).onChange( updateUniforms ); // Load the 4D array from the json file produced + load(false) + + window.addEventListener( 'resize', onWindowResize ); + +} + +function load(refresh){ new THREE.FileLoader().load( 'volumeData.json', function ( volume ) { - volume = JSON.parse(volume)[channel] + volume = JSON.parse(volume)[volconfig.channel] // get the length for each axes x,y,z; will only process one channel - console.log(volume) let z_length = volume.length let y_length = volume[0].length let x_length = volume[0][0].length + if(!refresh){ + volconfig.zmax = z_length + volconfig.ymax = y_length + volconfig.xmax = x_length + gui.add(volconfig, 'channel', 0, 1, 1).onFinishChange(()=>load(true)) + gui.add(volconfig, 'cmax', 0, 30000, 10).onFinishChange(()=>load(true)) + gui.add(volconfig, 'cmin', 0, 30000, 10).onFinishChange(()=>load(true)) + gui.add(volconfig, 'zmax', 1, z_length, 1 ).onFinishChange( ()=>load(true) ); + gui.add(volconfig, 'zmin', 0, z_length, 1 ).onFinishChange( ()=>load(true) ) + gui.add(volconfig, 'ymax', 1, y_length, 1 ).onFinishChange( ()=>load(true) ); + gui.add(volconfig, 'ymin', 0, y_length, 1 ).onFinishChange( ()=>load(true) ); + gui.add(volconfig, 'xmax', 1, x_length, 1).onFinishChange(()=>load(true)); + gui.add(volconfig, 'xmin', 0, x_length, 1 ).onFinishChange( ()=>load(true) ); + } else { + scene.remove(mesh) + } // create a new array to transform the array to 1D let newData = new Float32Array(x_length*y_length*z_length) // loop through every data point in the array - for(let z=0; z @@ -83,6 +92,7 @@ def produce_ff_images(corrected_path, uncorrected_path, colour="gray", slice_sta

Before and After Flatfield Correction

''') + # Add images into the table for each chunk and channel for chunk,(tile_corr, tile_uncorr) in enumerate(zip(proc_data[0], unproc_data[0])): for chan_num, (channel_corr, channel_uncorr) in enumerate(zip(tile_corr, tile_uncorr)): slice = slice_start @@ -92,16 +102,22 @@ def produce_ff_images(corrected_path, uncorrected_path, colour="gray", slice_sta """) + # Process every wanted slice within a chunk and channe; while(slice @@ -113,8 +129,11 @@ def produce_ff_images(corrected_path, uncorrected_path, colour="gray", slice_sta

Uncorrected

Slice-{slice}

''') + # Increase by user given slice step slice += slice_step + fw.write(" ") + fw.write(""" @@ -128,12 +147,19 @@ def produce_whole_slice_images(proc_path, colour="gray", slice_start=0, slice_st This function produces an html page containing images of each slice for both channels. The slice step can be chosen as well as the contrast for each channel to ensure the best quality images ''' + + # read ome-zarr data and convert to list proc_reader= Reader(parse_url(proc_path)) proc_data=list(proc_reader())[0].data + + # dump the list into a json to be volume rendered produce_json(proc_data) + # create another root directory to hold the fully preprocessed slice images root = "root" os.makedirs(f"qc_viewer/{out_dir}/sliceViewer/{root}/images", exist_ok=True) + + # create html page for the whole slices with open(f"qc_viewer/{out_dir}/sliceViewer/whole_slices.html", 'w') as fw: fw.write(''' @@ -152,7 +178,8 @@ def produce_whole_slice_images(proc_path, colour="gray", slice_start=0, slice_st

Processed Image Slices

''') - for chan_num, channel in enumerate(proc_data[5]): + # for each channel add the images of the most downsampled data + for chan_num, channel in enumerate(proc_data[-1]): slice = slice_start fw.write(f""" @@ -162,13 +189,17 @@ def produce_whole_slice_images(proc_path, colour="gray", slice_start=0, slice_st """) num_images = 0 while(slice @@ -188,6 +219,8 @@ def produce_whole_slice_images(proc_path, colour="gray", slice_start=0, slice_st fw.write(new_html_text) slice += slice_step num_images+=1 + + # end html table fw.write(""" @@ -198,19 +231,22 @@ def produce_whole_slice_images(proc_path, colour="gray", slice_start=0, slice_st def produce_json(data): ''' - Produces a json format containing the most downsampled - image data to be rendered into a 3D image. + Produces a json file containing the most downsampled + image data to be volume rendered into a 3D image. ''' + with open(f"qc_viewer/{out_dir}/volumeViewer/volumeData.json", 'w') as f: data = np.array(data[-1]).tolist() data = json.dumps(data) f.write(data) + def combine_sample_htmls(ffcorr_html, proc_html): ''' Produces and index.html page connecting the two image reports as well as the 3D volume rendering page ''' + with open(f'qc_viewer/{out_dir}/index.html', 'w') as f: f.write(f""" @@ -227,15 +263,18 @@ def combine_sample_htmls(ffcorr_html, proc_html):
3D Image - """) - + """) + + def create_main_html(): """ This function creates an html file connecting all the samples viewers together. If the file is empty it will produce the header and if it is not it just adds another sampel link """ + file="qc_viewer/index.html" + with open(file, 'a') as f: if(os.path.getsize(file) <= 20): f.write(f""" From 8466119d37231dbf1a8982f37c59dda52d816864 Mon Sep 17 00:00:00 2001 From: Benjamin Gros Date: Thu, 13 Jun 2024 15:23:58 -0400 Subject: [PATCH 06/18] Add readme for reports and reconfigure imports in create viewer script --- qc_viewer/README.md | 14 ++++++++++++++ workflow/scripts/create_viewer.py | 16 ++++++---------- 2 files changed, 20 insertions(+), 10 deletions(-) create mode 100644 qc_viewer/README.md diff --git a/qc_viewer/README.md b/qc_viewer/README.md new file mode 100644 index 0000000..f584224 --- /dev/null +++ b/qc_viewer/README.md @@ -0,0 +1,14 @@ +# QC Reports can be found here + +## To view move into this diretory + +```bash +cd qc_viewer +``` +## Then run the following command to open in the browser + +```bash +python -m http.server +``` + +## Open the link and navigate the links to view the reports for each subject \ No newline at end of file diff --git a/workflow/scripts/create_viewer.py b/workflow/scripts/create_viewer.py index 6e17217..6c76937 100644 --- a/workflow/scripts/create_viewer.py +++ b/workflow/scripts/create_viewer.py @@ -8,15 +8,12 @@ # TODO: Fix importing pyplot and numpy try: import matplotlib.pyplot as plt + import numpy as np except: subprocess.run(['pip','install','matplotlib']) import matplotlib.pyplot as plt -try: - import numpy as np -except: - subprocess.run(['pip','install', 'numpy']) - import numpy as np - + import numpy as np + # arguments for creating the flatfield correction comparisons ff_s_start=snakemake.params.ff_s_start ff_s_step=snakemake.params.ff_s_step @@ -51,8 +48,7 @@ def make_directories(): def produce_ff_images(corrected_path, uncorrected_path, colour="gray", slice_start=0, slice_step=1): ''' This function produces an html file containing images for each tile before and after flatfield correction - It can be decided how many slices are wanted. The contrast can also be adjusted for - different images and channels to ensure good quality. + It can be decided how many slices are wanted. ''' # Get corrected and uncorrected file paths @@ -144,8 +140,8 @@ def produce_ff_images(corrected_path, uncorrected_path, colour="gray", slice_sta def produce_whole_slice_images(proc_path, colour="gray", slice_start=0, slice_step=1): ''' - This function produces an html page containing images of each slice for both channels. - The slice step can be chosen as well as the contrast for each channel to ensure the best quality images + This function produces an html page containing images of each slice for all channels. 
+ The slice step can be chosen in the config to choose how many slices you want to view ''' # read ome-zarr data and convert to list From 4088b4ec87f20f8ec8475afe23967b359f684dcc Mon Sep 17 00:00:00 2001 From: Benjamin Gros Date: Fri, 14 Jun 2024 09:54:23 -0400 Subject: [PATCH 07/18] Make small touch ups --- .../resources/volumeViewer/volRenderScript.js | 62 +++++++++++++------ workflow/scripts/create_viewer.py | 23 +++---- 2 files changed, 50 insertions(+), 35 deletions(-) diff --git a/qc_viewer/resources/volumeViewer/volRenderScript.js b/qc_viewer/resources/volumeViewer/volRenderScript.js index ecd4020..ed3ea39 100644 --- a/qc_viewer/resources/volumeViewer/volRenderScript.js +++ b/qc_viewer/resources/volumeViewer/volRenderScript.js @@ -5,6 +5,31 @@ import { OrbitControls } from 'three/addons/controls/OrbitControls.js'; import { VolumeRenderShader1 } from 'three/addons/shaders/VolumeShader.js'; import { VRButton } from 'three/addons/webxr/VRButton.js'; +/* +Script is a reworked version of a threejs example +refined for array data stored in json format + +Copyright © 2010-2024 three.js authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE +*/ + let renderer, scene, camera, @@ -13,17 +38,14 @@ let renderer, volconfig, cmtextures, gui; -let isScene = false; + let mesh; init(); function init() { - if(!isScene){ - scene = new THREE.Scene(); - } - + scene = new THREE.Scene(); // Create renderer renderer = new THREE.WebGLRenderer(); renderer.setPixelRatio( window.devicePixelRatio ); @@ -70,16 +92,16 @@ function load(refresh){ new THREE.FileLoader().load( 'volumeData.json', function ( volume ) { volume = JSON.parse(volume)[volconfig.channel] // get the length for each axes x,y,z; will only process one channel - let z_length = volume.length - let y_length = volume[0].length - let x_length = volume[0][0].length + let z_length = volume.length; + let y_length = volume[0].length; + let x_length = volume[0][0].length; if(!refresh){ - volconfig.zmax = z_length - volconfig.ymax = y_length - volconfig.xmax = x_length - gui.add(volconfig, 'channel', 0, 1, 1).onFinishChange(()=>load(true)) - gui.add(volconfig, 'cmax', 0, 30000, 10).onFinishChange(()=>load(true)) - gui.add(volconfig, 'cmin', 0, 30000, 10).onFinishChange(()=>load(true)) + volconfig.zmax = z_length; + volconfig.ymax = y_length; + volconfig.xmax = x_length; + gui.add(volconfig, 'channel', 0, 1, 1).onFinishChange(()=>load(true)); + gui.add(volconfig, 'cmax', 0, 30000, 10).onFinishChange(()=>load(true)); + gui.add(volconfig, 'cmin', 0, 30000, 10).onFinishChange(()=>load(true)); gui.add(volconfig, 'zmax', 1, z_length, 1 ).onFinishChange( ()=>load(true) ); gui.add(volconfig, 'zmin', 0, z_length, 1 ).onFinishChange( ()=>load(true) ) gui.add(volconfig, 'ymax', 1, y_length, 1 ).onFinishChange( ()=>load(true) ); @@ -90,19 +112,19 @@ function load(refresh){ scene.remove(mesh) } // create a new array to transform the array to 1D - let newData = new Float32Array(x_length*y_length*z_length) + let newData = new Float32Array(x_length*y_length*z_length); // loop through every data point in the array for(let z=volconfig.zmin; z Date: Fri, 14 Jun 2024 12:05:56 -0400 Subject: [PATCH 08/18] Add log to generate_report rule --- workflow/rules/ome_zarr.smk | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/workflow/rules/ome_zarr.smk b/workflow/rules/ome_zarr.smk index 3613c35..a5149d0 100644 --- a/workflow/rules/ome_zarr.smk +++ b/workflow/rules/ome_zarr.smk @@ -172,6 +172,14 @@ rule generate_report: ws_cmap=config['report']['whole_slice_viewer']['colour_map'], output: out="qc_viewer/{subject}-{sample}-{acq}/volumeViewer/volumeData.json", + log: bids( + root="logs", + datatype="generate_report", + subject="{subject}", + sample="{sample}", + acq="{acq}", + suffix="log.txt", + ), localrule: True script: "../scripts/create_viewer.py" From e2910297619fb4d1d655e776173a0e81996b9eef Mon Sep 17 00:00:00 2001 From: Benjamin Gros Date: Fri, 14 Jun 2024 12:12:46 -0400 Subject: [PATCH 09/18] fix parsing error to allow logging --- workflow/rules/ome_zarr.smk | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/workflow/rules/ome_zarr.smk b/workflow/rules/ome_zarr.smk index a5149d0..54f00e0 100644 --- a/workflow/rules/ome_zarr.smk +++ b/workflow/rules/ome_zarr.smk @@ -172,7 +172,8 @@ rule generate_report: ws_cmap=config['report']['whole_slice_viewer']['colour_map'], output: 
out="qc_viewer/{subject}-{sample}-{acq}/volumeViewer/volumeData.json", - log: bids( + log: + bids( root="logs", datatype="generate_report", subject="{subject}", From 5036d23c7db8324e3674e360e1cdeda9ba3e9d2a Mon Sep 17 00:00:00 2001 From: Benjamin Gros Date: Fri, 14 Jun 2024 13:04:23 -0400 Subject: [PATCH 10/18] Lint snakemake workflow for pull request --- workflow/rules/common.smk | 2 +- workflow/rules/flatfield_corr.smk | 4 +--- workflow/rules/import.smk | 3 +-- workflow/rules/ome_zarr.smk | 16 +++++++--------- 4 files changed, 10 insertions(+), 15 deletions(-) diff --git a/workflow/rules/common.smk b/workflow/rules/common.smk index e2ba251..1fc8680 100644 --- a/workflow/rules/common.smk +++ b/workflow/rules/common.smk @@ -42,7 +42,7 @@ def get_all_targets(): stain=get_stains_by_row(i), ) ) - if config['report']['create_report']: + if config["report"]["create_report"]: targets.extend( expand( "qc_viewer/{subject}-{sample}-{acq}/volumeViewer/volumeData.json", diff --git a/workflow/rules/flatfield_corr.smk b/workflow/rules/flatfield_corr.smk index a09ba73..d02c8e2 100644 --- a/workflow/rules/flatfield_corr.smk +++ b/workflow/rules/flatfield_corr.smk @@ -92,9 +92,7 @@ rule apply_basic_flatfield_corr: ) ) ), - backup_zarr=temp(directory( - "bids/{subject}-{sample}-{acq}-corr.zarr" - )), + backup_zarr=temp(directory("bids/{subject}-{sample}-{acq}-corr.zarr")), benchmark: bids( root="benchmarks", diff --git a/workflow/rules/import.smk b/workflow/rules/import.smk index 553e782..682e13b 100644 --- a/workflow/rules/import.smk +++ b/workflow/rules/import.smk @@ -146,8 +146,7 @@ rule tif_to_zarr: ) ) ), - backup_zarr=temp(directory("bids/{subject}-{sample}-{acq}-uncorr.zarr" - )), + backup_zarr=temp(directory("bids/{subject}-{sample}-{acq}-uncorr.zarr")), benchmark: bids( root="benchmarks", diff --git a/workflow/rules/ome_zarr.smk b/workflow/rules/ome_zarr.smk index 54f00e0..9fba09a 100644 --- a/workflow/rules/ome_zarr.smk +++ b/workflow/rules/ome_zarr.smk @@ -164,15 +164,15 @@ rule generate_report: corr="bids/{subject}-{sample}-{acq}-corr.zarr", ome="bids/sub-{subject}/micr/sub-{subject}_sample-{sample}_acq-{acq}_SPIM.ome.zarr", params: - ff_s_start=config['report']['flatfield_corrected']['slice_start'], - ws_s_start=config['report']['whole_slice_viewer']['slice_start'], - ff_s_step=config['report']['flatfield_corrected']['slice_step'], - ff_cmap=config['report']['flatfield_corrected']['colour_map'], - ws_s_step=config['report']['whole_slice_viewer']['slice_step'], - ws_cmap=config['report']['whole_slice_viewer']['colour_map'], + ff_s_start=config["report"]["flatfield_corrected"]["slice_start"], + ws_s_start=config["report"]["whole_slice_viewer"]["slice_start"], + ff_s_step=config["report"]["flatfield_corrected"]["slice_step"], + ff_cmap=config["report"]["flatfield_corrected"]["colour_map"], + ws_s_step=config["report"]["whole_slice_viewer"]["slice_step"], + ws_cmap=config["report"]["whole_slice_viewer"]["colour_map"], output: out="qc_viewer/{subject}-{sample}-{acq}/volumeViewer/volumeData.json", - log: + log: bids( root="logs", datatype="generate_report", @@ -184,5 +184,3 @@ rule generate_report: localrule: True script: "../scripts/create_viewer.py" - - From c7f67bbc639cfef652b1cb1d64cb75a3490b1310 Mon Sep 17 00:00:00 2001 From: Ali Khan Date: Sun, 16 Jun 2024 21:59:59 -0400 Subject: [PATCH 11/18] removes the use of backup files --- workflow/rules/flatfield_corr.smk | 1 - workflow/rules/import.smk | 1 - workflow/rules/ome_zarr.smk | 22 +++++++++++++++++++--- workflow/scripts/tif_to_zarr.py | 1 - 4 
files changed, 19 insertions(+), 6 deletions(-) diff --git a/workflow/rules/flatfield_corr.smk b/workflow/rules/flatfield_corr.smk index d02c8e2..f91f207 100644 --- a/workflow/rules/flatfield_corr.smk +++ b/workflow/rules/flatfield_corr.smk @@ -92,7 +92,6 @@ rule apply_basic_flatfield_corr: ) ) ), - backup_zarr=temp(directory("bids/{subject}-{sample}-{acq}-corr.zarr")), benchmark: bids( root="benchmarks", diff --git a/workflow/rules/import.smk b/workflow/rules/import.smk index 682e13b..c8c1008 100644 --- a/workflow/rules/import.smk +++ b/workflow/rules/import.smk @@ -146,7 +146,6 @@ rule tif_to_zarr: ) ) ), - backup_zarr=temp(directory("bids/{subject}-{sample}-{acq}-uncorr.zarr")), benchmark: bids( root="benchmarks", diff --git a/workflow/rules/ome_zarr.smk b/workflow/rules/ome_zarr.smk index 9fba09a..12a1620 100644 --- a/workflow/rules/ome_zarr.smk +++ b/workflow/rules/ome_zarr.smk @@ -160,9 +160,25 @@ rule ome_zarr_to_nii: rule generate_report: """Generate QC reports for workflow""" input: - uncorr="bids/{subject}-{sample}-{acq}-uncorr.zarr", - corr="bids/{subject}-{sample}-{acq}-corr.zarr", - ome="bids/sub-{subject}/micr/sub-{subject}_sample-{sample}_acq-{acq}_SPIM.ome.zarr", + uncorr=bids( + root=work, + subject="{subject}", + datatype="micr", + sample="{sample}", + acq="{acq}", + desc="raw", + suffix="SPIM.zarr", + ), + corr=bids( + root=work, + subject="{subject}", + datatype="micr", + sample="{sample}", + acq="{acq}", + desc="flatcorr", + suffix="SPIM.zarr", + ), + ome=get_input_ome_zarr_to_nii() params: ff_s_start=config["report"]["flatfield_corrected"]["slice_start"], ws_s_start=config["report"]["whole_slice_viewer"]["slice_start"], diff --git a/workflow/scripts/tif_to_zarr.py b/workflow/scripts/tif_to_zarr.py index 49485f4..bfd4a86 100644 --- a/workflow/scripts/tif_to_zarr.py +++ b/workflow/scripts/tif_to_zarr.py @@ -65,6 +65,5 @@ def single_imread(*args): print('writing images to zarr with dask') with ProgressBar(): da.to_zarr(darr,snakemake.output.zarr,overwrite=True,dimension_separator='/') - da.to_zarr(darr,snakemake.output.backup_zarr,overwrite=True,dimension_separator='/') From e1f108f212b792bee93e8b18e2cbd1f2ace3dbf5 Mon Sep 17 00:00:00 2001 From: Ali Khan Date: Sun, 16 Jun 2024 22:00:30 -0400 Subject: [PATCH 12/18] updating to basicpy 1.2.0 requires this change (will make sure poetry deps in spimprep-deps ask for ^1.2.0 now) --- workflow/scripts/apply_basic_flatfield_corr_zarr.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/workflow/scripts/apply_basic_flatfield_corr_zarr.py b/workflow/scripts/apply_basic_flatfield_corr_zarr.py index f73abc6..61fbd63 100644 --- a/workflow/scripts/apply_basic_flatfield_corr_zarr.py +++ b/workflow/scripts/apply_basic_flatfield_corr_zarr.py @@ -26,9 +26,9 @@ with open(model_path / 'settings.json' ) as fp: model = json.load(fp) - profiles = np.load(model_path / 'profiles.npy') - model["flatfield"] = profiles[0] - model["darkfield"] = profiles[1] + profiles = np.load(model_path / 'profiles.npz') + model["flatfield"] = profiles['flatfield'] + model["darkfield"] = profiles['darkfield'] basic = BaSiC() basic.flatfield = resize(model['flatfield'],img_shape,preserve_range=True) @@ -51,4 +51,3 @@ def apply_basic_parallel(x): with ProgressBar(): da.to_zarr(arr_stacked,snakemake.output.zarr,overwrite=True,dimension_separator='/') - da.to_zarr(arr_stacked,snakemake.output.backup_zarr,overwrite=True,dimension_separator='/') From 6ed9e230bad56f840e9caab44e5845d90fa87a96 Mon Sep 17 00:00:00 2001 From: Ali Khan Date: Sun, 16 Jun 
2024 23:03:30 -0400 Subject: [PATCH 13/18] made the flatfield qc as a separate rule as an example note: still need to put the css and js in place.. would be nice to have these embedded in the html, maybe that's easier if we use jinja templating? --- workflow/rules/qc.smk | 40 ++++++++ workflow/scripts/generate_flatfield_qc.py | 107 ++++++++++++++++++++++ 2 files changed, 147 insertions(+) create mode 100644 workflow/rules/qc.smk create mode 100644 workflow/scripts/generate_flatfield_qc.py diff --git a/workflow/rules/qc.smk b/workflow/rules/qc.smk new file mode 100644 index 0000000..cd0094e --- /dev/null +++ b/workflow/rules/qc.smk @@ -0,0 +1,40 @@ +rule generate_flatfield_qc: + input: + uncorr=bids( + root=work, + subject="{subject}", + datatype="micr", + sample="{sample}", + acq="{acq}", + desc="raw", + suffix="SPIM.zarr", + ), + corr=bids( + root=work, + subject="{subject}", + datatype="micr", + sample="{sample}", + acq="{acq}", + desc="flatcorr", + suffix="SPIM.zarr", + ), + params: + ff_s_start=config["report"]["flatfield_corrected"]["slice_start"], + ff_s_step=config["report"]["flatfield_corrected"]["slice_step"], + ff_cmap=config["report"]["flatfield_corrected"]["colour_map"], + output: + html='qc/sub-{subject}_sample-{sample}_acq-{acq}_flatfieldqc.html', + images_dir=directory('qc/images/sub-{subject}_sample-{sample}_acq-{acq}') + log: + bids( + root="logs", + datatype="generate_flatfield_qc", + subject="{subject}", + sample="{sample}", + acq="{acq}", + suffix="log.txt", + ), + script: + "../scripts/generate_flatfield_qc.py" + + diff --git a/workflow/scripts/generate_flatfield_qc.py b/workflow/scripts/generate_flatfield_qc.py new file mode 100644 index 0000000..41a8caf --- /dev/null +++ b/workflow/scripts/generate_flatfield_qc.py @@ -0,0 +1,107 @@ +import os +import math +import matplotlib.pyplot as plt +import numpy as np +from pathlib import Path +from ome_zarr.io import parse_url +from ome_zarr.reader import Reader + +# script for creating flatfield before/after qc snapshots and html + + +slice_start=snakemake.params.ff_s_start +slice_step=snakemake.params.ff_s_step +colour=snakemake.params.ff_cmap +ff_corr = snakemake.input.corr +ff_uncorr = snakemake.input.uncorr +out_html = snakemake.output.html +images_dir = snakemake.output.images_dir + +#create output dir for images +Path(images_dir).mkdir(parents=True,exist_ok=True) + + +# Get corrected and uncorrected file paths +corr_zarr = corrected_path +uncorr_zarr = uncorrected_path + +# Read the corrected and uncorrected ome-zarr data +proc_reader= Reader(parse_url(corr_zarr)) +unproc_reader = Reader(parse_url(uncorr_zarr)) + +proc_data=list(proc_reader())[0].data +unproc_data = list(unproc_reader())[0].data + +# Create html file for flatfield corrected images +with open(out_html, 'w') as fw: + fw.write(f''' + + +FlatField Correction Check + + + +Back + + + +

Before and After Flatfield Correction

+''') + + # Add images into the table for each chunk and channel + for chunk,(tile_corr, tile_uncorr) in enumerate(zip(proc_data[0], unproc_data[0])): + for chan_num, (channel_corr, channel_uncorr) in enumerate(zip(tile_corr, tile_uncorr)): + slice = slice_start + fw.write(f""" + + + """) + # Process every wanted slice within a chunk and channe; + while(slice + +

Corrected

+

Slice-{slice}

+ +
''') + # Increase by user given slice step + slice += slice_step + + fw.write(" ") + + fw.write(""" + + +
+

Chunk - {chunk} Channel - {chan_num}

+
+ +

Uncorrected

+

Slice-{slice}

+
+ + """) + + + From 65eef7ea1eaf4bf698fe8be81591acabd5f3d3d9 Mon Sep 17 00:00:00 2001 From: Benjamin Gros Date: Tue, 2 Jul 2024 09:09:52 -0400 Subject: [PATCH 14/18] Separate QC rules, move html into jinja templates and downsample the volume rendering json in the z-direction --- qc/resources/ff_html_temp.html | 120 +++++++++++ qc/resources/qc_report_temp.html | 6 + qc/resources/subject_html_temp.html | 13 ++ qc/resources/volViewer/cm_gray.png | Bin 0 -> 75 bytes qc/resources/volViewer/cm_viridis.png | Bin 0 -> 238 bytes qc/resources/volViewer/volRender.html | 22 ++ qc/resources/volViewer/volRenderScript.js | 210 ++++++++++++++++++++ qc/resources/ws_html_temp.html | 117 +++++++++++ workflow/rules/common.smk | 2 +- workflow/rules/import.smk | 1 + workflow/rules/ome_zarr.smk | 47 +---- workflow/rules/qc.smk | 73 ++++++- workflow/scripts/generate_flatfield_qc.py | 148 ++++++-------- workflow/scripts/generate_subject_qc.py | 48 +++++ workflow/scripts/generate_volume_qc.py | 38 ++++ workflow/scripts/generate_whole_slice_qc.py | 65 ++++++ 16 files changed, 774 insertions(+), 136 deletions(-) create mode 100644 qc/resources/ff_html_temp.html create mode 100644 qc/resources/qc_report_temp.html create mode 100644 qc/resources/subject_html_temp.html create mode 100644 qc/resources/volViewer/cm_gray.png create mode 100644 qc/resources/volViewer/cm_viridis.png create mode 100644 qc/resources/volViewer/volRender.html create mode 100644 qc/resources/volViewer/volRenderScript.js create mode 100644 qc/resources/ws_html_temp.html create mode 100644 workflow/scripts/generate_subject_qc.py create mode 100644 workflow/scripts/generate_volume_qc.py create mode 100644 workflow/scripts/generate_whole_slice_qc.py diff --git a/qc/resources/ff_html_temp.html b/qc/resources/ff_html_temp.html new file mode 100644 index 0000000..658b26d --- /dev/null +++ b/qc/resources/ff_html_temp.html @@ -0,0 +1,120 @@ + + + FlatField Correction Check + + + + Back + + + + {%- for chunk in chunks %} + {%- set chunk_num = loop.index0 %} + {%- for channel in chunk %} + + + + + {%- for image in channel %} + + + {%- endfor %} + + {%- endfor %} + {%- endfor %} + + + \ No newline at end of file diff --git a/qc/resources/qc_report_temp.html b/qc/resources/qc_report_temp.html new file mode 100644 index 0000000..689e7c5 --- /dev/null +++ b/qc/resources/qc_report_temp.html @@ -0,0 +1,6 @@ + + + + QC Reports + + \ No newline at end of file diff --git a/qc/resources/subject_html_temp.html b/qc/resources/subject_html_temp.html new file mode 100644 index 0000000..76c9894 --- /dev/null +++ b/qc/resources/subject_html_temp.html @@ -0,0 +1,13 @@ + + + + {{ subject }} - {{ sample }} - {{ acq }} + + + Back
+ Flatfield Correction QC
+ Whole Slice QC +
+ Volume Rendered Image + + \ No newline at end of file diff --git a/qc/resources/volViewer/cm_gray.png b/qc/resources/volViewer/cm_gray.png new file mode 100644 index 0000000000000000000000000000000000000000..8ac33886701247b22d8bf45c08e531336351ec4c GIT binary patch literal 75 zcmeAS@N?(olHy`uVBq!ia0y~yU<5K57&(}LLPCNJ>*9nZ aObm4gnf{A0MQ#AfGI+ZBxvXEp} z{b|~AuoC)9WX2AAba=u`zD(}kv-7+k>&KLT%17MLPtUXPUOC@c5qp09FoL+CJ@5S^ z`-+)z9WsICV^6by1s{1cJB~VI?7F}X6UL{^Tp6=gyKqECIcBpM)305|y}V(+uvR*B o*#)6X0)-{P!f0WzOt47IA706c4o3)EGynhq07*qoM6N<$f(1=v^8f$< literal 0 HcmV?d00001 diff --git a/qc/resources/volViewer/volRender.html b/qc/resources/volViewer/volRender.html new file mode 100644 index 0000000..1976f66 --- /dev/null +++ b/qc/resources/volViewer/volRender.html @@ -0,0 +1,22 @@ + + + + 3D Brain + + + + + + + Back +
+ + + \ No newline at end of file diff --git a/qc/resources/volViewer/volRenderScript.js b/qc/resources/volViewer/volRenderScript.js new file mode 100644 index 0000000..6410902 --- /dev/null +++ b/qc/resources/volViewer/volRenderScript.js @@ -0,0 +1,210 @@ +import * as THREE from 'three'; + +import { GUI } from 'three/addons/libs/lil-gui.module.min.js'; +import { OrbitControls } from 'three/addons/controls/OrbitControls.js'; +import { VolumeRenderShader1 } from 'three/addons/shaders/VolumeShader.js'; + +/* +Script is a reworked version of a threejs example +refined for array data stored in json format + +Copyright © 2010-2024 three.js authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE +*/ + +let renderer, + scene, + camera, + controls, + material, + volconfig, + cmtextures, + gui; + +let mesh; + +init(); + +function init() { + + scene = new THREE.Scene(); + // Create renderer + renderer = new THREE.WebGLRenderer(); + renderer.setPixelRatio( window.devicePixelRatio ); + renderer.setSize( window.innerWidth, window.innerHeight ); + document.body.appendChild( renderer.domElement ); + + // Create camera (The volume renderer does not work very well with perspective yet) + const h = 512; // frustum height + const aspect = window.innerWidth / window.innerHeight; + camera = new THREE.OrthographicCamera( - h * aspect / 2, h * aspect / 2, h / 2, - h / 2, 1, 1000 ); + camera.position.set( - 64, - 64, 128 ); + camera.up.set( 0, 0, 1 ); // In our data, z is up + + // Create controls + controls = new OrbitControls( camera, renderer.domElement ); + controls.addEventListener( 'change', render ); + controls.target.set( 64, 64, 128 ); + controls.minZoom = 0.5; + controls.maxZoom = 4; + controls.enablePan = false; + controls.update(); + + + + // The gui for interaction + volconfig = {channel: 0, cmax:30000, cmin: 500, clim1: 0, clim2: 1, renderstyle: 'iso', isothreshold: 0.05, colormap: 'viridis', channel: 0, zmax: 0,zmin: 0, ymax: 0, ymin: 0, xmax:0, xmin: 0 }; + gui = new GUI(); + gui.add( volconfig, 'clim1', 0, 1, 0.01 ).onChange(); + gui.add( volconfig, 'clim2', 0, 1, 0.01 ).onChange( updateUniforms ); + gui.add( volconfig, 'colormap', { gray: 'gray', viridis: 'viridis' } ).onChange( updateUniforms ); + gui.add( volconfig, 'renderstyle', { mip: 'mip', iso: 'iso' } ).onChange( updateUniforms ); + gui.add( volconfig, 'isothreshold', 0, 1, 0.01 ).onChange( updateUniforms ); + + // Load the 4D array from the json file produced + load(false) + + window.addEventListener( 'resize', onWindowResize ); + +} + +function load(refresh){ + 
new THREE.FileLoader().load( './volume_resources/volumeData.json', function ( volume ) { + volume = JSON.parse(volume)[volconfig.channel] + // get the length for each axes x,y,z; will only process one channel + let z_length = volume.length; + let y_length = volume[0].length; + let x_length = volume[0][0].length; + if(!refresh){ + volconfig.zmax = z_length; + volconfig.ymax = y_length; + volconfig.xmax = x_length; + gui.add(volconfig, 'channel', 0, 1, 1).onFinishChange(()=>load(true)); + gui.add(volconfig, 'cmax', 0, 30000, 10).onFinishChange(()=>load(true)); + gui.add(volconfig, 'cmin', 0, 30000, 10).onFinishChange(()=>load(true)); + gui.add(volconfig, 'zmax', 1, z_length, 1 ).onFinishChange( ()=>load(true) ); + gui.add(volconfig, 'zmin', 0, z_length, 1 ).onFinishChange( ()=>load(true) ) + gui.add(volconfig, 'ymax', 1, y_length, 1 ).onFinishChange( ()=>load(true) ); + gui.add(volconfig, 'ymin', 0, y_length, 1 ).onFinishChange( ()=>load(true) ); + gui.add(volconfig, 'xmax', 1, x_length, 1).onFinishChange(()=>load(true)); + gui.add(volconfig, 'xmin', 0, x_length, 1 ).onFinishChange( ()=>load(true) ); + } else { + scene.remove(mesh) + } + // create a new array to transform the array to 1D + let newData = new Float32Array(x_length*y_length*z_length); + + // loop through every data point in the array + for(let z=volconfig.zmin; z + + + Whole Slice Viewer + + + + Back + +
+

Chunk - {{ chunk_num }} Channel - {{ loop.index0 }}

+
+ +

Corrected

+

Slice-{{ image.slice }}

+
+ +

Uncorrected

+

Slice-{{ image.slice }}

+
+ + {%- for channel in channels %} + + + + {%- for slices in channel %} + + {%- for slice in slices %} + + {%- endfor %} + + {%- endfor %} + {%- endfor %} + +
+

Channel - {{ loop.index0 }}

+
+ +

Slice - {{ slice.slice }}

+
+ + + + \ No newline at end of file diff --git a/workflow/rules/common.smk b/workflow/rules/common.smk index 1fc8680..2d7617a 100644 --- a/workflow/rules/common.smk +++ b/workflow/rules/common.smk @@ -45,7 +45,7 @@ def get_all_targets(): if config["report"]["create_report"]: targets.extend( expand( - "qc_viewer/{subject}-{sample}-{acq}/volumeViewer/volumeData.json", + "qc/sub-{subject}_sample-{sample}_acq-{acq}/subject.html", subject=datasets.loc[i, "subject"], sample=datasets.loc[i, "sample"], acq=datasets.loc[i, "acq"], diff --git a/workflow/rules/import.smk b/workflow/rules/import.smk index c8c1008..7e2e2b0 100644 --- a/workflow/rules/import.smk +++ b/workflow/rules/import.smk @@ -171,3 +171,4 @@ rule tif_to_zarr: config["containers"]["spimprep"] script: "../scripts/tif_to_zarr.py" + diff --git a/workflow/rules/ome_zarr.smk b/workflow/rules/ome_zarr.smk index 12a1620..95caff2 100644 --- a/workflow/rules/ome_zarr.smk +++ b/workflow/rules/ome_zarr.smk @@ -154,49 +154,4 @@ rule ome_zarr_to_nii: container: config["containers"]["spimprep"] script: - "../scripts/ome_zarr_to_nii.py" - - -rule generate_report: - """Generate QC reports for workflow""" - input: - uncorr=bids( - root=work, - subject="{subject}", - datatype="micr", - sample="{sample}", - acq="{acq}", - desc="raw", - suffix="SPIM.zarr", - ), - corr=bids( - root=work, - subject="{subject}", - datatype="micr", - sample="{sample}", - acq="{acq}", - desc="flatcorr", - suffix="SPIM.zarr", - ), - ome=get_input_ome_zarr_to_nii() - params: - ff_s_start=config["report"]["flatfield_corrected"]["slice_start"], - ws_s_start=config["report"]["whole_slice_viewer"]["slice_start"], - ff_s_step=config["report"]["flatfield_corrected"]["slice_step"], - ff_cmap=config["report"]["flatfield_corrected"]["colour_map"], - ws_s_step=config["report"]["whole_slice_viewer"]["slice_step"], - ws_cmap=config["report"]["whole_slice_viewer"]["colour_map"], - output: - out="qc_viewer/{subject}-{sample}-{acq}/volumeViewer/volumeData.json", - log: - bids( - root="logs", - datatype="generate_report", - subject="{subject}", - sample="{sample}", - acq="{acq}", - suffix="log.txt", - ), - localrule: True - script: - "../scripts/create_viewer.py" + "../scripts/ome_zarr_to_nii.py" \ No newline at end of file diff --git a/workflow/rules/qc.smk b/workflow/rules/qc.smk index cd0094e..bc81fa4 100644 --- a/workflow/rules/qc.smk +++ b/workflow/rules/qc.smk @@ -1,4 +1,5 @@ rule generate_flatfield_qc: +"Generates an html file for comparing before and after flatfield correction" input: uncorr=bids( root=work, @@ -23,8 +24,9 @@ rule generate_flatfield_qc: ff_s_step=config["report"]["flatfield_corrected"]["slice_step"], ff_cmap=config["report"]["flatfield_corrected"]["colour_map"], output: - html='qc/sub-{subject}_sample-{sample}_acq-{acq}_flatfieldqc.html', - images_dir=directory('qc/images/sub-{subject}_sample-{sample}_acq-{acq}') + html='qc/sub-{subject}_sample-{sample}_acq-{acq}/flatfieldqc.html', + corr_images_dir=directory('qc/sub-{subject}_sample-{sample}_acq-{acq}/images/corr'), + uncorr_images_dir=directory('qc/sub-{subject}_sample-{sample}_acq-{acq}/images/uncorr'), log: bids( root="logs", @@ -37,4 +39,71 @@ rule generate_flatfield_qc: script: "../scripts/generate_flatfield_qc.py" +rule generate_whole_slice_qc: +"Generates an html file to view whole slices from preprocessed images" + input: + ome=get_input_ome_zarr_to_nii(), + params: + ws_s_start=config["report"]["whole_slice_viewer"]["slice_start"], + ws_s_step=config["report"]["whole_slice_viewer"]["slice_step"], + 
ws_cmap=config["report"]["whole_slice_viewer"]["colour_map"], + output: + html='qc/sub-{subject}_sample-{sample}_acq-{acq}/whole_slice_qc.html', + images_dir=directory('qc/sub-{subject}_sample-{sample}_acq-{acq}/images/whole'), + log: + bids( + root="logs", + datatype="generate_whole_slice_qc", + subject="{subject}", + sample="{sample}", + acq="{acq}", + suffix="log.txt", + ), + script: + "../scripts/generate_whole_slice_qc.py" + + +rule generate_volume_qc: +"Generates an html file to view the volume rendered image" + input: + ome=get_input_ome_zarr_to_nii(), + output: + resources=directory("qc/sub-{subject}_sample-{sample}_acq-{acq}/volume_resources"), + html="qc/sub-{subject}_sample-{sample}_acq-{acq}/volume_qc.html" + log: + bids( + root="logs", + datatype="generate_volume_qc", + subject="{subject}", + sample="{sample}", + acq="{acq}", + suffix="log.txt", + ), + script: + "../scripts/generate_volume_qc.py" + + +rule generate_subject_qc: +"Generates html files to access all the subjects qc reports in one place" + input: + ws_html="qc/sub-{subject}_sample-{sample}_acq-{acq}/whole_slice_qc.html", + ff_html="qc/sub-{subject}_sample-{sample}_acq-{acq}/flatfieldqc.html", + vol_html="qc/sub-{subject}_sample-{sample}_acq-{acq}/volume_qc.html", + output: + sub_html="qc/sub-{subject}_sample-{sample}_acq-{acq}/subject.html", + log: + bids( + root="logs", + datatype="generate_subject_qc", + subject="{subject}", + sample="{sample}", + acq="{acq}", + suffix="log.txt", + ), + script: + "../scripts/generate_subject_qc.py" + + + + diff --git a/workflow/scripts/generate_flatfield_qc.py b/workflow/scripts/generate_flatfield_qc.py index 41a8caf..d1a6200 100644 --- a/workflow/scripts/generate_flatfield_qc.py +++ b/workflow/scripts/generate_flatfield_qc.py @@ -1,107 +1,81 @@ +from jinja2 import Environment, FileSystemLoader import os -import math -import matplotlib.pyplot as plt -import numpy as np -from pathlib import Path from ome_zarr.io import parse_url from ome_zarr.reader import Reader +import matplotlib.pyplot as plt +import numpy as np +import math -# script for creating flatfield before/after qc snapshots and html +# load the html template from jinja +file_loader = FileSystemLoader(".") +env = Environment(loader=file_loader) +template = env.get_template("qc/resources/ff_html_temp.html") +# User set configurations +ff_s_start=snakemake.params.ff_s_start +ff_s_step=snakemake.params.ff_s_step +ff_cmap=snakemake.params.ff_cmap -slice_start=snakemake.params.ff_s_start -slice_step=snakemake.params.ff_s_step -colour=snakemake.params.ff_cmap +# input zarr files ff_corr = snakemake.input.corr ff_uncorr = snakemake.input.uncorr + +# output files out_html = snakemake.output.html -images_dir = snakemake.output.images_dir +corr_image_dir = snakemake.output.corr_images_dir +uncorr_image_dir = snakemake.output.uncorr_images_dir -#create output dir for images -Path(images_dir).mkdir(parents=True,exist_ok=True) +# Read the corrected and uncorrected ome-zarr data +proc_reader= Reader(parse_url(ff_corr)) +unproc_reader = Reader(parse_url(ff_uncorr)) +proc_data=list(proc_reader())[0].data +unproc_data = list(unproc_reader())[0].data +# create directories for corrected and uncorrected images +os.makedirs(corr_image_dir, exist_ok=True) +os.mkdir(uncorr_image_dir) -# Get corrected and uncorrected file paths -corr_zarr = corrected_path -uncorr_zarr = uncorrected_path -# Read the corrected and uncorrected ome-zarr data -proc_reader= Reader(parse_url(corr_zarr)) -unproc_reader = Reader(parse_url(uncorr_zarr)) +chunks = [] 
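Before the per-slice loop that follows, it may help to see the whole pattern in miniature: read the corrected and uncorrected OME-Zarr stores with the same Reader/parse_url calls used here, save matching slices with plt.imsave, collect them into the chunks -> channels -> slices structure, and hand that structure to a Jinja template. This is only a sketch under stated assumptions (hypothetical store paths, an assumed (tile, channel, z, y, x) axis order, an illustrative clip window and slice step, and a toy inline template standing in for qc/resources/ff_html_temp.html), not the script itself:

```python
# Minimal, self-contained sketch of the flatfield QC pattern.
# Assumptions (not taken from the patch): store paths, (tile, channel, z, y, x)
# axis order at level 0, the 0..30000 clip window, slice_start=0 / slice_step=20,
# and the toy inline template in place of ff_html_temp.html.
import os
import numpy as np
import matplotlib.pyplot as plt
from jinja2 import Template
from ome_zarr.io import parse_url
from ome_zarr.reader import Reader


def full_res_level(zarr_path):
    """Return the full-resolution (level 0) dask array of an OME-Zarr store."""
    return list(Reader(parse_url(zarr_path))())[0].data[0]


corr = full_res_level("corr.zarr")      # hypothetical corrected store
uncorr = full_res_level("uncorr.zarr")  # hypothetical uncorrected store

os.makedirs("images/corr", exist_ok=True)
os.makedirs("images/uncorr", exist_ok=True)

chunks = []
for chunk_idx, (tile_c, tile_u) in enumerate(zip(corr, uncorr)):
    channels = []
    for chan_idx, (chan_c, chan_u) in enumerate(zip(tile_c, tile_u)):
        slices = []
        for z in range(0, chan_c.shape[0], 20):  # slice_start=0, slice_step=20 (illustrative)
            img_corr = f"images/corr/chunk{chunk_idx}_chan{chan_idx}_z{z}.png"
            img_uncorr = f"images/uncorr/chunk{chunk_idx}_chan{chan_idx}_z{z}.png"
            # np.asarray pulls the 2D slice out of dask; the clip window is illustrative
            plt.imsave(img_corr, np.clip(np.asarray(chan_c[z]), 0, 30000), cmap="gray")
            plt.imsave(img_uncorr, np.clip(np.asarray(chan_u[z]), 0, 30000), cmap="gray")
            slices.append({"slice": z, "img_corr": img_corr, "img_uncorr": img_uncorr})
        channels.append(slices)
    chunks.append(channels)

# Toy stand-in for the Jinja template: one <img> pair per collected slice
template = Template(
    "{% for chunk in chunks %}{% for channel in chunk %}{% for image in channel %}"
    "<img src='{{ image.img_corr }}'><img src='{{ image.img_uncorr }}'>"
    "{% endfor %}{% endfor %}{% endfor %}"
)
with open("flatfieldqc_sketch.html", "w") as f:
    f.write(template.render(chunks=chunks))
```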
-proc_data=list(proc_reader())[0].data -unproc_data = list(unproc_reader())[0].data +# Create a list to store slices ordered by channel and tile +for chunk,(tile_corr, tile_uncorr) in enumerate(zip(proc_data[0], unproc_data[0])): + channels = [] + for chan_num, (channel_corr, channel_uncorr) in enumerate(zip(tile_corr, tile_uncorr)): + slice = ff_s_start + slices = [] + while(slice - -FlatField Correction Check - - - -Back - - - -

Before and After Flatfield Correction

-''') - - # Add images into the table for each chunk and channel - for chunk,(tile_corr, tile_uncorr) in enumerate(zip(proc_data[0], unproc_data[0])): - for chan_num, (channel_corr, channel_uncorr) in enumerate(zip(tile_corr, tile_uncorr)): - slice = slice_start - fw.write(f""" - - - """) - # Process every wanted slice within a chunk and channe; - while(slice - -

Corrected

-

Slice-{slice}

- -
''') - # Increase by user given slice step - slice += slice_step + # Save images + plt.imsave(corr_image_dir+"/"+corr_image_name, clipped_data_corr, cmap=ff_cmap) + plt.imsave(uncorr_image_dir+"/"+uncorr_image_name, clipped_data_uncorr, cmap=ff_cmap) - fw.write(" ") + # create an object to store key image info + image = {"slice": slice, "img_corr": corrected_img_path, "img_uncorr": uncorrected_img_path} + slices.append(image) + slice+=ff_s_step + channels.append(slices) + chunks.append(channels) - fw.write(""" - - -
-

Chunk - {chunk} Channel - {chan_num}

-
- -

Uncorrected

-

Slice-{slice}

-
- - """) +# pass the chunks array to the template to render the html +output = template.render(chunks=chunks, numColumns=3) +# Write out html file +with open(out_html, 'w') as f: + f.write(output) diff --git a/workflow/scripts/generate_subject_qc.py b/workflow/scripts/generate_subject_qc.py new file mode 100644 index 0000000..b7f7493 --- /dev/null +++ b/workflow/scripts/generate_subject_qc.py @@ -0,0 +1,48 @@ +from jinja2 import Environment, FileSystemLoader +import os.path as path + +# load jinja template +file_loader = FileSystemLoader(".") +env = Environment(loader=file_loader) +template = env.get_template("qc/resources/subject_html_temp.html") + +# input html files +ws_html = snakemake.input.ws_html +ff_html = snakemake.input.ff_html +vol_html = snakemake.input.vol_html + +# output html files +sub_html = snakemake.output.sub_html +total_html = "qc/qc_report.html" + +# Wildcards +subject = snakemake.wildcards.subject +sample = snakemake.wildcards.sample +acq = snakemake.wildcards.acq + +# Get relative path to the subjects QC htmls +ws_rel_path = path.relpath(path.dirname(sub_html), path.dirname(ws_html))+"/"+path.basename(ws_html) +ff_rel_path = path.relpath(path.dirname(sub_html), path.dirname(ff_html))+"/"+path.basename(ff_html) +vol_rel_path = path.relpath(path.dirname(sub_html), path.dirname(vol_html))+ "/" +path.basename(vol_html) + +# Fill in jinja template for subject html and write it out +output = template.render(back_link="../qc_report.html",subject=subject,sample=sample,acq=acq, + ffhtml=ff_rel_path,wshtml=ws_rel_path, volhtml=vol_rel_path) +with open(sub_html, 'w') as f: + f.write(output) + +# Create line to add link to subject into final qc report combining all subjects +sub_link = f'\n\t\t{subject}-{sample}-{acq}
' + +# if not first sample just add the one link +if(path.exists(total_html)): + with open(total_html,'a') as f: + f.write(sub_link) + +# if it is the first sample write out the template +else: + template = env.get_template("qc/resources/qc_report_temp.html") + output = template.render() + output+=sub_link + with open(total_html, 'w') as f: + f.write(output) diff --git a/workflow/scripts/generate_volume_qc.py b/workflow/scripts/generate_volume_qc.py new file mode 100644 index 0000000..7cb3bf5 --- /dev/null +++ b/workflow/scripts/generate_volume_qc.py @@ -0,0 +1,38 @@ +import json +import shutil +from pathlib import Path +from distutils.dir_util import copy_tree +from zarrnii import ZarrNii +import math + +# directory containing the volume rendering files +resource_dir = Path(snakemake.output.resources) +# where html file should be written +html_dest = snakemake.output.html + +# inputted ome-zarr path +ome_data = snakemake.input.ome + +# move volume renderer into the subjects directory +copy_tree("qc/resources/volViewer", str(resource_dir)) +shutil.move(resource_dir / "volRender.html", html_dest) + +# Get most downsampled ome-zarr image +ds_z = ZarrNii.from_path(ome_data,level=5, channels=[0,1]) +z_length = ds_z.darr.shape[1] + +# downsample it so it has at most 100 slices and ast least 50 slices in z-direction +if(z_length>50): + downsample_factor = math.floor(z_length/50) +else: + downsample_factor = 1 +ds_z.downsample(along_z=downsample_factor) + +# Write it to a JSON for js script to read +with open(resource_dir / "volumeData.json", 'w') as f: + json_data = json.dumps(ds_z.darr.compute().tolist()) + f.write(json_data) + + + + diff --git a/workflow/scripts/generate_whole_slice_qc.py b/workflow/scripts/generate_whole_slice_qc.py new file mode 100644 index 0000000..5967b98 --- /dev/null +++ b/workflow/scripts/generate_whole_slice_qc.py @@ -0,0 +1,65 @@ +import os +import math +from ome_zarr.io import parse_url +from ome_zarr.reader import Reader +import matplotlib.pyplot as plt +import numpy as np +from jinja2 import Environment, FileSystemLoader + +# load jinja html template +file_loader = FileSystemLoader(".") +env = Environment(loader=file_loader) +template = env.get_template("qc/resources/ws_html_temp.html") + +# user set configurations +ws_s_step=snakemake.params.ws_s_step +ws_s_start=snakemake.params.ws_s_start +ws_cmap=snakemake.params.ws_cmap + +# input ome-zarr file +ome= snakemake.input.ome + +# output paths +image_dir = snakemake.output.images_dir +out_html = snakemake.output.html + +# read ome-zarr data and convert to list +proc_reader= Reader(parse_url(ome)) +proc_data=list(proc_reader())[0].data + +os.makedirs(image_dir, exist_ok=True) + +channels = [] +# for each channel add the images of the most downsampled data +for chan_num, channel in enumerate(proc_data[-1]): + slice = ws_s_start + chan = [] + slices = [] + while(slice=len(channel): + chan.append(slices) + # full list containing all slices ordered by channel + channels.append(chan) + +# render the template and write out file +output = template.render(channels = channels) +with open(out_html, "w") as f: + f.write(output) From 40b1919f1e564142261064f0970884906e510b95 Mon Sep 17 00:00:00 2001 From: Benjamin Gros Date: Tue, 2 Jul 2024 12:13:39 -0400 Subject: [PATCH 15/18] Make linting changes --- workflow/Snakefile | 1 + workflow/rules/import.smk | 1 - workflow/rules/ome_zarr.smk | 2 +- workflow/rules/qc.smk | 66 +++++++++++++------------- workflow/scripts/generate_volume_qc.py | 2 +- 5 files changed, 37 insertions(+), 35 
deletions(-) diff --git a/workflow/Snakefile b/workflow/Snakefile index c6dbd76..56d7627 100644 --- a/workflow/Snakefile +++ b/workflow/Snakefile @@ -40,3 +40,4 @@ include: "rules/flatfield_corr.smk" include: "rules/bigstitcher.smk" include: "rules/ome_zarr.smk" include: "rules/bids.smk" +include: "rules/qc.smk" diff --git a/workflow/rules/import.smk b/workflow/rules/import.smk index 7e2e2b0..c8c1008 100644 --- a/workflow/rules/import.smk +++ b/workflow/rules/import.smk @@ -171,4 +171,3 @@ rule tif_to_zarr: config["containers"]["spimprep"] script: "../scripts/tif_to_zarr.py" - diff --git a/workflow/rules/ome_zarr.smk b/workflow/rules/ome_zarr.smk index 95caff2..f33fc9c 100644 --- a/workflow/rules/ome_zarr.smk +++ b/workflow/rules/ome_zarr.smk @@ -154,4 +154,4 @@ rule ome_zarr_to_nii: container: config["containers"]["spimprep"] script: - "../scripts/ome_zarr_to_nii.py" \ No newline at end of file + "../scripts/ome_zarr_to_nii.py" diff --git a/workflow/rules/qc.smk b/workflow/rules/qc.smk index bc81fa4..9e0c892 100644 --- a/workflow/rules/qc.smk +++ b/workflow/rules/qc.smk @@ -1,32 +1,36 @@ rule generate_flatfield_qc: -"Generates an html file for comparing before and after flatfield correction" + "Generates an html file for comparing before and after flatfield correction" input: uncorr=bids( - root=work, - subject="{subject}", - datatype="micr", - sample="{sample}", - acq="{acq}", - desc="raw", - suffix="SPIM.zarr", - ), + root=work, + subject="{subject}", + datatype="micr", + sample="{sample}", + acq="{acq}", + desc="raw", + suffix="SPIM.zarr", + ), corr=bids( - root=work, - subject="{subject}", - datatype="micr", - sample="{sample}", - acq="{acq}", - desc="flatcorr", - suffix="SPIM.zarr", - ), + root=work, + subject="{subject}", + datatype="micr", + sample="{sample}", + acq="{acq}", + desc="flatcorr", + suffix="SPIM.zarr", + ), params: ff_s_start=config["report"]["flatfield_corrected"]["slice_start"], ff_s_step=config["report"]["flatfield_corrected"]["slice_step"], ff_cmap=config["report"]["flatfield_corrected"]["colour_map"], output: - html='qc/sub-{subject}_sample-{sample}_acq-{acq}/flatfieldqc.html', - corr_images_dir=directory('qc/sub-{subject}_sample-{sample}_acq-{acq}/images/corr'), - uncorr_images_dir=directory('qc/sub-{subject}_sample-{sample}_acq-{acq}/images/uncorr'), + html="qc/sub-{subject}_sample-{sample}_acq-{acq}/flatfieldqc.html", + corr_images_dir=directory( + "qc/sub-{subject}_sample-{sample}_acq-{acq}/images/corr" + ), + uncorr_images_dir=directory( + "qc/sub-{subject}_sample-{sample}_acq-{acq}/images/uncorr" + ), log: bids( root="logs", @@ -39,8 +43,9 @@ rule generate_flatfield_qc: script: "../scripts/generate_flatfield_qc.py" + rule generate_whole_slice_qc: -"Generates an html file to view whole slices from preprocessed images" + "Generates an html file to view whole slices from preprocessed images" input: ome=get_input_ome_zarr_to_nii(), params: @@ -48,8 +53,8 @@ rule generate_whole_slice_qc: ws_s_step=config["report"]["whole_slice_viewer"]["slice_step"], ws_cmap=config["report"]["whole_slice_viewer"]["colour_map"], output: - html='qc/sub-{subject}_sample-{sample}_acq-{acq}/whole_slice_qc.html', - images_dir=directory('qc/sub-{subject}_sample-{sample}_acq-{acq}/images/whole'), + html="qc/sub-{subject}_sample-{sample}_acq-{acq}/whole_slice_qc.html", + images_dir=directory("qc/sub-{subject}_sample-{sample}_acq-{acq}/images/whole"), log: bids( root="logs", @@ -64,12 +69,14 @@ rule generate_whole_slice_qc: rule generate_volume_qc: -"Generates an html file to view the 
volume rendered image" + "Generates an html file to view the volume rendered image" input: ome=get_input_ome_zarr_to_nii(), output: - resources=directory("qc/sub-{subject}_sample-{sample}_acq-{acq}/volume_resources"), - html="qc/sub-{subject}_sample-{sample}_acq-{acq}/volume_qc.html" + resources=directory( + "qc/sub-{subject}_sample-{sample}_acq-{acq}/volume_resources" + ), + html="qc/sub-{subject}_sample-{sample}_acq-{acq}/volume_qc.html", log: bids( root="logs", @@ -84,7 +91,7 @@ rule generate_volume_qc: rule generate_subject_qc: -"Generates html files to access all the subjects qc reports in one place" + "Generates html files to access all the subjects qc reports in one place" input: ws_html="qc/sub-{subject}_sample-{sample}_acq-{acq}/whole_slice_qc.html", ff_html="qc/sub-{subject}_sample-{sample}_acq-{acq}/flatfieldqc.html", @@ -102,8 +109,3 @@ rule generate_subject_qc: ), script: "../scripts/generate_subject_qc.py" - - - - - diff --git a/workflow/scripts/generate_volume_qc.py b/workflow/scripts/generate_volume_qc.py index 7cb3bf5..5e0c69a 100644 --- a/workflow/scripts/generate_volume_qc.py +++ b/workflow/scripts/generate_volume_qc.py @@ -26,7 +26,7 @@ downsample_factor = math.floor(z_length/50) else: downsample_factor = 1 -ds_z.downsample(along_z=downsample_factor) +ds_z = ds_z.downsample(along_z=downsample_factor) # Write it to a JSON for js script to read with open(resource_dir / "volumeData.json", 'w') as f: From 2e40ea2b07d51137a7c439027ff9fe77b50b54c0 Mon Sep 17 00:00:00 2001 From: Benjamin Gros Date: Thu, 11 Jul 2024 10:56:32 -0400 Subject: [PATCH 16/18] Remove old qc_viewer --- qc_viewer/README.md | 14 -- .../resources/sliceViewer/image_expand.js | 58 ----- qc_viewer/resources/sliceViewer/style.css | 19 -- qc_viewer/resources/volumeViewer/cm_gray.png | Bin 75 -> 0 bytes .../resources/volumeViewer/cm_viridis.png | Bin 238 -> 0 bytes .../resources/volumeViewer/volRender.html | 22 -- .../resources/volumeViewer/volRenderScript.js | 213 ------------------ 7 files changed, 326 deletions(-) delete mode 100644 qc_viewer/README.md delete mode 100644 qc_viewer/resources/sliceViewer/image_expand.js delete mode 100644 qc_viewer/resources/sliceViewer/style.css delete mode 100644 qc_viewer/resources/volumeViewer/cm_gray.png delete mode 100644 qc_viewer/resources/volumeViewer/cm_viridis.png delete mode 100644 qc_viewer/resources/volumeViewer/volRender.html delete mode 100644 qc_viewer/resources/volumeViewer/volRenderScript.js diff --git a/qc_viewer/README.md b/qc_viewer/README.md deleted file mode 100644 index f584224..0000000 --- a/qc_viewer/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# QC Reports can be found here - -## To view move into this diretory - -```bash -cd qc_viewer -``` -## Then run the following command to open in the browser - -```bash -python -m http.server -``` - -## Open the link and navigate the links to view the reports for each subject \ No newline at end of file diff --git a/qc_viewer/resources/sliceViewer/image_expand.js b/qc_viewer/resources/sliceViewer/image_expand.js deleted file mode 100644 index 7453926..0000000 --- a/qc_viewer/resources/sliceViewer/image_expand.js +++ /dev/null @@ -1,58 +0,0 @@ -// get every image, expandButtonToggle and the expand factor -const images = document.querySelectorAll('img'); -const expandButton = document.getElementById("expand") -const expand_factor = document.getElementById("expand_scale") -let expansion_scale = 1 -let expanded = false; - -// create a function to expand images when clicked on -function handleImageClick(event){ - // 
get target image and the scaling factor - const image = event.target - expansion_scale = expand_factor.value; - - // expand image by the scaling factor if not already expanded - if(!expanded){ - image.style.transform = `scale(${expansion_scale})` - image.style.zIndex = 1 - - // if it is going to expand off screen shift to the right - const leftDistance = image.getBoundingClientRect().left; - if(leftDistance < 0){ - image.style.transform = `translateX(${Math.abs(leftDistance)+10}px) scale(${expansion_scale})`; - } - - expanded=true - - } else { - // scale images back to original size - image.style.transform = "scale(1)" - image.style.position = 'relative' - image.style.zIndex=0 - expanded=false - } -} - -// Enables or disables the ability to expand images on click -expandButton.addEventListener('change', ()=>{ - - // add listener to enable expansion - if(expandButton.checked){ - images.forEach(image => { - image.addEventListener('click', handleImageClick) - }) - - // ensure images expand properly - expand_factor.style.display='inline'; - - } else { - - //remove listener to disable expansion - images.forEach(image => { - image.removeEventListener('click', handleImageClick) - image.style.transform = 'scale(1)' - }) - - expand_factor.style.display = 'none' - } -}) diff --git a/qc_viewer/resources/sliceViewer/style.css b/qc_viewer/resources/sliceViewer/style.css deleted file mode 100644 index f12e3d6..0000000 --- a/qc_viewer/resources/sliceViewer/style.css +++ /dev/null @@ -1,19 +0,0 @@ -img { - height: auto; - width:200px; - z-index: 0; - position: relative; -} - -table { - margin-left: auto; - margin-right: auto; -} - -.expand-options { - float: right; -} - -#expand_scale { - display: none; -} \ No newline at end of file diff --git a/qc_viewer/resources/volumeViewer/cm_gray.png b/qc_viewer/resources/volumeViewer/cm_gray.png deleted file mode 100644 index 8ac33886701247b22d8bf45c08e531336351ec4c..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 75 zcmeAS@N?(olHy`uVBq!ia0y~yU<5K57&(}LLPCNJ>*9nZ aObm4gnf{A0MQ#AfGI+ZBxvXEp} z{b|~AuoC)9WX2AAba=u`zD(}kv-7+k>&KLT%17MLPtUXPUOC@c5qp09FoL+CJ@5S^ z`-+)z9WsICV^6by1s{1cJB~VI?7F}X6UL{^Tp6=gyKqECIcBpM)305|y}V(+uvR*B o*#)6X0)-{P!f0WzOt47IA706c4o3)EGynhq07*qoM6N<$f(1=v^8f$< diff --git a/qc_viewer/resources/volumeViewer/volRender.html b/qc_viewer/resources/volumeViewer/volRender.html deleted file mode 100644 index 40fc65a..0000000 --- a/qc_viewer/resources/volumeViewer/volRender.html +++ /dev/null @@ -1,22 +0,0 @@ - - - - 3D Brain - - - - - - - Back -
- - - \ No newline at end of file diff --git a/qc_viewer/resources/volumeViewer/volRenderScript.js b/qc_viewer/resources/volumeViewer/volRenderScript.js deleted file mode 100644 index ed3ea39..0000000 --- a/qc_viewer/resources/volumeViewer/volRenderScript.js +++ /dev/null @@ -1,213 +0,0 @@ -import * as THREE from 'three'; - -import { GUI } from 'three/addons/libs/lil-gui.module.min.js'; -import { OrbitControls } from 'three/addons/controls/OrbitControls.js'; -import { VolumeRenderShader1 } from 'three/addons/shaders/VolumeShader.js'; -import { VRButton } from 'three/addons/webxr/VRButton.js'; - -/* -Script is a reworked version of a threejs example -refined for array data stored in json format - -Copyright © 2010-2024 three.js authors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE -*/ - -let renderer, - scene, - camera, - controls, - material, - volconfig, - cmtextures, - gui; - -let mesh; - -init(); - -function init() { - - scene = new THREE.Scene(); - // Create renderer - renderer = new THREE.WebGLRenderer(); - renderer.setPixelRatio( window.devicePixelRatio ); - renderer.setSize( window.innerWidth, window.innerHeight ); - renderer.xr.enabled = true; - document.body.appendChild( renderer.domElement ); - document.body.appendChild(VRButton.createButton(renderer)) - - // Create camera (The volume renderer does not work very well with perspective yet) - const h = 512; // frustum height - const aspect = window.innerWidth / window.innerHeight; - camera = new THREE.OrthographicCamera( - h * aspect / 2, h * aspect / 2, h / 2, - h / 2, 1, 1000 ); - camera.position.set( - 64, - 64, 128 ); - camera.up.set( 0, 0, 1 ); // In our data, z is up - - // Create controls - controls = new OrbitControls( camera, renderer.domElement ); - controls.addEventListener( 'change', render ); - controls.target.set( 64, 64, 128 ); - controls.minZoom = 0.5; - controls.maxZoom = 4; - controls.enablePan = false; - controls.update(); - - - - // The gui for interaction - volconfig = {channel: 0, cmax:30000, cmin: 500, clim1: 0, clim2: 1, renderstyle: 'iso', isothreshold: 0.05, colormap: 'viridis', channel: 0, zmax: 0,zmin: 0, ymax: 0, ymin: 0, xmax:0, xmin: 0 }; - gui = new GUI(); - gui.add( volconfig, 'clim1', 0, 1, 0.01 ).onChange(); - gui.add( volconfig, 'clim2', 0, 1, 0.01 ).onChange( updateUniforms ); - gui.add( volconfig, 'colormap', { gray: 'gray', viridis: 'viridis' } ).onChange( updateUniforms ); - gui.add( volconfig, 'renderstyle', { mip: 'mip', iso: 'iso' } ).onChange( updateUniforms ); - gui.add( volconfig, 'isothreshold', 0, 1, 0.01 
).onChange( updateUniforms ); - - // Load the 4D array from the json file produced - load(false) - - window.addEventListener( 'resize', onWindowResize ); - -} - -function load(refresh){ - new THREE.FileLoader().load( 'volumeData.json', function ( volume ) { - volume = JSON.parse(volume)[volconfig.channel] - // get the length for each axes x,y,z; will only process one channel - let z_length = volume.length; - let y_length = volume[0].length; - let x_length = volume[0][0].length; - if(!refresh){ - volconfig.zmax = z_length; - volconfig.ymax = y_length; - volconfig.xmax = x_length; - gui.add(volconfig, 'channel', 0, 1, 1).onFinishChange(()=>load(true)); - gui.add(volconfig, 'cmax', 0, 30000, 10).onFinishChange(()=>load(true)); - gui.add(volconfig, 'cmin', 0, 30000, 10).onFinishChange(()=>load(true)); - gui.add(volconfig, 'zmax', 1, z_length, 1 ).onFinishChange( ()=>load(true) ); - gui.add(volconfig, 'zmin', 0, z_length, 1 ).onFinishChange( ()=>load(true) ) - gui.add(volconfig, 'ymax', 1, y_length, 1 ).onFinishChange( ()=>load(true) ); - gui.add(volconfig, 'ymin', 0, y_length, 1 ).onFinishChange( ()=>load(true) ); - gui.add(volconfig, 'xmax', 1, x_length, 1).onFinishChange(()=>load(true)); - gui.add(volconfig, 'xmin', 0, x_length, 1 ).onFinishChange( ()=>load(true) ); - } else { - scene.remove(mesh) - } - // create a new array to transform the array to 1D - let newData = new Float32Array(x_length*y_length*z_length); - - // loop through every data point in the array - for(let z=volconfig.zmin; z Date: Mon, 15 Jul 2024 10:10:06 -0400 Subject: [PATCH 17/18] Add a readme for opening up generated qc reports, and change default configurations for the reports --- config/config.yml | 8 ++++---- qc/README.md | 19 +++++++++++++++++++ testing/create_test_dataset.py | 4 ++++ 3 files changed, 27 insertions(+), 4 deletions(-) create mode 100644 qc/README.md diff --git a/config/config.yml b/config/config.yml index 3211680..13900cc 100644 --- a/config/config.yml +++ b/config/config.yml @@ -130,12 +130,12 @@ report: create_report: True flatfield_corrected: slice_start: 0 - slice_step: 10 # Shows every nth slice - colour_map: gray + slice_step: 50 # Shows every nth slice + colour_map: viridis whole_slice_viewer: slice_start: 0 - slice_step: 10 - colour_map: gray + slice_step: 50 + colour_map: viridis containers: diff --git a/qc/README.md b/qc/README.md new file mode 100644 index 0000000..400b91e --- /dev/null +++ b/qc/README.md @@ -0,0 +1,19 @@ +# How to view QC reports + +### 1: Run the workflow with personalized report generation configurations + +### 2: Navigate to qc directory: + +```bash +cd ./qc +``` +### 3: Create a Python web server: + +```bash +python -m http.server +``` +### 4: Open the link generated in a browser and open qc_report.html + + +### Note: + Can view the whole slice images and the flatfield corrections without running the web server. However, to view the volume rendered brain, the web server is required. 
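The web-server requirement presumably comes from the browser-side volume viewer, which fetches the generated `volumeData.json` over HTTP; most browsers block such requests from pages opened directly via `file://`, whereas the static slice images embedded in the HTML reports display either way. A minimal sketch of an equivalent server, assuming Python 3.7+ (the workflow itself only relies on the `python -m http.server` command shown above; the port and the `qc` directory argument are illustrative choices):

```python
# Minimal sketch: serve the qc/ directory over HTTP so the browser-side volume
# viewer can fetch volumeData.json (equivalent to running `python -m http.server`
# from inside qc/). Requires Python 3.7+ for the `directory` argument.
import functools
import http.server
import socketserver

PORT = 8000  # arbitrary port choice
Handler = functools.partial(http.server.SimpleHTTPRequestHandler, directory="qc")

with socketserver.TCPServer(("", PORT), Handler) as httpd:
    print(f"Serving qc/ at http://localhost:{PORT}")
    httpd.serve_forever()
```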
\ No newline at end of file diff --git a/testing/create_test_dataset.py b/testing/create_test_dataset.py index 5953b89..249fee2 100644 --- a/testing/create_test_dataset.py +++ b/testing/create_test_dataset.py @@ -142,6 +142,10 @@ def make_tar(members, output): def create_test_subset_hybrid(path_to_source_tar:Annotated[str, typer.Argument(help="ex: dir1/dir2/dataset.tar")], path_to_output_tar:Annotated[str, typer.Argument(help="ex: dir1/dir2/tes_dataset.tar")], slice_step: int=20, x_start: int=0,num_x: int=3,y_start: int=0,num_y: int=3): + """ + Creates a subset of a raw microscopy dataset consisting of tiff files stored within a tar file to be able to test the workflow more efficiently. + Can break the dataset up based on a slice step and picking the tiles within a slice. + """ members, wanted_slices, wanted_pairs = get_members_hybrid(path_to_source_tar,slice_step, x_start,y_start, num_x, num_y) correct_metadata_tile(members, wanted_slices, wanted_pairs) make_tar(members, path_to_output_tar) From de91331e0c3a4904b970953912619448eafd841b Mon Sep 17 00:00:00 2001 From: Benjamin Gros Date: Wed, 17 Jul 2024 08:56:20 -0400 Subject: [PATCH 18/18] Remove old script --- workflow/scripts/create_viewer.py | 296 ------------------------------ 1 file changed, 296 deletions(-) delete mode 100644 workflow/scripts/create_viewer.py diff --git a/workflow/scripts/create_viewer.py b/workflow/scripts/create_viewer.py deleted file mode 100644 index b09fb38..0000000 --- a/workflow/scripts/create_viewer.py +++ /dev/null @@ -1,296 +0,0 @@ -import os -import json -import math -from distutils.dir_util import copy_tree -from ome_zarr.io import parse_url -from ome_zarr.reader import Reader -import matplotlib.pyplot as plt -import numpy as np - -# arguments for creating the flatfield correction comparisons -ff_s_start=snakemake.params.ff_s_start -ff_s_step=snakemake.params.ff_s_step -ff_cmap=snakemake.params.ff_cmap -ff_corr = snakemake.input.corr -ff_uncorr = snakemake.input.uncorr - -# arguments for creating whole slice images -ws_s_step=snakemake.params.ws_s_step -ws_s_start=snakemake.params.ws_s_start -ws_cmap=snakemake.params.ws_cmap -ome_zarr= snakemake.input.ome - -# Get output files -output = snakemake.output.out -out_dir = output.split("/")[1] - -def make_directories(): - ''' - This function produces a directory for the subject to be able to view - the proper images - ''' - try: - os.mkdir(f"qc_viewer/{out_dir}") - except: - pass - - #copy viewers into their respective directory - copy_tree("qc_viewer/resources/volumeViewer", f"qc_viewer/{out_dir}/volumeViewer") - copy_tree("qc_viewer/resources/sliceViewer", f"qc_viewer/{out_dir}/sliceViewer") - -def produce_ff_images(corrected_path, uncorrected_path, colour="gray", slice_start=0, slice_step=1): - ''' - This function produces an html file containing images for each tile before and after flatfield correction - It can be decided how many slices are wanted. 
- ''' - - # Get corrected and uncorrected file paths - corr_zarr = corrected_path - uncorr_zarr = uncorrected_path - - # Read the corrected and uncorrected ome-zarr data - proc_reader= Reader(parse_url(corr_zarr)) - unproc_reader = Reader(parse_url(uncorr_zarr)) - - proc_data=list(proc_reader())[0].data - unproc_data = list(unproc_reader())[0].data - - # create a root folder to store the corrected and uncorrected images - root = "root" - make_directories() - os.makedirs(f"qc_viewer/{out_dir}/sliceViewer/{root}/images/corr", exist_ok=True) - os.mkdir(f"qc_viewer/{out_dir}/sliceViewer/{root}/images/uncorr") - - # Create html file for flatfield corrected images - with open(f"qc_viewer/{out_dir}/sliceViewer/ff_corr.html", 'w') as fw: - fw.write(f''' - - - FlatField Correction Check - - - - Back - - - -

Before and After Flatfield Correction

-''') - - # Add images into the table for each chunk and channel - for chunk,(tile_corr, tile_uncorr) in enumerate(zip(proc_data[0], unproc_data[0])): - for chan_num, (channel_corr, channel_uncorr) in enumerate(zip(tile_corr, tile_uncorr)): - slice = slice_start - fw.write(f""" - - - """) - # Process every wanted slice within a chunk and channe; - while(slice - -

Corrected

-

Slice-{slice}

- -
''') - # Increase by user given slice step - slice += slice_step - - fw.write(" ") - - fw.write(""" - - -
-

Chunk - {chunk} Channel - {chan_num}

-
- -

Uncorrected

-

Slice-{slice}

-
- - """) - - -def produce_whole_slice_images(proc_path, colour="gray", slice_start=0, slice_step=1): - ''' - This function produces an html page containing images of each slice for all channels. - The slice step can be chosen in the config to choose how many slices you want to view - ''' - - # read ome-zarr data and convert to list - proc_reader= Reader(parse_url(proc_path)) - proc_data=list(proc_reader())[0].data - - # dump the list into a json to be volume rendered - produce_json(proc_data) - - # create another root directory to hold the fully preprocessed slice images - root = "root" - os.makedirs(f"qc_viewer/{out_dir}/sliceViewer/{root}/images", exist_ok=True) - - # create html page for the whole slices - with open(f"qc_viewer/{out_dir}/sliceViewer/whole_slices.html", 'w') as fw: - fw.write(''' - - - Processed Slices - - - - Back - - - -

Processed Image Slices

-''') - # for each channel add the images of the most downsampled data - for chan_num, channel in enumerate(proc_data[-1]): - slice = slice_start - fw.write(f""" - - - """) - num_images = 0 - while(slice - - -""" - else: - new_html_text = f""" - -""" - fw.write(new_html_text) - slice += slice_step - num_images+=1 - - # end html table - fw.write(""" - - -
-

Channel - {chan_num}

-
- -

Slice-{slice}

-
- -

Slice-{slice}

-
- - - """) - -def produce_json(data): - ''' - Produces a json file containing the most downsampled - image data to be volume rendered into a 3D image. - ''' - - with open(f"qc_viewer/{out_dir}/volumeViewer/volumeData.json", 'w') as f: - data = np.array(data[-1]).tolist() - data = json.dumps(data) - f.write(data) - - -def combine_sample_htmls(ffcorr_html, proc_html): - ''' - Produces and index.html page connecting the two image reports as well as - the 3D volume rendering page - ''' - - with open(f'qc_viewer/{out_dir}/index.html', 'w') as f: - f.write(f""" - - - Processed Slices - - - - Back -

{out_dir.split("-")[0]}

- Flatfield Correction Before and After -
- Full Processed Slices -
- 3D Image - - """) - - -def create_main_html(): - """ - This function creates an html file connecting all the samples viewers together. - If the file is empty it will produce the header and if it is not it just adds - another sampel link - """ - - file="qc_viewer/index.html" - - with open(file, 'a') as f: - if(os.path.getsize(file) <= 20): - f.write(f""" - - - Sample Check - - - -

Subject Reports

- {out_dir.split('-')[0]} -
- """) - else: - f.write(f""" - {out_dir.split('-')[0]} -
- """) - - - -produce_ff_images(ff_corr, ff_uncorr, slice_start=ff_s_start, slice_step=ff_s_step, - colour=ff_cmap) -produce_whole_slice_images(ome_zarr, slice_start=ws_s_start, slice_step=ws_s_step, - colour=ws_cmap) -combine_sample_htmls("ff_corr.html", "whole_slices.html") -create_main_html() - - -
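A closing note on the volume QC data path: the removed `create_viewer.py` wrote the most downsampled resolution level to `volumeData.json` via `json.dumps(np.array(data[-1]).tolist())`, and that step is now handled by `workflow/scripts/generate_volume_qc.py`, where the earlier `ds_z = ds_z.downsample(along_z=downsample_factor)` change matters because the downsample call apparently returns a new object rather than modifying `ds_z` in place. A minimal sketch of the same pattern with plain NumPy (illustrative only; the real script operates on OME-Zarr-backed data, and `downsample_along_z` below is a hypothetical helper):

```python
# Illustrative sketch with plain NumPy: strided downsampling along z returns a
# new array, so the result must be reassigned -- forgetting to capture the
# return value would leave `volume` unchanged, mirroring the bug fixed above.
import json
import math
import numpy as np

def downsample_along_z(volume: np.ndarray, target_slices: int = 50) -> np.ndarray:
    """Keep roughly `target_slices` slices along z by striding; input is not mutated."""
    z_length = volume.shape[0]
    factor = math.floor(z_length / target_slices) if z_length > target_slices else 1
    return volume[::factor]

volume = np.random.rand(200, 64, 64).astype(np.float32)
volume = downsample_along_z(volume)  # reassign, as in the generate_volume_qc.py fix
with open("volumeData.json", "w") as f:
    json.dump(volume.tolist(), f)  # read later by the browser-side volume viewer
```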