diff --git a/config/config.yml b/config/config.yml index 7b09a08..13900cc 100644 --- a/config/config.yml +++ b/config/config.yml @@ -126,8 +126,17 @@ bids: readme_md: resources/bids_template_files/README.md samples_json: resources/bids_template_files/samples.json +report: + create_report: True + flatfield_corrected: + slice_start: 0 + slice_step: 50 # Shows every nth slice + colour_map: viridis + whole_slice_viewer: + slice_start: 0 + slice_step: 50 + colour_map: viridis - containers: spimprep: 'docker://khanlab/spimprep-deps:main' diff --git a/poetry.lock b/poetry.lock index ec7cf8e..eb7df57 100644 --- a/poetry.lock +++ b/poetry.lock @@ -916,6 +916,31 @@ files = [ docs = ["mdx-gh-links (>=0.2)", "mkdocs (>=1.5)", "mkdocs-gen-files", "mkdocs-literate-nav", "mkdocs-nature (>=0.6)", "mkdocs-section-index", "mkdocstrings[python]"] testing = ["coverage", "pyyaml"] +[[package]] +name = "markdown-it-py" +version = "3.0.0" +description = "Python port of markdown-it. Markdown parsing, done right!" +category = "dev" +optional = false +python-versions = ">=3.8" +files = [ + {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"}, + {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"}, +] + +[package.dependencies] +mdurl = ">=0.1,<1.0" + +[package.extras] +benchmarking = ["psutil", "pytest", "pytest-benchmark"] +code-style = ["pre-commit (>=3.0,<4.0)"] +compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] +linkify = ["linkify-it-py (>=1,<3)"] +plugins = ["mdit-py-plugins"] +profiling = ["gprof2dot"] +rtd = ["jupyter_sphinx", "mdit-py-plugins", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] +testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] + [[package]] name = "markupsafe" version = "2.1.5" @@ -986,6 +1011,18 @@ files = [ {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"}, ] +[[package]] +name = "mdurl" +version = "0.1.2" +description = "Markdown URL utilities" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, + {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, +] + [[package]] name = "mergedeep" version = "1.3.4" @@ -1979,6 +2016,25 @@ files = [ {file = "reretry-0.11.8.tar.gz", hash = "sha256:f2791fcebe512ea2f1d153a2874778523a8064860b591cd90afc21a8bed432e3"}, ] +[[package]] +name = "rich" +version = "13.7.1" +description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" +category = "dev" +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "rich-13.7.1-py3-none-any.whl", hash = "sha256:4edbae314f59eb482f54e9e30bf00d33350aaa94f4bfcd4e9e3110e64d0d7222"}, + {file = "rich-13.7.1.tar.gz", hash = "sha256:9be308cb1fe2f1f57d67ce99e95af38a1e2bc71ad9813b0e247cf7ffbcc3a432"}, +] + +[package.dependencies] +markdown-it-py = ">=2.2.0" +pygments = ">=2.13.0,<3.0.0" + +[package.extras] +jupyter = ["ipywidgets (>=7.5.1,<9)"] + [[package]] name = "rpds-py" version = "0.19.0" @@ -2210,6 +2266,18 @@ dev = ["cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy (==1.10.0)", "pycodest doc = 
["jupyterlite-pyodide-kernel", "jupyterlite-sphinx (>=0.13.1)", "jupytext", "matplotlib (>=3.5)", "myst-nb", "numpydoc", "pooch", "pydata-sphinx-theme (>=0.15.2)", "sphinx (>=5.0.0)", "sphinx-design (>=0.4.0)"] test = ["Cython", "array-api-strict", "asv", "gmpy2", "hypothesis (>=6.30)", "meson", "mpmath", "ninja", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] +[[package]] +name = "shellingham" +version = "1.5.4" +description = "Tool to Detect Surrounding Shell" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686"}, + {file = "shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de"}, +] + [[package]] name = "simplejson" version = "3.19.2" @@ -2676,6 +2744,24 @@ files = [ [package.extras] dev = ["aiohttp (>=3.8)", "codecov (>=2.1)", "flake8 (>=4.0)", "pytest (>=7.0)", "pytest-asyncio (>=0.16)", "pytest-cov (>=3.0)"] +[[package]] +name = "tifffile" +version = "2024.7.2" +description = "Read and write TIFF files" +category = "dev" +optional = false +python-versions = ">=3.9" +files = [ + {file = "tifffile-2024.7.2-py3-none-any.whl", hash = "sha256:5a2ee608c9cc1f2e044d943dacebddc71d4827b6fad150ef4c644b7aefbe2d1a"}, + {file = "tifffile-2024.7.2.tar.gz", hash = "sha256:02e52e8872c0e9943add686d2fd8bcfb18f0a824760882cf5e35fcbc2c80e32c"}, +] + +[package.dependencies] +numpy = "*" + +[package.extras] +all = ["defusedxml", "fsspec", "imagecodecs (>=2023.8.12)", "lxml", "matplotlib", "zarr"] + [[package]] name = "toml" version = "0.10.2" @@ -2716,6 +2802,24 @@ files = [ docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0,<8.2)", "pytest-mock", "pytest-mypy-testing"] +[[package]] +name = "typer" +version = "0.12.3" +description = "Typer, build great CLIs. Easy to code. Based on Python type hints." 
+category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "typer-0.12.3-py3-none-any.whl", hash = "sha256:070d7ca53f785acbccba8e7d28b08dcd88f79f1fbda035ade0aecec71ca5c914"}, + {file = "typer-0.12.3.tar.gz", hash = "sha256:49e73131481d804288ef62598d97a1ceef3058905aa536a1134f90891ba35482"}, +] + +[package.dependencies] +click = ">=8.0.0" +rich = ">=10.11.0" +shellingham = ">=1.3.0" +typing-extensions = ">=3.7.4.3" + [[package]] name = "types-python-dateutil" version = "2.9.0.20240316" @@ -2922,6 +3026,18 @@ files = [ {file = "wrapt-1.16.0.tar.gz", hash = "sha256:5f370f952971e7d17c7d1ead40e49f32345a7f7a5373571ef44d800d06b1899d"}, ] +[[package]] +name = "xmltodict" +version = "0.13.0" +description = "Makes working with XML feel like you are working with JSON" +category = "dev" +optional = false +python-versions = ">=3.4" +files = [ + {file = "xmltodict-0.13.0-py2.py3-none-any.whl", hash = "sha256:aa89e8fd76320154a40d19a0df04a4695fb9dc5ba977cbb68ab3e4eb225e7852"}, + {file = "xmltodict-0.13.0.tar.gz", hash = "sha256:341595a488e3e01a85a9d8911d8912fd922ede5fecc4dce437eb4b6c8d037e56"}, +] + [[package]] name = "yte" version = "1.5.4" @@ -2942,4 +3058,4 @@ pyyaml = ">=6.0,<7.0" [metadata] lock-version = "2.0" python-versions = ">=3.11,<3.13" -content-hash = "bba28d4f048bacd653e85de35c9f58fb62c14cc0cf70b8a3e8bc8acea80fec83" +content-hash = "80683e0966ab5325030e4b26b27a5001d2981916c6e40a5882f004ae56202c7b" diff --git a/pyproject.toml b/pyproject.toml index 0621f17..261abd5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,6 +16,12 @@ snakefmt = "^0.10.0" mkdocs-material = "^9.5.20" mkdocs-include-markdown-plugin = "^6.0.6" + +[tool.poetry.group.dataset_creation.dependencies] +typer = "^0.12.3" +xmltodict = "^0.13.0" +tifffile = "^2024.5.10" + [build-system] requires = ["poetry-core"] build-backend = "poetry.core.masonry.api" diff --git a/qc/README.md b/qc/README.md new file mode 100644 index 0000000..400b91e --- /dev/null +++ b/qc/README.md @@ -0,0 +1,19 @@ +# How to view QC reports + +### 1: Run the workflow with personalized report generation configurations + +### 2: Navigate to qc directory: + +```bash +cd ./qc +``` +### 3: Create a Python web server: + +```bash +python -m http.server +``` +### 4: Open the link generated in a browser and open qc_report.html + + +### Note: + Can view the whole slice images and the flatfield corrections without running the web server. However, to view the volume rendered brain, the web server is required. \ No newline at end of file diff --git a/qc/resources/ff_html_temp.html b/qc/resources/ff_html_temp.html new file mode 100644 index 0000000..658b26d --- /dev/null +++ b/qc/resources/ff_html_temp.html @@ -0,0 +1,120 @@ + + + FlatField Correction Check + + + + Back + + + + {%- for chunk in chunks %} + {%- set chunk_num = loop.index0 %} + {%- for channel in chunk %} + + + + + {%- for image in channel %} + + + {%- endfor %} + + {%- endfor %} + {%- endfor %} + + + \ No newline at end of file diff --git a/qc/resources/qc_report_temp.html b/qc/resources/qc_report_temp.html new file mode 100644 index 0000000..689e7c5 --- /dev/null +++ b/qc/resources/qc_report_temp.html @@ -0,0 +1,6 @@ + + + + QC Reports + + \ No newline at end of file diff --git a/qc/resources/subject_html_temp.html b/qc/resources/subject_html_temp.html new file mode 100644 index 0000000..76c9894 --- /dev/null +++ b/qc/resources/subject_html_temp.html @@ -0,0 +1,13 @@ + + + + {{ subject }} - {{ sample }} - {{ acq }} + + + Back
+ <a href="./flatfieldqc.html">Flatfield Correction QC</a><br>
+ <a href="./whole_slice_qc.html">Whole Slice QC</a><br>
+ <a href="./volume_qc.html">Volume Rendered Image</a>
\ No newline at end of file
diff --git a/qc/resources/volViewer/cm_gray.png b/qc/resources/volViewer/cm_gray.png
new file mode 100644
index 0000000..8ac3388
Binary files /dev/null and b/qc/resources/volViewer/cm_gray.png differ
diff --git a/qc/resources/volViewer/cm_viridis.png b/qc/resources/volViewer/cm_viridis.png
new file mode 100644
index 0000000..80df96b
Binary files /dev/null and b/qc/resources/volViewer/cm_viridis.png differ
diff --git a/qc/resources/volViewer/volRender.html b/qc/resources/volViewer/volRender.html
new file mode 100644
index 0000000..1976f66
--- /dev/null
+++ b/qc/resources/volViewer/volRender.html
@@ -0,0 +1,22 @@
+ 3D Brain
+ Back
+ + + \ No newline at end of file diff --git a/qc/resources/volViewer/volRenderScript.js b/qc/resources/volViewer/volRenderScript.js new file mode 100644 index 0000000..6410902 --- /dev/null +++ b/qc/resources/volViewer/volRenderScript.js @@ -0,0 +1,210 @@ +import * as THREE from 'three'; + +import { GUI } from 'three/addons/libs/lil-gui.module.min.js'; +import { OrbitControls } from 'three/addons/controls/OrbitControls.js'; +import { VolumeRenderShader1 } from 'three/addons/shaders/VolumeShader.js'; + +/* +Script is a reworked version of a threejs example +refined for array data stored in json format + +Copyright © 2010-2024 three.js authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE +*/ + +let renderer, + scene, + camera, + controls, + material, + volconfig, + cmtextures, + gui; + +let mesh; + +init(); + +function init() { + + scene = new THREE.Scene(); + // Create renderer + renderer = new THREE.WebGLRenderer(); + renderer.setPixelRatio( window.devicePixelRatio ); + renderer.setSize( window.innerWidth, window.innerHeight ); + document.body.appendChild( renderer.domElement ); + + // Create camera (The volume renderer does not work very well with perspective yet) + const h = 512; // frustum height + const aspect = window.innerWidth / window.innerHeight; + camera = new THREE.OrthographicCamera( - h * aspect / 2, h * aspect / 2, h / 2, - h / 2, 1, 1000 ); + camera.position.set( - 64, - 64, 128 ); + camera.up.set( 0, 0, 1 ); // In our data, z is up + + // Create controls + controls = new OrbitControls( camera, renderer.domElement ); + controls.addEventListener( 'change', render ); + controls.target.set( 64, 64, 128 ); + controls.minZoom = 0.5; + controls.maxZoom = 4; + controls.enablePan = false; + controls.update(); + + + + // The gui for interaction + volconfig = {channel: 0, cmax:30000, cmin: 500, clim1: 0, clim2: 1, renderstyle: 'iso', isothreshold: 0.05, colormap: 'viridis', channel: 0, zmax: 0,zmin: 0, ymax: 0, ymin: 0, xmax:0, xmin: 0 }; + gui = new GUI(); + gui.add( volconfig, 'clim1', 0, 1, 0.01 ).onChange(); + gui.add( volconfig, 'clim2', 0, 1, 0.01 ).onChange( updateUniforms ); + gui.add( volconfig, 'colormap', { gray: 'gray', viridis: 'viridis' } ).onChange( updateUniforms ); + gui.add( volconfig, 'renderstyle', { mip: 'mip', iso: 'iso' } ).onChange( updateUniforms ); + gui.add( volconfig, 'isothreshold', 0, 1, 0.01 ).onChange( updateUniforms ); + + // Load the 4D array from the json file produced + load(false) + + window.addEventListener( 'resize', onWindowResize ); + +} + +function load(refresh){ + 
new THREE.FileLoader().load( './volume_resources/volumeData.json', function ( volume ) { + volume = JSON.parse(volume)[volconfig.channel] + // get the length for each axes x,y,z; will only process one channel + let z_length = volume.length; + let y_length = volume[0].length; + let x_length = volume[0][0].length; + if(!refresh){ + volconfig.zmax = z_length; + volconfig.ymax = y_length; + volconfig.xmax = x_length; + gui.add(volconfig, 'channel', 0, 1, 1).onFinishChange(()=>load(true)); + gui.add(volconfig, 'cmax', 0, 30000, 10).onFinishChange(()=>load(true)); + gui.add(volconfig, 'cmin', 0, 30000, 10).onFinishChange(()=>load(true)); + gui.add(volconfig, 'zmax', 1, z_length, 1 ).onFinishChange( ()=>load(true) ); + gui.add(volconfig, 'zmin', 0, z_length, 1 ).onFinishChange( ()=>load(true) ) + gui.add(volconfig, 'ymax', 1, y_length, 1 ).onFinishChange( ()=>load(true) ); + gui.add(volconfig, 'ymin', 0, y_length, 1 ).onFinishChange( ()=>load(true) ); + gui.add(volconfig, 'xmax', 1, x_length, 1).onFinishChange(()=>load(true)); + gui.add(volconfig, 'xmin', 0, x_length, 1 ).onFinishChange( ()=>load(true) ); + } else { + scene.remove(mesh) + } + // create a new array to transform the array to 1D + let newData = new Float32Array(x_length*y_length*z_length); + + // loop through every data point in the array + for(let z=volconfig.zmin; z + + + Whole Slice Viewer + + + + Back + +
+ Chunk - {{ chunk_num }} Channel - {{ loop.index0 }}
+ Corrected
+ Slice-{{ image.slice }}
+ Uncorrected
+ Slice-{{ image.slice }}
+ {%- for channel in channels %}
+ {%- for slices in channel %}
+ {%- for slice in slices %}
+ {%- endfor %}
+ {%- endfor %}
+ {%- endfor %}
+ Channel {{ loop.index0 }}
+ slice - {{ slice.slice }}
\ No newline at end of file
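The markup of the two QC templates above survives only as Jinja fragments. As a rough sketch of how `ws_html_temp.html` is likely structured and rendered: the table markup and the `img_path` key are assumptions, while the channels -> rows -> slices nesting and the `slice.slice` field come from the fragments and from `generate_whole_slice_qc.py` below.

```python
# A sketch only: the real ws_html_temp.html markup was not recoverable here,
# so the <table> layout and the `img_path` key are assumptions. The
# channels -> rows -> slices nesting and `slice.slice` match the fragments.
from jinja2 import Template

template = Template(
    """\
<table>
  {%- for channel in channels %}
  <tr><th>Channel {{ loop.index0 }}</th></tr>
  {%- for slices in channel %}
  <tr>
    {%- for slice in slices %}
    <td><img src="{{ slice.img_path }}"><br>slice - {{ slice.slice }}</td>
    {%- endfor %}
  </tr>
  {%- endfor %}
  {%- endfor %}
</table>
"""
)

# one channel holding a single row of two slices
channels = [[[{"slice": 0, "img_path": "images/whole/chan-0_slice-0.png"},
              {"slice": 50, "img_path": "images/whole/chan-0_slice-50.png"}]]]
print(template.render(channels=channels))
```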
diff --git a/testing/create_test_dataset.py b/testing/create_test_dataset.py
new file mode 100644
index 0000000..249fee2
--- /dev/null
+++ b/testing/create_test_dataset.py
@@ -0,0 +1,155 @@
+import os
+import tarfile
+import tifffile
+import xmltodict
+import typer
+from typing_extensions import Annotated
+
+app = typer.Typer()
+
+def get_members_hybrid(source_dir, step, start_tile_x, start_tile_y, num_tiles_x, num_tiles_y):
+    """
+    Opens the given dataset tarfile and extracts only the needed members,
+    based on the slice step and the tiles chosen by the user.
+    """
+    members_wanted = []
+    pairs_wanted = []
+    slices_wanted = []
+
+    # get list of all wanted pairs
+    for numx in range(start_tile_x, start_tile_x + num_tiles_x):
+        numx = "0" + str(numx)
+        for numy in range(start_tile_y, start_tile_y + num_tiles_y):
+            numy = "0" + str(numy)
+            pair = (numx, numy)
+            pairs_wanted.append(pair)
+
+    with tarfile.open(source_dir, 'r') as tar:
+        members = tar.getmembers()
+        slice = 0
+
+        # add candidate slice numbers to a list
+        # (overshoots the real slice count; trimmed to max_slice below)
+        while slice < len(members):
+            slices_wanted.append(slice)
+            slice += step
+        max_slice = 0
+        for member in members:
+            # get the pair and the slice of each member in the tarfile
+            grid = member.name.split("[")[1][:7]
+            x_num = grid.split(" x ")[0]
+            y_num = grid.split(" x ")[1]
+            pair = (x_num, y_num)
+            slice = int(member.name.split("Z")[1][:4])
+
+            # find max slice
+            if slice > max_slice:
+                max_slice = slice
+
+            # extract the correct tiff files from the tar file
+            if pair in pairs_wanted and slice in slices_wanted:
+                members_wanted.append(member)
+                tar.extract(member)
+
+    # Remove slices that are greater than the max slice
+    slices_wanted = [slice for slice in slices_wanted if slice <= max_slice]
+    return members_wanted, slices_wanted, pairs_wanted
+
+
+def correct_metadata_tile(members, slices_wanted, pairs_wanted):
+    """
+    Goes through each member of the given tar file; for the 0th slice,
+    which contains all the data and metadata, it adjusts the metadata to
+    the configuration set: which slices should be in the new dataset and
+    which tiles to include.
+    """
+    for member in members:
+        # get the slice and the channel for each member
+        member_slice = int(member.name.split("Z")[1][:4])
+        channel = int(member.name.split("C")[1][:2])
+        if member_slice == 0:
+            # load the tiff file and access its metadata
+            with tifffile.TiffFile(member.name) as tif:
+                # update data to only include the file's own data
+                data = tif.series[0].asarray()
+                data = data[channel][member_slice][:][:]
+                meta_dict = xmltodict.parse(tif.ome_metadata)
+
+            # Set to the correct number of slices
+            meta_dict['OME']['Image']['Pixels']['@SizeZ'] = len(list(slices_wanted))
+
+            # Remove excess tile configurations from metadata based on tiles
+            tile_files = meta_dict['OME']['Image']['ca:CustomAttributes']['TileConfiguration']['@TileConfiguration'].split(" ")[1:]
+            proper_files = []
+            for file in tile_files:
+                grid = file.split("[")[1][:7]
+                x_num = grid.split(" x ")[0]
+                y_num = grid.split(" x ")[1]
+                pair = (x_num, y_num)
+                if pair in pairs_wanted:
+                    proper_files.append(file)
+            new_tile_config = "4"
+            for file in proper_files:
+                new_tile_config += " " + file
+            meta_dict['OME']['Image']['ca:CustomAttributes']['TileConfiguration']['@TileConfiguration'] = new_tile_config
+
+            # Remove excess tile configurations from metadata based on slice
+            tile_files = meta_dict['OME']['Image']['ca:CustomAttributes']['TileConfiguration']['@TileConfiguration'].split(" ")[1:]
+            proper_files = []
+            for file in tile_files:
+                if int(file.split("Z")[1][:4]) in slices_wanted:
+                    proper_files.append(file)
+            new_tile_config = "4"
+            for file in proper_files:
+                new_tile_config += " " + file
+            meta_dict['OME']['Image']['ca:CustomAttributes']['TileConfiguration']['@TileConfiguration'] = new_tile_config
+
+            # Remove excess TiffData from metadata
+            tiff_data = meta_dict['OME']['Image']['Pixels']['TiffData']
+            new_tiff_data = []
+            for single_data in tiff_data:
+                filename = single_data['UUID']['@FileName']
+                grid = filename.split("[")[1][:7]
+                x_num = grid.split(" x ")[0]
+                y_num = grid.split(" x ")[1]
+                pair = (x_num, y_num)
+                if pair in pairs_wanted:
+                    new_tiff_data.append(single_data)
+            meta_dict['OME']['Image']['Pixels']['TiffData'] = new_tiff_data
+            tiff_data = meta_dict['OME']['Image']['Pixels']['TiffData']
+            new_tiff_data = []
+            for single_data in tiff_data:
+                if int(single_data['@FirstZ']) in list(slices_wanted):
+                    new_tiff_data.append(single_data)
+            meta_dict['OME']['Image']['Pixels']['TiffData'] = new_tiff_data
+
+            # write out the new metadata and adjusted data
+            new_description = xmltodict.unparse(meta_dict)
+            new_description = new_description.encode("UTF-8")
+            with tifffile.TiffWriter(member.name) as tw:
+                tw.write(data, description=new_description, metadata=None, planarconfig="CONTIG")
+
+def make_tar(members, output):
+    """
+    Opens a new tarfile at the specified output, adds all the members that
+    were selected from the main dataset, and removes the local tiff files produced.
+    """
+    with tarfile.open(output, 'w') as tar:
+        for member in members:
+            tar.add(member.name, arcname=member.name)
+            os.remove(member.name)
+
+@app.command()
+def create_test_subset_hybrid(path_to_source_tar: Annotated[str, typer.Argument(help="ex: dir1/dir2/dataset.tar")],
+                              path_to_output_tar: Annotated[str, typer.Argument(help="ex: dir1/dir2/test_dataset.tar")],
+                              slice_step: int = 20, x_start: int = 0, num_x: int = 3, y_start: int = 0, num_y: int = 3):
+    """
+    Creates a subset of a raw microscopy dataset consisting
of tiff files stored within a tar file to be able to test the workflow more efficiently. + Can break the dataset up based on a slice step and picking the tiles within a slice. + """ + members, wanted_slices, wanted_pairs = get_members_hybrid(path_to_source_tar,slice_step, x_start,y_start, num_x, num_y) + correct_metadata_tile(members, wanted_slices, wanted_pairs) + make_tar(members, path_to_output_tar) + + +if __name__ == "__main__": + app() \ No newline at end of file diff --git a/workflow/Snakefile b/workflow/Snakefile index c6dbd76..56d7627 100644 --- a/workflow/Snakefile +++ b/workflow/Snakefile @@ -40,3 +40,4 @@ include: "rules/flatfield_corr.smk" include: "rules/bigstitcher.smk" include: "rules/ome_zarr.smk" include: "rules/bids.smk" +include: "rules/qc.smk" diff --git a/workflow/rules/common.smk b/workflow/rules/common.smk index bb26469..cb257ad 100644 --- a/workflow/rules/common.smk +++ b/workflow/rules/common.smk @@ -42,7 +42,15 @@ def get_all_targets(): stain=get_stains_by_row(i), ) ) - + if config["report"]["create_report"]: + targets.extend( + expand( + "qc/sub-{subject}_sample-{sample}_acq-{acq}/subject.html", + subject=datasets.loc[i, "subject"], + sample=datasets.loc[i, "sample"], + acq=datasets.loc[i, "acq"], + ) + ) return targets diff --git a/workflow/rules/qc.smk b/workflow/rules/qc.smk new file mode 100644 index 0000000..9e0c892 --- /dev/null +++ b/workflow/rules/qc.smk @@ -0,0 +1,111 @@ +rule generate_flatfield_qc: + "Generates an html file for comparing before and after flatfield correction" + input: + uncorr=bids( + root=work, + subject="{subject}", + datatype="micr", + sample="{sample}", + acq="{acq}", + desc="raw", + suffix="SPIM.zarr", + ), + corr=bids( + root=work, + subject="{subject}", + datatype="micr", + sample="{sample}", + acq="{acq}", + desc="flatcorr", + suffix="SPIM.zarr", + ), + params: + ff_s_start=config["report"]["flatfield_corrected"]["slice_start"], + ff_s_step=config["report"]["flatfield_corrected"]["slice_step"], + ff_cmap=config["report"]["flatfield_corrected"]["colour_map"], + output: + html="qc/sub-{subject}_sample-{sample}_acq-{acq}/flatfieldqc.html", + corr_images_dir=directory( + "qc/sub-{subject}_sample-{sample}_acq-{acq}/images/corr" + ), + uncorr_images_dir=directory( + "qc/sub-{subject}_sample-{sample}_acq-{acq}/images/uncorr" + ), + log: + bids( + root="logs", + datatype="generate_flatfield_qc", + subject="{subject}", + sample="{sample}", + acq="{acq}", + suffix="log.txt", + ), + script: + "../scripts/generate_flatfield_qc.py" + + +rule generate_whole_slice_qc: + "Generates an html file to view whole slices from preprocessed images" + input: + ome=get_input_ome_zarr_to_nii(), + params: + ws_s_start=config["report"]["whole_slice_viewer"]["slice_start"], + ws_s_step=config["report"]["whole_slice_viewer"]["slice_step"], + ws_cmap=config["report"]["whole_slice_viewer"]["colour_map"], + output: + html="qc/sub-{subject}_sample-{sample}_acq-{acq}/whole_slice_qc.html", + images_dir=directory("qc/sub-{subject}_sample-{sample}_acq-{acq}/images/whole"), + log: + bids( + root="logs", + datatype="generate_whole_slice_qc", + subject="{subject}", + sample="{sample}", + acq="{acq}", + suffix="log.txt", + ), + script: + "../scripts/generate_whole_slice_qc.py" + + +rule generate_volume_qc: + "Generates an html file to view the volume rendered image" + input: + ome=get_input_ome_zarr_to_nii(), + output: + resources=directory( + "qc/sub-{subject}_sample-{sample}_acq-{acq}/volume_resources" + ), + 
html="qc/sub-{subject}_sample-{sample}_acq-{acq}/volume_qc.html", + log: + bids( + root="logs", + datatype="generate_volume_qc", + subject="{subject}", + sample="{sample}", + acq="{acq}", + suffix="log.txt", + ), + script: + "../scripts/generate_volume_qc.py" + + +rule generate_subject_qc: + "Generates html files to access all the subjects qc reports in one place" + input: + ws_html="qc/sub-{subject}_sample-{sample}_acq-{acq}/whole_slice_qc.html", + ff_html="qc/sub-{subject}_sample-{sample}_acq-{acq}/flatfieldqc.html", + vol_html="qc/sub-{subject}_sample-{sample}_acq-{acq}/volume_qc.html", + output: + sub_html="qc/sub-{subject}_sample-{sample}_acq-{acq}/subject.html", + log: + bids( + root="logs", + datatype="generate_subject_qc", + subject="{subject}", + sample="{sample}", + acq="{acq}", + suffix="log.txt", + ), + script: + "../scripts/generate_subject_qc.py" diff --git a/workflow/scripts/generate_flatfield_qc.py b/workflow/scripts/generate_flatfield_qc.py new file mode 100644 index 0000000..d1a6200 --- /dev/null +++ b/workflow/scripts/generate_flatfield_qc.py @@ -0,0 +1,81 @@ +from jinja2 import Environment, FileSystemLoader +import os +from ome_zarr.io import parse_url +from ome_zarr.reader import Reader +import matplotlib.pyplot as plt +import numpy as np +import math + +# load the html template from jinja +file_loader = FileSystemLoader(".") +env = Environment(loader=file_loader) +template = env.get_template("qc/resources/ff_html_temp.html") + +# User set configurations +ff_s_start=snakemake.params.ff_s_start +ff_s_step=snakemake.params.ff_s_step +ff_cmap=snakemake.params.ff_cmap + +# input zarr files +ff_corr = snakemake.input.corr +ff_uncorr = snakemake.input.uncorr + +# output files +out_html = snakemake.output.html +corr_image_dir = snakemake.output.corr_images_dir +uncorr_image_dir = snakemake.output.uncorr_images_dir + +# Read the corrected and uncorrected ome-zarr data +proc_reader= Reader(parse_url(ff_corr)) +unproc_reader = Reader(parse_url(ff_uncorr)) +proc_data=list(proc_reader())[0].data +unproc_data = list(unproc_reader())[0].data + +# create directories for corrected and uncorrected images +os.makedirs(corr_image_dir, exist_ok=True) +os.mkdir(uncorr_image_dir) + + +chunks = [] + +# Create a list to store slices ordered by channel and tile +for chunk,(tile_corr, tile_uncorr) in enumerate(zip(proc_data[0], unproc_data[0])): + channels = [] + for chan_num, (channel_corr, channel_uncorr) in enumerate(zip(tile_corr, tile_uncorr)): + slice = ff_s_start + slices = [] + while(slice{subject}-{sample}-{acq}
'
+
+# if not the first sample, just append the one link
+if path.exists(total_html):
+    with open(total_html, 'a') as f:
+        f.write(sub_link)
+
+# if it is the first sample, write out the template
+else:
+    template = env.get_template("qc/resources/qc_report_temp.html")
+    output = template.render()
+    output += sub_link
+    with open(total_html, 'w') as f:
+        f.write(output)
diff --git a/workflow/scripts/generate_volume_qc.py b/workflow/scripts/generate_volume_qc.py
new file mode 100644
index 0000000..5e0c69a
--- /dev/null
+++ b/workflow/scripts/generate_volume_qc.py
@@ -0,0 +1,38 @@
+import json
+import math
+import shutil
+from pathlib import Path
+from zarrnii import ZarrNii
+
+# directory containing the volume rendering files
+resource_dir = Path(snakemake.output.resources)
+# where the html file should be written
+html_dest = snakemake.output.html
+
+# input ome-zarr path
+ome_data = snakemake.input.ome
+
+# move the volume renderer into the subject's directory
+# (shutil.copytree replaces distutils.dir_util.copy_tree, which was
+# removed in Python 3.12; dirs_exist_ok handles the pre-created output dir)
+shutil.copytree("qc/resources/volViewer", resource_dir, dirs_exist_ok=True)
+shutil.move(resource_dir / "volRender.html", html_dest)
+
+# Get the most downsampled ome-zarr image
+ds_z = ZarrNii.from_path(ome_data, level=5, channels=[0, 1])
+z_length = ds_z.darr.shape[1]
+
+# downsample so there are at most ~100 and at least 50 slices in the z-direction
+if z_length > 50:
+    downsample_factor = math.floor(z_length / 50)
+else:
+    downsample_factor = 1
+ds_z = ds_z.downsample(along_z=downsample_factor)
+
+# Write it to a JSON file for the js script to read
+with open(resource_dir / "volumeData.json", 'w') as f:
+    json_data = json.dumps(ds_z.darr.compute().tolist())
+    f.write(json_data)
diff --git a/workflow/scripts/generate_whole_slice_qc.py b/workflow/scripts/generate_whole_slice_qc.py
new file mode 100644
index 0000000..5967b98
--- /dev/null
+++ b/workflow/scripts/generate_whole_slice_qc.py
@@ -0,0 +1,65 @@
+import os
+import math
+from ome_zarr.io import parse_url
+from ome_zarr.reader import Reader
+import matplotlib.pyplot as plt
+import numpy as np
+from jinja2 import Environment, FileSystemLoader
+
+# load the jinja html template
+file_loader = FileSystemLoader(".")
+env = Environment(loader=file_loader)
+template = env.get_template("qc/resources/ws_html_temp.html")
+
+# user set configurations
+ws_s_step = snakemake.params.ws_s_step
+ws_s_start = snakemake.params.ws_s_start
+ws_cmap = snakemake.params.ws_cmap
+
+# input ome-zarr file
+ome = snakemake.input.ome
+
+# output paths
+image_dir = snakemake.output.images_dir
+out_html = snakemake.output.html
+
+# read ome-zarr data and convert to list
+proc_reader = Reader(parse_url(ome))
+proc_data = list(proc_reader())[0].data
+
+os.makedirs(image_dir, exist_ok=True)
+
+channels = []
+# for each channel add the images of the most downsampled data
+for chan_num, channel in enumerate(proc_data[-1]):
+    slice = ws_s_start
+    chan = []
+    slices = []
+    while slice < len(channel):
+        # save the current slice as an image and record it for the template
+        # NOTE: the loop body was garbled in this diff; the filename and
+        # dict keys below are assumed and must match ws_html_temp.html
+        img_name = f"chan-{chan_num}_slice-{slice}.png"
+        plt.imsave(os.path.join(image_dir, img_name), np.asarray(channel[slice]), cmap=ws_cmap)
+        slices.append({"slice": slice, "img_path": f"images/whole/{img_name}"})
+        slice += ws_s_step
+    if slice >= len(channel):
+        chan.append(slices)
+    # full list containing all slices ordered by channel
+    channels.append(chan)
+
+# render the template and write out the file
+output = template.render(channels=channels)
+with open(out_html, "w") as f:
+    f.write(output)
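For reference, a hedged usage sketch for the new `testing/create_test_dataset.py` CLI, invoked in-process with Typer's test runner. The tar paths are placeholders, and option names follow Typer's underscore-to-dash convention for the parameters defined above.

```python
# Usage sketch for the subsetting CLI, run in-process with Typer's test
# runner. The tar paths are placeholders, and the import assumes the
# script's directory is on sys.path.
from typer.testing import CliRunner

from create_test_dataset import app

runner = CliRunner()
result = runner.invoke(
    app,
    [
        "dir1/dir2/dataset.tar",
        "dir1/dir2/test_dataset.tar",
        "--slice-step", "20",
        "--x-start", "0", "--num-x", "3",
        "--y-start", "0", "--num-y", "3",
    ],
)
print(result.output)
```

From a shell, the equivalent would be `python testing/create_test_dataset.py dataset.tar test_dataset.tar --slice-step 20`.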