diff --git a/.github/workflows/test_and_deploy.yml b/.github/workflows/test_and_deploy.yml
index 03e5d4a..ba29221 100644
--- a/.github/workflows/test_and_deploy.yml
+++ b/.github/workflows/test_and_deploy.yml
@@ -7,7 +7,6 @@ on:
   push:
     branches:
       - main
-      - dev-v.0.2
     tags:
       - "v*" # Push events to matching v*, i.e. v1.0, v20.15.10
   pull_request:
@@ -22,7 +21,7 @@ jobs:
     strategy:
       matrix:
         platform: [ubuntu-latest, windows-latest, macos-latest]
-        python-version: [3.8, 3.9, "3.10"]
+        python-version: ["3.10", 3.9]
     env:
       DISPLAY: ':99.0'
     steps:
@@ -47,24 +46,21 @@ jobs:
       - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
-          python -m pip install pytest pytest-cookies tox
+          python -m pip install pytest pytest-qt pytest-cookies
+          pip install -e ".[testing]"
+          pip install "mmcv<2.2.0,>=2.0.0rc4" --find-links https://download.openmmlab.com/mmcv/dist/${{ matrix.python-version }}/torch2.4.0/cpu

-      # this runs the platform-specific tests declared in tox.ini
-
      - name: Test
        uses: aganders3/headless-gui@v1
        with:
          run: |
-            pip install -e ".[testing]"
            python -m pytest -s -v --color=yes
-        env:
-          PLATFORM: ${{ matrix.platform }}

  deploy:
    # this will run when you have tagged a commit, starting with "v*"
    # and requires that you have put your twine API key in your
    # github secrets (see readme for details)
-    needs: [test]
+    # needs: [test]
    runs-on: ubuntu-latest
    if: contains(github.ref, 'tags')
    steps:
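The new dependency step pins `mmcv` below 2.2.0 and pulls a prebuilt wheel matching the matrix Python version and a CPU build of torch 2.4.0. As a quick local sanity check outside CI (a hypothetical snippet, not part of the workflow; it assumes the `packaging` library is installed):

```python
# Hypothetical check that the installed mmcv satisfies the CI pin
# "mmcv<2.2.0,>=2.0.0rc4" used in the workflow step above.
from packaging.version import Version

import mmcv

installed = Version(mmcv.__version__)
assert Version("2.0.0rc4") <= installed < Version("2.2.0"), f"unexpected mmcv {installed}"
print(f"mmcv {installed} is within the pinned range")
```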
diff --git a/.napari/DESCRIPTION.md b/.napari/DESCRIPTION.md
index 3ca73c6..da6cb33 100644
--- a/.napari/DESCRIPTION.md
+++ b/.napari/DESCRIPTION.md
@@ -1,12 +1,12 @@
 ## Description

-A napari plugin to automatically count lung organoids from microscopy imaging data. A Faster R-CNN model was trained on patches of microscopy data. Model inference is run using a sliding window approach, with a 50% overlap and the option for predicting on multiple window sizes and scales, the results of which are then merged using NMS.
+A napari plugin to automatically count lung organoids from microscopy imaging data. Several deep learning (DL) object detection models were trained on patches of 2D microscopy data. Model inference is run using a sliding window approach with 50% overlap and the option of predicting at multiple window sizes and scales; the results are then merged using NMS.

 ![Alt Text](https://github.com/HelmholtzAI-Consultants-Munich/napari-organoid-counter/blob/main/readme-content/demo-plugin-v2.gif)

 ## What's new in v2?
 Here is a list of the main changes v2 of napari-organoid-counter offers:

-* Use of Faster R-CNN model for object detection
+* Use of DL models for object detection - pretrained models: Faster R-CNN, YOLOv3, SSD, and RTMDet. The data used for training these models, along with the training code, can be found [here](https://www.kaggle.com/datasets/christinabukas/mutliorg).
 * Pyramid model inference with a sliding window approach and tunable parameters for window size and window downsampling rate
 * Model confidence added as tunable parameter
 * Allow to load and correct existing annotations (note: these must have been saved previously from v2 of this plugin)
@@ -20,16 +20,21 @@ Technical Extensions:

 ## Installation

-You can install `napari-organoid-counter` via [pip](https://pypi.org/project/napari-organoid-counter/):
+This plugin has been tested with Python 3.9 and 3.10 - you may want to use conda to create a dedicated environment before installing `napari-organoid-counter`.

-    pip install napari-organoid-counter
+1. You can install `napari-organoid-counter` via [pip](https://pypi.org/project/napari-organoid-counter/):
+
+    ```pip install napari-organoid-counter```

-To install latest development version :
+    To install the latest development version:

-    pip install git+https://github.com/HelmholtzAI-Consultants-Munich/napari-organoid-counter.git
+    ```pip install git+https://github.com/HelmholtzAI-Consultants-Munich/napari-organoid-counter.git```

-For installing on a Windows machine via napari, follow the instuctions [here](https://github.com/HelmholtzAI-Consultants-Munich/napari-organoid-counter/blob/main/readme-content/How%20to%20install%20on%20a%20Windows%20machine.pdf).
+2. You will then need to install one additional dependency:
+
+    ```mim install "mmcv<2.2.0,>=2.0.0rc4"```
+
+To install on a Windows machine directly from within napari, follow the instructions [here](https://github.com/HelmholtzAI-Consultants-Munich/napari-organoid-counter/blob/main/readme-content/How%20to%20install%20on%20a%20Windows%20machine.pdf). Step 2 (```mim install "mmcv<2.2.0,>=2.0.0rc4"```) needs to be performed in this case too.

 ## Quickstart
@@ -69,6 +74,11 @@ This plugin has been developed and tested with 2D CZI microscopy images of lunch

 [2] Eva Maxfield Brown, Talley Lambert, Peter Sobolewski, Napari-AICSImageIO Contributors (2021). Napari-AICSImageIO: Image Reading in Napari using AICSImageIO [Computer software]. GitHub. https://github.com/AllenCellModeling/napari-aicsimageio

+The latest version also uses models developed with the `mmdetection` package [3]; see [here](https://github.com/open-mmlab/mmdetection).
+
+[3] Chen, Kai, et al. "MMDetection: Open MMLab Detection Toolbox and Benchmark." arXiv preprint arXiv:1906.07155 (2019).
+
+
 ## How to Cite

 If you use this plugin for your work, please cite it using the following:
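The inference scheme described above - sliding window with 50% overlap, optional multiple window sizes, NMS merging - can be summarized in a short sketch. This is illustrative only, not plugin code: `detect` is a hypothetical stand-in for any detector returning `(boxes, scores)` in xyxy order for one crop.

```python
# Illustrative sketch of sliding-window inference with 50% overlap + NMS merging.
import torch
from torchvision.ops import nms  # the same NMS primitive the plugin uses

def sliding_window_predict(img, detect, window_size, iou_thresh=0.5):
    step = window_size // 2  # 50% overlap between neighbouring windows
    boxes_list, scores_list = [], []
    h, w = img.shape[-2:]
    for i in range(0, h, step):
        for j in range(0, w, step):
            boxes, scores = detect(img[..., i:i + window_size, j:j + window_size])
            if len(boxes) == 0:
                continue
            # shift window-local xyxy boxes into full-image coordinates
            boxes_list.append(boxes + torch.tensor([j, i, j, i]))
            scores_list.append(scores)
    if not boxes_list:
        return torch.empty(0, 4), torch.empty(0)
    boxes, scores = torch.cat(boxes_list), torch.cat(scores_list)
    keep = nms(boxes, scores, iou_thresh)  # merge overlapping detections
    return boxes[keep], scores[keep]
```

Running the same loop for several window sizes and pooling all boxes before the final `nms` call gives the multi-scale behaviour the plugin exposes as tunable parameters.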
diff --git a/README.md b/README.md
index 4489697..b316267 100644
--- a/README.md
+++ b/README.md
@@ -1,13 +1,14 @@
 # Napari Organoid Counter - Version 0.2 is out!
+[![napari hub](https://img.shields.io/endpoint?url=https://api.napari-hub.org/shields/napari-organoid-counter)](https://napari-hub.org/plugins/napari-organoid-counter)
 ![stability-stable](https://img.shields.io/badge/stability-stable-green.svg)
 [![DOI](https://zenodo.org/badge/476715320.svg)](https://zenodo.org/badge/latestdoi/476715320)
 [![License](https://img.shields.io/pypi/l/napari-organoid-counter.svg?color=green)](https://github.com/HelmholtzAI-Consultants-Munich/napari-organoid-counter/raw/main/LICENSE)
 [![PyPI](https://img.shields.io/pypi/v/napari-organoid-counter.svg?color=green)](https://pypi.org/project/napari-organoid-counter)
-[![Python Version](https://img.shields.io/badge/python-3.8%20%7C%203.9%20%7C%203.10-blue)](https://python.org)
+[![Python Version](https://img.shields.io/badge/python-3.9%20%7C%203.10-blue)](https://python.org)
 [![tests](https://github.com/HelmholtzAI-Consultants-Munich/napari-organoid-counter/workflows/tests/badge.svg)](https://github.com/HelmholtzAI-Consultants-Munich/napari-organoid-counter/actions)
 [![codecov](https://codecov.io/gh/HelmholtzAI-Consultants-Munich/napari-organoid-counter/branch/main/graph/badge.svg)](https://codecov.io/gh/HelmholtzAI-Consultants-Munich/napari-organoid-counter)
-[![napari hub](https://img.shields.io/endpoint?url=https://api.napari-hub.org/shields/napari-organoid-counter)](https://napari-hub.org/plugins/napari-organoid-counter)
+

 A napari plugin to automatically count lung organoids from microscopy imaging data.
 Note: this plugin only supports single channel grayscale images.
@@ -22,21 +23,21 @@ This [napari] plugin was generated with [Cookiecutter] using [@napari]'s [cookie

 ## Installation

-You can install `napari-organoid-counter` via [pip]:
+This plugin has been tested with Python 3.9 and 3.10 - you may want to use conda to create a dedicated environment before installing `napari-organoid-counter`.

-    pip install napari-organoid-counter
+1. You can install `napari-organoid-counter` via [pip](https://pypi.org/project/napari-organoid-counter/):
+
+    ```pip install napari-organoid-counter```

-To install latest development version :
+    To install the latest development version:

-    pip install git+https://github.com/HelmholtzAI-Consultants-Munich/napari-organoid-counter.git
-
-
-For the dev branch you can clone this repo and install with:
+    ```pip install git+https://github.com/HelmholtzAI-Consultants-Munich/napari-organoid-counter.git```

-    pip install -e .
+2. You will then need to install one additional dependency:

-For installing on a Windows machine via napari, follow the instuctions [here](https://github.com/HelmholtzAI-Consultants-Munich/napari-organoid-counter/blob/main/readme-content/How%20to%20install%20on%20a%20Windows%20machine.pdf).
+    ```mim install "mmcv<2.2.0,>=2.0.0rc4"```
+
+To install on a Windows machine directly from within napari, follow the instructions [here](https://github.com/HelmholtzAI-Consultants-Munich/napari-organoid-counter/blob/main/readme-content/How%20to%20install%20on%20a%20Windows%20machine.pdf). Step 2 (```mim install "mmcv<2.2.0,>=2.0.0rc4"```) needs to be performed in this case too.

 ## What's new in v2?
 Checkout our *What's New in v2* [here](https://github.com/HelmholtzAI-Consultants-Munich/napari-organoid-counter/blob/main/.napari/DESCRIPTION.md#whats-new-in-v2).
@@ -48,7 +49,7 @@ For more information on this plugin, its' intended audience, as well as Quicksta

 ## Contributing

-Contributions are very welcome. Tests can be run with [tox], please ensure
+Contributions are very welcome. Tests can be run with [pytest](https://docs.pytest.org/); please ensure
 the coverage at least stays the same before you submit a pull request.

 ## License
@@ -65,6 +66,10 @@ Distributed under the terms of the [MIT] license,

 [2] Eva Maxfield Brown, Talley Lambert, Peter Sobolewski, Napari-AICSImageIO Contributors (2021). Napari-AICSImageIO: Image Reading in Napari using AICSImageIO [Computer software]. GitHub. https://github.com/AllenCellModeling/napari-aicsimageio

+The latest version also uses models developed with the `mmdetection` package [3]; see [here](https://github.com/open-mmlab/mmdetection).
+
+[3] Chen, Kai, et al. "MMDetection: Open MMLab Detection Toolbox and Benchmark." arXiv preprint arXiv:1906.07155 (2019).
+
 ## Issues

 If you encounter any problems, please [file an issue] along with a detailed description.
@@ -83,7 +88,6 @@ If you encounter any problems, please [file an issue] along with a detailed desc

 [file an issue]: https://github.com/HelmholtzAI-Consultants-Munich/napari-organoid-counter/issues
 [napari]: https://github.com/napari/napari
-[tox]: https://tox.readthedocs.io/en/latest/
 [pip]: https://pypi.org/project/pip/
 [PyPI]: https://pypi.org/
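For users who prefer scripting over the GUI, the installed plugin can also be opened from Python. A hedged sketch using napari's public plugin API (the dock-widget lookup by plugin name is napari's standard mechanism; the image path is a placeholder):

```python
# Hypothetical scripted quickstart for the plugin.
import napari

viewer = napari.Viewer()
viewer.open("my_organoids.czi")  # placeholder path; CZI reading needs napari-aicsimageio
viewer.window.add_plugin_dock_widget("napari-organoid-counter")  # dock the counter widget
napari.run()  # start the Qt event loop
```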
diff --git a/napari_organoid_counter/_orgacount.py b/napari_organoid_counter/_orgacount.py
index 8425309..8e655a2 100644
--- a/napari_organoid_counter/_orgacount.py
+++ b/napari_organoid_counter/_orgacount.py
@@ -1,12 +1,13 @@
-import torch
-from torchvision.transforms import ToTensor
-
 from urllib.request import urlretrieve
 from napari.utils import progress
 from napari_organoid_counter._utils import *
 from napari_organoid_counter import settings

+#update_version_in_mmdet_init_file('mmdet', '2.2.0', '2.3.0')
+import torch
+import mmdet
+from mmdet.apis import DetInferencer

 class OrganoiDL():
     '''
@@ -19,8 +20,6 @@ class OrganoiDL():
         The confidence threshold of the model
     cur_min_diam: float
         The minimum diameter of the organoids
-    transfroms: torchvision.transforms.ToTensor
-        The transformation for converting numpy image to tensor so it can be given as an input to the model
     model: frcnn
         The Faster R-CNN model
     img_scale: list of floats
@@ -45,7 +44,6 @@ def __init__(self, handle_progress):
         self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
         self.cur_confidence = 0.05
         self.cur_min_diam = 30
-        self.transfroms = ToTensor()
        self.model = None

        self.img_scale = [0., 0.]
@@ -60,24 +58,23 @@ def set_scale(self, img_scale):

     def set_model(self, model_name):
         ''' Initialise model instance and load model checkpoint and send to device. '''
-        self.model = frcnn(num_classes=2, rpn_score_thresh=0, box_score_thresh = self.cur_confidence)
-        self.load_model_checkpoint(model_name)
-        self.model = self.model.to(self.device)
-
-    def download_model(self, model='default'):
+        model_checkpoint = join_paths(str(settings.MODELS_DIR), settings.MODELS[model_name]["filename"])
+        mmdet_path = os.path.dirname(mmdet.__file__)
+        config_dst = join_paths(mmdet_path, str(settings.CONFIGS[model_name]["destination"]))
+        # download the corresponding config if it doesn't exist already
+        if not os.path.exists(config_dst):
+            urlretrieve(settings.CONFIGS[model_name]["source"], config_dst, self.handle_progress)
+        self.model = DetInferencer(config_dst, model_checkpoint, self.device, show_progress=False)
+
+    def download_model(self, model_name='yolov3'):
         ''' Downloads the model from zenodo and stores it in settings.MODELS_DIR '''
-        # specify the url of the file which is to be downloaded
-        down_url = settings.MODELS[model]["source"]
+        # specify the url of the model which is to be downloaded
+        down_url = settings.MODELS[model_name]["source"]
         # specify save location where the file is to be saved
-        save_loc = join_paths(str(settings.MODELS_DIR), settings.MODELS[model]["filename"])
-        # Downloading using urllib
-        urlretrieve(down_url,save_loc, self.handle_progress)
-
-    def load_model_checkpoint(self, model_name):
-        ''' Loads the model checkpoint for the model specified in model_name '''
-        model_checkpoint = join_paths(settings.MODELS_DIR, settings.MODELS[model_name]["filename"])
-        ckpt = torch.load(model_checkpoint, map_location=self.device)
-        self.model.load_state_dict(ckpt) #.state_dict())
+        save_loc = join_paths(str(settings.MODELS_DIR), settings.MODELS[model_name]["filename"])
+        # downloading using urllib
+        urlretrieve(down_url, save_loc, self.handle_progress)

     def sliding_window(self,
                        test_img,
@@ -120,20 +117,20 @@ def sliding_window(self,
         for i in progress(range(0, prepadded_height, step)):
             for j in progress(range(0, prepadded_width, step)):
                 # crop
-                img_crop = test_img[:, :, i:(i+window_size), j:(j+window_size)]
+                img_crop = test_img[i:(i+window_size), j:(j+window_size)]
                 # get predictions
-                output = self.model(img_crop.float())
-                preds = output[0]['boxes']
-                if preds.size(0)==0: continue
+                output = self.model(img_crop)
+                preds = output['predictions'][0]['bboxes']
+                if len(preds)==0: continue
                 else:
-                    for bbox_id in range(preds.size(0)):
-                        y1, x1, y2, x2 = preds[bbox_id].cpu().detach() # predictions from model will be in form x1,y1,x2,y2
+                    for bbox_id in range(len(preds)):
+                        y1, x1, y2, x2 = preds[bbox_id] # model boxes are x1,y1,x2,y2 - the unpack swaps them into (row, col) order
                         x1_real = torch.div(x1+i, rescale_factor, rounding_mode='floor')
                         x2_real = torch.div(x2+i, rescale_factor, rounding_mode='floor')
                         y1_real = torch.div(y1+j, rescale_factor, rounding_mode='floor')
                         y2_real = torch.div(y2+j, rescale_factor, rounding_mode='floor')
                         pred_bboxes.append(torch.Tensor([x1_real, y1_real, x2_real, y2_real]))
-                        scores_list.append(output[0]['scores'][bbox_id].cpu().detach())
+                        scores_list.append(output['predictions'][0]['scores'][bbox_id])
         return pred_bboxes, scores_list

     def run(self,
@@ -170,9 +167,7 @@ def run(self,
             ready_img, prepadded_height, prepadded_width = prepare_img(img,
                                                                        step,
                                                                        window_size,
-                                                                       rescale_factor,
-                                                                       self.transfroms,
-                                                                       self.device)
+                                                                       rescale_factor)
             # and run sliding window over whole image
             bboxes, scores = self.sliding_window(ready_img,
                                                  step,
@@ -184,7 +179,7 @@ def run(self,
                                                  scores)
         # stack results
         bboxes = torch.stack(bboxes)
-        scores = torch.stack(scores)
+        scores = torch.Tensor(scores)
         # apply NMS to remove overlaping boxes
         bboxes, pred_scores = apply_nms(bboxes, scores)
         self.pred_bboxes[shapes_name] = bboxes
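The rewritten `set_model` wraps mmdet's `DetInferencer`, and `sliding_window` reads its dictionary output. A hedged, stand-alone sketch of that output contract (the paths are hypothetical; the keys mirror exactly how the diff above consumes them):

```python
# Sketch: the DetInferencer output format consumed by sliding_window above.
import numpy as np
from mmdet.apis import DetInferencer

inferencer = DetInferencer("path/to/yolov3_416_organoid.py",   # config (hypothetical path)
                           "path/to/yolov3_416_organoid.pth",  # checkpoint (hypothetical path)
                           device="cpu",
                           show_progress=False)
crop = np.zeros((512, 512, 3), dtype=np.uint8)     # one BGR sliding-window crop
output = inferencer(crop)
preds = output["predictions"][0]                   # results for the single input image
bboxes, scores = preds["bboxes"], preds["scores"]  # xyxy boxes and confidences
```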
diff --git a/napari_organoid_counter/_utils.py b/napari_organoid_counter/_utils.py
index c2cc49e..541fa84 100644
--- a/napari_organoid_counter/_utils.py
+++ b/napari_organoid_counter/_utils.py
@@ -1,6 +1,7 @@
 from contextlib import contextmanager
 import os
 from pathlib import Path
+import pkgutil

 import numpy as np
 import math
@@ -10,9 +11,6 @@ from skimage.color import gray2rgb

 import torch
-import torch.nn as nn
-from torchvision.models import detection
-from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
 from torchvision.ops import nms

 from napari_organoid_counter import settings
@@ -104,7 +102,7 @@ def squeeze_img(img):
     """ Squeeze image - all dims that have size one will be removed """
     return np.squeeze(img)

-def prepare_img(test_img, step, window_size, rescale_factor, trans, device):
+def prepare_img(test_img, step, window_size, rescale_factor):
     """ The original image is prepared for running model inference """
     # squeeze and resize image
     test_img = squeeze_img(test_img)
@@ -119,10 +117,8 @@ def prepare_img(test_img, step, window_size, rescale_factor):
     test_img = (255*test_img).astype(np.uint8)
     test_img = gray2rgb(test_img) #[H,W,C]

-    # convert to tensor and send to device
-    test_img = trans(test_img)
-    test_img = torch.unsqueeze(test_img, axis=0) #[B, C, H, W]
-    test_img = test_img.to(device)
+    # convert from RGB to BGR - expected by DetInferencer
+    test_img = test_img[..., ::-1]

     return test_img, img_height, img_width
@@ -175,20 +171,27 @@ def apply_normalization(img):
     img_norm = (255 * (img - img_min) / (img_max - img_min)).astype(np.uint8)
     return img_norm

-class frcnn(nn.Module):
-    def __init__(self, num_classes,rpn_score_thresh=0,box_score_thresh=0.05):
-        """ An FRCNN module loads the pretrained FasterRCNN model """
-        super(frcnn, self).__init__()
-        # define classes and load pretrained model
-        self.num_classes = num_classes
-        self.model = detection.fasterrcnn_resnet50_fpn(pretrained=True, rpn_score_thresh = rpn_score_thresh, box_score_thresh = box_score_thresh)
-        # get number of input features for the classifier
-        self.in_features = self.model.roi_heads.box_predictor.cls_score.in_features
-        # replace the pre-trained head with a new one
-        self.model.roi_heads.box_predictor = FastRCNNPredictor(self.in_features, self.num_classes)
-        self.model.eval()
-
-    def forward(self, x, return_all=False):
-        """ A forward pass through the model """
-        return self.model(x)
-
\ No newline at end of file
+def get_package_init_file(package_name):
+    loader = pkgutil.get_loader(package_name)
+    if loader is None or not hasattr(loader, 'get_filename'):
+        raise ImportError(f"Cannot find package {package_name}")
+    package_path = loader.get_filename(package_name)
+    # determine the path to the __init__.py file
+    if os.path.isdir(package_path):
+        init_file_path = os.path.join(package_path, '__init__.py')
+    else:
+        init_file_path = package_path
+    if not os.path.isfile(init_file_path):
+        raise FileNotFoundError(f"__init__.py file not found for package {package_name}")
+    return init_file_path
+
+def update_version_in_mmdet_init_file(package_name, old_version, new_version):
+    init_file_path = get_package_init_file(package_name)
+    with open(init_file_path, 'r') as file:
+        lines = file.readlines()
+    with open(init_file_path, 'w') as file:
+        for line in lines:
+            if f"mmcv_maximum_version = '{old_version}'" in line:
+                file.write(line.replace(old_version, new_version))
+            else:
+                # keep all other lines of the file unchanged
+                file.write(line)
\ No newline at end of file
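`update_version_in_mmdet_init_file` rewrites the `mmcv_maximum_version` pin inside mmdet's installed `__init__.py`; note that its only call site (top of `_orgacount.py`) is commented out. If it were enabled, intended usage would presumably look like this (a sketch; the version strings are taken from that commented-out call):

```python
# Hypothetical usage: relax mmdet's mmcv upper bound before importing mmdet.
from napari_organoid_counter._utils import update_version_in_mmdet_init_file

update_version_in_mmdet_init_file('mmdet', '2.2.0', '2.3.0')
import mmdet  # mmdet's import-time mmcv version check now accepts mmcv < 2.3.0
```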
"https://zenodo.org/record/8146857/files/model_v2.ckpt"}, + "faster r-cnn": {"filename": "faster-rcnn_r50_fpn_organoid_best_coco_bbox_mAP_epoch_68.pth", + "source": "https://zenodo.org/records/11388549/files/faster-rcnn_r50_fpn_organoid_best_coco_bbox_mAP_epoch_68.pth" + }, + "ssd": {"filename": "ssd_organoid_best_coco_bbox_mAP_epoch_86.pth", + "source": "https://zenodo.org/records/11388549/files/ssd_organoid_best_coco_bbox_mAP_epoch_86.pth" + }, + "yolov3": {"filename": "yolov3_416_organoid_best_coco_bbox_mAP_epoch_27.pth", + "source": "https://zenodo.org/records/11388549/files/yolov3_416_organoid_best_coco_bbox_mAP_epoch_27.pth" + }, + "rtmdet": {"filename": "rtmdet_l_organoid_best_coco_bbox_mAP_epoch_323.pth", + "source": "https://zenodo.org/records/11388549/files/rtmdet_l_organoid_best_coco_bbox_mAP_epoch_323.pth" + }, } global MODELS_DIR MODELS_DIR = Path.home() / ".cache/napari-organoid-counter/models" global MODEL_TYPE - MODEL_TYPE = '.ckpt' + MODEL_TYPE = '.pth' + + global CONFIGS + CONFIGS = { + "faster r-cnn": {"source": "https://zenodo.org/records/11388549/files/faster-rcnn_r50_fpn_organoid.py", + "destination": ".mim/configs/faster_rcnn/faster-rcnn_r50_fpn_organoid.py" + }, + "ssd": {"source": "https://zenodo.org/records/11388549/files/ssd_organoid.py", + "destination": ".mim/configs/ssd/ssd_organoid.py" + }, + "yolov3": {"source": "https://zenodo.org/records/11388549/files/yolov3_416_organoid.py", + "destination": ".mim/configs/yolo/yolov3_416_organoid.py" + }, + "rtmdet": {"source": "https://zenodo.org/records/11388549/files/rtmdet_l_organoid.py", + "destination": ".mim/configs/rtmdet/rtmdet_l_organoid.py" + } +} + + diff --git a/setup.cfg b/setup.cfg index ca76316..04b9973 100644 --- a/setup.cfg +++ b/setup.cfg @@ -5,7 +5,7 @@ author = christinab12 author_email = christina.bukas@helmholtz-muenchen.de url = https://github.com/HelmholtzAI-Consultants-Munich/napari-organoid-counter license = MIT -description = A plugin to automatically count lung organoids +description = A plugin to automatically count lung organoids using Deep Learning. long_description = file: README.md long_description_content_type = text/markdown classifiers = @@ -30,14 +30,17 @@ project_urls = packages = napari_organoid_counter include_package_data = True -python_requires = >=3.8, <3.11 +python_requires = >=3.9, <3.11 setup_requires = setuptools_scm # add your package requirements here install_requires = - napari[all]>=0.4.17 + napari[all]>=0.4.17,<0.5.0 napari-aicsimageio>=0.7.2 - torch>=1.13.1 - torchvision>=0.14.1 + torch>=2.3.1 + torchvision>=0.18.1 + openmim + mmengine>=0.10.4 + mmdet>=3.3.0 [options.extras_require] testing = diff --git a/tox.ini b/tox.ini index 2bb6897..c0dc644 100644 --- a/tox.ini +++ b/tox.ini @@ -1,11 +1,9 @@ -# For more information about tox, see https://tox.readthedocs.io/en/latest/ [tox] -envlist = py{38,39,310}-{linux,macos,windows} +envlist = py{39,310}-{linux,macos,windows} isolated_build=true [gh-actions] python = - 3.8: py38 3.9: py39 3.10: py310 @@ -29,4 +27,7 @@ passenv = PYVISTA_OFF_SCREEN extras = testing -commands = pytest -v --color=yes --cov=napari_organoid_counter --cov-report=xml \ No newline at end of file +commands = + pip install . + mim install "mmcv>=2.2.0" + pytest -v --color=yes --cov=napari_organoid_counter --cov-report=xml