From 6b548513da85fba2036d5a28d85ed57dbb63c94d Mon Sep 17 00:00:00 2001
From: Alexander Getka
Date: Wed, 25 Sep 2024 17:32:51 -0400
Subject: [PATCH 1/2] Add cache-clear, add longpath note to README

---
 DLMUSE/__main__.py | 30 ++++++++++++++++++++++++++++--
 README.md          |  7 +++++++
 2 files changed, 35 insertions(+), 2 deletions(-)

diff --git a/DLMUSE/__main__.py b/DLMUSE/__main__.py
index d82811c..b9d199b 100644
--- a/DLMUSE/__main__.py
+++ b/DLMUSE/__main__.py
@@ -24,14 +24,14 @@ def main() -> None:
     parser.add_argument(
         "-i",
         type=str,
-        required=True,
+        required=False,
         help="[Required] Input folder. Remember to use the correct channel numberings for your files (_0000 etc). "
         "File endings must be the same as the training dataset!",
     )
     parser.add_argument(
         "-o",
         type=str,
-        required=True,
+        required=False,
         help="[Required] Output folder. If it does not exist it will be created. Predicted segmentations will "
         "have the same name as their source images.",
     )
@@ -187,10 +187,34 @@ def main() -> None:
         help="Set this flag to disable progress bar. Recommended for HPC environments (non interactive "
         "jobs)",
     )
+    parser.add_argument(
+        "--clear_cache",
+        action="store_true",
+        required=False,
+        default=False,
+        help="Set this flag to clear any cached models before running. This is recommended if a previous download failed."
+    )
+
     args = parser.parse_args()
     args.f = [args.f]
 
+    if args.clear_cache:
+        shutil.rmtree(os.path.join(
+            Path(__file__).parent,
+            "nnunet_results"
+        ))
+        shutil.rmtree(os.path.join(
+            Path(__file__).parent,
+            ".cache"
+        ))
+        if not args.input or not args.output:
+            print("Cache cleared and missing either -i / -o. Exiting.")
+            sys.exit(0)
+
+    if not args.input or not args.output:
+        parser.error("The following arguments are required: -i, -o")
+
     # data conversion
     src_folder = args.i  # input folder
 
@@ -220,6 +244,8 @@ def main() -> None:
         % (args.d, args.d, args.c),
     )
 
+
+
     # Check if model exists. If not exist, download using HuggingFace
     print(f"Using model folder: {model_folder}")
     if not os.path.exists(model_folder):
diff --git a/README.md b/README.md
index 4034132..28c4e6a 100644
--- a/README.md
+++ b/README.md
@@ -32,6 +32,13 @@ A pre-trained nnUNet model can be found at our [hugging face account](https://hu
 DLMUSE -i "image_folder" -o "path to output folder" -device cpu
 ```
 
+#### Troubleshooting model download failures
+Our model download process creates several deep directory structures. If you are on Windows and your model download process fails, it may be due to Windows file path limitations.
+
+To enable long path support in Windows 10, version 1607, and later, the registry key `HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\FileSystem LongPathsEnabled (Type: REG_DWORD)` must exist and be set to 1.
+
+If this affects you, we recommend re-running DLMUSE with the `--clear_cache` flag set on the first run.
+
 ## Contact
 For more information, please contact [CBICA Software](mailto:software@cbica.upenn.edu).

From e90aaf9615440ec1fd0e6e34a762cea81f00defb Mon Sep 17 00:00:00 2001
From: KyleB
Date: Thu, 26 Sep 2024 18:19:18 -0400
Subject: [PATCH 2/2] updated -h and README

---
 DLMUSE/__main__.py | 195 ++++++++++++++++++++++++---------------------
 README.md          |   9 ++-
 2 files changed, 113 insertions(+), 91 deletions(-)

diff --git a/DLMUSE/__main__.py b/DLMUSE/__main__.py
index b9d199b..805e9a6 100644
--- a/DLMUSE/__main__.py
+++ b/DLMUSE/__main__.py
@@ -13,28 +13,103 @@
 warnings.simplefilter(action="ignore", category=FutureWarning)
 warnings.simplefilter(action="ignore", category=UserWarning)
 
+VERSION = 1.0
 
 def main() -> None:
+    prog="DLMUSE"
     parser = argparse.ArgumentParser(
-        description="Use this to run inference with nnU-Net. This function is used when "
-        "you want to manually specify a folder containing a trained nnU-Net "
-        "model. This is useful when the nnunet environment variables "
-        "(nnUNet_results) are not set."
+        prog=prog,
+        description="DLMUSE - MUlti-atlas region Segmentation utilizing Ensembles of registration algorithms and parameters.",
+        usage="""
+        DLMUSE v{VERSION}
+        Segmentation of the brain into MUSE ROIs from the Nifti image (.nii.gz) of the LPS oriented Intra Cranial Volume (ICV - see DLICV method).
+
+        Required arguments:
+            [-i, --in_dir]   The filepath of the input directory
+            [-o, --out_dir]  The filepath of the output directory
+        Optional arguments:
+            [-device]        cpu|cuda|mps - Depending on your system configuration (default: cuda)
+            [-h, --help]     Show this help message and exit.
+            [-V, --version]  Show program's version number and exit.
+        EXAMPLE USAGE:
+            DLMUSE -i /path/to/input \
+                   -o /path/to/output \
+                   -device cpu|cuda|mps
+
+        """.format(VERSION=VERSION),
+        add_help=False
     )
+
+    # Required Arguments
     parser.add_argument(
         "-i",
         type=str,
-        required=False,
-        help="[Required] Input folder. Remember to use the correct channel numberings for your files (_0000 etc). "
-        "File endings must be the same as the training dataset!",
+        required=True,
+        help="[REQUIRED] Input folder with LPS oriented T1 sMRI Intra Cranial Volumes (ICV) in Nifti format (nii.gz).",
     )
     parser.add_argument(
         "-o",
         type=str,
+        required=True,
+        help="[REQUIRED] Output folder for the segmentation results in Nifti format (nii.gz).",
+    )
+
+    # Optional Arguments
+    parser.add_argument(
+        "-device",
+        type=str,
+        default="cuda",
+        required=False,
+        help="[Recommended] Use this to set the device the inference should run with. Available options are 'cuda' (GPU), "
+        "'cpu' (CPU) or 'mps' (Apple M-series chips supporting 3D CNN).",
+    )
+    parser.add_argument(
+        "-V",
+        "--version",
+        action="version",
+        version=prog + ": v{VERSION}.".format(VERSION=VERSION),
+        help="Show the version and exit.",
+    )
+    parser.add_argument(
+        "--verbose",
+        action="store_true",
+        help="Provides additional details when set.",
+    )
+    parser.add_argument(
+        "--save_probabilities",
+        action="store_true",
+        help='Set this to export predicted class "probabilities". Required if you want to ensemble '
+        "multiple configurations.",
+    )
+    parser.add_argument(
+        "--continue_prediction",
+        action="store_true",
+        help="Continue an aborted previous prediction (will not overwrite existing files)",
+    )
+    parser.add_argument(
+        "--disable_progress_bar",
+        action="store_true",
         required=False,
-        help="[Required] Output folder. If it does not exist it will be created. Predicted segmentations will "
-        "have the same name as their source images.",
+        default=False,
+        help="Set this flag to disable progress bar. Recommended for HPC environments (non interactive "
+        "jobs)",
     )
+    parser.add_argument(
+        "--clear_cache",
+        action="store_true",
+        required=False,
+        default=False,
+        help="Set this flag to clear any cached models before running. This is recommended if a previous download failed."
+    )
+    parser.add_argument(
+        "--disable_tta",
+        action="store_true",
+        required=False,
+        default=False,
+        help="[nnUnet Arg] Set this flag to disable test time data augmentation in the form of mirroring. "
+        "Faster, but less accurate inference. Not recommended.",
+    )
+
     ### DEPRECIATED ####
     # parser.add_argument(
     #     "-m",
     #     type=str,
@@ -46,14 +121,14 @@ def main() -> None:
         type=str,
         required=False,
         default="903",
-        help="Dataset with which you would like to predict. You can specify either dataset name or id",
+        help="[nnUnet Arg] Dataset with which you would like to predict. You can specify either dataset name or id",
     )
     parser.add_argument(
         "-p",
         type=str,
         required=False,
         default="nnUNetPlans",
-        help="Plans identifier. Specify the plans in which the desired configuration is located. "
+        help="[nnUnet Arg] Plans identifier. Specify the plans in which the desired configuration is located. "
         "Default: nnUNetPlans",
     )
     parser.add_argument(
@@ -61,31 +136,23 @@ def main() -> None:
         type=str,
         required=False,
         default="nnUNetTrainer",
-        help="What nnU-Net trainer class was used for training? Default: nnUNetTrainer",
+        help="[nnUnet Arg] nnU-Net trainer class used for training. "
+        "Default: nnUNetTrainer",
     )
     parser.add_argument(
         "-c",
         type=str,
         required=False,
         default="3d_fullres",
-        help="nnU-Net configuration that should be used for prediction. Config must be located "
+        help="[nnUnet Arg] nnU-Net configuration that should be used for prediction. Config must be located "
         "in the plans specified with -p",
     )
-    # parser.add_argument(
-    #     "-f",
-    #     nargs="+",
-    #     type=str,
-    #     required=False,
-    #     default=(0),
-    #     help="Specify the folds of the trained model that should be used for prediction. "
-    #     "Default: (0)",
-    # )
     parser.add_argument(
         "-f",
         type=int,
         required=False,
         default=0,
-        help="Specify the folds of the trained model that should be used for prediction. "
+        help="[nnUnet Arg] Specify the folds of the trained model that should be used for prediction. "
         "Default: 0",
     )
     parser.add_argument(
@@ -93,48 +160,23 @@ def main() -> None:
         type=float,
         required=False,
         default=0.5,
-        help="Step size for sliding window prediction. The larger it is the faster but less accurate "
-        "the prediction. Default: 0.5. Cannot be larger than 1. We recommend the default."
+        help="[nnUnet Arg] Step size for sliding window prediction. The larger it is the faster "
+        "but less accurate prediction. Default: 0.5. Cannot be larger than 1. We recommend the default. "
         "Default: 0.5",
     )
-    parser.add_argument(
-        "--disable_tta",
-        action="store_true",
-        required=False,
-        default=False,
-        help="Set this flag to disable test time data augmentation in the form of mirroring. Faster, "
-        "but less accurate inference. Not recommended.",
-    )
-    parser.add_argument(
-        "--verbose",
-        action="store_true",
-        help="Set this if you like being talked to. You will have "
-        "to be a good listener/reader.",
-    )
-    parser.add_argument(
-        "--save_probabilities",
-        action="store_true",
-        help='Set this to export predicted class "probabilities". Required if you want to ensemble '
-        "multiple configurations.",
-    )
-    parser.add_argument(
-        "--continue_prediction",
-        action="store_true",
-        help="Continue an aborted previous prediction (will not overwrite existing files)",
-    )
     parser.add_argument(
         "-chk",
         type=str,
         required=False,
         default="checkpoint_final.pth",
-        help="Name of the checkpoint you want to use. Default: checkpoint_final.pth",
+        help="[nnUnet Arg] Name of the checkpoint you want to use. Default: checkpoint_final.pth",
     )
     parser.add_argument(
         "-npp",
         type=int,
         required=False,
         default=3,
-        help="Number of processes used for preprocessing. More is not always better. Beware of "
+        help="[nnUnet Arg] Number of processes used for preprocessing. More is not always better. Beware of "
         "out-of-RAM issues. Default: 3",
     )
     parser.add_argument(
@@ -142,7 +184,7 @@ def main() -> None:
         type=int,
         required=False,
         default=3,
-        help="Number of processes used for segmentation export. More is not always better. Beware of "
+        help="[nnUnet Arg] Number of processes used for segmentation export. More is not always better. Beware of "
         "out-of-RAM issues. Default: 3",
     )
     parser.add_argument(
@@ -150,50 +192,27 @@ def main() -> None:
         type=str,
         required=False,
         default=None,
-        help="Folder containing the predictions of the previous stage. Required for cascaded models.",
+        help="[nnUnet Arg] Folder containing the predictions of the previous stage. Required for cascaded models.",
     )
     parser.add_argument(
         "-num_parts",
         type=int,
         required=False,
         default=1,
-        help="Number of separate nnUNetv2_predict call that you will be making. Default: 1 (= this one "
-        "call predicts everything)",
+        help="[nnUnet Arg] Number of separate nnUNetv2_predict call that you will be making. "
+        "Default: 1 (= this will predict everything with a single call)",
     )
     parser.add_argument(
         "-part_id",
         type=int,
         required=False,
         default=0,
-        help="If multiple nnUNetv2_predict exist, which one is this? IDs start with 0 can end with "
-        "num_parts - 1. So when you submit 5 nnUNetv2_predict calls you need to set -num_parts "
-        "5 and use -part_id 0, 1, 2, 3 and 4. Simple, right? Note: You are yourself responsible "
-        "to make these run on separate GPUs! Use CUDA_VISIBLE_DEVICES (google, yo!)",
-    )
-    parser.add_argument(
-        "-device",
-        type=str,
-        default="cuda",
-        required=False,
-        help="Use this to set the device the inference should run with. Available options are 'cuda' "
-        "(GPU), 'cpu' (CPU) and 'mps' (Apple M1/M2). Do NOT use this to set which GPU ID! "
-        "Use CUDA_VISIBLE_DEVICES=X nnUNetv2_predict [...] instead!",
-    )
-    parser.add_argument(
-        "--disable_progress_bar",
-        action="store_true",
-        required=False,
-        default=False,
-        help="Set this flag to disable progress bar. Recommended for HPC environments (non interactive "
-        "jobs)",
-    )
-    parser.add_argument(
-        "--clear_cache",
-        action="store_true",
-        required=False,
-        default=False,
-        help="Set this flag to clear any cached models before running. This is recommended if a previous download failed."
+        help="[nnUnet Arg] If multiple nnUNetv2_predict exist, which one is this? IDs start with 0 "
+        "can end with num_parts - 1. So when you submit 5 nnUNetv2_predict calls you need to set "
+        "-num_parts 5 and use -part_id 0, 1, 2, 3 and 4. Note: You are yourself responsible to make these run on separate GPUs! "
+        "Use CUDA_VISIBLE_DEVICES.",
     )
+
 
     args = parser.parse_args()
@@ -208,17 +227,16 @@ def main() -> None:
             Path(__file__).parent,
             ".cache"
         ))
-        if not args.input or not args.output:
+        if not args.i or not args.o:
             print("Cache cleared and missing either -i / -o. Exiting.")
             sys.exit(0)
 
-    if not args.input or not args.output:
+    if not args.i or not args.o:
         parser.error("The following arguments are required: -i, -o")
 
     # data conversion
     src_folder = args.i  # input folder
 
-    # des_folder = args.o
     if not os.path.exists(args.o):  # create output folder if it does not exist
         os.makedirs(args.o)
@@ -245,7 +263,6 @@ def main() -> None:
     )
 
 
-
     # Check if model exists. If not exist, download using HuggingFace
     print(f"Using model folder: {model_folder}")
     if not os.path.exists(model_folder):
@@ -274,7 +291,7 @@ def main() -> None:
 
     if args.device == "cpu":
         import multiprocessing
-
+        # use half of the available threads in the system.
         torch.set_num_threads(multiprocessing.cpu_count() // 2)
         device = torch.device("cpu")
         print("Running in CPU mode.")
diff --git a/README.md b/README.md
index 28c4e6a..6114746 100644
--- a/README.md
+++ b/README.md
@@ -29,10 +29,15 @@ A pre-trained nnUNet model can be found at our [hugging face account](https://hu
 ### From command line
 
 ```bash
-DLMUSE -i "image_folder" -o "path to output folder" -device cpu
+DLMUSE -i "input_folder" -o "output_folder" -device cpu
 ```
+For more details, please refer to
 
-#### Troubleshooting model download failures
+```bash
+DLMUSE -h
+```
+
+## \[Windows Users\] Troubleshooting model download failures
 Our model download process creates several deep directory structures. If you are on Windows and your model download process fails, it may be due to Windows file path limitations.
 
 To enable long path support in Windows 10, version 1607, and later, the registry key `HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\FileSystem LongPathsEnabled (Type: REG_DWORD)` must exist and be set to 1.