Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Workflows update | Badges | Torch | New pipeline API #9

Open
wants to merge 25 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
25 commits
Select commit Hold shift + click to select a range
f1b55c1
Added PyPI and build badges
spirosmaggioros Nov 6, 2024
dc8eb5c
Strings instead of floats for python version
spirosmaggioros Nov 6, 2024
33843c2
Add pathlib to requirements
spirosmaggioros Nov 6, 2024
ffca005
Make install_requires a list instead of reading requirements.txt
spirosmaggioros Nov 6, 2024
4653034
Torch up to 2.5.1
spirosmaggioros Nov 6, 2024
7feffd2
Make requirement just torch
spirosmaggioros Nov 6, 2024
ff9b680
Install requirements first
spirosmaggioros Nov 6, 2024
7428539
Change name on Ubuntu build test
spirosmaggioros Nov 6, 2024
51e48f6
Changed macos versions
spirosmaggioros Nov 6, 2024
4ed42cb
Remove 3.13 on macos tests
spirosmaggioros Nov 6, 2024
058b155
python 3.13 support only for ubuntu | macos build till 3.12
spirosmaggioros Nov 9, 2024
fcc196c
Fixed torch version
spirosmaggioros Nov 9, 2024
4647f05
Can't install torch==2.2.1 with nightly version, on hold till they fu…
spirosmaggioros Nov 9, 2024
fbad4ea
Remove 3.13 on ubuntu
spirosmaggioros Nov 9, 2024
0f456ce
Created DLICV pipeline file for better import
spirosmaggioros Dec 11, 2024
ed2f01a
Created DLICV pipeline file for better import | arguments fixed and w…
spirosmaggioros Dec 11, 2024
871d48a
Created DLICV pipeline file for better import | arguments fixed and w…
spirosmaggioros Dec 11, 2024
3318595
Merge branch 'main' into spiros-dev
spirosmaggioros Dec 11, 2024
7f11318
require nnunetv2==2.5.1
spirosmaggioros Dec 12, 2024
1d415a9
require nnunetv2==2.2.1
spirosmaggioros Dec 12, 2024
6d37538
require nnunetv2==2.5.1
spirosmaggioros Dec 12, 2024
3162090
Fixed small typo on setup.py
spirosmaggioros Dec 12, 2024
e5b0cc1
Correct package version
spirosmaggioros Dec 12, 2024
c65fabf
Don't see anything wrong with torch 2.3.1
spirosmaggioros Dec 12, 2024
c3874a4
Update macos workflow to use the latest version
spirosmaggioros Dec 12, 2024
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
32 changes: 32 additions & 0 deletions .github/workflows/macos-build.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
# GitHub Actions workflow: checks that the package and its requirements
# install cleanly on macOS across the supported Python versions.
name: Macos Build

# Trigger on pushes and pull requests targeting the main branch and the
# spiros-dev development branch.
on:
  push:
    branches:
      - main
      - spiros-dev
  pull_request:
    branches:
      - main
      - spiros-dev

jobs:
  test:
    runs-on: macos-latest
    strategy:
      matrix:
        # Python 3.13 is deliberately excluded on macOS (per the PR's
        # commit history: "Remove 3.13 on macos tests").
        python-version: ["3.9", "3.10", "3.11", "3.12"]

    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Setup python version ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}

      # Install pinned requirements first, then the package itself in
      # editable mode; a failure here fails the build check.
      - name: Install dependencies
        run: |
          pip install -r requirements.txt
          python3 -m pip install -e .
32 changes: 32 additions & 0 deletions .github/workflows/ubuntu-build.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
# GitHub Actions workflow: checks that the package and its requirements
# install cleanly on Ubuntu across the supported Python versions.
name: Ubuntu Build

# Trigger on pushes and pull requests targeting the main branch and the
# spiros-dev development branch.
on:
  push:
    branches:
      - main
      - spiros-dev
  pull_request:
    branches:
      - main
      - spiros-dev

jobs:
  test:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        # Python 3.13 is deliberately excluded (per the PR's commit
        # history: "Remove 3.13 on ubuntu").
        python-version: ["3.9", "3.10", "3.11", "3.12"]

    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Setup python version ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}

      # Install pinned requirements first, then the package itself in
      # editable mode; a failure here fails the build check.
      - name: Install dependencies
        run: |
          pip install -r requirements.txt
          python3 -m pip install -e .
163 changes: 20 additions & 143 deletions DLICV/__main__.py
Original file line number Diff line number Diff line change
@@ -1,20 +1,13 @@
import argparse
import json
import os
import shutil
import sys
import warnings
from pathlib import Path

import torch

from .utils import prepare_data_folder, rename_and_copy_files
from .dlicv_pipeline import run_pipeline

warnings.simplefilter(action="ignore", category=FutureWarning)
warnings.simplefilter(action="ignore", category=UserWarning)

# VERSION = pkg_resources.require("NiChart_DLMUSE")[0].version
VERSION = 1.0
VERSION = "1.0.4"


def main() -> None:
Expand All @@ -25,7 +18,6 @@ def main() -> None:
usage="""
DLICV v{VERSION}
ICV calculation for structural MRI data.

Required arguments:
[-i, --in_dir] The filepath of the input directory
[-o, --out_dir] The filepath of the output directory
Expand Down Expand Up @@ -203,142 +195,27 @@ def main() -> None:
)

args = parser.parse_args()
args.f = [0]
args.i = args.in_dir
args.o = args.out_dir

if args.clear_cache:
shutil.rmtree(os.path.join(Path(__file__).parent, "nnunet_results"))
shutil.rmtree(os.path.join(Path(__file__).parent, ".cache"))
if not args.i or not args.o:
print("Cache cleared and missing either -i / -o. Exiting.")
sys.exit(0)

if not args.i or not args.o:
parser.error("The following arguments are required: -i, -o")

# data conversion
src_folder = args.i # input folder
if not os.path.exists(args.o): # create output folder if it does not exist
os.makedirs(args.o)

des_folder = os.path.join(args.o, "renamed_image")

# check if -i argument is a folder, list (csv), or a single file (nii.gz)
if os.path.isdir(args.i): # if args.i is a directory
src_folder = args.i
prepare_data_folder(des_folder)
rename_dic, rename_back_dict = rename_and_copy_files(src_folder, des_folder)
datalist_file = os.path.join(des_folder, "renaming.json")
with open(datalist_file, "w", encoding="utf-8") as f:
json.dump(rename_dic, f, ensure_ascii=False, indent=4)
print(f"Renaming dic is saved to {datalist_file}")

model_folder = os.path.join(
Path(__file__).parent,
"nnunet_results",
"Dataset%s_Task%s_dlicv/nnUNetTrainer__nnUNetPlans__3d_fullres/"
% (args.d, args.d),
)

if args.clear_cache:
shutil.rmtree(os.path.join(Path(__file__).parent, "nnunet_results"))
shutil.rmtree(os.path.join(Path(__file__).parent, ".cache"))

# Check if model exists. If not exist, download using HuggingFace
if not os.path.exists(model_folder):
# HF download model
print("DLICV model not found, downloading...")

from huggingface_hub import snapshot_download

local_src = Path(__file__).parent
snapshot_download(repo_id="nichart/DLICV", local_dir=local_src)
print("DLICV model has been successfully downloaded!")
else:
print("Loading the model...")

prepare_data_folder(args.o)

# Check for invalid arguments - advise users to see nnUNetv2 documentation
assert args.part_id < args.num_parts, "See nnUNetv2_predict -h."

assert args.device in [
"cpu",
"cuda",
"mps",
], f"-device must be either cpu, mps or cuda. Other devices are not tested/supported. Got: {args.device}."

if args.device == "cpu":
import multiprocessing

torch.set_num_threads(
multiprocessing.cpu_count() // 2
) # use half of the threads (better for PC)
device = torch.device("cpu")
elif args.device == "cuda":
# multithreading in torch doesn't help nnU-Netv2 if run on GPU
torch.set_num_threads(1)
torch.set_num_interop_threads(1)
device = torch.device("cuda")
else:
device = torch.device("mps")

# exports for nnunetv2 purposes
os.environ["nnUNet_raw"] = "/nnunet_raw/"
os.environ["nnUNet_preprocessed"] = "/nnunet_preprocessed"
os.environ["nnUNet_results"] = (
"/nnunet_results" # where model will be located (fetched from HF)
)

from nnunetv2.inference.predict_from_raw_data import nnUNetPredictor

# Initialize nnUnetPredictor
predictor = nnUNetPredictor(
tile_step_size=args.step_size,
use_gaussian=True,
use_mirroring=not args.disable_tta,
perform_everything_on_device=True,
device=device,
verbose=args.verbose,
verbose_preprocessing=args.verbose,
allow_tqdm=not args.disable_progress_bar,
run_pipeline(
args.in_dir,
args.out_dir,
args.device,
args.clear_cache,
args.d,
args.part_id,
args.num_parts,
args.step_size,
args.disable_tta,
args.verbose,
args.disable_progress_bar,
args.chk,
args.save_probabilities,
args.continue_prediction,
args.npp,
args.nps,
args.prev_stage_predictions,
)

# Retrieve the model and its weight
predictor.initialize_from_trained_model_folder(
model_folder, args.f, checkpoint_name=args.chk
)

# Final prediction
predictor.predict_from_files(
des_folder,
args.o,
save_probabilities=args.save_probabilities,
overwrite=not args.continue_prediction,
num_processes_preprocessing=args.npp,
num_processes_segmentation_export=args.nps,
folder_with_segs_from_prev_stage=args.prev_stage_predictions,
num_parts=args.num_parts,
part_id=args.part_id,
)

# After prediction, convert the image name back to original
files_folder = args.o

for filename in os.listdir(files_folder):
if filename.endswith(".nii.gz"):
original_name = rename_back_dict[filename]
os.rename(
os.path.join(files_folder, filename),
os.path.join(files_folder, original_name),
)
# Remove the (temporary) des_folder directory
if os.path.exists(des_folder):
shutil.rmtree(des_folder)

print("DLICV Process Done!")


if __name__ == "__main__":
main()
Loading
Loading