Bugfix-01 (#5)
- Updated Zenodo metadata
- Fixed deprecated `np.int`
- Resolved dependencies
- Added analysis section in inference notebook
- Added hyper-parameter tuning in inference notebook
- Accept multiple input formats
xiazeyu authored Mar 6, 2024
1 parent 7ee0868 commit 98614f3
Showing 6 changed files with 2,103 additions and 1,310 deletions.
61 changes: 61 additions & 0 deletions .zenodo.json
@@ -0,0 +1,61 @@
{
"upload_type": "software",
"title": "DT_SegNet",
"creators": [
{
"name": "Xia, Zeyu",
"affiliation": "Queensland University of Technology",
"orcid": "0000-0003-0234-5857"
},
{
"name": "Ma, Kan",
"affiliation": "University of Birmingham",
"orcid": "0000-0001-5729-5477"
},
{
"name": "Cheng, Sibo",
"affiliation": "Imperial College London",
"orcid": "0000-0002-8707-2589"
},
{
"name": "Blackburn, Thomas",
"affiliation": "University of Birmingham",
"orcid": "0000-0001-9160-3285"
},
{
"name": "Peng, Ziling",
"affiliation": "Institute of Advanced Science Facilities"
},
{
"name": "Zhu, Kewei",
"affiliation": "University of York"
},
{
"name": "Zhang, Weihang",
"affiliation": "Imperial College London"
},
{
"name": "Xiao, Dunhui",
"affiliation": "Tongji University"
},
{
"name": "Knowles, Alexander J",
"affiliation": "University of Birmingham"
},
{
"name": "Arcucci, Rossella",
"affiliation": "Imperial College London"
}
],
"description": "A comprehensive, two-tiered deep learning approach designed for precise object detection and segmentation in electron microscopy (EM) images.",
"access_right": "open",
"license": "mit",
"related_identifiers": [
{
"scheme": "doi",
"identifier": "10.1039/D3CP00402C",
"relation": "isSupplementTo",
"resource_type": "article"
}
]
}
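
As a quick local sanity check that the new metadata file parses, here is a minimal sketch (assuming Python 3 and that it is run from the repository root; the key list simply mirrors the fields in the file above, not the full Zenodo deposit schema):

import json

# Minimal check that .zenodo.json is well-formed; the expected keys mirror
# the file above rather than the full Zenodo schema.
with open(".zenodo.json") as f:
    meta = json.load(f)  # raises json.JSONDecodeError if the JSON is malformed

for key in ("upload_type", "title", "creators", "license", "related_identifiers"):
    assert key in meta, f"missing top-level key: {key}"

print(f"{meta['title']}: {len(meta['creators'])} creators, license {meta['license']}")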
@@ -243,7 +243,7 @@ def split_points_by_order(tpoints, groups):
         (bs, 2 * x, 3), -1, dtype=np.float32) for x in groups
     ]
 
-    last_point_indx_group = np.zeros((bs, num_groups, 2), dtype=np.int)
+    last_point_indx_group = np.zeros((bs, num_groups, 2), dtype=np.int32)
     for group_indx, group_size in enumerate(groups):
         last_point_indx_group[:, group_indx, 1] = group_size
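For context on the `np.int` change above and in the later hunks: NumPy 1.20 deprecated the `np.int` alias (it was simply Python's built-in `int`) and NumPy 1.24 removed it, so the old spelling fails on current NumPy. A minimal sketch of the replacement pattern, with arbitrary shape values:

import numpy as np

# Old spelling, removed in NumPy 1.24: np.zeros((2, 3, 2), dtype=np.int)
# Fix used throughout this commit: name a fixed-width integer type explicitly.
last_point_indx_group = np.zeros((2, 3, 2), dtype=np.int32)
print(last_point_indx_group.dtype)  # int32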
4 changes: 2 additions & 2 deletions 1_Detection_Model/requirements.txt
@@ -11,8 +11,8 @@ requests>=2.23.0
 scipy>=1.4.1
 torch>=1.7.0
 torchvision>=0.8.1
-tqdm
-protobuf<=3.20.1  # https://github.com/ultralytics/yolov5/issues/8012
+tqdm>=4.64.0
+# protobuf<=3.20.1  # https://github.com/ultralytics/yolov5/issues/8012
 
 # Logging -------------------------------------
 tensorboard>=2.4.1
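To check whether an existing environment already satisfies the updated pins without reinstalling, a small sketch using the standard-library importlib.metadata (the package names and floors below simply echo a few entries from this requirements file):

from importlib.metadata import PackageNotFoundError, version

# Echo a few pins from 1_Detection_Model/requirements.txt against the
# versions installed in the current environment.
pins = {"torch": "1.7.0", "torchvision": "0.8.1", "tqdm": "4.64.0", "tensorboard": "2.4.1"}
for name, floor in pins.items():
    try:
        print(f"{name}: installed {version(name)}, requirement >= {floor}")
    except PackageNotFoundError:
        print(f"{name}: not installed")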
6 changes: 3 additions & 3 deletions 1_Detection_Model/utils/dataloaders.py
@@ -484,7 +484,7 @@ def __init__(self,
         self.im_files = list(cache.keys())  # update
         self.label_files = img2label_paths(cache.keys())  # update
         n = len(shapes)  # number of images
-        bi = np.floor(np.arange(n) / batch_size).astype(np.int)  # batch index
+        bi = np.floor(np.arange(n) / batch_size).astype(np.int32)  # batch index
         nb = bi[-1] + 1  # number of batches
         self.batch = bi  # batch index of image
         self.n = n
@@ -526,7 +526,7 @@ def __init__(self,
                 elif mini > 1:
                     shapes[i] = [1, 1 / mini]
 
-            self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride
+            self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int32) * stride
 
         # Cache images into RAM/disk for faster training (WARNING: large datasets may exceed system resources)
         self.ims = [None] * n
@@ -896,7 +896,7 @@ def extract_boxes(path=DATASETS_DIR / 'coco128'): # from utils.dataloaders impo
                     b = x[1:] * [w, h, w, h]  # box
                     # b[2:] = b[2:].max()  # rectangle to square
                     b[2:] = b[2:] * 1.2 + 3  # pad
-                    b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)
+                    b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int32)
 
                     b[[0, 2]] = np.clip(b[[0, 2]], 0, w)  # clip boxes outside of image
                     b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
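The `bi` line changed above assigns each image a batch index before batches are built; a standalone illustration of what that computation produces (n and batch_size are arbitrary example values):

import numpy as np

n, batch_size = 10, 4  # arbitrary example values
bi = np.floor(np.arange(n) / batch_size).astype(np.int32)  # batch index of each image
nb = bi[-1] + 1  # number of batches
print(bi)  # [0 0 0 0 1 1 1 1 2 2]
print(nb)  # 3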
2 changes: 1 addition & 1 deletion 3_Segmentation_Model/requirements.txt
@@ -5,4 +5,4 @@ tqdm
 filelock
 scipy
 prettytable
-sklearn == 0.0
+scikit-learn
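
This last change replaces the deprecated `sklearn` placeholder distribution on PyPI (historically pinned as `sklearn == 0.0`) with the real `scikit-learn` package; the import name used in code is unaffected, for example:

# Installed as scikit-learn, but still imported under the name sklearn.
import sklearn

print(sklearn.__version__)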
