allow for incomplete data ingestion, fix ypix bug, add mean image to caiman seg class #227

Merged on Mar 4, 2024 (17 commits)
Changes from 4 commits
README.md (1 addition, 1 deletion)
@@ -119,7 +119,7 @@ To update data later, `cd` into the test directory and run `gin get-content`

## Class descriptions:

-* **SegmentationExtractor:** An abstract class that contains all the meta-data and output data from the ROI segmentation operation when applied to the pre-processed data. It also contains methods to read from and write to various data formats output from the processing pipelines like SIMA, CaImAn, Suite2p, CNNM-E.
+* **SegmentationExtractor:** An abstract class that contains all the meta-data and output data from the ROI segmentation operation when applied to the pre-processed data. It also contains methods to read from and write to various data formats output from the processing pipelines like SIMA, CaImAn, Suite2p, CNMF-E.

* **NumpySegmentationExtractor:** Contains all data coming from a file format for which there is currently no support. To construct this, all data must be entered manually as arguments.

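To make the class description above concrete, here is a rough usage sketch of the extractor API using a concrete subclass that this PR also touches (the file name is hypothetical; `get_image_size`, `get_accepted_list`, and `get_roi_pixel_masks` appear later in this diff, and `get_traces` is the trace accessor from the same `SegmentationExtractor` interface as I understand it):

```python
from roiextractors import CaimanSegmentationExtractor

# Hypothetical CaImAn/CNMF-E output file.
seg = CaimanSegmentationExtractor(file_path="analysis_results.hdf5")

print(seg.get_image_size())              # FOV shape, e.g. (512, 512)
print(seg.get_accepted_list())           # ROI ids flagged as accepted components
dff_traces = seg.get_traces(name="dff")  # Delta F / F traces
pixel_masks = seg.get_roi_pixel_masks()  # per-ROI (y, x, weight) pixel lists
```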
CaImAn segmentation extractor
@@ -48,7 +48,8 @@ def __init__(self, file_path: PathType):
         self._roi_response_dff = self._trace_extractor_read("F_dff")
         self._roi_response_neuropil = self._trace_extractor_read("C")
         self._roi_response_deconvolved = self._trace_extractor_read("S")
-        self._image_correlation = self._summary_image_read()
+        self._image_correlation = self._correlation_image_read()
+        self._image_mean = self._summary_image_read()
         self._sampling_frequency = self._dataset_file["params"]["data"]["fr"][()]
         self._image_masks = self._image_mask_sparse_read()

@@ -75,10 +76,16 @@ def _trace_extractor_read(self, field):
         if field in self._dataset_file["estimates"]:
             return lazy_ops.DatasetView(self._dataset_file["estimates"][field]).lazy_transpose()

-    def _summary_image_read(self):
+    def _correlation_image_read(self):
         if self._dataset_file["estimates"].get("Cn"):
             return np.array(self._dataset_file["estimates"]["Cn"])

+    def _summary_image_read(self):
+        if self._dataset_file["estimates"].get("b"):
+            FOV_shape = self._dataset_file["params"]["data"]["dims"][()]
+            b_sum = self._dataset_file["estimates"]["b"][:].sum(axis=1)
+            return np.array(b_sum).reshape(FOV_shape, order="F")

     def get_accepted_list(self):
         accepted = self._dataset_file["estimates"]["idx_components"]
         if len(accepted.shape) == 0:
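The new `_summary_image_read` above reconstructs a mean/background image from CNMF's spatial background matrix `b`, which is stored as (pixels x components) with the pixel axis flattened column-major; that is why the reshape uses `order="F"`. A minimal standalone sketch of the same operation, with shapes invented for illustration:

```python
import numpy as np

fov_shape = (4, 3)  # rows x columns of the imaging field of view
n_background = 2    # number of CNMF background components

# b is stored as (pixels x components), pixels flattened in Fortran order.
b = np.arange(fov_shape[0] * fov_shape[1] * n_background, dtype=float).reshape(-1, n_background)

# Sum over components per pixel, then fold back into the 2D field of view.
background_image = b.sum(axis=1).reshape(fov_shape, order="F")
print(background_image.shape)  # (4, 3)
```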
Suite2p segmentation extractor
@@ -1,6 +1,7 @@
 import shutil
 from pathlib import Path
 from typing import Optional
+import warnings
CodyCBakerPhD (Member) suggested change: remove the `import warnings` line.
"No need for warning either if the rest is cleaned up"

 import numpy as np

@@ -22,6 +23,9 @@ def __init__(
         folder_path: Optional[PathType] = None,
         combined: bool = False,
         plane_no: IntType = 0,
+        search_plane_subdirectory: bool = True,
+        allow_incomplete_import: bool = False,
+        warn_missing_files: bool = True,
CodyCBakerPhD (Member) suggested change: remove the `allow_incomplete_import` and `warn_missing_files` parameters.
"With cleanup below, optional warning no longer needed - incomplete import always enabled"

         file_path: Optional[PathType] = None,
     ):
         """
@@ -34,6 +38,14 @@ def __init__(
             if the plane is a combined plane as in the Suite2p pipeline
         plane_no: int
             the plane for which to extract segmentation for.
+        search_plane_subdirectory: bool
+            If True, will search for files in 'folder_path/plane{plane_no}',
+            else will search for files in 'folder_path'.
+        allow_incomplete_import: bool
+            If True, will not raise an error if the file is incomplete.
+        warn_missing_files: bool
+            If True, will raise a warning if a file is incomplete and
+            allow_incomplete_import is True.
CodyCBakerPhD (Member) suggested change: remove the `allow_incomplete_import` and `warn_missing_files` docstring entries.
"With cleanup below, optional warning no longer needed - incomplete import always enabled"

         file_path: str or Path [Deprecated]
             ~/suite2p folder location on disk
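For orientation, a hedged usage sketch of the constructor as it reads in this commit (folder paths are hypothetical, and the review above suggests the incomplete-import flags may be simplified away later):

```python
from roiextractors import Suite2pSegmentationExtractor

# Default layout: Suite2p writes plane folders, so files are found under
# suite2p_output/plane0/ (search_plane_subdirectory=True).
seg = Suite2pSegmentationExtractor(folder_path="suite2p_output", plane_no=0)

# If stat.npy, F.npy, etc. sit directly in the folder with no plane0/
# subdirectory, disable the subdirectory search.
seg_flat = Suite2pSegmentationExtractor(
    folder_path="exported_plane_files",
    search_plane_subdirectory=False,
)
```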

@@ -56,12 +68,27 @@ def __init__(
         self.plane_no = plane_no
         self.folder_path = Path(folder_path)

-        self.stat = self._load_npy("stat.npy")
-        self._roi_response_raw = self._load_npy("F.npy", mmap_mode="r").T
-        self._roi_response_neuropil = self._load_npy("Fneu.npy", mmap_mode="r").T
-        self._roi_response_deconvolved = self._load_npy("spks.npy", mmap_mode="r").T
-        self.iscell = self._load_npy("iscell.npy", mmap_mode="r")
-        self.ops = self._load_npy("ops.npy").item()
+        self._search_plane_subdirectory = search_plane_subdirectory

+        def try_load_npy(filename, mmap_mode=None, fn_transform=lambda x: x):
+            """
+            This function allows for incomplete import of files.
+            """
+            try:
+                return fn_transform(self._load_npy(filename, mmap_mode=mmap_mode))
+            except FileNotFoundError:
+                if allow_incomplete_import:
+                    warnings.warn(f"File {filename} not found.") if warn_missing_files else None
+                    return None
+                else:
+                    raise FileNotFoundError(f"File {filename} not found.")

+        self.stat = try_load_npy("stat.npy")
+        self._roi_response_raw = try_load_npy("F.npy", mmap_mode="r", fn_transform=lambda x: x.T)
+        self._roi_response_neuropil = try_load_npy("Fneu.npy", mmap_mode="r", fn_transform=lambda x: x.T)
+        self._roi_response_deconvolved = try_load_npy("spks.npy", mmap_mode="r", fn_transform=lambda x: x.T)
+        self.iscell = try_load_npy("iscell.npy", mmap_mode="r")
+        self.ops = try_load_npy("ops.npy", fn_transform=lambda x: x.item())

         self._channel_names = [f"OpticalChannel{i}" for i in range(self.ops["nchannels"])]
         self._sampling_frequency = self.ops["fs"] * [2 if self.combined else 1][0]
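With the `try_load_npy` helper above, a missing output file no longer aborts construction when `allow_incomplete_import=True`: the corresponding attribute is set to `None`, optionally with a warning. A hedged sketch of what a caller might check afterwards (the folder is hypothetical and assumed to be missing `spks.npy`; attribute names are taken from the diff):

```python
from roiextractors import Suite2pSegmentationExtractor

seg = Suite2pSegmentationExtractor(
    folder_path="partial_suite2p_output",  # hypothetical folder without spks.npy
    allow_incomplete_import=True,
    warn_missing_files=True,               # would warn: "File spks.npy not found."
)

if seg._roi_response_deconvolved is None:
    # Deconvolved traces were not available; fall back to raw fluorescence.
    traces = seg._roi_response_raw
```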
@@ -70,7 +97,11 @@ def __init__(
         self._image_mean = self._summary_image_read("meanImg")

     def _load_npy(self, filename, mmap_mode=None):
-        file_path = self.folder_path / f"plane{self.plane_no}" / filename
+        if self._search_plane_subdirectory:
+            file_path = self.folder_path / f"plane{self.plane_no}" / filename
+        else:
+            file_path = self.folder_path / filename
+        assert file_path.exists(), f"File {file_path} does not exist, but is required for this extractor."
         return np.load(file_path, mmap_mode=mmap_mode, allow_pickle=mmap_mode is None)

     def get_accepted_list(self):
@@ -115,7 +146,7 @@ def get_roi_pixel_masks(self, roi_ids=None):
             pixel_mask.append(
                 np.vstack(
                     [
-                        self.ops["Ly"] - 1 - self.stat[i]["ypix"],
+                        self.stat[i]["ypix"],
                         self.stat[i]["xpix"],
                         self.stat[i]["lam"],
                     ]
@@ -173,7 +204,7 @@ def write_segmentation(segmentation_object: SegmentationExtractor, save_path: Pa
         for no, i in enumerate(stat):
             stat[no] = {
                 "med": roi_locs[no, :].tolist(),
-                "ypix": segmentation_object.get_image_size()[0] - 1 - pixel_masks[no][:, 0],
+                "ypix": pixel_masks[no][:, 0],
                 "xpix": pixel_masks[no][:, 1],
                 "lam": pixel_masks[no][:, 2],
             }
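The two `ypix` changes above are the bug fix named in the PR title: `get_roi_pixel_masks` and `write_segmentation` previously applied a vertical flip to the y coordinates (`Ly - 1 - ypix` and `get_image_size()[0] - 1 - ypix`, respectively), and the fix now passes Suite2p's `ypix` values through unchanged. The column indexing in `write_segmentation` (`[:, 0]`, `[:, 1]`, `[:, 2]`) implies each ROI's pixel mask is an (n_pixels x 3) array with columns (ypix, xpix, lam), as in this small sketch with invented values:

```python
import numpy as np

# One ROI covering three pixels; lam holds Suite2p's per-pixel weights.
ypix = np.array([10, 10, 11])
xpix = np.array([5, 6, 5])
lam = np.array([0.9, 0.7, 0.4])

# Stack into the (n_pixels x 3) layout used by the pixel-mask round trip:
# column 0 = ypix, column 1 = xpix, column 2 = lam.
pixel_mask = np.vstack([ypix, xpix, lam]).T
print(pixel_mask[:, 0])  # [10. 10. 11.] -> ypix, stored unflipped after this PR
```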