Commit
Using ruff rule to enforce the existence of docstrings in public functions (#1062)

Co-authored-by: Paul Adkisson <[email protected]>
h-mayorquin and pauladkisson authored Sep 10, 2024
1 parent d35b364 commit ab37a4e
Showing 9 changed files with 163 additions and 52 deletions.
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -16,6 +16,7 @@
### Improvements
* Using ruff to enforce existence of public classes' docstrings [PR #1034](https://github.com/catalystneuro/neuroconv/pull/1034)
* Separated tests that use external data by modality [PR #1049](https://github.com/catalystneuro/neuroconv/pull/1049)
* Using ruff to enforce existence of public functions' docstrings [PR #1062](https://github.com/catalystneuro/neuroconv/pull/1062)
* Improved device metadata of `IntanRecordingInterface` by adding the type of controller used [PR #1059](https://github.com/catalystneuro/neuroconv/pull/1059)


32 changes: 17 additions & 15 deletions pyproject.toml
@@ -8,15 +8,15 @@ version = "0.6.2"
description = "Convert data from proprietary formats to NWB format."
readme = "README.md"
authors = [
{name = "Cody Baker"},
{name = "Szonja Weigl"},
{name = "Heberto Mayorquin"},
{name = "Paul Adkisson"},
{name = "Luiz Tauffer"},
{name = "Ben Dichter", email = "[email protected]"}
{ name = "Cody Baker" },
{ name = "Szonja Weigl" },
{ name = "Heberto Mayorquin" },
{ name = "Paul Adkisson" },
{ name = "Luiz Tauffer" },
{ name = "Ben Dichter", email = "[email protected]" },
]
urls = { "Homepage" = "https://github.com/catalystneuro/neuroconv" }
license = {file = "license.txt"}
license = { file = "license.txt" }
keywords = ["nwb"]
classifiers = [
"Intended Audience :: Science/Research",
@@ -91,14 +91,10 @@ neuroconv = "neuroconv.tools.yaml_conversion_specification._yaml_conversion_spec
[tool.pytest.ini_options]
minversion = "6.0"
addopts = "-ra --doctest-glob='*.rst'"
testpaths = [
"docs/conversion_examples_gallery/",
"tests"
]
testpaths = ["docs/conversion_examples_gallery/", "tests"]
doctest_optionflags = "ELLIPSIS"



[tool.black]
line-length = 120
target-version = ['py38', 'py39', 'py310']
@@ -121,17 +117,23 @@ extend-exclude = '''
'''



[tool.ruff]

[tool.ruff.lint]
select = ["F401", "I", "D101"] # TODO: eventually, expand to other 'F' linting
select = [
"F401", # Unused import
"I", # All isort rules
"D101", # Missing docstring in public class
"D103", # Missing docstring in public function
]
fixable = ["ALL"]

[tool.ruff.lint.per-file-ignores]
"**__init__.py" = ["F401", "I"]
"tests/**" = ["D"] # We are not enforcing docstrings in tests
"tests/**" = ["D"] # We are not enforcing docstrings in tests
"src/neuroconv/tools/testing/data_interface_mixins.py" = ["D"] # We are not enforcing docstrings in the interface mixings
"docs/conf.py" = ["D"] # We are not enforcing docstrings in the conf.py file
"docs/conversion_examples_gallery/conftest.py" = ["D"] # We are not enforcing docstrings in the conversion examples

[tool.ruff.lint.isort]
relative-imports-order = "closest-to-furthest"
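For illustration, a minimal sketch of what the newly enabled D103 rule flags; the function names below are hypothetical, not from this commit:

```python
def load_session(file_path):  # flagged by D103: public function without a docstring
    with open(file_path) as f:
        return f.read()


def _load_session(file_path):  # leading underscore makes it private; D103 does not apply
    with open(file_path) as f:
        return f.read()


def load_session_documented(file_path):
    """Read a session file and return its contents."""  # satisfies D103
    with open(file_path) as f:
        return f.read()
```

This is also why several module-internal helpers below are renamed with a leading underscore rather than given docstrings.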
1 change: 1 addition & 0 deletions setup.py
@@ -10,6 +10,7 @@


def read_requirements(file):
"""Read requirements from a file."""
with open(root / file) as f:
return f.readlines()

src/neuroconv/datainterfaces/ecephys/blackrock/blackrockdatainterface.py
@@ -3,7 +3,7 @@

from pydantic import FilePath

from .header_tools import parse_nev_basic_header, parse_nsx_basic_header
from .header_tools import _parse_nev_basic_header, _parse_nsx_basic_header
from ..baserecordingextractorinterface import BaseRecordingExtractorInterface
from ..basesortingextractorinterface import BaseSortingExtractorInterface
from ....utils import get_schema_from_method_signature
@@ -60,7 +60,7 @@ def __init__(
def get_metadata(self) -> dict:
metadata = super().get_metadata()
# Open file and extract headers
basic_header = parse_nsx_basic_header(self.source_data["file_path"])
basic_header = _parse_nsx_basic_header(self.source_data["file_path"])
if "TimeOrigin" in basic_header:
metadata["NWBFile"].update(session_start_time=basic_header["TimeOrigin"])
if "Comment" in basic_header:
@@ -101,7 +101,7 @@ def __init__(self, file_path: FilePath, sampling_frequency: float = None, verbos
def get_metadata(self) -> dict:
metadata = super().get_metadata()
# Open file and extract headers
basic_header = parse_nev_basic_header(self.source_data["file_path"])
basic_header = _parse_nev_basic_header(self.source_data["file_path"])
if "TimeOrigin" in basic_header:
session_start_time = basic_header["TimeOrigin"]
metadata["NWBFile"].update(session_start_time=session_start_time.strftime("%Y-%m-%dT%H:%M:%S"))
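The renaming keeps the helpers exercised through the public interface; a hedged usage sketch (the file path is illustrative):

```python
from neuroconv.datainterfaces import BlackrockRecordingInterface

# get_metadata() reads session_start_time from the NSx basic header via the
# now-private _parse_nsx_basic_header helper.
interface = BlackrockRecordingInterface(file_path="session.ns5")
metadata = interface.get_metadata()
print(metadata["NWBFile"].get("session_start_time"))
```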
56 changes: 28 additions & 28 deletions src/neuroconv/datainterfaces/ecephys/blackrock/header_tools.py
@@ -5,7 +5,7 @@
from struct import calcsize, unpack


def processheaders(curr_file, packet_fields):
def _processheaders(curr_file, packet_fields):
"""
:param curr_file: {file} the current BR datafile to be processed
:param packet_fields : {named tuple} the specific binary fields for the given header
@@ -45,11 +45,11 @@ def processheaders(curr_file, packet_fields):
return packet_formatted


def format_filespec(header_list):
def _format_filespec(header_list):
return str(next(header_list)) + "." + str(next(header_list)) # eg 2.3


def format_timeorigin(header_list):
def _format_timeorigin(header_list):
year = next(header_list)
month = next(header_list)
_ = next(header_list)
@@ -61,51 +61,51 @@ def format_timeorigin(header_list):
return datetime(year, month, day, hour, minute, second, millisecond * 1000)


def format_stripstring(header_list):
def _format_stripstring(header_list):
string = bytes.decode(next(header_list), "latin-1")
return string.split(STRING_TERMINUS, 1)[0]


def format_none(header_list):
def _format_none(header_list):
return next(header_list)


FieldDef = namedtuple("FieldDef", ["name", "formatStr", "formatFnc"])
STRING_TERMINUS = "\x00"


def parse_nsx_basic_header(nsx_file):
def _parse_nsx_basic_header(nsx_file):
nsx_basic_dict = [
FieldDef("FileSpec", "2B", format_filespec), # 2 bytes - 2 unsigned char
FieldDef("BytesInHeader", "I", format_none), # 4 bytes - uint32
FieldDef("Label", "16s", format_stripstring), # 16 bytes - 16 char array
FieldDef("Comment", "256s", format_stripstring), # 256 bytes - 256 char array
FieldDef("Period", "I", format_none), # 4 bytes - uint32
FieldDef("TimeStampResolution", "I", format_none), # 4 bytes - uint32
FieldDef("TimeOrigin", "8H", format_timeorigin), # 16 bytes - 8 uint16
FieldDef("ChannelCount", "I", format_none),
FieldDef("FileSpec", "2B", _format_filespec), # 2 bytes - 2 unsigned char
FieldDef("BytesInHeader", "I", _format_none), # 4 bytes - uint32
FieldDef("Label", "16s", _format_stripstring), # 16 bytes - 16 char array
FieldDef("Comment", "256s", _format_stripstring), # 256 bytes - 256 char array
FieldDef("Period", "I", _format_none), # 4 bytes - uint32
FieldDef("TimeStampResolution", "I", _format_none), # 4 bytes - uint32
FieldDef("TimeOrigin", "8H", _format_timeorigin), # 16 bytes - 8 uint16
FieldDef("ChannelCount", "I", _format_none),
] # 4 bytes - uint32
datafile = open(nsx_file, "rb")
filetype_id = bytes.decode(datafile.read(8), "latin-1")
if filetype_id == "NEURALSG":
# this won't contain fields that can be added to NWBFile metadata
return dict()
return processheaders(datafile, nsx_basic_dict)
return _processheaders(datafile, nsx_basic_dict)


def parse_nev_basic_header(nev_file):
def _parse_nev_basic_header(nev_file):
nev_basic_dict = [
FieldDef("FileTypeID", "8s", format_stripstring), # 8 bytes - 8 char array
FieldDef("FileSpec", "2B", format_filespec), # 2 bytes - 2 unsigned char
FieldDef("AddFlags", "H", format_none), # 2 bytes - uint16
FieldDef("BytesInHeader", "I", format_none), # 4 bytes - uint32
FieldDef("BytesInDataPackets", "I", format_none), # 4 bytes - uint32
FieldDef("TimeStampResolution", "I", format_none), # 4 bytes - uint32
FieldDef("SampleTimeResolution", "I", format_none), # 4 bytes - uint32
FieldDef("TimeOrigin", "8H", format_timeorigin), # 16 bytes - 8 x uint16
FieldDef("CreatingApplication", "32s", format_stripstring), # 32 bytes - 32 char array
FieldDef("Comment", "256s", format_stripstring), # 256 bytes - 256 char array
FieldDef("NumExtendedHeaders", "I", format_none),
FieldDef("FileTypeID", "8s", _format_stripstring), # 8 bytes - 8 char array
FieldDef("FileSpec", "2B", _format_filespec), # 2 bytes - 2 unsigned char
FieldDef("AddFlags", "H", _format_none), # 2 bytes - uint16
FieldDef("BytesInHeader", "I", _format_none), # 4 bytes - uint32
FieldDef("BytesInDataPackets", "I", _format_none), # 4 bytes - uint32
FieldDef("TimeStampResolution", "I", _format_none), # 4 bytes - uint32
FieldDef("SampleTimeResolution", "I", _format_none), # 4 bytes - uint32
FieldDef("TimeOrigin", "8H", _format_timeorigin), # 16 bytes - 8 x uint16
FieldDef("CreatingApplication", "32s", _format_stripstring), # 32 bytes - 32 char array
FieldDef("Comment", "256s", _format_stripstring), # 256 bytes - 256 char array
FieldDef("NumExtendedHeaders", "I", _format_none),
]
datafile = open(nev_file, "rb")
return processheaders(datafile, nev_basic_dict)
return _processheaders(datafile, nev_basic_dict)
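To illustrate the FieldDef-driven parsing these now-private helpers implement, a condensed sketch (not the module's exact code) of how each header field is read, unpacked, and formatted:

```python
from collections import namedtuple
from struct import calcsize, unpack

FieldDef = namedtuple("FieldDef", ["name", "formatStr", "formatFnc"])


def process_headers_sketch(curr_file, packet_fields):
    """Read each field's bytes, unpack per its struct format string, then format."""
    packet_formatted = {}
    for field in packet_fields:
        num_bytes = calcsize(field.formatStr)  # e.g. "8H" -> 16 bytes
        raw_values = unpack(field.formatStr, curr_file.read(num_bytes))
        # Each formatting function consumes values from an iterator, mirroring
        # the next(header_list) calls in the helpers above.
        packet_formatted[field.name] = field.formatFnc(iter(raw_values))
    return packet_formatted
```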
94 changes: 91 additions & 3 deletions src/neuroconv/tools/roiextractors/roiextractors.py
@@ -682,9 +682,38 @@ def add_imaging_to_nwbfile(
iterator_type: Optional[str] = "v2",
iterator_options: Optional[dict] = None,
parent_container: Literal["acquisition", "processing/ophys"] = "acquisition",
):
) -> NWBFile:
"""
Add imaging data from an ImagingExtractor object to an NWBFile.

Parameters
----------
imaging : ImagingExtractor
The extractor object containing the imaging data.
nwbfile : NWBFile
The NWB file where the imaging data will be added.
metadata : dict, optional
Metadata for the NWBFile, by default None.
photon_series_type : {"TwoPhotonSeries", "OnePhotonSeries"}, optional
The type of photon series to be added, by default "TwoPhotonSeries".
photon_series_index : int, optional
The index of the photon series in the provided imaging data, by default 0.
iterator_type : str, optional
The type of iterator to use for adding the data. Commonly used to manage large datasets, by default "v2".
iterator_options : dict, optional
Additional options for controlling the iteration process, by default None.
parent_container : {"acquisition", "processing/ophys"}, optional
Specifies the parent container to which the photon series should be added, either as part of "acquisition" or
under the "processing/ophys" module, by default "acquisition".
Returns
-------
NWBFile
The NWB file with the imaging data added.
"""
add_devices_to_nwbfile(nwbfile=nwbfile, metadata=metadata)
add_photon_series_to_nwbfile(
nwbfile = add_photon_series_to_nwbfile(
imaging=imaging,
nwbfile=nwbfile,
metadata=metadata,
Expand All @@ -695,6 +724,8 @@ def add_imaging_to_nwbfile(
parent_container=parent_container,
)

return nwbfile
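
A hedged usage sketch for the signature documented above; the dummy-data helpers are assumed from pynwb's and roiextractors' testing utilities:

```python
from pynwb.testing.mock.file import mock_NWBFile
from roiextractors.testing import generate_dummy_imaging_extractor

from neuroconv.tools.roiextractors import add_imaging_to_nwbfile

imaging = generate_dummy_imaging_extractor(num_frames=30, num_rows=10, num_columns=10)

# The function now returns the NWBFile, so the result can be reassigned or chained.
nwbfile = add_imaging_to_nwbfile(imaging=imaging, nwbfile=mock_NWBFile())
```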


def write_imaging(
imaging: ImagingExtractor,
@@ -1158,8 +1189,31 @@ def add_background_plane_segmentation_to_nwbfile(
iterator_options: Optional[dict] = None,
compression_options: Optional[dict] = None, # TODO: remove completely after 10/1/2024
) -> NWBFile:
# TODO needs docstring
"""
Add background plane segmentation data from a SegmentationExtractor object to an NWBFile.

Parameters
----------
segmentation_extractor : SegmentationExtractor
The extractor object containing background segmentation data.
nwbfile : NWBFile
The NWB file to which the background plane segmentation will be added.
metadata : dict, optional
Metadata for the NWBFile, by default None.
background_plane_segmentation_name : str, optional
The name of the background PlaneSegmentation object to be added, by default None.
mask_type : str, optional
Type of mask to use for segmentation; options are "image", "pixel", or "voxel", by default "image".
iterator_options : dict, optional
Options for iterating over the segmentation data, by default None.
compression_options : dict, optional
Deprecated: options for compression; will be removed after 2024-10-01, by default None.

Returns
-------
NWBFile
The NWBFile with the added background plane segmentation data.
"""
# TODO: remove completely after 10/1/2024
if compression_options is not None:
warnings.warn(
@@ -1724,6 +1778,40 @@ def add_segmentation_to_nwbfile(
iterator_options: Optional[dict] = None,
compression_options: Optional[dict] = None, # TODO: remove completely after 10/1/2024
) -> NWBFile:
"""
Add segmentation data from a SegmentationExtractor object to an NWBFile.

Parameters
----------
segmentation_extractor : SegmentationExtractor
The extractor object containing segmentation data.
nwbfile : NWBFile
The NWB file where the segmentation data will be added.
metadata : dict, optional
Metadata for the NWBFile, by default None.
plane_segmentation_name : str, optional
The name of the PlaneSegmentation object to be added, by default None.
background_plane_segmentation_name : str, optional
The name of the background PlaneSegmentation, if any, by default None.
include_background_segmentation : bool, optional
If True, includes background plane segmentation, by default False.
include_roi_centroids : bool, optional
If True, includes the centroids of the regions of interest (ROIs), by default True.
include_roi_acceptance : bool, optional
If True, includes the acceptance status of ROIs, by default True.
mask_type : str, optional
Type of mask to use for segmentation; can be either "image" or "pixel", by default "image".
iterator_options : dict, optional
Options for iterating over the data, by default None.
compression_options : dict, optional
Deprecated: options for compression; will be removed after 2024-10-01, by default None.

Returns
-------
NWBFile
The NWBFile with the added segmentation data.
"""

# TODO: remove completely after 10/1/2024
if compression_options is not None:
warnings.warn(
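Similarly, a hedged sketch of the segmentation entry point (the dummy-extractor helper is assumed from roiextractors' testing module):

```python
from pynwb.testing.mock.file import mock_NWBFile
from roiextractors.testing import generate_dummy_segmentation_extractor

from neuroconv.tools.roiextractors import add_segmentation_to_nwbfile

segmentation = generate_dummy_segmentation_extractor(num_rois=5, num_frames=20)
nwbfile = add_segmentation_to_nwbfile(
    segmentation_extractor=segmentation,
    nwbfile=mock_NWBFile(),
    mask_type="image",  # "image" or "pixel", per the docstring above
)
```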
5 changes: 3 additions & 2 deletions src/neuroconv/tools/spikeinterface/spikeinterface.py
@@ -1992,7 +1992,7 @@ def add_sorting_analyzer_to_nwbfile(
sorting_copy.set_property(prop, tm[prop])

add_electrodes_info_to_nwbfile(recording, nwbfile=nwbfile, metadata=metadata)
electrode_group_indices = get_electrode_group_indices(recording, nwbfile=nwbfile)
electrode_group_indices = _get_electrode_group_indices(recording, nwbfile=nwbfile)
unit_electrode_indices = [electrode_group_indices] * len(sorting.unit_ids)

add_units_table_to_nwbfile(
@@ -2214,7 +2214,8 @@ def add_waveforms(
)


def get_electrode_group_indices(recording, nwbfile):
def _get_electrode_group_indices(recording, nwbfile):
""" """
if "group_name" in recording.get_property_keys():
group_names = list(np.unique(recording.get_property("group_name")))
elif "group" in recording.get_property_keys():
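The hunk truncates the helper's body; a hedged sketch of the visible group-resolution logic (the branch bodies past the `elif` are assumptions):

```python
import numpy as np


def resolve_group_names_sketch(recording):
    """Prefer 'group_name' over 'group' when resolving electrode groups."""
    if "group_name" in recording.get_property_keys():
        return list(np.unique(recording.get_property("group_name")))
    elif "group" in recording.get_property_keys():
        # The original continues past this point; stringifying raw group ids
        # is an assumption for this sketch.
        return [str(group) for group in np.unique(recording.get_property("group"))]
    return []  # assumed default when no grouping property exists
```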
11 changes: 11 additions & 0 deletions src/neuroconv/tools/testing/mock_probes.py
@@ -2,6 +2,17 @@


def generate_mock_probe(num_channels: int, num_shanks: int = 3):
"""
Generate a mock probe with the specified number of channels and shanks.

Parameters:
num_channels (int): The number of channels in the probe.
num_shanks (int, optional): The number of shanks in the probe. Defaults to 3.

Returns:
pi.Probe: The generated mock probe.
"""
import probeinterface as pi

# The shank ids will be 0, 0, 0, ..., 1, 1, 1, ..., 2, 2, 2, ...
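A quick usage sketch; get_shank_count and get_contact_count are standard probeinterface Probe methods:

```python
from neuroconv.tools.testing.mock_probes import generate_mock_probe

probe = generate_mock_probe(num_channels=12, num_shanks=3)
print(probe.get_shank_count())    # 3
print(probe.get_contact_count())  # 12
```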
9 changes: 8 additions & 1 deletion src/neuroconv/utils/json_schema.py
@@ -298,7 +298,14 @@ def get_schema_from_hdmf_class(hdmf_class):
return schema


def get_metadata_schema_for_icephys():
def get_metadata_schema_for_icephys() -> dict:
"""
Returns the metadata schema for icephys data.

Returns:
dict: The metadata schema for icephys data.
"""
schema = get_base_schema(tag="Icephys")
schema["required"] = ["Device", "Electrodes"]
schema["properties"] = dict(
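A small usage sketch, importing straight from the module shown in the diff:

```python
from neuroconv.utils.json_schema import get_metadata_schema_for_icephys

schema = get_metadata_schema_for_icephys()
print(schema["required"])  # ['Device', 'Electrodes'], per the body above
```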
