From d577ede22e3e5f8c03589f5d20b1d3e5b193afde Mon Sep 17 00:00:00 2001
From: grg2rsr
Date: Thu, 26 Sep 2024 10:08:43 +0100
Subject: [PATCH 1/2] _consistency_checks.py dev

---
 src/ibl_to_nwb/testing/_consistency_checks.py | 439 ++++++++----------
 1 file changed, 198 insertions(+), 241 deletions(-)

diff --git a/src/ibl_to_nwb/testing/_consistency_checks.py b/src/ibl_to_nwb/testing/_consistency_checks.py
index 490ac00..3b6ad52 100644
--- a/src/ibl_to_nwb/testing/_consistency_checks.py
+++ b/src/ibl_to_nwb/testing/_consistency_checks.py
@@ -1,10 +1,10 @@
 from pathlib import Path
 
-from numpy.testing import assert_array_equal
+from numpy.testing import assert_array_equal, assert_array_less
 from one.api import ONE
 from pandas.testing import assert_frame_equal
 from pynwb import NWBHDF5IO, NWBFile
-
+import numpy as np
 
 def check_written_nwbfile_for_consistency(*, one: ONE, nwbfile_path: Path):
     """
@@ -25,7 +25,9 @@ def check_written_nwbfile_for_consistency(*, one: ONE, nwbfile_path: Path):
     # TODO: fill in the rest of the routed calls
 
 
-def _check_wheel_data(*, eid: str, one: ONE, nwbfile: NWBFile):
+def _check_wheel_data(
+    *, eid: str, one: ONE, nwbfile: NWBFile, revision: str = None
+):
     processing_module = nwbfile.processing["behavior"]
     wheel_position_series = processing_module.data_interfaces["CompassDirection"].spatial_series["WheelPositionSeries"]
     wheel_movement_table = nwbfile.processing["behavior"].data_interfaces["WheelMovementIntervals"][:]
@@ -51,241 +53,196 @@
     assert_array_equal(x=data_from_ONE, y=data_from_NWB)
 
 
-# def _check_lick_data(*, eid: str, one: ONE, nwbfile: NWBFile):
-#     """read-after-write test for the datainterface `LickInterface`.
-#     TODO DOCME
-#     Args:
-#         nwbfile (NWBFile): nwbfile object.
-#         one (ONE): ONE object.
-#         eid (str): experiment uuid / equivalent to session_id
-#     """
-#     table = nwbfile.processing["behavior"].data_interfaces["LickTimes"][:]
-#     data_nwb = table["lick_time"].values
-#     data_one = one.load_dataset(eid, "licks.times")
-#     check_arrays(data_nwb, data_one, full_check=full_check)
-#
-#
-# def test_RoiMotionEnergyInterface(
-#     nwbfile: NWBFile, one: ONE, eid: str, full_check: bool = False, verbose: bool = False, revision: str = None
-# ):
-#     """read-after-write test for the datainterface `RoiMotionEnergyInterface`.
-#     TODO DOCME
-#     Args:
-#         nwbfile (NWBFile): nwbfile object.
-#         one (ONE): ONE object.
-#         eid (str): experiment uuid / equivalent to session_id
-#     """
-#     camera_views = ["body", "left", "right"]
-#
-#     for view in camera_views:
-#         # data
-#         data_nwb = nwbfile.processing["behavior"].data_interfaces["%sCameraMotionEnergy" % view.capitalize()].data[:]
-#         data_one = one.load_dataset(eid, "%sCamera.ROIMotionEnergy" % view, collection="alf")
-#         check_arrays(data_nwb, data_one, full_check=full_check)
-#
-#         # timestamps
-#         data_nwb = (
-#             nwbfile.processing["behavior"].data_interfaces["%sCameraMotionEnergy" % view.capitalize()].timestamps[:]
-#         )
-#         data_one = one.load_dataset(eid, "_ibl_%sCamera.times" % view, collection="alf")
-#         check_arrays(data_nwb, data_one, full_check=full_check)
-#
-#
-# def test_IblPoseEstimationInterface(
-#     nwbfile: NWBFile, one: ONE, eid: str, full_check: bool = False, verbose: bool = False, revision: str = None
-# ):
-#     """read-after-write test for the datainterface `IblPoseEstimationInterface`.
-#     TODO DOCME
-#     Args:
-#         nwbfile (NWBFile): nwbfile object.
-#         one (ONE): ONE object.
-#         eid (str): experiment uuid / equivalent to session_id
-#     """
-#
-#     camera_views = ["body", "left", "right"]
-#
-#     for view in camera_views:
-#         nodes = nwbfile.processing["behavior"].data_interfaces["PoseEstimation%sCamera" % view.capitalize()].nodes[:]
-#
-#         for node in nodes:
-#             # x
-#             data_nwb = (
-#                 nwbfile.processing["behavior"]
-#                 .data_interfaces["PoseEstimation%sCamera" % view.capitalize()]
-#                 .pose_estimation_series[node]
-#                 .data[:][:, 0]
-#             )
-#             data_one = one.load_dataset(eid, "_ibl_%sCamera.dlc.pqt" % view, collection="alf")["%s_x" % node].values
-#             check_arrays(data_nwb, data_one, full_check=full_check)
-#
-#             # y
-#             data_nwb = (
-#                 nwbfile.processing["behavior"]
-#                 .data_interfaces["PoseEstimation%sCamera" % view.capitalize()]
-#                 .pose_estimation_series[node]
-#                 .data[:][:, 1]
-#             )
-#             data_one = one.load_dataset(eid, "_ibl_%sCamera.dlc.pqt" % view, collection="alf")["%s_y" % node].values
-#             check_arrays(data_nwb, data_one, full_check=full_check)
-#
-#             # confidence
-#             data_nwb = (
-#                 nwbfile.processing["behavior"]
-#                 .data_interfaces["PoseEstimation%sCamera" % view.capitalize()]
-#                 .pose_estimation_series[node]
-#                 .confidence[:]
-#             )
-#             data_one = one.load_dataset(eid, "_ibl_%sCamera.dlc.pqt" % view, collection="alf")[
-#                 "%s_likelihood" % node
-#             ].values
-#             check_arrays(data_nwb, data_one, full_check=full_check)
-#
-#             # timestamps
-#             data_nwb = (
-#                 nwbfile.processing["behavior"]
-#                 .data_interfaces["PoseEstimation%sCamera" % view.capitalize()]
-#                 .pose_estimation_series[node]
-#                 .timestamps[:]
-#             )
-#             data_one = one.load_dataset(eid, "_ibl_%sCamera.times" % view, collection="alf")
-#             check_arrays(data_nwb, data_one, full_check=full_check)
-#
-#
-# def test_BrainwideMapTrialsInterface(
-#     nwbfile: NWBFile, one: ONE, eid: str, full_check: bool = False, verbose: bool = False, revision: str = None
-# ):
-#     """read-after-write test for the datainterface `BrainwideMapTrialsInterface`.
-#     TODO DOCME
-#     Args:
-#         nwbfile (NWBFile): nwbfile object.
-#         one (ONE): ONE object.
-#         eid (str): experiment uuid / equivalent to session_id
-#     """
-#
-#     data_nwb = nwbfile.trials[:]
-#     data_one = one.load_dataset(eid, "_ibl_trials.table", collection="alf")
-#
-#     naming_map = {
-#         "start_time": "intervals_0",
-#         "stop_time": "intervals_1",
-#         "choice": "choice",
-#         "feedback_type": "feedbackType",
-#         "reward_volume": "rewardVolume",
-#         "contrast_left": "contrastLeft",
-#         "contrast_right": "contrastRight",
-#         "probability_left": "probabilityLeft",
-#         "feedback_time": "feedback_times",
-#         "response_time": "response_times",
-#         # 'stim_off_time': '',
-#         "stim_on_time": "stimOn_times",
-#         "go_cue_time": "goCue_times",
-#         "first_movement_time": "firstMovement_times",
-#     }
-#     naming_map = {v: k for k, v in naming_map.items()}
-#
-#     check_tables(data_one, data_nwb, naming_map=naming_map)
-#
-#
-# def test_PupilTrackingInterface(
-#     nwbfile: NWBFile, one: ONE, eid: str, full_check: bool = False, verbose: bool = False, revision: str = None
-# ):
-#     """read-after-write test for the datainterface `PupilTrackingInterface`.
-#     TODO DOCME
-#     Args:
-#         nwbfile (NWBFile): nwbfile object.
-#         one (ONE): ONE object.
-#         eid (str): experiment uuid / equivalent to session_id
-#     """
-#
-#     camera_views = ["left", "right"]
-#     for view in camera_views:
-#         # raw
-#         data_nwb = (
-#             nwbfile.processing["behavior"]
-#             .data_interfaces["%sPupilTracking" % view.capitalize()]
-#             .time_series["%sRawPupilDiameter" % view.capitalize()]
-#             .data[:]
-#         )
-#         data_one = one.load_dataset(eid, "_ibl_%sCamera.features.pqt" % view, collection="alf")[
-#             "pupilDiameter_raw"
-#         ].values
-#
-#         check_arrays(data_nwb, data_one, full_check=full_check)
-#
-#         # smooth
-#         data_nwb = (
-#             nwbfile.processing["behavior"]
-#             .data_interfaces["%sPupilTracking" % view.capitalize()]
-#             .time_series["%sSmoothedPupilDiameter" % view.capitalize()]
-#             .data[:]
-#         )
-#         data_one = one.load_dataset(eid, "_ibl_%sCamera.features.pqt" % view, collection="alf")[
-#             "pupilDiameter_smooth"
-#         ].values
-#
-#         check_arrays(data_nwb, data_one, full_check=full_check)
-#
-#
-# def test_IblSortingInterface(
-#     nwbfile: NWBFile, one: ONE, eid: str, full_check: bool = False, verbose: bool = False, revision: str = None
-# ):
-#     """_summary_
-#     Args:
-#         nwbfile (_type_): _description_
-#         one (_type_): _description_
-#         eid (_type_): _description_
-#         full_check (bool, optional): _description_. Defaults to False.
-#         revision (_type_, optional): _description_. Defaults to None.
-#     Returns:
-#         _type_: _description_
-#     """
-#
-#     units_table = nwbfile.units[:]
-#     probe_names = units_table["probe_name"].unique()
-#
-#     if full_check:
-#         inds = units_table.index
-#     else:
-#         inds = units_table.sample(20).index
-#
-#     spike_times = {}
-#     spike_clusters = {}
-#     cluster_uuids = {}
-#
-#     # for fast spike extraction
-#     def get_spikes_for_cluster(spike_clusters, spike_times, cluster):
-#         # requires that spike_times and spike_clusters are sorted
-#         start_ix, stop_ix = np.searchsorted(spike_clusters, [cluster, cluster + 1])
-#         return np.sort(spike_times[start_ix:stop_ix])
-#
-#     # get and prep data once
-#     for probe_name in probe_names:
-#
-#         # include revision TODO FIXME this will likely change - check back in with Miles
-#         if revision is not None:
-#             collection = f"alf/{probe_name}/pykilosort/{revision}"
-#         else:
-#             collection = f"alf/{probe_name}/pykilosort"
-#
-#         spike_times[probe_name] = one.load_dataset(eid, "spikes.times", collection=collection)
-#         spike_clusters[probe_name] = one.load_dataset(eid, "spikes.clusters", collection=collection)
-#         cluster_uuids[probe_name] = one.load_dataset(eid, "clusters.uuids", collection=collection)
-#
-#         # pre-sort for fast access
-#         sort_ix = np.argsort(spike_clusters[probe_name])
-#         spike_clusters[probe_name] = spike_clusters[probe_name][sort_ix]
-#         spike_times[probe_name] = spike_times[probe_name][sort_ix]
-#
-#     for ix in inds:
-#         probe_name = units_table.loc[ix, "probe_name"]
-#         uuid = units_table.loc[ix, "uuid"]
-#         nwb_spike_times = units_table.loc[ix, "spike_times"]
-#
-#         cluster_id = np.where(cluster_uuids[probe_name] == uuid)[0][0]
-#         one_spike_times = get_spikes_for_cluster(spike_clusters[probe_name], spike_times[probe_name], cluster_id)
-#
-#         # more verbose but slower for more than ~20 checks
-#         # one_spike_times = spike_times[probe_name][spike_clusters[probe_name] == cluster_id]
-#
-#         # testing
-#         testing.assert_array_less(np.max((one_spike_times - nwb_spike_times) * 30000), 1)
+def _check_lick_data(*, eid: str, one: ONE, nwbfile: NWBFile):
+    table = nwbfile.processing["behavior"].data_interfaces["LickTimes"][:]
+    data_from_NWB = table["lick_time"].values
+    data_from_ONE = one.load_dataset(eid, "licks.times")
+
+    assert_array_equal(x=data_from_ONE, y=data_from_NWB)
+
+
+def _check_RoiMotionEnergyInterface(
+    *, eid: str, one: ONE, nwbfile: NWBFile, revision: str = None
+):
+
+    camera_views = ["body", "left", "right"]
+
+    for view in camera_views:
+        # data
+        data_from_NWB = nwbfile.processing["behavior"].data_interfaces["%sCameraMotionEnergy" % view.capitalize()].data[:]
+        data_from_ONE = one.load_dataset(eid, "%sCamera.ROIMotionEnergy" % view, collection="alf")
+        assert_array_equal(x=data_from_ONE, y=data_from_NWB)
+
+        # timestamps
+        data_from_NWB = (
+            nwbfile.processing["behavior"].data_interfaces["%sCameraMotionEnergy" % view.capitalize()].timestamps[:]
+        )
+        data_from_ONE = one.load_dataset(eid, "_ibl_%sCamera.times" % view, collection="alf")
+        assert_array_equal(x=data_from_ONE, y=data_from_NWB)
+
+
+def _check_IblPoseEstimationInterface(
+    *, eid: str, one: ONE, nwbfile: NWBFile, revision: str = None
+):
+
+    camera_views = ["body", "left", "right"]
+
+    for view in camera_views:
+        nodes = nwbfile.processing["behavior"].data_interfaces["PoseEstimation%sCamera" % view.capitalize()].nodes[:]
+
+        for node in nodes:
+            # x
+            data_from_NWB = (
+                nwbfile.processing["behavior"]
+                .data_interfaces["PoseEstimation%sCamera" % view.capitalize()]
+                .pose_estimation_series[node]
+                .data[:][:, 0]
+            )
+            data_from_ONE = one.load_dataset(eid, "_ibl_%sCamera.dlc.pqt" % view, collection="alf")["%s_x" % node].values
+            assert_array_equal(x=data_from_ONE, y=data_from_NWB)
+
+            # y
+            data_from_NWB = (
+                nwbfile.processing["behavior"]
+                .data_interfaces["PoseEstimation%sCamera" % view.capitalize()]
+                .pose_estimation_series[node]
+                .data[:][:, 1]
+            )
+            data_from_ONE = one.load_dataset(eid, "_ibl_%sCamera.dlc.pqt" % view, collection="alf")["%s_y" % node].values
+            assert_array_equal(x=data_from_ONE, y=data_from_NWB)
+
+            # confidence
+            data_from_NWB = (
+                nwbfile.processing["behavior"]
+                .data_interfaces["PoseEstimation%sCamera" % view.capitalize()]
+                .pose_estimation_series[node]
+                .confidence[:]
+            )
+            data_from_ONE = one.load_dataset(eid, "_ibl_%sCamera.dlc.pqt" % view, collection="alf")[
+                "%s_likelihood" % node
+            ].values
+            assert_array_equal(x=data_from_ONE, y=data_from_NWB)
+
+            # timestamps
+            data_from_NWB = (
+                nwbfile.processing["behavior"]
+                .data_interfaces["PoseEstimation%sCamera" % view.capitalize()]
+                .pose_estimation_series[node]
+                .timestamps[:]
+            )
+            data_from_ONE = one.load_dataset(eid, "_ibl_%sCamera.times" % view, collection="alf")
+            assert_array_equal(x=data_from_ONE, y=data_from_NWB)
+
+
+def _check_BrainwideMapTrialsInterface(
+    *, eid: str, one: ONE, nwbfile: NWBFile, revision: str = None
+):
+
+    data_from_NWB = nwbfile.trials[:]
+    data_from_ONE = one.load_dataset(eid, "_ibl_trials.table", collection="alf")
+    data_from_ONE['stimOff_times'] = one.load_dataset(eid, "_ibl_trials.stimOff_times", collection="alf")
+    data_from_ONE.index.name = 'id'
+
+    naming_map = {
+        "start_time": "intervals_0",
+        "stop_time": "intervals_1",
+        "choice": "choice",
+        "feedback_type": "feedbackType",
+        "reward_volume": "rewardVolume",
+        "contrast_left": "contrastLeft",
+        "contrast_right": "contrastRight",
+        "probability_left": "probabilityLeft",
+        "feedback_time": "feedback_times",
+        "response_time": "response_times",
+        "stim_off_time": 'stimOff_times',
+        "stim_on_time": "stimOn_times",
+        "go_cue_time": "goCue_times",
+        "first_movement_time": "firstMovement_times",
+    }
+
+    # reordering and renaming the columns
+    data_from_ONE = data_from_ONE[[naming_map[col] for col in data_from_NWB.columns]]
+    data_from_ONE.columns = naming_map.keys()
+
+    assert_frame_equal(left=data_from_NWB, right=data_from_ONE)
+
+def _check_PupilTrackingInterface(
+    *, eid: str, one: ONE, nwbfile: NWBFile, revision: str = None
+):
+
+    camera_views = ["left", "right"]
+    for view in camera_views:
+        # raw
+        data_from_NWB = (
+            nwbfile.processing["behavior"]
+            .data_interfaces["%sPupilTracking" % view.capitalize()]
+            .time_series["%sRawPupilDiameter" % view.capitalize()]
+            .data[:]
+        )
+        data_from_ONE = one.load_dataset(eid, "_ibl_%sCamera.features.pqt" % view, collection="alf")[
+            "pupilDiameter_raw"
+        ].values
+
+        assert_array_equal(x=data_from_ONE, y=data_from_NWB)
+
+        # smooth
+        data_from_NWB = (
+            nwbfile.processing["behavior"]
+            .data_interfaces["%sPupilTracking" % view.capitalize()]
+            .time_series["%sSmoothedPupilDiameter" % view.capitalize()]
+            .data[:]
+        )
+        data_from_ONE = one.load_dataset(eid, "_ibl_%sCamera.features.pqt" % view, collection="alf")[
+            "pupilDiameter_smooth"
+        ].values
+
+        assert_array_equal(x=data_from_ONE, y=data_from_NWB)
+
+
+def _check_IblSortingInterface(
+    *, eid: str, one: ONE, nwbfile: NWBFile, revision: str = None
+):
+
+    units_table = nwbfile.units[:]
+    probe_names = units_table["probe_name"].unique()
+
+    spike_times = {}
+    spike_clusters = {}
+    cluster_uuids = {}
+
+    # for fast spike extraction
+    def get_spikes_for_cluster(spike_clusters, spike_times, cluster):
+        # requires that spike_times and spike_clusters are sorted
+        start_ix, stop_ix = np.searchsorted(spike_clusters, [cluster, cluster + 1])
+        return np.sort(spike_times[start_ix:stop_ix])
+
+    # get and prep data once
+    for probe_name in probe_names:
+
+        # include revision TODO FIXME this will likely change - check back in with Miles
+        if revision is not None:
+            collection = f"alf/{probe_name}/pykilosort/{revision}"
+        else:
+            collection = f"alf/{probe_name}/pykilosort"
+
+        spike_times[probe_name] = one.load_dataset(eid, "spikes.times", collection=collection)
+        spike_clusters[probe_name] = one.load_dataset(eid, "spikes.clusters", collection=collection)
+        cluster_uuids[probe_name] = one.load_dataset(eid, "clusters.uuids", collection=collection)
+
+        # pre-sort for fast access
+        sort_ix = np.argsort(spike_clusters[probe_name])
+        spike_clusters[probe_name] = spike_clusters[probe_name][sort_ix]
+        spike_times[probe_name] = spike_times[probe_name][sort_ix]
+
+    for ix in units_table.index:
+        probe_name = units_table.loc[ix, "probe_name"]
+        uuid = units_table.loc[ix, "uuid"]
+        spike_times_from_NWB = units_table.loc[ix, "spike_times"]
+
+        cluster_id = np.where(cluster_uuids[probe_name] == uuid)[0][0]
+        spike_times_from_ONE = get_spikes_for_cluster(spike_clusters[probe_name], spike_times[probe_name], cluster_id)
+
+        # more verbose but slower for more than ~20 checks
+        # spike_times_from_ONE = spike_times[probe_name][spike_clusters[probe_name] == cluster_id]
+
+        # testing
+        assert_array_less(np.max((spike_times_from_ONE - spike_times_from_NWB) * 30000), 1)

From 65f83df50d340e5078ba9b08763e7e8e6ec638a0 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Thu, 26 Sep 2024 09:27:10 +0000
Subject: [PATCH 2/2] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 src/ibl_to_nwb/testing/_consistency_checks.py | 46 +++++++++----------
 1 file changed, 21 insertions(+), 25 deletions(-)

diff --git a/src/ibl_to_nwb/testing/_consistency_checks.py b/src/ibl_to_nwb/testing/_consistency_checks.py
index 3b6ad52..a08e314 100644
--- a/src/ibl_to_nwb/testing/_consistency_checks.py
+++ b/src/ibl_to_nwb/testing/_consistency_checks.py
@@ -1,10 +1,11 @@
 from pathlib import Path
 
+import numpy as np
 from numpy.testing import assert_array_equal, assert_array_less
 from one.api import ONE
 from pandas.testing import assert_frame_equal
 from pynwb import NWBHDF5IO, NWBFile
-import numpy as np
+
 
 def check_written_nwbfile_for_consistency(*, one: ONE, nwbfile_path: Path):
     """
@@ -25,9 +26,7 @@ def check_written_nwbfile_for_consistency(*, one: ONE, nwbfile_path: Path):
     # TODO: fill in the rest of the routed calls
 
 
-def _check_wheel_data(
-    *, eid: str, one: ONE, nwbfile: NWBFile, revision: str = None
-):
+def _check_wheel_data(*, eid: str, one: ONE, nwbfile: NWBFile, revision: str = None):
     processing_module = nwbfile.processing["behavior"]
     wheel_position_series = processing_module.data_interfaces["CompassDirection"].spatial_series["WheelPositionSeries"]
     wheel_movement_table = nwbfile.processing["behavior"].data_interfaces["WheelMovementIntervals"][:]
@@ -60,15 +59,15 @@ def _check_lick_data(*, eid: str, one: ONE, nwbfile: NWBFile):
     assert_array_equal(x=data_from_ONE, y=data_from_NWB)
 
 
-def _check_RoiMotionEnergyInterface(
-    *, eid: str, one: ONE, nwbfile: NWBFile, revision: str = None
-):
+def _check_RoiMotionEnergyInterface(*, eid: str, one: ONE, nwbfile: NWBFile, revision: str = None):
 
     camera_views = ["body", "left", "right"]
 
     for view in camera_views:
         # data
-        data_from_NWB = nwbfile.processing["behavior"].data_interfaces["%sCameraMotionEnergy" % view.capitalize()].data[:]
+        data_from_NWB = (
+            nwbfile.processing["behavior"].data_interfaces["%sCameraMotionEnergy" % view.capitalize()].data[:]
+        )
         data_from_ONE = one.load_dataset(eid, "%sCamera.ROIMotionEnergy" % view, collection="alf")
         assert_array_equal(x=data_from_ONE, y=data_from_NWB)
 
@@ -80,9 +79,7 @@ def _check_RoiMotionEnergyInterface(
         assert_array_equal(x=data_from_ONE, y=data_from_NWB)
 
 
-def _check_IblPoseEstimationInterface(
-    *, eid: str, one: ONE, nwbfile: NWBFile, revision: str = None
-):
+def _check_IblPoseEstimationInterface(*, eid: str, one: ONE, nwbfile: NWBFile, revision: str = None):
 
     camera_views = ["body", "left", "right"]
 
@@ -97,7 +94,9 @@ def _check_IblPoseEstimationInterface(
                 .pose_estimation_series[node]
                 .data[:][:, 0]
             )
-            data_from_ONE = one.load_dataset(eid, "_ibl_%sCamera.dlc.pqt" % view, collection="alf")["%s_x" % node].values
+            data_from_ONE = one.load_dataset(eid, "_ibl_%sCamera.dlc.pqt" % view, collection="alf")[
+                "%s_x" % node
+            ].values
             assert_array_equal(x=data_from_ONE, y=data_from_NWB)
 
             # y
@@ -107,7 +106,9 @@ def _check_IblPoseEstimationInterface(
                 .pose_estimation_series[node]
                 .data[:][:, 1]
            )
-            data_from_ONE = one.load_dataset(eid, "_ibl_%sCamera.dlc.pqt" % view, collection="alf")["%s_y" % node].values
+            data_from_ONE = one.load_dataset(eid, "_ibl_%sCamera.dlc.pqt" % view, collection="alf")[
+                "%s_y" % node
+            ].values
             assert_array_equal(x=data_from_ONE, y=data_from_NWB)
 
             # confidence
@@ -133,14 +134,12 @@ def _check_IblPoseEstimationInterface(
         assert_array_equal(x=data_from_ONE, y=data_from_NWB)
 
 
-def _check_BrainwideMapTrialsInterface(
-    *, eid: str, one: ONE, nwbfile: NWBFile, revision: str = None
-):
+def _check_BrainwideMapTrialsInterface(*, eid: str, one: ONE, nwbfile: NWBFile, revision: str = None):
 
     data_from_NWB = nwbfile.trials[:]
     data_from_ONE = one.load_dataset(eid, "_ibl_trials.table", collection="alf")
-    data_from_ONE['stimOff_times'] = one.load_dataset(eid, "_ibl_trials.stimOff_times", collection="alf")
-    data_from_ONE.index.name = 'id'
data_from_ONE["stimOff_times"] = one.load_dataset(eid, "_ibl_trials.stimOff_times", collection="alf") + data_from_ONE.index.name = "id" naming_map = { "start_time": "intervals_0", @@ -153,7 +152,7 @@ def _check_BrainwideMapTrialsInterface( "probability_left": "probabilityLeft", "feedback_time": "feedback_times", "response_time": "response_times", - "stim_off_time": 'stimOff_times', + "stim_off_time": "stimOff_times", "stim_on_time": "stimOn_times", "go_cue_time": "goCue_times", "first_movement_time": "firstMovement_times", @@ -165,9 +164,8 @@ def _check_BrainwideMapTrialsInterface( assert_frame_equal(left=data_from_NWB, right=data_from_ONE) -def _check_PupilTrackingInterface( - *, eid: str, one: ONE, nwbfile: NWBFile, revision: str = None -): + +def _check_PupilTrackingInterface(*, eid: str, one: ONE, nwbfile: NWBFile, revision: str = None): camera_views = ["left", "right"] for view in camera_views: @@ -198,9 +196,7 @@ def _check_PupilTrackingInterface( assert_array_equal(x=data_from_ONE, y=data_from_NWB) -def _check_IblSortingInterface( - *, eid: str, one: ONE, nwbfile: NWBFile, revision: str = None -): +def _check_IblSortingInterface(*, eid: str, one: ONE, nwbfile: NWBFile, revision: str = None): units_table = nwbfile.units[:] probe_names = units_table["probe_name"].unique()