From d3b0b8fd6ff144aa6e2ef1a66f91e3eb9398e543 Mon Sep 17 00:00:00 2001
From: grg2rsr
Date: Thu, 19 Sep 2024 11:36:25 +0100
Subject: [PATCH 1/5] some path fixes

---
 .../convert_brainwide_map_processed_only_local_testing.py | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/ibl_to_nwb/brainwide_map/convert_brainwide_map_processed_only_local_testing.py b/ibl_to_nwb/brainwide_map/convert_brainwide_map_processed_only_local_testing.py
index af56503..729b5b8 100644
--- a/ibl_to_nwb/brainwide_map/convert_brainwide_map_processed_only_local_testing.py
+++ b/ibl_to_nwb/brainwide_map/convert_brainwide_map_processed_only_local_testing.py
@@ -56,18 +56,20 @@
 #     sessions_to_run = list(set(brain_wide_sessions) - set(already_written_processed_sessions))
 
 nwbfile_path = base_path / "nwbfiles" / session / f"{session}.nwb"
-nwbfile_path.parent.mkdir(exist_ok=True)
+os.makedirs(nwbfile_path.parent, exist_ok=True)
+# nwbfile_path.parent.mkdir(exist_ok=True)
 
 stub_test: bool = False
 cleanup: bool = False
 
 # assert len(os.environ.get("DANDI_API_KEY", "")) > 0, "Run `export DANDI_API_KEY=...`!"
 
-nwbfile_path.parent.mkdir(exist_ok=True)
-
 # Download behavior and spike sorted data for this session
 session_path = base_path / "ibl_conversion" / session
 cache_folder = base_path / "ibl_conversion" / session / "cache"
+os.makedirs(session_path, exist_ok=True)
+os.makedirs(cache_folder, exist_ok=True)
+
 session_one = ONE(
     base_url="https://openalyx.internationalbrainlab.org",
     password="international",

From da1545b87d94ee703e124ba43f30f25db765861a Mon Sep 17 00:00:00 2001
From: grg2rsr
Date: Thu, 19 Sep 2024 11:38:19 +0100
Subject: [PATCH 2/5] read_after_write added. Currently, only processed data,
 work in progress (documentation missing), but as is passes for dual probe
 converted with

---
 ibl_to_nwb/brainwide_map/read_after_write.py  | 371 ++++++++++++++++++
 .../brainwide_map/read_after_write_testing.py |  44 +++
 2 files changed, 415 insertions(+)
 create mode 100644 ibl_to_nwb/brainwide_map/read_after_write.py
 create mode 100644 ibl_to_nwb/brainwide_map/read_after_write_testing.py

diff --git a/ibl_to_nwb/brainwide_map/read_after_write.py b/ibl_to_nwb/brainwide_map/read_after_write.py
new file mode 100644
index 0000000..ca47e5a
--- /dev/null
+++ b/ibl_to_nwb/brainwide_map/read_after_write.py
@@ -0,0 +1,371 @@
+import os
+import numpy as np
+import pandas as pd
+from pathlib import Path
+import h5py
+from pynwb import NWBHDF5IO, NWBFile
+from one.api import ONE
+import logging
+from typing import Optional
+
+
+def check_arrays(array_a: np.ndarray, array_b: np.ndarray, full_check: bool = False):
+    """checks if two arrays contain the same numerical values
+
+    Args:
+        array_a (np.ndarray): first array to compare.
+        array_b (np.ndarray): second array to compare.
+        full_check (bool, optional): If True, compares all values of the arrays. If False, checks a small subsample only. Defaults to False.
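+
+        Example:
+            illustrative sketch with made-up values; matching arrays pass
+            silently, any mismatch raises an AssertionError:
+
+            >>> a = np.array([0.0, np.nan, 2.0])
+            >>> check_arrays(a, a.copy(), full_check=True)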
+    """
+
+    # check shapes
+    assert array_a.shape == array_b.shape
+
+    # check if NaNs are the same
+    assert np.all(pd.isna(array_a) == pd.isna(array_b))
+
+    # subset to non-NaN values
+    ix = ~pd.isna(array_a)
+    array_a = array_a[ix]
+    array_b = array_b[ix]
+
+    # if full check, check all samples
+    if full_check:
+        assert np.all(array_a == array_b)
+
+    # check just a random subset of samples
+    else:
+        inds = np.random.randint(0, array_a.shape[0], size=10)
+        assert np.all(array_a[inds] == array_b[inds])
+
+
+def check_series(series_a: pd.Series, series_b: pd.Series, full_check: bool = False):
+    """checks if two pd.Series contain the same numerical values. Checks if NaN values are the same.
+
+    Args:
+        series_a (pd.Series): first series to compare.
+        series_b (pd.Series): second series to compare.
+        full_check (bool, optional): if True, compares all values; if False, checks a random subsample only. Defaults to False.
+    """
+
+    # if it has NaNs, check if all NaN-indices are the same
+    assert np.all(pd.isna(series_a) == pd.isna(series_b).values)
+
+    # and if they are, use only non-NaN values for comparison
+    # ix = ~pd.isna(series_a)
+    # check_arrays(series_a.loc[ix].values, series_b.loc[ix].values, full_check=full_check)
+    check_arrays(series_a.values, series_b.values, full_check=full_check)
+
+
+def check_tables(table_a: pd.DataFrame, table_b: pd.DataFrame, naming_map: dict = None, full_check: bool = False):
+    """checks if two pd.DataFrames contain the same numerical values. Performs an "is in" comparison: checks if data of table_a is present in table_b.
+
+    Args:
+        table_a (pd.DataFrame): table whose columns are checked for presence in table_b.
+        table_b (pd.DataFrame): table checked against.
+        naming_map (dict, optional): if naming map is given, it is used to map the names of columns of table_a to those of table_b. Defaults to None, checks if columns are identical.
+        full_check (bool, optional): if True, compares all values; if False, checks a random subsample only. Defaults to False.
+    """
+    # convert column names if necessary
+    if naming_map is not None:
+        table_a_cols = table_a.columns
+        table_b_cols = [naming_map[col] for col in table_a.columns]
+    else:
+        # if no map is given, columns have to be the same (but not in the same order)
+        assert np.all([col in table_b.columns for col in table_a.columns])
+        table_a_cols = table_a.columns
+        table_b_cols = table_a.columns
+
+    for col_a, col_b in zip(table_a_cols, table_b_cols):
+        check_series(table_a[col_a], table_b[col_b], full_check=full_check)
+
+
+def test_WheelInterface(
+    nwbfile: NWBFile, one: ONE, eid: str, full_check: bool = False, verbose: bool = False, revision: str = None
+):
+    """read-after-write test for the datainterface `WheelInterface`.
+
+    Args:
+        nwbfile (NWBFile): nwbfile object
+        one (ONE): ONE object
+        eid (str): ONE experiment uuid
+        full_check (bool, optional): if True, verifies all values, if False, performs checks on a sample. Defaults to False.
+        verbose (bool, optional): currently unused. Defaults to False.
+        revision (str, optional): dataset revision to load from ONE; currently unused here. Defaults to None.
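+
+        Example:
+            illustrative only; assumes `nwbfile` was read from a converted NWB
+            file and `one` is connected to the database that session `eid` was
+            converted from:
+
+            >>> test_WheelInterface(nwbfile, one, eid, full_check=False)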
+    """
+
+    # wheel position
+    data_nwb = (
+        nwbfile.processing["behavior"].data_interfaces["CompassDirection"].spatial_series["WheelPositionSeries"].data[:]
+    )
+    data_one = one.load_dataset(eid, "_ibl_wheel.position", collection="alf")
+    check_arrays(data_nwb, data_one, full_check=full_check)
+
+    # wheel timestamps
+    data_nwb = (
+        nwbfile.processing["behavior"]
+        .data_interfaces["CompassDirection"]
+        .spatial_series["WheelPositionSeries"]
+        .timestamps[:]
+    )
+    data_one = one.load_dataset(eid, "_ibl_wheel.timestamps", collection="alf")
+    check_arrays(data_nwb, data_one, full_check=full_check)
+
+    # wheel moves
+    table = nwbfile.processing["behavior"].data_interfaces["WheelMovementIntervals"][:]
+
+    # intervals
+    data_nwb = table[["start_time", "stop_time"]].values
+    data_one = one.load_dataset(eid, "_ibl_wheelMoves.intervals", collection="alf")
+    check_arrays(data_nwb, data_one, full_check=full_check)
+
+    # peak amplitude
+    data_nwb = table["peak_amplitude"].values
+    data_one = one.load_dataset(eid, "_ibl_wheelMoves.peakAmplitude", collection="alf")
+    check_arrays(data_nwb, data_one, full_check=full_check)
+
+
+def test_LickInterface(
+    nwbfile: NWBFile, one: ONE, eid: str, full_check: bool = False, verbose: bool = False, revision: str = None
+):
+    """read-after-write test for the datainterface `LickInterface`.
+    TODO DOCME
+
+    Args:
+        nwbfile (NWBFile): nwbfile object.
+        one (ONE): ONE object.
+        eid (str): experiment uuid / equivalent to session_id
+    """
+    table = nwbfile.processing["behavior"].data_interfaces["LickTimes"][:]
+    data_nwb = table["lick_time"].values
+    data_one = one.load_dataset(eid, "licks.times")
+    check_arrays(data_nwb, data_one, full_check=full_check)
+
+
+def test_RoiMotionEnergyInterface(
+    nwbfile: NWBFile, one: ONE, eid: str, full_check: bool = False, verbose: bool = False, revision: str = None
+):
+    """read-after-write test for the datainterface `RoiMotionEnergyInterface`.
+    TODO DOCME
+
+    Args:
+        nwbfile (NWBFile): nwbfile object.
+        one (ONE): ONE object.
+        eid (str): experiment uuid / equivalent to session_id
+    """
+    camera_views = ["body", "left", "right"]
+
+    for view in camera_views:
+        # data
+        data_nwb = nwbfile.processing["behavior"].data_interfaces["%sCameraMotionEnergy" % view.capitalize()].data[:]
+        data_one = one.load_dataset(eid, "%sCamera.ROIMotionEnergy" % view, collection='alf')
+        check_arrays(data_nwb, data_one, full_check=full_check)
+
+        # timestamps
+        data_nwb = (
+            nwbfile.processing["behavior"].data_interfaces["%sCameraMotionEnergy" % view.capitalize()].timestamps[:]
+        )
+        data_one = one.load_dataset(eid, "_ibl_%sCamera.times" % view, collection='alf')
+        check_arrays(data_nwb, data_one, full_check=full_check)
+
+
+def test_IblPoseEstimationInterface(
+    nwbfile: NWBFile, one: ONE, eid: str, full_check: bool = False, verbose: bool = False, revision: str = None
+):
+    """read-after-write test for the datainterface `IblPoseEstimationInterface`.
+    TODO DOCME
+
+    Args:
+        nwbfile (NWBFile): nwbfile object.
+        one (ONE): ONE object.
+        eid (str): experiment uuid / equivalent to session_id
+    """
+
+    camera_views = ["body", "left", "right"]
+
+    for view in camera_views:
+        nodes = nwbfile.processing["behavior"].data_interfaces["PoseEstimation%sCamera" % view.capitalize()].nodes[:]
+
+        for node in nodes:
+            # x
+            data_nwb = (
+                nwbfile.processing["behavior"]
+                .data_interfaces["PoseEstimation%sCamera" % view.capitalize()]
+                .pose_estimation_series[node]
+                .data[:][:, 0]
+            )
+            data_one = one.load_dataset(eid, "_ibl_%sCamera.dlc.pqt" % view, collection='alf')["%s_x" % node].values
+            check_arrays(data_nwb, data_one, full_check=full_check)
+
+            # y
+            data_nwb = (
+                nwbfile.processing["behavior"]
+                .data_interfaces["PoseEstimation%sCamera" % view.capitalize()]
+                .pose_estimation_series[node]
+                .data[:][:, 1]
+            )
+            data_one = one.load_dataset(eid, "_ibl_%sCamera.dlc.pqt" % view, collection='alf')["%s_y" % node].values
+            check_arrays(data_nwb, data_one, full_check=full_check)
+
+            # confidence
+            data_nwb = (
+                nwbfile.processing["behavior"]
+                .data_interfaces["PoseEstimation%sCamera" % view.capitalize()]
+                .pose_estimation_series[node]
+                .confidence[:]
+            )
+            data_one = one.load_dataset(eid, "_ibl_%sCamera.dlc.pqt" % view, collection='alf')["%s_likelihood" % node].values
+            check_arrays(data_nwb, data_one, full_check=full_check)
+
+            # timestamps
+            data_nwb = (
+                nwbfile.processing["behavior"]
+                .data_interfaces["PoseEstimation%sCamera" % view.capitalize()]
+                .pose_estimation_series[node]
+                .timestamps[:]
+            )
+            data_one = one.load_dataset(eid, "_ibl_%sCamera.times" % view, collection='alf')
+            check_arrays(data_nwb, data_one, full_check=full_check)
+
+
+def test_BrainwideMapTrialsInterface(
+    nwbfile: NWBFile, one: ONE, eid: str, full_check: bool = False, verbose: bool = False, revision: str = None
+):
+    """read-after-write test for the datainterface `BrainwideMapTrialsInterface`.
+    TODO DOCME
+
+    Args:
+        nwbfile (NWBFile): nwbfile object.
+        one (ONE): ONE object.
+        eid (str): experiment uuid / equivalent to session_id
+    """
+
+    data_nwb = nwbfile.trials[:]
+    data_one = one.load_dataset(eid, "_ibl_trials.table", collection="alf")
+
+    naming_map = {
+        "start_time": "intervals_0",
+        "stop_time": "intervals_1",
+        "choice": "choice",
+        "feedback_type": "feedbackType",
+        "reward_volume": "rewardVolume",
+        "contrast_left": "contrastLeft",
+        "contrast_right": "contrastRight",
+        "probability_left": "probabilityLeft",
+        "feedback_time": "feedback_times",
+        "response_time": "response_times",
+        # 'stim_off_time': '',
+        "stim_on_time": "stimOn_times",
+        "go_cue_time": "goCue_times",
+        "first_movement_time": "firstMovement_times",
+    }
+    naming_map = {v: k for k, v in naming_map.items()}
+
+    check_tables(data_one, data_nwb, naming_map=naming_map)
+
+
+def test_PupilTrackingInterface(
+    nwbfile: NWBFile, one: ONE, eid: str, full_check: bool = False, verbose: bool = False, revision: str = None
+):
+    """read-after-write test for the datainterface `PupilTrackingInterface`.
+    TODO DOCME
+
+    Args:
+        nwbfile (NWBFile): nwbfile object.
+        one (ONE): ONE object.
+        eid (str): experiment uuid / equivalent to session_id
+    """
+
+    camera_views = ["left", "right"]
+    for view in camera_views:
+        # raw
+        data_nwb = (
+            nwbfile.processing["behavior"]
+            .data_interfaces["%sPupilTracking" % view.capitalize()]
+            .time_series["%sRawPupilDiameter" % view.capitalize()]
+            .data[:]
+        )
+        data_one = one.load_dataset(eid, "_ibl_%sCamera.features.pqt" % view, collection="alf")[
+            "pupilDiameter_raw"
+        ].values
+
+        check_arrays(data_nwb, data_one, full_check=full_check)
+
+        # smooth
+        data_nwb = (
+            nwbfile.processing["behavior"]
+            .data_interfaces["%sPupilTracking" % view.capitalize()]
+            .time_series["%sSmoothedPupilDiameter" % view.capitalize()]
+            .data[:]
+        )
+        data_one = one.load_dataset(eid, "_ibl_%sCamera.features.pqt" % view, collection="alf")[
+            "pupilDiameter_smooth"
+        ].values
+
+        check_arrays(data_nwb, data_one, full_check=full_check)
+
+
+def test_IblSortingInterface(
+    nwbfile: NWBFile, one: ONE, eid: str, full_check: bool = False, verbose: bool = False, revision: str = None
+):
+    """read-after-write test for the datainterface `IblSortingInterface`. Checks that the spike times
+    stored per unit in the NWB file match the spike times of the corresponding cluster (matched by uuid)
+    loaded via ONE.
+
+    Args:
+        nwbfile (NWBFile): nwbfile object.
+        one (ONE): ONE object.
+        eid (str): experiment uuid / equivalent to session_id
+        full_check (bool, optional): if True, checks all units; if False, checks a random sample of 20 units. Defaults to False.
+        verbose (bool, optional): currently unused. Defaults to False.
+        revision (str, optional): dataset revision to load from ONE. Defaults to None.
+    """
+
+    units_table = nwbfile.units[:]
+    probe_names = units_table["probe_name"].unique()
+
+    if full_check:
+        inds = units_table.index
+    else:
+        inds = units_table.sample(20).index
+
+    spike_times = {}
+    spike_clusters = {}
+    cluster_uuids = {}
+
+    # for fast spike extraction
+    def get_spikes_for_cluster(spike_clusters, spike_times, cluster):
+        # requires that spike_times and spike_clusters are sorted
+        start_ix, stop_ix = np.searchsorted(spike_clusters, [cluster, cluster + 1])
+        return np.sort(spike_times[start_ix:stop_ix])
+
+    # get and prep data once
+    for probe_name in probe_names:
+
+        # include revision TODO FIXME this will likely change - check back in with Miles
+        if revision is not None:
+            collection = f"alf/{probe_name}/pykilosort/{revision}"
+        else:
+            collection = f"alf/{probe_name}/pykilosort"
+
+        spike_times[probe_name] = one.load_dataset(eid, "spikes.times", collection=collection)
+        spike_clusters[probe_name] = one.load_dataset(eid, "spikes.clusters", collection=collection)
+        cluster_uuids[probe_name] = one.load_dataset(eid, "clusters.uuids", collection=collection)
+
+        # pre-sort for fast access
+        sort_ix = np.argsort(spike_clusters[probe_name])
+        spike_clusters[probe_name] = spike_clusters[probe_name][sort_ix]
+        spike_times[probe_name] = spike_times[probe_name][sort_ix]
+
+    for ix in inds:
+        probe_name = units_table.loc[ix, "probe_name"]
+        uuid = units_table.loc[ix, "uuid"]
+        nwb_spike_times = units_table.loc[ix, "spike_times"]
+
+        cluster_id = np.where(cluster_uuids[probe_name] == uuid)[0][0]
+        one_spike_times = get_spikes_for_cluster(spike_clusters[probe_name], spike_times[probe_name], cluster_id)
+
+        # more verbose but slower for more than ~20 checks
+        # one_spike_times = spike_times[probe_name][spike_clusters[probe_name] == cluster_id]
+
+        assert np.max((one_spike_times - nwb_spike_times) * 30000) < 1
diff --git a/ibl_to_nwb/brainwide_map/read_after_write_testing.py b/ibl_to_nwb/brainwide_map/read_after_write_testing.py
new file mode 100644
index 0000000..9098dd5
--- /dev/null
+++ b/ibl_to_nwb/brainwide_map/read_after_write_testing.py
@@ -0,0 +1,44 @@
+import os
+import numpy as np
+import pandas as pd
+from pathlib import Path
+import h5py
+from pynwb import NWBHDF5IO, NWBFile
+from one.api import ONE
+import logging
+import read_after_write as raw
+
+# the session
+eid = "caa5dddc-9290-4e27-9f5e-575ba3598614"  # dual probe session
+
+# local setup
+base_path = Path.home() / "ibl_scratch"
+nwb_path = base_path / "nwbfiles" / f"{eid}" / f"{eid}.nwb"
+cache_folder = base_path / "ibl_conversion" / eid / "cache"
+
+# ONE instantiation
+os.makedirs(cache_folder, exist_ok=True)
+
+one = ONE(
+    base_url="https://openalyx.internationalbrainlab.org",
+    password="international",
+    silent=False,
+    cache_dir=cache_folder,
+)
+
+# one_revision = dict(revision='2024-05-06')
+
+# NWB file
+h5py_file = h5py.File(nwb_path, "r")
+io = NWBHDF5IO(file=h5py_file, load_namespaces=True)
+nwbfile = io.read()
+
+raw.test_IblSortingInterface(nwbfile, one, eid)
+raw.test_WheelInterface(nwbfile, one, eid)
+raw.test_RoiMotionEnergyInterface(nwbfile, one, eid)
+raw.test_BrainwideMapTrialsInterface(nwbfile, one, eid)
+raw.test_IblPoseEstimationInterface(nwbfile, one, eid)
+raw.test_LickInterface(nwbfile, one, eid)
+raw.test_PupilTrackingInterface(nwbfile, one, eid)
+
+print("all tests passed")  # replace with logger
\ No newline at end of file

From 8d13432da1f5f8b97510449fc7d63d8fec66fb95 Mon Sep 17 00:00:00 2001
From: grg2rsr
Date: Thu, 19 Sep 2024 12:45:49 +0100
Subject: [PATCH 3/5] blackened

---
 ibl_to_nwb/brainwide_map/read_after_write.py  | 20 ++++++++++---------
 .../brainwide_map/read_after_write_testing.py |  2 +-
 2 files changed, 12 insertions(+), 10 deletions(-)

diff --git a/ibl_to_nwb/brainwide_map/read_after_write.py b/ibl_to_nwb/brainwide_map/read_after_write.py
index ca47e5a..89eb9f3 100644
--- a/ibl_to_nwb/brainwide_map/read_after_write.py
+++ b/ibl_to_nwb/brainwide_map/read_after_write.py
@@ -20,10 +20,10 @@ def check_arrays(array_a: np.ndarray, array_b: np.ndarray, full_check: bool = Fa
 
     # check shapes
     assert array_a.shape == array_b.shape
-    
+
     # check if NaNs are the same
     assert np.all(pd.isna(array_a) == pd.isna(array_b))
-    
+
     # subset to non-NaN values
     ix = ~pd.isna(array_a)
     array_a = array_a[ix]
@@ -158,14 +158,14 @@ def test_RoiMotionEnergyInterface(
     for view in camera_views:
         # data
         data_nwb = nwbfile.processing["behavior"].data_interfaces["%sCameraMotionEnergy" % view.capitalize()].data[:]
-        data_one = one.load_dataset(eid, "%sCamera.ROIMotionEnergy" % view, collection='alf')
+        data_one = one.load_dataset(eid, "%sCamera.ROIMotionEnergy" % view, collection="alf")
         check_arrays(data_nwb, data_one, full_check=full_check)
 
         # timestamps
         data_nwb = (
             nwbfile.processing["behavior"].data_interfaces["%sCameraMotionEnergy" % view.capitalize()].timestamps[:]
         )
-        data_one = one.load_dataset(eid, "_ibl_%sCamera.times" % view, collection='alf')
+        data_one = one.load_dataset(eid, "_ibl_%sCamera.times" % view, collection="alf")
         check_arrays(data_nwb, data_one, full_check=full_check)
 
 
@@ -194,7 +194,7 @@ def test_IblPoseEstimationInterface(
                 .pose_estimation_series[node]
                 .data[:][:, 0]
             )
-            data_one = one.load_dataset(eid, "_ibl_%sCamera.dlc.pqt" % view, collection='alf')["%s_x" % node].values
+            data_one = one.load_dataset(eid, "_ibl_%sCamera.dlc.pqt" % view, collection="alf")["%s_x" % node].values
             check_arrays(data_nwb, data_one, full_check=full_check)
 
             # y
@@ -204,7 +204,7 @@ def test_IblPoseEstimationInterface(
                 .pose_estimation_series[node]
                 .data[:][:, 1]
             )
-            data_one = one.load_dataset(eid, "_ibl_%sCamera.dlc.pqt" % view, collection='alf')["%s_y" % node].values
+            data_one = one.load_dataset(eid, "_ibl_%sCamera.dlc.pqt" % view, collection="alf")["%s_y" % node].values
             check_arrays(data_nwb, data_one, full_check=full_check)
 
             # confidence
@@ -214,7 +214,9 @@ def test_IblPoseEstimationInterface(
                 .pose_estimation_series[node]
                 .confidence[:]
             )
-            data_one = one.load_dataset(eid, "_ibl_%sCamera.dlc.pqt" % view, collection='alf')["%s_likelihood" % node].values
+            data_one = one.load_dataset(eid, "_ibl_%sCamera.dlc.pqt" % view, collection="alf")[
+                "%s_likelihood" % node
+            ].values
             check_arrays(data_nwb, data_one, full_check=full_check)
 
             # timestamps
@@ -224,7 +226,7 @@ def test_IblPoseEstimationInterface(
                 .pose_estimation_series[node]
                 .timestamps[:]
             )
-            data_one = one.load_dataset(eid, "_ibl_%sCamera.times" % view, collection='alf')
+            data_one = one.load_dataset(eid, "_ibl_%sCamera.times" % view, collection="alf")
             check_arrays(data_nwb, data_one, full_check=full_check)
 
 
@@ -290,7 +292,7 @@ def test_PupilTrackingInterface(
             ].values
 
             check_arrays(data_nwb, data_one, full_check=full_check)
-
+
         # smooth
         data_nwb = (
             nwbfile.processing["behavior"]
diff --git a/ibl_to_nwb/brainwide_map/read_after_write_testing.py b/ibl_to_nwb/brainwide_map/read_after_write_testing.py
index 9098dd5..4c5345a 100644
--- a/ibl_to_nwb/brainwide_map/read_after_write_testing.py
+++ b/ibl_to_nwb/brainwide_map/read_after_write_testing.py
@@ -41,4 +41,4 @@
 raw.test_LickInterface(nwbfile, one, eid)
 raw.test_PupilTrackingInterface(nwbfile, one, eid)
 
-print("all tests passed")  # replace with logger
\ No newline at end of file
+print("all tests passed")  # replace with logger

From f245a06fcf23da93bf00053350dae1c3a1b8ac5d Mon Sep 17 00:00:00 2001
From: grg2rsr
Date: Sat, 21 Sep 2024 09:38:02 +0100
Subject: [PATCH 4/5] using numpy.testing instead of hand-written code for the
 numerical comparisons

---
 ibl_to_nwb/brainwide_map/read_after_write.py | 34 +++++++-------------
 1 file changed, 11 insertions(+), 23 deletions(-)

diff --git a/ibl_to_nwb/brainwide_map/read_after_write.py b/ibl_to_nwb/brainwide_map/read_after_write.py
index 89eb9f3..e358087 100644
--- a/ibl_to_nwb/brainwide_map/read_after_write.py
+++ b/ibl_to_nwb/brainwide_map/read_after_write.py
@@ -7,7 +7,7 @@
 from one.api import ONE
 import logging
 from typing import Optional
-
+from numpy import testing
 
 def check_arrays(array_a: np.ndarray, array_b: np.ndarray, full_check: bool = False):
     """checks if two arrays contain the same numerical values
@@ -18,25 +18,15 @@ def check_arrays(array_a: np.ndarray, array_b: np.ndarray, full_check: bool = Fa
         full_check (bool, optional): If True, compares all values of the arrays. If False, checks a small subsample only. Defaults to False.
     """
 
-    # check shapes
-    assert array_a.shape == array_b.shape
-
-    # check if NaNs are the same
-    assert np.all(pd.isna(array_a) == pd.isna(array_b))
-
-    # subset to non-NaN values
-    ix = ~pd.isna(array_a)
-    array_a = array_a[ix]
-    array_b = array_b[ix]
-
     # if full check, check all samples
     if full_check:
-        assert np.all(array_a == array_b)
+        testing.assert_allclose(array_a, array_b)
 
     # check just a random subset of samples
     else:
-        inds = np.random.randint(0, array_a.shape[0], size=10)
-        assert np.all(array_a[inds] == array_b[inds])
+        inds = np.random.randint(0, np.prod(array_a.shape), size=10)
+        testing.assert_allclose(np.ravel(array_a)[inds], np.ravel(array_b)[inds])
+
 
 def check_series(series_a: pd.Series, series_b: pd.Series, full_check: bool = False):
@@ -48,12 +38,9 @@ def check_series(series_a: pd.Series, series_b: pd.Series, full_check: bool = Fa
         full_check (bool, optional): if True, compares all values; if False, checks a random subsample only. Defaults to False.
""" - # if it has NaNs, check if all NaN-indices are the same - assert np.all(pd.isna(series_a) == pd.isna(series_b).values) - - # and if they are, use only non-NaN values for comparison - # ix = ~pd.isna(series_a) - # check_arrays(series_a.loc[ix].values, series_b.loc[ix].values, full_check=full_check) + # -> all of this functionality is now moved to check_arrays() + # this function as of now is obsolete but kept for potential future integration + check_arrays(series_a.values, series_b.values, full_check=full_check) @@ -369,5 +356,6 @@ def get_spikes_for_cluster(spike_clusters, spike_times, cluster): # more verbose but slower for more than ~20 checks # one_spike_times = spike_times[probe_name][spike_clusters[probe_name] == cluster_id] - - assert np.max((one_spike_times - nwb_spike_times) * 30000) < 1 + + # testing + np.testing.assert_array_less(np.max((one_spike_times - nwb_spike_times) * 30000), 1) From d65ecca8ef2c20c9be384d7a380c5bf325a5608a Mon Sep 17 00:00:00 2001 From: grg2rsr Date: Sat, 21 Sep 2024 10:07:45 +0100 Subject: [PATCH 5/5] tiniest fix --- ibl_to_nwb/brainwide_map/read_after_write.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ibl_to_nwb/brainwide_map/read_after_write.py b/ibl_to_nwb/brainwide_map/read_after_write.py index e358087..be94cf7 100644 --- a/ibl_to_nwb/brainwide_map/read_after_write.py +++ b/ibl_to_nwb/brainwide_map/read_after_write.py @@ -358,4 +358,4 @@ def get_spikes_for_cluster(spike_clusters, spike_times, cluster): # one_spike_times = spike_times[probe_name][spike_clusters[probe_name] == cluster_id] # testing - np.testing.assert_array_less(np.max((one_spike_times - nwb_spike_times) * 30000), 1) + testing.assert_array_less(np.max((one_spike_times - nwb_spike_times) * 30000), 1)