From b7e3150fcfb68d11f9ea9bcfd59b0c680961c1b7 Mon Sep 17 00:00:00 2001
From: Kay Robbins <1189050+VisLab@users.noreply.github.com>
Date: Sat, 12 Oct 2024 11:15:57 -0500
Subject: [PATCH] Tabular files now allowed to be empty

---
 hed/models/base_input.py                      |   3 -
 .../bids_tests/eeg_ds003645s_empty/CHANGES    |   6 +
 .../bids_tests/eeg_ds003645s_empty/README     |  24 +++
 .../dataset_description.json                  |  24 +++
 .../eeg_ds003645s_empty/participants.json     |  17 +++
 .../eeg_ds003645s_empty/participants.tsv      |   3 +
 ...sub-004_task-FacePerception_run-1_eeg.json |  24 +++
 .../sub-004_task-FacePerception_run-1_eeg.set |   0
 ...b-004_task-FacePerception_run-1_events.tsv |   1 +
 .../task-FacePerception_events.json           | 137 ++++++++++++++++++
 tests/tools/bids/test_bids_dataset.py         |  19 +++
 11 files changed, 255 insertions(+), 3 deletions(-)
 create mode 100644 tests/data/bids_tests/eeg_ds003645s_empty/CHANGES
 create mode 100644 tests/data/bids_tests/eeg_ds003645s_empty/README
 create mode 100644 tests/data/bids_tests/eeg_ds003645s_empty/dataset_description.json
 create mode 100644 tests/data/bids_tests/eeg_ds003645s_empty/participants.json
 create mode 100644 tests/data/bids_tests/eeg_ds003645s_empty/participants.tsv
 create mode 100644 tests/data/bids_tests/eeg_ds003645s_empty/sub-004/eeg/sub-004_task-FacePerception_run-1_eeg.json
 create mode 100644 tests/data/bids_tests/eeg_ds003645s_empty/sub-004/eeg/sub-004_task-FacePerception_run-1_eeg.set
 create mode 100644 tests/data/bids_tests/eeg_ds003645s_empty/sub-004/eeg/sub-004_task-FacePerception_run-1_events.tsv
 create mode 100644 tests/data/bids_tests/eeg_ds003645s_empty/task-FacePerception_events.json

diff --git a/hed/models/base_input.py b/hed/models/base_input.py
index f02ffe62a..baa6c6c3b 100644
--- a/hed/models/base_input.py
+++ b/hed/models/base_input.py
@@ -475,6 +475,3 @@ def _open_dataframe_file(self, file, has_column_names, input_type):
                 raise HedFileError(HedExceptions.GENERIC_ERROR, str(e), self.name) from e
         else:
             raise HedFileError(HedExceptions.INVALID_EXTENSION, "", file)
-
-        if self._dataframe.size == 0:
-            raise HedFileError(HedExceptions.INVALID_DATAFRAME, "Invalid dataframe(malformed datafile, etc)", file)
diff --git a/tests/data/bids_tests/eeg_ds003645s_empty/CHANGES b/tests/data/bids_tests/eeg_ds003645s_empty/CHANGES
new file mode 100644
index 000000000..739a5e76d
--- /dev/null
+++ b/tests/data/bids_tests/eeg_ds003645s_empty/CHANGES
@@ -0,0 +1,6 @@
+1.0.0 2021-05-11
+ - First release
+Revision history for Face Recognition experiment by Wakeman-Henson
+
+version 1.0 - April 2021
+ - Initial release of EEG data in this experiment for HED education purposes
diff --git a/tests/data/bids_tests/eeg_ds003645s_empty/README b/tests/data/bids_tests/eeg_ds003645s_empty/README
new file mode 100644
index 000000000..c9be69270
--- /dev/null
+++ b/tests/data/bids_tests/eeg_ds003645s_empty/README
@@ -0,0 +1,24 @@
+**Introduction:**
+This dataset consists of the MEEG (sMRI+MEG+EEG) portion of the multi-subject, multi-modal face processing dataset (ds000117). This dataset was originally acquired and shared by Daniel Wakeman and Richard Henson (https://pubmed.ncbi.nlm.nih.gov/25977808/). The data has been repackaged in EEGLAB format and has undergone minimal preprocessing as well as reorganization and annotation of the dataset events.
+
+**Overview of the experiment:**
+Eighteen participants completed two recording sessions spaced three months apart – one session recorded fMRI and the other simultaneously recorded MEG and EEG data. During each session, participants performed the same simple perceptual task, responding to presented photographs of famous, unfamiliar, and scrambled faces by pressing one of two keyboard keys to indicate a subjective yes or no decision as to the relative spatial symmetry of the viewed face. Famous faces were feature-matched to unfamiliar faces; half the faces were female. The two sessions (MEEG, fMRI) had different organizations of event timing and presentation because of technological requirements of the respective imaging modalities. Each individual face was presented twice during the session. For half of the presented faces, the second presentation followed immediately after the first. For the other half, the second presentation was delayed by 5-15 face presentations.
+
+**Preprocessing:**
+The preprocessing, which was performed using the `wh_extracteeg_BIDS.m` located in the code directory, includes the following steps:
+* Ignore MRI data except for sMRI.
+* Extract EEG channels out of the MEG/EEG fif data
+* Add fiducials
+* Rename EOG and EKG channels
+* Extract events from event channel
+* Remove spurious events 5, 6, 7, 13, 14, 15, 17, 18 and 19
+* Remove spurious event 24 for subject 3 run 4
+* Rename events taking into account button assigned to each subject
+* Correct event latencies (events have a shift of 34 ms)
+* Resample data to 250 Hz (this step is performed because this dataset is used in a tutorial for EEGLAB and needs to be lightweight)
+* Remove event fields `urevent` and `duration`
+* Save as EEGLAB .set format
+
+**Data curators:**
+Ramon Martinez, Dung Truong, Scott Makeig, Arnaud Delorme (UCSD, La Jolla, CA, USA), Kay Robbins (UTSA, San Antonio, TX, USA)
+
diff --git a/tests/data/bids_tests/eeg_ds003645s_empty/dataset_description.json b/tests/data/bids_tests/eeg_ds003645s_empty/dataset_description.json
new file mode 100644
index 000000000..ab5887cd5
--- /dev/null
+++ b/tests/data/bids_tests/eeg_ds003645s_empty/dataset_description.json
@@ -0,0 +1,24 @@
+{
+    "Name": "Face processing MEEG dataset with HED annotation",
+    "BIDSVersion": "1.9.0",
+    "HEDVersion": "8.2.0",
+    "License": "CC0",
+    "Authors": [
+        "Daniel G. Wakeman",
+        "Richard N Henson",
+        "Dung Truong (curation)",
+        "Kay Robbins (curation)",
+        "Scott Makeig (curation)",
+        "Arno Delorme (curation)"
+    ],
+    "ReferencesAndLinks": [
+        "Wakeman, D., Henson, R. (2015). A multi-subject, multi-modal human neuroimaging dataset. Sci Data 2, 150001. https://doi.org/10.1038/sdata.2015.1",
+        "Robbins, K., Truong, D., Appelhoff, S., Delorme, A., & Makeig, S. (2021). Capturing the nature of events and event context using Hierarchical Event Descriptors (HED). In press for NeuroImage Special Issue Practice in MEEG. NeuroImage 245 (2021) 118766. Online: https://www.sciencedirect.com/science/article/pii/S1053811921010387.",
+        "Robbins, K., Truong, D., Jones, A., Callanan, I., & Makeig, S. (2021). Building FAIR functionality: Annotating events in time series data using Hierarchical Event Descriptors (HED). Neuroinformatics Special Issue Building the NeuroCommons. Neuroinformatics https://doi.org/10.1007/s12021-021-09537-4. Online: https://link.springer.com/article/10.1007/s12021-021-09537-4."
+    ],
+    "Funding": [
+        "Experiment was supported by the UK Medical Research Council (MC_A060_5PR10) and Elekta Ltd.",
+        "Curation was supported by: Army Research Laboratory W911NF-10-2-0022, NIH R01 EB023297-03, NIH R01 NS047293-l4, and NIH R24 MH120037-01."
+    ],
+    "DatasetDOI": "10.18112/openneuro.ds003645.v1.0.0"
+}
\ No newline at end of file
diff --git a/tests/data/bids_tests/eeg_ds003645s_empty/participants.json b/tests/data/bids_tests/eeg_ds003645s_empty/participants.json
new file mode 100644
index 000000000..fdc834413
--- /dev/null
+++ b/tests/data/bids_tests/eeg_ds003645s_empty/participants.json
@@ -0,0 +1,17 @@
+{
+    "participant_id": {
+        "LongName": "Participant identifier",
+        "Description": "Unique subject identifier"
+    },
+    "gender": {
+        "Description": "Sex of the subject",
+        "Levels": {
+            "M": "male",
+            "F": "female"
+        }
+    },
+    "age": {
+        "Description": "Age of the subject",
+        "Units": "years"
+    }
+}
\ No newline at end of file
diff --git a/tests/data/bids_tests/eeg_ds003645s_empty/participants.tsv b/tests/data/bids_tests/eeg_ds003645s_empty/participants.tsv
new file mode 100644
index 000000000..519bba980
--- /dev/null
+++ b/tests/data/bids_tests/eeg_ds003645s_empty/participants.tsv
@@ -0,0 +1,3 @@
+participant_id	age	gender
+sub-002	31	M
+sub-003	25	M
diff --git a/tests/data/bids_tests/eeg_ds003645s_empty/sub-004/eeg/sub-004_task-FacePerception_run-1_eeg.json b/tests/data/bids_tests/eeg_ds003645s_empty/sub-004/eeg/sub-004_task-FacePerception_run-1_eeg.json
new file mode 100644
index 000000000..7cdbd553a
--- /dev/null
+++ b/tests/data/bids_tests/eeg_ds003645s_empty/sub-004/eeg/sub-004_task-FacePerception_run-1_eeg.json
@@ -0,0 +1,24 @@
+{
+    "TaskName": "FacePerception",
+    "TaskDescription": "Subjects viewed stimuli on a screen during six, 7.5 minute runs. The stimuli were photographs of either a famous face (known to most of the British participants), an unfamiliar face, or a scrambled face, and appeared for a random duration between 800 and 1,000 ms. Subjects were instructed to fixate centrally throughout the experiment. To ensure attention to each stimulus, participants were asked to press one of two keys with either their left or right index finger (assignment counter-balanced across participants). Their key-press was based on how symmetric they regarded each image: pressing one or the other key depending on whether they thought the image was 'more' or 'less symmetric' than average.",
+    "InstitutionAddress": "15 Chaucer Road, Cambridge, UK",
+    "InstitutionName": "MRC Cognition & Brain Sciences Unit",
+    "EEGReference": "nose",
+    "EEGGround": "left collar bone",
+    "SamplingFrequency": 250,
+    "PowerLineFrequency": 50,
+    "SoftwareFilters": {
+        "LowPassFilter": {
+            "cutoff": "350 (Hz)"
+        }
+    },
+    "EEGPlacementScheme": "extended 10-10% system",
+    "CapManufacturer": "Easycap",
+    "EEGChannelCount": 70,
+    "EOGChannelCount": 2,
+    "RecordingType": "continuous",
+    "MiscChannelCount": 309,
+    "RecordingDuration": 494,
+    "ECGChannelCount": 0,
+    "EMGChannelCount": 0
+}
\ No newline at end of file
diff --git a/tests/data/bids_tests/eeg_ds003645s_empty/sub-004/eeg/sub-004_task-FacePerception_run-1_eeg.set b/tests/data/bids_tests/eeg_ds003645s_empty/sub-004/eeg/sub-004_task-FacePerception_run-1_eeg.set
new file mode 100644
index 000000000..e69de29bb
diff --git a/tests/data/bids_tests/eeg_ds003645s_empty/sub-004/eeg/sub-004_task-FacePerception_run-1_events.tsv b/tests/data/bids_tests/eeg_ds003645s_empty/sub-004/eeg/sub-004_task-FacePerception_run-1_events.tsv
new file mode 100644
index 000000000..5d5a4f7a6
--- /dev/null
+++ b/tests/data/bids_tests/eeg_ds003645s_empty/sub-004/eeg/sub-004_task-FacePerception_run-1_events.tsv
@@ -0,0 +1 @@
+onset	duration	sample	event_type	face_type	rep_status	trial	rep_lag	value	stim_file
diff --git a/tests/data/bids_tests/eeg_ds003645s_empty/task-FacePerception_events.json b/tests/data/bids_tests/eeg_ds003645s_empty/task-FacePerception_events.json
new file mode 100644
index 000000000..91d9f1b82
--- /dev/null
+++ b/tests/data/bids_tests/eeg_ds003645s_empty/task-FacePerception_events.json
@@ -0,0 +1,137 @@
+{
+    "onset": {
+        "Description": "Position of event marker in seconds relative to the start.",
+        "Units": "s"
+    },
+    "duration": {
+        "Description": "Duration of the event in seconds.",
+        "Units": "s"
+    },
+    "event_type": {
+        "LongName": "Event category",
+        "Description": "The main category of the event.",
+        "Levels": {
+            "show_face": "Display a face to mark end of pre-stimulus and start of blink-inhibition.",
+            "show_face_initial": "Display a face at the beginning of the recording.",
+            "show_circle": "Display a white circle to mark end of the stimulus and blink inhibition.",
+            "show_cross": "Display only a white cross to mark start of trial and fixation.",
+            "left_press": "Experiment participant presses a key with left index finger.",
+            "right_press": "Experiment participant presses a key with right index finger.",
+            "setup_left_sym": "Setup for experiment with pressing key with left index finger means a face with above average symmetry.",
+            "setup_right_sym": "Setup for experiment with pressing key with right index finger means a face with above average symmetry.",
+            "double_press": "Experiment participant presses both keys."
+        },
+        "HED": {
+            "show_face": "Sensory-event, Experimental-stimulus, (Def/Face-image, Onset), (Def/Blink-inhibition-task,Onset),(Def/Cross-only, Offset)",
+            "show_face_initial": "Sensory-event, Experimental-stimulus, (Def/Face-image, Onset), (Def/Blink-inhibition-task,Onset), (Def/Fixation-task, Onset)",
+            "show_circle": "Sensory-event, (Intended-effect, Cue), (Def/Circle-only, Onset), (Def/Face-image, Offset), (Def/Blink-inhibition-task, Offset), (Def/Fixation-task, Offset)",
+            "show_cross": "Sensory-event, (Intended-effect, Cue), (Def/Cross-only, Onset), (Def/Fixation-task, Onset), (Def/Circle-only, Offset)",
+            "left_press": "Agent-action, Participant-response, Def/Press-left-finger",
+            "right_press": "Agent-action, Participant-response, Def/Press-right-finger",
+            "setup_left_sym": "Experiment-structure, (Def/Left-sym-cond, Onset), (Def/Initialize-recording, Onset)",
+            "setup_right_sym": "Experiment-structure, (Def/Right-sym-cond, Onset), (Def/Initialize-recording, Onset)",
+            "double_press": "Agent-action, Indeterminate-action, (Press, Keyboard-key)"
+        }
+    },
+    "face_type": {
+        "Description": "Factor indicating type of face image being displayed.",
+        "Levels": {
+            "famous_face": "A face that should be recognized by the participants.",
+            "unfamiliar_face": "A face that should not be recognized by the participants.",
+            "scrambled_face": "A scrambled face image generated by taking face 2D FFT."
+        },
+        "HED": {
+            "famous_face": "Def/Famous-face-cond",
+            "unfamiliar_face": "Def/Unfamiliar-face-cond",
+            "scrambled_face": "Def/Scrambled-face-cond"
+        }
+    },
+    "rep_status": {
+        "Description": "Factor indicating whether this image has been already seen.",
+        "Levels": {
+            "first_show": "Factor level indicating the first display of this face.",
+            "immediate_repeat": "Factor level indicating this face was the same as previous one.",
+            "delayed_repeat": "Factor level indicating face was seen 5 to 15 trials ago."
+        },
+        "HED": {
+            "first_show": "Def/First-show-cond",
+            "immediate_repeat": "Def/Immediate-repeat-cond",
+            "delayed_repeat": "Def/Delayed-repeat-cond"
+        }
+    },
+    "trial": {
+        "Description": "Indicates which trial this event belongs to."
+    },
+    "rep_lag": {
+        "Description": "How many face images before this one was this image previously presented.",
+        "HED": "(Face, Item-interval/#)"
+    },
+    "stim_file": {
+        "Description": "Path of the stimulus file in the stimuli directory.",
+        "HED": "(Image, Pathname/#)"
+    },
+    "hed_def_sensory": {
+        "Description": "Metadata dictionary for gathering sensory definitions",
+        "HED": {
+            "cross_only_def": "(Definition/Cross-only, (Visual-presentation, (Foreground-view, (White, Cross), (Center-of, Computer-screen)), (Background-view, Black), Description/A white fixation cross on a black background in the center of the screen.))",
+            "face_image_def": "(Definition/Face-image, (Visual-presentation, (Foreground-view, ((Image, Face, Hair), Color/Grayscale), ((White, Cross), (Center-of, Computer-screen))), (Background-view, Black), Description/A happy or neutral face in frontal or three-quarters frontal pose with long hair cropped presented as an achromatic foreground image on a black background with a white fixation cross superposed.))",
+            "circle_only_def": "(Definition/Circle-only, (Visual-presentation, (Foreground-view, ((White, Circle), (Center-of, Computer-screen))), (Background-view, Black), Description/A white circle on a black background in the center of the screen.))"
+        }
+    },
+    "hed_def_actions": {
+        "Description": "Metadata dictionary for gathering participant action definitions",
+        "HED": {
+            "press_left_finger_def": "(Definition/Press-left-finger, ((Index-finger, (Left-side-of, Experiment-participant)), (Press, Keyboard-key), Description/The participant presses a key with the left index finger to indicate a face symmetry judgment.))",
+            "press_right_finger_def": "(Definition/Press-right-finger, ((Index-finger, (Right-side-of, Experiment-participant)), (Press, Keyboard-key), Description/The participant presses a key with the right index finger to indicate a face symmetry evaluation.))"
+        }
+    },
+    "hed_def_conds": {
+        "Description": "Metadata dictionary for gathering experimental condition definitions",
+        "HED": {
+            "famous_face_cond_def": "(Definition/Famous-face-cond, (Condition-variable/Face-type, (Image, (Face, Famous)), Description/A face that should be recognized by the participants))",
+            "unfamiliar_face_cond_def": "(Definition/Unfamiliar-face-cond, (Condition-variable/Face-type, (Image, (Face, Unfamiliar)), Description/A face that should not be recognized by the participants.))",
+            "scrambled_face_cond_def": "(Definition/Scrambled-face-cond, (Condition-variable/Face-type, (Image, (Face, Disordered)), Description/A scrambled face image generated by taking face 2D FFT.))",
+            "first_show_cond_def": "(Definition/First-show-cond, ((Condition-variable/Repetition-type, (Item-count/1, Face), Item-interval/0), Description/Factor level indicating the first display of this face.))",
+            "immediate_repeat_cond_def": "(Definition/Immediate-repeat-cond, ((Condition-variable/Repetition-type, (Item-count/2, Face), Item-interval/1), Description/Factor level indicating this face was the same as previous one.))",
+            "delayed_repeat_cond_def": "(Definition/Delayed-repeat-cond, (Condition-variable/Repetition-type, (Item-count/2, Face), (Item-interval, (Greater-than-or-equal-to, Item-interval/5)), Description/Factor level indicating face was seen 5 to 15 trials ago.))",
+            "left_sym_cond_def": "(Definition/Left-sym-cond, (Condition-variable/Key-assignment, ((Index-finger, (Left-side-of, Experiment-participant)), (Behavioral-evidence, Symmetrical)), ((Index-finger, (Right-side-of, Experiment-participant)), (Behavioral-evidence, Asymmetrical)), Description/Left index finger key press indicates a face with above average symmetry.))",
+            "right_sym_cond_def": "(Definition/Right-sym-cond, (Condition-variable/Key-assignment, ((Index-finger, (Right-side-of, Experiment-participant)), (Behavioral-evidence, Symmetrical)), ((Index-finger, (Left-side-of, Experiment-participant)), (Behavioral-evidence, Asymmetrical)), Description/Right index finger key press indicates a face with above average symmetry.))"
+        }
+    },
+    "hed_def_tasks": {
+        "Description": "Metadata dictionary for gathering task definitions",
+        "HED": {
+            "face_symmetry_evaluation_task_def": "(Definition/Face-symmetry-evaluation-task, (Task, Experiment-participant, (See, Face), (Discriminate, (Face, Symmetrical)), (Press, Keyboard-key), Description/Evaluate degree of image symmetry and respond with key press evaluation.))",
+            "blink_inhibition_task_def": "(Definition/Blink-inhibition-task, (Task, Experiment-participant, Inhibit-blinks, Description/Do not blink while the face image is displayed.))",
+            "fixation_task_def": "(Definition/Fixation-task, (Task, Experiment-participant, (Fixate, Cross), Description/Fixate on the cross at the screen center.))"
+        }
+    },
+    "hed_def_setup": {
+        "Description": "Metadata dictionary for gathering setup definitions",
+        "HED": {
+            "setup_def": "(Definition/Initialize-recording, (Recording))"
+        }
+
+    },
+    "value": {
+        "Description": "Numerical event marker",
+        "Levels": {
+            "x0": "Disappearance of face image and display of the inter-stimulus circle simultaneously",
+            "x1": "Disappearance of face image and display of the inter-stimulus circle simultaneously",
+            "x2": "Initial setup with left finger key press indicating above average symmetry",
+            "x3": "Initial setup with right finger key press indicating above average symmetry",
+            "x5": "Initial presentation of famous face",
+            "x6": "Immediate repeated presentation of famous face",
+            "x7": "Delayed repeated presentation of famous face",
+            "x13": "Initial presentation of unfamiliar face",
+            "x14": "Immediate repeated presentation of unfamiliar face",
+            "x15": "Delayed repeated presentation of unfamiliar face",
+            "x17": "Initial presentation of scrambled face",
+            "x18": "Immediate repeated presentation of scrambled face",
+            "x19": "Delayed repeated presentation of scrambled face",
+            "x256": "Left finger key press",
+            "x4096": "Right finger key press",
+            "x4352": "Left and right finger key presses"
+        }
+    }
+}
diff --git a/tests/tools/bids/test_bids_dataset.py b/tests/tools/bids/test_bids_dataset.py
index ac1696080..02b167dea 100644
--- a/tests/tools/bids/test_bids_dataset.py
+++ b/tests/tools/bids/test_bids_dataset.py
@@ -15,6 +15,8 @@ def setUpClass(cls):
                                          '../../data/bids_tests/eeg_ds003645s_hed')
         cls.library_path = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                                          '../../data/bids_tests/eeg_ds003645s_hed_library'))
+        cls.empty_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
+                                      '../../data/bids_tests/eeg_ds003645s_empty')
 
     def test_constructor(self):
         bids = BidsDataset(self.root_path)
@@ -81,6 +83,23 @@ def test_validator_libraries(self):
         issues = bids.validate(check_for_warnings=False)
         self.assertFalse(issues, "BidsDataset with libraries should validate")
 
+    def test_empty(self):
+        bids = BidsDataset(self.empty_path, tabular_types=['participants', 'events'])
+        parts = bids.get_tabular_group("participants")
+        self.assertIsInstance(parts, BidsFileGroup, "BidsDataset participants should be a BidsFileGroup")
+        self.assertEqual(len(parts.sidecar_dict), 1, "BidsDataset should have one participants.json file")
+        self.assertEqual(len(parts.datafile_dict), 1, "BidsDataset should have one participants.tsv file")
+        self.assertIsInstance(bids.dataset_description, dict, "BidsDataset dataset_description should be a dict")
+        for group in bids.tabular_files.values():
+            self.assertIsInstance(group, BidsFileGroup, "BidsDataset event files should be in a BidsFileGroup")
+        self.assertTrue(bids.schema, "BidsDataset constructor extracts a schema from the dataset.")
+        self.assertIsInstance(bids.schema, HedSchema, "BidsDataset schema should be HedSchema")
+        issues1 = bids.validate(check_for_warnings=False)
+        self.assertFalse(issues1, "BidsDataset with empty events should validate")
+        issues2 = bids.validate(check_for_warnings=True)
+        self.assertTrue(issues2, "BidsDataset with empty events should return a warning when check_for_warnings is True")
+        self.assertEqual(len(issues2), 1, "BidsDataset with empty events should return exactly one warning")
+
     def test_validator_types(self):
         bids = BidsDataset(self.root_path, tabular_types=None)
         issues = bids.validate(check_for_warnings=False)
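
Reviewer note: the short sketch below (not part of the diff) illustrates the behavior this change enables, as exercised by test_empty above. Before this patch, loading an events.tsv that contains only a header row raised HedFileError with INVALID_DATAFRAME in BaseInput._open_dataframe_file; afterwards the file loads and validation only reports it as a warning when check_for_warnings=True. The import path and the repository-root working directory are assumptions inferred from the test module's location; the BidsDataset constructor arguments and validate() call are taken directly from the test.

    import os

    # Assumed import path, mirroring tests/tools/bids/test_bids_dataset.py; adjust if your
    # installed hed version exposes BidsDataset elsewhere.
    from hed.tools.bids.bids_dataset import BidsDataset

    # The empty-events fixture added by this patch, relative to the repository root.
    empty_path = os.path.realpath("tests/data/bids_tests/eeg_ds003645s_empty")

    bids = BidsDataset(empty_path, tabular_types=["participants", "events"])

    errors = bids.validate(check_for_warnings=False)   # expected: no errors for the empty events file
    warnings = bids.validate(check_for_warnings=True)  # expected: a single warning flagging the empty file
    print(len(errors), len(warnings))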