From 283d8599a5416c71e44d8cbbe5adf9f8b8482c31 Mon Sep 17 00:00:00 2001 From: Stefan Appelhoff Date: Mon, 19 Aug 2024 11:28:13 +0200 Subject: [PATCH 1/5] fix doc and logging mark_channels (#1293) * fix doc and logging mark_channels * sane behavior, add changelog * Update doc/whats_new.rst Co-authored-by: Daniel McCloy * Update mne_bids/write.py Co-authored-by: Daniel McCloy * Update mne_bids/write.py Co-authored-by: Daniel McCloy * fix set logic --------- Co-authored-by: Daniel McCloy --- doc/whats_new.rst | 1 + mne_bids/write.py | 36 +++++++++++++++++++++++++----------- 2 files changed, 26 insertions(+), 11 deletions(-) diff --git a/doc/whats_new.rst b/doc/whats_new.rst index 4b6d2c846..42133bedf 100644 --- a/doc/whats_new.rst +++ b/doc/whats_new.rst @@ -39,6 +39,7 @@ Detailed list of changes - :func:`mne_bids.read_raw_bids` no longer warns about unit changes in channels upon reading, as that information is taken from ``channels.tsv`` and judged authorative, by `Stefan Appelhoff`_ (:gh:`1282`) - MEG OPM channels are now experimentally included, by `Amaia Benitez`_ (:gh:`1222`) +- :func:`mne_bids.mark_channels` will no longer create a ``status_description`` column filled with ``n/a`` in the ``channels.tsv`` file, by `Stefan Appelhoff`_ (:gh:`1293`) 🛠 Requirements ^^^^^^^^^^^^^^^ diff --git a/mne_bids/write.py b/mne_bids/write.py index d34b0dbb4..c7bd1896d 100644 --- a/mne_bids/write.py +++ b/mne_bids/write.py @@ -2460,18 +2460,24 @@ def mark_channels(bids_path, *, ch_names, status, descriptions=None, verbose=Non type (e.g., only EEG or MEG data) is present in the dataset, it will be selected automatically. ch_names : str | list of str - The names of the channel(s) to mark with a ``status`` and possibly a + The names of the channel(s) to mark with a ``status`` and optionally a ``description``. Can be an empty list to indicate all channel names. status : 'good' | 'bad' | list of str - The status of the channels ('good', or 'bad'). Default is 'bad'. 
If it - is a list, then must be a list of 'good', or 'bad' that has the same - length as ``ch_names``. + The status of the channels ('good', or 'bad'). If it is a list, then must be a + list of 'good', or 'bad' that has the same length as ``ch_names``. descriptions : None | str | list of str - Descriptions of the reasons that lead to the exclusion of the + Descriptions of the reasons that lead to the marking ('good' or 'bad') of the channel(s). If a list, it must match the length of ``ch_names``. If ``None``, no descriptions are added. %(verbose)s + Notes + ----- + If the 'status' or 'status_description' columns were not present in the + corresponding tsv file before using this function, they may be created with default + values ('good' for status, 'n/a' for status_description) for all channels that are + not differently specified (by using ``ch_names``, ``status``, and ``descriptions``). + Examples -------- Mark a single channel as bad. @@ -2526,7 +2532,9 @@ def mark_channels(bids_path, *, ch_names, status, descriptions=None, verbose=Non # set descriptions based on how it's passed in if isinstance(descriptions, str): descriptions = [descriptions] * len(ch_names) + write_descriptions = True elif not descriptions: + write_descriptions = False descriptions = [None] * len(ch_names) # make sure statuses is a list of strings @@ -2543,18 +2551,24 @@ def mark_channels(bids_path, *, ch_names, status, descriptions=None, verbose=Non f"({len(ch_names)})." ) - if not all(status in ["good", "bad"] for status in status): + if not set(status).issubset({"good", "bad"}): raise ValueError( 'Setting the status of a channel must only be "good", or "bad".' ) # Read sidecar and create required columns if they do not exist. if "status" not in tsv_data: - logger.info('No "status" column found in input file. Creating.') + logger.info( + 'No "status" column found in channels file. ' + 'Creating it with default value "good".' 
+ ) tsv_data["status"] = ["good"] * len(tsv_data["name"]) - if "status_description" not in tsv_data: - logger.info('No "status_description" column found in input file. Creating.') + if "status_description" not in tsv_data and write_descriptions: + logger.info( + 'No "status_description" column found in input file. ' + 'Creating it with default value "n/a".' + ) tsv_data["status_description"] = ["n/a"] * len(tsv_data["name"]) # Now actually mark the user-requested channels as bad. @@ -2565,13 +2579,13 @@ def mark_channels(bids_path, *, ch_names, status, descriptions=None, verbose=Non idx = tsv_data["name"].index(ch_name) logger.info( f"Processing channel {ch_name}:\n" - f" status: bad\n" + f" status: {status_}\n" f" description: {description}" ) tsv_data["status"][idx] = status_ # only write if the description was passed in - if description is not None: + if description: tsv_data["status_description"][idx] = description _write_tsv(channels_fname, tsv_data, overwrite=True) From bed28fa6ba2c382019c958bc61158d5d5c6368a7 Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Wed, 21 Aug 2024 17:45:45 +1000 Subject: [PATCH 2/5] MAINT: Bump to 3.10 min req (#1297) * MAINT: Bump to 3.10 min req * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * FIX: More * FIX: Which * Apply suggestions from code review Co-authored-by: Stefan Appelhoff * DOC: Comment --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Stefan Appelhoff --- .github/workflows/unit_tests.yml | 38 ++++++++++++-------------------- doc/whats_new.rst | 1 + mne_bids/path.py | 36 ++++++++++++++---------------- mne_bids/tsv_handler.py | 2 +- mne_bids/utils.py | 4 ++-- mne_bids/write.py | 6 ++--- pyproject.toml | 4 +++- 7 files changed, 41 insertions(+), 50 deletions(-) diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/unit_tests.yml index 145e36d91..ab73b884d 100644 --- 
a/.github/workflows/unit_tests.yml +++ b/.github/workflows/unit_tests.yml @@ -47,7 +47,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest] - python-version: ["3.9", "3.12"] # Oldest and newest supported versions + python-version: ["3.10", "3.12"] # Oldest and newest supported versions steps: - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v5 @@ -113,40 +113,29 @@ jobs: test: # For GitHub "required" CI checks, add in branch protection: # - # 6 checks: - # for each OS (ubuntu, macos, windows): - # 3.9 / mne-stable / full / validator-stable + # 8 checks: + # for each machine type (ubuntu, macos, macos-13, windows): + # (NOTE: macos-13=x86_64, macos>13=arm64) + # 3.10 / mne-stable / full / validator-stable # 3.12 / mne-stable / full / validator-stable # - # 1 additional check for Apple Silicon (doesn't support Python 3.9): - # 3.12 / mne-stable / full / validator-stable - # - # 5 additional checks with alternative MNE-Python and BIDS validator versions: + # 3 additional checks with alternative MNE-Python and BIDS validator versions: # ubuntu / 3.12 / mne-main / full / validator-main - # ubuntu / 3.9 / mne-prev / full / validator-stable + # ubuntu / 3.10 / mne-prev / full / validator-stable # ubuntu / 3.12 / mne-stable / minimal / validator-stable timeout-minutes: 60 runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: - os: [ubuntu-latest, macos-latest, windows-latest] - python-version: ["3.9", "3.12"] # Oldest and newest supported versions + os: [ubuntu-latest, macos-latest, macos-13, windows-latest] + python-version: ["3.10", "3.12"] # Oldest and newest supported versions mne-version: [mne-stable] mne-bids-install: [full] bids-validator-version: [validator-stable] include: # special test runs running only on single CI systems to save resources - # - # macOS-14 (Apple Silicon) only works with Python 3.10+ - # Once we drop support for Python 3.9, move it to the "proper" matrix above. 
- - os: macos-14 - python-version: "3.12" - mne-version: mne-stable - mne-bids-install: full - bids-validator-version: validator-stable - # Test development versions - os: ubuntu-latest python-version: "3.12" @@ -155,7 +144,7 @@ jobs: bids-validator-version: validator-main # Test previous MNE stable version - os: ubuntu-latest - python-version: "3.9" + python-version: "3.10" mne-version: mne-prev-stable mne-bids-install: full bids-validator-version: validator-stable @@ -198,14 +187,15 @@ jobs: - name: Install MNE (stable) if: matrix.mne-version == 'mne-stable' run: | - git clone --single-branch --branch maint/1.6 https://github.com/mne-tools/mne-python.git + git clone --single-branch --branch maint/1.8 https://github.com/mne-tools/mne-python.git python -m pip install -e ./mne-python - name: Install MNE (previous stable) if: matrix.mne-version == 'mne-prev-stable' + # Have to install NumPy<2.1 here because of a change in positional arg handling + MNE 1.7 run: | - git clone --single-branch --branch maint/1.6 https://github.com/mne-tools/mne-python.git - python -m pip install -e ./mne-python + git clone --single-branch --branch maint/1.7 https://github.com/mne-tools/mne-python.git + python -m pip install -e ./mne-python "numpy<2.1" - name: Install MNE (main) if: matrix.mne-version == 'mne-main' diff --git a/doc/whats_new.rst b/doc/whats_new.rst index 42133bedf..f549a0d7b 100644 --- a/doc/whats_new.rst +++ b/doc/whats_new.rst @@ -45,6 +45,7 @@ Detailed list of changes ^^^^^^^^^^^^^^^ - MNE-BIDS now requires MNE-Python 1.6.0 or higher. +- MNE-BIDS now requires Python 3.10 or higher. 
🪲 Bug fixes ^^^^^^^^^^^^ diff --git a/mne_bids/path.py b/mne_bids/path.py index 17fa557e9..63d6d1259 100644 --- a/mne_bids/path.py +++ b/mne_bids/path.py @@ -14,7 +14,6 @@ from os import path as op from pathlib import Path from textwrap import indent -from typing import Optional import numpy as np from mne.utils import _check_fname, _validate_type, logger, verbose @@ -448,7 +447,7 @@ def directory(self): return Path(data_path) @property - def subject(self) -> Optional[str]: + def subject(self) -> str | None: """The subject ID.""" return self._subject @@ -457,7 +456,7 @@ def subject(self, value): self.update(subject=value) @property - def session(self) -> Optional[str]: + def session(self) -> str | None: """The acquisition session.""" return self._session @@ -466,7 +465,7 @@ def session(self, value): self.update(session=value) @property - def task(self) -> Optional[str]: + def task(self) -> str | None: """The experimental task.""" return self._task @@ -475,7 +474,7 @@ def task(self, value): self.update(task=value) @property - def run(self) -> Optional[str]: + def run(self) -> str | None: """The run number.""" return self._run @@ -484,7 +483,7 @@ def run(self, value): self.update(run=value) @property - def acquisition(self) -> Optional[str]: + def acquisition(self) -> str | None: """The acquisition parameters.""" return self._acquisition @@ -493,7 +492,7 @@ def acquisition(self, value): self.update(acquisition=value) @property - def processing(self) -> Optional[str]: + def processing(self) -> str | None: """The processing label.""" return self._processing @@ -502,7 +501,7 @@ def processing(self, value): self.update(processing=value) @property - def recording(self) -> Optional[str]: + def recording(self) -> str | None: """The recording name.""" return self._recording @@ -511,7 +510,7 @@ def recording(self, value): self.update(recording=value) @property - def space(self) -> Optional[str]: + def space(self) -> str | None: """The coordinate space for an anatomical or 
sensor position file.""" return self._space @@ -520,7 +519,7 @@ def space(self, value): self.update(space=value) @property - def description(self) -> Optional[str]: + def description(self) -> str | None: """The description entity.""" return self._description @@ -529,7 +528,7 @@ def description(self, value): self.update(description=value) @property - def suffix(self) -> Optional[str]: + def suffix(self) -> str | None: """The filename suffix.""" return self._suffix @@ -538,7 +537,7 @@ def suffix(self, value): self.update(suffix=value) @property - def root(self) -> Optional[Path]: + def root(self) -> Path | None: """The root directory of the BIDS dataset.""" return self._root @@ -547,7 +546,7 @@ def root(self, value): self.update(root=value) @property - def datatype(self) -> Optional[str]: + def datatype(self) -> str | None: """The BIDS data type, e.g. ``'anat'``, ``'meg'``, ``'eeg'``.""" return self._datatype @@ -556,7 +555,7 @@ def datatype(self, value): self.update(datatype=value) @property - def split(self) -> Optional[str]: + def split(self) -> str | None: """The split of the continuous recording file for ``.fif`` data.""" return self._split @@ -565,7 +564,7 @@ def split(self, value): self.update(split=value) @property - def extension(self) -> Optional[str]: + def extension(self) -> str | None: """The extension of the filename, including a leading period.""" return self._extension @@ -1474,7 +1473,7 @@ def search_folder_for_text( def _check_max_depth(max_depth): """Check that max depth is a proper input.""" msg = "`max_depth` must be a positive integer or None" - if not isinstance(max_depth, (int, type(None))): + if not isinstance(max_depth, int | type(None)): raise ValueError(msg) if max_depth is None: max_depth = float("inf") @@ -2057,9 +2056,8 @@ def get_entity_vals( for filename in filenames: # Skip ignored directories - # XXX In Python 3.9, we can use Path.is_relative_to() here if any( - [str(filename).startswith(str(ignore_dir)) for ignore_dir in 
ignore_dirs] + [Path(filename).is_relative_to(ignore_dir) for ignore_dir in ignore_dirs] ): continue @@ -2219,7 +2217,7 @@ def _infer_datatype(*, root, sub, ses): def _path_to_str(var): """Make sure var is a string or Path, return string representation.""" - if not isinstance(var, (Path, str)): + if not isinstance(var, Path | str): raise ValueError( f"All path parameters must be either strings or " f"pathlib.Path objects. Found type {type(var)}." diff --git a/mne_bids/tsv_handler.py b/mne_bids/tsv_handler.py index 73bc228bc..532caa370 100644 --- a/mne_bids/tsv_handler.py +++ b/mne_bids/tsv_handler.py @@ -153,7 +153,7 @@ def _from_tsv(fname, dtypes=None): data_dict = OrderedDict() if dtypes is None: dtypes = [str] * info.shape[1] - if not isinstance(dtypes, (list, tuple)): + if not isinstance(dtypes, list | tuple): dtypes = [dtypes] * info.shape[1] if not len(dtypes) == info.shape[1]: raise ValueError( diff --git a/mne_bids/utils.py b/mne_bids/utils.py index 0bc80c2c1..fe3ad5738 100644 --- a/mne_bids/utils.py +++ b/mne_bids/utils.py @@ -210,7 +210,7 @@ def _age_on_date(bday, exp_date): def _check_types(variables): """Make sure all vars are str or None.""" for var in variables: - if not isinstance(var, (str, type(None))): + if not isinstance(var, str | type(None)): raise ValueError( f"You supplied a value ({var}) of type " f"{type(var)}, where a string or None was " @@ -271,7 +271,7 @@ def _get_mrk_meas_date(mrk): """Find the measurement date from a KIT marker file.""" info = get_kit_info(mrk, False)[0] meas_date = info.get("meas_date", None) - if isinstance(meas_date, (tuple, list, np.ndarray)): + if isinstance(meas_date, tuple | list | np.ndarray): meas_date = meas_date[0] if isinstance(meas_date, datetime): meas_datetime = meas_date diff --git a/mne_bids/write.py b/mne_bids/write.py index c7bd1896d..16b817be8 100644 --- a/mne_bids/write.py +++ b/mne_bids/write.py @@ -90,7 +90,7 @@ def _is_numeric(n): - return isinstance(n, (np.integer, np.floating, int, float)) + 
return isinstance(n, np.integer | np.floating | int | float) def _channels_tsv(raw, fname, overwrite=False): @@ -459,7 +459,7 @@ def _participants_tsv(raw, subject_id, fname, overwrite=False): if isinstance(age, tuple): # can be removed once MNE >= 1.8 is required age = date(*age) meas_date = raw.info.get("meas_date", None) - if isinstance(meas_date, (tuple, list, np.ndarray)): + if isinstance(meas_date, tuple | list | np.ndarray): meas_date = meas_date[0] if meas_date is not None and age is not None: @@ -2267,7 +2267,7 @@ def _get_t1w_mgh(fs_subject, fs_subjects_dir): def _get_landmarks(landmarks, image_nii, kind=""): import nibabel as nib - if isinstance(landmarks, (str, Path)): + if isinstance(landmarks, str | Path): landmarks, coord_frame = read_fiducials(landmarks) landmarks = np.array( [landmark["r"] for landmark in landmarks], dtype=float diff --git a/pyproject.toml b/pyproject.toml index bd35aa6bb..b47bffe94 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -12,7 +12,7 @@ maintainers = [ ] license = { text = "BSD-3-Clause" } readme = { file = "README.md", content-type = "text/markdown" } -requires-python = ">=3.9" +requires-python = ">=3.10" keywords = [ "meg", "eeg", @@ -151,4 +151,6 @@ filterwarnings = [ "ignore:datetime\\.datetime\\.utcfromtimestamp.* is deprecated and scheduled for removal in a future version.*:DeprecationWarning", # matplotlib "ignore:Figure.*is non-interactive.*cannot be shown:UserWarning", + # NumPy 2.1 bug (probably) + "ignore:__array__ implementation doesn.*:DeprecationWarning", ] From e724dec48c2b54e314c336d0fadddb63c9a6adf3 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 21 Aug 2024 08:08:13 +0000 Subject: [PATCH 3/5] [pre-commit.ci] pre-commit autoupdate (#1296) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.5.7 → 
v0.6.1](https://github.com/astral-sh/ruff-pre-commit/compare/v0.5.7...v0.6.1) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Stefan Appelhoff --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e9b0730c8..675ba9f56 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,6 @@ repos: - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.5.7 + rev: v0.6.1 hooks: - id: ruff name: ruff mne_bids/ From f919962182cb1eb392cbae64d0a0b22d411001e7 Mon Sep 17 00:00:00 2001 From: Scott Huberty <52462026+scott-huberty@users.noreply.github.com> Date: Fri, 23 Aug 2024 02:28:10 -0700 Subject: [PATCH 4/5] FIX: Workaround header dropdown rendering issue (#1298) * FIX: explicitly set the number of items that should appear in the header. This suppresses a dropdown from kicking in after 5 items, effectively creating a dropdown with only 1 item. Instead, this just shows all 6 items in the header bar. Closes #1295 , which raised an issue with the rendering of the dropdown. If a dropdown is ever introduced again, this issue may rear its head again. 
* FIX: html_theme_options already defined --- doc/conf.py | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/conf.py b/doc/conf.py index 38db22561..b16ccd6bf 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -145,6 +145,7 @@ "use_edit_page_button": False, "navigation_with_keys": False, "show_toc_level": 1, + "header_links_before_dropdown": 6, "navbar_end": ["theme-switcher", "version-switcher", "navbar-icon-links"], "analytics": dict(google_analytics_id="G-C8SH9E98QC"), "switcher": { From 471bd0cdbe29a93bfc258d981ec6b5faa37b9e9c Mon Sep 17 00:00:00 2001 From: Thomas Hartmann Date: Fri, 23 Aug 2024 15:10:01 +0200 Subject: [PATCH 5/5] Add event metadata handling (#1285) * add failing test * code works but should not * we have a failing test * do not use trial type in tsv * my tests are green * make the linter happy * add docstrings * fix docs * add first time stuff --------- Co-authored-by: Stefan Appelhoff --- CITATION.cff | 4 ++ doc/authors.rst | 1 + doc/whats_new.rst | 2 + mne_bids/read.py | 18 +++++++- mne_bids/tests/test_write.py | 66 +++++++++++++++++++++++++++++ mne_bids/write.py | 80 ++++++++++++++++++++++++++++++++---- 6 files changed, 161 insertions(+), 10 deletions(-) diff --git a/CITATION.cff b/CITATION.cff index 727e91000..98bec4910 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -187,6 +187,10 @@ authors: family-names: Benitez affiliation: 'Magnetoencephalography Core, National Institutes of Health, Bethesda, Maryland, USA' orcid: 'https://orcid.org/0000-0001-6364-7272' + - given-names: Thomas + family-names: Hartmann + affiliation: 'Paris-Lodron-University Salzburg, Centre for Cognitive Neuroscience, Department of Psychology, Salzburg, Austria' + orcid: 'https://orcid.org/0000-0002-8298-8125' - given-names: Alexandre family-names: Gramfort affiliation: 'Université Paris-Saclay, Inria, CEA, Palaiseau, France' diff --git a/doc/authors.rst b/doc/authors.rst index 487d69cb5..1d0cb016e 100644 --- a/doc/authors.rst +++ b/doc/authors.rst @@ -48,3 +48,4 @@ .. 
_Julius Welzel: https://github.com/JuliusWelzel .. _Kaare Mikkelsen: https://github.com/kaare-mikkelsen .. _Amaia Benitez: https://github.com/AmaiaBA +.. _Thomas Hartmann: https://github.com/thht diff --git a/doc/whats_new.rst b/doc/whats_new.rst index f549a0d7b..492feddd0 100644 --- a/doc/whats_new.rst +++ b/doc/whats_new.rst @@ -19,6 +19,7 @@ The following authors contributed for the first time. Thank you so much! 🤩 * `Kaare Mikkelsen`_ * `Amaia Benitez`_ +* `Thomas Hartmann`_ The following authors had contributed before. Thank you for sticking around! 🤘 @@ -33,6 +34,7 @@ Detailed list of changes ^^^^^^^^^^^^^^^ - :meth:`mne_bids.BIDSPath.match()` and :func:`mne_bids.find_matching_paths` now have additional parameters ``ignore_json`` and ``ignore_nosub``, to give users more control over which type of files are matched, by `Kaare Mikkelsen`_ (:gh:`1281`) +- :func:`mne_bids.write_raw_bids()` can now handle event metadata as a pandas DataFrame, by `Thomas Hartmann`_ (:gh:`1285`) 🧐 API and behavior changes ^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/mne_bids/read.py b/mne_bids/read.py index ba0b649fd..e02818998 100644 --- a/mne_bids/read.py +++ b/mne_bids/read.py @@ -169,7 +169,7 @@ def _read_events(events, event_id, raw, bids_path=None): # If we have events, convert them to Annotations so they can be easily # merged with existing Annotations. - if events.size > 0: + if events.size > 0 and event_id is not None: ids_without_desc = set(events[:, 2]) - set(event_id.values()) if ids_without_desc: raise ValueError( @@ -200,6 +200,22 @@ def _read_events(events, event_id, raw, bids_path=None): raw.set_annotations(annotations) del id_to_desc_map, annotations, new_annotations + if events.size > 0 and event_id is None: + new_annotations = mne.annotations_from_events( + events=events, + sfreq=raw.info["sfreq"], + orig_time=raw.annotations.orig_time, + ) + + raw = raw.copy() # Don't alter the original. 
+ annotations = raw.annotations.copy() + + # We use `+=` here because `Annotations.__iadd__()` does the right + # thing and also performs a sanity check on `Annotations.orig_time`. + annotations += new_annotations + raw.set_annotations(annotations) + del annotations, new_annotations + # Now convert the Annotations to events. all_events, all_desc = events_from_annotations( raw, diff --git a/mne_bids/tests/test_write.py b/mne_bids/tests/test_write.py index e39a8d4ee..629c7c422 100644 --- a/mne_bids/tests/test_write.py +++ b/mne_bids/tests/test_write.py @@ -20,6 +20,7 @@ import mne import numpy as np +import pandas as pd import pytest from mne.datasets import testing from mne.io import anonymize_info @@ -4113,3 +4114,68 @@ def test_write_neuromag122(_bids_validate, tmp_path): ) write_raw_bids(raw, bids_path, overwrite=True, allow_preload=True, format="FIF") _bids_validate(bids_root) + + +@testing.requires_testing_data +def test_write_evt_metadata(_bids_validate, tmp_path): + """Test writing events and metadata to BIDS.""" + bids_root = tmp_path / "bids" + raw_fname = data_path / "MEG" / "sample" / "sample_audvis_trunc_raw.fif" + raw = _read_raw_fif(raw_fname) + events = mne.find_events(raw, initial_event=True) + df_list = [] + for idx, event in enumerate(events): + direction = None + if event[2] in (1, 3): + direction = "left" + elif event[2] in (2, 4): + direction = "right" + + event_type = "button_press" if event[2] == 32 else "stimulus" + stimulus_kind = None + if event[2] == 5: + stimulus_kind = "smiley" + elif event[2] in (1, 2): + stimulus_kind = "auditory" + elif event[2] in (3, 4): + stimulus_kind = "visual" + + df_list.append( + { + "direction": direction, + "event_type": event_type, + "stimulus_kind": stimulus_kind, + } + ) + + event_metadata = pd.DataFrame(df_list) + + bids_path = _bids_path.copy().update(root=bids_root, datatype="meg") + write_raw_bids( + raw, + bids_path=bids_path, + events=events, + event_metadata=event_metadata, + overwrite=True, + 
extra_columns_descriptions={ + "direction": "The direction of the stimulus", + "event_type": "The type of the event", + "stimulus_kind": "The stimulus modality", + }, + ) + _bids_validate(bids_root) + events_tsv_path = bids_path.copy().update(suffix="events", extension=".tsv") + events_json_path = events_tsv_path.copy().update(extension=".json") + + assert events_tsv_path.fpath.exists() + assert events_json_path.fpath.exists() + + events_json = json.loads(events_json_path.fpath.read_text()) + events_tsv = _from_tsv(events_tsv_path) + + assert "trial_type" not in events_tsv + assert "trial_type" not in events_json + + for cur_col in event_metadata.columns: + assert cur_col in events_tsv + assert cur_col in events_json diff --git a/mne_bids/write.py b/mne_bids/write.py index 16b817be8..8255ffffb 100644 --- a/mne_bids/write.py +++ b/mne_bids/write.py @@ -266,7 +266,9 @@ def _get_fid_coords(dig_points, raise_error=True): return fid_coords, coord_frame -def _events_tsv(events, durations, raw, fname, trial_type, overwrite=False): +def _events_tsv( + events, durations, raw, fname, trial_type, event_metadata=None, overwrite=False +): """Create an events.tsv file and save it. This function will write the mandatory 'onset', and 'duration' columns as @@ -290,6 +292,9 @@ def _events_tsv(events, durations, raw, fname, trial_type, overwrite=False): trial_type : dict | None Dictionary mapping a brief description key to an event id (value). For example {'Go': 1, 'No Go': 2}. + event_metadata : pandas.DataFrame | None + Additional metadata to be stored in the events.tsv file. Must have one + row per event. overwrite : bool Whether to overwrite the existing file. Defaults to False. 
@@ -319,19 +324,30 @@ def _events_tsv(events, durations, raw, fname, trial_type, overwrite=False): else: del data["trial_type"] + if event_metadata is not None: + for key, values in event_metadata.items(): + data[key] = values + _write_tsv(fname, data, overwrite) -def _events_json(fname, overwrite=False): +def _events_json(fname, extra_columns=None, has_trial_type=True, overwrite=False): """Create participants.json for non-default columns in accompanying TSV. Parameters ---------- fname : str | mne_bids.BIDSPath Output filename. + extra_columns : dict | None + Dictionary with additional columns to be added to the events.json file. + has_trial_type : bool + Whether the events.tsv file should contain a 'trial_type' column. overwrite : bool Whether to overwrite the output file if it exists. """ + if extra_columns is None: + extra_columns = dict() + new_data = { "onset": { "Description": ( @@ -361,9 +377,16 @@ def _events_json(fname, overwrite=False): "associated with the event." ) }, - "trial_type": {"Description": "The type, category, or name of the event."}, } + if has_trial_type: + new_data["trial_type"] = { + "Description": "The type, category, or name of the event." + } + + for key, value in extra_columns.items(): + new_data[key] = {"Description": value} + # make sure to append any JSON fields added by the user fname = Path(fname) if fname.exists(): @@ -1378,6 +1401,8 @@ def write_raw_bids( bids_path, events=None, event_id=None, + event_metadata=None, + extra_columns_descriptions=None, *, anonymize=None, format="auto", @@ -1463,8 +1488,9 @@ def write_raw_bids( call ``raw.set_annotations(None)`` before invoking this function. .. note:: - Descriptions of all event codes must be specified via the - ``event_id`` parameter. + Either, descriptions of all event codes must be specified via the + ``event_id`` parameter or each event must be accompanied by a + row in ``event_metadata``. 
event_id : dict | None Descriptions or names describing the event codes, if you passed @@ -1475,6 +1501,11 @@ def write_raw_bids( contains :class:`~mne.Annotations`, you can use this parameter to assign event codes to each unique annotation description (mapping from description to event code). + event_metadata : pandas.DataFrame | None + Metadata for each event in ``events``. Each row corresponds to an event. + extra_columns_descriptions : dict | None + A dictionary that maps column names of the ``event_metadata`` to descriptions. + Each column of ``event_metadata`` must have a corresponding entry in this. anonymize : dict | None If `None` (default), no anonymization is performed. If a dictionary, data will be anonymized depending on the dictionary @@ -1678,8 +1709,24 @@ def write_raw_bids( '"bids_path.task = "' ) - if events is not None and event_id is None: - raise ValueError("You passed events, but no event_id dictionary.") + if events is not None and event_id is None and event_metadata is None: + raise ValueError( + "You passed events, but no event_id dictionary " "or event_metadata." + ) + + if event_metadata is not None and extra_columns_descriptions is None: + raise ValueError( + "You passed event_metadata, but no " + "extra_columns_descriptions dictionary." + ) + + if event_metadata is not None: + for column in event_metadata.columns: + if column not in extra_columns_descriptions: + raise ValueError( + f"Extra column {column} in event_metadata " + f"is not described in extra_columns_descriptions." + ) _validate_type( item=empty_room, item_name="empty_room", types=(mne.io.BaseRaw, BIDSPath, None) @@ -1974,8 +2021,15 @@ def write_raw_bids( # Write events. 
if not data_is_emptyroom: events_array, event_dur, event_desc_id_map = _read_events( - events, event_id, raw, bids_path=bids_path + events, + event_id, + raw, + bids_path=bids_path, ) + + if event_metadata is not None: + event_desc_id_map = None + if events_array.size != 0: _events_tsv( events=events_array, @@ -1983,9 +2037,17 @@ raw=raw, fname=events_tsv_path.fpath, trial_type=event_desc_id_map, + event_metadata=event_metadata, + overwrite=overwrite, + ) + has_trial_type = event_desc_id_map is not None + + _events_json( + fname=events_json_path.fpath, + extra_columns=extra_columns_descriptions, + has_trial_type=has_trial_type, overwrite=overwrite, ) - _events_json(fname=events_json_path.fpath, overwrite=overwrite) # Keep events_array around for BrainVision writing below. del event_desc_id_map, events, event_id, event_dur