Commit

deploy
github-actions[bot] committed Oct 4, 2024
0 parents commit d2b94e8
Showing 201 changed files with 48,492 additions and 0 deletions.
4 changes: 4 additions & 0 deletions .buildinfo
@@ -0,0 +1,4 @@
# Sphinx build info version 1
# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
config: b6d79bf3e4bd43eea2136ebc3d5c5312
tags: 645f666f9bcd5a90fca523b33c5a78b7
Empty file added .nojekyll
Empty file.
Binary file not shown.
@@ -0,0 +1,90 @@
"""
===================
Adding New Features
===================
"""
# %%
import py_neuromodulation as nm
import numpy as np
from typing import Iterable

# %%
# In this example we will demonstrate how a new feature can be added to the existing feature pipeline.
# This can be done by creating a new feature class that implements the protocol class :class:`~features.NMFeature`
# and registering it with the ``nm.add_custom_feature`` function.


# %%
# Let's create a new feature class called `ChannelMean` that calculates the mean signal for each channel.
# We can optionally make it inherit from :class:`~features.NMFeature`, but as long as it has an adequate constructor
# and a `calc_feature` method with the appropriate signature it will work.
# The :func:`__init__` method should take the settings, channel names and sampling frequency as arguments.
# The `calc_feature` method should take the data as an argument and return a dictionary with the computed feature values.
class ChannelMean:
    def __init__(
        self, settings: nm.NMSettings, ch_names: Iterable[str], sfreq: float
    ) -> None:
        # If required for feature calculation, store the settings,
        # channel names and sampling frequency (optional)
        self.settings = settings
        self.ch_names = ch_names
        self.sfreq = sfreq

        # Here you can add any additional initialization code.
        # For example, you could store parameters for the functions
        # used in the calc_feature method.

        self.feature_name = "channel_mean"

    def calc_feature(self, data: np.ndarray) -> dict:
        # First, create an empty dictionary to store the calculated features
        feature_results = {}

        # Here you can add any feature calculation code.
        # This example simply calculates the mean signal for each channel.
        ch_means = np.mean(data, axis=1)

        # Store the calculated features in the feature_results dictionary.
        # Be careful to use a unique key for each channel and metric you compute.
        for ch_idx, ch in enumerate(self.ch_names):
            feature_results[f"{self.feature_name}_{ch}"] = ch_means[ch_idx]

        # Return the feature_results dictionary to the stream
        return feature_results


nm.add_custom_feature("channel_mean", ChannelMean)
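
# %%
# Registration relies only on duck typing: any class with a matching constructor and a
# `calc_feature` method works. To make that expectation explicit, here is a minimal
# sketch using the standard-library :class:`typing.Protocol`. Note that `FeatureProtocol`
# is a hypothetical illustration added for this walkthrough and is not part of
# py_neuromodulation; a runtime-checkable protocol only verifies that the method
# exists, not its exact signature.
from typing import Protocol, runtime_checkable


@runtime_checkable
class FeatureProtocol(Protocol):
    def calc_feature(self, data: np.ndarray) -> dict: ...


# ChannelMean satisfies the protocol structurally, without inheriting from it
assert isinstance(ChannelMean(nm.NMSettings(), ["ch1"], 1000.0), FeatureProtocol)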

# %%
# Now we can instantiate settings and observe that the new feature has been added to the list of features
settings = nm.NMSettings() # Get default settings

settings.features

# %%
# Let's create some artificial data to demonstrate the feature calculation.
N_CHANNELS = 5
N_SAMPLES = 10000 # 10 seconds of random data at 1000 Hz sampling frequency

data = np.random.random([N_CHANNELS, N_SAMPLES])
stream = nm.Stream(
    sfreq=1000,
    data=data,
    settings=settings,
    sampling_rate_features_hz=10,
    verbose=False,
)

feature_df = stream.run()
columns = [col for col in feature_df.columns if "channel_mean" in col]

feature_df[columns]
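
# %%
# As a quick sanity check (an illustrative addition, not part of the original example):
# the artificial data above is drawn uniformly from [0, 1), so the per-window channel
# means reported by the new feature should all lie close to 0.5.
print(feature_df[columns].mean())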


# %%
# Remove the feature so that it does not interfere with other examples
nm.remove_custom_feature("channel_mean")



261 changes: 261 additions & 0 deletions _downloads/0dc588ba0560fad53b77fac59849f12c/plot_1_example_BIDS.ipynb
@@ -0,0 +1,261 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"\n# ECoG Movement decoding example\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This example notebook read openly accessible data from the publication\n*Electrocorticography is superior to subthalamic local field potentials\nfor movement decoding in Parkinson\u2019s disease*\n(`Merk et al. 2022 <https://elifesciences.org/articles/75126>_`).\nThe dataset is available [here](https://doi.org/10.7910/DVN/io.FLM).\n\nFor simplicity one example subject is automatically shipped within\nthis repo at the *py_neuromodulation/data* folder, stored in\n[iEEG BIDS](https://www.nature.com/articles/s41597-019-0105-7) format.\n\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"from sklearn import metrics, model_selection, linear_model\nimport matplotlib.pyplot as plt\n\nimport py_neuromodulation as nm"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's read the example using [mne_bids](https://mne.tools/mne-bids/stable/index.html).\nThe resulting raw object is of type [mne.RawArray](https://mne.tools/stable/generated/mne.io.RawArray.html).\nWe can use the properties such as sampling frequency, channel names, channel types all from the mne array and create the *nm_channels* DataFrame:\n\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"(\n RUN_NAME,\n PATH_RUN,\n PATH_BIDS,\n PATH_OUT,\n datatype,\n) = nm.io.get_paths_example_data()\n\n(\n raw,\n data,\n sfreq,\n line_noise,\n coord_list,\n coord_names,\n) = nm.io.read_BIDS_data(PATH_RUN=PATH_RUN)\n\nchannels = nm.utils.set_channels(\n ch_names=raw.ch_names,\n ch_types=raw.get_channel_types(),\n reference=\"default\",\n bads=raw.info[\"bads\"],\n new_names=\"default\",\n used_types=(\"ecog\", \"dbs\", \"seeg\"),\n target_keywords=[\"MOV_RIGHT\"],\n)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This example contains the grip force movement traces, we'll use the *MOV_RIGHT* channel as a decoding target channel.\nLet's check some of the raw feature and time series traces:\n\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"plt.figure(figsize=(12, 4), dpi=300)\nplt.subplot(121)\nplt.plot(raw.times, data[-1, :])\nplt.xlabel(\"Time [s]\")\nplt.ylabel(\"a.u.\")\nplt.title(\"Movement label\")\nplt.xlim(0, 20)\n\nplt.subplot(122)\nfor idx, ch_name in enumerate(channels.query(\"used == 1\").name):\n plt.plot(raw.times, data[idx, :] + idx * 300, label=ch_name)\nplt.legend(bbox_to_anchor=(1, 0.5), loc=\"center left\")\nplt.title(\"ECoG + STN-LFP time series\")\nplt.xlabel(\"Time [s]\")\nplt.ylabel(\"Voltage a.u.\")\nplt.xlim(0, 20)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"settings = nm.NMSettings.get_fast_compute()\n\nsettings.features.welch = True\nsettings.features.fft = True\nsettings.features.bursts = True\nsettings.features.sharpwave_analysis = True\nsettings.features.coherence = True\n\nsettings.coherence_settings.channels = [(\"LFP_RIGHT_0\", \"ECOG_RIGHT_0\")]\n\nsettings.coherence_settings.frequency_bands = [\"high beta\", \"low gamma\"]\nsettings.sharpwave_analysis_settings.estimator[\"mean\"] = []\nsettings.sharpwave_analysis_settings.sharpwave_features.enable_all()\nfor sw_feature in settings.sharpwave_analysis_settings.sharpwave_features.list_all():\n settings.sharpwave_analysis_settings.estimator[\"mean\"].append(sw_feature)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"stream = nm.Stream(\n sfreq=sfreq,\n channels=channels,\n settings=settings,\n line_noise=line_noise,\n coord_list=coord_list,\n coord_names=coord_names,\n verbose=True,\n)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"features = stream.run(\n data=data,\n out_dir=PATH_OUT,\n experiment_name=RUN_NAME,\n save_csv=True,\n)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Feature Analysis Movement\nThe obtained performances can now be read and visualized using the :class:`analysis.Feature_Reader`.\n\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# initialize analyzer\nfeature_reader = nm.analysis.FeatureReader(\n feature_dir=PATH_OUT,\n feature_file=RUN_NAME,\n)\nfeature_reader.label_name = \"MOV_RIGHT\"\nfeature_reader.label = feature_reader.feature_arr[\"MOV_RIGHT\"]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"feature_reader.feature_arr.iloc[100:108, -6:]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"print(feature_reader.feature_arr.shape)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"feature_reader._get_target_ch()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"feature_reader.plot_target_averaged_channel(\n ch=\"ECOG_RIGHT_0\",\n list_feature_keywords=None,\n epoch_len=4,\n threshold=0.5,\n ytick_labelsize=7,\n figsize_x=12,\n figsize_y=12,\n)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"feature_reader.plot_all_features(\n ytick_labelsize=6,\n clim_low=-2,\n clim_high=2,\n ch_used=\"ECOG_RIGHT_0\",\n time_limit_low_s=0,\n time_limit_high_s=20,\n normalize=True,\n save=True,\n)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"nm.analysis.plot_corr_matrix(\n feature=feature_reader.feature_arr.filter(regex=\"ECOG_RIGHT_0\"),\n ch_name=\"ECOG_RIGHT_0_avgref\",\n feature_names=list(\n feature_reader.feature_arr.filter(regex=\"ECOG_RIGHT_0_avgref\").columns\n ),\n feature_file=feature_reader.feature_file,\n show_plot=True,\n figsize=(15, 15),\n)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Decoding\n\nThe main focus of the *py_neuromodulation* pipeline is feature estimation.\nNevertheless, the user can also use the pipeline for machine learning decoding.\nIt can be used for regression and classification problems and also dimensionality reduction such as PCA and CCA.\n\nHere, we show an example using a sklearn linear regression model. The used labels came from a continuous grip force movement target, named \"MOV_RIGHT\".\n\nFirst we initialize the :class:`~decode.Decoder` class, which the specified *validation method*, here being a simple 3-fold cross validation,\nthe evaluation metric, used machine learning model, and the channels we want to evaluate performances for.\n\nThere are many more implemented methods, but we will here limit it to the ones presented.\n\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"model = linear_model.LinearRegression()\n\nfeature_reader.decoder = nm.analysis.Decoder(\n features=feature_reader.feature_arr,\n label=feature_reader.label,\n label_name=feature_reader.label_name,\n used_chs=feature_reader.used_chs,\n model=model,\n eval_method=metrics.r2_score,\n cv_method=model_selection.KFold(n_splits=3, shuffle=True),\n)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"performances = feature_reader.run_ML_model(\n estimate_channels=True,\n estimate_gridpoints=False,\n estimate_all_channels_combined=True,\n save_results=True,\n)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The performances are a dictionary that can be transformed into a DataFrame:\n\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"df_per = feature_reader.get_dataframe_performances(performances)\n\ndf_per"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"ax = nm.analysis.plot_df_subjects(\n df_per,\n x_col=\"sub\",\n y_col=\"performance_test\",\n hue=\"ch_type\",\n PATH_SAVE=PATH_OUT / RUN_NAME / (RUN_NAME + \"_decoding_performance.png\"),\n figsize_tuple=(8, 5),\n)\nax.set_ylabel(r\"$R^2$ Correlation\")\nax.set_xlabel(\"Subject 000\")\nax.set_title(\"Performance comparison Movement decoding\")\nplt.tight_layout()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.6"
}
},
"nbformat": 4,
"nbformat_minor": 0
}