diff --git a/MANIFEST.in b/MANIFEST.in
index acacb7022..0a9960553 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -5,10 +5,12 @@ include ibllib/atlas/cosmos.npy
 include ibllib/atlas/swanson.npy
 include ibllib/atlas/mappings.pqt
 include ibllib/io/extractors/extractor_types.json
+include ibllib/io/extractors/task_extractor_map.json
 include brainbox/tests/wheel_test.p
 recursive-include brainbox/tests/fixtures *
 recursive-include ibllib/qc/reference *
 graft ibllib/tests/extractors/data
 graft ibllib/io/extractors/ephys_sessions
+graft ibllib/io/extractors/mesoscope
 graft ibllib/tests/fixtures
 recursive-include oneibl/tests/fixtures *
diff --git a/brainbox/behavior/__init__.py b/brainbox/behavior/__init__.py
index e69de29bb..f214be88e 100644
--- a/brainbox/behavior/__init__.py
+++ b/brainbox/behavior/__init__.py
@@ -0,0 +1 @@
+"""Behaviour analysis functions for the IBL task."""
diff --git a/brainbox/behavior/dlc.py b/brainbox/behavior/dlc.py
index 9d73ee4e2..c3f48d32e 100644
--- a/brainbox/behavior/dlc.py
+++ b/brainbox/behavior/dlc.py
@@ -12,7 +12,7 @@ from scipy.stats import zscore
 
 from neurodsp.smooth import smooth_interpolate_savgol
-from brainbox.processing import bincount2D
+from iblutil.numerical import bincount2D
 import brainbox.behavior.wheel as bbox_wheel
 
 logger = logging.getLogger('ibllib')
diff --git a/brainbox/behavior/pyschofit.py b/brainbox/behavior/pyschofit.py
index 7c97800c5..4162eb90d 100644
--- a/brainbox/behavior/pyschofit.py
+++ b/brainbox/behavior/pyschofit.py
@@ -1,5 +1,5 @@
 """
-The psychofit toolbox contains tools to fit two-alternative psychometric
+(DEPRECATED) The psychofit toolbox contains tools to fit two-alternative psychometric
 data. The fitting is done using maximal likelihood estimation: one assumes
 that the responses of the subject are given by a binomial distribution whose
 mean is given by the psychometric function.
@@ -16,14 +16,28 @@ For more info, see:
 - Examples: Examples of use of psychofit toolbox
 
 Matteo Carandini, 2000-2015
+
+NB: USE THE PSYCHOFIT PIP PACKAGE INSTEAD.
 """
 
 import functools
+import warnings
+import traceback
+import logging
+
 import numpy as np
 import scipy.optimize
 from scipy.special import erf
 
+for line in traceback.format_stack():
+    print(line.strip())
+
+msg = 'brainbox.behavior.pyschofit has been deprecated. Install psychofit via pip. See stack above'
+warnings.warn(msg, DeprecationWarning)
+logging.getLogger(__name__).warning(msg)
+
+
 def mle_fit_psycho(data, P_model='weibull', parstart=None, parmin=None, parmax=None, nfits=5):
     """
     Maximumum likelihood fit of psychometric function.
diff --git a/brainbox/behavior/training.py b/brainbox/behavior/training.py
index c96fc0396..adb086660 100644
--- a/brainbox/behavior/training.py
+++ b/brainbox/behavior/training.py
@@ -13,7 +13,7 @@ from one.api import ONE
 from one.alf.exceptions import ALFObjectNotFound
 
-import brainbox.behavior.pyschofit as psy
+import psychofit as psy
 
 _logger = logging.getLogger('ibllib')
 
@@ -123,9 +123,9 @@ def get_subject_training_status(subj, date=None, details=True, one=None):
 
 def get_sessions(subj, date=None, one=None):
     """
-    Download and load in training data for a specfied subject. If a date is given it will load data
-    from the three (or as many are available) previous sessions up to the specified date, if not it
-    will load data from the last three training sessions that have data available
+    Download and load in training data for a specified subject.
+    If a date is given it will load data from the three (or as many as are available) previous
+    sessions up to the specified date. If not it will load data from the last three training
+    sessions that have data available.
 
     :param subj: subject nickname (must match the name registered on Alyx)
     :type subj: string
@@ -227,7 +227,7 @@ def get_training_status(trials, task_protocol, ephys_sess_dates, n_delay):
     """
     Compute training status of a subject from three consecutive training datasets
 
-    :param trials: dict containing trials objects from three consective training sessions
+    :param trials: dict containing trials objects from three consecutive training sessions
     :type trials: Bunch
     :param task_protocol: task protocol used for the three training session, can be 'training',
     'biased' or 'ephys'
@@ -385,7 +385,7 @@ def compute_training_info(trials, trials_all):
     """
     Compute all relevant performance metrics for when subject is on trainingChoiceWorld
 
-    :param trials: dict containing trials objects from three consective training sessions,
+    :param trials: dict containing trials objects from three consecutive training sessions,
     keys are session dates
     :type trials: Bunch
     :param trials_all: trials object with data concatenated over three training sessions
@@ -410,7 +410,7 @@ def compute_bias_info(trials, trials_all):
     """
     Compute all relevant performance metrics for when subject is on biasedChoiceWorld
 
-    :param trials: dict containing trials objects from three consective training sessions,
+    :param trials: dict containing trials objects from three consecutive training sessions,
     keys are session dates
     :type trials: Bunch
     :param trials_all: trials object with data concatenated over three training sessions
@@ -667,7 +667,7 @@ def criterion_delay(n_trials, perf_easy):
 
 def plot_psychometric(trials, ax=None, title=None, plot_ci=False, ci_aplha=0.32, **kwargs):
     """
-    Function to plot pyschometric curve plots a la datajoint webpage
+    Function to plot psychometric curve plots a la datajoint webpage
     :param trials:
     :return:
     """
@@ -730,7 +730,7 @@ def plot_psychometric(trials, ax=None, title=None, plot_ci=False, ci_aplha=0.32,
 
 def plot_reaction_time(trials, ax=None, title=None, plot_ci=False, ci_alpha=0.32, **kwargs):
     """
-    Function to plot reaction time against contrast a la datajoint webpage (inversed for some reason??)
+    Function to plot reaction time against contrast a la datajoint webpage (inverted for some reason??)
     :param trials:
     :return:
     """
diff --git a/brainbox/behavior/wheel.py b/brainbox/behavior/wheel.py
index 8c937ac2c..77a56a131 100644
--- a/brainbox/behavior/wheel.py
+++ b/brainbox/behavior/wheel.py
@@ -1,8 +1,13 @@
 """
-Set of functions to handle wheel data
+Set of functions to handle wheel data.
""" +import logging +import warnings +import traceback + import numpy as np from numpy import pi +from iblutil.numerical import between_sorted import scipy.interpolate as interpolate import scipy.signal from scipy.linalg import hankel @@ -13,11 +18,11 @@ __all__ = ['cm_to_deg', 'cm_to_rad', 'interpolate_position', - 'last_movement_onset', + 'get_movement_onset', 'movements', 'samples_to_cm', 'traces_by_trial', - 'velocity_smoothed'] + 'velocity_filtered'] # Define some constants ENC_RES = 1024 * 4 # Rotary encoder resolution, assumes X4 encoding @@ -49,6 +54,8 @@ def interpolate_position(re_ts, re_pos, freq=1000, kind='linear', fill_gaps=None Timestamps of interpolated positions """ t = np.arange(re_ts[0], re_ts[-1], 1 / freq) # Evenly resample at frequency + if t[-1] > re_ts[-1]: + t = t[:-1] # Occasionally due to precision errors the last sample may be outside of range. yinterp = interpolate.interp1d(re_ts, re_pos, kind=kind)(t) if fill_gaps: @@ -63,7 +70,7 @@ def interpolate_position(re_ts, re_pos, freq=1000, kind='linear', fill_gaps=None def velocity(re_ts, re_pos): """ - Compute wheel velocity from non-uniformly sampled wheel data. Returns the velocity + (DEPRECATED) Compute wheel velocity from non-uniformly sampled wheel data. Returns the velocity at the same samples locations as the position through interpolation. Parameters @@ -78,6 +85,13 @@ def velocity(re_ts, re_pos): np.ndarray numpy array of velocities """ + for line in traceback.format_stack(): + print(line.strip()) + + msg = 'brainbox.behavior.wheel.velocity has been deprecated. Use velocity_filtered instead.' + warnings.warn(msg, DeprecationWarning) + logging.getLogger(__name__).warning(msg) + dp = np.diff(re_pos) dt = np.diff(re_ts) # Compute raw velocity @@ -92,12 +106,23 @@ def velocity(re_ts, re_pos): def velocity_filtered(pos, fs, corner_frequency=20, order=8): """ - Compute wheel velocity from uniformly sampled wheel data + Compute wheel velocity from uniformly sampled wheel data. + + pos: array_like + Vector of uniformly sampled wheel positions. + fs : float + Frequency in Hz of the sampling frequency. + corner_frequency : float + Corner frequency of low-pass filter. + order : int + Order of Butterworth filter. - :param pos: vector of uniformly sampled wheel positions - :param fs: scalar, sampling frequency - :param corner_frequency: scalar, corner frequency of low-pass filter - :param order: scalar, order of Butterworth filter + Returns + ------- + vel : np.ndarray + Array of velocity values. + acc : np.ndarray + Array of acceleration values. """ sos = scipy.signal.butter(**{'N': order, 'Wn': corner_frequency / fs * 2, 'btype': 'lowpass'}, output='sos') vel = np.insert(np.diff(scipy.signal.sosfiltfilt(sos, pos)), 0, 0) * fs @@ -107,7 +132,7 @@ def velocity_filtered(pos, fs, corner_frequency=20, order=8): def velocity_smoothed(pos, freq, smooth_size=0.03): """ - Compute wheel velocity from uniformly sampled wheel data + (DEPRECATED) Compute wheel velocity from uniformly sampled wheel data. Parameters ---------- @@ -125,6 +150,13 @@ def velocity_smoothed(pos, freq, smooth_size=0.03): acc : np.ndarray Array of acceleration values """ + for line in traceback.format_stack(): + print(line.strip()) + + msg = 'brainbox.behavior.wheel.velocity_smoothed has been deprecated. Use velocity_filtered instead.' 
+    warnings.warn(msg, DeprecationWarning)
+    logging.getLogger(__name__).warning(msg)
+
     # Define our smoothing window with an area of 1 so the units won't be changed
     std_samps = np.round(smooth_size * freq)  # Standard deviation relative to sampling frequency
     N = std_samps * 6  # Number of points in the Gaussian covering +/-3 standard deviations
@@ -141,15 +173,24 @@ def last_movement_onset(t, vel, event_time):
     """
-    Find the time at which movement started, given an event timestamp that occurred during the
-    movement. Movement start is defined as the first sample after the velocity has been zero
-    for at least 50ms. Wheel inputs should be evenly sampled.
+    (DEPRECATED) Find the time at which movement started, given an event timestamp that occurred during the
+    movement.
+
+    Movement start is defined as the first sample after the velocity has been zero for at least 50ms.
+    Wheel inputs should be evenly sampled.
 
     :param t: numpy array of wheel timestamps in seconds
     :param vel: numpy array of wheel velocities
     :param event_time: timestamp anywhere during movement of interest, e.g. peak velocity
     :return: timestamp of movement onset
     """
+    for line in traceback.format_stack():
+        print(line.strip())
+
+    msg = 'brainbox.behavior.wheel.last_movement_onset has been deprecated. Use get_movement_onset instead.'
+    warnings.warn(msg, DeprecationWarning)
+    logging.getLogger(__name__).warning(msg)
+
     # Look back from timestamp
     threshold = 50e-3
     mask = t < event_time
@@ -166,6 +207,42 @@ def last_movement_onset(t, vel, event_time):
     return t
 
 
+def get_movement_onset(intervals, event_times):
+    """
+    Find the time at which movement started, given an event timestamp that occurred during the
+    movement.
+
+    Parameters
+    ----------
+    intervals : numpy.array
+        The wheel movement intervals.
+    event_times : numpy.array
+        Sorted event timestamps anywhere during movement of interest, e.g. peak velocity, feedback
+        time.
+
+    Returns
+    -------
+    numpy.array
+        An array the length of event_times containing the movement onset time for each event.
+
+    Examples
+    --------
+    Find the last movement onset before each trial response time:
+
+    >>> trials = one.load_object(eid, 'trials')
+    >>> wheelMoves = one.load_object(eid, 'wheelMoves')
+    >>> onsets = get_movement_onset(wheelMoves.intervals, trials.response_times)
+    """
+    if not np.all(np.diff(event_times) > 0):
+        raise ValueError('event_times must be in ascending order.')
+    onsets = np.full(event_times.size, np.nan)
+    for i in np.arange(intervals.shape[0]):
+        onset = between_sorted(event_times, intervals[i, :])
+        if np.any(onset):
+            onsets[onset] = intervals[i, 0]
+    return onsets
+
+
 def movements(t, pos, freq=1000, pos_thresh=8, t_thresh=.2, min_gap=.1, pos_thresh_onset=1.5,
               min_dur=.05, make_plots=False):
     """
@@ -296,7 +373,7 @@ def movements(t, pos, freq=1000, pos_thresh=8, t_thresh=.2, min_gap=.1, pos_thre
     if make_plots:
         fig, axes = plt.subplots(nrows=2, sharex='all')
         indices = np.sort(np.hstack((onset_samps, offset_samps)))  # Points to split trace
-        vel, acc = velocity_smoothed(pos, freq, 0.015)
+        vel, acc = velocity_filtered(pos, freq)
 
         # Plot the wheel position and velocity
         for ax, y in zip(axes, (pos, vel)):
@@ -440,6 +517,6 @@ def to_mask(a, b):
     return [(cuts[n][0, :], cuts[n][1, :]) for n in range(len(cuts))] if separate else cuts
 
 
-if __name__ == "__main__":
+if __name__ == '__main__':
     import doctest
     doctest.testmod()
diff --git a/brainbox/ephys_plots.py b/brainbox/ephys_plots.py
index 1b6dd63cc..56ec34b40 100644
--- a/brainbox/ephys_plots.py
+++ b/brainbox/ephys_plots.py
@@ -3,7 +3,8 @@ import matplotlib.pyplot as plt
 
 from brainbox.plot_base import (ImagePlot, ScatterPlot, ProbePlot, LinePlot, plot_line, plot_image,
                                 plot_probe, plot_scatter, arrange_channels2banks)
-from brainbox.processing import bincount2D, compute_cluster_average
+from brainbox.processing import compute_cluster_average
+from iblutil.numerical import bincount2D
 from ibllib.atlas.regions import BrainRegions
diff --git a/brainbox/examples/Psychometric curves.ipynb b/brainbox/examples/Psychometric curves.ipynb
index e702aeb64..932c94dc1 100644
--- a/brainbox/examples/Psychometric curves.ipynb
+++ b/brainbox/examples/Psychometric curves.ipynb
@@ -11,11 +11,9 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "This notebook contains examples of fitting and plotting psychometric data for both training and biased sessions using ONE. It also demonstrates how to fetch and plot the fitted parameters using DataJoint.\n",
+    "This notebook contains examples of fitting and plotting psychometric data for both training and biased sessions using ONE.\n",
     "\n",
-    "The module for fitting psychometric data is called 'psychofit' and is avaliable in both Python and MATLAB. The python module complete with examples can be found [here](https://github.com/cortex-lab/psychofit), however there is also a copy in [ibl-pipeline](https://github.com/int-brain-lab/IBL-pipeline/blob/master/ibl_pipeline/utils/psychofit.py).\n",
-    "\n",
-    "In order to run these examples you must how ibl_pipline and ibllib on your path."
+    "The module for fitting psychometric data is called 'psychofit' and is available in both Python and MATLAB. The python module complete with examples can be found [here](https://github.com/cortex-lab/psychofit)."
] }, { @@ -36,16 +34,11 @@ "source": [ "%matplotlib notebook\n", "\n", - "from datetime import datetime # Only for formating title\n", - "\n", "import numpy as np\n", "import matplotlib.pyplot as plt\n", "import seaborn as sns\n", - "\n", - "from oneibl.one import ONE\n", - "import brainbox.behavior.pyschofit as psy\n", - "from ibl_pipeline import behavior, acquisition, subject\n", - "from ibl_pipeline.analyses.behavior import PsychResultsBlock, PsychResults\n", + "from one.api import ONE\n", + "import psychofit as psy\n", "\n", "one = ONE()\n", "\n", @@ -924,7 +917,7 @@ "# help(psy.mle_fit_psycho) # The fitting function\n", "# help(psy.erf_psycho_2gammas) # The function to fit to\n", "\n", - "# data: 3 x n matrix where first row corrsponds to stim levels (%), \n", + "# data: 3 x n matrix where first row corresponds to stim levels (%),\n", "# the second to number of trials for each stim level (int),\n", "# the third to proportion rightward (float between 0 and 1)\n", "data = np.vstack((xx, nn, pp))\n", @@ -940,7 +933,7 @@ " 'nfits': 10\n", "}\n", "\n", - "# Fit the parameters for the erf function with 2 seperate lapses\n", + "# Fit the parameters for the erf function with 2 separate lapses\n", "pars, L = psy.mle_fit_psycho(data, 'erf_psycho_2gammas', **kwargs);\n", "\n", "# graphics\n", @@ -974,7 +967,7 @@ "contrast = signed_contrast(trials) # Function defined in previous section\n", "\n", "# data: a dict whose keys are the block type and values a\n", - "# 3 x n matrix where first row corrsponds to stim levels (%), \n", + "# 3 x n matrix where first row corresponds to stim levels (%),\n", "# the second to number of trials for each stim level (int),\n", "# the third to proportion rightward (float between 0 and 1)\n", "data = {}\n", @@ -989,7 +982,11 @@ { "cell_type": "code", "execution_count": 7, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "data": { @@ -1803,7 +1800,7 @@ "for pL, da in data.items():\n", " # Fit it\n", " pars, L = psy.mle_fit_psycho(da, 'erf_psycho_2gammas', **kwargs);\n", - " \n", + "\n", " # Print pars\n", " print('prob left = {:.1f}, bias = {:2.0f}, threshold = {:2.0f}, lapse = {:.01f}, {:.01f}'.format(pL, *pars))\n", "\n", @@ -1813,1689 +1810,7 @@ " plt.plot(x, psy.erf_psycho_2gammas(pars, x), label=f'{int(pL*100)}', color=colours[pL])\n", "\n", "# Get some details for the title\n", - "det = one.get_details(eid)\n", - "ref = f\"{datetime.fromisoformat(det['start_time']).date()}_{det['number']:d}_{det['subject']}\"\n", - "\n", - "plt.title(ref)\n", - "plt.legend()\n", - "makepretty()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Using DataJoint\n", - "### Plotting an unbiased training session" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [], - "source": [ - "# Pick a random training session with good performance\n", - "sessions = acquisition.Session & 'task_protocol LIKE \"%training%\"' & behavior.CompleteTrialSession\n", - "query = (PsychResults & 'performance > 0.7') * sessions.proj('session_uuid')\n", - "\n", - "# Fetch the data\n", - "fields = ('signed_contrasts', 'n_trials_stim_right', 'prob_choose_right', 'bias', 'threshold', 'lapse_low', 'lapse_high')\n", - "blob = query.fetch(*fields, limit=1, squeeze=True) # Fetch first in list\n", - "xx, nn, pp, (*pars) = [v[0] for v in blob] # Unpack data into variables\n", - "pars = np.array(pars)" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": { - "scrolled": false - }, - 
"outputs": [ - { - "data": { - "application/javascript": [ - "/* Put everything inside the global mpl namespace */\n", - "window.mpl = {};\n", - "\n", - "\n", - "mpl.get_websocket_type = function() {\n", - " if (typeof(WebSocket) !== 'undefined') {\n", - " return WebSocket;\n", - " } else if (typeof(MozWebSocket) !== 'undefined') {\n", - " return MozWebSocket;\n", - " } else {\n", - " alert('Your browser does not have WebSocket support. ' +\n", - " 'Please try Chrome, Safari or Firefox ≥ 6. ' +\n", - " 'Firefox 4 and 5 are also supported but you ' +\n", - " 'have to enable WebSockets in about:config.');\n", - " };\n", - "}\n", - "\n", - "mpl.figure = function(figure_id, websocket, ondownload, parent_element) {\n", - " this.id = figure_id;\n", - "\n", - " this.ws = websocket;\n", - "\n", - " this.supports_binary = (this.ws.binaryType != undefined);\n", - "\n", - " if (!this.supports_binary) {\n", - " var warnings = document.getElementById(\"mpl-warnings\");\n", - " if (warnings) {\n", - " warnings.style.display = 'block';\n", - " warnings.textContent = (\n", - " \"This browser does not support binary websocket messages. \" +\n", - " \"Performance may be slow.\");\n", - " }\n", - " }\n", - "\n", - " this.imageObj = new Image();\n", - "\n", - " this.context = undefined;\n", - " this.message = undefined;\n", - " this.canvas = undefined;\n", - " this.rubberband_canvas = undefined;\n", - " this.rubberband_context = undefined;\n", - " this.format_dropdown = undefined;\n", - "\n", - " this.image_mode = 'full';\n", - "\n", - " this.root = $('
');\n", - " this._root_extra_style(this.root)\n", - " this.root.attr('style', 'display: inline-block');\n", - "\n", - " $(parent_element).append(this.root);\n", - "\n", - " this._init_header(this);\n", - " this._init_canvas(this);\n", - " this._init_toolbar(this);\n", - "\n", - " var fig = this;\n", - "\n", - " this.waiting = false;\n", - "\n", - " this.ws.onopen = function () {\n", - " fig.send_message(\"supports_binary\", {value: fig.supports_binary});\n", - " fig.send_message(\"send_image_mode\", {});\n", - " if (mpl.ratio != 1) {\n", - " fig.send_message(\"set_dpi_ratio\", {'dpi_ratio': mpl.ratio});\n", - " }\n", - " fig.send_message(\"refresh\", {});\n", - " }\n", - "\n", - " this.imageObj.onload = function() {\n", - " if (fig.image_mode == 'full') {\n", - " // Full images could contain transparency (where diff images\n", - " // almost always do), so we need to clear the canvas so that\n", - " // there is no ghosting.\n", - " fig.context.clearRect(0, 0, fig.canvas.width, fig.canvas.height);\n", - " }\n", - " fig.context.drawImage(fig.imageObj, 0, 0);\n", - " };\n", - "\n", - " this.imageObj.onunload = function() {\n", - " fig.ws.close();\n", - " }\n", - "\n", - " this.ws.onmessage = this._make_on_message_function(this);\n", - "\n", - " this.ondownload = ondownload;\n", - "}\n", - "\n", - "mpl.figure.prototype._init_header = function() {\n", - " var titlebar = $(\n", - " '
');\n", - " var titletext = $(\n", - " '
');\n", - " titlebar.append(titletext)\n", - " this.root.append(titlebar);\n", - " this.header = titletext[0];\n", - "}\n", - "\n", - "\n", - "\n", - "mpl.figure.prototype._canvas_extra_style = function(canvas_div) {\n", - "\n", - "}\n", - "\n", - "\n", - "mpl.figure.prototype._root_extra_style = function(canvas_div) {\n", - "\n", - "}\n", - "\n", - "mpl.figure.prototype._init_canvas = function() {\n", - " var fig = this;\n", - "\n", - " var canvas_div = $('
');\n", - "\n", - " canvas_div.attr('style', 'position: relative; clear: both; outline: 0');\n", - "\n", - " function canvas_keyboard_event(event) {\n", - " return fig.key_event(event, event['data']);\n", - " }\n", - "\n", - " canvas_div.keydown('key_press', canvas_keyboard_event);\n", - " canvas_div.keyup('key_release', canvas_keyboard_event);\n", - " this.canvas_div = canvas_div\n", - " this._canvas_extra_style(canvas_div)\n", - " this.root.append(canvas_div);\n", - "\n", - " var canvas = $('');\n", - " canvas.addClass('mpl-canvas');\n", - " canvas.attr('style', \"left: 0; top: 0; z-index: 0; outline: 0\")\n", - "\n", - " this.canvas = canvas[0];\n", - " this.context = canvas[0].getContext(\"2d\");\n", - "\n", - " var backingStore = this.context.backingStorePixelRatio ||\n", - "\tthis.context.webkitBackingStorePixelRatio ||\n", - "\tthis.context.mozBackingStorePixelRatio ||\n", - "\tthis.context.msBackingStorePixelRatio ||\n", - "\tthis.context.oBackingStorePixelRatio ||\n", - "\tthis.context.backingStorePixelRatio || 1;\n", - "\n", - " mpl.ratio = (window.devicePixelRatio || 1) / backingStore;\n", - "\n", - " var rubberband = $('');\n", - " rubberband.attr('style', \"position: absolute; left: 0; top: 0; z-index: 1;\")\n", - "\n", - " var pass_mouse_events = true;\n", - "\n", - " canvas_div.resizable({\n", - " start: function(event, ui) {\n", - " pass_mouse_events = false;\n", - " },\n", - " resize: function(event, ui) {\n", - " fig.request_resize(ui.size.width, ui.size.height);\n", - " },\n", - " stop: function(event, ui) {\n", - " pass_mouse_events = true;\n", - " fig.request_resize(ui.size.width, ui.size.height);\n", - " },\n", - " });\n", - "\n", - " function mouse_event_fn(event) {\n", - " if (pass_mouse_events)\n", - " return fig.mouse_event(event, event['data']);\n", - " }\n", - "\n", - " rubberband.mousedown('button_press', mouse_event_fn);\n", - " rubberband.mouseup('button_release', mouse_event_fn);\n", - " // Throttle sequential mouse events to 1 every 20ms.\n", - " rubberband.mousemove('motion_notify', mouse_event_fn);\n", - "\n", - " rubberband.mouseenter('figure_enter', mouse_event_fn);\n", - " rubberband.mouseleave('figure_leave', mouse_event_fn);\n", - "\n", - " canvas_div.on(\"wheel\", function (event) {\n", - " event = event.originalEvent;\n", - " event['data'] = 'scroll'\n", - " if (event.deltaY < 0) {\n", - " event.step = 1;\n", - " } else {\n", - " event.step = -1;\n", - " }\n", - " mouse_event_fn(event);\n", - " });\n", - "\n", - " canvas_div.append(canvas);\n", - " canvas_div.append(rubberband);\n", - "\n", - " this.rubberband = rubberband;\n", - " this.rubberband_canvas = rubberband[0];\n", - " this.rubberband_context = rubberband[0].getContext(\"2d\");\n", - " this.rubberband_context.strokeStyle = \"#000000\";\n", - "\n", - " this._resize_canvas = function(width, height) {\n", - " // Keep the size of the canvas, canvas container, and rubber band\n", - " // canvas in synch.\n", - " canvas_div.css('width', width)\n", - " canvas_div.css('height', height)\n", - "\n", - " canvas.attr('width', width * mpl.ratio);\n", - " canvas.attr('height', height * mpl.ratio);\n", - " canvas.attr('style', 'width: ' + width + 'px; height: ' + height + 'px;');\n", - "\n", - " rubberband.attr('width', width);\n", - " rubberband.attr('height', height);\n", - " }\n", - "\n", - " // Set the figure to an initial 600x600px, this will subsequently be updated\n", - " // upon first draw.\n", - " this._resize_canvas(600, 600);\n", - "\n", - " // Disable right mouse context menu.\n", - " 
$(this.rubberband_canvas).bind(\"contextmenu\",function(e){\n", - " return false;\n", - " });\n", - "\n", - " function set_focus () {\n", - " canvas.focus();\n", - " canvas_div.focus();\n", - " }\n", - "\n", - " window.setTimeout(set_focus, 100);\n", - "}\n", - "\n", - "mpl.figure.prototype._init_toolbar = function() {\n", - " var fig = this;\n", - "\n", - " var nav_element = $('
');\n", - " nav_element.attr('style', 'width: 100%');\n", - " this.root.append(nav_element);\n", - "\n", - " // Define a callback function for later on.\n", - " function toolbar_event(event) {\n", - " return fig.toolbar_button_onclick(event['data']);\n", - " }\n", - " function toolbar_mouse_event(event) {\n", - " return fig.toolbar_button_onmouseover(event['data']);\n", - " }\n", - "\n", - " for(var toolbar_ind in mpl.toolbar_items) {\n", - " var name = mpl.toolbar_items[toolbar_ind][0];\n", - " var tooltip = mpl.toolbar_items[toolbar_ind][1];\n", - " var image = mpl.toolbar_items[toolbar_ind][2];\n", - " var method_name = mpl.toolbar_items[toolbar_ind][3];\n", - "\n", - " if (!name) {\n", - " // put a spacer in here.\n", - " continue;\n", - " }\n", - " var button = $('');\n", - " button.click(method_name, toolbar_event);\n", - " button.mouseover(tooltip, toolbar_mouse_event);\n", - " nav_element.append(button);\n", - " }\n", - "\n", - " // Add the status bar.\n", - " var status_bar = $('');\n", - " nav_element.append(status_bar);\n", - " this.message = status_bar[0];\n", - "\n", - " // Add the close button to the window.\n", - " var buttongrp = $('
');\n", - " var button = $('');\n", - " button.click(function (evt) { fig.handle_close(fig, {}); } );\n", - " button.mouseover('Stop Interaction', toolbar_mouse_event);\n", - " buttongrp.append(button);\n", - " var titlebar = this.root.find($('.ui-dialog-titlebar'));\n", - " titlebar.prepend(buttongrp);\n", - "}\n", - "\n", - "mpl.figure.prototype._root_extra_style = function(el){\n", - " var fig = this\n", - " el.on(\"remove\", function(){\n", - "\tfig.close_ws(fig, {});\n", - " });\n", - "}\n", - "\n", - "mpl.figure.prototype._canvas_extra_style = function(el){\n", - " // this is important to make the div 'focusable\n", - " el.attr('tabindex', 0)\n", - " // reach out to IPython and tell the keyboard manager to turn it's self\n", - " // off when our div gets focus\n", - "\n", - " // location in version 3\n", - " if (IPython.notebook.keyboard_manager) {\n", - " IPython.notebook.keyboard_manager.register_events(el);\n", - " }\n", - " else {\n", - " // location in version 2\n", - " IPython.keyboard_manager.register_events(el);\n", - " }\n", - "\n", - "}\n", - "\n", - "mpl.figure.prototype._key_event_extra = function(event, name) {\n", - " var manager = IPython.notebook.keyboard_manager;\n", - " if (!manager)\n", - " manager = IPython.keyboard_manager;\n", - "\n", - " // Check for shift+enter\n", - " if (event.shiftKey && event.which == 13) {\n", - " this.canvas_div.blur();\n", - " // select the cell after this one\n", - " var index = IPython.notebook.find_cell_index(this.cell_info[0]);\n", - " IPython.notebook.select(index + 1);\n", - " }\n", - "}\n", - "\n", - "mpl.figure.prototype.handle_save = function(fig, msg) {\n", - " fig.ondownload(fig, null);\n", - "}\n", - "\n", - "\n", - "mpl.find_output_cell = function(html_output) {\n", - " // Return the cell and output element which can be found *uniquely* in the notebook.\n", - " // Note - this is a bit hacky, but it is done because the \"notebook_saving.Notebook\"\n", - " // IPython event is triggered only after the cells have been serialised, which for\n", - " // our purposes (turning an active figure into a static one), is too late.\n", - " var cells = IPython.notebook.get_cells();\n", - " var ncells = cells.length;\n", - " for (var i=0; i= 3 moved mimebundle to data attribute of output\n", - " data = data.data;\n", - " }\n", - " if (data['text/html'] == html_output) {\n", - " return [cell, data, j];\n", - " }\n", - " }\n", - " }\n", - " }\n", - "}\n", - "\n", - "// Register the function which deals with the matplotlib target/channel.\n", - "// The kernel may be null if the page has been refreshed.\n", - "if (IPython.notebook.kernel != null) {\n", - " IPython.notebook.kernel.comm_manager.register_target('matplotlib', mpl.mpl_figure_comm);\n", - "}\n" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "prob left = 0.2, bias = -7, threshold = 38, lapse = 0.0, 0.0\n", - "prob left = 0.5, bias = 11, threshold = 18, lapse = 0.1, 0.0\n", - "prob left = 0.8, bias = 7, threshold = 8, lapse = 0.0, 0.1\n" - ] - } - ], - "source": [ - "plt.figure()\n", - "# For each block type, plot the data\n", - "for pL, xx, nn, pp, (*pars) in zip(*blocks):\n", - " # Print pars\n", - " print('prob left = {:.1f}, bias = {:2.0f}, threshold = {:2.0f}, lapse = {:.01f}, {:.01f}'.format(pL, *pars))\n", - " # Plot contrast values\n", - " 
-    "    plt.plot(xx * 100, pp, colours[pL] + 'o') # colours map defined in above section\n",
-    "    x = np.arange(-100, 100)  # The x-axis values for our curve\n",
-    "    plt.plot(x, psy.erf_psycho_2gammas(pars, x), label=f'{int(pL*100)}', color=colours[pL])\n",
-    "    \n",
-    "# Get some details for the title\n",
-    "query = (acquisition.Session & {'session_uuid': uuid}) * subject.Subject\n",
-    "start_time, number, nickname = query.fetch1('session_start_time', 'session_number', 'subject_nickname')\n",
-    "\n",
-    "plt.title(f'{start_time.date()}_{number}_{nickname}')\n",
+    "plt.title(one.eid2ref(eid, as_dict=False))\n",
     "plt.legend()\n",
     "makepretty()"
    ]
   }
diff --git a/brainbox/examples/dim_reduction.py b/brainbox/examples/dim_reduction.py
index 2c6d28547..832461678 100644
--- a/brainbox/examples/dim_reduction.py
+++ b/brainbox/examples/dim_reduction.py
@@ -8,7 +8,7 @@ from sklearn.decomposition import PCA, FactorAnalysis, FastICA
 
 from one.api import ONE
-from brainbox.processing import bincount2D
+from iblutil.numerical import bincount2D
 
 
 def find_nearest(array, value):
diff --git a/brainbox/examples/docs_wheel_moves.ipynb b/brainbox/examples/docs_wheel_moves.ipynb
index db55cddbe..8024e782d 100644
--- a/brainbox/examples/docs_wheel_moves.ipynb
+++ b/brainbox/examples/docs_wheel_moves.ipynb
@@ -11,14 +11,12 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 1,
    "metadata": {},
    "outputs": [],
    "source": [
     "#%matplotlib notebook\n",
     "\n",
-    "import re\n",
-    "\n",
     "import numpy as np\n",
     "import matplotlib.pyplot as plt\n",
     "import seaborn as sns\n",
@@ -28,39 +26,27 @@
     "import brainbox.behavior.wheel as wh\n",
     "from ibllib.io.extractors.ephys_fpga import extract_wheel_moves\n",
     "from ibllib.io.extractors.training_wheel import extract_first_movement_times\n",
-    "# from ibllib.misc.exp_ref import eid2ref\n",
     "\n",
-    "one = ONE(base_url='https://openalyx.internationalbrainlab.org', silent=True)\n",
+    "one = ONE()\n",
     "sns.set_style('whitegrid')"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 2,
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "2020-09-21_1_SWC_043\n"
+     ]
+    }
+   ],
    "source": [
-    "# NB: This function will soon be available from ibllib.misc.exp_ref\n",
-    "def eid2ref(eid):\n",
-    "    \"\"\"\n",
-    "    Get human-readable session ref from path\n",
-    "    :param eid: The experiment uuid to find reference for\n",
-    "    :return: dict containing 'subject', 'date' and 'sequence'\n",
-    "    \"\"\"\n",
-    "    path_str = str(one.eid2path(eid))\n",
-    "    pattern = r'(?P<subject>[\\w-]+)([\\\\/])(?P<date>\\d{4}-\\d{2}-\\d{2})(\\2)(?P<sequence>\\d{3})'\n",
-    "    match = re.search(pattern, path_str)\n",
-    "    return match.groupdict()"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "eid = 'c7bd79c9-c47e-4ea5-aea3-74dda991b48e'\n",
-    "eid2ref(eid)"
+    "eid = '4ecb5d24-f5cc-402c-be28-9d0f7cb14b3a'\n",
+    "print(one.eid2ref(eid, as_dict=False))"
    ]
   },
   {
@@ -76,8 +62,9 @@
     "![Quadrature encoding schematic](http://www.ni.com/cms/images/devzone/tut/ipuuzhqc3503.jpg)\n",
     "\n",
     "For more information on the rotary encoder see these links:\n",
-    "[National Instruments guide to rotary encoders](http://www.ni.com/tutorial/7109/en/)\n",
-    "[Datasheet for the Kuebler](https://www.kuebler.com/pdf?2400-2420_en.pdf)"
+    "\n",
+    "* [National Instruments guide to rotary encoders](http://www.ni.com/tutorial/7109/en/)\n",
+    "* [Datasheet for the Kuebler](https://www.kuebler.com/pdf?2400-2420_en.pdf)"
    ]
   },
   {
@@ -90,9 +77,17 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "The wheel diameter is 6.2 cm and the number of ticks is 4096 per revolution\n"
+     ]
+    }
+   ],
    "source": [
     "device_info = ('The wheel diameter is {} cm and the number of ticks is {} per revolution'\n",
     "               .format(wh.WHEEL_DIAMETER, wh.ENC_RES))\n",
@@ -113,9 +108,22 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "wheel.position: \n",
+      " [ 1.53398079e-03 -0.00000000e+00 -1.53398079e-03 ... -4.52088682e+02\n",
+      " -4.52090216e+02 -4.52091750e+02]\n",
+      "wheel.timestamps: \n",
+      " [2.64973500e-02 3.13635300e-02 3.42632400e-02 ... 4.29549951e+03\n",
+      " 4.29570042e+03 4.29584134e+03]\n"
+     ]
+    }
+   ],
    "source": [
     "wheel = one.load_object(eid, 'wheel', collection='alf')\n",
     "\n",
@@ -137,14 +145,13 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 5,
    "metadata": {},
    "outputs": [],
    "source": [
     "try:\n",
     "    # Warning: Some older sessions may not have a wheelMoves dataset\n",
     "    wheel_moves = one.load_object(eid, 'wheelMoves', collection='alf')\n",
-    "    assert wheel_moves, 'object not found'\n",
     "except AssertionError:\n",
     "    wheel_moves = extract_wheel_moves(wheel.timestamps, wheel.position)"
    ]
@@ -163,9 +170,18 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
+   "execution_count": 6,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": "",
+      "image/png": "...(base64 PNG of the interpolated wheel position trace elided)..."
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
    "source": [
     "pos, t = wh.interpolate_position(wheel.timestamps, wheel.position)\n",
     "sec = 5  # Number of seconds to plot\n",
@@ -195,7 +211,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 7,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -206,9 +222,18 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
+   "execution_count": 8,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": "",
+      "image/png": "...(base64 PNG figure output elided)..."
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+fUj7t6FcjIAKZOBby9gV27TF7sMjl/nprIMjKsXRJWDAeF0mRk0HC/4kHB15fuwlJTrVMuVvlcvUoTJFu2pMexsZqPEwL49VegRw+gShXaN2gQFM7OwNdfqx/799+0ff554NVXqaaQn2+e8utr+nTgueeo/DIZ8Ntv1i0PU8NBoTTJybT19lbfr2xO4iYkZirXrgGNG1MaFYlEe/PR9es0m75bt8J9np5IDg2ldcQPHy7cf+kSDaEODgZee41qvSdOmPc6dLlyBfj8c2DECGrWCgwEBg7UXStiFsVBoTSPH9NWU00B4KDATEOhoHXAGzemu//q1anTWZPjx2n70ktqu5Pee48+/MeMKZzMduUK0KQJYGcH9OpFAWLbNjNeSCmWLaMyfPUV8MorhaOtpk+3XpmYGg4KpVHWFLy81PdzUGCmFBdHTZVNmtDjWrW0J1w8fZpuUoKD1XYLZ2dg0SLg33+Bn3+mnZcu0XBUAHBzA/r0oSGs1pjTkJ1N7z14cOFNVkAArXu+ZQuP5isnOCiUJiWFttx8xMzp8mXaKj/AdQWFc+eoTV4iKflc375AUBCwZg3NuE9IAJo3L3x+9GiaX/PTTyYtvl727KGZ2iNGqO9/+23q57BmDYapcFAojTmDwnvvAW3bAllZeh0eFUX/71IpbaOiyv7WrJy5eJE+5JVBoWbNwpTtRcnl1CHdqpXm80ilwBtvUL+CsmnmuecKn+/end5j2jT9UmmY0tq1FOw6dVLf36QJUL9+YXmZVXFQKI0yKBRvPnJ3p1xIZQ0K9+4BK1dSU4Ae/wxRUVTLjo2lwSexsfSYA0PZRF2OQtDSIEhnSRG0NAhRl034g8zLM3yo8rlz1Bzk7k6Pa9ak/iy5XP24a9forrpFC+3nCg2lPop33gEcHdWDglQKrFhBf3/DhumeIGdKJ08Cv/9O/R12diWf79EDOHLEOs1aTA0HhdIkJ9OHv7Oz+n6JhOYqlDXVxcGDhd//+Weph0dGlqxQZGXRfmaYqMtRCN8TjtjUWAgIxKbGInxPuGkCw5EjdAPRoYP+w5WFoJuDF14o3Kdcs6N4Jt6LF2n77LPaz9ewIdC5M33/dLazmhdfBL75hoa1rlqlXxnL6t9/KWFf16400mj8eM3Hvfwy9TmcP2/e8rBScVAozZMngKen5vZbY2Y1nzhBHx6dOun1j6BtdKKuSa9Ms8hDkcjKU4+wWXlZiDxkZIQVgpLZuboCZ89SCoonT0p/3b//Utt/hw6F+5RBoXgT0uXL9CH/zDO6z7l+PTB5csl5C0rh4fR+CxdSrcIcEhMpOB09Sn0ZR49SdmFNlNd+8qR5ysL0xkGhNE+elOxPUPLzK/uICeWdYYsW1EZcSjVeJjNsP9MuLlVzJNW2X2///EOTxWbOpPbzI0doItrdu7pfp5xXoLy7BwqDQvHO5kuXCoeY6iKTAfPmFa4rXpxEAnzwAZXt2DHd5yqrsWMpMBw6BCxfrvuP1d+fnteW1I9ZDAeF0jx5QukENClrptTUVAoE7doBTZtSO1ApHxxz5wIuEvVkZlWq5GPuXMPf3tb5u/hr3C/zkAHp6ZR99N49w0+8ZQt9WA8eDISF0XyClBT6Xlcfw6+/UtNK0SGm2moKly6pjyYyRt++1OewY4dpzlfUjz/SCKfp03X3fxTVpg3VsJhVmSQoXFHmbKmMlM1HmpQ1U+rRo/Qh0bFj4bj0Un6GoZ0fYJUYjUDPVEgkAnZ299Gs2TKEhhr+9rYu6FYQUGyhMpd8CeY2/5Du1keOpIlhhmQUFYLmBnTuXDgyrW1b4MsvKThoGwKamUkze/v0UW+i9PGhSWxFawoJCXQTohyhZCw3N2rr37vXNOdTSkqiWkj79sCkSfq/7rnnaGazcm6QFgcPHsTSpUuRpeeoPWYYkwSFNWvWYMiQIdi4cSPSjBjmplAoMH36dAwdOhRhYWGI1Zb7xZJKCwrp6YanI/7lF2p3bt+eagoSCd0B6rJ1K0KxCXdP/AeFQoJ33pmL69enIycnp8ShcXFxWLZsGWJiYgwrlw24ceMGTn9/Gr0LeiPQIxASSBDoWA2r9gChXSfS72HSJKq5GbL+wOXL1DcwaJD6/lGj6E55woTCuQhF7dpFfz/FXyeVUm2haFBQ3jg0bap/uUrTsydw6xZgyr+VFSvo/2bVKkoDri/lKCkdfWy///47unfvjokTJ2LkyJFGFZNpIUzkyZMnYuPGjWLkyJHio48+EqdOnTL4HL///ruYNGmSEEKIv//+W4wZM0bn8deuXStTWQ16XbVqQrzzjubn1qwRAhDizh39z5eZKYSnpxChoYX76tcXol8/7a9JSBBCJhPi+edVu/bu3SsAiH379qkdmpycLAICAgQA4erqKnbv3i2EKPvPqqIqer1JSUniwoULIicnR/Tt21e4urqKhIQE9RecOyfErFlCnD8vhEIhRNOmaj/vUkVGCiGV0u+quCtXhPD1pb+V0FAhHj2ivwO5XIhWrYSoV0+IgoKSr+vYUYiXXip8vHQpnSM+vtRr1lt0NJ3zu+8Mf602DRoI0aWL4a9LSaGyfP55iadOnjwp5syZI6pVqyaaNGkipk6dKgCIVatWqY5RKBQiNjZW/P333+LRo0dGXED5Zor/ZV3nMCCM65aUlIQHDx4gJSUF9erVw2+//YadO3di/vz5ep/j/Pnz6NixIwCgRYsW5aNZKjVVe02hRg3axsfTbDJ9PF0lC+HhhfteeIEyRQpRcpRTaioNIXz0CNi8WbW7S5cucHd3x88//4xevXqp9i9cuBD//fcfdu3ahdmzZ+O1115D27ZtUVBQAIlEAnt7e0gkEigUCkg0jaiqICQSCYQQUCgUsH96N5qdnQ0vLy+0adMGPj4+uHnzJg4cOIDVq1cjNzcXjo6OkMvlWLx4MaoVX161dWv6Uho1Cvj4Y7o71+fO/OefaSRZ8fMC1EQYHU35fr78snByiURCv/PNm6lmUFzt2uqjca5epUEP1auXXh59NWhAf8dHjqj/TZbVnTvAjRvA++8b/lpPTxpOW2wE0u3bt9GpUyfI5XIEBgZi27ZtqFOnDn788UcsXLgQb775JqKjozFy5EhcfDpkVyKRYODAgZgxYwY8PDxw//591K5dGwFPO95v3LiBVatWISEhAe7u7lAoFMjPz4fd0w58IQSEEHBwcIC7uzucnJzU9tvb28PT0xMeHh6ws7NDTk4O4uPjkfR0NKK9vT0UCgWEELCzs4NCoUBGRgYyMzMhhIBEIlH9DQOAVCpFQUEBFAoFnJyckJeXh8zMTPj7+yM1NRVXrlzB8OHDERkZifv37yMuLg49evQowy+odCYJCoMHD4aTkxOGDBmCCRMmoMrTdL6jR4826DwZGRlwc3NTPbazs0N+fr7qn744uVyO6Ohog8u7evVqLFmyBGfPnoWrq6vW4yS5uWgol+ORXI7HGt7HMTsbdQHcP30a6doCRzGyVatgHxiI276+9EEBwKNRI9TcsAF3tm1DTrEPIN/ly+F76xZif/gB2Z6eqtcAFBi2bNmCMWPGwNXVFSkp
KVi2bBl69eqF4OBgrF69GuvXr8dff/2FgoICuLq6Ij8/H0IISKVSKJ4ORSz6x1neAkXRchX9XhnkpFIp8p4mf3N1dUV8fDwWLlyIgqejuezt7fH666+jbdu2uHTpEoKDg9GzZ89S/27s2rbFM46OSIuMRPwXX+g8tsrdu6h3/ToeDhyIFF3nDQtDlQ4d4L5/P2BnB2lWFrKbNkVG8+Zqv1clPxcX+Ny7h+tXrgB2dgg8fx6oUwex169rPH1OTk6Z/h9qNWsGpz//REwZXlucx44dqAngdlAQ5GU4n3/Tpqi6fz9uPr1mAPjqq69QUFCAAwcOoEaNGhBC4Pbt2/j444/xwQcfoGPHjrh8+TLc3d0xZcoUVK9eHVevXsWGDRuwrVjqjHbt2iEgIAA7duyAVCpF9erVkZmZqbphUv7dKD+08/PzkZGRgdynk+qU+xVahvFWfTrktqCgAFKpVPW3KpVK4eLiAmdnZ7W/ZSWFQgGpVAqpVIrc3FzY2dnBxcUFJ0+ehIODAwICAjBt2jRcvnwZhw4dQlBQEGTmGnpodD1ECPHPP/+oPT59+nSZzjNv3jyxd+9e1eOOHTvqPL6s1ahPP/1UABCpqam6D3z4kKqz//uf5ucTE+n5pUv1e+P0dCEcHISIiFDf//ixEI6OQrz3nvr+/HwhatYUok8fjac7ffq0ACDeffddcfbsWdG/f39hZ2en8ediS81HWVlZYteuXeLs2bMiJSWl7Cf67DP6/f76q+7jliwxvBlRHytX0nnj4qhJy8tLiHff1Xp4mX/HixbR+5iiySU8XAgPD83NYfrYtInKcvKkaleDBg1Et27dNB4eEREh/Pz8RN++fUV8sWa1hIQEsXr1arFy5Uqxd+9eMWfOHBEYGCicnJzEW2+9VbIJ0QAFBQXi8ePH4s6dO+LWrVvi3r17Qi6Xl/l8pVEoFOLtt98WAES1atWM/n/W9XqjgsLZs2fFpk2bRI8ePcTmzZvF5s2bRVRUlOjdu3eZzvfbb7+p9SmMHj1a5/Fl/cF89tlnAkDpHxg3btAf6IYNmp9XKIRwdhbio4/0e+P9++l8v/1W8rmRI4VwcRHiyZPCfUeP0vGbNmk95cSJEwUA1ddXX32l8ThbCgpCmOh6s7KEaNJEiIAA+l6bHj2oHd3Ufv+dfv9HjlA/AiDE119rPbzM13zoEJ3799/LWNAiWrQQQssHuF6Sk+nG6en/VFxcnAAgFi9erPHwslyzQqEoe/msSKFQiH/++UecOXPG6HPp+rkZNfqoatWqSEpKQm5uLhITE5GYmIiUlBRERESU6XzdunVDlSpVMGzYMMyfPx+TJ082pnhaKZtItFUBVZRpCrQ1DUkkNL68tMlJSidP0mvatSv53Hvv0XyFotXdHTtoWGLv3lpPuXjxYpw/fx67du3CzZs3MXHiRP3Kwkrn7Ez5/+/f155kKiuL2uN79jT9+9erR9tbt6g/ASgcwmxKynkPmkZHGSI7m87Rpk3Zz+HlBfTti6jv0hEkU0AmCwBwB3L5oFJfqq/y1kSqL4lEgubNm6s1sZuDUX0K9evXR/369TFkyJCSHXdlIJVKMXv2bKPPo8/7AHoEBWWKAm2T1wD6x711S783Pn2aFlHRNNW/TRvqrN65k1ICCAFs306raymTpGkgkUjQqlUrtNKWNZMZp3Nn+iBeu5ZSPBd3+DCQk0NLXZpaYCDdFNy4QUOfAfMEBV9f6iC/ds2481y4QDPzn3/eqNNENZ2H8O0ByMpU3rMG4fPPBWQy8LwcCzCqpjD+aXKrAQMG4MUXX1T7Ks/0DgrKmoKuoNCgAXDzZunZJjUlPStKIqEPlsOHKTPmyZOUCnXwYN3nZeYlkQDDh9PvQ9Ms582bqSZZPB20KdjbU0rpq1dp/oSfH6WDMIeGDSn46EuZwrtoVtNTp2ir7W9cT5E/NEAW1AeAZGVJOPmjhRgVFJYtWwYAOH78OPbv34/jx4/j559/xnHlcoHllMHNR7qCQrNmdKf477+6z3XrFqVC1vUP06MHNUecOEF3pi4ulFSNWZdyYplyNTOlK1dopvIbb9AdvTm0aEH5lM6d076GginUr083N/rYvp3WRWjalGouymVDjx6lmrORgYuTP1qXSWY0/+9//1MFiLlz52KVudPxGkk5lvj//u//dB+oT1BQzsI8fVr3uZRJx4pmwiyuSxfKRbNqFeWOGT5cZ9MRs5D69SlVtXKeiBDA//5HaSy8vc2bv7xtW8p/dOkSzYA3l+BgSqFRWrrvK1eAoUMpS+uqVZTae+RIqjn88QelzTASJ3+0LpMEhcOHD+Ozzz4DQLWHw8qsj+XUoKd3fmdLS76l/AfR9cHcuDHlqSntmg8epElHjRtrP8bNjXLPK9fQ/eQT3edkljNiBAX+c+coydu4cZQf6fRp8zXpAOqDDF57zXzvo0zFXVq6iwULqAa7dy8t4rNwIWVBHTSI+j1MULOdO5feoigXF3DyRwsxyeQ1iUSC3NxcVKlSBXl5eSUmZpQ3/v7+ePXVV3GrtA7i1FTqFNaVplgqpWafffuoX0HTsXl5NGP5tdc0r8tQ1MKF1BzVvz/1V7DyYfRo4IsvqOP/yRPqdP7uO80zkU0pKAhYt44S5+mbbbQslCOdYmK0N1PJ5TQibvhwuhECgHffpeakX36hWnO3bkYXRdmZHBlJTUYyGQUE7mS2DJMEhWHDhqFv376oX78+bt++jXfeeccUpzWr2rVr40xpudt1pc0uql8/au756y/KfFrcn39SCuX+/fUpmHlSGTPjeHgAu3cDn39OaUc+/dT8AUHJEonf6talra6awunTFJz69CncJ5XScrIHDtDfvol+JqGhHASsxWRpLrp27arKL+JVfD3jcsjf3x9JSUnIy8uDg4OD5oNSU/ULCj170kiRvXs1B4WdO6n+2727UWVmVta2Ld0RV0bu7jS6SVdQUOYkKt4v5uREazOwSsEkYf3GjRt455138N5772HUqFG4Zux4Zwu4W/Uu8CHgOM9R+8Lt+gaFqlXpA0Nbv8Lvv1MHXPG1chkrT+rV0x0U/v6bmrOUTUesUjJJUPj8888xd+5cnDhxAvPnz7fIBDRj/BL7CzZlbAI8oXvhdl0ZUovr2JH+aYov/BEfT/9o5hjHzpgpPfOM7qBw5YrpFvhh5ZZJgoIQAg0bNgQANGrUSGtW0/JiyeUlyBW5avs0Ltyua4Gd4l54AcjPp3V6i1L2W7RtW6ayMmYx9erRBD0NCzehoIDm4jRqZPlyMYsySVCwt7fHH3/8gfT0dBw+fFiVOru8epj1UOP+Egu369vRDBSO2LhwQX3/339T55s5R44wZgrBwTQH4/btks/FxdEw6aLrSLNKySRBYe7cudixYwdCQkKwa9cuzJkzxxSnNRudC7crCWFY81FAACXzKr6s5qVLVC0vPvCasfJG+YGvaWazMlAoRymxSsuodh7lwhN+fn748ssvTVIgS5jYbCJmnp+BrPzCtZVdHFwwt2uR2TEZGVRl1jcoSCTU3lo80yS3w7KKon592moKCspMwHXqWKw4zDq
MCgo9e/ZUrSKkzCek/P7QoUMmKaA59Ansg5rVqiFyw0jEeQIyj0DM7ToXoc2KDIxWZkg1ZHht06bAxo2Fy2rm5FDH3bBhpiw+Y+bh6Umz7jUlxouLo7/pp8tZssrLqKBQPJ3FkydP4OHhUSHylYe2ehOh7cKBiROBGRqWW0xJoa2+NQWAgkJaGiUIq12b7rgUCt2pLRgrTxo00B4UatQAtM3pYZWGSfoUzp49iz59+iAkJATLli3D1q1bTXFa8/Pw0J4ATBkUDKkpKHPdX7lCW+XCKBwUWEWhLSgob3RYpWeSoLB06VJs3LgRvr6+GDNmDDZt2mSK05qfuzvd2WtSlqCg7DtQ9isoFx/nHEasomjQAEhKojTvRf33H6XLZpWeSYKCRCKBp6cnJBIJHB0d4erqWvqLygN398IVrYorS1Dw8qLsXX//TY8vXaJ/MkdH48rJmKUob2CKdzZzULAZJgkKgYGBWLx4MVJSUrBq1SrUrFnTFKc1v6pVtQeF5GTaensbds7WrSm9MkBzFniZTFaRaBqBlJlJNeqK8n/NjGKSoJCUlAR/f3+0bt0aLi4u5X6egoqumkJyMjX9GLrITdu2tMramTO0OIqR69UyZlF16tDffdGVBB88oC0HBZtgkqAwduxYxMTE4MKFC0hJScHj4u2R5VVpQcHLy/BUwK+8Qttx42jbpUvZy8eYpTk4AIGBdGOjFB9P2xo1rFMmZlEmSVLUrFkzNGvWDKmpqZg5cya6deuGK8oROOVZaUHB0KYjAGjZkhZBP3OGhqjyyCNW0RRPjKcMClxTsAkmqSmcO3cOU6ZMwRtvvIFnnnkGBw8eNMVpzc/NTXtQSEoCfH0NP6dEAqxfT6usrV1b+kprjJU3deuqBwVl8xHXFGyCSWoKP/zwAwYPHoy5c+dWiIlrKlWrUjoLhaJkM1FSEuWOL4sXXgB27TK6eIxZRb16NPouJYWaUB88oBF0FWDxLGY8kwSF5cuXG32O9PR0REREICMjA3l5efjss8/QsmVLE5ROB2UncmZmyQ7lpCRac5YxW6NMenf7No2me/CAmo4q0g0fKzMLLTJbunXr1qFt27bYuHGj5RbqUQaC4k1IQpS9+Yixiq5oUAAKgwKzCeVmNZyRI0eq1mEoKCiAox4TvuRyOaKjow1+r5ycHERHR6NqWhpqAYi5eBG5RdJdSNPT0SA3FwlCILkM5y+PlNdsK2ztegHTXbM0Px8NADw6eRKPmzZF3Tt3IG/UCP+Vw58n/55NzypBYevWrfjhhx/U9s2bNw/NmzdHYmIiIiIiMGXKlFLP4+joiEZlWAkqOjqaXvd02F29atXUV5R6Oka7etOmqF5JVppSXbONsLXrBUx8zX5+qJaejmoNGwIJCXAcOBBVy+HPk3/PZT+HNlYJCoMHD8bgwYNL7L9x4wY++ugjfPrpp3jeEpO+qlalbfH8RwkJtK1e3fxlYKw8euYZumlKSqIU8DJZ6a9hlUK5aT66desWJkyYgKVLl6rWezY7DgqMaVa/PnDgQOHiOmUdiccqnHITFBYvXozc3FzMnUurn7m5ueHbb78175tqCwoPn67h7K952U7GKr2GDYEffihM7sjLcNqMchMUzB4ANFGOPtIUFKRSwM/P8mVirDxo2pS2W7fSUNRnnrFueZjFlJshqVbh4UHb4kEhPp6ajuzsLF8mxsoDZXbfgweB4GDA2dm65WEWY9tBwdERqFKl5OprDx7wlH5m22rWLEyj/eKL1i0LsyjbDgoArcFcPCjwgiKMAZGRQEAA8MEH1i4JsyAOCp6ewJMn6vvu3+egwNiIEcC9e7xQlI3hoFA8KGRnU9rsgABrlYgxxqyGg4KXV+HSmwDVEgCgdm3rlIcxxqyIg4K3t3pQiIujLQcFxpgN4qDg7Q0UXT40Npa2gYHWKQ9jjFkRBwVfX+pTyM+nx7GxNFmH+xQYYzaIg4Jy1rKytnD3Lo08eprGmzHGbAkHhWrVaKtMgnf7NlCnjvXKwxhjVsRBQTlzOT6etjExtEYtY4zZIA4Kyklq//0HZGRQcAgOtm6ZGGPMSjgo1KpFHctxccD167SvQQPrlokxxqyEg0KVKjTSKCYGuHqV9jVpYt0yMcaYlZSb9RSsqkEDIDoa8PGhFMHcfMQYs1FcUwAo4dfly5Q7vk0bXkeBMWazOCgAQJcuQG4uNR/17Gnt0jDGmNVwUACArl2B55+nhUVGj7Z2aRhjzGq4TwEA7O2BkyeBggLAwcHapWGMMavhoKAkldIXY4zZMP4UZIwxpsJBgTHGmIpECCGsXYiyunjxIhwdHa1dDMYYq1DkcjlatGih8bkKHRQYY4yZFjcfMcYYU+GgwBhjTIWDAmOMMRUOCowxxlQ4KDDGGFPhoMAYY0zFZoKCQqHA9OnTMXToUISFhSE2NtbaRbKYf/75B2FhYdYuhkXk5eUhIiICw4cPx6BBg3Do0CFrF8nsCgoKMHnyZAwbNgyhoaGIi4uzdpEs5vHjx3j55ZcRExNj7aJYxOuvv46wsDCEhYVh8uTJZnkPm8l9dPDgQeTm5mLLli24ePEivvjiC3z77bfWLpbZrV69Grt374azs7O1i2IRu3fvhqenJxYtWoSUlBT0798fXbt2tXaxzOqPP/4AAGzevBmnT5/G/PnzbeJvOy8vD9OnT4eTk5O1i2IRcrkcALBhwwazvo/N1BTOnz+Pjh07AgBatGiBK1euWLlEliGTybB8+XJrF8NievbsiQkTJqge29nAgkmvvPIK5syZAwB48OABfH19rVwiy1iwYAGGDRuGatWqWbsoFnH9+nVkZ2fjrbfewogRI3Dx4kWzvI/NBIWMjAy4ubmpHtvZ2SE/P9+KJbKMHj16wN7eZiqEcHV1hZubGzIyMjB+/Hh8+OGH1i6SRdjb22PSpEmYM2cOevToYe3imN327dvh7e2tutGzBU5OThg9ejTWrFmDWbNm4ZNPPjHLZ5jNBAU3NzdkZmaqHisUCpv6sLQl8fHxGDFiBPr164e+fftauzgWs2DBAvz++++YNm0asrKyrF0cs/r555/x119/ISwsDNHR0Zg0aRISExOtXSyzqlOnDl577TVIJBLUqVMHnp6eZrlmmwkKrVq1wtGjRwFQIr369etbuUTMHJKSkvDWW28hIiICgwYNsnZxLGLnzp347rvvAADOzs6QSCSVvtksKioKGzduxIYNG9CoUSMsWLAAfn5+1i6WWW3btg1ffPEFACAhIQEZGRlmuWabuVXu1q0bTpw4gWHDhkEIgXnz5lm7SMwMVq5cibS0NKxYsQIrVqwAQJ3tlbkzsnv37pg8eTJCQ0ORn5+PKVOmcPbgSmjQoEGYPHkyQkJCIJFIMG/ePLO0dnCWVMYYYyo203zEGGOsdBwUGGOMqXBQYIwxpsJBgTHGmAoHBcYYYyo2MySVMVORy+XYvXs37Ozs4OHhUelzKzHbwkGBMQMlJiZi69at+Omnn6xdFMZMjoMCYwZauXIlbt26hYYNG2LGjBmoW7cuVq1aBQcHBzx8+BDDhg3DqVOncP36dYwYMQLDhw
/HmTNnsGTJEtjZ2aF27dqYPXs2HBwcrH0pjJXAQYExA40ZMwY3b95US8b28OFD7Ny5E1evXsWECRNw4MABJCQkYOzYsQgJCcG0adPw448/wsfHB0uXLsWOHTswZMgQK14FY5pxUGDMBIKDg+Hg4AB3d3fIZDJUqVIFHh4ekMvlSE5OxqNHj1QZW3NyctChQwfrFpgxLTgoMGYgqVQKhUKhtk8ikWg93svLC/7+/lixYgXc3d1x6NAhuLi4mLuYjJUJBwXGDOTj44O8vDzk5OTodbxUKkVkZCTCw8MhhICrqysWLlxo5lIyVjacEI8xxpgKT15jjDGmwkGBMcaYCgcFxhhjKhwUGGOMqXBQYIwxpsJBgTHGmAoHBcYYYyr/D3bp7RpAA/90AAAAAElFTkSuQmCC\n" + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "# Detect wheel movements for the first 5 seconds\n", "mask = t < (t[0] + sec)\n", @@ -222,14 +247,24 @@ "cell_type": "markdown", "metadata": {}, "source": [ + "The `onsets` and `offsets` are stored in the `wheelMoves.intervals` dataset.\n", + "\n", "For scale, the stimulus must be moved 35 visual degrees to reach threshold. The wheel gain is 4 degrees/mm (__NB__: the gain is double for the first session or so, see [Appendix 2 of the behavior paper](https://docs.google.com/document/d/1RA6wgbWfxD2kGlpNxt0n3HVcW4TEIx8e-YO7k_W1pHs/edit))" ] }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The wheel must be turned ~0.3 rad to move the stimulus to threshold\n" + ] + } + ], "source": [ "threshold_deg = 35 # visual degrees\n", "gain = 4 # deg / mm\n", @@ -243,19 +278,25 @@ "metadata": {}, "source": [ "### Calculating velocity\n", - "Wheel velocity can be calculated using the `velocity_smoothed` function, which returns the velocity and acceleration convolved with a Gaussian window. As with the `movements` function, the input is expected to be evenly sampled, therefore you should interpolate the wheel data before calling this function. The default window size of 3ms is reasonable, and interpolating at a frequency of 1000 (the default) is sufficiently high." + "Wheel velocity can be calculated using the `velocity_filtered` function, which returns the velocity and acceleration passed through a (non-causal) 20 Hz low-pass Butterworth filter.\n", + "As with the `movements` function, the input is expected to be evenly sampled, therefore you should interpolate the wheel data before calling this function.\n", + "\n", + "
\n", + "Info: Why filter?\n", + "From the sampling distribution, there is no antialiasing needed, but the imprecision on the timestamps + positions gives a noisy estimate of the velocity.\n", + "
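[Editor's note — not part of the diff: the cell above motivates low-pass filtering the wheel velocity. Below is a minimal sketch of the idea, assuming SciPy is available. In practice use `brainbox.behavior.wheel.velocity_filtered`; the helper name `lowpass_velocity` and the filter order here are illustrative assumptions, not the library's exact implementation.]

    import numpy as np
    from scipy.signal import butter, filtfilt

    def lowpass_velocity(pos, fs, corner=20, order=3):
        """Estimate velocity from an evenly sampled position trace.

        pos: interpolated wheel position (radians), sampled at fs Hz.
        Differentiates, then applies a zero-phase (i.e. non-causal)
        low-pass Butterworth filter to suppress the noise introduced
        by imprecise timestamps and quantised positions.
        """
        vel = np.insert(np.diff(pos) * fs, 0, 0)   # finite-difference velocity, rad/s
        b, a = butter(order, corner / (fs / 2))    # 20 Hz low-pass by default
        return filtfilt(b, a, vel)                 # filtfilt => zero phase lag
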
" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 10, "metadata": {}, "outputs": [], "source": [ "# pos was the output of interpolate_position using the default frequency of 1000Hz\n", "Fs = 1000\n", "pos, t = wh.interpolate_position(wheel.timestamps, wheel.position, freq=Fs)\n", - "vel, acc = wh.velocity_smoothed(pos, Fs)" + "vel, acc = wh.velocity_filtered(pos, Fs)" ] }, { @@ -263,23 +304,39 @@ "metadata": {}, "source": [ "### Last move onset\n", - "The `movements` algorithm is the recommended way of detecting movement onsets because it is quicker and more accurate, however there is another function that will return the last movement onset before a particular event. This is useful for quickly finding the movement that reached threshold for a given trial. This function finds the first sample after the velocity has been zero for at least 50ms. Because it uses velocity, the smoothed derivative of position, it is less accurate. **NB**: The more accurate approach is to find all moves for which the onset occured before feedback time and the offset occured afterwards." + "The \\_ibl\\_trials.firstMovement_times dataset contains the timestamps of the first recorded movement in a trial.\n", + "The `get_movement_onset` function will find the times at which movement started, given an event timestamp that occurred during the movement.\n", + "In the following example we find the onsets of the movements that led to a feedback response and calculate the movement response time for each trial." ] }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": "
", + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYcAAAD3CAYAAAD2S5gLAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8rg+JYAAAACXBIWXMAAAsTAAALEwEAmpwYAAA/FElEQVR4nO3deVxU5f4H8M/swzAsyiaEimAiooi5hEVpBZmmWd1crqERtxS7ds2FRJNwAxWhumkXza7LLyuXXLpmi7ZomZg7JpuCJiAqm8AMMAszz++PkdEJVBiGM8P4fb9evZw5M+c8369j853nOed5Do8xxkAIIYTchm/tAAghhNgeKg6EEEKaoOJACCGkCSoOhBBCmqDiQAghpAmhtQMwx5kzZyCRSIzP1Wq1yXN7YY952WNOAOVl6y6WKQEA/h5yAPaT1+1akpNarUZoaGiLjtchi4NEIkFQUJDxeU5Ojslze2GPedljTgDlZesW/ZIBANj2uCGXjppXSVU9AMDH1aHJay3JKScnp8VtdcjiQAgh96NZ284AALZNG9rubVFxIITYvTeffNDaIVgEl3lQcSCE2L3wB92tHYJFcJkHXa1ECLF7WSXVyCqptnYYbVZYUYfCijpO2qLiQAixe0v2ZmPJ3mxrh9FmcV9mIu7LTE7aomElQgjpIGZF9uKsLSoOhBDSQYT5u3HW1n01rKTXM2w/XoTiG9yM2RFCiCUVlClRcHNCX3u7r3oO1fVaLN6bBa2eYeZTD2La4/4QCu6r+kgI6cAW7PoDAM1zsLhOjmIcmD0MSftysOr7POzPvo60cf3R01Nu7dAIIe3o7WcCrR2CRXCZh1nFQaFQIC4uDkqlElqtFvHx8RgwYACOHDmC1NRUCIVCDB06FLNmzTLZr66uDnPmzEF1dTUcHBywatUqdO7cGWfOnEFSUhIEAgHCw8MxY8YMiyTXHB9XB3z08kN4JrMECV+dw7Mf/oq5TwciJrwHBHxeu7VLCLGegd07WzsEi+AyD7PGVDZu3IiwsDBs2bIFy5cvx5IlSwAAKSkpSElJwbZt23Ds2DHk5eWZ7Ld9+3YEBwfj888/x7PPPov//Oc/AIDExESkpaXhiy++QGZmJrKystqY1r2N6e+D/bMex2MPeiDpmxy8mH4EedcU7d4uIYR7Jy9X4uTlSmuH0WZ51xScfU+Z1XOIjo6GWCwGAOh0OuNKgEFBQaiqqoJWq4VarYZAIGiyn06nAwCUlJTA3d0dSqUSGo0G3bp1AwCEh4cjIyMDwcHBZifVUp5OUqyfMhBfn72KxP9lYczqw1g4OghThvq1e9uEEO6kfGf4ocrFWH17evercwBs5JzDjh07sHnzZpNtycnJCAkJQVlZGeLi4rBgwQIAQGBgIGJjY+Hq6orAwED4+/s3OZ5AIMCUKVNw/vx5bNy4EUqlEnL5rTF/R0dHFBUV3TUmtVptsrqgSqVq1WqDf9VTDHw02hvv/1aGd7/KwpkLxfjHoM7g86w7zNTWvGyRPeYEUF62rq7OcIViYy4dNa+/95ECaH51VYvnxMyUm5vLRo0axQ4ePMgYY6y6upqFhYWxa9euMcYYW7lyJVu/fv0d98/Pz2dPPfUUUygUbOTIkcbtmzZtYp988sld287Ozr7rc3M16PQs8atzrPu8r9mMz08xlbbBIsc1l6XysiX2mBNjlJetG7/2CBu/9ojxub3kdbuW5NSavM0655Cfn4+ZM2ciLS0Nw4YNAwBIpVLIZDLIZDIAgKenJ2pqakz2W7duHfbs2QMAkMlkEAgEkMvlEIlEKCwsBGMMhw8fxqBBg9pQ7swn4POQOKYP5j3TG3szSxCz6TgUKq1VYiGEkL/ico0os845pKWlQaPRICkpCQAgl8uRnp6O+Ph4xMTEQCKRwMnJCStWrAAAxMTEYO3atfjb3/6GefPmYefOndDpdEhOTgYALF68GHPnzoVOp0N4eDj69+9vofRaj8fjYfrwAHg4STBv51lM/PgoNscMgbvcvu4aRQjpeBrXh7KJcw7NSU9Pb3Z7ZGQkIiMjm2zfsGEDAMDd3R3//e9/m7weGhqK7du3mxNKu3lpoC/c5GJM33ISEz8+ih3ThqKTo9jaYRFCzPDumD7WDsEiuMyDpgffxROBntj06hAUVtbhtf87AZVWZ+2QCCFmCPZxQbCPi7XDaDMu86DicA9h/m74YEIoThXewIzPT0PToLd2SISQVjp8oRyHL5RbO4w2yyyqQmZRFSdtUXFogVH9vLHkuWD8kHMdb3x2EuoG6kEQ0pGs/ukCVv90wdphtFnyNzlI/oabS3Dvq7WV2mLyUD+Ax0PCnnOY+n8nsW7yQEhFgnvuRwghlrJkbF/O2qKeQytMDuuOFS/2w6HzZZjx+WlodTTERAjhTmAXJwR2ceKkLSoOrTRxSDcsHWsYYpq7IxM6PbN2SISQ+wSXa0TRsJIZJg/1g1Ktw8rvciETC5H8Ql/wrLzUBiHE/nG5RhQVBzNNHx4ApVqLj34ugEwswMJng6hAEGKjkl/sZ+0QLILLPKg4tMHcpwNRq9bhv4cvQSjgIf6Z3lQgCLFBAR72cUMvLvOg4tAGPJ5hLaYGvR7rDl2EkM/D3KcDqUAQYmN+yL4OAIjo42XlSNrm6MUKAIb5V+2NikMb8Xg8LHmuL3R64KOfCyDg8zE7spe1wyKE3Gb9rxcBdPzi8P6B8wDonEOHwefzkPR8X+j0enz44wUIeDzMjHjQ2mERQuzMqpe4W5SUioOF8Pk8rHgxBDo98P4P51GnbaBzEIQQi+rmJuOsLSoOFsTn87DqpRDIxAKsO3QR1XVaJL3QDwI+FQhCSNs1rg8V/qB7u7dFxcHC+HwelowNhqtMhNU/5aNGpcX7E0IhEdJSG4SQtmlcH4qKQwfF4/Ew5+lAuDiIsGxfDhSqE0iPGgi5hP66CbGG9yeEWjsEi+AyD1o+ox299pg/Vr0UgiMFFRi75jDySxXWDomQ+5KPqwN8XB2sHUabcZkHFYd2Nm5QV2z5x8Oortdi7Jrf8O0fV60dEiH3nb2ZJdibWWLtMNrsYF4pDuaVctIWFQcODA1ww9dvPoZeXZww/bNTWP5tDhpoRVdCOLPl6GVsOXrZ2mG0WfrBAqQfLOCkLRoE50gXFym2Tg3Dkr3ZWHfoIv4orsYHE0Lh6Sy1dmiEkA5i9aQBnLVFPQcOSYQCJL3QDykvheDE5Rt46r1D2HL0MvS07DchpAU8naTwdOLmByUVBysYP6grvpv5GPr6uGDhnnMYty4DBWVKa4dFCLFxP2RfN64T1d6oOFiJv4ccn7/+MFLH9ceF6wqM+vevWP/LRbp5ECHkjtb/etG4TlR7o3MOVsTj8fDSQF88/qA7Fuw+h6RvcvDNuatY9VJ/9PS0jyWGCbEF6VEDrR2CRXCZB/UcbICnsxTrpwzEvyeG4lJ5LUZ9+CtSvsuFUqOzdmiE2IXOjmJ0dhRbO4w24zIP6jnYCB6Ph7GhD2BogBuS9uXgPwcL8KmEj39VyzB5aHdIRbT8hi3S6RmuVtdD
o6PhQFu240QRAMO8o47su3OGeVLP9PVu97aoONgYTycp/j1xAF5/zB/v7jyJpG9ysOG3S/jXUw/ipYG+EAmos2crLpXXYubW0zhbXA0+D/BzL0UvTyf06uKEXl5yBHo5wc/dkT4zG/DlyWIAHb84bPztTwBUHO5rfR9wQVKkN26IPLBqfx7m7/oD6w4V4K2IXng2xJu+cKzsy5PFePercxAJ+FgwqjcuFV/DDZ0U568rsD/7GhqvK3AQCfDG8ABMHeZPiy+SNlv/yiDO2jKrOCgUCsTFxUGpVEKr1SI+Ph4DBgzAkSNHkJqaCqFQiKFDh2LWrFkm+9XV1WHOnDmorq6Gg4MDVq1ahc6dO2P//v1ISUmBt7ehGr755psYMmRI27OzA4/0dMeuADf8mFOK1P15eGvbGSR/k4NJD3fDpCHdaBKdFXz0cz5WfZ+Hh3t0xgcTQ+Ht4oCcHA2CgoIAACqtDgVlSkOhyLqOtAPnsefMFSS/0A8Pc3B7R2K/nKUiztoyqzhs3LgRYWFhiI6OxsWLFzFnzhzs3r0bKSkpSE1NRUBAACZNmoS8vDwEBgYa99u+fTuCg4MxY8YM7Nq1C//5z3+wcOFCZGVlIS4uDiNGjLBYYvaEx+Mhoo8XnuztiYPnS7H5yGV88MMFrPkpHyP7eePVR/3wULdO1g7zvrDpt0tY9X0exob64L3xoc3eq0MqEiDYxwXBPi54YYAvfs4rRcKec5jw8VGMG+iLBaOC0MkOTo4S7jWuDzWmv0+7t2VWcYiOjoZYbPjHrdPpIJFIAABBQUGoqqqCVquFWq2GQCBosp9OZ7gCp6SkBO7uhjXJs7KykJOTg82bNyMkJARz586FUHjn0NRqNXJycozPVSqVyXN70Vxe3gDihzphSrAUX+fVYH/2NezNLMGgBxwQ1b8TAj1suyfRkT+r/RcUeP9IGYZ2leG1fhKcz8s1vna3vLoAWD3KC59n3sCuU8X4/lwJXhvkhogAeZvuFKjRMeSWqXD2Wj3OXVeBAXCRCOAqFcDFwfCnq1QAFyn/5p8CyMX8VrXZkT+v29XV1QGAMZeOmtfHPxmKQ09xdZPXLJ0TjzF218ssduzYgc2bN5tsS05ORkhICMrKyvD6669jwYIFGDJkCDZt2oQ1a9bA1dUVgYGBWL16Nfj8pmPjU6ZMwfnz57Fx40YEBQVh48aNiIiIgK+vLxITE9GrVy9ERUXdMaacnBxjF7655/aiJXnVqhuw5ehlrD1UgBt1WjzV2xPThwdgYPdONnmL0o76WX15shhvf5mJR3u645NXBjU5f9DSvHKv1eCd3edw8vINhPl3RtIL/RDgYTqnRd2gQ2WtBhVKDcqVapQrNahQqlFRa3jeuD2/VAl1gx58HtDHxxkOIgEqlBpU1GpQXa9ttn0hn4fOjmK4ySVwl4vhdvNxZ0fxzecSeDgZ/nOTi3HxwvkO+Xn9Vf3Ny8IdxIbPraP+O/xrHrdrSU6tyfuexeFO8vLyMHv2bLz99tsYNmwYampqMGLECOzZswdeXl5ISUlB586d8dprrzW7f0FBAaZNm4YffvgBNTU1cHZ2BgAcOnQI33//PZKTk+/YNhWHppTqBmw+8ic+/uUiquu1CPF1QfQjfhjZ17vZf0jW0hE/q42/XcLivdkI7+mO9VMGmf0/ZiO9nmHr8SKs+DYHKq0ejz3ojhqVFhVKDcqUaihUDc3uJxby4SE3fGm7OYrRw12OoQFuGNKjM1wcTMeiNQ163Ki7VUwqGwtLraHQGJ5rUFFreL3uDnNq5GI+urjK4CE3FIzOjmLIxAI4iARwEAsgEwshEwsgFQkgEwv+8lgIh5vvFQtt6wKKjvjv8F4sXRzMGlbKz8/HzJkz8cEHH6B3794AAKlUCplMBpnMcANsT09PVFZWmuy3bt06eHl54fnnn4dMJoNAIABjDM899xy2bt2KLl26ICMjA8HBweaEdV+TS4T45xM98eqjfth56go2Hr6E2dszsXDPOYwI7oKxoT4I7+kOIV3l1GKMMfz7xwv44IcLGBHshQ//PsAiVxzx+TxMergbIvt4YcW3ufjjShXcHCXo4+MMd7nE+GveTW74Ne8ul8BNLoGjWNDi3qBYyIeXsxReLbxgoV6jQ0WtoZdSrlCjTKlGuUKN84XX0CCSoUyhxtniKlTUaqDS6qBt5bwOIZ9nLBQysQAON4vKrSJj+lgmFkIqEsBRLIC7XAJvVyl8XBzgKhOZ1SP+NONPAMDkoX6t3teW7D5tuCT3hQG+7d6WWcUhLS0NGo0GSUlJAAC5XI709HTEx8cjJiYGEokETk5OWLFiBQAgJiYGa9euxd/+9jfMmzcPO3fuhE6nQ3JyMng8HpYtW4YZM2ZAKpUiICAA48ePt1yG9xmZWIjJYd3x8pBu+P1SJb46cwX7/riK3aevwF0uxugQH4wN9UFoV1ebHHayFZoGPZZ8nYUtRwvx0kBfrHixn8ULq4eTBGnj+1v0mOZyEAvgK5bBt5PMZHtOTkOzvzS1Oj3qNDrUa3So1+pQp2lAvUZn2KbV/eVxA+puPldpdX953IBypdq4T71Ghzqt7o5rjDmIBMZC4e0ihberA7ycJXC8WUykIr6xyDiIDL0YB7EA/8ssAZ/H6/DFYesxw2Q+LoqD2cNK1kTDSq2jbtDh59wyfHXmCn7MLYWmQQ8/NxmeDfHGs/18EOTtxFmhuD0nxhhyripwXaEy/I/u4gBnqdDqRSuzqArL9mXj+J83MG2YP+aN6A1+M1cl3Y7+DVoOYwwanR4qjR61mgaUKtS4WlWPkmoVSqrqcbW6HiVVKlytrkepQo3WfIOJhYbiIeTp4SyTmhYUkQDSxh7MzaIibXws4hufG3o0QvT2doK7XNJ+fxHN0N68SVhz85xsYliJdCwSoQDP9O2CZ/p2QXW9Ft+fu4avMq8g/WABPvq5AD3cHTGqXxc82dsLoV1dm70805LKFGrsOFmEHSeKcam81uQ1uUQIbxep8V65Pjcfe7tK8cDNbZaYAKjXMyjUDVCotFCoGqBQNaCwsg6f/34ZpwqrIJcI8e+JoRgb+kCb2yKtw+PxIBEKIBEK4CITwcfVAaFdXZt9r6ZBj8paza2eh9bQI1Fpb/VgVFod1v96EXo9MLq/D1RaHa6WVUAiczK+v16jQ3W99ua+epPj3Y2fmwwPdeuEh7p3wsDundC7S/v+0OJy8isVh/uMi4MI4wd3xfjBXVGuVOP7rGv45o+rxkLhKhPh8Qc9ENHHC8MDPSw66aZeo8NnmTew8/OfUa/VYYhfZ8QO80dPTzmuVatRUlWPkup6w59VKmSVVKNcqTE5hkTIR29vZ/R7wBn9HnBBLy8nNOgZFCotauoNX/Y1N7/sb33x3yoAjY+VmoZmf3H6ucmQOKYPXhroCycOJxwR84iFfHRxufd5la/PGtYkih9pOEfa0l/QjDGoG/TGQtFYNGrqtfjjSjVOXr6BXy6UYdfpKwCA/r4uiBvRG+EPurchqzvjco0oKg73MXe5BC8/3B0vP9wd1XVa/Jpfhp9zy3DofCn+l1kCkYC
HoQHuGNvfByP6doFcYt4/F61Ojz2nr+C9A+dxtVqFUf26YHZkYIuWJVdpdbh2czjhSlU9zl9X4I8r1fjqdAm2HC28434iAQ9OUhGcpEI43/zTz11m3OYkFcFZKrztsQiuMhH6eDvfcwiJ3D94PJ5xKOmv00wf6WkoAIwxFFXW49CFMqT/nI+o//6OSQ93Q9LzfS3ei+ByjSgqDgQA4CITYXSID0aH+ECnZzhTdAP7s65j3x9XMWdHJt7Z8wci+3TB86E+eLyXR4u6tyqtDrtOXUH6oXwUVdYjxNcFs4d2wrjhD7U4LqlIAD93R/i5O5ps1+sZLlfWIb9UCamIf9uXvqEYSIStm+xF7Nu2aUPb7dg8Hg/d3GSY7NYd4wb6Im1/Htb/egleTlLMjHjQom21Zx5/RcWBNCHg8zCwe2cM7N4Z8SN741ThDew5XYKvz5Zgb2YJOslEeDbEG8+HPoCHunVCmVKNgjIlLpbVGv+8WK5E8Y16MAb07+qKRWOC8WRvT+Tm5t47gBbg83no4e6IHn8pGoRYk1QkwIJRQShXarD6pwuI7OOFPj7O1g7LLFQcyF3xeLcKxbtj+uCX82XYc6YEX54sxpajhRAL+NDcvIICMFxq6O/hiNCunfDiAF8M6dEZjwS40a94YlUf/1IAAJj6eEC7t8Xj8ZA4pg9+vVCGeTvPYvcbj1jsMugvjhmGUv8+pJtFjnc3VBxIi4kEfDwV5IWngrygVDfg+3PXkH21Bt3dZPB3l8PfwxFdnKU0Zk9szo85pQC4KQ4A4CoTY9FzwZjx+WlsOXoZ0Y/2sMhxvz5rWFuJigOxWXKJEH8b6Iu/WTsQQmzUs/288X9+l/HJ4UuYPNTPIpeIf/ZamAUiaxlaS4EQQtoBj8dDTLgfim/U46fcUmuH02pUHAghpJ08FeQFN0cx9py5YpHjfZrxp3GdqPZGxYEQYvca5ypwTSTg49kQb/yQfR0KVfPLqLfGDzml+CGHm14InXMghNi9zTHWu+3w2NAH8H8Zl7E/6zr+NrBtC+ZxmQf1HAghpB091M0Vvp0c8NXNW3x2FFQcCCF278MfL+DDHy9YpW0ej4fRIT74Lb8cSnXzN3JqqQ2HL2HD4UsWiuzuqDgQQuzeb/nl+C2/3GrtP/agO3R6hmOXKtp0nCMF5ThSwE0edM6BEELa2cDunSAW8vFbfgWe7O1l9nE+eWWwBaO6O+o5EEJIO5OKBBjs18mqvZfWouJACCEceCTAHbnXFChXqs0+xse/FBjXiWpvVBwIIXavk0yMTjKxVWN49Ob9HzIKzD/vcOpyFU5drrJQRHdH5xwIIXZv7eSB1g4B/R5wgZNUiCMF5RjT38esY3CZB/UcCCGEAwI+D2H+bvgtv21XLHGFigMhxO6t/C4XK7+zzI2m2uKRADcUVtbhanW9Wfv/52A+/nMw38JRNY+GlQghdu/U5RvWDgEAEOLrAgDIulIDbxeHVu+fXVJj6ZDuiIoDIYRwpHcXZ/B4QFZJDSL6tH6+w5pJLb//elvRsBIhhHDEUSJED3dHZJVUWzuUe6LiQAghHAr2cUGWmcNDXK4RRcWBEGL3vF2k8HaRWjsMAECwjzOuVNWjqk7T6n0vlilxsUzZDlE1ZdY5B4VCgbi4OCiVSmi1WsTHx2PAgAE4cuQIUlNTIRQKMXToUMyaNavZ/QsKCjB+/HgcOXIEEokEZ86cQVJSEgQCAcLDwzFjxow2JUUIIbf7YOIAa4dgFOzjDMBwcvmRmxPjWorLPMzqOWzcuBFhYWHYsmULli9fjiVLlgAAUlJSkJKSgm3btuHYsWPIy8trsq9SqcTKlSshFt+arZiYmIi0tDR88cUXyMzMRFZWlpnpEEKIbQv2uXnFEodXHpnDrOIQHR2NiRMnAgB0Oh0kEgkAICgoCFVVVdBqtVCr1RAITG/LxxhDQkICZs+eDQcHw2VcSqUSGo0G3bp1A4/HQ3h4ODIyMtqSEyGEmFi8NwuL99rGj87OjmJ4u0jNOin93v48vLe/6Y/u9nDPYaUdO3Zg8+bNJtuSk5MREhKCsrIyxMXFYcGCBQCAwMBAxMbGwtXVFYGBgfD39zfZb82aNRg2bBh69+5t3KZUKiGXy43PHR0dUVRUdNeY1Go1cnJyjM9VKpXJc3thj3nZY04A5WXrTuRfAwDk5Bh+D1s7r25OfJy6VNbqGHIuG+4fnZOjb/KaxXNiZsrNzWWjRo1iBw8eZIwxVl1dzcLCwti1a9cYY4ytXLmSrV+/3mSfiIgIFhUVxaKioljfvn3ZpEmTmEKhYCNHjjS+Z9OmTeyTTz65a9vZ2dl3fW4v7DEve8yJMcrL1o1fe4SNX3vE+NzaeaXtz2M94r9mdeoGix2zJTm1Jm+zTkjn5+dj5syZ+OCDD4y9AKlUCplMBplMBgDw9PREZWWlyX4HDhwwPn7yySexYcMGSCQSiEQiFBYWomvXrjh8+DCdkCaE2LU+3s7QMyDnWg0e6tbJ2uE0y6zikJaWBo1Gg6SkJACAXC5Heno64uPjERMTA4lEAicnJ6xYsQIAEBMTg7Vr15qchL7d4sWLMXfuXOh0OoSHh6N///5mpkMIIbav8YqlrJLWFYfG9aHmPdP7Hu9sO7OKQ3p6erPbIyMjERkZ2WT7hg0bmmz76aefjI9DQ0Oxfft2c0IhhJB78vdwtHYIJnw7OUAuESL/uqJV+5kzN8JctLYSIcTuLX8xxNohmODxePBzl+FSRV2r9uMyD5ohTQghVuDn5og/y2utHcYdUXEghNi9+bvOYv6us9YOw0QPd0cU36iDpqHpZal3krQvG0n7stsxqltoWIkQYvcultneL/Qe7o7QM6Cwsg49PeX33gGAStvyQtJWVBwIIcQK/NwNJ8n/LK9tcXFY+nzf9gzJBA0rEUKIFfRwu1kcKmyvVwNQcSCEEKvo5CiGi4MIl1pxUprLNaJoWIkQYvf63Jx0Zmv83B1ttudAxYEQYvcSxwRbO4Rm9XCT4fifN1r8fi7zoGElQgixEj93R5RU10Ol1Vk7lCaoOBBC7N5bW0/jra2nrR1GEz3cHcEYcLmFM6UT9pxDwp5z7RyVARUHQojdu1qtwtVqlbXDaKLHzctZW3pSWiriQyri5mubzjkQQoiVGOc6tPCk9DvP9mnPcExQz4EQQqzEWSqCm6PYJtdYouJACCFW5Ofu2OJhJS7XiKJhJUKI3Xuou23ebQ0wrM56OL+sRe91lTV/w7T2QMWBEGL3uLhzmrm6dnZAqUINTYMeYuHdB3O4zIOGlQghxIp8XBzAGHC9xraupqLiQAixe7GfnkTspyetHUazfFwdAABXqurv+d65OzIxd0dme4cEgIaVCCH3gRsc3nu5tbxdpQCAq9X3Lg4+LtL2DseIigMhhFiRj4uh51BSde9hpdlPB7Z3OEY0rEQIIVbkIBagk0zUomElLlFxIIQQK/NxdcDVFhQHLteIomElQojde7Snu7VDuCsfVwcUtmDxPX+Plt1O1BKoOBBC7N6/nnrQ2iHclY+LFEcvVtzzfVzmQcNKhBBiZT6uDlCoGl
Cj0lo7FCMqDoQQu/fKhmN4ZcMxa4dxR9435zpcvccVSzM+P4UZn5/iIiQaViKE2D9bvNPa7R64OdehpLoegV2c7vg+Lu+FbVbPQaFQIDY2FlFRUZgwYQJOnzacPT9y5AhefPFFjB8/Hu+///4d9y8oKMDAgQOhVqsBAPv370dERAQmT56MyZMn49gx263whBBiaY2zpEvuccXSG8N74o3hPbkIybyew8aNGxEWFobo6GhcvHgRc+bMwe7du5GSkoLU1FQEBARg0qRJyMvLQ2Cg6aQNpVKJlStXQiy+tbpgVlYW4uLiMGLEiLZlQwghHZCnkxQCPu+ew0pcMqvnEB0djYkTJwIAdDodJBIJACAoKAhVVVXQarVQq9UQCAQm+zHGkJCQgNmzZ8PBwcG4PSsrCzt37sSkSZOwYsUKNDQ0mJsPIYR0OAI+D12cpffsOXC5RtQ9ew47duzA5s2bTbYlJycjJCQEZWVliIuLw4IFCwAAgYGBiI2NhaurKwIDA+Hv72+y35o1azBs2DD07m267Oyjjz6KiIgI+Pr6IjExEVu3bkVUVNQdY1Kr1cjJyTE+V6lUJs/thT3mZY85AZSXrevb2fBnYy62mJeLWI8LJRV3jesBqWGNqObeY/GcmJlyc3PZqFGj2MGDBxljjFVXV7OwsDB27do1xhhjK1euZOvXrzfZJyIigkVFRbGoqCjWt29fNmnSJOO+jQ4ePMjmz59/17azs7Pv+txe2GNe9pgTY5RXR2OLeb35+Sn22MqfzN6/JTm1Jm+zzjnk5+dj5syZ+OCDD4y9AKlUCplMBplMBgDw9PREZWWlyX4HDhwwPn7yySexYcMGMMbw3HPPYevWrejSpQsyMjIQHBxsbq0jhJAOycfVAd+euwq9noHP51k7HPNOSKelpUGj0SApKQkAIJfLkZ6ejvj4eMTExEAikcDJyQkrVqwAAMTExGDt2rUmJ6Eb8Xg8LFu2DDNmzIBUKkVAQADGjx/fhpQIIcTUhHUZAIBt04ZaOZI783GVQqtjKK9Vw9Op+aW5X9t8HADwySuD2z0es4pDenp6s9sjIyMRGRnZZPuGDRuabPvpp5+Mj8PDwxEeHm5OKIQQYhduX7r7TsXhkQDu1oiiSXCEEGIDGm/6U1JVj9Curs2+Jya8B2fx0PIZhBBiAx5o4UQ4rlBxIIQQG+DiIIKDSHDXO8JxuUYUDSsRQuze6BBva4dwTzweDz6u0rveSzoiyJOzeKg4EELs3uShftYOoUV8XB3uOqzEZR40rEQIsXv1Gh3qNba9MitguGLpio2sr0TFgRBi96I3HkP0Rttf7dnH1QHlSjXUDc0Xspc/OYqXPznKSSw0rEQIITai8XLWa9UqdHdzbPL66BAfzmKh4kAIITbi1uWszReHvw/pxlksNKxECCE2oqU3/eECFQdCCLER3i63Zkk3Z8K6DOM6Ue2NhpUIIXbvpYG+1g6hRaQiAdwcxSipbv6KJS7zoOJACLF74wZ1tXYILeZ9l4lwXOZBw0qEELtXWatBZa3G2mG0SBdnKa7doeeg1emh1ek5iYOKAyHE7k3fchLTt3Bz7+W28nKW4npN88Uh6pPfEfXJ75zEQcNKhBBiQ7o4S3GjTguVVgepSGDy2sQh3A0rUXEghBAb4nXziqXSGjW6uclMXnthAHcnpGlYiRBCbEgX55uzpJsZWuJyjSgqDoQQYkO6uNy5OHC5RhQNKxFC7F5UWHdrh9BiXjd7DtebuWKJyzyoOBBC7N6Y/twtWNdWzlIhHESCZnsOXOZBw0qEELtXUlVvE+sVtQSPx0MXF2mzxaFGpUWNSstJHNRzIITYvVnbzgAAtk0bat1AWsjLWdLssNLrm08A4CYPKg6EEGJjujhLceLyjSbbX33Uj7MYqDgQQoiN8XKRorRGDcYYeDyecfszfb05i4HOORBCiI3p4iyFRqdvsh4Ul2tEUXEghBAbc6eJcFyuEWXWsJJCoUBcXByUSiW0Wi3i4+MxYMAAHDlyBKmpqRAKhRg6dChmzZplsh9jDI8//jj8/PwAAKGhoZgzZw7OnDmDpKQkCAQChIeHY8aMGW1OjBBCGr3+mL+1Q2iVxiU0rteoEOzjYtzOZR5mFYeNGzciLCwM0dHRuHjxIubMmYPdu3cjJSUFqampCAgIwKRJk5CXl4fAwEDjfoWFhQgODsbatWtNjpeYmIjVq1eja9eumDp1KrKyshAcHNy2zAgh5KaIPl7WDqFVjBPhatQm27nMw6ziEB0dDbFYDADQ6XSQSCQAgKCgIFRVVUGr1UKtVkMgMF1RMCsrC9evX8fkyZMhlUoxf/58eHp6QqPRoFs3w42zw8PDkZGRQcWBEGIxBWVKAECAh9zKkbSMh9zwnVr6l+JQqjAMM3k6Sds9hnsWhx07dmDz5s0m25KTkxESEoKysjLExcVhwYIFAIDAwEDExsbC1dUVgYGB8Pc37QJ5eHhg6tSpGDlyJE6cOIG4uDh89NFHkMtvfWCOjo4oKiq6a0xqtRo5OTnG5yqVyuS5vbDHvOwxJ4DysnVvf1cCAEh5xjDDuCPk5SLlI6/wKnJyGozb/prH7Syd0z2Lw7hx4zBu3Lgm2/Py8jB79my8/fbbGDJkCGpqarBu3Trs27cPXl5eSElJwYYNG/Daa68Z9+nbt6+xNzFo0CBcv34djo6OqK2tNb6ntrYWzs7Od41JIpEgKCjI+DwnJ8fkub2wx7zsMSeA8rJ1sl+qAMCYS0fIy9u1DBqBg0mcc/huAICgQM8m729JTq0pHmZdrZSfn4+ZM2ciLS0Nw4YNAwBIpVLIZDLIZIb1xz09PVFTU2Oy35o1a4y9kNzcXPj4+MDJyQkikQiFhYVgjOHw4cMYNGiQOWERQojd8HKWolRhOqw0PNATw5spDO3BrHMOaWlp0Gg0SEpKAgDI5XKkp6cjPj4eMTExkEgkcHJywooVKwAAMTExWLt2LaZOnYq4uDgcOnQIAoEAy5cvBwAsXrwYc+fOhU6nQ3h4OPr372+h9AghpGPycpYg95rpD+zG9aF8XB3avX2zikN6enqz2yMjIxEZGdlk+4YNGwAAYrEYH3/8cZPXQ0NDsX37dnNCIYQQu+TlLEWZQg2dnkHAN8yS5nKNKFo+gxBi99588kFrh9Bqnk4S6BlQoVTD8+alrVzmQcWBEGL3wh90t3YIrdZYEEoVt4oDl3nQ8hmEELuXVVKNrJJqa4fRKrcmwt1aQqOwog6FFXWctE/FgRBi95bszcaSvdnWDqNVPJ0ME+FunyUd92Um4r7M5KR9GlYihBAb5HGzODTOigaAWZG9OGufigMhhNggkYAPd7nYpOcQ5u/GWfs0rEQIITbK00mK0tvOORSUKY3rRLU36jkQQoiN8nSW4Pptw0oLdv0BgOY5EEKIRbz9TOC932SDvJykyC65NUuayzyoOBBC7N7A7p2tHYJZvJwlKFeq0aDTQyjgc5oHnXMghNi9k5crcfJypbXDaDVPZ6lhlvTN+0bnXVMg75qCk7apOBBC7F7Kd3lI+S7P2mG0WuNch8ab/rz71Tm8+
9U5TtqmYSVCCLFRt8+S7gcXLBjF3T0oqDgQQoiNMhaHm1cs9e/qylnbNKxECCE2yl0uBo93awkNLteIouJACCE2Sijgw81RgrKbPQcu14iiYSVCiN17d0wfa4dgNi9nibHnwGUeVBwIIXYv2MfF2iGYzctZaly2m8s8aFiJEGL3Dl8ox+EL5dYOwyyeTrd6DplFVcgsquKkXeo5EELs3uqfLgDouHeEq6g1zJJO/iYHAK2t1CparRbFxcVQqVT3fnMHodVqkZOTY+0wLKotOUmlUvj6+kIkElk4KkJsl5ezBIwB5UoNlozty1m7dlMciouL4eTkBD8/P/B4PGuHYxH19fVwcHCwdhgWZW5OjDFUVFSguLgYPXr0aIfICLFNnk63JsLRPAczqFQquLm52U1hIKZ4PB7c3NzsqmdISEt4OTfeEU7N6RpRdtNzAECFwc7R50vuR7cvofHJrxcB0DkHQgixiOQX+1k7BLO5OYrB5wGlNSpO87CbYSXSOmq1Gjt27OCsvePHjyM3N5ez9gi5XYCHHAEecmuHYRahgA83ueFyVi7zsNuew4R1GU22jQ7xxuShfqjX6BC98ViT118a6Itxg7qislaD6VtOmrzGRTeOS2VlZdixYwfGjRvHSXs7d+7EqFGj0L17d07aI+R2P2RfBwBE9PGyciTm8XKWoFShwtGLFQCAMH+3dm/TbosD13bt2oWff/4ZKpUKZWVlmDJlCn788UdcuHABb7/9NiIiIvC///0Pmzdvhlgshp+fH5YsWYJZs2ZhypQpGDJkCM6ePYv09HR8+OGHSExMxKVLlwAAb731Fh5++GGMGTMGgwYNwvnz59GjRw+4ubnhxIkTEIvF+Pjjj6FSqfDOO+/gxo0bAICFCxciMDAQTz/9NB566CFcunQJbm5uWL16NdauXYv8/HysWbMGM2bMMOaRnZ2NpUuXQiAQQCKRYOnSpdDr9ZgzZw66dOmCoqIi9OvXD4sXL8bJkyexcuVKCIVCODs7IzU1FRKJBImJibh8+TL0ej3eeustODo64tdff0VWVhb+/e9/w9/f3yqfEbl/rb85Vt9hi4OTFFerVXj/wHkAHP1YZWaoqalh06ZNYy+//DIbP348O3XqFGOMsd9++4298MILbNy4cey9995rsp9er2fh4eEsKiqKRUVFsdTUVMYYY99//z176qmnjNt///33u7afnZ3d5Plft3Ft586d7NVXX2WMMfb111+zl156ien1epaRkcGmT5/OKisrWUREBFMoFIwxxpKSktinn37KDh48yOLj4xljjC1atIj99NNP7LPPPmMpKSmsrq6OVVZWslGjRjHGGHviiSfYiRMnGGOMjRgxgh08eJAxxtjLL7/MsrOzWUpKCvvss88YY4xdunSJTZw4kTHGWO/evVlJSQljjLEJEyaw06dPs6KiIjZu3LgmebzwwgvGv8sDBw6wN998kxUVFbEhQ4YwhULBGhoa2PDhw1lpaSlbsWIF+/jjj5lOp2MHDhxgV65cMcbOGDOJfd68eezQoUOsrq6uTX/P1v6c78RW42ore8lr/NojbPzaI8bnHS2v+J2ZbODS/exyeS27XF7b7HtaklNr8jar57Bx40aEhYUhOjoaFy9exJw5c7B7926kpKQgNTUVAQEBmDRpEvLy8hAYeOuG2IWFhQgODsbatWtNjpeVlYW4uDiMGDGibZXOyoKCDDficHJyQkBAAHg8HlxcXKBWq1FUVISePXtCLjeMFw4ePBiHDx/GpEmTsGrVKlRVVeHEiRNYuHAhli5dipMnT+L06dMQCARoaGgw9gaCg4MBAM7OzggICDA+VqvVOH/+PI4ePYpvv/0WAFBTY7gxeadOneDt7Q0A8Pb2hlqtvmMOpaWlxjwGDx6MtLQ0AEC3bt2MsXt4eECtViM2NhZr167FK6+8Ai8vL4SEhOD8+fM4efIkzp49CwAmsRNCzOPpJEVFrQberlKIBNycKjarOERHR0MsFgMAdDodJBLDdbhBQUGoqqqCVquFWq2GQCAw2S8rKwvXr1/H5MmTIZVKMX/+fPj7+yMrKws5OTnYvHkzQkJCMHfuXAiFHW/E626XWvr6+qKgoAB1dXWQyWQ4duwYevToAT6fj2eeeQaLFi1CREQEBAIB/P390aVLF7zyyivg8XhIT0+Hi4vLPdvw9/fHc889hzFjxqCiosJ4wrm5ffh8PvR6fZPtnp6eyM3NRe/evXH8+HH4+fnd8Rh79+7FCy+8gHnz5mHdunXYvn27MfbY2FioVCpj7DweD4yxu/79EUKa5+UsBWPAN39chZujhJNlQO75Dbxjxw5s3rzZZFtycjJCQkJQVlaGuLg4LFiwAAAQGBiI2NhYuLq6IjAwsMnYsoeHB6ZOnYqRI0fixIkTiIuLw86dO/Hoo48iIiICvr6+SExMxNatWxEVFXXHmNRqtckSDCqVCgKBAPX19a1K3pI0Gg0aGhpQX19v8lilUkGv18PBwQHTpk1DVFQU+Hw+unbtin/+85+or6/Hs88+i9GjR+Orr75CfX09xo4diyVLliAmJga1tbUYP3481Go19Ho96uvrodfrodfroVKpUF9fD51OB7VajejoaCxatAhffPEFamtrERsbi/r6ejDGjH83je+VyWRQq9VYvnw53nrrLWMeCQkJWLx4MRhjEAgEWLRokTGHxmM0tt2rVy+8/fbbkMlkEAqFePfdd+Hh4YElS5Zg0qRJUCqVxtj79OmDVatWYcWKFcYejzlsdUkRlUplk3G1lb3kVVdXBwDGXDpaXprqWgDAh/uzIRPx4faMT5P3WDynFg9A/UVubi4bNWqUcdy7urqahYWFsWvXrjHGGFu5ciVbv369yT51dXVMrVYbnz/66KNMr9ez6upq47aDBw+y+fPn37VtWzzn0B7aOj5vi+icQ8diL3lduVHHrty49W+vo+V1tqiKdZ/3Nfv898smedzO0ucczBq8ys/Px8yZM5GWloZhw4YBMCyKJpPJIJPJABiGJxrHvButWbPG2AvJzc2Fj4+h+j333HO4du0aACAjI8M4rk4IIZbg4+oAH9eOu05Z4xIaDXrGWR5mDeynpaVBo9EgKSkJACCXy5Geno74+HjExMRAIpHAyckJK1asAADExMRg7dq1mDp1KuLi4nDo0CEIBAIsX74cPB4Py5Ytw4wZMyCVShEQEIDx48dbLkNCyH1vb2YJAGBM/6bDMR2Bm1wCPg84fqkCXTs5YHigZ7u3aVZxSE9Pb3Z7ZGQkIiMjm2zfsGEDABivx/+r8PBwhIeHmxMKIYTc05ajlwF03OIg4PPg4SRBRkEFrteobbc4EEII4ZankxRyqRD/nhjKSXu0thIhhHQAXs4SVNVpjfd3aG9UHAghpAPwdJaiuLLOuE5Ue6PicB/bsmULAMO6UKmpqRY77pNPPnnXWdi3mzx5MgoKCky2FRQUYPLkyRaLhxB74OkkgULdgI9/uchJe3ZbHCasy8COE0UAAK1OjwnrMrD7dDEAoF6jw4R1GcYrGGpUWkxYl4Hvzl0FAFTWajBhXYaxQpcq
7PPuY3e6sIAQe5MeNRDpUQOtHUabNN70Z9FzfThpj05IW8iuXbuwc+dO6PV6/Otf/0JVVRU2bdoEPp+PgQMHYu7cuc2uYrp//378+OOPUCqVuHHjBv75z39ixIgR+O233/Dee+/BwcEBrq6uSE5ORk5ODtavXw+RSITi4mKMGjUK06dPx/79+7F+/XoIhUI88MADSElJQW1tbbMrtDZKT09HdXU1Fi1ahJCQEGRmZiImJgaVlZX4+9//jgkTJmD06NHw8/ODWCzG4sWLmz1efHw8CgsLoVar8Y9//AOjRo0CACxatAjFxYZivGbNGshkMixYsAB//vknAODVV181vhcwrOk0d+5cMMbg4eHBxUdG7iOdHcXWDqHNGuc6qBuaLnvTLlo8Xc6G2OIM6Z07d7LY2FjGGGM3btxgI0eONM4Gnjt3Ljt8+HCzq5ju3LmTRUdHM51Ox8rKytjw4cOZRqNhTzzxBLt06RJjjLFNmzaxFStWsKNHj7KRI0cyrVbLamtr2UMPPcQYY+zNN99kX3/9NWOMsd27d7Pq6uo7rtB6u0ceecQYe3R0NNPr9ayoqIiNHDmSMWZYBTYrK4sxxpo9nkKhYMOHD2cVFRWsoqKC/e9//zPud/z4ccaYYTXWffv2sU8//ZQlJSWxuro6plAoWGRkJKuoqGBRUVEsPz+frVixgm3bto0xxti+fftYVFRUs3/P1v6c78RW42ore8lr+/FCtv14ofF5R8zrj2LDLOnkfc3HbhMzpEnzevToAcCw+mxlZSWmTp1qHFMvKipCbGwsKisr8corr+C7774zLi44ePBg8Pl8uLu7w9nZGeXl5ZDL5fDy8jK+fuHCBQBAr169IBQKIZPJIJUaupnz58/H8ePHERUVhVOnToHP5+P8+fPYuXMnJk+ejISEhCaz1f+qT58+4PF48PDwgEp1axitMafmjieXy5GQkICEhATMmjULGo3GuF/fvn0BAO7u7lCpVCgoKMDgwYMBGCZNBgQEoKioyPj+CxcuICQkBADw0EMPmfkJENK8L08W48uTxdYOo008b/Ycvs+6xkl7NKxkQXy+odb6+vrC29sbGzZsgEgkwq5duxAUFNTsKqY+Pj7IysoCAJSXl0OpVMLT0xNKpRJlZWXo1q0bjh07dtfVUbdt24Y333wTbm5uePfdd3HgwIE7rtB6O3bbKql3Wu21MafmjldaWoqsrCx89NFHUKvVGDZsGMaOHdvs8QICAnDixAmEh4dDqVTi/Pnz8PX1Nb7u7++P06dPo3fv3vjjjz9a+ldOyH3DzdEwSzqSoxsWUXFoB507d0Z0dDQmT54MnU6HBx54ACNHjoRGo0F8fDxkMhlEIhGWLFmC48ePo7y8HK+88goUCgUSExMhEAiwbNkyzJkzBwKBAC4uLli+fLmx9/BXISEhePXVV+Hq6gpHR0cMHz4cw4cPxzvvvIPt27dDqVSa3O2tUUBAAObOnYtHHnnknjnFxsY2OZ6HhwfKysrw/PPPQyaTISYm5o5LrY8fPx4JCQmIjo6GVqvFjBkz4OZ261aHM2fOxKxZs/DNN9+YFA1CiIGAz4OnkxTV9VpO2uMx1vEW2c/JyTHekKbxOQCTbR3Frl27cPHiRcydO7fJa/X19XBw6LiLhTWnrTn99bO3FbYaV1vZS16N95RvvL1mR81r4Z4/0FkmxuynA5u81pKcWpM39RwIIaSDuHBdyVlbVBys7MUXX7R2CITYvU2vDrF2CBbBZR52VRwYY3e9jSbp2DrgCCixEQ5iwb3f1AFwmYfdXMoqlUpRUVFBXyB2ijGGiooK4+W7hLTGpxl/4tOMP60dRpvtPl1sXOmhvdlNz8HX1xfFxcUoKyuzdigWo9VqIRKJrB2GRbUlJ6lUSlcyEbN8fdawNM7koX7WDaSNth4zzA16YUD7/39gN8VBJBIZJ2zZi456RcXd2GNOhHBly2sPc9aW3RQHQgixdyIBd2cC7OacAyGE2LsdJ4qMq023NyoOhBDSQXC5RlSHnCF95swZSCQSa4dBCCEdilqtRmhoaIve2yGLAyGEkPZFw0qEEEKaoOJACCGkCSoOhBBCmqDiQAghpAkqDoQQQpqg4kAIIaQJm1w+Q6fTYeHChbh06RIEAgGWL18OjUaDhIQEMMbQu3dvJCQkQCAQYPv27di6dSuEQiGmT5+OJ554wuRYOTk5WLp0KQQCAcRiMVauXAl3d/cOn1ejvXv3YsuWLdi2bRvH2RhYMqeKigosXLgQNTU10Ol0SElJQbdu3Tp8Xjk5Ocbbv/r5+SEpKcl4b25bzgsAKisrMXHiROzdu7fJ3KLLly8jPj4ePB4PDz74IBITE62SlyVz6qjfF8Dd82rUqu8LZoMOHDjA4uPjGWOMHT16lMXGxrLp06ezY8eOMcYYmzdvHtu/fz8rLS1lo0ePZmq1mtXU1Bgf3+7ll19m2dnZjDHGvvjiC5acnMxtMrexZF6MMZadnc2mTJnCxo0bx2ket7NkTvPmzWP79u1jjDGWkZHBfv75Z05zuZ0l83rjjTfYwYMHGWOMzZ49m/3444/cJnOblubFGGO//PILGzt2LBswYABTqVRNjjVt2jR29OhRxhhjCQkJxv24ZsmcOuL3BWP3zoux1n9f2OSwUkREBJYuXQoAKCkpgbu7O1avXo3BgwdDo9GgrKwMbm5uOHv2LAYMGACxWAwnJyd069YNubm5Jsd67733jKuA6nQ6q86stmReN27cQGpqKhYsWGCNVIwsmdOpU6dw/fp1REdHY+/evRgyxHp377JkXkFBQaiqqgJjDLW1tRAKrddhb2leAMDn87Fx40a4uro2e6ysrCzjZ/T444/jyJEjnOTwV5bMqSN+XwD3zsuc7wubLA4AIBQKMW/ePCxduhQjRoyAQCDAlStXMHr0aNy4cQM9evSAUqmEk5OTcR9HR0colab3WPX09ARg+OLZsmULoqOjuUyjCUvkpdPp8M4772DBggVwdHS0RhomLPVZXblyBc7Ozti0aRO8vb2xfv16rlMxYam8GoeSRo4ciYqKCjz8MHfLLjenJXkBwKOPPopOnTrd8TjstjsvOjo6QqFQcBJ/cyyVU0f8vgDunpfZ3xdt7vu0s9LSUjZ8+HBWW1tr3LZ9+3b29ttvsx9++IElJiYat7/xxhvs7NmzTY6xb98+Nnr0aFZYWMhFyC3SlrwyMzPZqFGjWFRUFBs3bhwbMGAAW7ZsGZfhN6utn9UjjzzCKisrGWOMZWVlsddee42TuO+lrXmFhYWx8+fPM8YY27JlC1u0aBEncd/L3fK63RNPPNHsUMVjjz1mfHzgwAG2ePHi9gu2hdqaE2Md7/vids3lZe73hU32HPbs2YN169YBABwcHMDj8TBjxgz8+eefAAy/Uvh8PkJCQnDy5Emo1WooFAoUFBSgV69eJsf66quvsGXLFnz66afo2rUr16mYsFReISEh2LdvHz799FO899576NmzJ9555x1rpGTRz2rgwIE4dOgQAOD48ePo2bMnp7nczpJ5ubi4QC6XAzD8Mq2pqeE0l9u1NK+W6NO
nD37//XcAwC+//IJBgwa1S8z3YsmcOuL3xb2Y+31hkwvv1dXVYf78+SgvL0dDQwNef/11dO7cGSkpKRCJRHBwcMCyZcvg6emJ7du3Y9u2bWCMYdq0aRgxYgTy8/OxZcsWJCQkYOjQofD29oazszMAYPDgwfjXv/7VofNatGiR8ZjFxcWYPXs2tm/f3uFzunLlChYuXIj6+nrI5XKkpaXBxcWlw+d14sQJpKamQigUQiQSYenSpVa73Wlr8mr05JNP4ttvv4VEIjHJ69KlS0hISIBWq4W/vz+WLVtmvHKmI+bUkb8v7paXud8XNlkcCCGEWJdNDisRQgixLioOhBBCmqDiQAghpAkqDoQQQpqg4kAIIaQJKg6EEEKaoOJACCGkif8HkNGhYBDjNCoAAAAASUVORK5CYII=\n" + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "trial_data = one.load_object(eid, 'trials', collection='alf')\n", - "idx = 23 # trial index\n", - "ts = wh.last_movement_onset(t, vel, trial_data['feedback_times'][idx]);\n", + "ts = wh.get_movement_onset(wheel_moves.intervals, trial_data.response_times)\n", "\n", + "# The time from final movement onset to response threshold\n", + "movement_response_times = trial_data.response_times - ts\n", + "\n", + "idx = 15 # trial index\n", "mask = np.logical_and(trial_data['goCue_times'][idx] < t, t < trial_data['feedback_times'][idx])\n", "plt.figure();\n", "plt.plot(t[mask], pos[mask]);\n", - "plt.axvline(x=ts);\n", + "plt.axvline(x=ts[idx], label='movement onset', linestyle='--');\n", + "plt.axvline(x=trial_data.response_times[idx], label='response threshold', linestyle=':');\n", + "plt.legend()\n", "plt.show()" ] }, @@ -299,10 +356,24 @@ }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Session 2020-09-19_1_CSH_ZAD_029\n" + ] + } + ], "source": [ + "eid = 'c7bd79c9-c47e-4ea5-aea3-74dda991b48e'\n", + "print('Session ' + one.eid2ref(eid, as_dict=False))\n", + "wheel = one.load_object(eid, 'wheel', collection='alf')\n", + "pos, t = wh.interpolate_position(wheel.timestamps, wheel.position)\n", + "vel, acc = wh.velocity_filtered(pos, 1e3)\n", + "\n", "# Load the reaction times\n", "# brainbox.io.one.load_wheel_reaction_times\n", "rt = load_wheel_reaction_times(eid)" @@ -310,9 +381,18 @@ }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": "
", + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAQ8AAADOCAYAAAAg7IebAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8rg+JYAAAACXBIWXMAAAsTAAALEwEAmpwYAAApyUlEQVR4nO3deVhU9f4H8PfsA8wAAoIrLgiCoAJuWVFZcs2tAlMWBSp/LS559VKZdeWWGpqV5Vpa6jWvJmabS95uKi5ppaIgKIqikuICojAzwOzn98c4I+jADMPsfF7P4/MEZzjzOQ2+Ped7Pt/vYTEMw4AQQlqI7egCCCGuicKDEGIRCg9CiEUoPAghFqHwIIRYhMKDEGIRrqMLMEd+fj4EAoHJ1ykUCrNe5wroWJyTuxyLucehUCgQHR1tdJtLhIdAIEBERITJ1xUXF5v1OldAx+Kc3OVYzD2O4uLiJrfRZQshxCI2CQ+tVousrCwkJSUhLS0NZWVljbZv374dCQkJGDduHDZv3myLEgghNmaTy5Y9e/ZAqVQiJycH+fn5WLRoET7//HPD9sWLF2Pnzp3w9PTE6NGjMXr0aPj4+NiiFEKIjdjkzCMvLw9xcXEAgOjoaBQVFTXa3rt3b0ilUiiVSjAMAxaLZYsyiBOrkilwoULm6DJIK9jkzEMmk0EkEhm+5nA4UKvV4HJ1bxcaGopx48bBw8MD8fHx8Pb2bnZ/CoWi2YEbPblcbtbrXIG7H8vy3ytx4lo91o8LdlBVlnGXz8Uax2GT8BCJRKitrTV8rdVqDcFx9uxZ7N+/H3v37oWnpyfefPNN7N69GyNHjmxyf3S3xbUZOxbVsVpIlHUud4zu8rk47d2W2NhYHDx4EICuRyMsLMywTSwWQygUQiAQgMPhwM/PDxKJxBZlECdWU69CnVIDjZZWhHBVNjnziI+Px+HDh5GcnAyGYZCdnY0dO3agrq4OSUlJSEpKQmpqKng8HoKDg5GQkNDq95SrNMgrr0N4OI2huAJJvQoAIJOr4ePJc3A1xBI2CQ82m4158+Y1+l5ISIjhv1NSUpCSkmLV9zx66Tb+uecGtF5lSB/a3ar7JtZXczc8pAoVhYeLcpsmsUd7BWBQZw8s2FmMwqs1ji6HmKAPD5lC7eBKiKXcJjzYbBYyHw2Ev4iPqZvzDL+cxPmoNFrUKTUAAKmcwsNVuU14AICPkIMVqTG4Xi3H7G2nQMuzOqeGwS6j8HBZbhUeADCgmx/eero3/nv6Bv595LKjyyFGNAwPKV22uCy3Cw8AeDmuJ4ZHBCL752LkX6l2dDnkPo3CQ06Xl67KLcODxWLh4/H9ESgWYtqmE6ipo19QZyKhyxa34JbhAQC+nnysSI1BhVSON7YV0PiHE2k05kGXLS7LbcMDAGKC2+HtkRH49cxNrP3tkqPLIXdJGl22UHi4KrcODwB46ZHuGBEZhEW7z+LEX3ccXQ7BvTOPABGfwsOFuX14sFgsLH6+Pzr6CjF90wncqVU6uqQ2r6ZeBSGPDX8vAWQKGo9yVW4fHgDg48HDytRY3JIpkfltAbQ0GcuhaupV8PHgQSTk0piHC2sT4QEA/br44t3REdh3tgJrDl10dDltmiE8BFy6bHFhbSY8ACB9aDeM7tsRH/1yDscu33Z0OW2WPjzEQi7dqnVhbSo8WCwWFo7riy7tPPD65pOokikcXVKbJKlXG8KDOkxdV5sKDwDwFurGP27XKTFrK41/OEJNvQrehssWGjB1VW0uPAAgqrMPssb0wcGSSnx+oNTR5bQ5knoVvIU8iIU8yFVaqDRaR5dELNAmwwMAJg4Jxtj+nfDJ/87hj4tVji6nzdBoGUgVasOAKQDU0qWLS2qz4cFisbAwsS+6+3thxjcnUSml8Q970HeX6m/VAtRl6qrabHgAgEjAxcqJsaipV2FWTj4txmsHNQ3Cw5vCw6W16fAAgIiO3nj/mUj8duEWVuy74Ohy3F7D8BAJdGuXUqOYa2rz4QEASYO6IiGmMz7bW4IjF245uhy3ZggPz4aXLXTHxRVReEA3/rHguSj0DPDCjC35qJDKHV2S22p45iG+Gx505uGaKDzu8hJwsWriAMgUKvz9Gxr/sBWJvEF4CGjMw5VReDTQu4MY85+Nwu8Xq7B073lHl+OWauhui9ug8LjP+IFdMS62C5bvO49D5ysdXY7bqalXgc9hQ8Blw4PHAYfNomn5LorCw4j5z0WiV3sRZm7Jx00JjX9Yk+RuazqLxQKLxYJIQJPjXBWFhxGefC5WTYxFnVKD1785CTW1T1uNbkbtvaecigQ0Oc5VUXg0ITRIjA8SonD00m18uqfE0eW4Df10fD2xkNb0cFUUHs1IjO2CpIFdsTK3FPvPVTi6HLdgLDzossU1UXiY8P6zkQjvIMasnHxcr6l3dDku7/7wEAloKUJXReFhgpDHwcqJsVCqtXh980maPt5K+oWA9ERCHnWYuiibhIdWq0VWVhaSkpKQlpaGsrKyRttPnTqF1NRUpKSkYMaMGVAonHtGa0h7EbIT++J42R18/L9zji7HZWm1DCRyI5ctdObhkmwSHnv27IFSqUROTg4yMzOxaNEiwzaGYTB37lwsXLgQ33zzDeLi4lBeXm6LMqzq2ejOSB0SjNUHLmJv8U1Hl+OSpAo1GAbwbhgetAiyy7JJeOTl5SEuLg4AEB0djaKiIsO2S5cuwdfXFxs2bMCkSZNQXV2Nnj172qIMq8sa0wd9Onoj89sClFfT+EdL6dfy8L5vzEOh1kKppstBV8M1/ZKWk8lkEIlEhq85HA7UajW4XC7u3LmDkydPYu7cuejWrRtee+01REVFYejQoU3uT6FQoLi42OT7yuVys17XGpkP+WD6Tikmrz2MxSM6gcdh2eR97HEs9qI/lgtVustTadVNFBfLAAC1NTUAgBOFZ+Aj5DisRnO5y+dijeOwSXiIRCLU1tYavtZqteBydW/l6+uLbt26oVevXgCAuLg4FBUVNRseAoEAERERJt+3uLjYrNe1RgSAjz3aY9rmE9h+GfjnGNu8nz2OxV70x3L7wi0A5YgM7YGInv4AgNN1V4GjVegU3BPB/p6OLdQM7vK5mHsczQWMTS5bYmNjcfDgQQBAfn4+wsLCDNu6du2K2tpawyDq8ePHERoaaosybGZ0v45IH9oNX/12Cf87fcPR5biMhpPi9PTrmErojovLscmZR3x8PA4fPozk5GQwDIPs7Gzs2LEDdXV1SEpKwgcffIDMzEwwDIOYmBg88cQTtijDpt4dHYGTf1XjjW8LsKujN7r6Of+/mo5mLDy8aU0Pl2WT8GCz2Zg3b16j74WEhBj+e+jQodi2bZst3tpuBFwOVqbGYvTyQ5i++QS+fe1h8LnUNtMco2ce+vCgOy4uh37bWyHY3xMfPd8fBVdrkP2z6w+i2ZqkXgUumwVP/r2BUf1li5Sm5bscCo9WejqqA158pDv+feQydhded3Q5Tk3fms5i3btDJRbeXQSZzjxcDoWHFcwZGYH+XX3x1rZTKKuqNf0DbZT+MZMN
6dcxpWn5rofCwwr4XDZWpMSAxQKmbT4BuUrj6JKckrHwEHDZ4LJZ1GXqgig8rKSrnyc+mRCNonIJPthF4x/GSO6bUQvoVq5v7bT86zX1OHrpdmvLIy1E4WFF8X2C8HJcD2z8oww7Cq45uhync/90fD1RKyfHffxLCZLW/I6faczJrig8rOytp8MRG+yLOd8X4tItGv9o6P4lCPVEAl6rLlsKy6vBMMDMLfn00HI7ovCwMh6HjRWpseByWJi2icY/9BiGgUSuNnrmoVuK0LJbtXVKNS5UyJAxtBuC/T3x8objKL4uaW25xAwtCg+tlmY+mqOTrweWTOiPM9clmLfzjKPLcQq1Sg00WsZ4eLRiNbHi61JoGeDR0PbY8NJgeAm4eGH9UVy9U9fakokJJsNj9+7d2LVrF3744Qc88sgjWLt2rT3qcnlPhgfh1cd7YvOff+GnfOdfr8TWjHWX6rVmzOP0Nd2s3KjO3ujs64ENLw1GnVKD9HVHcadWaXnBxCST4bFu3To8/PDD2L59Ow4cOIDc3Fx71OUW3vhbbwzs1g7vfF+I0kqZo8txqJq6psOjNSuoF16tQYCIjw7eQgC6p/59lT4QV+/U46UNx1CnpFvAtmIyPAQCAQDAy8sLfD6/0VR70jweh43lqTEQ8DiYtukE6pVtd/xDf+bhLTRy5iHgWXyrtuiaBJGdfBp1rQ7p6Y9lyTEouFKN6ZvpuTu2YjI8unTpgnHjxmHcuHFYsWIF+vXrZ4+63EZHH934x9kbUry3/bSjy3GYGiOriOmJhVwoNVoo1C0LV7lKg/M3pYjq7P3AtqejOmDes1HYd7YC7/xQCIahB5dbm8lZtYsWLUJtbS28vLwQFRWF9u3b26Mut/JE70BMGxaClbmlGNLTD4mxXRxdkt1JmhnzEDd44LVAZP5qYuduSKHWMujb2cfo9kkPdUOFVIFle88jUCzEGyN6W1A5aYpZd1u8vLwAgIKjFWYND8OQHn5494cinL8pdXQ5dmcYMPU0dtli2bT8oruDpZGdjIcHAMwaHoqUwV2xIvcCNhy53KL9k+ZRn4edcDlsLEuJgSefg6mbTrS5gbyaehXYLEDEN9YkZtmCQEXlNfDx4KFLO48mX8NisTD/2SgMjwjCeztOUxeqFTUZHitWrGi06jlpvSBvIZYmx+BCpQxzf2xb4x/6SXFs9oMLRusXBGrpUoRF5RL07dx4sNQYLoeN5SkxiA1uh5lb8vF7KXWhWkOT4TFs2DDs378fU6ZMQVZWFvbs2YP6enrcQGs9GhqA158MxXcnrmLr8SuOLsdu7n/YU0PeFqzpoVRrce6GFJFGBkuN8eBzsDZjIIL9PfHK19SFag1NDphGRkYiMjISAHDr1i3s378f7777LjQaDZYuXWq3At3R358KxfHLt5H1UxH6d/FF7w5iR5dkc01NigMsu2wpuSmFUqNFVDPjHffz9eTj65cGI3HVEWSsO4rvpjxMa8+2glljHgEBAXj++eexZMkSfPzxx7auye1x2Cx8lhwNkYCHqZvyUNsGFsJpNjwa3G0xl76ztKk7LU3p5OuBrycPhlylQcb6o7hNXagWa/GAKY9n/BeAtEygWIhlKdG4dKsW77aBPoSaepXRBjHg3q3alpx5FJVLIBZwEWzBmUNYkBhfZQzSdaH+m7pQLUV3Wxzo4ZAAzBwehh/zr2HLMfce/5AYWUVMT8DlgM9ht+jMo7C8Bn06eRsdgDXH4B5+WJ4Sg1NXqzFt0wmoqAu1xZoMD41GA6VSienTp0OlUkGpVEKhUCA9Pd2e9bm9acN6IS40AP/afhpnrrnnIB7DMM1etgD6yXHm3W1Ra7Qovi5p8SXL/UZEdsD856KQe64S73zv/md/1tbkgOl3332HL774Ardu3cLTTz8NhmHA4XAwYMAAe9bn9jhsFj5NisboZYd0j7Cc/ohhRXF3oVAzUGmMT8fXa8nkuAuVMijUWkS1MjwAYOKQbqiQKLB073kEegvw5ojwVu+zrWgyPCZMmIAJEyZg06ZNmDhxoj1ranMCRAIsS45Bypd/YM73hVieEuPokqxKqtRdEjR75iEwfx3TonLdGZo1wgMAZg4PRYVUgZW5pWgvEuCFR3pYZb/uzuSYx86dO+1RR5s3pKc/Mv/WGztPXcd//vzL0eVYVa2Z4WHu4xeKymvgyeegR4CXVerTdaFGIr5PEN7feQa7TlEXqjlMTozz9PREdnY2evToATZblzVJSUk2L6wtmvJ4CI5dvo35O87gk5Ed4frPYteR3V2KoPnLFh7Kq81rQiwqr0Gfjt7gWDhYaoy+C3XSV39iVk4+2nnx8HBIgNX2745MnnnExMTA29sbVVVVqKysRGVlpT3qapPYbBaWTIiGv4iP7AM33ebJ8TKF6TMPsZkDphotgzPXJVa7ZGlIyOPgq4yB6ObviVe/znPbAWxrMXnmMX36dHvUQe7y8+JjeUoMJqz+HW9/dworU2NNzt1wdtYc87h0qxZ1So1NwgPQdaFueGkwxn1+BBnrj+J76kJtEvV5OKGB3f3wYqwffi68ga9/L3N0Oa0muxse3kYeu6Cnv9ti6nZpUfm9NUttpdPdtVAVKg0y1h1FlUxhs/dyZRQeTiox0gdPhQdiwa4zOHW12tHltIp+zKO5W9AiIRdqLQOFuvlmraLyGgi4bPRqL7JqjfcLCxJj7QuDUF5dj5c2HKcuVCNMhodMJsPPP/+MH3/80fDHFK1Wi6ysLCQlJSEtLQ1lZcb/9Zw7dy7NlWkCm8XCJxP6I1AsxLTNJwyL6bgimUILsZDb7ACnWGDe/JaiazWI6OgNLsf2/+4N6u6HZSkxKLxajanUhfoAk5/A1KlTsW/fPpSWlqK0tBQXL140udM9e/ZAqVQiJycHmZmZWLRo0QOv2bJlC0pKSiyruo3w9eRjeWoMrlfL8da2ApftgJQptc2OdwD3zkqae/iTVsvgdLnEppcs9xsR2QELnuuL/ecq8fZ31IXakMkBU4ZhWnx2kJeXh7i4OABAdHT0A4sKnTx5EgUFBUhKSjIrjNqy2OB2eHtkOBbsKsa6w5cx+VHXa2AyJzzMmZb/1+06SBXqVrelt1TqkGBUSOX4bM95cJS+WNzHrm/vtEyGR+/evVFQUICIiHtdB3w+v9mfkclkEInuXZNyOByo1WpwuVxUVFRgxYoVWLFiBXbv3m1WkQqFAsXFpp88L5fLzXqdK2h4LA/7Mxja1RPZu87AT1uN8PZCB1fXMhK5GgKuttnPpuqmrsfjdMlF8KTGlxU8cEn37BtPxW0UF9v3ESB/68TgXJgYW4uq4ff9H3g2wr4BZm3W+LtiMjyOHj2Kffv2Gb5msVjYu3dvsz8jEokaPd9Fq9WCy9W91X//+1/cuXMHr7zyCiorKyGXy9GzZ08kJiY2uT+BQNAovJpSXFxs1utcwf3H8kX3UIxefggfH7mDXTMeha9n8wHuTOp+vIIeHXyb/Wy0PjXAL9fRLrATIiI6GH3Nj5eLwefcwoiH+oHPtf9Y/4pwBpM+34/Vx6oQ1SsYY/p1snsN1mLu35X
mAsZkeGzfvr1lVQGIjY1Fbm4uRo0ahfz8fISFhRm2paenG2bmfv/997h48WKzwUF0fDx5WJkai+e/OII3vi3Al+kDXab/w6wxD8HdpQibuWw5XS5B7w5ihwQHoJvEOPuxQHxwuAb/yCmAnycfD/dqu12oJj+FvXv3YvLkyUhPT0daWhrGjh1rcqfx8fHg8/lITk7GwoULMWfOHOzYsQM5OTlWKbqt6t/VF++MisCe4gp8ech1xorMGvPQLwjUxIApwzAoLK+x62CpMQIuG1+lD0L3AE+8sjHPsKJZW2TyzGPlypWYO3cutmzZgiFDhuDw4cMmd8pmszFv3rxG3wsJCXngdXTG0XIvPNwdRy/dxof/PYcB3dphQDc/R5fULLlKA6WGaXIhID1TA6ZX79Sjpl5ls87SlvDx5GHD3bVQX1h/rM12oZo882jXrh1iYnRTxBMTE3Hz5k2bF0WaxmKx8OHz/dDZ1wPTN590+jU4Jc08ZrIhPpcNAbfp1cT0/8K3ZMFjW+ro44GvXxoMpVqL9DbahWoyPHg8Ho4dOwa1Wo1Dhw7RxDgn4C3kYdXEWFTJlPjH1nxotY7pPVBrtLhRI0fBlWocvXTbaA9ETTOPmbyfWNj0tPzC8hpw2SynWmk+NEiMtRkDca1atxZqW1jIuiGTly3vv/8+Ll68iClTpmDp0qWYMWOGPeoiJkR19sHcMRGY+9NpfHGwFFOf6GXz9zxQUokNRy7jpkSOmxIFqmoVaJgXS5Oj8Wx050Y/05LwaG5yXFG5BKFBYgh55j/L1h4GdvfDitRYvLrxOKZtPoEv0weCZ4fuV2dg8iiDgoIAACdOnMC0adMwfPhwmxdFzDPpoW4Y068jPvlfCY5eum3T97pWXY9pm06g+LoEQd5CxPcJxIwnQ/FBQhS+Sh+IqM7eWLT7LOqVjZ90r19WwLwzD57RDlOGYVBUXoOoTo4dLG1KfJ8gfJCg60Kd/d2pNtOFavLMY8mSJbhx4wZKS0vB4/GwZs0aLFmyxB61ERNYLBYWJvbF6WsSvP7NCeyaEYcAkcDq78MwDOZ8XwiNlkHOK0MR7P/g4KC3Bw8TVv+O1QdLMXP4vVvzLT7zMHLqf0MiR1Wt0ikGS5uSMjgYFRIFPt1TgkCxEG+PdP+1UE2eeeTl5WHx4sXw9PREQkICrl69ao+6iJnEQh5WpMbgTp0Ks3JsM/6xLe8qDpRU4u2R4UaDA9A9ymB0v4744kAprjVYEaymrgXh0cQiyNZes9RWZjzVCxOHBOOLA6VY99slR5djcybDQ6PRQKFQgMViQaPRGJYiJM4jspMP3hsbiUPnb2Fl7gWr7vtGjRzzdp7B4B5+SHuoW7OvffvpcGgZYPF/zxq+V1OvCwNvocmT3CZXUC8srwGbBUR0dJ7BUmNYLBbmPRuFEZFBmL/rDHYUXHN0STZlMgkyMjKQmJiI8+fPY/z48UhNTbVHXaSFUgZ3xbPRnfDpnhIcKb1llX0yDIN3fyiESqPF4nH9TD5gqaufJ16J64kf86/hxF93AOguWzx4LLOm0IubuGw5XV6DkPYiePJNB5CjcdgsLE2OwaBufvjH1nwcuWCdz8IZmfxER44cic2bN2P16tVYu3YtnnnmGXvURVqIxWIhO6Evugd44e9b8lEpbX3fwY/55dh7tgJv/K03upu5UvmUJ0IQKBZg3o4z0Gp1D3sS8c07W9U9+OnB1cSKrtXYfSZtawh5HHyZPhA9A0R4ZWOeYfUzd2PyU923bx/effddLF++HG+99RZefvlle9RFLOAl4GLVxFhI5Sr8fctJaFox/lEhleO97WcQG+yLF1vwHBMvARdvPR2O/CvV+Kmg/G54mHd7VSzkQaNlUK+6d8emQqq7LRzpQuEB6LpQ//3SIHgLuXhh/TFcuV3n6JKszmR4fPjhh0hLS0NmZqbhD3Fe4R28Me+ZKBwprcLyfect2gfDMPjnD0WoV2mw+Pn+LX7EQWJMZ/Tr4oMPd5/DTYnc/DMPfYt6g3GP03cHS13pzEOvo48Hvp48GCqNe3ahmvxUQ0NDMWTIEISHhxv+EOc2fmAXJMZ2xtK953HYgmvuHaeu439nbiIzPgy9Alu+ViibzULWmD64IZGjsLzG7PAQ3x1UbdhlWnj3lL+Pk/Z4mNIrUIx1L7hnF6rJT/Wpp55CUlIS5syZY/hDnBuLxcKC56LQq70If99yEhUSudk/e0umwL9+KkL/rr74v7ieFtcwsLsfxvbXrXchErQwPBqceRSV16BngJfhrMQVDeim60ItLK/BFDdaC9Xkp7px40ZkZGRg1KhRhj/E+XnydeMftQoNZmw5CbWZv7D/+uk0ahUafPx8v1Y/ke3tkeEQ8tgI8DTvL75Iv6ZHw8uWa7Z5wJO9xfcJQnZCXxwsqcTsbaccNh/Jmkx+qgEBARQYLio0SIwFz0Uh89sCLN17Hpl/693s638uvI5dhdfx5ojeCA1qfU9FZ18P/Drrcdy6al7D1L1p+brGstu1SpRX1yPj4eb7S1xF8uBgVEgVWPJrCdp7CzBnpGuvemcyPIRCISZPnow+ffoYVq76xz/+YfPCiHWMG9AFf16qworcCxjU3Q+PhbU3+rrbtUrM/bEIUZ298cpjll+u3K+rnydkNy27bDE84MlJpuFbw+tP9kKFVI7VBy4iUCx0yQWt9UyGx7Bhw+xRB7Gh95+JQsGVGszMycfPM+LQwefBBZTf234aErkK/3l+iMNmhT4QHnfX8HC127TNYbFYeP+ZKNySKjF/5xm0FwvwTH/XXAvVZHgkJCTYow5iQx58DlZOjMUzK37DjG9OYvPLQxp1fP5y+ga2F1zDrOFhiOjouLsaXvetJlZUXoNgP0+z5sW4Eg6bhc+So5G+7igyt+bDz5OPR0Ndby1UmqjSRvQKFCE7oS+OXr6NT36997Ct6jol3v2hCBEdvTF12INLRdoTj8OGB4/TIDzs+4Ane2rYhfrqxuMu2YVK4dGGPBfTGSmDu+Lz/aXIPVsBAJi34wyq65T46Pl+TrGIjW5mrQo1dSr8dbvOLe60NMXHQ7cWqo8HDy+sP4a/qlyrC9Xxvy3Erv41NhIRHb0xa2s+/vNHGb4/WY4pT4Q4zV9SsUA3s9bZ1iy1lQ4+Qnw9eTDUWi3S1/2JWy7UhUrh0cYIeRysTI2BSq3FP38sQu8gMaY/afslDM0lvjs5Tj9Y6iyhZku9AsVYmzEINyRyl+pCpfBog3q2F+Gj8f0R5C3AR+P7QcB1nnVB9QsCFZZL0NnXA35ervNkvNYY0K0dVqTE4vQ1CV77Tx6UaufvQqXwaKNG9e2IP+Y8hX5dfB1dSiP6RZBPl9cg0kXns1hqeJ8gZCdE4dD5W5j9nfN3obruhAHSas74uEqxkIcbEjlq6lVIiOls+gfcTNIg3Vqon/xagkCxAHNGOW8XKoUHcSoiAdewaHJbGO8wZvqTvVAhVWD1wYtoLxa0aoKiLVF4EKcibrDWaaSb9niYwmKx8N4zkbglU2DBrmK0FwseeB6OM6AxD+JU9JPjgrwFCBQ/2EbfVnDYLHyaFI
3BPfzwxrcFOHTe+Z7USOFBnIpYqGtFd/f+DnPou1BD2ovwmhOuhUrhQZyK6O5liztNhmsNfReqrycfL6w/irKqWkeXZEDhQZyK/vkuzvpoSUcI8hZiw0uDodYySF931Gm6UCk8iFMZGuKPrDF9MCw80NGlOJVegSKse2EQbkrkeHH9MaPPt7E3m4SHVqtFVlYWkpKSkJaWhrKyskbbd+7cifHjxyM5ORlZWVnQap2/m47Yh4DLwUuP9nCKSXrOJja4HVamxuLMdQmmOEEXqk0+oT179kCpVCInJweZmZlYtGiRYZtcLsdnn32Gr7/+Glu2bIFMJkNubq4tyiDE7TwVEYSFiX1x6PwtvLWtwKFdqDbp88jLy0NcXBwAIDo6GkVFRYZtfD4fW7ZsgYeHBwBArVZDILD+k90JcVcTBnZFpVSBj345h0BvId5xUBeqTcJDJpNBJLr3vA8OhwO1Wg0ulws2m42AAN2qSRs3bkRdXR0eeeSRZvenUChQXFxs8n3lcrlZr3MFdCzOyVmOZVgQg7O9vbHm4EUwddVIjPRt0c9b4zhsEh4ikQi1tfduKWm1WnC53EZff/TRR7h06RKWL19uco6FQCBARITpdC0uLjbrda6AjsU5OdOxfBbOQPPNCXx5/AaiegW3qAvV3ONoLmBsMuYRGxuLgwcPAgDy8/MRFhbWaHtWVhYUCgVWrVpluHwhhLQMh83CkgnRGOKgLlSbhEd8fDz4fD6Sk5OxcOFCzJkzBzt27EBOTg5Onz6Nbdu2oaSkBBkZGUhLS8Ovv/5qizIIcXtCHgdfZtzrQi28ar8uVJtctrDZbMybN6/R90JC7i2ue/bsWVu8LSFtkrdQ14WauOoIXlh/FN9NeRjdA7xs/r50M50QNxDkrVsLVcvoulArpbbvQqXwIMRNhLQXYe0Lg1AhlePFfx+1eRcqhQchbiQ2uB1WTYxF8XUpXtto2y5UCg9C3MyT4bou1N8u3MKbNuxCpZXECHFDDbtQ24sE+OeYPlZ/DwoPQtzU1CdCUCGR46vfLiHIW4iXH7PuWqgUHoS4KRaLhayxkbglU+KDn4sRIOYjIaaL1fZP4UGIG+OwWfhkQn9U1Srw5ren4O8lwGNh7a2ybxowJcTNCXkcrEkfiF6BIrz2nzyculptlf1SeBDSBui7UNt58vHi+mO4JlG1ep8UHoS0EQ27ULMP3Gz1/ig8CGlDQtqLsPnlhzC6d+sXmKYBU0LamIiO3kBY68ODzjwIIRah8CCEWITCgxBiEQoPQohFKDwIIRah8CCEWITCgxBiEQoPQohFKDwIIRah8CCEWITCgxBiEQoPQohFKDwIIRah8CCEWITCgxBiEQoPQohFKDwIIRah8CCEWMQm4aHVapGVlYWkpCSkpaWhrKys0fZ9+/Zh3LhxSEpKwtatW21RAiHExmwSHnv27IFSqUROTg4yMzOxaNEiwzaVSoWFCxdi3bp12LhxI3JyclBZWWmLMgghNmST8MjLy0NcXBwAIDo6GkVFRYZtpaWlCA4Oho+PD/h8PgYMGIDjx4/bogxCiA3ZZPV0mUwGkUhk+JrD4UCtVoPL5UImk0EsFhu2eXl5QSaTNbs/hUKB4uJis97b3Ne5AjoW5+Qux2LOcSgUiia32SQ8RCIRamtrDV9rtVpwuVyj22praxuFiTHR0dG2KJMQ0go2uWyJjY3FwYMHAQD5+fkICwszbAsJCUFZWRmqq6uhVCpx/PhxxMTE2KIMQogNsRiGYay9U61Wi/feew8lJSVgGAbZ2dk4c+YM6urqkJSUhH379mHlypVgGAbjxo3DxIkTrV0CIcTGbBIehBD3R01ihBCLUHgQQiziFuHx66+/IjMz0+i2rVu3IjExERMmTEBubq6dKzOfXC7H66+/jtTUVLz88su4ffv2A69ZsGABEhMTkZaWhrS0NEilUgdUapw7dRWbOpb169dj9OjRhs/h4sWLDqrUPAUFBUhLS3vg+63+TBgXN3/+fGbEiBHMzJkzH9hWUVHBjBkzhlEoFIxEIjH8tzNat24ds2zZMoZhGGbnzp3M/PnzH3hNcnIyU1VVZe/SzPLLL78ws2fPZhiGYU6ePMm89tprhm1KpZIZPnw4U11dzSgUCiYxMZGpqKhwVKkmNXcsDMMwmZmZTGFhoSNKa7E1a9YwY8aMYcaPH9/o+9b4TFz+zCM2Nhbvvfee0W2nTp1CTEwM+Hw+xGIxgoODcfbsWfsWaKaGXbmPPfYYfv/990bbtVotysrKkJWVheTkZGzbts0RZTbJnbqKmzsWADh9+jTWrFmDlJQUrF692hElmi04OBjLly9/4PvW+Exs0iRmC99++y02bNjQ6HvZ2dkYNWoU/vzzT6M/Y0k3qz0YOxZ/f39DrV5eXg9cktTV1WHSpEl48cUXodFokJ6ejqioKISHh9ut7uZYu6vYkZo7FgAYPXo0UlNTIRKJMH36dOTm5mLYsGGOKrdZI0aMwNWrVx/4vjU+E5cJj/Hjx2P8+PEt+hlLulntwdixTJ8+3VBrbW0tvL29G2338PBAeno6PDw8AAAPPfQQzp496zThYe2uYkdq7lgYhkFGRoah/scffxxnzpxx2vBoijU+E5e/bGlOv379kJeXB4VCAalUitLS0kbdrs4kNjYWBw4cAAAcPHgQAwYMaLT98uXLSE1NhUajgUqlwokTJxAZGemIUo1yp67i5o5FJpNhzJgxqK2tBcMw+PPPPxEVFeWoUi1mjc/EZc48WmL9+vUIDg7GU089hbS0NKSmpoJhGMyaNQsCgcDR5RmVkpKC2bNnIyUlBTweD5988gmAxscyduxYTJgwATweD88++yxCQ0MdXPU98fHxOHz4MJKTkw1dxTt27DB0Fb/99tuYPHmyoas4KCjI0SU3ydSxzJo1C+np6eDz+Rg6dCgef/xxR5dsNmt+JtRhSgixiFtfthBCbIfCgxBiEQoPQohFKDwIIRah8CCEWITCg9hMTk4OVCqVw/dBbIPCg9jM6tWrodVqHb4PYhtu2SRGrEMul2POnDm4du0aVCoV3nnnHeTk5ODKlSvQaDR48cUXMWrUKKSlpSE8PBznz5+HTCbD0qVLceTIEVRWVmLWrFnIyMjAxx9/DB6PhwkTJkAoFGLTpk2G91m6dCkAYObMmWAYBiqVCu+//z5OnTpl2MeqVasc9b+BNKX1k36Ju1q/fj3z0UcfMQzDMOfOnWNWrlzJfPDBBwzDMIxUKmXi4+OZqqoqZtKkScz27dsZhmGYJUuWMKtXr2YYhmGGDRvGyOVy5o8//mDGjh1r2O/nn3/O1NXVMQzDMHPnzmV++uknJjc3l5k6dSpTX1/PFBYWMsePH2+0D+J86LKFNOnixYuGx16EhYWhsrISgwYNAqCbWBUSEoIrV64AAPr06QMA6NChg9FnffTo0cPw3/7+/pg9ezbmzJmDc+fOQa1W47HHHsOgQYMwdepULFu2DGw2/Wo6O/qESJNCQkJQWFgIALhy5Qp27dplWPNBJpOhpKQEXbp0afLnWSyWYbxCHwZSqRTLli3Dp59+igULF
kAgEBgmmAUGBmLdunWYMmUKlixZ8sA+iHOh8CBNSk5OxtWrVzFp0iS89dZb+Oqrr1BdXY2UlBSkp6dj+vTp8Pf3b/LnBw4ciFdeeQVMg+lTIpEIsbGxSEhIwMSJEyEUClFRUYHw8HBs3boVSUlJWLx4MV599dUm90GcA02MI4RYhM48CCEWofAghFiEwoMQYhEKD0KIRSg8CCEWofAghFiEwoMQYhEKD0KIRf4fnVQ6RBsGGD4AAAAASUVORK5CYII=\n" + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "trial_data = one.load_object(eid, 'trials', collection='alf')\n", "\n", @@ -344,10 +424,20 @@ }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001B[36m2023-07-10 12:11:44.796 INFO [training_wheel.py:343] minimum quiescent period assumed to be 200ms\u001B[0m\n", + "\u001B[1;33m2023-07-10 12:11:44.831 WARNING [training_wheel.py:367] no reliable goCue/Feedback times (both needed) for 114 trials\u001B[0m\n" + ] + } + ], + "source": [ + "wheel_moves = one.load_object(eid, 'wheelMoves', collection='alf')\n", "firstMove_times, is_final_movement, ids = extract_first_movement_times(wheel_moves, trial_data)" ] }, @@ -361,9 +451,18 @@ }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": "
", + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYoAAAESCAYAAADjS5I+AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8rg+JYAAAACXBIWXMAAAsTAAALEwEAmpwYAABUsUlEQVR4nO2dd1hUx9v+b0BAKaLYNfZevvmaWGINKlYUEYmIvUbFXqNBVCIW7A1BjRqNGCvYu2hQf9Y3qFEEFXtFBSOg0naf3x++u29Yhd09O3P2oPO5Lq9LYOc+9/PMGR7OzDlzLIiIIBAIBAJBDlia24BAIBAIlI0oFAKBQCDIFVEoBAKBQJArolAIBAKBIFdEoRAIBAJBrohCIRAIBIJcEYVCkOd5/Pgxqlevjt69e3/0sylTpqB69epISkoygzPpPHr0CKNGjcrx55cvX8b48eMBAMHBwdixYwcAYNOmTWjatCk8PDzg4eGBnj17atscOXIEXbt2RadOnTBkyBC8fv0aABAXFwcfHx906tQJPj4+OHfuHMfIBHkRUSgEnwW2tra4d+8enjx5ov3eu3fvEB0dbUZX0nn69Cnu3buX488vXbqEhg0bAgAuXLiA7777DsCHAjJlyhTs2bMHe/bswR9//AEAuHbtGgIDA7F8+XLs378fFSpUwJIlSwAAw4cPR7du3bB//36sWLECAQEBePnyJecIBXkJC/HAnSCv8/jxY7i7u8PLywtFixbFsGHDAAC7d+/GzZs3sX79epw7dw7Ozs7Ytm0bNm3aBEtLSxQtWhTTpk1D0aJF4eLigiNHjqBYsWIAgG7dumHkyJFo3LgxFi5ciEuXLkGlUqFWrVrw9/eHg4MDWrVqhU6dOuH8+fN48+YNBg8ejOjoaMTExCBfvnwIDQ1FiRIlkJCQgJkzZ+LZs2fIzMxEx44dMWzYMDx+/Bj9+/eHi4sLrl69iuTkZEyaNAmtWrVC+/btkZCQgAYNGmDdunXaWCMjI7F8+XLcv38fZcuWhYWFBR4+fIgKFSogIiICrVu3RtWqVfHs2TMUL14cP/30E6pXr47Zs2fDzs4O48aNAwCkpKTgn3/+gb29PZo2bYrr16/DysoKADB69Gi0aNECXbt2lbknBYqFBII8zqNHj6hu3bp07do1at++vfb7/fr1o5s3b1K1atUoMTGRzp49S61bt6bExEQiIgoPD6cOHTqQWq2mn376idauXUtERPHx8dSiRQtSqVS0YsUKCgoKIrVaTUREixYtohkzZhARUcuWLWnOnDlERHTgwAGqUaMGxcbGEhHR8OHDKTQ0lIiI+vTpQ5GRkURElJaWRn369KEDBw7Qo0ePqFq1anTixAkiIjp8+DC1aNGCiIjOnz9PHTt2zDHmli1bUlZWFsXGxtKwYcOIiOjt27c0cOBAunjxotZT8+bNKTU1lQYPHkwLFiygYcOGkbu7O02YMEGbh9atW9OOHTuIiOjhw4fUpEkTWrVqleT+EHx+iKknwWdDnTp1YGVlhevXr+PZs2d4+/YtqlWrpv356dOn4ebmBmdnZwBA165dkZCQgMePH6Nbt27YvXs3ACA8PBxeXl6wtLTEn3/+iRMnTqBLly7w8PDA8ePHcefOHa1m27ZtAQBly5ZF0aJFUaNGDQBAuXLl8ObNG7x79w6XLl3CsmXL4OHhAW9vbzx79gxxcXEAAGtra7i4uAAAatWqhX/++UdvnC9fvkTRokVhZWWF2NhY1KpVCwBgZ2eHdevWoUGDBgAANzc3ODk54dq1a8jKysLJkycxc+ZM7N69G8WKFYO/vz8AIDQ0FEeOHIG7uzuWLVsGFxcXWFtbS+0GwWdIPnMbEAhY0rlzZ+zduxfOzs7w8PDI9jO1Wv3R54kIWVlZqF+/PrKysvD3339j//792LZtm7aNn5+f9pf527dvkZ6erm1vY2Oj/f+nfrmq1WoQEbZu3YoCBQoAAJKSkmBra4vXr1/D2toalpYf/l6zsLDQG19kZCTmzJmD1NRUeHh4ICEhAfb29jh+/DhCQkJw4sQJ9OnTJ1t8+fLlQ/HixVG9enXt1FrXrl3Rr18/rcfQ0FDky/fh18HAgQPRqlUrvV4EXw7iikLwWeHh4YHDhw/j4MGD6NSpU7afNW/eHAcPHtTeARUeHo5ChQqhfPnyAD6sSwQGBqJ69eooVaoUAKBZs2bYvHkzMjIyoFarMW3aNCxevNhgPw4ODqhbty5+++03AEBycjJ69OiByMjIXNtZWVkhMzPzo++7urqiQ4cO8Pf3x549e1CyZEns3LkTe/bsQYECBbB06VL8/fffAICoqCi8f/8eX3/9Ndq1a4eTJ09q73Q6evQo/vOf/wAApk+fjuPHjwMAoqOjcfv2bTRp0sTgGAWfP+KKQvBZUaJECVSuXBmOjo4oVKhQtp81bdoU/fv3R79+/aBWq+Hs7IzVq1dr/6Lv0qULFi9enK0QDB8+HPPmzYOnpydUKhVq1qyJKVOmGOVp4cKFCAwMhLu7OzIyMtCpUyd07twZjx8/zrFNlSpVYGtrix9++AE7duzIdrXxP//zP+jTpw9SUlKgVqtRuHBhAICzszOWLl2K6dOnIzMzEw4ODli5ciVsbGzQqlUrPH/+HH369IFarUbp0qUxe/ZsAMDMmTPh7++PlStXws7ODqGhobCzszMqRsHnjbjrSSAQCAS5IqaeBAKBQJArolAIBAKBIFdEoRAIBAJBrohCIRAIBIJc+ezuerpy5QpsbW2ZaqanpzPX5IHwyRbhky3CJ1tY+0xPT0fdunU/+bPPrlDY2tqiZs2aTDVjY2OZa/LAHD4fPXoE4MOTyYaiz6cUTR7w8skqPo0OAKP7XdeDHDk/ceIEqlatavZ+1ZBTzFLGEc/8sfSZG7GxsTn+7LMrFAJ50TwF/OeffypakwdSfbKKT6MTGhoqua3Ggxw5nzJlCuzs7BTTryxj5pk/JYwHUSgEJqHZL0jpmjyQ6pNVfKbo6LaVI+dDhw7VPgWvBFjGzDN/ShgPolAITKJ169Z5QpMHUn2yik+jk9uUgaEe5Mh5kyZNFDWFyzJmnvlTwngQhUJgEnfv3gUAVKpUSdGaPJDqk1V8Gh1T2mo8yJHzR48ewdbWVjH9yjJmnvlTwngQhUJgEgMHDgTAdv6UhyYPpPpkFZ9GR8oaha4HOXLu7++vqDUKljHzzJ8SxoMoFAKT+OWXX/KEJg+k+mQVnyk6um3lyPnIkSMVtUbBMmae+VPCeBCFQmASmvc0KF2TB1J9sopPoyNljULXgxw5b9CggaLWKFjGzDN/ShgPolAITOLmzZsAgOrVqytakwdSfbKKT6NjSluNBzlyfu/ePVhaWiqmX1nGzCN/ycnJOH/+PE6ePAlHR0cMGDBA+54UuRGFQmASQ4cOBcB2/pSHJg+k+mQVn0ZHyhqFrgc5ch4QEKCoNQqWMbPUevXqFQIDA7F+/XqkpqZqvz916lTUr18fffv21b6dUC5EoRCYxJw5c/KEJg+k+mQVnyk6um3lyPnYsWNRoUIF7scxFJYx
s9LatWsXfvzxR7x58wY9evRA3759kZ6ejsTERDx58gTh4eEYPXo0/P39MWjQIMyePVv7il2u0GfGjRs38oQmD4RPtgifbFGSz4SEBNq2bRudPHmS7t+/T1lZWdqfmcNnVlYWTZ48mQBQ/fr16dq1azl+9sKFC+Tu7k4AqGzZsrRx40ZSqVQme8gtbrF7rMAkrl+/juvXrytekwdSfbKKzxQd3bZy5Pz27dtm71ciwsqVK1GpUiV0794dLVu2RIUKFVCgQAF4enoiJiZGkq4p+cvIyECvXr0wb948DB06FGfOnEGdOnVy1G7YsCH27t2LDRs2oESJEujXrx/q16+v9z3sJmFyGVIY4opCXlxcXMjFxcWoNvp8StHkAS+frOLT6Ejpd10PcuS8QYMGZu1XlUpFo0aNIgDUoUMH+uabb+jrr7+m1atX09ixY8nJyYlsbGxo8uTJRv+FLjV/qamp1LZtWwJA8+fPN0r7xo0bpFKpaPPmzVS+fHkCQFOnTjXaw7/1ckIUCjNp8sAcPi9evEgXL140qo0+n1I0ecDLJ6v4NDpS+l3Xgxw537Ztm1n7NSAggADQhAkTSKVSfRTzixcvqHPnzgSA2rdvTy9evDBYW0r+EhMTqVGjRmRpaUnr1q0zWvvf/f7+/XtasWIF7dmzxygPOenpIgqFmTR5IHyyRfhkizl9RkVFEQDq27cvqdXqHD+nVqtp+vTpZGtrS6VLl6aoqCgufp4+fUp16tQhGxsbCg8Pl6TBOp9ijULAjStXruDKlSuK1+SBVJ+s4jNFR7etHDmPjY01S79mZWVhxIgRKF++PEJDQ2FhYQHg0zFbWFjAx8cH58+fh729PVq2bAl/f3+kpaXlegxj8nfr1i00a9YM9+7dw8GDB9G1a1dm2txgWpIUgLiikBexRuFitK5Yo5CXDRs2EICP/nLPbe6fiCg5OZn69etHAKhq1ap0/PjxHI9haP7Cw8OpYMGCVKRIEbpw4YJB/vX5ZIWYelKgJg/M4fPy5ct0+fJlo9ro8ylFkwe8fLKKT6Mjpd91PciR8/DwcNn7NSsri6pXr07//e9/P5pyyilm3XwePXqUKleuTACoTZs2dOTIkY8Wu/XlLzY2VntLa4MGDejBgwcGx2CoT1PJTU88cCcwiZzesas0TR5I9ckqPo2OlL2edD3IkfOaNWvKvtdTREQEbt68iW3btmmnnDQYGnObNm1w7do1hIaGIigoCO3atUO5cuXQs2dPNG3aFHXr1kXt2rWhVqvx6NEjvHjxAi9evEBCQgLi4uJw+vRpnD17Fo6OjggKCsLYsWONete1EsaDKBQCk7h06RKADxu+KVmTB1J9sopPo+Pg4CC5rcaDHDm/du0aUlNTZe3XxYsXo2rVqvDy8vroZ8bEXKBAAYwfPx7Dhw/H3r17sW7dOsyfPx9qtTrXdtbW1vj2228xa9Ys/PjjjyhevLjRMShhPIhCITCJSZMmAWC7RxAPTR5I9ckqPo2OlL2edD3IkfOFCxfKutfT9evXcf78eSxatAhWVlYf/VxKzPnz54e3tze8vb3x9u1bREdHIyYmBkFBQbCwsICfnx+KFy+u/VemTBnkz5/fpDiUMB5EoRCYRHBwcJ7Q5IFUn6ziM0VHt60cOff395f1LW3r1q2DtbU1+vbt+8mfmxqzvb09mjdvjubNm6NZs2YAkO2JalYoYTyIQiEwCR4Dg4cmD6T6ZBWfRkfKGoWuBzlyXrVqVdnWKNLS0vD777/D09MTRYsW/eRnWMbMM39KGA+iUAhM4uzZswCAJk2aKFqTB1J9sopPo1O4cGHJbTUe5Mj55cuX8fr1a1n6dffu3UhKSsLgwYNz/AzLmHnmTwnjQRQKgUn4+fkBYDt/ykOTB1J9sopPoyNljULXgxw5X7p0qWxrFFu2bMFXX30FV1fXHD/DMmae+VPCeGBeKDIzM+Hn54cnT54gIyMDvr6+qFu3Lvz9/ZGcnAyVSoX58+ejXLlyiIqKwsqVKwEAtWrVwowZM7LdwhYbG4vAwEBYWVnBxsYG8+bNy/EyUmAeVq9enSc0eSDVJ6v4NDr67rwxxIMcOQ8ICEDlypW5HyclJQVHjhzBsGHDYGmZ8+YTLGPmmT9FjAemT2wQ0c6dO2nWrFlERJSUlEQuLi40efJkOnDgABERnTt3jk6ePEkpKSnUsWNHSkxMJCKiNWvWaP+voVevXtqHQLZs2UJz5szRe3zxwJ3yET7ZInxmZ8uWLQSATp06Jan9l5pPWR+4a9++Pdq1a6f92srKCtHR0ahevTr69++PMmXKYOrUqfjrr79QrVo1zJs3D48ePUK3bt3g7OycTWvx4sXa+45VKpVRD6kI5CEqKgoA2xfA89DkgVSfrOLT6Ei5N1/Xgxw5v3TpEl68eMG9X8PDw1GiRAm9c/osY+aZPyWMBwsiIh7Cqamp8PX1hbe3N6ZMmYKZM2fCy8sLwcHBUKlUqFixIubNm4fdu3fDzs4OvXr1wpIlS1CxYsWPtKKjozF16lRs3rz5o2Kiy5UrV5gXlLS0NJPvhZYDc/jUvLt348aNBrfR51OKJg94+WQVn0Zn9erVRve7rgc5ct6nTx9YWlpyPcb79+/RrFkzdO7cGTNmzMj1sznFLGUc8cwfS5/6yPGuNKbXLv/L06dPydPTk3bs2EFERE2aNKGkpCQiIoqJiaHBgwdTVFQUDR06VNsmMDBQOz31bw4cOECdOnWihw8fGnRsMfUkL3fu3KE7d+4Y1UafTymaPODlk1V8Gh0p/a7rQY6cHzlyhPsxIiIiCAAdO3ZM72dziplFPlnC0mduyDr19OrVKwwcOBDTp09H48aNAQD16tVDVFQUunTpgkuXLqFKlSqoU6cObt26haSkJBQsWBBXr16Ft7d3Nq09e/Zg27Zt2LRpEwoVKsTaqoABPB6gkvOhLFOQ6pNVfBodKc9R6HqQI+dly5blfpzw8HA4OzsbNE3D0gvPuJQwHpgXilWrViE5ORkhISEICQkBAAQFBcHf3x9bt26Fg4MDFi1aBCcnJ0yYMEF7n3P79u1RrVo1xMfHIywsDNOmTcPs2bNRqlQpjBo1CsCHvU5Gjx7N2rLABI4fPw4AaN26taI1eSDVJ6v4NDplypSR3FbjQY6cnz17Fk+ePOF2jPT0dOzbtw9eXl6wtrbW+3mWMfPMnyLGA9NrFwUgpp7kRbyPwsVoXfE+Cj4cOHCAAND+/fsN+jzL9zzwzJ8S3kchHrgTmMSmTZvyhCYPpPpkFZ9GJzU11WQPcuQ8KCgIVatW5aYfHh6OggULGvyXN8uYeeZPCeNBFAqBSZQtWzZPaPJAqk9W8Wl0pKxR6HqQI+elSpXidhyVSoW9e/eiY8eOBt/1yNILz/wpYTyIQiEwicOHDwP4sMakZE0eSPXJKj6NTvny5SW31XiQI+enT5/GgwcPuBzj0qVLePXqFTp37mxwG5Yx88yfEsaDKBQCkwgKCgLA9iTmockDqT5ZxafRkbLXk64HOXK+du1a2NnZcTnGgQMHYGlpibZt2xrchmXMPPOnhPEgCoXAJLZ
u3ZonNHkg1Ser+DQ6r1+/NtmDHDlfuHAhqlWrxkX7wIEDaNKkid4Hcv8Ny5h55k8J40EUCoFJlCxZMk9o8kCqT1bxaXSkFApdD3LkvFixYlyO8/TpU1y+fBlz5841qh1LLzzzp4TxIAqFwCT27dsHAHB3d1e0Jg+k+mQVn0anSpUqkttqPMiR85MnTyI+Pp75MQ4dOgQA6Nixo1HtWMbMM39KGA+iUAhMYtGiRQDYnsQ8NHkg1Ser+DQ6UtYodD3IkfMNGzbAzs6O+TEOHDiAr776yug3wbGMmWf+lDAeRKEQmMTOnTvzhCYPpPpkFZ9G5+XLlyZ7kCPnS5cuZb5GkZ6ejmPHjqFXr17Z3mVjCCxj5pk/JYwHUSgEJsHjRVJ55eVUUn2yik+jI6VQ6HqQI+eFCxdmfpxTp04hNTUVnTp1MrotSy8886eE8ZDz658EAgOIiIhARESE4jV5INUnq/hM0dFtK0fOjx07xvwY+/btQ4ECBXJ95WlOsIyZZ/6UMB7EFYXAJJYvXw4A6Nq1q6I1eSDVJ6v4NDpS1ih0PciR87CwMNjZ2TE7BhFh//79cHV1RYECBYxuzzJmnvlTwngQhUJgEnv27MkTmjyQ6pNVfBqdp0+fmuxBjpwHBwejevXqzPRiY2Nx7949TJ48WVJ7ljHzzJ8SxoMoFAKTcHJyyhOaPJDqk1V8Gh0phULXgxw5d3R0ZHoczW2jUtYnALYx88yfEsaDWKMQmMS2bduwbds2xWvyQKpPVvGZoqPbVo6cHzp0iOkx9u/fj2+++UbS+zgAtjHzzJ8SxoO4ohCYhGZ+vHv37orW5IFUn6zi0+hIWaPQ9SBHzrdu3Qo7Ozsmx0hMTMTZs2fh7+8vWYNlzDzzp4TxIAqFwCQOHjyYJzR5INUnq/g0Og8ePDDZgxw5X7VqFWrUqMFE69ChQ1Cr1ZKnnQC2MfPMnxLGgygUApOws7PLE5o8kOqTVXym6Oi2lSPnBQoUYHac/fv3o2TJkqhXr55kDZYx88yfEsaDWKMQmERYWBjCwsIUr8kDqT5ZxWeKjm5bOXK+d+9eJsfIzMzE4cOH0bFjR1haSv8VxjJmnvlTwngQVxQCk1i7di0AoHfv3orW5IFUn6zi0+hI+ata14McOQ8PD4ednZ3Jxzhz5gzevHlj0rQTwDZmnvlTwngQhUJgEseOHcsTmjyQ6tPU+B4+fIhJkyZBrVajVq1aCA8Px8SJE5E/f37JHuTI+dq1a1GzZk2Tdfbt2wcbGxuD342dEyxj5pk/JYwHUSgEJmFtbZ0nNHkg1acp8cXFxcHV1RVv3rxBnTp1sGXLFiQnJyMiIgJHjhxBsWLFJHmQI+fW1tYmH4eIEB4ejrZt28LBwcFkP6zgmT8ljAdRKAQmsWHDBgBA//79Fa3JA6k+pba7d+8eXFxcYGFhgXPnzuGvv/4CEeHdu3eYOHEi3NzccPLkSYN+gep6kCPnu3btwoULF0w6xqVLl/Dw4UPMnDnTZD8sY+aZP0WMB/rMuHHjRp7Q5IE5fLq4uJCLi4tRbfT5lKLJg9x8njhxgkqUKEFVqlShFy9eGKUrJb6UlBT6z3/+Q4ULF6a4uLhsOjdu3KB9+/aRpaUlDRw4UJIHOXLeoEEDk48xceJEsra2ptevX5vsJ6eYpYwjnvlj6TM3ctMThcJMmjwQPtmSk8+VK1eShYUF5cuXjwCQvb09bd68mZsPtVpNPj4+ZGlpSUePHs3Rp5+fHwGg8PBwbl5MwdR+V6vVVL58eXJzc2Pk6NPk9fOTh564PVYgMIJz585h1KhR6NSpE5KTk3H16lV8++236NWrF37//Xcux9y4cSO2bt2KwMBAtGnTJsfPBQQEoH79+vjxxx8lvaNC6Vy4cAEPHjzADz/8YG4rXxyiUAhM4tdff8Wvv/6qeE0WqNVqjB49GqVKlcLmzZsRFhaGCxcu4NixY2jVqhWGDBmCq1ev6tUxJr74+HiMHDkSLVq0+GiXVF0da2tr/P7770hJScH48eON8iBHznfs2GHSMTZt2oT8+fPDy8uLiR+WMfPMnyLGA9NrFwUgpp7kxdXVlVxdXY1qo8+nFE0e6Prcvn07AaDff/+diLL7TEhIoNKlS1P16tXp/fv3ueoaGp9KpaJmzZpRoUKF6NGjRznq6PqcNm0aAfjkNFVOHuTIeaNGjSQfIz09nZydncnHx4eZn5xiljKOeOaPpc/cEGsUCtTkgfDJFl2fzZo1o0qVKpFKpfrk548ePUoAyN/fn8nxly5dSgBo48aNRvl8//49VatWjSpXrkzv3r1j4oUFpvT77t27CQAdOHCAoaNPk1fPT556YupJIDCA69ev48yZMxg+fHiO20a0adMGffr0QVBQEK5fv27S8eLj4/Hzzz+jY8eO6NOnj1Ft8+fPj1WrVuHOnTsIDAw0yYdS2LRpE4oXL462bdua28oXiSgUApMICQlBSEiI4jVNZcuWLbC0tMz2S/tTPhcvXgwnJycMHToUarX6k1r64lOr1Rg4cCBsbGywevVqWFhYGK3TsmVL9O/fHwsWLMC1a9f0tpUj51u2bJF0jISEBOzZswe9e/dGvnzsHv1iGTPP/ClhPIhCITCJffv2ad80pmRNUyAibN26Fa6urihevLj2+5/yWbRoUSxatAhnz57FmjVrPqmnL76VK1fi9OnTWLp0aa4v5dGns2DBAjg5OWHgwIHIyMjIta0cOT958qSkY6xfvx5ZWVkYMmQIUz8sY+aZP0WMB6aTXApArFEon7zm89KlSwSA1q1bZ1A7tVpNrVq1ooIFC9KTJ0+MOmZ8fDzZ2dlRhw4dSK1WG+XzU+zYsYMA0OjRo43ywQMp/Z6ZmUkVKlSgVq1acXD0afLa+SmHHvMriszMTEyaNAk9e/bEDz/8gMjISCQmJsLX1xe9evWCj48PHj58CACIioqCt7c3vL29ERAQACLKpvXgwQP06NEDPXv2xIwZM3K8lBcIeLJ161ZYW1vD09PToM9bWFhg9erVyMjIwMiRIz86r3MiPT1dO72yZs2aHKecjOGHH37AmDFjsHz5cixbtsxkPbkJDw/H/fv3MXLkSHNb+bJhWpKIaOfOnTRr1iwiIkpKSiIXFxeaPHmy9m6Fc+fO0cmTJyklJYU6duxIiYmJRES0Zs0a7f81DB06lM6fP09EH275y+12Pw3iikJeli5dSkuXLjWqjT6fUjR5oPFZtWpVat++/Uc/1+dz3rx5BICWL19uULshQ4YQANqxY4dB/jQ6+vKZkZFBXbt2JQA0atQoSkpKyuZBrVbTggULKCgoyOCrGCn8/PPPRvWrWq2mb775hqpVq5bjnWamkFM/SBlHPM9Zlj5zQ9Yrivbt22PMmDHar62srBAdHY2EhAT0798f+/btQ8OGDXH58mVUq1YN8+bNQ8+ePVG0aFE4Oztn04qJiUHDhg0BAN9//z3Onj3L2q7ARCIjIxEZGal4TancuXMHt2/fhpub20c/0+dz4sSJcHd3x/jx47Odu59qFxQUhDVr1mDKlCkGP3lsaJ6sra2xdetWjBo1CitWrE
Dx4sXh5+eHyZMnw8nJCfny5cOkSZMwZcoU2NnZoWLFimjSpAlGjhyJK1euGOTFEM6fP29Uv0ZGRuLy5cuYNGmSSS8oyk2f1XnG85xVwniwIDLwuthIUlNT4evrC29vb0yZMgUzZ86El5cXgoODoVKpULFiRcybNw+7d++GnZ0devXqhSVLlqBixYpajWbNmuHMmTMAPmydEB4ejoULF+Z63CtXrsDW1pZpLGlpaUbt9W8uhE+2pKWlITw8HLNnz8ahQ4dQvnx5ozWSk5PRrVs3JCcnY+3atahdu3a2n2dmZmLZsmVYv349OnbsiKCgIFhZWRnt09B83rx5EwcOHMCTJ09gb28POzs77T8LCwskJSXh5cuXePnyJS5fvoyMjAy0bt0aEyZMkBS/VJ9EhIEDB+LOnTs4fvw4bGxsTDq2MeSl85O1zxzfF8L02uV/efr0KXl6emovoZs0aUJJSUlERBQTE0ODBw+mqKgoGjp0qLZNYGDgRw/TNG/eXPv/Y8eO0S+//KL32GLqSfnkJZ9ubm5UuXJlk3Tu3LlDFSpUIAcHB1q9ejU9ffqUzp07R2vXrqUaNWoQAPL19aWsrCzJPnmQmJhIgYGBZG9vT7a2tjRjxgy9T53nhjE+9+3bRwBo2bJlko8nlbx0fsqlx/x67tWrVxg4cCAmTZqkvYSuV68eoqKiAHzYT75KlSqoU6cObt26haSkJGRlZeHq1auoUqVKNq1atWrhwoULAIBTp06hfv36rO0KTGThwoV6r/KUoCmF9PR0nDx5Eh06dPjkzw31WalSJZw+fRr169fH0KFDUbp0aTRu3BiDBw8GEWHPnj0ICQkx+krClDzptv2UlrOzM/z9/XH79m14eXnhl19+QZ06dXDo0CFJx1y/fr1BfjMyMjBhwgRUr14dvr6+ko5lCCzPM57nrBLGA/MXF61atQrJycnZHhIJCgqCv78/tm7dCgcHByxatAhOTk6YMGECBg8eDODD2ka1atUQHx+PsLAwBAQEYPLkyZg2bRoWL16MSpUqoV27dqztCkzk3LlzeUJTCv/zP/+D9+/f51gojPH51VdfITIyEocOHYKfnx/s7e2xfv16VK1a1egCoXv8jh07Sm6b09f/RrMJ4qBBgzB8+HC4ubnBy8sLS5YsQdmyZQ0+5tWrV+Ho6Kj3c/PmzcOtW7ewb98+rm93Y3me8TxnFTEemF67KAAx9aR88orPAQMGkI2NDb19+9bcVnJFznympaXRnDlzqECBAmRvb08LFiygjIwMg9oa4vPChQtkZWVFPXr0MNWqZPLK+Zmnp54Egs+F8+fPo3HjxrCzszO3FcVga2uLn3/+GTdu3ICrqysmTZqEb775BqdOnTJZ+8GDB/Dw8MBXX32FlStXMnArYIUoFAKTCAoKQlBQkOI1jSUxMRFxcXFwdXXN8TNSfbKKzxQd3bbGalWoUAF79uzBnj17kJqaChcXF/Tr1y/XFyb9+uuvOR7j6dOn6NChA96/f48DBw6gcOHChgcjEZbnGc9zVgnjgfkaheDLguV99jw1jeXkyZMgIrRq1SrHz0j1ySo+jY6Hh4fJHqR66ty5M1q3bo3Zs2djwYIFOHDgABYtWoS+fft+9GR5XFwcnj59+pHGzZs30b59e7x69Qr79+//6BZiXrA8z3ies0oYD2KNwkyaPBA+2eHr60t2dnYGz7+bE6Xk8/r169SkSRMCQA0aNKBDhw5le6Ja16dKpaKVK1eSvb09FStWjC5duiS35U+ilHzqQ6xRCARmJjIyEvXr1+d6183nRu3atXH69Gls2LABz58/R4cOHVCtWjUsWLAA9+7dAxEhOTkZp0+fxqJFi1CzZk2MGDECTZs2xV9//SVuf1cwYupJYBKaF+NMmzZN0ZrG8PjxY9y6dQtdunTJ9XNSfbKKT6Nj6JYfuXlg5cnS0hL9+vWDj48PIiIisGrVKvz000/46aefYG1tjczMTO1nv/32W2zbtg3dunVjsgGisbA8z3ies+YeD4AoFAITuXnzZp7QNAbNvjqNGjXK9XNSfbKKzxQd3basc25ra4sePXqgR48euHnzJiIjI7Fw4UI4Ojpizpw5+Pbbb1GqVCmmxzQWljHzPGfNPR6AXPZ6atasGYAPe9G8f/8epUqVwvPnz1GkSBGcOHFCVpPGEBsbm/N+JQrS5IHwyYa+ffvi0KFD+PPPP2VbWDUFpedTg/DJFtY+c9PLcY3izJkzOHPmDJo3b44jR47gyJEjOHr0KL7++mtmxgQCpUFEiIyMRKtWrbjsWCoQ5EX0Tj09fvxYe4lYokQJPHv2jLspQd5h+vTpAICZM2cqWtNQbt26hadPn+Z6W6wGqT5ZxafR6dGjh+S2Gg9y5HzFihUoWrSoWfr1U7CMmWf+zDkeNOgtFJUrV8akSZPw9ddf48qVK6hXr54cvgR5hEePHuUJTUPRrE+4urpmW3j9FFJ9sorPFB3dtnLk/NmzZ3j//j334xgKy5h55s+c40GD3vdRqNVqnDp1Crdv30alSpVyfVJVCYg1CuHTFLp27Yq//voL9+/fR1xcnGJ9/hsl5/PfCJ9sUcQahYbk5GSkpqaiWLFiSE5OxurVq5kZEwiURGZmJiIjI9GuXTuz3K4pECgVvVNPo0ePRoUKFXDr1i3Y2tqiQIECcvgS5BF+/vlnAMDcuXMVrWkIFy9eRHJyMtq2bWvQ56X6ZBWfRqdv376S22o8yJHzxYsXo2jRorL3a06wjJln/sw1Hv6NQc9RzJw5Ez///DNmz56NXr168fYkyEMkJibmCU1DOHr0KCwtLQ2eXpXqk1V8pujotpUj52/evFHUlRrLmHnmz1zjIRv69v/o06cPpaWl0ZgxY0itVlPnzp0Z7SzCB7HXk/JRqs/vvvuOGjVqpP1aqT51ET7Z8qX6NGmvp169emHjxo1o2rQpXFxcUKlSJTnql0AgK0lJSbh06ZLB004CwZeE3qmn9PR0DBkyBADQoUMHODg4cDclyDtMnDgRAJi+05eHpj4OHToEtVpt1GtFpfpkFZ9GZ9CgQZLbajzIkfP58+ejSJEiZn//swaWMfPMnznGgy56C8X27dvRuXNnABBFQvARPO6LN8e99nv37kXJkiWN2sFUqk9W8Zmio9tWjpynp6cr6jkKll54xqWEnOl9jsLb2xsZGRmoWLGidkuDRYsWyWJOCuI5CuHTWDIyMlCsWDF4e3vj119/1X5faT5zQvhky5fqMzc9vVcUmssegeBz5c8//0RycjLc3d3NbUUgUCR6C0XDhg3l8CHIo4wdOxYAsHTpUkVr5sbWrVvh6Oho9EK2VJ+s4tPoDB06VHJbjQc5cj537lw4OzvL1q/6YBkzz/zJPR4+hdgeU/BFk56ejoiICHh6eiJ//vzmtiMQKJOc7pu9cOECZWVlMb1PVw7EcxTKR0k+d+/eTQDo0KFDH/1MST5zQ/hky5fqMze9HKeeYmJi8Pvvv8Pe3h5NmzbF999/j0KFCslXwQQCGdiyZQuKFCmi+M0uBQJzkmOhGDBgAAYMGIDU1FScOnUK8+bNQ0pKC
r7++mvtcxUCwYgRIwAAK1euVLTmp0hISEBERASGDh0Ka2tro9tL9ckqPo3OyJEjJbfVeJAj54GBgShcuDD3fjUUljHzzJ9c4yE39C5mOzg4wM3NDW5ubiAiXLlyRQZbgrwCj00i5dp4cs2aNcjMzJT0ixaQ7pNVfKbo6LaVI+dK21SUpReecSkhZ3qfo8hriOcohE9DyMzMRIUKFfCf//wHhw8f/uRnlODTEIRPtnypPk16jkIg+BzZsmULnj59ijVr1pjbikCgeAwqFImJiUhPT9d+Xbp0aW6GBHkLzXoVy1+4PDT/TUZGBgICAvDtt9+iQ4cOknWk+mQVn0Zn3LhxkttqPPDOOQDMmDEDhQoVUkxxZhkzz/zJ0Tf60FsoAgICcOrUKRQvXhxEBAsLC2zdulUOb4I8QJEiRfKE5r9Zvnw57t27h5UrV2q3pZGCVJ+s4jNFR7ct75wDgJOTkyzHMRSWXnjGpYSc6V2j6Nq1K3bu3GnSgJITsUbB3+fbt29x7tw53LlzBw8fPoRarUaZMmVQrlw57T9nZ2ez+/wU9+/fR+3atdG6dWvs3r071xfpiH5ni/DJFkWtUZQvXx7p6emKWHkXmJcnT54gICAAmzdv1u5omS/fh1MoKysr22erVKmCjh07on///qhbt67cVj/Ju3fv8MMPP8DS0hIrVqxQ1NvWBAIlo7dQPHv2DC1btkT58uUBQO/UU2ZmJvz8/PDkyRNkZGTA19cXdevWhb+/P5KTk6FSqTB//nyUK1cOs2bNQnR0NOzt7QEAISEhcHR01GrFxsZixowZsLKyQoUKFTB79uw8c2XzOUFE2LhxI0aNGoX09HQMGDAAXbt2Re3ateHv7w8AmDdvHh4+fIiHDx/izp07OH36NEJDQ7Fs2TJ4enpi7ty5qF69ukHHGzBgAADgt99+YxbDP//8A09PT0RHR2PPnj0oV66cyZpSfbKKT6Pz008/SW6r8cAj57r4+fmhUKFCXI9hDCxj5pk/OfpGH3oLhbFbiu/duxeFChXCggUL8Pr1a3h6eqJRo0Zwd3eHm5sbzp8/j7t376JcuXKIiYnB2rVrc5ymCA4OxogRI+Di4oIJEybgzz//RKtWrYzyIzANIsLkyZOxYMECtGjRAuvWrcv2lkPNL9wSJUqgRIkSaNCgAYAPv7xev36N4OBgzJ8/HwcPHsSyZcsM2sCubNmyTGM4ceIEhg4digcPHmDTpk3MdomV6pNVfKbo6LZlnfNPUapUKRQtWpT7cQyFZcw88ydH3+hF3/4fz549o1GjRpGbmxsNHz6cHj16lOvnU1NTKSUlhYiIkpKSqFWrVtSmTRtav3499evXj/z8/Ojt27ekUqmocePGNGLECOrevTvt2LHjI60VK1bQ7t27Sa1W09ChQykqKkqfXbHXE0OysrJo8ODBBICGDx8uee+v58+fU4cOHQgATZw4ka5fv87U56f4559/aOPGjdS+fXsCQJUrVzbo/Pk3X2q/80L4ZIucez3pXcwePHgwevTogQYNGuDixYvYtGkTNm7cqLcApaamwtfXF97e3pgyZQpmzpwJLy8vBAcHQ6VSYdCgQfj9998xYMAAqFQq9O3bF3PmzEGNGjW0Gvv378fMmTPh7OwMR0dHhIWFwdbWNtfjXrlyRe9njCUtLS1P7CzK0mdGRgYmT56MI0eOYNiwYRg1apRJc/oqlQpz587FH3/8gW7duiEgIID5GsHbt29x4sQJHD58GGfOnEFmZiZKly4NLy8vDBgwwOjcfIn9zhPhky08fOa4OK6vyvTu3Tvb1z179tRbmZ4+fUqenp7aq4QmTZpQUlISERHFxMTQ4MGDKSsrS3vlQUQ0b9482rVrVzadRo0a0a1bt4iIKCwsjAICAvQeW1xRmE5qaiq1bduWANCiRYty/WyvXr2oV69eBumq1Wr66aeftFcWarXaZM13797Rtm3byNPTk2xtbQkAlSlThsaNG0fnz5/P8RiGoC+fxvhk0S4nHSn9ruuBlafc6NSpE/djGENOMbPIJ0tY+swNSbvHalCpVLh58yaqV6+Omzdv6v0r8NWrVxg4cCCmT5+Oxo0bAwDq1auHqKgodOnSBZcuXUKVKlVw//59jBs3Drt27YJarUZ0dDQ8PT2zaTk5OWnf0128eHFER0frrYgC00hKSoK7uzvOnz+PdevWYeDAgbl+3tAFauDDjRBBQUF4/PgxFi5cCGdnZ/z888+SNF+9eoUZM2Zg8+bNePPmDUqWLIkhQ4age/fuaNy4sSw3PRgTO4t2LHV027LylBsVK1ZEsWLFuB/HUFjGzDN/cvSNXvRVmZiYGOratSs1a9aMvLy89FaxwMBAatKkCfXu3Vv77/Hjx9S/f3/q3r07DRo0iP755x8iIlqzZg117dqVunfvTn/88QcREd2+fZtmzJhBRESXLl2i7t27U69evah///5610f0VUWpfClXFI8ePaJatWqRjY3NJ9eMWHH9+nXq1asXAaCNGzca1VatVlNYWBgVLVqUrK2tqXfv3nT8+HEu7075UvpdLoRPtsh5RaG3UOQ1RKGQxvnz56ls2bJUsGBBOnHiBENXH3Pjxg1KT08nV1dXypcvHx05csSgdvfv39cuTjdq1Ij7oviX0O9yInyyRc5CkeP1+ejRowEAzZo1++if4PMhISEBP//8M5o2bQoLCwtERUWhZcuWBrf38fGBj4+P0ce1sbFBREQEateuDS8vL0RGRuaomZmZiSVLlqB27do4c+YMVqxYgTNnzqB27dpGH5clUmOX2o6ljm5bVp5yY8KECdyPYQwsY+aZPzn6Rh85rlEsX74cALBjxw6UKlVK+/07d+7wdyVgikqlQnx8PGJjYxEXF4fbt28jPj4et2/fxrNnzwAAffr0wYoVK+Dk5GSUtilPXRcsWBCHDh1C27Zt0aFDByxcuFD7gCYR4dWrV9i7dy/mz5+Pmzdvws3NDaGhoUwelmOB1NhZPaluio5uWzmenq9RowaKFy/O/TiGwjJmnvlTxM4GOV1q3Lx5k06dOkWdO3emM2fO0OnTpykqKoo6d+7M9HKHNWLq6f/466+/qH///lSkSBECoP1XqlQpat68OQ0YMIDmzJlDcXFxZvX5+vVr7ZRSxYoVqW7dulSwYEGt3zp16tC+fftMuoOJhU+lInyy5Uv1Kemup+TkZBw8eBCJiYnYv38/gA93rfTs2VOG8iUwhbt372LMmDHYv38/HB0d4eHhAVdXV9SuXRs1atTItk2KEihUqBAOHjyI/fv3Y/ny5bCxsUHz5s1RqVIl1K9fXzstJhAIzEOOhaJ+/fqoX78+YmJizD4XLDCMjIwMzJkzB4GBgciXLx9mz56NESNGGD2dZAxeXl4AgPDwcJN0LCws4O7uDnd3d3h5eeHJkyfa6U+lIjV2VjnT6MyaNUtyW40HVp5yY8yYMXB0dOR6DGNgGTPP/MnRN/rIsVDMnDkT06dPx8yZMz/6a068j0J5HD16FL6+vrh79y68vLywbNkylClThvtxNc/KKF2TB1J9sorPFB3dtnLk/L///S9K
lCjB/TiGwjJmnvlTxHjIaU7q5cuXRET0+PHjj/4pmS9pjUKtVtPx48epTZs2BIDKli1L+/fvN7ctvSg1n7oIn2wRPtmiiNtjNbs8vnv3Di9evMCrV6/g5+eHhw8fylXDBDlARNixYwfq1auH1q1b49q1a1iwYAH27duHjh07mtueQCD4zNC7z8GMGTNgY2OD0NBQjBs3DsHBwXL4EuTAlStX0KhRI3h7e+Pdu3dYu3Yt7t+/j4kTJ8LGxkZ2P507d0bnzp0Vr8kDqT5ZxWeKjm5bOXI+YsQIRfUry5h55k8J40HvXk/58uVD1apVkZmZibp160KlUsnhS6ADEWHFihWYNGkSnJ2d8dtvv6FPnz6wsrIyqy9XV9c8ockDqT5ZxWeKjm5bOXLeqFEjRa1RsIyZZ/4UMR70zVv17duXRo4cSRs2bKADBw5Q//79Gc6KsedzXKNQq9U0YcIEAkDu7u7a9SNdzO3TUIRPtgifbPlSfZq0e+ySJUtw7do1uLi44MKFC1iyZIkM5Uvwb+bPn49FixZhxIgRWL58uXgdrEAgkBW9v3FsbGxw/vx5DBkyJNt+PAJ5iIiIwJQpU+Dj46PIItGhQwd06NBB8Zo8kOqTVXym6Oi2lSPnQ4YMUVS/soyZZ/6UMB70XlH4+fmhQYMG6Ny5My5evIgpU6Zg1apVcnj74rl37x4GDBiAhg0b4rffflNckQDA7P3TvDV5INUnq/hM0dFtK0fOW7ZsiZIlS3I/jqGwjJln/hQxHvTNW+m+4a5Hjx6mT4Zx5HNZo8jIyKCGDRuSk5MT3b1716A2X+rcKi+ET7YIn2xRxHMUGtLT0/Hy5UsAH94qplaruRcvATB79mxcvHgRv/76KypWrGhuOwKB4AtG79TTmDFj4OPjAwcHB7x9+xaBgYFy+PqiuXLlCmbPno1evXqhW7du5raTK61btwYAHD9+XNGaPJDqk1V8Gp0VK1ZIbqvxIEfOBw4cCHt7e8X0K8uYeeZPCeNBb6Fo2rQpjhw5glevXqFEiRJiF0/OZGZmYsCAAShSpAiWLVtmbjt66d69e57Q5IFUn6ziM0VHt60cOe/QoUO2d9uYG5Yx88yfIsaDvnmrI0eOUMuWLalLly7UunVrOnPmDNN5Mdbk9TWKwMBAAkARERFGt/1S51Z5IXyyRfhki6KeowgJCcGOHTtQpEgRvHr1CsOGDUPTpk1lKGFfHrdv38asWbPg7e0NT09Pc9sRCAQCAAY8R1GoUCEUKVIEwIeNAh0cHLib+hIhIowcORK2trZYunSpue0YTIsWLdCiRQvFa/JAqk9W8Zmio9tWjpz369dPUf3KMmae+VPCeNB7ReHg4IBBgwahQYMGiImJQVpaGhYvXgwAGD9+PHeDXwo7d+7E0aNHsXz5ckXN4+qjf//+eUKTB1J9sorPFB3dtnLkvEuXLihdujT34xgKy5h55k8J48GCiCi3D+zatSvHnylxeiQ2NhY1a9ZUvOa/SUlJQY0aNVCiRAlcvHgR+fLprd+fhLdPVgifbBE+2fKl+sxNT+9vJCUWg8+NGTNm4NmzZ4iIiJBcJMxFZmYmAMDa2lrRmjyQ6pNVfBodU9pqPMiR88zMTGRmZiqmX1nGzDN/ShgPeeu30mfI1atXsXz5cvz444/47rvvzG3HaNq0aQMA+PPPPxWtyQOpPlnFp9EJDQ2V3FbjQY6cDx48GHZ2dorpV5Yx88yfEsaDKBRmhIgwatQoFC5cGHPnzjW3HUkMHjw4T2jyQKpPVvGZoqPbVo6ce3l5yfIed0NhGTPP/CliPDC9EVcB5KXnKMLDwwkArVq1ionel3r/Ny+ET7YIn2xR1F5PAj5kZGTgp59+Qu3atTFo0CBz25HMu3fv8O7dO8Vr8kCqT1bxmaKj21aOnL9//15R/coyZp75U8J4EFNPZmLlypW4c+cODh8+nOcWsP+Nm5sbALbzpzw0eSDVJ6v4NDpS1ih0PciR82HDhilqjYJlzDzzp4TxkHd/Q+VhEhMTMXPmTLRv3x7t2rUztx2T8PX1zROaPJDqk1V8pujotpUj5z4+Popao2AZM8/8KWI8MJ3kUgB5YY1i0qRJZGlpSdeuXWOq+6XOrfJC+GSL8MkWsUbxGfPs2TMEBwejd+/eqFOnjrntmMybN2/w5s0bxWvyQKpPVvGZoqPbVo6cp6SkKKpfWcbMM39KGA9i6klm5s6di4yMDEyfPt3cVpjg4eEBgO38KQ9NHkj1ySo+jY6UNQpdD3LkfOTIkYpao2AZM8/8KWE8MC8UmZmZ8PPzw5MnT5CRkQFfX1/UrVsX/v7+SE5Ohkqlwvz581GuXDnMmjUL0dHRsLe3B/Bhp1pHR0etVmJi4ifb5VUePXqE1atXY+DAgahcubK57TBh9OjReUKTB1J9sorPFB3dtnLkvHfv3vjqq6+4H8dQWMbMM3+KGA9MJ7mIaOfOnTRr1iwiIkpKSiIXFxeaPHkyHThwgIiIzp07RydPniQiIh8fH0pMTMxRK6d2uaHkNYohQ4aQjY0NPXjwgImeLl/q3CovhE+2CJ9sydNrFO3bt8eYMWO0X1tZWSE6OhoJCQno378/9u3bh4YNG0KtVuPBgweYPn06fHx8sHPnzo+0PtUur3L37l2sX78eQ4YMydNXRbq8evUKr169UrwmD6T6ZBWfKTq6beXI+evXrxXVryxj5pk/JYwHvbvHSiU1NRW+vr7w9vbGlClTMHPmTHh5eSE4OBgqlQqDBg3C77//jgEDBkClUqFv376YM2cOatSoodWoXbv2R+3+XYQ+xZUrV2Bra8s0lrS0NOTPn98kDT8/Pxw6dAhHjx5FsWLFGDnLDgufxtKvXz8AwMaNGw1uo8+nFE0e8PLJKj6NzurVq43ud10PcuS8T58+sLS0NHu/asgpZinjiGf+WPrUR4670TK9dvlfnj59Sp6enrRjxw4iImrSpAklJSUREVFMTAwNHjyYsrKyKCUlRdtm3rx5tGvXrmw6n2qnDyVOPcXGxpKlpSVNmDCBkaNPY45L5r1799LevXuNaqPPpxRNHvDyySo+jY6Uftf1IEfOV65cqYh+1ZBTzCzyyRKWPnMjNz3mheLly5fUvn17Onv2rPZ7o0aN0haBDRs2UFBQEMXHx5O7uztlZWVRRkYGde/enW7dupVN61Pt9KHEQuHj40P29vb04sULRo4+zZc6t8oL4ZMtwidbFPXObGNZtWoVkpOTERISgpCQEABAUFAQ/P39sXXrVjg4OGDRokVwcnKCu7s7vL29YW1tDQ8PD1StWhXx8fEICwtDQEAAJk+e/FG7vEZcXBy2bduGKVOmcJtyMifPnz8HAJQsWVLRmjyQ6pNVfBodU9pqPMiR85cvX6Jw4cKK6VeWMfPMnyLGA9OSpACUdkXx448/Uv78+blfTRCZ5y8hFxcXcnFxMaqNPp9SNHnAyyer+DQ6Uvpd14McOW/QoIEi+lVDTjGzyCdLWPrMDVmvKAT/R0JCgnbB/nO8mgCAKVOm5AlNHkj1ySo+U3R028qR88GDByvqjj+WMfPMnxLGgyg
UHAkODkZGRgbGjRtnbivcaN++fZ7Q5IFUn6zi0+jExsaa7EGOnDdv3lxR76JmGTPP/ClhPIhCwYm3b98iJCQEHh4eqFatmrntcOPRo0cAgLJlyypakwdSfbKKT6NjSluNBzly/uzZMzg4OCimX1nGzDN/ShgPolBwYsOGDUhKSsKkSZPMbYUrffr0AcB2HxoemjyQ6pNVfBodKXs96XqQI+dTpkxR1F5PLGPmmT8ljAdRKDigUqmwePFiNG7cGE2aNDG3Ha74+/vnCU0eSPXJKj5TdHTbypHzoUOHonz58tyPYygsY+aZPyWMB1EoOHDo0CHcvXsX8+bNM7cV7rRu3TpPaPJAqk9W8Wl0pKxR6HqQI+dNmjRR1BoFy5h55k8J40EUCg6EhoaiVKlS2u2BP2fu3r0LAKhUqZKiNXkg1Ser+DQ6prTVeJAj548ePYKtra1i+pVlzDzzp4TxIAoFY+7du4dDhw5h2rRpsLa2Nrcd7gwcOBAA2/lTHpo8kOqTVXwaHSlrFLoe5Mi5v7+/otYoWMbMM39KGA+iUDDm119/hYWFBQYPHmxuK7Lwyy+/5AlNHkj1ySo+U3R028qR85EjRypqjYJlzDzzp4TxIAoFQzIyMrBu3Tq4u7sr5hZA3ri4uOQJTR5I9ckqPo2OlDUKXQ9y5LxBgwaKWqNgGTPP/ClhPIhCwZBdu3bhxYsXGDZsmLmtyMbNmzcBANWrV1e0Jg+k+mQVn0bHlLYaD3Lk/N69e7C0tFRMv7KMmWf+lDAeRKFgSGhoKCpWrIi2bdua24psDB06FADb+VMemjyQ6pNVfBodKWsUuh7kyHlAQICi1ihYxswzf0oYD6JQMOLWrVuIiopCUFAQLC2ZvzhQscyZMydPaPJAqk9W8Zmio9tWjpyPHTsWFSpU4H4cQ2EZM8/8KWE8iELBiE2bNsHS0hJ9+/Y1txVZ4fFAYV55SFGqT1bxaXSkrFHoepAj5998842i1ihYxswzf0oYD6JQMECtViMsLAytW7dGqVKlzG1HVq5fvw4AqFOnjqI1eSDVJ6v4NDpWVlaS22o8yJHz27dvQ6VSKaZfWcbMM39KGA+iUDDg//2//4f79+8jMDDQ3FZkZ+TIkQDYzp/y0OSBVJ+s4tPoSFmj0PUgR85nzZqlqDUKljHzzJ8SxoMoFAwICwuDvb09PD09zW1FdhYsWJAnNHkg1Ser+EzR0W0rR84nTpyIihUrcj+OobCMmWf+lDAeRKEwkaysLISHh8PDwwP29vbmtiM7DRo0yBOaPJDqk1V8Gh0paxS6HuTI+X/+8x9FrVGwjJln/pQwHkShMJGTJ08iMTER3bp1M7cVs3DlyhUAQN26dRWtyQOpPlnFp9GxtbWV3FbjQY6cx8bGIj09XTH9yjJmnvlTwngQhcJEtm/fDgcHB0W8hcocjB07FgDb+VMemjyQ6pNVfBodKWsUuh7kyHlQUJCi1ihYxswzf0oYD6JQmEBmZiYiIiLg4eGB/Pnzm9uOWVi6dGme0OSBVJ+s4jNFR7etHDmfMmWKYnaOBdjGzDN/ShgPolCYwIkTJ5CUlARvb29zWzEbPC6HlTI1oQ+pPlnFp9GRskah60GOnNesWVNRaxQsY+aZPyWMB1EoTGDHjh1wdHT8orbs0OXSpUsA2C648dDkgVSfrOLT6Dg4OEhuq/EgR86vXbuG1NRUxfQry5h55k8J40EUComIaacPaN4JznL+lIcmD6T6ZBWfRkfKGoWuBzlyvnDhQkWtUbCMmWf+lDAeRKGQSGRkJF6/fv1FTzsBQHBwcJ7Q5IFUn6ziM0VHt60cOff391fUGgXLmHnmTwnjQRQKiWzfvh0FCxb8oqedAD7bCihliwd9SPXJKj6NjpQ1Cl0PcuS8atWqilqjYBkzz/wpYTyIQiGBjIwM7N69Gx4eHpLuYf+cOHv2LAC2G5fx0OSBVJ+s4tPoFC5cWHJbjQc5cn758mW8fv1aMf3KMmae+VPCeBCFQgJi2un/8PPzA8B2/pSHJg+k+mQVn0ZHyhqFrgc5cr506VJFrVGwjJln/pQwHkShkMD27dvh5OSENm3amNuK2Vm9enWe0OSBVJ+s4tPoqNVqkz3IkfOAgABUrlyZ+3EMhWXMPPOnhPEgCoWRZGRkYNeuXejSpcsXP+0E8Hk9o1JelakPqT5ZxafRkbJGoetBjpxXrFhRUX3L0gvPuJSQM1EojOTYsWN48+aNmHb6X6KiogCwfQE8D00eSPXJKj6NTvHixSW31XiQI+eXLl3CixcvFNOvLGPmmT8ljAdRKIxk+/btKFSoEFq3bm1uK4pgxowZANjOn/LQ5IFUn6zi0+hIWaPQ9SBHzoODgxW1RsEyZp75U8J4EIXCCNLT07F79254eXnBxsbG3HYUwfr16/OEJg+k+mQVn0YnPT3dZA9y5HzWrFmoUqUK9+MYCsuYeeZPCeOBeaHIzMyEn58fnjx5goyMDPj6+qJu3brw9/dHcnIyVCoV5s+fj3LlymHWrFmIjo7WvschJCQEjo6OH2nu27cPYWFh2LZtG2u7RnHkyBEkJyeLaad/weMBKiU9lJUbUn2yik+jI2WNQteDHDkvW7asovqWpReecSkhZ8wLxd69e1GoUCEsWLAAr1+/hqenJxo1agR3d3e4ubnh/PnzuHv3LsqVK4eYmBisXbsWzs7OOerFxsZi586dICLWVo1m8+bNKFasGFxdXc1tRTEcP34cAJhOxfHQ5IFUn6zi0+iUKVNGcluNBzlyfvbsWTx58kQx/coyZp75U8J4YF4o2rdvj3bt2mm/trKyQnR0NKpXr47+/fujTJkymDp1KtRqNR48eIDp06fj1atX+OGHH/DDDz9k03r9+jUWLlwIPz8/TJs2zaDjp6enS/oLKzfS0tJw8eJF7N69G926dUN8fDxTfVakpaUxj10fmnu8jfllpc+nFE0e8PLJKj6NzurVq43ud10PcuQ8NDQUlpaWZu9XDTnFLGUc8cwfS5+SIU6kpKRQ7969ae/evVSrVi3auXMnERGtWLGCli5dSikpKbRy5Up69+4dpaSkkKenJ8XGxmrbZ2Vlka+vL8XHx9OjR4+oW7duBh33xo0bzGO5ceMGrVu3jgDQhQsXmOuzgkfs+nj48CE9fPjQqDb6fErR5AEvn6zi0+hI6XddD3LkPDIyUhH9qiGnmFnkkyUsfeZGbnpcFrOfPXuGESNGoGfPnnB3d0dQUBBatWoFAGjVqhWWLFmCAgUKoG/fvihQoAAAoFGjRoiLi0ONGjUAADExMXjw4AECAgKQnp6O+Ph4zJ49G1OnTuVhWS+bNm1CtWrVFLNFslIoW7ZsntDkgVSfrOLT6Ej5q1LXgxw5L1WqlKL6lqUXnnEpIWeWrAVfvXqFgQMHYtKkSdqppHr16mnvBb506RKqVKmC+/fvo2fPnlCpVMjMzER0dDRq166t1fn6669x4MABbNq0CYsXL0aVKlXMViQePXqEqKgo9O7dGxYWFmbxoFQOHz6Mw4cPK16TB1J9sorPFB3dtnLk/PTp04rqV5Yx88yfEsYD8y
uKVatWITk5GSEhIQgJCQHw4V25/v7+2Lp1KxwcHLBo0SI4OTnB3d0d3t7esLa2hoeHB6pWrYr4+HiEhYUhICCAtTXJbN++HZaWlhg4cKC5rSiOoKAgAGD6znAemjyQ6pNVfBodKc9R6HqQI+dr166FnZ2dYvqVZcw886eE8WBBpIDbiRgSGxvLdCvjtLQ0lCpVCq6urti5cyczXR6wjt0Qnj9/DgAoWbKkwW30+ZSiyQNePlnFp9F5/fq10f2u60GOnJ86dQrVqlUze79qyClmKeOIZ/5Y+syN3PTEA3d62LFjB/755x/4+vqa24oi4TEwlPKLRB9SfbKKT6Pz+vVrkz3IkfNixYopqm9ZeuEZlxJyJgpFLqjVasyfPx+VKlXSLsYLsrNv3z4AgLu7u6I1eSDVJ6v4NDpSnnbW9SBHzk+ePIn4+HjF9CvLmHnmTwnjQRSKXNi3bx+uX7+OoKAgsYidA4sWLQLA9iTmockDqT5ZxafRkbJGoetBjpxv2LABdnZ2iulXljHzzJ8SxoMoFDmgUqkwY8YMVKxYEW5ubua2o1h4rNsofS1Ig1SfrOLT6Lx8+dJkD3LkfOnSpahWrRr34xgKy5h55k8J40EUihxYs2YNrl69iu3btyNfPpGmnChatGie0OSBVJ+s4tPoSCkUuh7kyHnhwoUV1bcsvfCMSwk5Y/4cxefAkydP4O/vjxYtWny0rYggOxEREYiIiFC8Jg+k+mQVnyk6um3lyPmxY8cU1a8sY+aZPyWMB/Gnsg5paWnw8fFBeno6QkNDxdqEHpYvXw4A6Nq1q6I1eSDVJ6v4NDpS1ih0PciR87CwMNjZ2SmmX1nGzDN/ShgPolD8i5SUFPTo0QNnzpzBli1btNuJCHJmz549eUKTB1J9sopPo/P06VOTPciR8+DgYEW81lMDy5h55k8J40EUiv9FrVbj+++/x99//43Q0FD4+PiY21KewMnJKU9o8kCqT1bxaXSkFApdD3Lk3NHRUVF9y9ILz7iUkDOxRvEvPD09cfLkSQwbNszcVvIM27ZtY/5CKR6aPJDqk1V8pujotpUj54cOHVJUv7KMmWf+lDAexBYeZtLkgTl8tmjRAoBx7/PV51OKJg94+WQVn0YnNDTU6H7X9SBHzhs2bKiod2bnFLOUccQzfyx95obYwkPAjYMHD+YJTR5I9ckqPo3OgwcPTPYgR85XrVqlqHU/ljHzzJ8SxoMoFAKTsLOzyxOaPJDqk1V8pujotpUj5wUKFFBU37L0wjMuJeRMrFEITCIsLAxhYWGK1+SBVJ+s4jNFR7etHDnfu3evovqVZcw886eE8SCuKAQmsXbtWgBA7969Fa3JA6k+WcWn0alXr57kthoPcuQ8PDwcdnZ2iulXljHzzJ8SxoMoFAKTOHbsWJ7Q5IFUn6zi0+jEx8eb7EGOnK9du1ZRN4WwjJln/pQwHkShEJiEtbV1ntDkgVSfrOIzRUe3rRw5t7a2VlTfsvTCMy4l5EysUQhMYsOGDdiwYYPiNXkg1Ser+EzR0W0rR8537dqlqH5lGTPP/ClhPIgrCoFJaE7g/v37K1qTB1J9sopPo/Pdd99JbqvxIEfOd+/eDTs7O8X0K8uYeeZPCePhs3vg7sqVK7C1tTW3DYFAIMhTpKeno27dup/82WdXKAQCgUDAFrFGIRAIBIJcEYVCIBAIBLkiCoVAIBAIckUUCoFAIBDkiigUAoFAIMgVUSgEAoFAkCtfxAN3V69excKFC7Fp0ybExsZixowZsLKyQoUKFTB79mxYWlpiw4YNOHDgAADAxcUFI0eORFpaGiZNmoTExETY29tj3rx5cHZ2zqa9fft2bN26Ffny5YOvry9atmxpUDu5fX6qHRHh+++/R4UKFQAAdevWxYQJE8zqc9asWYiOjoa9vT0AICQkBNbW1orKZ2xsLObMmaP9+sqVK1i5ciWaN28uaz41HDt2DIcPH8aiRYs+0lbC+WmITyWcn4b4VML5qc8n6/MTAECfOWvWrKFOnTpRt27diIho+PDh9OeffxIR0fjx4ykyMpIePnxInp6elJWVRSqVirp3706xsbG0fv16Wr58ORER7d+/nwIDA7Npv3jxgjp16kTp6emUnJys/b++dnL7zKnd/fv3aejQoYrJJxGRj48PJSYmZvue0vL5bw4ePEjjx48nIpI9n0REgYGB1K5dOxo7duxH2ko5P/X5VMr5qc8nkTLOT0N8ajD1/NTw2U89lStXDitWrNB+XbNmTfzzzz8gIrx9+xb58uVDyZIlsXbtWlhZWcHS0hJZWVmwtbXFX3/9hebNmwMAvv/+e5w7dy6b9t9//41vvvkGNjY2cHR0RLly5RAXF6e3ndw+c2oXExODhIQE9OnTBz/++CPu3r1rVp9qtRoPHjzA9OnT4ePjg507dwKA4vKp4d27d1ixYgWmTp0KALLnEwC+/fZbBAQEfFJbKeenPp9KOT/1+VTK+anPpwYW56eGz75QtGvXDvny/d8Mm+ayrkOHDkhMTMR3330Ha2trODs7g4gwb9481KpVCxUrVkRqaiocHR0BAPb29khJScmm/e+faz6Tmpqqt53cPnNqV6xYMQwZMgSbNm3C0KFDMWnSJLP6fPfuHXr37o0FCxZg7dq1+OOPPxAXF6e4fGrYuXMn2rdvr51mkDufAODm5gYLC4tPaivl/NTnUynnpz6fSjk/9fnUwOL81PDZFwpdZs+ejc2bN+Pw4cPo0qULgoKCAHzY52TixIl4+/YtZsyYAQBwcHDA27dvAQBv375FwYIFs2n9++eazzg6OuptJ7fPnNrVqVMHrq6uAID69esjISEBZOSOLix9FihQAH379kWBAgXg4OCARo0aIS4uTpH5BIB9+/ahW7du2q/lzqc+lHJ+GoISzk99KOX8NBSW5+cXVyicnJzg4OAAAChevDiSk5NBRBg+fDiqV6+OmTNnwsrKCsCHy7uoqCgAwKlTpz56k9jXX3+Nv/76C+np6UhJScGdO3dQrVo1ve3k9plTu+DgYGzcuBEAEBcXh9KlS+v9K4Wnz/v376Nnz55QqVTIzMxEdHQ0ateurbh8AkBKSgoyMjJQqlQp7ffkzqc+lHJ+6kMp56c+lHJ+GgLr8/OLuOvp38yaNQvjxo1Dvnz5YG1tjcDAQBw/fhwXL15ERkYGTp8+DQAYP348evTogcmTJ6NHjx6wtrbW3l3w22+/oVy5cnB1dUWfPn3Qs2dPEBHGjRsHW1vbHNuZy6darf5kuyFDhmDSpEmIioqClZUV5s6da/Z8uru7w9vbG9bW1vDw8EDVqlXx1VdfKSqfrq6uuHfvHsqUKZPtGHLn85tvvvmkhtLOT30+lXJ+GpJPJZyfhvhkfX6K3WMFAoFAkCtf3NSTQCAQCIxDFAqBQCAQ5IooFAKBQCDIFVEoBAKBQJArolAIBAKBIFdEoRAIjCA9PR07duwAAERERCAyMpKZ9pEjRxAeHs5MTyBghSgUAoERvHz5Ulsounbtqn3SlQVRUVFwcXFhpicQsOKLe+BOIDCFVatWIT4+HsHBwSAiFC1aF
JUqVcKaNWtgbW2N58+fw8fHB+fPn0dcXBz69u2Lnj174uLFi1iyZAmsrKxQtmxZzJw5E9bW1lpdIsLr169RtGhR7ffS09MxZswYpKamarcG/+6778wRtuALRxQKgcAIhg0bhlu3bmHkyJHZdv98/vw5du/ejZiYGIwZMwbHjh1DQkICRo4ciR49emDatGn4448/UKRIESxduhS7du2Ct7e3tv3ff/+NOnXqZDvWw4cP8erVK2zYsAGJiYm4f/++XGEKBNkQhUIgYEDVqlVhbW2t3c7bxsYGTk5OSE9PR1JSEl68eIGxY8cCANLS0tC0adNs7U+ePIm2bdt+pNmrVy+MHz8eWVlZ6NOnj1zhCATZEIVCIDACS0tLqNXqj76f2+ZqhQsXRsmSJRESEgJHR0dERkbCzs4u22fi4uK0hUTDzZs38fbtW6xZswYvXryAj48PWrZsySQOgcAYRKEQCIygSJEiyMzMxIIFC5A/f36D2lhaWmLq1KkYMmQIiAj29vaYP3++9ucJCQkoXrz4R+0qVKiAlStXYvfu3bC2tsbo0aOZxSEQGIPYFFAgEAgEuSJujxUIBAJBrohCIRAIBIJcEYVCIBAIBLkiCoVAIBAIckUUCoFAIBDkiigUAoFAIMgVUSgEAoFAkCv/Hxz3jXDHsVT4AAAAAElFTkSuQmCC\n" + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "n = 569\n", "on, off = wheel_moves['intervals'][n,]\n", @@ -391,9 +490,18 @@ }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": "
", + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYoAAAESCAYAAADjS5I+AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8rg+JYAAAACXBIWXMAAAsTAAALEwEAmpwYAABTF0lEQVR4nO2deVxU1f//XwOCoCiapJniDmapGSqSJliafDPTzA1xqcw1tRQrV5JU/GgpmlSWe4ammEpi5o64i6KYEIKCmagg4o4swrx/f/BjDOGeuXOX4TJzno+Hj2Lm/X6d18sLHOeee8/VERGBw+FwOBwBbMrbAIfD4XC0DZ8oOBwOh8OETxQcDofDYcInCg6Hw+Ew4RMFh8PhcJjwiYLD4XA4TPhEweEAmDt3Lnr37o3evXujZcuW8PX1NXydm5tbovbXX3/F8uXLmXonT55Ez549Bd//6KOPkJqaivz8fPj6+pZ6f/z48Zg9e7bh63/++QeDBw9Gjx490K9fP6SkpAAAiAhLlixBjx490KNHD0yZMgU5OTmmROdwjFKpvA1wOFpg5syZhv9/4403sHDhQrRq1arM2kGDBskaq6CgAFevXkWTJk0QExOD1q1bl3h/xYoVOH36NHr06GF47bPPPsP777+Pd955B9HR0fj0008RGRmJvXv34siRI4iIiICdnR0+/fRTrFu3DqNHj5blkcP5L3yi4HCMEBoairi4ONy8eRPNmzdHw4YNcefOHXz55ZeIiorCTz/9hPz8fNy+fRvvvvsuJk6cKKg1cuRIpKam4uHDh+jduzcyMjJQtWpVrF+/HoMHD8bJkydx+PBh+Pn54f79+wCAjIwMpKam4u233wYA+Pj44KuvvsLff/+N7t274/XXX4ednR0ePnyI27dvo0aNGmb4W+FYE/zUE4cjgmvXrmHbtm1YuHCh4TUiwurVqzF//nxs3boVmzZtwvLly3H79m1BnRUrVuCjjz7C8OHD8fvvv6Ndu3YIDQ3F4MGDkZGRgeDgYCxcuBC2traGnhs3bqB27dqwsXny41qnTh2kp6cDAOzs7BAWFoYuXbrgzp07ePPNN1X4G+BYM3yi4HBE0KZNG1SqVPIDuE6nw48//oiEhAR89913mD9/PojI6BrBhQsX0KJFCwDAxYsX0axZMzx+/BiTJ0/GtGnTULt27RL1er0eOp2uxGtEVGIyGTJkCE6dOoVu3brhk08+kROVwykFP/XE4YigSpUqpV579OgR+vTpg27duqFdu3bo27cv9u3bB9b2aSNHjkRMTAzOnDmDr7/+GhkZGejfvz/+7//+D1evXsX8+fMBALdu3UJhYSHy8vIwYcIEZGZmgogME8bNmzfx3HPP4cKFC9Dr9XjxxReh0+nQv39/rFu3Tp2/BI7VwicKDkciV65cwcOHDzFx4kTY29sjIiIC+fn50Ov1gj1LlixB//79sWPHDuzbtw8nTpwwLKSPHTvWUBcaGmpYBwGABg0aYOfOnXj77bdx+PBh2NjYwN3dHdu3b8eaNWuwceNGODo6IiIiAl5eXuoG51gdfKLgcCTSvHlzdOnSBW+99Rbs7e3h7u6OZs2a4cqVK7C3ty+zJy4uDh4eHgCA06dPo3379qLGCgkJQWBgIJYtWwZ7e3t8++23sLGxwbvvvot///0Xffv2ha2tLdzc3BAcHKxYRg4HAHR8m3EOh8PhsOCL2RwOh8NhwicKDofD4TDhEwWHw+FwmPCJgsPhcDhMLO6qp7i4OFSuXJlZk5eXZ7TGErCWnID1ZLWWnID1ZNVKzry8PLRp06bM9yxuoqhcubLhrlchEhMTjdZYAtaSEzB/1qtXrwIAXF1dFakT28vKKTQWy4NUf2L65GQHgAMHDsDNzU22b7k+1NZk5TQniYmJgu+pNlGcO3cOCxcuxC+//IKEhATMmjUL9vb2aNGiBWbMmGHYt0av12PUqFHo2rVrqV05L126hMDAQBARXnjhBQQGBpbYtoDDKS+GDh0KADh48KAidUr0CtWzdKT6E9MnJzsATJ06FVWqVJHtW64PtTVZObWCKhPFihUrsH37djg6OgIAAgMDMXPmTHh4eGDx4sWIjIxE7969ARTdqXrv3r0ydUJCQhAQEID27dtj6tSpOHDgAN/wjKMJ/rstuRJ1SvQK1bN0pPoT0ycnOwCMHj0aDRs2lK0t14famqycWkGViaJBgwYIDQ3FF198AaBom+Tiu1E9PDywf/9+9O7dG7t27YJOp4O3t3eZOqGhobC1tUV+fj4yMzNRq1YtNexyOCbTrVs3ReuU6BWqZ+lI9SemT052AOjYsaPgaTZTtOX6UFuTlVMrqDJR+Pr6Ii0tzfC1q6srYmJi4OnpiaioKOTk5CA5ORk7duzA0qVL8f3335epY2tri2vXruHDDz+Ek5MTGjdubHTsvLw85rk2AMjNzTVaYwlYS07A/FnLa42CldPS1iguXbqEq1evWvwaBSunVlBtC4+0tDQEBAQgPDwcqampCA4Ohq2tLVq1aoUHDx6gUqVKOHXqFBwcHHDt2jXY2dlhxowZgp8uNm/ejNOnT2PBggXMccUsalrLIq+15ATMn7VLly4AjJ+jFlsntpeVU2gslgep/sT0yckOAJ6enoLn7k3RlutDbU1WTnPC+t4yy1VP0dHRmDdvHurUqYM5c+bA29sbPj4+hvdDQ0Ph4uJSapIYM2YMpk6dikaNGqFq1aolHtzC4ZQnX331laJ1SvQK1bN0pPoT0ycnO1D03HChc/emaMv1obYmK6dWMMtE0bBhQ4waNQqOjo7o0KFDiUniaS5duoSwsDAEBQVh1KhRmDp1Kuzs7ODo6Ii5c+eawy6HYxTW97CUOiV6hepZOlL9iemTkx0A2rdvL/gvXFO05fpQW5OVUytY3O6x/NTTE6wlJ2D+rElJSQCKthpXok5sLyun0FgsD1L9iemTkx0Adu7ciaZNm8ryHRsbiy1btqB58+bo06cPqlevLsmL1PHFsGPHDri5uSmiJQfmzxBZGH///bciNZaAteQkMn9WHx8f8vHxUaxObC8rp9BYLA9S/Ynpk5OdiKh9+/ayfIeFhZGNjQ0BIACk0+no//7v/ygtLU2yJ1PGN4Zer6fg4GDS6XRkZ2dHb7/9NoWEhNDjx49l+5MC63vL4u7M5nDMwbx58xStU6JXqJ6lI9WfmD452QFg4sSJaNSokSTtLVu2YOjQofDx8cHHH3+MtLQ03L59G4sXL4a3tzeio6NRv359yd7kZiMijB49GitWrMDLL7+M5557DpcvX0ZAQAB2796NX3/9FTVr1pQ1hqKYb74yD/wTxROsJSeR9WS1lpxE0rOeP3+eqlatSq+++iplZ2eXeO/kyZNUrVo1cnd3p5s3byphUxLLly8nADR16lRKSEgo8bqdnR25u7vTtWvXzOqJ9ffNLyPicCQQHx+P+Ph4xeqU6BWqZ+lI9SemT052ALh48aLJvrOzs/Hee++hWrVq+O2331ClSpUStZ6envjzzz9x5coVjB49GiRxiVZOtjNnzmDChAno
3r07goODcenSJYPWyJEjsX//fly/fh2+vr64efOmpDEUx3zzlXngnyieYC05ifgaBWssa1qjCAgIIAAUFRXFrP36668JAIWFhUnyJjXbnTt3qEmTJlSvXj3DJ5qycu7fv58cHR2pYcOG9Ndff4nSzsrKopycHJM9FcP63uIThQVjLTmJzJ81JiaGYmJiFKsT28vKKTQWy4NUf2L65GQnItq0aZNJvs+cOUM2NjY0evRoo7UFBQX06quvUs2aNSWd4pGabciQIVSpUiU6evSo4TWhnKdOnaK6deuSk5MTRUZGlqmXkpJCISEh5O3tTTY2NuTv72+yp2L4RCGhxhKwlpxE1pPVWnISmZa1oKCA2rVrR3Xq1KHbt2+L6klKSiIHBwfq2bMn6fV6qTZFc+TIEQJAM2bMKPE6K2daWhp5eHiQTqejoUOH0ieffEK9e/eml19+mZydnQ1XdLVu3Zq+/PJL+vfffyX741c9cTgKExcXBwCCD3oxtU6JXqF6lo5Uf2L65GQHiq7rF3qYztPaoaGhOH36dJlXCwn5cHd3x//+9z9MmjQJP//8Mz744APR3kzNptfr8cknn6BevXqYNm1aifdYOevVq4dDhw5h+vTpWLVqFXQ6HRo1aoRGjRrhtddeQ/PmzdGzZ09R++DJQvL0o1H4J4onWEtOIr5GwRqroq1R3L59myZPnky1a9em5s2bU2xsLOXm5gpqb926lWxtbalHjx5lfjJg+SgsLKTOnTuTs7MzXb9+XbRHU7OtXLmSAND69etLvcdai/kvBQUFqn7y4aeeJNRYAtaSk8j8Wc+ePUtnz55VrE5sLyun0FgsD1L9iemTol1QUEAdO3YkAGRvb284tVK1alV69913adWqVZSenk779++nZcuW0bRp08jOzo68vLzo/v37knwkJSVR5cqVqVevXqJ/EZuS7e7du1S7dm3q2LFjmfpbtmyR/D2iJHyikFBjCVhLTiLryWrpOX/66ScCQOvWraP4+HhKTk6mTZs20dixY6l+/fqGieO/f/r370937tyRNe4333xjGFdppk6dSgDo1KlTZb6vlWPK8sH3erJgrCUnYP6sp06dAlC0oZsSdWJ7WTmFxmJ5kOpPTJ+p2llZWXB3d0erVq0QFRWFzZs3o3HjxoZ+IkJcXBz+/PNPZGRkoHnz5hg4cKDRB5qJ8VFYWAgfHx8kJCQgISEBzz//vGxNoOi5Fe7u7ujbty/CwsLKrAkPDy+Rs7zgez1JqLEErCUnEV+jYI1VUdYoxo8fT7a2tob7BuTu9WRqbXJysuiroMRqDhkyhCpXrkz//POPYI3YNQq14aeeJNRYAtaSk8j8Wc+fP0/nz59XrE5sLyun0FgsD1L9iekzRTszM5McHBxoxIgRhtd+//13RXybUhsSEkIA6Oeff5atefr0aQJAU6ZMYdaxcpoTPlFIqLEErCUnkfVktdScwcHBBKDEvkflkbWgoIA6depENWrUkLXXkl6vpy5dupCLiwvdvXuXWauVY8r3euJwFObYsWM4duyYYnVK9ArVs3Sk+hPTJ1Y7Pz8f33//Pbp3744XX3zR8PrZs2cV8W1Kra2tLdasWYPc3FxMmjRJsuaWLVtw8OBBfPXVV3B2dmaOycqpGcw4YZkF/oniCdaSk4ivUbDG0voaxfr16wkA/fHHHyVeN/caxX8JCgoiAHT48GGTNbOzs6lBgwb08ssvU0FBgdGx+BpFOcAniidYS04i82e9cOECXbhwQbE6sb2snEJjsTxI9SemT6y2l5cXubu7U2FhYYnX//jjD0V8S8mYnZ1N9erVo/bt25fyZUyz+HLY6OhoUWOxcpoTPlFIqLEErCUnkfVktbScKSkpBIAWLFhQ6r3yzrpu3ToCQKtXrxbdc+rUKbK1taXhw4eL7invnMXwNQoOR2Gio6MRHR2tWJ0SvUL1LB2p/sT0ianZvHkzAGDAgAGl3jt16pQivqVmHDx4MF599VVMnToV9+7dM6qZk5ODoUOHom7duli0aJHocVg5NYMZJyyzwD9RPMFachLxNQrWWFpeo3jllVfI09OzzPfKc42imNOnT5NOp6NJkyYZ1Zw0aRIBoD179pg0hlWvUcTFxdGQIUOIiCg+Pp769u1LgwYNotmzZ5c451dYWEgfffQRbdiwoZTG33//TYMGDaIhQ4bQ8OHDKTMz0+i4fKJ4grXkJDJ/1pSUFEpJSVGsTmwvK6fQWCwPUv2J6TNWk5ycTABo0aJFZb6/e/duRXzLOQZERCNHjiRbW1s6ffq0oOb27dsJAH388ccm67NymhOzTxTLly+nnj17Uv/+/YmIqE+fPhQbG0tERTe0REREGGoXLVpE/fr1K3OiGDx4sMH8r7/+SvPmzTM6Np8onmAtOYmsJ6sl5Zw7dy4BEHyGglayZmZmUoMGDej5558v896KpKQkcnZ2Jg8PD3r06JHJ+lrJafY1igYNGiA0NNTwdUZGBjw8PAAAHh4eiI2NBQDs2rULOp0O3t7eZeqEhIQY9h4pLCxE5cqV1bDL4ZjMvn37sG/fPsXqlOgVqmfpSPUnps9YzaZNm9CxY0e4urqW+f6xY8cU8S3nGACAi4sLIiMjcf/+ffTq1QuPHj0yaMbHx6Nr166ws7PDli1b4OjoaLI+K6dWUOXBRb6+vkhLSzN87erqipiYGHh6eiIqKgo5OTlITk7Gjh07sHTpUnz//fdl6tSuXRtA0cPIw8LCsH79eqNj5+XlITExkVmTm5trtMYSsJacgPmzTp8+HUDRg2WUqBPby8opNBbLg1R/YvpYNSkpKTh//jymTZsmmGfZsmWwsbGR7VvOMSjGzs4OCxYswPjx4+Hl5YVbt27h4cOHyM3NRfXq1bFixQrk5ORI+h5k5dQKqu0em5aWhoCAAISHhyM1NRXBwcGwtbVFq1at8ODBA1SqVAmnTp2Cg4MDrl27Bjs7O8yYMaPUp4udO3di2bJl+OGHHwT/5fFf+O6xT7CWnID5s169ehUAjH5Piq0T28vKKTQWy4MUf0lJSdi/fz+6d++OZs2ameS/mK+++gpfffUV0tLSBHdqPXDgANzc3GT7lnMMnua3337DmDFjcPv2bbRs2RJvvvkmpkyZYvhHrRRYOc1Juewee/XqVcMaxerVqyk9PZ2IiGbPnk0HDx4sUbt06dIy1ygiIiJo0KBBJu01z9conmAtOYmsJ2t554yJiaEqVaoQAHJ3d2fuisqiRYsW5O3tzawp76xC5OTk0MOHDxXT00rOcr+PomHDhhg1ahT8/Pzg5OQEHx8fwdpLly4hKCgIhYWFCA4ORnZ2NiZMmIChQ4di6dKl5rDL4Rhl165d2LVrl2J1SvQK1bN0TBmjsLAQH3zwAZ555hlMnDgRaWlpGDZsmMl+UlNTkZiYiPfee4853uHDhxXxLecYlIWDgwPTm6koqaUaZpywzAL/RPEEa8lJZHn3Uej1emrTpg21b9++xOvleR/Fb7/9RgAoPDycfHx8qFmzZgSAjh49apL28uXLCYDRY6aF+yjMoWnV91GUF3yieIK15CQyf9YbN27
QjRs3FKt7muJttwGQt7c33b59m4jYOYXGYnkwxd9rr71GTZs2pYKCArpx4walpKRQjRo1aNiwYSZp9+/fn+rVq2f04UDR0dGK+JZ6DMylycppTljfW6pc9cThWDrPPfeconX/5dy5c5gxYwZ69uyJzp07Y+bMmRg7diw2btwoaSyWB7H+UlNTceTIEcyfPx+2traGvh49euDPP/+EXq+HjU3JM9llaev1ehw4cAA9e/aETqdjjvnss89KyiSntjw0WTm1At/ricORQGRkJCIjIxWr+y+LFy9G1apV4e/vjxYtWmD69OnYtGkTEhISJI3F8iDWX/GeTH5+fiX63n77bWRmZhqeIW1M++zZs8jKykK3bt2MjhkVFSXbt6m1YlFSk5VTM5jxk41Z4KeenmAtOYksZ43ixo0bZG9vT+PGjTP0ZmZmkr29PU2YMKHc1ig6dOhA7dq1K9WXlZVFNjY2FBgYKEp7/vz5BEDUqRa+RmFe+BqFhBpLwFpyEpk/a2Zmpqi9x8TWFVP8wJykpKQSvYMHD6bq1auX2G9I7FgsD2L8paenEwCaM2dOmX2dOnUiDw8PUdpdu3alVq1aMccr5ujRo7J8S6kVi5KarJzmpNwvj+VwLA0XFxe4uLgoVlfMxo0b8cYbb8Dd3b1E78iRI3H//n0cOHDA5LFYHsT4+/PPPwEAb7/9dpl9b7/9Ns6cOYMbN24wtXNycnDkyBFRp50AoGbNmrJ8S6kVi5KarJxagU8UHI4Etm7diq1btypWBwDJycm4cOEC3n333VK9nTt3hqurK/NcttBYLA9i/P3xxx94/vnn0aZNmzL7evToAaBoFwWW9pEjR5CXl4c333yTOV4xe/fuleVbSq1YlNRk5dQMZvxkYxb4qacnWEtOIstYo1i4cCEBoMuXL5fZO2XKFLK1taVbt26ZNJacNYr8/HyqXr06jRgxQrBPr9dT/fr16b333mNqf/7552RnZyf6rma+RmFe+BqFhBpLwFpyEpk/6927d+nu3buK1REReXt7U+vWrQV7T58+TQBo1apVJo3F8mDM34EDBwgAbdu2jdn30UcfkbOzMz1+/Fiw5pVXXjHpF+LJkycl+5ZaKxYlNVk5zQlfo+BwFMbZ2RnOzs6K1WVlZeHIkSPo1auXYK+Hhwfq1auHLVu2mDQWy4Mxf3/88Qfs7e1LrSs83ffmm2/i3r17OH36dJk1mZmZOHv2rOj1CQCoVq2aZN9Sa8WipCYrp1bgEwWHYyIJCQlYtGgRNm3aZLR206ZNouoOHToEvV4PX19fwV6dTgdfX1/s3bsXWVlZosdieTDm748//oCPjw+cnJyYfV27doVOp8PevXvLrCl+3oLY9QmgaBFdqm+ptWJRUpOVUzOY8ZONWeCnnp5gLTmJzJc1JSWFqlatSgCobt26RrehEHsue/LkyWRvb085OTnM3q1btxIA+uGHH0SPJXWN4tKlSwSAlixZIqqvbdu21KlTpzJr3nvvPXruueeooKCgzLHKgq9RmBe+RiGhxhKwlpxE5ss6fvx4AkA9evQo89z902RnZ1N2drZR3VdffZU6duxotDchIYFatmxZqpY1FssD672QkBACUObznMvqCwwMJBsbG8rIyChRc//+fXJwcKDx48eXOY4QsbGxknzLqRWLkpqsnOaEr1FwOAqQl5eHn3/+GUOHDsX27dvh6uqKlStXMnuqVKmCKlWqMGtyc3MRGxuLTp06Ge3V6XQYMmQIjh07htTUVFFjsTyw3tu+fTtatmyJJk2aiOrr27cv9Ho9IiIiStRERkYiNzcXAwcOLHMcIRwdHSX5llMrFiU1WTm1Ap8oOByRHDp0CA8ePMDAgQPx66+/olWrVti9e3eZ6wXFhIWFISwsjKl7+vRp5Ofnl5oohHr9/f2h0+lKvSdUz/Ig9F5WVhYOHz6Md955R3Rf69at0bRpU8Nie3HNqlWr4Orqio4dO5apJcT27dtN9i23VixKarJyagYzfrIxC/zU0xOsJSeRebJOmjSJKleuTNnZ2eTj40Nt27YlAPTTTz8J9og5l128/9HNmzeN9hbnfP3118nNza3EGomSaxTFz4yIjY01qW/KlClUqVIlSktLK/F3NH/+/DJ1WPA1CvPC1ygk1FgC1pKTyDxZPT09DT/Q+fn5lJeXR02aNKF3331XsCc/P5/y8/OZuu+88w65u7uL6i3OuWrVKgJAJ0+eNDoWy4PQe127dqVmzZoJLtYL9aWmppKtrS1NnjyZcnNzqVu3buTs7Gx4noYpnDt3zmTfcmvFoqQmK6c54WsUHI5MCgoK8Ndff6Ft27YAADs7O9jb26NTp044ceIEiKjMPjs7O9jZ2QnqEhGOHTtW6rSTsd6+ffvCwcEBy5cvN1rP0inrvZs3byIqKgoDBw4UfGaEkGbjxo3h7++PJUuWoEOHDti3bx/mzZuHmjVrlqnDwlTfUnSkoqSmGv6Uhk8UHI4ILly4gNzcXLzyyisAgLVr12Lt2rXw8vJCeno6rl69WmZfcZ0QSUlJyMrKKnOiYPU6Ozvjo48+wtq1a5GUlCRYHxUVhbZt26Jr1664fv26qDE2btwIvV7PXHxmefv222/RunVrnD9/Hv369cPHH38sqMNi27ZtgmMY+3uVWisWJTVZOTWD2T7XmAl+6ukJ1pKTSP2s69atIwAUHx9PRE/OUcfGxhIA2rRpU5l9xs5lr1y5kgBQYmKiqN7/5szIyCAnJyfDHktP169cuZJsbGzIzs6OdDodubi40L///sscQ6/XU8uWLUs8e0JKrry8POrYsaOsc+98jcK88DUKCTWWgLXkJFI/66RJk8jR0bHEXkZEReeqHR0daeLEiZJ0P/zwQ6pVq5bRG/eKeTrnV199RQDo2LFjhtf0ej3NnTuXAJCvry89ePCAzp8/Tw4ODuTv78/UP3r0qNEFenNhLd+/WslZLmsU586dw9ChQwEUbXnQr18/+Pv7Y86cOdDr9YY6vV6PESNG4NdffxXUmjdvHvN9Dkdtzpw5g9atW6NSpZKPmbezs0O7du1w4sQJSbpHjx5Fx44djT4/WoiAgADUqVMHw4cPR3BwMIYMGYKXXnoJM2fOxJAhQ7B9+3Y4OTmhZcuWmDx5MjZs2ICTJ08K6n3zzTeoWbMm/P39JfnhWCaqTBQrVqzAzJkzkZeXBwAIDAzE9OnTsWHDBjg5OZXYU3/JkiW4d+9emTq3b9/GiBEjmA9r4XDUhogQFxdnWJ8Air7HV6xYAQDo0KEDzpw5Y/h+/y//rXuazMxMJCcnC95fwOotxsnJCRs2bMC///6LmTNnYufOnWjUqBGWLVuGdevWwd7e3qAzdepU1KpVC3Pnzi1zjAsXLuD333/HuHHjSu3tJMWbmBoWmzdvFuw3RVuuD7U1WTm1gioTRYMGDRAaGmr4OiMjAx4eHgCKdsCMjY0FAOzatQs6nQ7e3t5l6mRnZ2PChAno3bu3GjY5HFFcvnwZ9+7dM3wPAyU3hfPy8kJ+fj7i4uJK9bI2jzt27BgAlLmQbaz3v7zxxhu4ceMGfHx84OHhgZ
07d2LMmDGGTynFOk5OTvj000+xY8cOnDt3rtQY33zzDSpXrowJEyYYHVOMN7kb5/FNAbWDjkjguj6ZpKWlISAgAOHh4fDz80NAQAA8PT0RFBSE7OxsjBw5EkuXLsXSpUvx/fffw8XFBYMGDSpTKzQ0lPn+f4mLi0PlypWZNbm5uXBwcJCUqyJhLTkBdbPu3bsXn376KTZt2oRWrVqVej8jIwOvv/46pk2bZjjdKoaFCxdi3bp1iImJEe1dbs579+6hW7du8PHxwcKFCw2vX7t2DW+99Rb69++PwMBAyfpKYi3fv1rK2aJFizJfr1Tmqwozb948BAcHY+XKlWjVqhXs7e0RERGBjIwMvP/++7h27Rrs7OxQr149wU8XYqlcubJg2GISExON1lgC1pITUDfrtm3bABQ98rOsUzItWrRA/fr1cfnyZZM8XLhwAe3atStxSssYSuQcP348vvnmGyxZsgRubm4gInzxxReoXLkyFixYAFdXV1n6SmEt379ayZmYmCj4nlkmiujoaMybNw916tTBnDlz4O3tDR8fH8P7xZ8Y5E4SHI4aJCUloX79+iUmiR9++AEADPcIeHp64tSpU6V6n64rJjc3F6dPn2ae5hHqNbX+6dcnTZqEb7/9FgsWLICHhwdOnjyJHTt2YOHChaInCTHeTPX/NL/++iuee+65MvtN0ZbrQ21NVk6tYJYb7ho2bIhRo0bBz88PTk5OJSaJp7l06RKCgoLMYYvDEcWFCxfQvHnzEq9FRkaWuCijffv2SElJwZ07d5h1xcTGxpa5EaCYXlPrn369Tp06GD16NFatWoXPPvsM69atQ5cuXfDJJ5/IHkuO/6eJiooS7DdFW64PtTVZOTWDmS7RNRv8PoonWEtOIvWy6vV6cnZ2po8//phZt3fvXgJAe/bsEaW7YMECAkDp6ekm+VEqZ05ODvn5+dFrr71GCxcupIcPHyqiqyTW8v2rlZwsH2Y59cThVFRu3ryJe/fulfpE8TTFe0CdPn1a1OM+jx49imbNmqFOnTqK+DQVBwcHfm8SRzR8rycOh0HxPkpPTxTffvstvv32W8PXNWvWRLNmzUqtUzxdB7A3AjTWK6WepWPqGKb0SdUu5pdfflHEt1wfamuycmoFPlFwOAyEJor9+/dj//79JV5r164dTp8+bbQuOTkZt27dMjpRlNUrpZ6lY+oYpvRJ1S7mxIkTiviW60NtTVZOzWC+M2Dmga9RPMFachKpl3Xy5Mnk4OBAhYWFRmsXLVokat2h+FkSCQkJJvvhx9Ty0EpO/jwKDkciSUlJcHd3h42N8R+Vdu3aAUCpTxVPc/ToUdSsWRMvvPCCIh45HLXhEwWHwyApKanMheyFCxeWuLMZAF555RXodLoSE0VZdcUbARqbfMrqlVLP0jF1DFP6pGoXs3r1akV8y/WhtiYrp1bgVz1xOAI8fvwYqampZT7A5/jx46Veq1atGlq0aFFioni67tatW0hKSsL7779vdPyyxpBSz9IxdQxT+qRqF3Pu3DlUq1ZNtrZcH2prsnJqBjOeAjMLfI3iCdaSk0idrBcuXCAAtG7dOtE9w4YNozp16gg+XyI8PJwA0NGjRyV54sfU8tBKTr5GweFIIDk5GQDg7u4uuqd9+/bIyMjAtWvXynx/7969qF69Ojw9PRXxyOGYAz5RcDgCsCaK+fPnY/78+aVeL17QLr6f4r91RIQ9e/bgjTfeKPUApLIQGsPUepaOqWOY0idVu5gVK1Yo4luuD7U1WTm1Al+j4HAESE5OxrPPPouaNWuWeq+sZ08AQJs2beDg4IBDhw6hT58+JeouXryIK1eu4IsvvhA1vtAYptazdEwdw5Q+qdrFXLhwAdevX5etLdeH2pqsnFpBtedRlBdituzVyra+amMtOQF1snbp0gUFBQU4cuSISX1vvvkm0tPTcf78+RKvL1iwAFOnTsU///yDhg0bSvLEj6nloZWcLB/81BOHI0BycrJJ6xPFdOvWDfHx8UhPTy/x+ubNm+Hp6Sl5kuBwygs+UXA4ZfDgwQPcuHFDcKKYM2cO5syZU+Z7b731FoCiBx4V16WmpiI2Nhb9+vUT7YE1hin1LB1TxzClT6p2McuWLVPEt1wfamuycmoFvkbB4ZTBxYsXAQhf8VS8B1RZtGrVCi1btkRYWBgaN24MAPjpp59gY2NT5j0ZQrDGMKWepWPqGKb0SdUu5vLly7h9+7Zsbbk+1NZk5dQKgmsUr732GoCim45ycnJQt25dpKeno1atWjhw4IBZTZoCX6N4grXkBJTP+uuvv8Lf3x/x8fF46aWXTO4vXo9ISkrCM888Azc3N3Tr1g2bN2+W5YsfU8tDKzklrVEcOXIER44cQefOnbF7927s3r0be/bsQevWrVUzyuFoheTkZOh0OjRt2lRS/7Bhw1ClShUMGzYMgwcPxqNHjxAYGKiwSw7HPBhdo0hLS0PdunUBFD1C8caNG6qb4nDKm+TkZDRs2BAODg5lvv/ll1/iyy+/FOyvW7cu5s2bh5MnT2LPnj1YsmSJyf/IMjaG2HqWjqljmNInVbuY0NBQRXzL9aG2JiunVjC6RtG0aVN8/vnnaN26NeLi4gxP8uJwLBljVzxdvXrVqMYnn3yC7du3w9nZGWPHjjXZg5gxxNSzdEwdw5Q+qdrF3LhxAzk5ObK15fpQW5OVUysYvY9Cr9fj0KFDuHjxIpo0aYKuXbuay5sk+BrFE6wlJ6BsViKCs7MzPvjgAyxdulQRTaXgx9Ty0EpOWfdR3L9/Hw8fPsSzzz6L+/fv46efflLcIIejJTIyMvDgwQNJ91BwOJaI0Ynik08+QUxMDDZu3IiIiAicPXtWlPC5c+cwdOhQAEBCQgL69esHf39/zJkzB3q93lCn1+sxYsSIMh/0fuXKFQwaNAj+/v6YNWtWiT4ORy3EbAY4bdo0TJs2zaiW2DoleoXqWTpS/Ynpk5MdAEJCQhTxLdeH2pqsnFpB1A13s2fPRuPGjbFmzRrcu3fPaP2KFSswc+ZM5OXlAQACAwMxffp0bNiwAU5OToiMjDTULlmyRFDzf//7HyZOnIgNGzaAiLT/XFmORSBmosjKykJWVpZRLbF1SvQK1bN0pPoT0ycnOwDcu3dPEd9yfaitycqpFYyuUQwbNgwrVqzAlClTsHjxYrz77rv4/fffmaK7d+9G8+bN8cUXXyA8PBydOnXC0aNHAQDR0dHYv38/Zs+ejV27diExMRGVKlWCi4sLBg0aVEKnc+fOOHToEHQ6Hfbt24ejR49i1qxZzLHj4uJQuXJlZk1ubq7g1SyWhLXkBJTN+s0332D9+vWIjY2Fra2tIppKwY+p5aGlnEJrFEaveho8eDB+/vlndOrUCT4+PqKuevL19UVaWprha1dXV8TExMDT0xNRUVHIyclBcnIyduzYgaVLl+L7778vU4eIoNPpAABVq1bFgwcPjI5duXJlvpj9/7GWnICyWbOysuDu7o6WLVsqoqck/JhaHlrJmZiYKPie0YkiLy8Po0aNAlC0h42Tk5PJBubNm4fg4GCsXLkSrVq1g
r29PSIiIpCRkYH3338f165dg52dHerVqwdvb29D33+fKZydnY3q1aubPDaHYyqJiYlG73n47LPPAMDos47F1inRK1TP0pHqT0yfnOwA8PXXX6NWrVqyfcv1obYmK6dWMDpRhIeHo1evXgAgaZIAik43zZs3D3Xq1MGcOXPg7e0NHx8fw/uhoaFwcXEpMUkAwIsvvoiTJ0+iQ4cOOHToELy8vCSNz+GIJTc3FykpKaVOgz6N2Ove5Vwfb2qvUD1LR6o/MX1y7w3Iy8uTlElpH2prsnJqBaNrFAMGDEB+fj4aN25s+Bf+okWLjAqnpaUhICAA4eHhOHDgAL799ls4OjqiQ4cOmDRpUona4oli0KBBuHTpEsLCwhAUFITLly8jMDAQjx8/RpMmTTB37lyj54z5fRRPsJacgHJZ4+Li8Morr2DTpk0YMGCAAs6UhR9Ty0MrOVk+jH6iKP6IZSr169dHeHg4AOCNN97AG2+8IVg7YcIEw/83a9YMQUFBAIDGjRsjLCxM0vgcjhQSEhIAQNJGgByOpWJ0ouAPgedYE/Hx8bCzs4ObmxuzbuLEiQCKLu9Wok6JXqF6lo5Uf2L65GQHii6Pf+aZZ2T7lutDbU1WTq3AH1zE4fyHhIQEuLu7w97evrytcDiaQXCNIiYmBm3bttXcdeTG4GsUT7CWnIByWZs2bYp27dph06ZNCrhSHn5MLQ+t5JS0RpGQkIB169ahatWq6NSpE7y9vVGjRg21PHI45c6jR49w+fJlvP/+++VthcPRFIITxYcffogPP/wQDx8+xKFDh7BgwQI8ePAArVu3NtxXweFYEomJiSAiUQvZ48aNAwDBm0VNrVOiV6iepSPVn5g+OdmBoudS16xZU7ZvuT7U1mTl1ApGF7OdnJzQo0cP9OjRA0SEuLg4M9jicMxPfHw8AHFXPDk6OorSFFunRK9QPUtHqj8xfXKyA0W7LEjJpLQPtTVZObWC0fsoKhp8jeIJ1pITUCbr+PHj8fPPP+Pu3buaXZvjx9Ty0EpOWc+j4HCshePHj8PT01OzkwSHU16ImiiysrJw/fp1wx8Ox9J49OgRzp07h1dffVVU/ahRo0St1YmtU6JXqJ6lI9WfmD452QFg1qxZiviW60NtTVZOrWB0jSIoKAiHDh1C7dq1Dbu5bty40RzeOByzcfr0aRQWForeT6xWrVqK1inRK1TP0pHqT0yfnOwA4OzsLCmT0j7U1mTl1AxkhD59+lBhYaGxMs3w999/K1JjCVhLTiL5WefPn08AKDMzUyFH6sCPqeWhlZwsH0ZPPTVs2NDwpDoOx1I5ceIE3Nzc4OLiUt5WOBzNYfTU040bN/D666+jYcOGAMBPPXEsDiLC8ePH0b17d9E9H374IQBgzZo1itQp0StUz9KR6k9Mn5zsADB9+nTUqFFDtm+5PtTWZOXUCkYnCjFbinM4FZkrV64gIyND9EI2UPTURiXrlOgVqmfpSPUnpk9OdgCoW7eu4Cc8U7Tl+lBbk5VTKxi9jyI9PR3z5s1DSkoKGjVqhGnTpqF+/frm8mcy/D6KJ1hLTkBe1g0bNmDw4ME4e/Ys2rRpo6wxheHH1PLQSk5Z91HMnDkTvXv3xq+//oo+ffpgxowZihvkcMqTP/74Ay4uLmjVqlV5W+FwNInRiSIvLw9du3ZF9erV0a1bNxQUFJjDF4djFh4/foydO3eiZ8+eJt1oN2TIEAwZMkSxOiV6hepZOlL9iemTkx0AvvjiC0V8y/WhtiYrp1YwukZRWFiIpKQkNG/eHElJSdDpdObwxeGYhcjISNy9exd9+/Y1qa958+aK1inRK1TP0pHqT0yfnOxA0RMun332Wdnacn2orcnKqRWMrlH8/fffCAwMxM2bN1GnTh3MmTNHE+fThOBrFE+wlpyA9Ky+vr5ITEzE5cuXK8TWHfyYWh5aySnrmdkvvvgitmzZorgpDqe8uXDhAvbs2YPZs2dXiEmCwykvBNcoPvnkEwDAa6+9VuqPGM6dO4ehQ4cCKHoIUr9+/eDv7485c+ZAr9cDANavX4++ffuiX79+iIqKKqUh1MfhKMHs2bNRtWpVjBkzxuRePz8/+Pn5KVanRK9QPUtHqj8xfXKyA8DkyZMV8S3Xh9qarJxaQfATxdKlSwEAmzdvRt26dQ2vp6SkGBVdsWIFtm/fbthjPTAwEDNnzoSHhwcWL16MyMhIdO7cGRs2bEBERATy8vLw9ttvo0uXLiXWQMrq6927t+SwHE4x8fHx2LhxI6ZMmSLp/LDYy2jlXG5raq9QPUtHqj8xfXIvNX7hhRdQu3Zt2dpqXPKspCYrp2YQ2tsjKSmJDh06RL169aIjR47Q4cOHKTo6mnr16mV0z5Bdu3bR5cuXqX///kRE1LFjR8N7Bw8epMDAQCIievz4MRERXb58mXr27FlKR6iPBd/r6QnWkpPItKx6vZ58fHyoZs2adOvWLRVdKQ8/ppaHVnKyfAh+orh//z527tyJrKws7NixA0DR9h3+/v5GJx9fX1+kpaUZvnZ1dUVMTAw8PT0RFRWFnJwcAEClSpUQFhaG0NBQw2mq/yLUxyIvLw+JiYnMmtzcXKM1loC15ARMyxoZGYno6GjMmjULN2/exM2bN1V2pxz8mFoeFSGn0aueEhISRD0a8mnS0tIQEBCA8PBwpKamIjg4GLa2tmjVqhUePHiA6dOnG2rz8/MxcuRIjB07tsQ2z8b6yoJf9fQEa8kJiM9648YNtGrVCk2bNsWxY8ckL2IXX05r7EIPsXVie1k5hcZieZDqT0yfnOwA0L17d1SrVk22b7k+1NZk5TQnkq56mj17Nr788kvMnj271L0Tpm4KGB0djXnz5hkur/X29kZqaipCQkIQGhoKOzs72Nvbw8bGxmgfhyMVIsLw4cPx6NEj/Pzzz7KudBK7L5Qp+0fJ7RWqZ+lI9SemT052AHj55ZdRp04d2dpyfaitycqpGYTOSRXvy5+WllbqjxiuXr1qWKPYv38/9erViwYOHEghISGGmtDQUOrfvz8NGDCAQkNDiYjo4sWLNGvWLGYfC75G8QRryUkkLuuSJUsIAH333XdmcKQO/JhaHlrJyfJh9NTTxYsX8fDhQ9jY2CAkJARjxoxRZYZWCn7q6QnWkhMwnvXs2bPw8vKCr68vfv/99wq7wwA/ppaHVnLK2hRw1qxZsLe3x7JlyzBp0iR89913ihvkcNQkOzsbgwYNgouLC1avXq3IJNGrVy/06tVLsToleoXqWTpS/Ynpk5MdAMaNG6eIb7k+1NZk5dQKRu/MrlSpEtzc3PD48WO0adMGhYWF5vDF4SjGlClTkJycjP379yu273/Xrl0VrVOiV6iepSPVn5g+OdkBwMvLS/DcvSnacn2orcnKqRWMnnp6//33Ub16dbRr1w7PPvssNm/erOknMfFTT0+wlpyAcNaYmBh4eXlhwoQJ+Pbbb8vBmbLwY2p5aCWnrL2eFi9ejPPnz8PHxwcnT57E4sWLFTfI4ahBQUEBxowZg7p162LOnDnlbYfDqbAYnSjs7e1x4sQJrF+/Ho0aNVJl
y14ORw2+//57nD17FuHh4ahevbqi2m+99RYA4M8//1SkToleoXqWjlR/YvrkZAeAUaNGwcnJSbZvuT7U1mTl1ApGJ4rp06ejffv26NWrF2JiYjB16lT8+OOP5vDG4UgmIyMDX375JXx9fdGvXz/F9d955x1F65ToFapn6Uj1J6ZPTnYAeP311/Hcc8/J1pbrQ21NVk7NYOza2iFDhpT4etCgQfIu1lUZfh/FE6wlJ1HprKNHj6ZKlSpRUlJSOTlSB2s+ppaKVnKyfIh6FGpmZiYA4NatW3yrb47mSUhIwIoVK/Dxxx/D3d29vO1wOBUeo6eePv30U/j5+cHJyQnZ2dl8UZCjeb744gtUq1YNgYGBqo3RrVs3AMC+ffsUqVOiV6iepSPVn5g+OdkBYPjw4ahataps33J9qK3JyqkVjE4UnTp1wu7du3Hr1i3UqVOnwt7RyrEO9u3bh507d+Lrr79W7J6Jshg4cKCidUr0CtWzdKT6E9MnJztQtGD832fhSNWW60NtTVZOrWD0Poo9e/Zg/vz5cHZ2xsOHDxEUFIROnTqZy5/J8PsonmAtOYGirO7u7mjbti3u3r2LCxcuwMHBobxtKY61HVNryKqVnLLuo/jhhx+wefNm1KpVC7du3cKYMWM0PVFwrJewsDCcO3cOGzZssMhJgsMpL4wuZteoUQO1atUCALi4uMDJyUl1UxyOqeTk5GDmzJlo3769KqcanqZLly7o0qWLYnVK9ArVs3Sk+hPTJyc7ULQrhBK+5fpQW5OVUysY/UTh5OSEjz76CO3bt0dCQgJyc3MREhICAAgICFDdIIcjhnXr1iEtLQ3r168v9VwTNfjggw8UrVOiV6iepSPVn5g+OdkB4N1338Xzzz8vW1uuD7U1WTm1gtE1im3btgm+16dPH8UNyYWvUTzBWnLevHkTTZo0Qbdu3RAREVHedlTFWo4pYD1ZtZJT1hqFFicDDue/fPXVV8jNzcWCBQvMNubjx48BAHZ2dorUKdErVM/SkepPTJ+c7MX9jx8/lu1brg+1NVk5tYLRiYLD0TJJSUn46aefMGDAALPuQ/bmm28CAA4ePKhInRK9QvUsHan+xPTJyQ4AI0aMQJUqVWT7lutDbU1WTq3AJwpOhWbKlCmoUqUKPv74Y7OOO2LECEXrlOgVqmfpSPUnpk9OdgDo27cv6tWrJ1tbrg+1NVk5tYLRNYqKBl+jeIKl5zx8+DC8vb0RHByMPn36WHTWYiz9mP4Xa8mqlZyyHoXK4WgRIsJnn32GevXqYeLEiWYf/9GjR3j06JFidUr0CtWzdKT6E9MnJztQdMmzEr7l+lBbk5VTK/BTT5wKSUREBGJiYrB69WpUqVLF7OP36NEDgPFz1GLrlOgVqmfpSPUnpk9OdgAYM2aM4Ll7U7Tl+lBbk5VTK6g2UZw7dw4LFy7EL7/8goSEBMyaNQv29vZo0aIFZsyYARsbG6xfvx5bt26FTqfDuHHj8Prrr5fQSExMxKxZs2Bra4tGjRohODjYLNfIc7QNEWH27Nlo3rw5hg4dWi4exo4dq2idEr1C9Swdqf7E9MnJDgB+fn6C5+5N0ZbrQ21NVk7NoMa+5suXL6eePXtS//79iYioT58+FBsbS0REISEhFBERQVlZWdSjRw/Kz8+nBw8ekLe3N+n1+hI6H3/8MR08eJCIiAICAmj//v1Gx+bPo3iCpebct28fAaBVq1YZXrPUrE9jLTmJrCerVnLKeh6FFBo0aIDQ0FDD1xkZGfDw8AAAeHh4IDY2Fs888wx+//132NnZ4datW6hevXqpnWlbtGiBu3fvgoiQnZ2NSpX4mTIOEBISgtq1a8Pf37/cPNy7dw/37t1TrE6JXqF6lo5Uf2L65GQHgAcPHijiW64PtTVZObWCKr95fX19kZaWZvja1dUVMTEx8PT0RFRUFHJycooGr1QJYWFhCA0NLfMUQqNGjTB79mwsW7YM1apVQ4cOHYyOnZeXh8TERGZNbm6u0RpLwBJzpqSkYOfOnZgwYQIuX75seN3cWd9//30AwM8//6xIndheVk6hsVgepPoT0ycnOwB8/PHHsLGxke1brg+1NVk5NYNaH2OuXr1qOPWUkpJCw4cPp5EjR9LSpUspODi4RG1eXh4NGzaMjh8/XuJ1Ly8vSk5OJiKisLAwCgoKMjouP/X0BEvMOWLECHJwcKCbN2+WeN3cWbds2UJbtmxRrE5sLyun0FgsD1L9iemTk52I6Ntvv1XEt1wfamuycpoT1veWWSaK1atXU3p6OhERzZ49mw4ePEgpKSk0btw40uv1pNfracSIEXTy5MkSGr6+vnT9+nUiItqzZw8FBAQYHZdPFE+wtJx3796lypUr06hRo0q9Z2lZhbCWnETWk1UrOVk+zHLSv2HDhhg1ahQcHR3RoUMH+Pj4AABeeOEFDBw4EDqdDp07d4anpycuXbqEsLAwBAUFYe7cuZg0aRIqVaoEOzs7/hhWK2fHjh3Iy8vDhx9+WN5WcOvWLQAw+hQ9sXVK9ArVs3Sk+hPTJyc7ANy5cwe3bt2S7VuuD7U1WTm1Ar8z24KxtJx9+/bFiRMncPXq1VKXSZs7a/HzA4xd+y62TmwvK6fQWCwPUv2J6ZOTHQA8PT0F7y8wRVuuD7U1WTnNiazdYzkcLfDo0SP8+eefGD58uCbupZk8ebKidUr0CtWzdKT6E9MnJztQ9MwHV1dX2dpyfaitycqpFfgnCgvGknJu3boVffv2xYEDB0rdmAlYVlYW1pITsJ6sWsnJ93riVHgiIiJQq1YtdO7cubytAADS09ORnp6uWJ0SvUL1LB2p/sT0yckOAJmZmYr4lutDbU1WTq3ATz1xNA8R4cCBA3jzzTc1c9Oln58fAOPnqMXWKdErVM/SkepPTJ+c7ADw2WefCZ67N0Vbrg+1NVk5tYI2fuo4HAYpKSm4du2a4Wo5LTB16lRF65ToFapn6Uj1J6ZPTnag6JkPDRo0kK0t14famqycWoGvUVgwlpJz1apVGDFiBBITE/HCCy+UWWMpWY1hLTkB68mqlZx8jYJToTl48CDq1Klj1kedGuPq1au4evWqYnVK9ArVs3Sk+hPTJyc7ANy4cUMR33J9qK3JyqkV+KknjqYhIkRHR8Pb27vUppHlSfHeZMbOK4utU6JXqJ6lI9WfmD452YGi0ztC5+5N0ZbrQ21NVk6twCcKjqa5fPkyrl69qsp5ZjnMnDlT0ToleoXqWTpS/Ynpk5MdAEaPHo2GDRvK1pbrQ21NVk6twNcoLBhLyLlmzRoMHz4c8fHxeOmllwTrLCGrGKwlJ2A9WbWSk69RcCosBw8ehIuLC1588cXytlKC1NRUpKamKlanRK9QPUtHqj8xfXKyA0XrAEr4lutDbU1WTq3ATz1xNE10dDR8fHw0tT4BAMOHDwdg/By12DoleoXqWTpS/Ynpk5MdKDq9I3Tu3hRtuT7U1mTl1Ap8ouBolhs3buDKlSuYOHFieVspxVdffaVonRK9QvUsHan+xPTJyQ4A48ePFzx3b4q2XB9qa7JyagW+RmHBVPScO3b
swDvvvIPDhw/jtddeY9ZW9KxisZacgPVk1UpOvkbBqZCcOXMGOp0Obdq0KW8rpUhKSkJSUpJidUr0CtWzdKT6E9MnJztQdMWbEr7l+lBbk5VTK/BPFBZMRc/Zu3dvJCcni3oWNn8eBX8ehVo+1Nbkz6PgcGQQGxtr+IHUGvPmzVO0ToleoXqWjlR/YvrkZAeAiRMnolGjRrK15fpQW5OVUyvwiYKjSTIyMnDt2jV4eHiUt5Uy6dixo6J1SvQK1bN0pPoT0ycnOwC88sorgv/CNUVbrg+1NVk5tQJfo+BokjNnzgAA2rZtW85OyiY+Ph7x8fGK1SnRK1TP0pHqT0yfnOwAcPHiRUV8y/WhtiYrp1bgnyg4mqR4otDiQjZQdEkjYPwctdg6JXqF6lk6Uv2J6ZOTHQDmzp0reO7eFG25PtTWZOXUCnyi4GiS2NhYuLm5wdnZubytlMk333yjaJ0SvUL1LB2p/sT0yckOFD3Qp3HjxrK15fpQW5OVUyuodtXTuXPnsHDhQvzyyy9ISEjArFmzYG9vjxYtWmDGjBmwsbHB+vXrsXXrVuh0OowbN67Us5AnTZqEW7duAQCuXbuGl19+GYsXL2aOy696ekJFztmoUSN4eXlh48aNouorclZTsJacgPVk1UpOs1/1tGLFCmzfvh2Ojo4AgMDAQMycORMeHh5YvHgxIiMj0blzZ2zYsAERERHIy8vD22+/jS5dupTYqqF4Urh37x6GDRuGadOmqWGXozGysrJw5coVjBs3rrytCBIXFwfA+KkxsXVK9ArVs3Sk+hPTJyc7UPSLKy8vT7ZvuT7U1mTl1AykArt27aLLly9T//79iYioY8eOhvcOHjxIgYGBRET0+PFjIiK6fPky9ezZU1Bvzpw5FB4eLmrsv//+W5EaS6Ci5tyzZw8BoH379onuMXdWHx8f8vHxUaxObC8rp9BYLA9S/Ynpk5OdiKh9+/aK+JbrQ21NVk5zwvreUu3UU1paGgICAhAeHg4/Pz8EBATA09MTQUFByM7ONpzjCwsLQ2hoKIYOHWpYIPovWVlZGDZsGLZv3w5bW1uj48bFxaFy5crMmtzcXDg4OEgLVoGoqDlXrlyJkJAQHD9+XPQahbmzFt8EKOY0p5g6sb2snEJjsTxI9SemT052oOj0dfHpajnacn2orcnKaW6EPJhlokhNTUVwcDBsbW3RqlUrPHjwANOnTzfU5ufnY+TIkRg7diy8vLxK6Kxfvx7379/H2LFjRY3L1yieUFFzDho0CCdOnMDly5dF91TUrKZiLTkB68mqlZzlvtdTdHQ05s2bh+XLl+Pu3bvo1KkTUlNTMX78eBAR7OzsYG9vDxub0naOHz8Ob29vc9jkaIS4uDi8/PLL5W2DyalTp3Dq1CnF6pToFapn6Uj1J6ZPTnYAOH/+vCK+5fpQW5OVUyuY5fLYhg0bYtSoUXB0dESHDh3g4+MDAHjhhRcwcOBA6HQ6dO7cGZ6enrh06RLCwsIQFBQEoGjDLFdXV3PY5GiA7OxsJCUlYeDAgeVthcnnn38OwPh19GLrlOgVqmfpSPUnpk9OdgBYuHCh4P0FpmjL9aG2JiunVuCbAlowFTHnyZMn4eXlhW3btuHdd98V3WfurMV30rZs2VKROrG9rJxCY7E8SPUnpk9OdgDYvn07mjRpItu3XB9qa7JymhO+KSCnwqDGpYxqIPaHWs4Pv6m9QvUsHan+xPTJ/cXn5uYm+IvLFG01fgErqcnKqRX4Xk8cTXH27FnUqFFD80/8OnbsGI4dO6ZYnRK9QvUsHan+xPTJyQ4UfS8o4VuuD7U1WTm1Aj/1ZMFUxJzt2rVDjRo1sG/fPpP6+PMo+PMo1PKhtiZ/HgWHYwJ5eXn466+/MGnSpPK2YpSffvpJ0ToleoXqWTpS/Ynpk5MdAIKCgtC0aVPZ2nJ9qK3JyqkV+ETB0Qzx8fF4/Pgx2rVrV95WjNK8eXNF65ToFapn6Uj1J6ZPTnYAaNy4saRMSvtQW5OVUyvwNQqOZjh9+jQA7T6D4r9ER0cjOjpasToleoXqWTpS/Ynpk5MdKLpXQQnfcn2orcnKqRX4GoUFU9Fyjhw5Elu2bEFWVlaJzSHFwNco+BqFWj7U1uRrFByOCcTGxqJt27YmTxLlwerVqxWtU6JXqJ6lI9WfmD452YGiB/o0a9ZMtrZcH2prsnJqBT5RcDRBbm4uzp8/j8mTJ5e3FVE0adJE0ToleoXqWTpS/Ynpk5MdAFxdXSVlUtqH2pqsnFqBr1FwNEFMTAwKCgrw6quvlrcVUezbt0/UJbxi65ToFapn6Uj1J6ZPTnag6F4FJXzL9aG2JiunVuBrFBZMRcoZHByMmTNn4tatW6hVq5bJ/XyNgq9RqOVDbU2+RsHhiOTw4cN48cUXJU0S5cEvv/yiaJ0SvUL1LB2p/sT0yckOAPPnz4ebm5tsbbk+1NZk5dQKfKLglDuFhYU4duwY/P39y9uKaMTuaCxn52NTe4XqWTpS/Ynpk7vrc926dSVlUtqH2pqsnFqBr1Fwyp0TJ07gwYMHho/zFYFdu3Zh165ditUp0StUz9KR6k9Mn5zsQNGnTCV8y/WhtiYrp1bgaxQWTEXJ+dlnn2Hp0qXIzMwU/ejTp+FrFHyNQi0famtWhDUKPlFYMBUhJxHBzc0Nbm5u+PPPPyXrmDtreno6AOC5555TpE5sLyun0FgsD1L9iemTkx0ADh06BHd3d9m+5fpQW5OV05zwxWyOZomPj0dKSorhiWEVBbE/1HJ++E3tFapn6Uj1J6ZP7i++Z599VlImpX2orcnKqRX4GgWnXNm2bRt0Oh169+5d3lZMIjIyEpGRkYrVKdErVM/SkepPTJ+c7AAQFRWliG+5PtTWZOXUCvzUkwWj9ZxEhNatW6N69eo4evSoLC2+RsHXKNTyobZmRVij4KeeOOXG3r17ER8fj5UrV5a3FZP57bffFK1ToleonqUj1Z+YPjnZAWDJkiVwd3eXrS3Xh9qarJxagU8UnHJBr9cjKCgI9erVw9ChQ8vbjsm4uLgoWqdEr1A9S0eqPzF9crIDQM2aNSVlUtqH2pqsnFpBtTWKc+fOGX4BJCQkoF+/fvD398ecOXOg1+sBAOvXr0ffvn3Rr18/REVFldLIysrC2LFjMXjwYPj5+eHff/9Vyy7HzCxevBjHjx/H3LlzYW9vX952TGbr1q3YunWrYnVK9ArVs3Sk+hPTJyc7UPSJUwnfcn2orcnKqRlIBZYvX049e/ak/v37ExFRnz59KDY2loiIQkJCKCIigrKysqhHjx6Un59PDx48IG9vb9Lr9SV0pkyZQn/88QcRER0/fpyioqKMjv33338rUmMJaDFnYWEhLVmyhCpVqkR9+vQpdcylYu6sPj4+5OPjo1id2F5WTqGxWB6k+hPTJyc7EVH79u0V8S3Xh9qarJzmhPW9pcpi9u7du9G8eXN88cUXCA8PR6dOnQyLldHR0di/fz9mz56NgoICVKpUCf/88w8mTJhQauW/e/fuGDRoEKKjo1GvXj3MmDEDVapUYY4dFxeHypUrM2
[... base64-encoded PNG output omitted: wheel trace for the selected trial ...]\n" + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "n = 403 # trial number\n", "start, end = trial_data['intervals'][n,] # trial intervals\n", @@ -427,9 +535,18 @@ }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": "", + "image/png": "[... base64-encoded PNG output omitted: wheel traces for three random trials ...]\n" + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "n_trials = 3 # Number of trials to plot\n", "# Randomly select the trials to plot\n", @@ -488,4 +605,4 @@ }, "nbformat": 4, "nbformat_minor": 2 -} +} \ No newline at end of file
diff --git a/brainbox/examples/gen_phy_metrics.py b/brainbox/examples/gen_phy_metrics.py index 2fa0dcd90..554c02415 100644 --- a/brainbox/examples/gen_phy_metrics.py +++ b/brainbox/examples/gen_phy_metrics.py @@ -14,6 +14,7 @@ from brainbox import metrics from brainbox.processing import get_units_bunch + def gen_metrics(exp_id, ks_dir, probe_label, ephys_file_path=None): """ Tries to generate single unit metrics for all units metric-by-metric and save the metrics
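Several hunks in this diff move wheel analyses from `velocity`/`velocity_smoothed` to `velocity_filtered` (the renamed call is exercised by the updated tests further below). A minimal self-contained sketch of the new call on synthetic data — the random-walk trace is illustrative only, not repository data:

```python
import numpy as np
import brainbox.behavior.wheel as wh

# synthetic wheel trace: 10 s of random-walk position sampled at 1 kHz
Fs = 1000
t = np.arange(0, 10, 1 / Fs)
pos = np.cumsum(np.random.randn(t.size)) * 1e-3  # position in radians

# low-pass filtered velocity and acceleration, replacing velocity_smoothed
vel, acc = wh.velocity_filtered(pos, Fs)
assert vel.shape == pos.shape  # same check as the new test_velocity_filtered
```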
\n", "Example 4: Fit data from .5 to 1, using weibull\n", "\n", - "### Advanced: \n", + "### Advanced:\n", "Add error bars to data, parameters, and fits" ] }, @@ -35,7 +35,7 @@ "metadata": {}, "source": [ "## Example 1\n", - "Fit data from 0 to 1 and stimulus in log units, using erf " + "Fit data from 0 to 1 and stimulus in log units, using erf" ] }, { @@ -71,8 +71,8 @@ "ntrials = 40\n", "dd = np.random.binomial(1, pp, size=(ntrials,nxx))\n", "\n", - "# data: \n", - "# 3 x n matrix where first row corrsponds to stim levels (log units), \n", + "# data:\n", + "# 3 x n matrix where first row corrsponds to stim levels (log units),\n", "# the second to number of trials for each stim level (int),\n", "# the third to proportion correct (float between 0 and 1)\n", "data = np.vstack((np.log10(xx), 10 * np.ones((nxx,)), np.mean(dd, axis=0)))\n", @@ -141,8 +141,8 @@ " 'nfits': 10\n", "}\n", "\n", - "# data: \n", - "# 3 x n matrix where first row corrsponds to stim levels (% contrast), \n", + "# data:\n", + "# 3 x n matrix where first row corrsponds to stim levels (% contrast),\n", "# the second to number of trials for each stim level (int),\n", "# the third to proportion rightward (float between 0 and 1)\n", "data = np.vstack((xx, ntrials * np.ones((nxx,)), np.mean(dd, axis=0)))\n", @@ -215,8 +215,8 @@ " 'nfits': 10\n", "}\n", "\n", - "# data: \n", - "# 3 x n matrix where first row corrsponds to stim levels (% contrast), \n", + "# data:\n", + "# 3 x n matrix where first row corrsponds to stim levels (% contrast),\n", "# the second to number of trials for each stim level (int),\n", "# the third to proportion rightward (float between 0 and 1)\n", "data = np.vstack((xx, ntrials * np.ones((nxx,)), np.mean(dd, axis=0)))\n", @@ -275,12 +275,12 @@ "ntrials = 80\n", "dd = np.random.binomial(1., pp, size=(ntrials, nxx))\n", "\n", - "# data: \n", - "# 3 x n matrix where first row corrsponds to stim levels (notice we do NOT take log of x values), \n", + "# data:\n", + "# 3 x n matrix where first row corresponds to stim levels (notice we do NOT take log of x values),\n", "# the second to number of trials for each stim level (int),\n", "# the third to proportion correct (float between 0 and 1)\n", "data = np.vstack((xx, ntrials * np.ones((nxx,)), np.mean(dd, axis=0)))\n", - "# fit to reconstruct the parameters \n", + "# fit to reconstruct the parameters\n", "pars, L = psy.mle_fit_psycho(data, 'weibull50');\n", "\n", "# graphics\n", diff --git a/brainbox/examples/raster_cluster_ordered.py b/brainbox/examples/raster_cluster_ordered.py index d9c861b3a..e931eeabf 100644 --- a/brainbox/examples/raster_cluster_ordered.py +++ b/brainbox/examples/raster_cluster_ordered.py @@ -5,7 +5,7 @@ from oneibl.one import ONE import alf.io as ioalf import ibllib.plots as iblplt -from brainbox.processing import bincount2D +from iblutil.numerical import bincount2D T_BIN = 0.01 diff --git a/brainbox/examples/raster_clusters.py b/brainbox/examples/raster_clusters.py index 500dbe3ef..84901125a 100644 --- a/brainbox/examples/raster_clusters.py +++ b/brainbox/examples/raster_clusters.py @@ -6,7 +6,7 @@ import alf.io as ioalf import ibllib.plots as iblplt -from brainbox.processing import bincount2D +from iblutil.numerical import bincount2D T_BIN = 0.01 diff --git a/brainbox/examples/raster_depths.py b/brainbox/examples/raster_depths.py index 872f7c07c..e1f17deb8 100644 --- a/brainbox/examples/raster_depths.py +++ b/brainbox/examples/raster_depths.py @@ -8,7 +8,7 @@ from oneibl.one import ONE import ibllib.plots as iblplt -from 
diff --git a/brainbox/examples/raster_depths.py b/brainbox/examples/raster_depths.py index 872f7c07c..e1f17deb8 100644 --- a/brainbox/examples/raster_depths.py +++ b/brainbox/examples/raster_depths.py @@ -8,7 +8,7 @@ from oneibl.one import ONE import ibllib.plots as iblplt -from brainbox.processing import bincount2D +from iblutil.numerical import bincount2D from brainbox.io import one as bbone T_BIN = 0.05 diff --git a/brainbox/examples/raster_per_trial.py b/brainbox/examples/raster_per_trial.py index e7e7d353e..6cd8a1dce 100644 --- a/brainbox/examples/raster_per_trial.py +++ b/brainbox/examples/raster_per_trial.py @@ -1,6 +1,6 @@ import numpy as np import alf.io -from brainbox.processing import bincount2D +from iblutil.numerical import bincount2D import matplotlib.pyplot as plt import ibllib.plots as iblplt diff --git a/brainbox/examples/wheel_moves.ipynb b/brainbox/examples/wheel_moves.ipynb index 744ff5b51..10edcafba 100644 --- a/brainbox/examples/wheel_moves.ipynb +++ b/brainbox/examples/wheel_moves.ipynb @@ -29,41 +29,20 @@ "source": [ "%matplotlib notebook\n", "\n", - "import re\n", "import numpy as np\n", "import matplotlib.pyplot as plt\n", "import seaborn as sns\n", + "from one.api import ONE\n", "\n", "from brainbox.io.one import load_wheel_reaction_times\n", "import brainbox.behavior.wheel as wh\n", "from ibllib.io.extractors.ephys_fpga import extract_wheel_moves\n", "from ibllib.io.extractors.training_wheel import extract_first_movement_times\n", - "# from ibllib.misc.exp_ref import eid2ref\n", - "from one.api import ONE\n", "\n", "one = ONE()\n", "sns.set_style('whitegrid')" ] }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "# NB: This function will soon be available from ibllib.misc.exp_ref\n", - "def eid2ref(eid):\n", - " \"\"\"\n", - " Get human-readable session ref from path\n", - " :param eid: The experiment uuid to find reference for\n", - " :return: dict containing 'subject', 'date' and 'sequence'\n", - " \"\"\"\n", - " path_str = str(one.path_from_eid(eid))\n", - " pattern = r'(?P<subject>[\w-]+)([\\\\/])(?P<date>\d{4}-\d{2}-\d{2})(\2)(?P<sequence>\d{3})'\n", - " match = re.search(pattern, path_str)\n", - " return match.groupdict()" - ] - }, { "cell_type": "code", "execution_count": 4, @@ -86,7 +65,7 @@ ], "source": [ "eid = 'eafbdb1a-8776-4390-b210-76b7509e31d0'\n", - "eid2ref(eid)" + "one.eid2ref(eid, as_dict=False)" ] }, { diff --git a/brainbox/examples/xcorr_numpy.py b/brainbox/examples/xcorr_numpy.py index 6d38556d9..43546e341 100644 --- a/brainbox/examples/xcorr_numpy.py +++ b/brainbox/examples/xcorr_numpy.py @@ -5,7 +5,7 @@ from oneibl.one import ONE import alf.io as ioalf -from brainbox.processing import bincount2D +from iblutil.numerical import bincount2D class Bunch(dict): diff --git a/brainbox/io/one.py b/brainbox/io/one.py index 1264f0ba7..b63faebbe 100644 --- a/brainbox/io/one.py +++ b/brainbox/io/one.py @@ -723,7 +723,7 @@ def load_iti(trials): np.array An array of inter-trial intervals, the last value being NaN. """ - if not {'intervals', 'stimOff_times'} <= trials.keys(): + if not {'intervals', 'stimOff_times'} <= set(trials.keys()): raise ValueError('trials must contain keys {"intervals", "stimOff_times"}') return np.r_[(np.roll(trials['intervals'][:, 0], -1) - trials['stimOff_times'])[:-1], np.nan] diff --git a/brainbox/lfp.py b/brainbox/lfp.py index 29b2e2d92..8b407d2f3 100644 --- a/brainbox/lfp.py +++ b/brainbox/lfp.py @@ -1,10 +1,9 @@ # -*- coding: utf-8 -*- """ -Created on Fri Mar 13 14:57:53 2020 - -Functions to analyse LFP signals +Functions to analyse LFP signals.
@author: Guido Meijer +Created on Fri Mar 13 14:57:53 2020 """ from scipy.signal import welch, csd, filtfilt, butter diff --git a/brainbox/metrics/electrode_drift.py b/brainbox/metrics/electrode_drift.py index 12fee83e5..3b2ac3bc1 100644 --- a/brainbox/metrics/electrode_drift.py +++ b/brainbox/metrics/electrode_drift.py @@ -1,7 +1,7 @@ import numpy as np from neurodsp import smooth, utils, fourier -from brainbox.processing import bincount2D +from iblutil.numerical import bincount2D def estimate_drift(spike_times, spike_amps, spike_depths, display=False): diff --git a/brainbox/metrics/single_units.py b/brainbox/metrics/single_units.py index 4200464f8..12a8f7593 100644 --- a/brainbox/metrics/single_units.py +++ b/brainbox/metrics/single_units.py @@ -26,12 +26,11 @@ import spikeglx from phylib.stats import correlograms from iblutil.util import Bunch -from iblutil.numerical import ismember, between_sorted +from iblutil.numerical import ismember, between_sorted, bincount2D from slidingRP import metrics from brainbox import singlecell from brainbox.io.spikeglx import extract_waveforms -from brainbox.processing import bincount2D from brainbox.metrics import electrode_drift diff --git a/brainbox/plot.py b/brainbox/plot.py index 2bf0fd1a9..d997adc6b 100644 --- a/brainbox/plot.py +++ b/brainbox/plot.py @@ -26,8 +26,8 @@ # from matplotlib.ticker import StrMethodFormatter from brainbox import singlecell from brainbox.metrics import single_units -from brainbox.processing import bincount2D from brainbox.io.spikeglx import extract_waveforms +from iblutil.numerical import bincount2D import spikeglx diff --git a/brainbox/plot_base.py b/brainbox/plot_base.py index 62e7ba9cd..cbba05aff 100644 --- a/brainbox/plot_base.py +++ b/brainbox/plot_base.py @@ -229,9 +229,9 @@ def __init__(self, img, x, y, cmap=None): """ # Make sure we have inputs as lists, can get input from arrange_channels2banks - assert type(img) == list - assert type(x) == list - assert type(y) == list + assert isinstance(img, list) + assert isinstance(x, list) + assert isinstance(y, list) data = Bunch({'x': x, 'y': y, 'c': img}) super().__init__('probe', data) diff --git a/brainbox/population/cca.py b/brainbox/population/cca.py index cfcd7ca66..3c339e714 100644 --- a/brainbox/population/cca.py +++ b/brainbox/population/cca.py @@ -1,6 +1,6 @@ import numpy as np import matplotlib.pylab as plt -from brainbox.processing import bincount2D +from iblutil.numerical import bincount2D def _smooth(data, sd): diff --git a/brainbox/processing.py b/brainbox/processing.py index 2eb2d8d81..a1713704f 100644 --- a/brainbox/processing.py +++ b/brainbox/processing.py @@ -7,7 +7,13 @@ import pandas as pd from scipy import interpolate, sparse from brainbox import core +from iblutil.numerical import bincount2D as _bincount2D from iblutil.util import Bunch +import logging +import warnings +import traceback + +_logger = logging.getLogger(__name__) def sync(dt, times=None, values=None, timeseries=None, offsets=None, interp='zero', @@ -131,45 +137,14 @@ def bincount2D(x, y, xbin=0, ybin=0, xlim=None, ylim=None, weights=None): :param weights: (optional) defaults to None, weights to apply to each value for aggregation :return: 3 numpy arrays MAP [ny,nx] image, xscale [nx], yscale [ny] """ - # if no bounds provided, use min/max of vectors - if xlim is None: - xlim = [np.min(x), np.max(x)] - if ylim is None: - ylim = [np.min(y), np.max(y)] - - def _get_scale_and_indices(v, bin, lim): - # if bin is a nonzero scalar, this is a bin size: create scale and indices - if 
np.isscalar(bin) and bin != 0: - scale = np.arange(lim[0], lim[1] + bin / 2, bin) - ind = (np.floor((v - lim[0]) / bin)).astype(np.int64) - # if bin == 0, aggregate over unique values - else: - scale, ind = np.unique(v, return_inverse=True) - return scale, ind - - xscale, xind = _get_scale_and_indices(x, xbin, xlim) - yscale, yind = _get_scale_and_indices(y, ybin, ylim) - # aggregate by using bincount on absolute indices for a 2d array - nx, ny = [xscale.size, yscale.size] - ind2d = np.ravel_multi_index(np.c_[yind, xind].transpose(), dims=(ny, nx)) - r = np.bincount(ind2d, minlength=nx * ny, weights=weights).reshape(ny, nx) - - # if a set of specific values is requested output an array matching the scale dimensions - if not np.isscalar(xbin) and xbin.size > 1: - _, iout, ir = np.intersect1d(xbin, xscale, return_indices=True) - _r = r.copy() - r = np.zeros((ny, xbin.size)) - r[:, iout] = _r[:, ir] - xscale = xbin - - if not np.isscalar(ybin) and ybin.size > 1: - _, iout, ir = np.intersect1d(ybin, yscale, return_indices=True) - _r = r.copy() - r = np.zeros((ybin.size, r.shape[1])) - r[iout, :] = _r[ir, :] - yscale = ybin - - return r, xscale, yscale + for line in traceback.format_stack(): + print(line.strip()) + warning_text = """Deprecation warning: bincount2D() is now a part of iblutil. + brainbox.processing.bincount2D is deprecated and will be removed in + future versions. Please replace imports with iblutil.numerical.bincount2D.""" + _logger.warning(warning_text) + warnings.warn(warning_text, DeprecationWarning) + return _bincount2D(x, y, xbin, ybin, xlim, ylim, weights) def compute_cluster_average(spike_clusters, spike_var): diff --git a/brainbox/task/passive.py b/brainbox/task/passive.py index 3e0110be5..83f02fb23 100644 --- a/brainbox/task/passive.py +++ b/brainbox/task/passive.py @@ -2,7 +2,7 @@ Functions dealing with passive task """ import numpy as np -from brainbox.processing import bincount2D +from iblutil.numerical import bincount2D from scipy.linalg import svd diff --git a/brainbox/task/trials.py b/brainbox/task/trials.py index 59b5ff947..8ccfcd89e 100644 --- a/brainbox/task/trials.py +++ b/brainbox/task/trials.py @@ -1,5 +1,4 @@ -from iblutil.numerical import ismember -from brainbox.processing import bincount2D +from iblutil.numerical import ismember, bincount2D import numpy as np diff --git a/brainbox/tests/test_behavior.py b/brainbox/tests/test_behavior.py index 9dd731061..8d02d185a 100644 --- a/brainbox/tests/test_behavior.py +++ b/brainbox/tests/test_behavior.py @@ -10,7 +10,6 @@ import brainbox.behavior.wheel as wheel import brainbox.behavior.training as train -import brainbox.behavior.pyschofit as psy from ibllib.tests import TEST_DB @@ -44,18 +43,18 @@ def setUp(self): 'intervals': np.array([[0, 62], [63, 90], [95, 110], [115, 135], [140, 200]]) } - def test_derivative(self): - if self.test_data is None: - return - t = np.array([0, .5, 1., 1.5, 2, 3, 4, 4.5, 5, 5.5]) - p = np.arange(len(t)) - v = wheel.velocity(t, p) - self.assertTrue(len(v) == len(t)) - self.assertTrue(np.all(v[0:4] == 2) and v[5] == 1 and np.all(v[7:] == 2)) - # import matplotlib.pyplot as plt - # plt.figure() - # plt.plot(t[:-1] + np.diff(t) / 2, np.diff(p) / np.diff(t), '*-') - # plt.plot(t, v, '-*') + def test_velocity_filtered(self): + """Test for brainbox.behavior.wheel.velocity_filtered""" + Fs = 1000 + pos, _ = wheel.interpolate_position(*self.test_data[1][0], freq=Fs) + vel, acc = wheel.velocity_filtered(pos, Fs) + self.assertEqual(vel.shape, pos.shape) + expected = [-0.03020161, -0.02642356, 
-0.0229635, -0.01981592, -0.01697264, + -0.01442305, -0.01215438, -0.01015202, -0.00839981, -0.00688036] + np.testing.assert_array_almost_equal(vel[-10:], expected) + expected = [0., 187.41222339, 4.16291917, 3.94583813, 3.67112556, + 3.33635025, 2.94002541, 2.48170905, 1.96209209, 1.38307198] + np.testing.assert_array_almost_equal(acc[:10], expected) def test_movements(self): # These test data are the same as those used in the MATLAB code @@ -105,15 +104,26 @@ def test_traces_by_trial(self): np.testing.assert_array_equal(trace_pos[[0, -1]], pos[ind]) def test_direction_changes(self): + """Test for brainbox.behavior.wheel.direction_changes""" t, pos = self.test_data[0][0] on, off, *_ = self.test_data[0][1] - vel, _ = wheel.velocity_smoothed(pos, 1000) + vel, _ = wheel.velocity_filtered(pos, 1000) times, indices = wheel.direction_changes(t, vel, np.c_[on, off]) # import matplotlib.pyplot as plt # plt.plot(np.diff(pos) * 1000) # plt.plot(vel) self.assertTrue(len(times) == len(indices) == 14, 'incorrect number of arrays returned') + def test_get_movement_onset(self): + """Test for brainbox.behavior.wheel.get_movement_onset""" + on, off, *_ = self.test_data[0][1] + intervals = np.c_[on, off] + times = wheel.get_movement_onset(intervals, self.trials['feedback_times']) + expected = [np.nan, 79.66293334, 100.73593334, 129.26693334, np.nan] + np.testing.assert_array_almost_equal(times, expected) + with self.assertRaises(ValueError): + wheel.get_movement_onset(intervals, np.random.permutation(self.trials['feedback_times'])) + class TestTraining(unittest.TestCase): def setUp(self): @@ -250,145 +260,3 @@ def test_query_criterion(self): self.assertIsNone(n_sessions) self.assertIsNone(n_days) self.assertRaises(ValueError, train.query_criterion, subject, 'foobar', one=one) - - -class PsychofitTest(unittest.TestCase): - def setUp(self) -> None: - """Data are 3 x n arrays""" - data = {'weibull50': np.vstack([ - 10 ** np.linspace(-4, -1, 8), - np.ones(8) * 80, - np.array([0.5125, 0.35, 0.5625, 0.5375, 0.8875, 0.8875, 0.9125, 0.8625]) - ]), 'weibull': np.vstack([ - 10 ** np.linspace(-4, -1, 8), - np.ones(8) * 80, - np.array([0.125, 0.1125, 0.1375, 0.4, 0.8125, 0.9, 0.925, 0.875]) - ]), 'erf_psycho_2gammas': np.vstack([ - np.arange(-50, 50, 10), - np.ones(10) * 40, - np.array([0.175, 0.225, 0.35, 0.275, 0.725, 0.9, 0.925, 0.975, 1., 1.]) - ]), 'erf_psycho': np.vstack([ - np.arange(-50, 50, 10), - np.ones(10) * 40, - np.array([0.1, 0.125, 0.25, 0.15, 0.6, 0.65, 0.75, 0.9, 0.9, 0.85]) - ])} - self.test_data = data - np.random.seed(0) - - def test_weibull50(self): - xx = self.test_data['weibull50'][0, :] - - # test parameters - alpha = 10 ** -2.5 - beta = 2. - gamma = 0.1 - - # fake experimental data given those parameters - actual = psy.weibull50((alpha, beta, gamma), xx) - expected = np.array( - [0.5003998, 0.50286841, 0.5201905, 0.62446761, 0.87264857, 0.9, 0.9, 0.9] - ) - self.assertTrue(np.allclose(expected, actual)) - - with self.assertRaises(ValueError): - psy.weibull50((alpha, beta), xx) - - def test_weibull(self): - xx = self.test_data['weibull'][0, :] - - # test parameters - alpha = 10 ** -2.5 - beta = 2. 
- gamma = 0.1 - - # fake experimental data given those parameters - actual = psy.weibull((alpha, beta, gamma), xx) - expected = np.array( - [0.1007996, 0.10573682, 0.14038101, 0.34893523, 0.84529714, 0.9, 0.9, 0.9] - ) - self.assertTrue(np.allclose(expected, actual)) - - with self.assertRaises(ValueError): - psy.weibull((alpha, beta), xx) - - def test_erf_psycho(self): - xx = self.test_data['erf_psycho'][0, :] - - # test parameters - bias = -10. - threshold = 20. - lapse = .1 - - # fake experimental data given those parameters - actual = psy.erf_psycho((bias, threshold, lapse), xx) - expected = np.array( - [0.10187109, 0.11355794, 0.16291968, 0.29180005, 0.5, - 0.70819995, 0.83708032, 0.88644206, 0.89812891, 0.89983722] - ) - self.assertTrue(np.allclose(expected, actual)) - - with self.assertRaises(ValueError): - psy.erf_psycho((bias, threshold, lapse, lapse), xx) - - def test_erf_psycho_2gammas(self): - xx = self.test_data['erf_psycho_2gammas'][0, :] - - # test parameters - bias = -10. - threshold = 20. - gamma1 = .2 - gamma2 = 0. - - # fake experimental data given those parameters - actual = psy.erf_psycho_2gammas((bias, threshold, gamma1, gamma2), xx) - expected = np.array( - [0.20187109, 0.21355794, 0.26291968, 0.39180005, 0.6, - 0.80819995, 0.93708032, 0.98644206, 0.99812891, 0.99983722] - ) - self.assertTrue(np.allclose(expected, actual)) - - with self.assertRaises(ValueError): - psy.erf_psycho_2gammas((bias, threshold, gamma1), xx) - - def test_neg_likelihood(self): - data = self.test_data['erf_psycho'] - with self.assertRaises(ValueError): - psy.neg_likelihood((10, 20, .05), data[1:, :]) - with self.assertRaises(TypeError): - psy.neg_likelihood('(10, 20, .05)', data) - - ll = psy.neg_likelihood((-20, 30, 2), data, P_model='erf_psycho', - parmin=np.array((-10, 20, 0)), parmax=np.array((10, 10, .05))) - self.assertTrue(ll > 10000) - - def test_mle_fit_psycho(self): - expected = { - 'weibull50': (np.array([0.0034045, 3.9029162, .1119576]), -334.1149693046583), - 'weibull': (np.array([0.00316341, 1.72552866, 0.1032307]), -261.235178611311), - 'erf_psycho': (np.array([-9.78747259, 10., 0.15967605]), -193.0509031440323), - 'erf_psycho_2gammas': (np.array([-11.45463779, 9.9999999, 0.24117732, 0.0270835]), - -147.02380025592902) - } - for model in self.test_data.keys(): - pars, L = psy.mle_fit_psycho(self.test_data[model], P_model=model, nfits=10) - expected_pars, expected_L = expected[model] - self.assertTrue(np.allclose(expected_pars, pars, atol=1e-3), - f'unexpected pars for {model}') - self.assertTrue(np.isclose(expected_L, L, atol=1e-3), - f'unexpected likelihood for {model}') - - # Test on of the models with function pars - params = { - 'parmin': np.array([-5., 10., 0.]), - 'parmax': np.array([5., 15., .1]), - 'parstart': np.array([0., 11., 0.1]), - 'nfits': 5 - } - model = 'erf_psycho' - pars, L = psy.mle_fit_psycho(self.test_data[model], P_model=model, **params) - expected = [-5, 15, 0.1] - self.assertTrue(np.allclose(expected, pars, rtol=.01), f'unexpected pars for {model}') - self.assertTrue(np.isclose(-195.55603, L, atol=1e-5), f'unexpected likelihood for {model}') - - def tearDown(self): - np.random.seed() diff --git a/brainbox/tests/test_processing.py b/brainbox/tests/test_processing.py index b30f81a19..4754e5184 100644 --- a/brainbox/tests/test_processing.py +++ b/brainbox/tests/test_processing.py @@ -1,6 +1,7 @@ from brainbox import processing, core import unittest import numpy as np +import datetime class TestProcessing(unittest.TestCase): @@ -62,41 +63,14 @@ def 
test_sync(self): self.assertTrue(times2.min() >= resamp2.times.min()) self.assertTrue(times2.max() <= resamp2.times.max()) - def test_bincount_2d(self): - # first test simple with indices - x = np.array([0, 1, 1, 2, 2, 3, 3, 3]) - y = np.array([3, 2, 2, 1, 1, 0, 0, 0]) - r, xscale, yscale = processing.bincount2D(x, y, xbin=1, ybin=1) - r_ = np.zeros_like(r) - # sometimes life would have been simpler in c: - for ix, iy in zip(x, y): - r_[iy, ix] += 1 - self.assertTrue(np.all(np.equal(r_, r))) - # test with negative values - y = np.array([3, 2, 2, 1, 1, 0, 0, 0]) - 5 - r, xscale, yscale = processing.bincount2D(x, y, xbin=1, ybin=1) - self.assertTrue(np.all(np.equal(r_, r))) - # test unequal bins - r, xscale, yscale = processing.bincount2D(x / 2, y / 2, xbin=1, ybin=2) - r_ = np.zeros_like(r) - for ix, iy in zip(np.floor(x / 2), np.floor((y / 2 + 2.5) / 2)): - r_[int(iy), int(ix)] += 1 - self.assertTrue(np.all(r_ == r)) - # test with weights - w = np.ones_like(x) * 2 - r, xscale, yscale = processing.bincount2D(x / 2, y / 2, xbin=1, ybin=2, weights=w) - self.assertTrue(np.all(r_ * 2 == r)) - # test aggregation instead of binning - x = np.array([0, 1, 1, 2, 2, 4, 4, 4]) - y = np.array([4, 2, 2, 1, 1, 0, 0, 0]) - r, xscale, yscale = processing.bincount2D(x, y) - self.assertTrue(np.all(xscale == yscale) and np.all(xscale == np.array([0, 1, 2, 4]))) - # test aggregation on a fixed scale - r, xscale, yscale = processing.bincount2D(x + 10, y + 10, xbin=np.arange(5) + 10, - ybin=np.arange(3) + 10) - self.assertTrue(np.all(xscale == np.arange(5) + 10)) - self.assertTrue(np.all(yscale == np.arange(3) + 10)) - self.assertTrue(np.all(r.shape == (3, 5))) + def test_bincount2D_deprecation(self): + # Timer to remove bincount2D (now in iblutil) + # Once this test fails: + # - Remove the bincount2D method in processing.py + # - Remove the import from iblutil at the top of that file + # - Delete this test + if datetime.datetime.now() > datetime.datetime(2024, 6, 30): + raise NotImplementedError def test_compute_cluster_averag(self): # Create fake data for 3 clusters diff --git a/examples/atlas/Working with ibllib atlas.ipynb b/examples/atlas/Working with ibllib atlas.ipynb index aa82322d6..9e435aea7 100644 --- a/examples/atlas/Working with ibllib atlas.ipynb +++ b/examples/atlas/Working with ibllib atlas.ipynb @@ -1,158 +1,365 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "b767b213", - "metadata": {}, - "source": [ - "# Working with IBL atlas object" - ] - }, - { - "cell_type": "markdown", - "id": "bba98311", - "metadata": {}, - "source": [ - "## Getting started" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "df873343", - "metadata": {}, - "outputs": [], - "source": [ - "from ibllib.atlas import AllenAtlas\n", - "\n", - "res = 25 # resolution of Atlas, available resolutions are 10, 25 (default) and 50\n", - "brain_atlas = AllenAtlas(res_um=res)" - ] - }, - { - "cell_type": "markdown", - "id": "95a8e4db", - "metadata": {}, - "source": [ - "## Exploring the volumes" - ] - }, - { - "cell_type": "markdown", - "id": "5f34f56c", - "metadata": {}, - "source": [ - "### 1. 
Image Volume \n", - "Allen atlas dwi average template" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "769b4fd4", - "metadata": {}, - "outputs": [], - "source": [ - "# Access the image volume\n", - "im = brain_atlas.image\n", - "\n", - "# Explore the size of the image volume (ap, ml, dv)\n", - "im.shape\n", - "\n", - "# Plot a coronal slice at ap = -1000um\n", - "ap = -1000 / 1e6 # input must be in metres\n", - "ax = brain_atlas.plot_cslice(ap, volume='image')\n" - ] - }, - { - "cell_type": "markdown", - "id": "1c46789b", - "metadata": {}, - "source": [ - "### Label Volume\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9b5b885e", - "metadata": {}, - "outputs": [], - "source": [ - "brain_atlas.bc.nxyz" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f710395f", - "metadata": {}, - "outputs": [], - "source": [ - "brain_atlas.regions.acronym.shape" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e8bf8781", - "metadata": {}, - "outputs": [], - "source": [ - "brain_atlas.bc.xyz2i([-1000/1e6, -1000/1e6, -4000/1e6])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0680ca09", - "metadata": {}, - "outputs": [], - "source": [ - "brain_atlas.image[190, 256, 173] = 100000\n", - "brain_atlas.image[256, 190, 173] = 100000000" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "88e1fac5", - "metadata": {}, - "outputs": [], - "source": [ - "ax = brain_atlas.plot_cslice(ap, volume='image', vmax=10000)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "efd9a13d", - "metadata": {}, - "outputs": [], - "source": [ - "# understainding" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python [conda env:iblenv] *", - "language": "python", - "name": "conda-env-iblenv-py" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} +{ + "cells": [ + { + "cell_type": "markdown", + "id": "b767b213", + "metadata": {}, + "source": [ + "# Working with IBL atlas object" + ] + }, + { + "cell_type": "markdown", + "id": "bba98311", + "metadata": {}, + "source": [ + "## Getting started" + ] + }, + { + "cell_type": "markdown", + "id": "461b8f34", + "metadata": {}, + "source": [ + "The Allen atlas image and annotation volumes can be accessed using the `ibllib.atlas.AllenAtlas` class. Upon instantiating the class for the first time, the relevant files will be downloaded from the Allen database." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "df873343", + "metadata": {}, + "outputs": [], + "source": [ + "from ibllib.atlas import AllenAtlas\n", + "\n", + "res = 25 # resolution of Atlas, available resolutions are 10, 25 (default) and 50\n", + "brain_atlas = AllenAtlas(res_um=res)" + ] + }, + { + "cell_type": "markdown", + "id": "95a8e4db", + "metadata": {}, + "source": [ + "## Exploring the volumes" + ] + }, + { + "cell_type": "markdown", + "id": "12f16b38", + "metadata": {}, + "source": [ + "The brain_atlas class contains two volumes, the dwi image volume and the annotation label volume" + ] + }, + { + "cell_type": "markdown", + "id": "5f34f56c", + "metadata": {}, + "source": [ + "### 1. 
Image Volume \n", + "Allen atlas dwi average template" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "769b4fd4", + "metadata": {}, + "outputs": [], + "source": [ + "# Access the image volume\n", + "im = brain_atlas.image\n", + "\n", + "# Explore the size of the image volume (ap, ml, dv)\n", + "print(f'Shape of image volume: {im.shape}')\n", + "\n", + "# Plot a coronal slice at ap = -1000um\n", + "ap = -1000 / 1e6 # input must be in metres\n", + "ax = brain_atlas.plot_cslice(ap, volume='image')\n" + ] + }, + { + "cell_type": "markdown", + "id": "1c46789b", + "metadata": {}, + "source": [ + "### Label Volume\n" + ] + }, + { + "cell_type": "markdown", + "id": "72bea21a", + "metadata": {}, + "source": [ + "The label volume contains information about which brain region each voxel in the volume belongs to." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ff7cb654", + "metadata": {}, + "outputs": [], + "source": [ + "# Access the label volume\n", + "lab = brain_atlas.label\n", + "\n", + "# Explore the size of the label volume (ap, ml, dv)\n", + "print(f'Shape of label volume: {lab.shape}')\n", + "\n", + "# Plot a coronal slice at ap = -1000um\n", + "ap = -1000 / 1e6 # input must be in metres\n", + "ax = brain_atlas.plot_cslice(ap, volume='annotation')" + ] + }, + { + "cell_type": "markdown", + "id": "8bd69066", + "metadata": {}, + "source": [ + "The label volume used in the IBL AllenAtlas class differs from the Allen annotation volume in two ways.\n", + "- Each voxel has information about the index of the Allen region rather than the Allen atlas id\n", + "- The volume has been lateralised to differentiate between the left and right hemisphere\n", + "\n", + "To understand this better let's explore the BrainRegions class that contains information about the Allen structure tree." + ] + }, + { + "cell_type": "markdown", + "id": "04f601ed", + "metadata": {}, + "source": [ + "## Exploring brain regions" + ] + }, + { + "cell_type": "markdown", + "id": "a1802136", + "metadata": {}, + "source": [ + "The Allen brain region structure tree can be accessed through the class `ibllib.atlas.regions.BrainRegions`. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9c2d097f", + "metadata": {}, + "outputs": [], + "source": [ + "from ibllib.atlas.regions import BrainRegions\n", + "\n", + "brain_regions = BrainRegions()\n", + "\n", + "# Alternatively if you already have the AllenAtlas instantiated you can access it as an attribute\n", + "brain_regions = brain_atlas.regions" + ] + }, + { + "cell_type": "markdown", + "id": "6cf9ab47", + "metadata": {}, + "source": [ + "The brain_regions class has the following data attributes" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1d078160", + "metadata": {}, + "outputs": [], + "source": [ + "brain_regions.__annotations__" + ] + }, + { + "cell_type": "markdown", + "id": "44339559", + "metadata": {}, + "source": [ + "These attributes are the same as the Allen structure tree and for example `id` corresponds to the Allen atlas id while the `name` represents the full anatomical brain region name." + ] + }, + { + "cell_type": "markdown", + "id": "fbe04558", + "metadata": {}, + "source": [ + "The index refers to the index in each of these attribute arrays. For example, index 1 corresponds to the `root` brain region with an atlas id of 997. " + ] + },
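The next two cells print the id and acronym at a given index. To make the index-versus-id distinction concrete before running them, here is a small self-contained sketch — toy arrays, with only `root` (997) and `grey` (8) taken from real Allen ids — of how an index-based label volume maps back to signed ids and acronyms:

```python
import numpy as np

# toy stand-in for the lateralised regions table:
# positive ids = right hemisphere, negative ids = left hemisphere
ids = np.array([0, 997, 8, -997, -8])                 # signed Allen atlas ids
acronyms = np.array(['void', 'root', 'grey', 'root', 'grey'])

# a toy "label volume" stores row indices into the table, not atlas ids
label = np.array([[1, 2, 2], [3, 4, 0]])
print(ids[label])       # recover the signed Allen id of every voxel
print(acronyms[label])  # recover the acronym of every voxel
```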
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0c1fdf7c", + "metadata": {}, + "outputs": [], + "source": [ + "index = 1\n", + "print(brain_regions.id[index])\n", + "print(brain_regions.acronym[index])" + ] + }, + { + "cell_type": "markdown", + "id": "fd8e542c", + "metadata": {}, + "source": [ + "Alternatively, index 1000 corresponds to `PPYd` with an atlas id of 185" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cf56d8d9", + "metadata": {}, + "outputs": [], + "source": [ + "index = 1000\n", + "print(brain_regions.id[index])\n", + "print(brain_regions.acronym[index])" + ] + }, + { + "cell_type": "markdown", + "id": "4c3acedd", + "metadata": {}, + "source": [ + "In the label volume we described above, it is these indices that we are referring to. Therefore, we know all voxels in the volume with a value of 0 will be voxels that lie in `root`, while the voxels that have a value of 1000 will be in `PPYd`" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b607f170", + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "root_voxels = np.where(brain_atlas.label == 1)\n", + "ppyd_voxels = np.where(brain_atlas.label == 1000)" + ] + }, + { + "cell_type": "markdown", + "id": "474bb26b", + "metadata": {}, + "source": [ + "An additional nuance is the lateralisation. If you compare the size of the brain_regions data class to the Allen structure tree. You will see that it has double the number of columms. This is because the IBL brain regions encodes both the left and right hemisphere. We can understand this better by exploring the `brain_regions.id` and `brain_regions.name` at the indices where it transitions between hemispheres." + ] + }, + { + "cell_type": "markdown", + "id": "861fef87", + "metadata": {}, + "source": [ + "The `brain_region.id` go from positive Allen atlas ids (right hemisphere) to negative Allen atlas ids (left hemisphere)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "31cceb95", + "metadata": {}, + "outputs": [], + "source": [ + "print(brain_regions.id[1320:1340])" + ] + }, + { + "cell_type": "markdown", + "id": "e2221959", + "metadata": {}, + "source": [ + "The `brain_region.name` go from right to left hemisphere descriptions" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "97079539", + "metadata": {}, + "outputs": [], + "source": [ + "print(brain_regions.name[1320:1340])" + ] + }, + { + "cell_type": "markdown", + "id": "7f35aa26", + "metadata": {}, + "source": [ + "In the label volume, we can therefore differentiate between left and right hemisphere voxels for the same brain region. First we will use a method in the brain_region class to find out the index of left and right `CA1`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4c93c1a0", + "metadata": {}, + "outputs": [], + "source": [ + "brain_regions.acronym2index('CA1')" + ] + }, + { + "cell_type": "markdown", + "id": "d8bb5fc2", + "metadata": {}, + "source": [ + "The method `acronym2index` returns a tuple, with the first value being a list of acronyms passed in and the second value giving the indices in the array that correspond to the left and right hemispheres for this region. 
We can now use these indices to search in the label volume" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0680ca09", + "metadata": {}, + "outputs": [], + "source": [ + "CA1_right = np.where(brain_atlas.label == 458)\n", + "CA1_left = np.where(brain_atlas.label == 1785)" + ] + }, + { + "cell_type": "markdown", + "id": "42cc166b", + "metadata": {}, + "source": [ + "## Coordinate systems" + ] + }, + { + "cell_type": "markdown", + "id": "7ffcd53b", + "metadata": {}, + "source": [ + "The voxles can be translated to 3D space. In the IBL all xyz coordinates" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.16" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/examples/atlas/atlas_mapping.ipynb b/examples/atlas/atlas_mapping.ipynb index 36525f994..41a050f8a 100644 --- a/examples/atlas/atlas_mapping.ipynb +++ b/examples/atlas/atlas_mapping.ipynb @@ -1,526 +1,650 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "b8ea3853", - "metadata": {}, - "source": [ - "# Atlas mapping" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2dae63ed", - "metadata": {}, - "outputs": [], - "source": [ - "# import brain atlas and brain regions objects\n", - "from ibllib.atlas import AllenAtlas\n", - "from ibllib.atlas.regions import BrainRegions\n", - "ba = AllenAtlas()\n", - "br = BrainRegions() # br is also an attribute of ba so could to br = ba.regions" - ] - }, - { - "cell_type": "markdown", - "id": "279114b7", - "metadata": {}, - "source": [ - "## Available Mappings\n", - "Three mappings are currently available within the IBL, these are:\n", - "\n", - "1. Allen Atlas - total of 1328 annotation regions provided by Allen Atlas\n", - "2. Beryl Atlas - total of 310 annotation regions\n", - "3. Cosmos Atlas - total of 12 annotation regions" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3c9d922c", - "metadata": {}, - "outputs": [], - "source": [ - "# create figure\n", - "import matplotlib.pyplot as plt\n", - "fig, axs = plt.subplots(1, 3, figsize=(15, 18))\n", - "\n", - "# plot coronal slice at ap = -2000 um\n", - "ap = -2000 / 1e6\n", - "# Allen mapping\n", - "ba.plot_cslice(ap, volume='annotation', mapping='Allen', ax=axs[0])\n", - "_ = axs[0].set_title('Allen')\n", - "# Beryl mapping\n", - "ba.plot_cslice(ap, volume='annotation', mapping='Beryl', ax=axs[1])\n", - "_ = axs[1].set_title('Beryl')\n", - "# Cosmos mapping\n", - "ba.plot_cslice(ap, volume='annotation', mapping='Cosmos', ax=axs[2])\n", - "_ = axs[2].set_title('Cosmos')" - ] - }, - { - "cell_type": "markdown", - "id": "a8460f28", - "metadata": {}, - "source": [ - "## Understanding the mappings\n", - "The mappings store the highest level annotation (or parent node) that should be considered. Any regions that are children of these nodes are assigned the same annotation as their parent. \n", - "\n", - "For example, consider the region with the acronym **MDm** (Mediodorsal nucleus of the thalamus, medial part). 
Firstly, to navigate ourselves, we can find the acronyms of all the ancestors to this region," - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "08393219", - "metadata": {}, - "outputs": [], - "source": [ - "# First find the atlas_id associated with acronym MDm\n", - "atlas_id = br.acronym2id('MDm')\n", - "# Then find the acronyms of the ancestors of this region\n", - "print(br.ancestors(ids=atlas_id)['acronym'])" - ] - }, - { - "cell_type": "markdown", - "id": "9081a1b3", - "metadata": {}, - "source": [ - "We can then take a look at what acronym this region will be assigned under the different mappings. Under the Allen mapping we expect it to be assigned the same acronym as this is the lowest level region parcelation that we use." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ef838f27", - "metadata": {}, - "outputs": [], - "source": [ - "print(br.acronym2acronym('MDm', mapping='Allen'))" - ] - }, - { - "cell_type": "markdown", - "id": "2cc3d224", - "metadata": {}, - "source": [ - "Under the Beryl mapping, **MDm** is given the acronym of it's parent, **MD**" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3aa34ba5", - "metadata": {}, - "outputs": [], - "source": [ - "print(br.acronym2acronym('MDm', mapping='Beryl'))" - ] - }, - { - "cell_type": "markdown", - "id": "f574d115", - "metadata": {}, - "source": [ - "Under the Cosmos mapping, it is assigned to the region **TH**" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f74fa398", - "metadata": {}, - "outputs": [], - "source": [ - "print(br.acronym2acronym('MDm', mapping='Cosmos'))" - ] - }, - { - "cell_type": "markdown", - "id": "23ed91cb", - "metadata": {}, - "source": [ - "Therefore any clusters that are assigned an acronym **MDm** in the Allen mapping, will instead be assigned to the region **MD** in the Beryl mapping and **TH** in the Cosmos mapping. " - ] - }, - { - "cell_type": "markdown", - "id": "509bcee9", - "metadata": {}, - "source": [ - "If a region is not included in a mapping, the value for this region is set to root. For example **TH** is not included in the Beryl mapping" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "76962ae7", - "metadata": {}, - "outputs": [], - "source": [ - "print(br.acronym2acronym('TH', mapping='Beryl'))" - ] - }, - { - "cell_type": "markdown", - "id": "607f1e1b", - "metadata": {}, - "source": [ - "## Lateralisation\n", - "Lateralised versions of each of the three mappings are also available. This allow regions on the left and right hemispheres of the brain to be differentiated. \n", - "\n", - "The convention used is that regions in the left hemisphere have negative atlas ids whereas those on the right hemisphere have positive atlas ids.\n", - "\n", - "For the non lateralised mappings the atlas id is always positive regardless of whether the region lies in the left or right hemisphere. Aggregating values over regions could result, therefore, in values from different hemispheres being considered together.\n", - "\n", - "One thing to be aware of, is that while lateralised mappings return distinct atlas ids for the left and right hemispheres, the acronyms returned are not lateralised. \n", - "\n", - "For example consider findng the atlas id when mapping the acronym **MDm** onto the Beryl atlas. 
When specifying the left hemisphere, the returned atlas id is negative" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "30f5e956", - "metadata": {}, - "outputs": [], - "source": [ - "# Left hemisphere gives negative atlas id\n", - "print(br.acronym2id('MDm', mapping='Beryl-lr', hemisphere='left'))" - ] - }, - { - "cell_type": "markdown", - "id": "aa94a06e", - "metadata": {}, - "source": [ - "When specifying the right hemisphere, the returned atlas id is positive" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9125fc7c", - "metadata": {}, - "outputs": [], - "source": [ - "# Left hemisphere gives negative atlas id\n", - "print(br.acronym2id('MDm', mapping='Beryl-lr', hemisphere='right'))" - ] - }, - { - "cell_type": "markdown", - "id": "be5e0654", - "metadata": {}, - "source": [ - "However, when converting from atlas id to acronym, regardless of whether we specify a negative (left hemisphere) or positive (right hemisphere) value, the returned acronym is always the same" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "aaa8b3f5", - "metadata": {}, - "outputs": [], - "source": [ - "print(br.id2acronym(-362, mapping='Beryl-lr'))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "77291362", - "metadata": {}, - "outputs": [], - "source": [ - "print(br.id2acronym(362, mapping='Beryl-lr'))" - ] - }, - { - "cell_type": "markdown", - "id": "6dc83925", - "metadata": {}, - "source": [ - "## How to map your data" - ] - }, - { - "cell_type": "markdown", - "id": "9606cc0b", - "metadata": {}, - "source": [ - "### Mapping from mlapdv coordinates\n", - "The recommended and most versatile way to find the locations of clusters under different mappings is to use the mlapdv coordinates of the clusters. Given a probe insertion id, the clusters object can be loaded in using the following code" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "bc33122d", - "metadata": {}, - "outputs": [], - "source": [ - "from brainbox.io.one import SpikeSortingLoader\n", - "from one.api import ONE\n", - "one = ONE()\n", - "\n", - "pid = 'da8dfec1-d265-44e8-84ce-6ae9c109b8bd'\n", - "\n", - "sl = SpikeSortingLoader(pid=pid, one=one, atlas=ba)\n", - "spikes, clusters, channels = sl.load_spike_sorting()\n", - "clusters = sl.merge_clusters(spikes, clusters, channels)" - ] - }, - { - "cell_type": "markdown", - "id": "2a274d98", - "metadata": {}, - "source": [ - "You will find that the cluster object returned already contains an atlas_id attribute. These are the atlas ids that are obtained using the default mapping - **non lateralised Allen**. For this mapping regardless of whether the clusters lie in the right or left hemisphere the clusters are assigned positive atlas ids" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "03881f96", - "metadata": {}, - "outputs": [], - "source": [ - "print(clusters['atlas_id'][0:10])" - ] - }, - { - "cell_type": "markdown", - "id": "2170a215", - "metadata": {}, - "source": [ - "We can obtain the mlapdv coordinates of the clusters and explore in which hemisphere the clusters lie, clusters with negative x lie on the left hemisphere." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "74617c4b", - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "mlapdv = np.c_[clusters['x'], clusters['y'], clusters['z']] # x = ml, y = ap, z = dv\n", - "clus_LH = np.sum(mlapdv[:, 0] < 0)\n", - "clus_RH = np.sum(mlapdv[:, 0] > 0)\n", - "print(f'Total clusters = {len(mlapdv)}, LH clusters = {clus_LH}, RH clusters = {clus_RH}')" - ] - }, - { - "cell_type": "markdown", - "id": "ac3c4252", - "metadata": {}, - "source": [ - "To get a better understanding of the difference between using lateralised and non-lateralised mappings, let's also make a manipulated version of the mlapdv positions where the first 5 clusters have been moved into the right hemisphere and call this `mlapdv_rh`" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7e2c981f", - "metadata": {}, - "outputs": [], - "source": [ - "mlapdv_rh = np.copy(mlapdv)\n", - "mlapdv_rh[0:5, 0] = -1 * mlapdv_rh[0:5, 0]\n", - "clus_LH = np.sum(mlapdv_rh[:, 0] < 0)\n", - "clus_RH = np.sum(mlapdv_rh[:, 0] > 0)\n", - "print(f'Total clusters = {len(mlapdv_rh)}, LH clusters = {clus_LH}, RH clusters = {clus_RH}')" - ] - }, - { - "cell_type": "markdown", - "id": "a1323aed", - "metadata": {}, - "source": [ - "To find the locations of the clusters in the brain from the mlapdv position we can use the [get_labels](https://int-brain-lab.github.io/iblenv/_autosummary/ibllib.atlas.atlas.html#ibllib.atlas.atlas.BrainAtlas.get_labels) method in the AllenAtlas object. First let's explore the output of using a non lateralised mapping." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d332a29a", - "metadata": {}, - "outputs": [], - "source": [ - "atlas_id_Allen = ba.get_labels(mlapdv, mapping='Allen')\n", - "atlas_id_Allen_rh = ba.get_labels(mlapdv_rh, mapping='Allen')\n", - "print(f'Non-lateralised Allen mapping ids using mlapdv: {atlas_id_Allen[0:10]}')\n", - "print(f'Non-lateralised Allen mapping ids using mlapdv_rh: {atlas_id_Allen_rh[0:10]}')" - ] - }, - { - "cell_type": "markdown", - "id": "5a2a5225", - "metadata": {}, - "source": [ - "Notice that regardless of whether the clusters lie in the left or right hemisphere the sign of the atlas id is the same. 
The result of this mapping is also equivalent the default output from `clusters['atlas_id']`" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "76664a2d", - "metadata": {}, - "outputs": [], - "source": [ - "np.array_equal(clusters['atlas_id'], atlas_id_Allen)" - ] - }, - { - "cell_type": "markdown", - "id": "185f9f6b", - "metadata": {}, - "source": [ - "Now if we use the lateralised mapping, we notice that the clusters in the left hemisphere have been assigned negative atlas ids whereas those in the right hemisphere have positive atlas ids" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "af431f8d", - "metadata": {}, - "outputs": [], - "source": [ - "atlas_id_Allen = ba.get_labels(mlapdv, mapping='Allen-lr')\n", - "atlas_id_Allen_rh = ba.get_labels(mlapdv_rh, mapping='Allen-lr')\n", - "print(f'Lateralised Allen mapping ids using mlapdv: {atlas_id_Allen[0:10]}')\n", - "print(f'Lateralised Allen mapping ids using mlapdv_rh: {atlas_id_Allen_rh[0:10]}')" - ] - }, - { - "cell_type": "markdown", - "id": "b0a09001", - "metadata": {}, - "source": [ - "By changing the mapping argument that we pass in, we can also easily obtain the atlas ids for the Beryl and Cosmos mappings" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "91143d6a", - "metadata": {}, - "outputs": [], - "source": [ - "atlas_id_Beryl = ba.get_labels(mlapdv_rh, mapping='Beryl-lr')\n", - "atlas_id_Cosmos = ba.get_labels(mlapdv_rh, mapping='Cosmos')\n", - "print(f'Lateralised Beryl mapping ids using mlapdv_rh: {atlas_id_Beryl[0:10]}')\n", - "print(f'Non-lateralised Cosmos mapping ids using mlapdv_rh: {atlas_id_Cosmos[0:10]}')" - ] - }, - { - "cell_type": "markdown", - "id": "c229e12d", - "metadata": {}, - "source": [ - "### Mapping from atlas ids\n", - "Methods are available that allow you to translate atlas ids from one mapping to another. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8be221d9", - "metadata": {}, - "outputs": [], - "source": [ - "# map atlas ids from lateralised Allen to lateralised Beryl\n", - "atlas_id_Allen = ba.get_labels(mlapdv_rh, mapping='Allen-lr') # lateralised Allen\n", - "\n", - "remap_beryl = br.id2id(atlas_id_Allen, mapping='Beryl-lr')\n", - "print(f'Lateralised Beryl mapping ids using remap: {remap_beryl[0:10]}')\n", - "\n", - "# map atlas ids from lateralised Allen to non-lateralised Cosmos\n", - "remap_cosmos = br.id2id(atlas_id_Allen_rh, mapping='Cosmos')\n", - "print(f'Non-lateralised Cosmos mapping ids using remap: {remap_cosmos[0:10]}')" - ] - }, - { - "cell_type": "markdown", - "id": "2cf81730", - "metadata": {}, - "source": [ - "When remapping with atlas ids it is not possible to map from \n", - "\n", - "1. A non-lateralised to a lateralised mapping. \n", - "2. From a higher mapping to a lower one (e.g cannot map from Beryl to Allen, or Cosmos to Allen)\n", - "3. From Beryl to Cosmos\n", - "\n", - "This is why it is recommened to use mlapdv coordinates for remappings as it allows complete flexibility" - ] - }, - { - "cell_type": "markdown", - "id": "ec294cf4", - "metadata": {}, - "source": [ - "### Converting to acronyms\n", - "Methods are available to convert between atlas ids and acronyms. 
Note that when converting to acronyms, even if the atlas ids are lateralised the returned acronyms are not" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7f9428b9", - "metadata": {}, - "outputs": [], - "source": [ - "atlas_id_Allen = ba.get_labels(mlapdv_rh, mapping='Allen-lr') # lateralised Allen\n", - "acronym_Allen = br.id2acronym(atlas_id_Allen, mapping='Allen-lr')\n", - "print(f'Acronyms of lateralised Allen ids: {acronym_Allen[0:10]}')\n", - "\n", - "atlas_id_Allen = ba.get_labels(mlapdv_rh, mapping='Allen-lr') # Non-ateralised Allen\n", - "acronym_Allen = br.id2acronym(atlas_id_Allen)\n", - "print(f'Acronyms of non-lateralised Allen: {acronym_Allen[0:10]}')" - ] - }, - { - "cell_type": "markdown", - "id": "7cd9eb73", - "metadata": {}, - "source": [ - "It is also possible to simultaneously remap the acronyms with these methods" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0f82948f", - "metadata": {}, - "outputs": [], - "source": [ - "acronym_Cosmos = br.id2acronym(atlas_id_Allen, mapping='Cosmos-lr')\n", - "print(acronym_Cosmos[0:10])" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python [conda env:iblenv] *", - "language": "python", - "name": "conda-env-iblenv-py" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} +{ + "cells": [ + { + "cell_type": "markdown", + "id": "b8ea3853", + "metadata": {}, + "source": [ + "# Atlas mapping" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2dae63ed", + "metadata": {}, + "outputs": [], + "source": [ + "# import brain atlas and brain regions objects\n", + "from ibllib.atlas import AllenAtlas\n", + "from ibllib.atlas.regions import BrainRegions\n", + "ba = AllenAtlas()\n", + "br = BrainRegions() # br is also an attribute of ba so could to br = ba.regions" + ] + }, + { + "cell_type": "markdown", + "id": "279114b7", + "metadata": {}, + "source": [ + "## Available Mappings\n", + "Four mappings are currently available within the IBL, these are:\n", + "\n", + "1. Allen Atlas - total of 1328 annotation regions provided by Allen Atlas\n", + "2. Beryl Atlas - total of 308 annotation regions\n", + "3. Cosmos Atlas - total of 12 annotation regions\n", + "4. Swanson Atlas - total of 319 annotation regions (*)\n", + "\n", + "(*) Note: The dedicated mapping for plotting on Swanson flatmap is explained in this [webpage](https://int-brain-lab.github.io/iblenv/notebooks_external/atlas_swanson_flatmap.html)." 
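+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As a quick sanity check, we can count how many distinct region indices each mapping uses via the `br.mappings` dictionary (explored further below). This is only a sketch: the counts also include the `root` and `void` entries, so they can differ slightly from the totals quoted above."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import numpy as np\n",
+    "for name in ['Allen', 'Beryl', 'Cosmos', 'Swanson']:\n",
+    "    # number of unique region indices used by this mapping\n",
+    "    print(f'{name}: {np.unique(br.mappings[name]).size} regions')"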
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3c9d922c", + "metadata": { + "pycharm": { + "is_executing": true + } + }, + "outputs": [], + "source": [ + "# create figure\n", + "import matplotlib.pyplot as plt\n", + "fig, axs = plt.subplots(1, 3, figsize=(15, 18))\n", + "\n", + "# plot coronal slice at ap = -2000 um\n", + "ap = -2000 / 1e6\n", + "# Allen mapping\n", + "ba.plot_cslice(ap, volume='annotation', mapping='Allen', ax=axs[0])\n", + "_ = axs[0].set_title('Allen')\n", + "# Beryl mapping\n", + "ba.plot_cslice(ap, volume='annotation', mapping='Beryl', ax=axs[1])\n", + "_ = axs[1].set_title('Beryl')\n", + "# Cosmos mapping\n", + "ba.plot_cslice(ap, volume='annotation', mapping='Cosmos', ax=axs[2])\n", + "_ = axs[2].set_title('Cosmos')" + ] + }, + { + "cell_type": "markdown", + "source": [ + "The `br.mappings` contains the `index` of the region for a given mapping.\n", + "You can use these indices to find for example the acronyms contained in Cosmos:" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": null, + "outputs": [], + "source": [ + "import numpy as np\n", + "cosmos_indices = np.unique(br.mappings['Cosmos'])\n", + "br.acronym[cosmos_indices]" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%%\n" + } + } + }, + { + "cell_type": "markdown", + "source": [ + "You can check that all brain regions within these 4 mappings are contained in the Allen parcellation, for example for Beryl:" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": null, + "outputs": [], + "source": [ + "set(np.unique(br.mappings['Beryl'])).difference(set(np.unique(br.mappings['Allen'])))\n", + "# Expect to return an empty set" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%%\n" + } + } + }, + { + "cell_type": "markdown", + "id": "a8460f28", + "metadata": {}, + "source": [ + "## Understanding the mappings\n", + "The mappings store the highest level annotation (or parent node) that should be considered. Any regions that are children of these nodes are assigned the same annotation as their parent. \n", + "\n", + "For example, consider the region with the acronym **MDm** (Mediodorsal nucleus of the thalamus, medial part). Firstly, to navigate ourselves, we can find the acronyms of all the ancestors to this region," + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "08393219", + "metadata": {}, + "outputs": [], + "source": [ + "# First find the atlas_id associated with acronym MDm\n", + "atlas_id = br.acronym2id('MDm')\n", + "# Then find the acronyms of the ancestors of this region\n", + "print(br.ancestors(ids=atlas_id)['acronym'])" + ] + }, + { + "cell_type": "markdown", + "id": "9081a1b3", + "metadata": {}, + "source": [ + "We can then take a look at what acronym this region will be assigned under the different mappings. Under the Allen mapping we expect it to be assigned the same acronym as this is the lowest level region parcelation that we use." 
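+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Conversely, the `descendants` method (the counterpart of the `ancestors` method used above) lists a region together with its children; a minimal sketch showing that **MDm** is one of the children of **MD**:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# List the acronyms of MD and all of its descendants\n",
+    "print(br.descendants(ids=br.acronym2id('MD'))['acronym'])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Starting with the Allen mapping:"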
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "ef838f27",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print(br.acronym2acronym('MDm', mapping='Allen'))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "2cc3d224",
+   "metadata": {},
+   "source": [
+    "Under the Beryl mapping, **MDm** is given the acronym of its parent, **MD**"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "3aa34ba5",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print(br.acronym2acronym('MDm', mapping='Beryl'))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "f574d115",
+   "metadata": {},
+   "source": [
+    "Under the Cosmos mapping, it is assigned to the region **TH**"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "f74fa398",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print(br.acronym2acronym('MDm', mapping='Cosmos'))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "Under the Swanson mapping, it is assigned to the region **MD**"
+   ],
+   "metadata": {
+    "collapsed": false
+   }
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "outputs": [],
+   "source": [
+    "print(br.acronym2acronym('MDm', mapping='Swanson'))"
+   ],
+   "metadata": {
+    "collapsed": false,
+    "pycharm": {
+     "name": "#%%\n"
+    }
+   }
+  },
+  {
+   "cell_type": "markdown",
+   "id": "23ed91cb",
+   "metadata": {},
+   "source": [
+    "Therefore any clusters that are assigned the acronym **MDm** in the Allen mapping will instead be assigned to the region **MD** in the Beryl or Swanson mapping, and **TH** in the Cosmos mapping."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "509bcee9",
+   "metadata": {},
+   "source": [
+    "If a region sits above (i.e. is a parent of) the regions included in a mapping, the value for this region is set to root. For example **TH** is included in neither the Beryl nor the Swanson mapping"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "76962ae7",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print(br.acronym2acronym('TH', mapping='Beryl'))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "outputs": [],
+   "source": [
+    "print(br.acronym2acronym('TH', mapping='Swanson'))"
+   ],
+   "metadata": {
+    "collapsed": false,
+    "pycharm": {
+     "name": "#%%\n"
+    }
+   }
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "However, a child region that is not included in the mapping will be returned as its closest parent in the mapping. For example, VISa2/3 is not included in Swanson, but its parent VISa is:"
+   ],
+   "metadata": {
+    "collapsed": false
+   }
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "outputs": [],
+   "source": [
+    "print(br.acronym2acronym('VISa2/3', mapping='Swanson'))"
+   ],
+   "metadata": {
+    "collapsed": false,
+    "pycharm": {
+     "name": "#%%\n"
+    }
+   }
+  },
+  {
+   "cell_type": "markdown",
+   "id": "607f1e1b",
+   "metadata": {},
+   "source": [
+    "## Lateralisation\n",
+    "Lateralised versions of each of the three mappings are also available. This allows regions on the left and right hemispheres of the brain to be differentiated.\n",
+    "\n",
+    "The convention used is that regions in the left hemisphere have negative atlas ids whereas those on the right hemisphere have positive atlas ids.\n",
+    "\n",
+    "For the non lateralised mappings the atlas id is always positive regardless of whether the region lies in the left or right hemisphere. 
Aggregating values over regions could therefore result in values from different hemispheres being considered together.\n",
+    "\n",
+    "One thing to be aware of is that while lateralised mappings return distinct atlas ids for the left and right hemispheres, the acronyms returned are not lateralised.\n",
+    "\n",
+    "For example, consider finding the atlas id when mapping the acronym **MDm** onto the Beryl atlas. When specifying the left hemisphere, the returned atlas id is negative"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "30f5e956",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Left hemisphere gives negative atlas id\n",
+    "print(br.acronym2id('MDm', mapping='Beryl-lr', hemisphere='left'))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "aa94a06e",
+   "metadata": {},
+   "source": [
+    "When specifying the right hemisphere, the returned atlas id is positive"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "9125fc7c",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Right hemisphere gives positive atlas id\n",
+    "print(br.acronym2id('MDm', mapping='Beryl-lr', hemisphere='right'))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "be5e0654",
+   "metadata": {},
+   "source": [
+    "However, when converting from atlas id to acronym, regardless of whether we specify a negative (left hemisphere) or positive (right hemisphere) value, the returned acronym is always the same"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "aaa8b3f5",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print(br.id2acronym(-362, mapping='Beryl-lr'))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "77291362",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print(br.id2acronym(362, mapping='Beryl-lr'))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "6dc83925",
+   "metadata": {},
+   "source": [
+    "## How to map your data"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "9606cc0b",
+   "metadata": {},
+   "source": [
+    "### Mapping from mlapdv coordinates\n",
+    "The recommended and most versatile way to find the locations of clusters under different mappings is to use the mlapdv coordinates of the clusters. Given a probe insertion id, the clusters object can be loaded in using the following code"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "bc33122d",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from brainbox.io.one import SpikeSortingLoader\n",
+    "from one.api import ONE\n",
+    "one = ONE()\n",
+    "\n",
+    "pid = 'da8dfec1-d265-44e8-84ce-6ae9c109b8bd'\n",
+    "\n",
+    "sl = SpikeSortingLoader(pid=pid, one=one, atlas=ba)\n",
+    "spikes, clusters, channels = sl.load_spike_sorting()\n",
+    "clusters = sl.merge_clusters(spikes, clusters, channels)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "2a274d98",
+   "metadata": {},
+   "source": [
+    "You will find that the cluster object returned already contains an atlas_id attribute. These are the atlas ids that are obtained using the default mapping - **non lateralised Allen**. 
For this mapping regardless of whether the clusters lie in the right or left hemisphere the clusters are assigned positive atlas ids" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "03881f96", + "metadata": {}, + "outputs": [], + "source": [ + "print(clusters['atlas_id'][0:10])" + ] + }, + { + "cell_type": "markdown", + "id": "2170a215", + "metadata": {}, + "source": [ + "We can obtain the mlapdv coordinates of the clusters and explore in which hemisphere the clusters lie, clusters with negative x lie on the left hemisphere." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "74617c4b", + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "mlapdv = np.c_[clusters['x'], clusters['y'], clusters['z']] # x = ml, y = ap, z = dv\n", + "clus_LH = np.sum(mlapdv[:, 0] < 0)\n", + "clus_RH = np.sum(mlapdv[:, 0] > 0)\n", + "print(f'Total clusters = {len(mlapdv)}, LH clusters = {clus_LH}, RH clusters = {clus_RH}')" + ] + }, + { + "cell_type": "markdown", + "id": "ac3c4252", + "metadata": {}, + "source": [ + "To get a better understanding of the difference between using lateralised and non-lateralised mappings, let's also make a manipulated version of the mlapdv positions where the first 5 clusters have been moved into the right hemisphere and call this `mlapdv_rh`" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7e2c981f", + "metadata": {}, + "outputs": [], + "source": [ + "mlapdv_rh = np.copy(mlapdv)\n", + "mlapdv_rh[0:5, 0] = -1 * mlapdv_rh[0:5, 0]\n", + "clus_LH = np.sum(mlapdv_rh[:, 0] < 0)\n", + "clus_RH = np.sum(mlapdv_rh[:, 0] > 0)\n", + "print(f'Total clusters = {len(mlapdv_rh)}, LH clusters = {clus_LH}, RH clusters = {clus_RH}')" + ] + }, + { + "cell_type": "markdown", + "id": "a1323aed", + "metadata": {}, + "source": [ + "To find the locations of the clusters in the brain from the mlapdv position we can use the [get_labels](https://int-brain-lab.github.io/iblenv/_autosummary/ibllib.atlas.atlas.html#ibllib.atlas.atlas.BrainAtlas.get_labels) method in the AllenAtlas object. First let's explore the output of using a non lateralised mapping." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d332a29a", + "metadata": {}, + "outputs": [], + "source": [ + "atlas_id_Allen = ba.get_labels(mlapdv, mapping='Allen')\n", + "atlas_id_Allen_rh = ba.get_labels(mlapdv_rh, mapping='Allen')\n", + "print(f'Non-lateralised Allen mapping ids using mlapdv: {atlas_id_Allen[0:10]}')\n", + "print(f'Non-lateralised Allen mapping ids using mlapdv_rh: {atlas_id_Allen_rh[0:10]}')" + ] + }, + { + "cell_type": "markdown", + "id": "5a2a5225", + "metadata": {}, + "source": [ + "Notice that regardless of whether the clusters lie in the left or right hemisphere the sign of the atlas id is the same. 
The result of this mapping is also equivalent to the default output from `clusters['atlas_id']`"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "76664a2d",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "np.array_equal(clusters['atlas_id'], atlas_id_Allen)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "185f9f6b",
+   "metadata": {},
+   "source": [
+    "Now if we use the lateralised mapping, we notice that the clusters in the left hemisphere have been assigned negative atlas ids whereas those in the right hemisphere have positive atlas ids"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "af431f8d",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "atlas_id_Allen = ba.get_labels(mlapdv, mapping='Allen-lr')\n",
+    "atlas_id_Allen_rh = ba.get_labels(mlapdv_rh, mapping='Allen-lr')\n",
+    "print(f'Lateralised Allen mapping ids using mlapdv: {atlas_id_Allen[0:10]}')\n",
+    "print(f'Lateralised Allen mapping ids using mlapdv_rh: {atlas_id_Allen_rh[0:10]}')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "b0a09001",
+   "metadata": {},
+   "source": [
+    "By changing the mapping argument that we pass in, we can also easily obtain the atlas ids for the Beryl and Cosmos mappings"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "91143d6a",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "atlas_id_Beryl = ba.get_labels(mlapdv_rh, mapping='Beryl-lr')\n",
+    "atlas_id_Cosmos = ba.get_labels(mlapdv_rh, mapping='Cosmos')\n",
+    "print(f'Lateralised Beryl mapping ids using mlapdv_rh: {atlas_id_Beryl[0:10]}')\n",
+    "print(f'Non-lateralised Cosmos mapping ids using mlapdv_rh: {atlas_id_Cosmos[0:10]}')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "c229e12d",
+   "metadata": {},
+   "source": [
+    "### Mapping from atlas ids\n",
+    "Methods are available that allow you to translate atlas ids from one mapping to another. "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "8be221d9",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# map atlas ids from lateralised Allen to lateralised Beryl\n",
+    "atlas_id_Allen = ba.get_labels(mlapdv_rh, mapping='Allen-lr') # lateralised Allen\n",
+    "\n",
+    "remap_beryl = br.id2id(atlas_id_Allen, mapping='Beryl-lr')\n",
+    "print(f'Lateralised Beryl mapping ids using remap: {remap_beryl[0:10]}')\n",
+    "\n",
+    "# map atlas ids from lateralised Allen to non-lateralised Cosmos\n",
+    "remap_cosmos = br.id2id(atlas_id_Allen_rh, mapping='Cosmos')\n",
+    "print(f'Non-lateralised Cosmos mapping ids using remap: {remap_cosmos[0:10]}')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "2cf81730",
+   "metadata": {},
+   "source": [
+    "When remapping with atlas ids it is not possible to map from \n",
+    "\n",
+    "1. A non-lateralised to a lateralised mapping. \n",
+    "2. From a higher mapping to a lower one (e.g. cannot map from Beryl to Allen, or Cosmos to Allen)\n",
+    "3. From Beryl to Cosmos\n",
+    "\n",
+    "This is why it is recommended to use mlapdv coordinates for remappings as it allows complete flexibility"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "ec294cf4",
+   "metadata": {},
+   "source": [
+    "### Converting to acronyms\n",
+    "Methods are available to convert between atlas ids and acronyms. 
Note that when converting to acronyms, even if the atlas ids are lateralised the returned acronyms are not" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7f9428b9", + "metadata": {}, + "outputs": [], + "source": [ + "atlas_id_Allen = ba.get_labels(mlapdv_rh, mapping='Allen-lr') # lateralised Allen\n", + "acronym_Allen = br.id2acronym(atlas_id_Allen, mapping='Allen-lr')\n", + "print(f'Acronyms of lateralised Allen ids: {acronym_Allen[0:10]}')\n", + "\n", + "atlas_id_Allen = ba.get_labels(mlapdv_rh, mapping='Allen-lr') # Non-ateralised Allen\n", + "acronym_Allen = br.id2acronym(atlas_id_Allen)\n", + "print(f'Acronyms of non-lateralised Allen: {acronym_Allen[0:10]}')" + ] + }, + { + "cell_type": "markdown", + "id": "7cd9eb73", + "metadata": {}, + "source": [ + "It is also possible to simultaneously remap the acronyms with these methods" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0f82948f", + "metadata": {}, + "outputs": [], + "source": [ + "acronym_Cosmos = br.id2acronym(atlas_id_Allen, mapping='Cosmos-lr')\n", + "print(acronym_Cosmos[0:10])" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python [conda env:iblenv] *", + "language": "python", + "name": "conda-env-iblenv-py" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.7" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/examples/atlas/atlas_swanson_flatmap.ipynb b/examples/atlas/atlas_swanson_flatmap.ipynb index 336904ba9..4f92ee3ae 100644 --- a/examples/atlas/atlas_swanson_flatmap.ipynb +++ b/examples/atlas/atlas_swanson_flatmap.ipynb @@ -16,13 +16,6 @@ "to interface programmatically with the Allen Atlas regions." ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## The swanson flatmap with annotations" - ] - }, { "cell_type": "code", "execution_count": null, @@ -30,13 +23,13 @@ "outputs": [], "source": [ "import numpy as np\n", - "from ibllib.atlas.flatmaps import swanson, plot_swanson\n", + "from ibllib.atlas.plots import plot_swanson_vector\n", "from ibllib.atlas import BrainRegions\n", "\n", "br = BrainRegions()\n", "\n", "# Plot Swanson map will default colors and acronyms\n", - "plot_swanson(br=br, annotate=True)" + "plot_swanson_vector(br=br, annotate=True)" ] }, { @@ -51,8 +44,8 @@ { "cell_type": "markdown", "source": [ - "The Swanson map holds 318 brain region acronyms, some of which are an aggregate of distinct brain regions in the Allen or Beryl parcellation.\n", - "To find the acronyms of the regions represented in Swanson, use:" + "The Swanson map holds 323 brain region acronyms.\n", + "To find these acronyms, use the indices stored in the swanson mapping:" ], "metadata": { "collapsed": false @@ -63,7 +56,9 @@ "execution_count": null, "outputs": [], "source": [ - "swanson_ac = np.sort(br.acronym[np.unique(swanson())])" + "swanson_indices = np.unique(br.mappings['Swanson'])\n", + "swanson_ac = np.sort(br.acronym[swanson_indices])\n", + "swanson_ac.size" ], "metadata": { "collapsed": false, @@ -75,7 +70,7 @@ { "cell_type": "markdown", "source": [ - "Regions which are \"children\" of a Swanson region will not be included in the acronyms. For example `PTLp` is in Swanson, but its children `VISa` and `VISrl`(i.e. 
a finer parcellation of `PTLp`) are not:" + "Regions which are \"children\" or \"parents\" of a Swanson region will not be included in the acronyms. For example `VISa` is in Swanson, but its parent `PTLp` or child `VISa2/3` are not:" ], "metadata": { "collapsed": false @@ -86,10 +81,14 @@ "execution_count": null, "outputs": [], "source": [ - "# Example: Check if PTLp is in Swanson\n", - "np.isin(['PTLp'], swanson_ac)\n", - "# Example: Check if VISa and VISrl are in Swanson\n", - "np.isin(['VISa', 'VISrl'], swanson_ac)" + "# Example: VISa is in Swanson\n", + "print(np.isin(['VISa'], swanson_ac))\n", + "\n", + "# Example child: VISa2/3 is not in Swanson\n", + "print(np.isin(['VISa2/3'], swanson_ac))\n", + "\n", + "# Example parent: PTLp is not in Swanson\n", + "print(np.isin(['PTLp'], swanson_ac))" ], "metadata": { "collapsed": false, @@ -101,7 +100,45 @@ { "cell_type": "markdown", "source": [ - "As such, you can only plot value for a given region that is in Swanson. This was done to ensure there is no confusion about how data is aggregated and represented per region (for example, if you were to input values for both `VISa` and `VISrl`, it is unclear whether the mean, median or else should have been plotted onto the `PTLp` area - instead, we ask you to do the aggregation yourself and pass this into the plotting function).\n", + "Also, only the indices corresponding to one hemisphere are represented in Swanson. For example, for VISa:" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": null, + "outputs": [], + "source": [ + "# indices of VISa\n", + "indices = br.acronym2index('VISa')[1][0]\n", + "print(f'Index {indices[0]} in swanson? {indices[0] in swanson_indices}')\n", + "print(f'Index {indices[1]} in swanson? {indices[1] in swanson_indices}')" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%%\n" + } + } + }, + { + "cell_type": "markdown", + "source": [ + "### Selecting the brain regions for plotting" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "markdown", + "source": [ + "You can only plot value for a given region that is in Swanson, or a parent region (see below for detailed explanation on this latter point). You cannot plot value on children regions of those in the Swanson mapping. 
In other words, the brain regions contained in the Swanson mapping are the lowest hierarchical level you can plot onto.\n",
+    "\n",
+    "This was done to ensure there is no confusion about how data is aggregated and represented per region.\n",
+    "For example, if you were to input values for both `VISa1` and `VISa2/3`, it is unclear whether the mean, median, or something else should have been plotted onto the `VISa` area - instead, we ask you to do the aggregation yourself and pass this into the plotting function.\n",
     "\n",
     "For example,"
    ],
@@ -114,17 +151,97 @@
    "execution_count": null,
    "outputs": [],
    "source": [
-    "from ibllib.atlas.flatmaps import plot_swanson_vector\n",
+    "# 'VISa', 'CA1', 'VPM' are in Swanson and all 3 are plotted\n",
+    "acronyms = ['VISa', 'CA1', 'VPM']\n",
+    "values = np.array([1.5, 3, 4])\n",
+    "plot_swanson_vector(acronyms, values, annotate=True,\n",
+    "                    annotate_list=['VISa', 'CA1', 'VPM'], empty_color='silver')\n",
     "\n",
-    "# 'PTLp', 'CA1', 'VPM' as in Swanson and all 3 are plotted\n",
+    "# 'VISa1','VISa2/3' are not in Swanson, only 'CA1', 'VPM' are plotted\n",
+    "acronyms = ['VISa1','VISa2/3', 'CA1', 'VPM']\n",
+    "values = np.array([1, 2, 3, 4])\n",
+    "plot_swanson_vector(acronyms, values, annotate=True,\n",
+    "                    annotate_list=['VISa1','VISa2/3', 'CA1', 'VPM'], empty_color='silver')"
+   ],
+   "metadata": {
+    "collapsed": false,
+    "pycharm": {
+     "name": "#%%\n"
+    }
+   }
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "You can plot onto the parent of a region in the Swanson mapping, for example you can plot over `PTLp` (which is the parent of `VISa` and `VISrl`). This paints the same value across all regions of the Swanson mapping contained in the parent region."
+   ],
+   "metadata": {
+    "collapsed": false
+   }
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "outputs": [],
+   "source": [
+    "# Plotting over a parent region (PTLp) paints the same value across all children (VISa and VISrl)\n",
+    "acronyms = ['PTLp', 'CA1', 'VPM']\n",
+    "values = np.array([1.5, 3, 4])\n",
+    "plot_swanson_vector(acronyms, values, annotate=True,\n",
+    "                    annotate_list=['PTLp', 'CA1', 'VPM'], empty_color='silver')"
+   ],
+   "metadata": {
+    "collapsed": false,
+    "pycharm": {
+     "name": "#%%\n"
+    }
+   }
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "Plotting over a parent and child region simultaneously will overwrite the corresponding portion of the parent region:"
+   ],
+   "metadata": {
+    "collapsed": false
+   }
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "outputs": [],
+   "source": [
+    "# Plotting over 'PTLp' and overwriting the 'VISrl' value\n",
+    "acronyms = ['PTLp','VISrl', 'CA1', 'VPM']\n",
+    "values = np.array([1, 2, 3, 4])\n",
+    "plot_swanson_vector(acronyms, values, annotate=True,\n",
+    "                    annotate_list=['PTLp','VISrl', 'CA1', 'VPM'], empty_color='silver')"
+   ],
+   "metadata": {
+    "collapsed": false,
+    "pycharm": {
+     "name": "#%%\n"
+    }
+   }
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "As such, you can easily fill in a whole top-hierarchy region, supplemented by one particular region of interest."
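+   ],
+   "metadata": {
+    "collapsed": false
+   }
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "If you do need to combine several child regions, do the aggregation yourself before plotting. A minimal sketch, assuming hypothetical per-layer values and a simple mean as the statistic (the choice of statistic is yours):"
+   ],
+   "metadata": {
+    "collapsed": false
+   }
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "outputs": [],
+   "source": [
+    "# hypothetical values for two VISa layers\n",
+    "acronyms = np.array(['VISa1', 'VISa2/3'])\n",
+    "values = np.array([1.0, 2.0])\n",
+    "# map both acronyms onto the Swanson parcellation (both land on VISa)\n",
+    "mapped = br.acronym2acronym(acronyms, mapping='Swanson')\n",
+    "# aggregate duplicates with a mean before passing them to the plotting function\n",
+    "agg_acronyms, inv = np.unique(mapped, return_inverse=True)\n",
+    "agg_values = np.array([values[inv == i].mean() for i in range(agg_acronyms.size)])\n",
+    "print(agg_acronyms, agg_values)"
+   ],
+   "metadata": {
+    "collapsed": false,
+    "pycharm": {
+     "name": "#%%\n"
+    }
+   }
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "For example, filling in the whole Isocortex and then painting one region of interest on top:"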
+ ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": null, + "outputs": [], + "source": [ + "acronyms = ['Isocortex', 'VISa']\n", + "values = np.array([1, 2])\n", + "plot_swanson_vector(acronyms, values, annotate=True,\n", + " annotate_list=['Isocortex', 'VISa'],empty_color='silver')" ], "metadata": { "collapsed": false, @@ -135,16 +252,36 @@ }, { "cell_type": "markdown", - "source": [], + "source": [ + "## Mapping to the swanson brain regions\n", + "\n", + "Similarly as explained in this [page](https://int-brain-lab.github.io/iblenv/notebooks_external/atlas_mapping.html), you can map brain regions to those found in Swanson using `br.acronym2acronym`:" + ], "metadata": { "collapsed": false } }, + { + "cell_type": "code", + "execution_count": null, + "outputs": [], + "source": [ + "br.acronym2acronym('MDm', mapping='Swanson')" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%%\n" + } + } + }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Plotting values on the swanson flatmap" + "## Plotting values on the swanson flatmap\n", + "### Single hemisphere display\n", + "You simply need to provide an array of brain region acronyms, and an array of corresponding values." ] }, { @@ -210,7 +347,7 @@ "\n", "\n", "# and display on a single hemishphere, using a blue colormap\n", - "plot_swanson(acronyms, values, cmap='Blues', br=br)" + "plot_swanson_vector(acronyms, values, cmap='Blues', br=br)" ] }, { @@ -218,7 +355,7 @@ "metadata": {}, "source": [ "### Lateralized display\n", - "A more advanced example is when each hemisphere is assigned a different value." + "A more advanced example is when each hemisphere is assigned a different value. For this, you need to convert the acronyms to Allen ID, and assign positive/negative ID values to differentiate between the two hemispheres." ] }, { @@ -232,14 +369,14 @@ "# assign random values for the sake of this example\n", "values_rl = np.random.randn(regions_rl.size)\n", "# display with an explicit dual hemisphere setup\n", - "plot_swanson(regions_rl, values_rl, hemisphere='both', cmap='magma', br=br)" + "plot_swanson_vector(regions_rl, values_rl, hemisphere='both', cmap='magma', br=br)" ] }, { "cell_type": "markdown", "source": [ "## Portrait orientation\n", - "One can also mirror the hemishperes and orient the display in portrait mode." + "One can also mirror the hemispheres and orient the display in portrait mode." 
], "metadata": { "collapsed": false @@ -250,12 +387,7 @@ "execution_count": null, "outputs": [], "source": [ - "from ibllib.atlas.flatmaps import plot_swanson\n", - "from ibllib.atlas import BrainRegions\n", - "\n", - "br = BrainRegions()\n", - "\n", - "plot_swanson(acronyms=acronyms, values=values, orientation='portrait', cmap='Greens', hemisphere='mirror')" + "plot_swanson_vector(acronyms=acronyms, values=values, orientation='portrait', cmap='Greens', hemisphere='mirror')" ], "metadata": { "collapsed": false diff --git a/examples/atlas/atlas_working_with_ibllib_atlas.ipynb b/examples/atlas/atlas_working_with_ibllib_atlas.ipynb new file mode 100644 index 000000000..d56d21edd --- /dev/null +++ b/examples/atlas/atlas_working_with_ibllib_atlas.ipynb @@ -0,0 +1,1016 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "b767b213", + "metadata": {}, + "source": [ + "# Working with IBL atlas object" + ] + }, + { + "cell_type": "markdown", + "id": "bba98311", + "metadata": {}, + "source": [ + "## Getting started" + ] + }, + { + "cell_type": "markdown", + "id": "461b8f34", + "metadata": {}, + "source": [ + "The Allen atlas image and annotation volumes can be accessed using the `ibllib.atlas.AllenAtlas` class. Upon instantiating the class for the first time, the relevant files will be downloaded from the Allen database." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "df873343", + "metadata": {}, + "outputs": [], + "source": [ + "from ibllib.atlas import AllenAtlas\n", + "\n", + "res = 25 # resolution of Atlas, available resolutions are 10, 25 (default) and 50\n", + "brain_atlas = AllenAtlas(res_um=res)" + ] + }, + { + "cell_type": "markdown", + "id": "95a8e4db", + "metadata": {}, + "source": [ + "## Exploring the volumes" + ] + }, + { + "cell_type": "markdown", + "id": "12f16b38", + "metadata": {}, + "source": [ + "The brain_atlas class contains two volumes, the diffusion weighted imaging **(DWI) image** volume and the **annotation label** volume.\n", + "Each volume is saved into a matrix of the same shape (i.e. they contain the same number of voxels), as defined by the input resolution `res`." + ] + }, + { + "cell_type": "markdown", + "id": "5f34f56c", + "metadata": {}, + "source": [ + "### 1. Image Volume \n", + "The image volume contains the Allen atlas DWI average template. DWI images are typically represented in gray-scale colors." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "769b4fd4", + "metadata": {}, + "outputs": [], + "source": [ + "# Access the image volume\n", + "im = brain_atlas.image\n", + "\n", + "# Explore the size of the image volume (ap, ml, dv)\n", + "print(f'Shape of image volume: {im.shape}')\n", + "\n", + "# Plot a coronal slice at ap = -1000um\n", + "ap = -1000 / 1e6 # input must be in metres\n", + "ax = brain_atlas.plot_cslice(ap, volume='image')\n" + ] + }, + { + "cell_type": "markdown", + "id": "1c46789b", + "metadata": {}, + "source": [ + "### Label Volume\n" + ] + }, + { + "cell_type": "markdown", + "id": "72bea21a", + "metadata": {}, + "source": [ + "The label volume contains information about which brain region each voxel in the volume belongs to." 
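+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Because it stores region indices, the label volume is an array of integers, whereas the image volume holds continuous image intensities. A quick check (the exact dtypes may vary with the cached atlas version):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# compare the data types of the two volumes\n",
+    "print(f'image dtype: {brain_atlas.image.dtype}')\n",
+    "print(f'label dtype: {brain_atlas.label.dtype}')"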
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ff7cb654", + "metadata": {}, + "outputs": [], + "source": [ + "# Access the label volume\n", + "lab = brain_atlas.label\n", + "\n", + "# Explore the size of the label volume (ap, ml, dv)\n", + "print(f'Shape of label volume: {lab.shape}')\n", + "\n", + "# Plot a coronal slice at ap = -1000um\n", + "ap = -1000 / 1e6 # input must be in metres\n", + "ax = brain_atlas.plot_cslice(ap, volume='annotation')" + ] + }, + { + "cell_type": "markdown", + "id": "8bd69066", + "metadata": {}, + "source": [ + "The label volume used in the IBL AllenAtlas class differs from the Allen annotation volume in two ways.\n", + "- Each voxel has information about the index of the Allen region rather than the Allen atlas id\n", + "- The volume has been lateralised to differentiate between the left and right hemisphere\n", + "\n", + "To understand this better let's explore the BrainRegions class that contains information about the Allen structure tree." + ] + }, + { + "cell_type": "markdown", + "id": "04f601ed", + "metadata": {}, + "source": [ + "## Exploring brain regions" + ] + }, + { + "cell_type": "markdown", + "id": "a1802136", + "metadata": {}, + "source": [ + "### Index versus Allen ID\n", + "\n", + "The Allen brain region structure tree can be accessed through the class `ibllib.atlas.regions.BrainRegions`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9c2d097f", + "metadata": {}, + "outputs": [], + "source": [ + "from ibllib.atlas.regions import BrainRegions\n", + "\n", + "brain_regions = BrainRegions()\n", + "\n", + "# Alternatively if you already have the AllenAtlas instantiated you can access it as an attribute\n", + "brain_regions = brain_atlas.regions" + ] + }, + { + "cell_type": "markdown", + "id": "6cf9ab47", + "metadata": {}, + "source": [ + "The brain_regions class has the following data attributes" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1d078160", + "metadata": {}, + "outputs": [], + "source": [ + "brain_regions.__annotations__" + ] + }, + { + "cell_type": "markdown", + "id": "44339559", + "metadata": {}, + "source": [ + "These attributes are the same as the Allen structure tree and for example `id` corresponds to the Allen atlas id while the `name` represents the full anatomical brain region name." + ] + }, + { + "cell_type": "markdown", + "id": "fbe04558", + "metadata": {}, + "source": [ + "The index refers to the index in each of these attribute arrays. For example, index 1 corresponds to the `root` brain region with an atlas id of 977. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0c1fdf7c", + "metadata": {}, + "outputs": [], + "source": [ + "index = 1\n", + "print(brain_regions.id[index])\n", + "print(brain_regions.acronym[index])" + ] + }, + { + "cell_type": "markdown", + "id": "fd8e542c", + "metadata": {}, + "source": [ + "Alternatively, index 1000 corresponds to `PPYd` with an atlas id of 185" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cf56d8d9", + "metadata": {}, + "outputs": [], + "source": [ + "index = 1000\n", + "print(brain_regions.id[index])\n", + "print(brain_regions.acronym[index])" + ] + }, + { + "cell_type": "markdown", + "id": "4c3acedd", + "metadata": {}, + "source": [ + "In the label volume we described above, it is these indices that we are referring to. 
Therefore, we know all voxels in the volume with a value of 1 will be voxels that lie in `root`, while the voxels that have a value of 1000 will be in `PPYd`"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "b607f170",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import numpy as np\n",
+    "root_voxels = np.where(brain_atlas.label == 1)\n",
+    "ppyd_voxels = np.where(brain_atlas.label == 1000)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "Voxels outside of the brain are labelled with `void`, which has both its index and Allen ID equal to 0:"
+   ],
+   "metadata": {
+    "collapsed": false
+   }
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "outputs": [],
+   "source": [
+    "index_void = 0\n",
+    "print(brain_regions.id[index_void])\n",
+    "print(brain_regions.acronym[index_void])"
+   ],
+   "metadata": {
+    "collapsed": false,
+    "pycharm": {
+     "name": "#%%\n"
+    }
+   }
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "As such, you can find all the voxels within the brain by filtering the label volume for non-zero indices:"
+   ],
+   "metadata": {
+    "collapsed": false
+   }
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "outputs": [],
+   "source": [
+    "vox_in = np.where(brain_atlas.label != index_void)"
+   ],
+   "metadata": {
+    "collapsed": false,
+    "pycharm": {
+     "name": "#%%\n"
+    }
+   }
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "You can jump between acronym / id / index with these functions:"
+   ],
+   "metadata": {
+    "collapsed": false
+   }
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "outputs": [],
+   "source": [
+    "# From an acronym, get the index and id\n",
+    "acronym = 'MDm'\n",
+    "index = brain_regions.acronym2index(acronym)\n",
+    "id = brain_regions.acronym2id(acronym)\n",
+    "\n",
+    "print(f'The acronym {acronym} has the indices {index[1][0]} and Allen id {id[0]}')\n",
+    "\n",
+    "# From an id, get the index and acronym\n",
+    "id = 636\n",
+    "acronym = brain_regions.id2acronym(id)\n",
+    "index = brain_regions.id2index(id)\n",
+    "\n",
+    "print(f'The Allen id {id} has the acronym {acronym[0]} and the indices {index[1][0]}')\n",
+    "\n",
+    "# From a single index, get the id and acronym\n",
+    "# (Note that this returns only 1 value)\n",
+    "index = 2016\n",
+    "id = brain_regions.id[index]\n",
+    "acronym = brain_regions.acronym[index]\n",
+    "\n",
+    "print(f'The index {index} has the acronym {acronym} and the Allen id {id}')"
+   ],
+   "metadata": {
+    "collapsed": false,
+    "pycharm": {
+     "name": "#%%\n"
+    }
+   }
+  },
+  {
+   "cell_type": "markdown",
+   "id": "474bb26b",
+   "metadata": {},
+   "source": [
+    "### Lateralisation: left/right hemisphere differentiation\n",
+    "\n",
+    "An additional nuance is the lateralisation. If you compare the size of the brain_regions data class to the Allen structure tree, you will see that it has double the number of columns. This is because the IBL brain regions class encodes both the left and right hemisphere using unique, positive integers (the index), whilst the Allen IDs are signed integers (the sign represents the left or right hemisphere)."
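+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "A minimal illustration of this convention, using the `brain_regions` object from above: the same acronym appears once per hemisphere, with Allen IDs of equal magnitude and opposite sign."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# same region, two hemispheres: one positive (right) and one negative (left) id\n",
+    "print(brain_regions.id[brain_regions.acronym == 'CA1'])"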
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "outputs": [], + "source": [ + "# Print how many indices there are\n", + "print(brain_regions.id.size)" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%%\n" + } + } + }, + { + "cell_type": "markdown", + "source": [ + "This is equivalent to 2x the number of unique Allen IDs (positive + negative), plus `void` (0), which is not lateralised:" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": null, + "outputs": [], + "source": [ + "positive_id = np.where(brain_regions.id > 0)[0]\n", + "negative_id = np.where(brain_regions.id < 0)[0]\n", + "void_id = np.where(brain_regions.id == 0)[0]\n", + "\n", + "print(len(positive_id) + len(negative_id) + len(void_id))" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%%\n" + } + } + }, + { + "cell_type": "markdown", + "source": [ + "We can understand this better by exploring `brain_regions.id` and `brain_regions.name` at the indices where they transition between hemispheres." + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "markdown", + "source": [ + "The first value of `brain_regions.id` is `void` (Allen id `0`):" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": null, + "outputs": [], + "source": [ + "print(brain_regions.id[index_void])" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%%\n" + } + } + }, + { + "cell_type": "markdown", + "source": [ + "The point of change between the right and left hemisphere is at the index:" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": null, + "outputs": [], + "source": [ + "print(len(positive_id))" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%%\n" + } + } + }, + { + "cell_type": "markdown", + "source": [ + "Around this index, the `brain_regions.id` values go from positive Allen atlas ids (right hemisphere) to negative Allen atlas ids (left hemisphere)." 
+ ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": null, + "outputs": [], + "source": [ + "print(brain_regions.id[1320:1340])" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%%\n" + } + } + }, + { + "cell_type": "markdown", + "source": [ + "Regions are organised following the same index ordering in left/right hemisphere.\n", + "For example, you will find the same acronym `PPYd` at the index 1000, and once you've passed the positive integers:" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": null, + "outputs": [], + "source": [ + "index = 1000\n", + "print(brain_regions.acronym[index])\n", + "print(brain_regions.acronym[index + len(positive_id)])\n", + "# Note: do not re-use this approach, this is for explanation only - you will see below a dedicated function" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%%\n" + } + } + }, + { + "cell_type": "markdown", + "id": "0a1af738", + "metadata": {}, + "source": [ + "The `brain_region.name` also go from right to left hemisphere:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c99a6e89", + "metadata": {}, + "outputs": [], + "source": [ + "print(brain_regions.name[1320:1340])" + ] + }, + { + "cell_type": "markdown", + "id": "c144bdd2", + "metadata": {}, + "source": [ + "In the label volume, we can therefore differentiate between left and right hemisphere voxels for the same brain region. First we will use a method in the brain_region class to find out the index of left and right `CA1`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0b7d5209", + "metadata": {}, + "outputs": [], + "source": [ + "brain_regions.acronym2index('CA1')\n", + "# The first values are the acronyms, the second values are the indices" + ] + }, + { + "cell_type": "markdown", + "id": "607ae9b6", + "metadata": {}, + "source": [ + "The method `acronym2index` returns a tuple, with the first value being a list of acronyms passed in and the second value giving the indices in the array that correspond to the left and right hemispheres for this region. We can now use these indices to search in the label volume" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0680ca09", + "metadata": {}, + "outputs": [], + "source": [ + "CA1_right = np.where(brain_atlas.label == 458)\n", + "CA1_left = np.where(brain_atlas.label == 1785)" + ] + }, + { + "cell_type": "markdown", + "source": [ + "## Navigate the brain region hierarchy\n", + "The 1328 regions in the Allen parcelation are organised in a hierarchical tree.\n", + "For example, the region PPY encompasses both the regions PPYd and PPYs.\n", + "\n", + "You can visually explore the hierarchy through this [webpage](https://openalyx.internationalbrainlab.org/admin/experiments/brainregion/) (username: `intbrainlab`, password: `international`).\n", + "(TODO THIS IS NOT A GREAT WAY, CHANGE TO OTHER REFERENCE)\n" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "markdown", + "source": [ + "### Ancestors\n", + "\n", + "To find ancestors of a region, i.e. 
regions that are higher in the hierarchy tree, use `brain_regions.ancestors`.\n", + "\n", + "Let's use the region PPYd as an example:" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": null, + "outputs": [], + "source": [ + "index = 1000 # Remember the Allen id at this index is 185\n", + "brain_regions.ancestors(ids=brain_regions.id[index])" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%%\n" + } + } + }, + { + "cell_type": "markdown", + "source": [ + "All parents along the hierarchy tree are returned.\n", + "The parents are organised in increasing order of `level` (0-1-2...), i.e. the highest, all-encompassing level is first (`root` in the example above).\n", + "Note:\n", + "- The fields contain all the parent regions, including the one passed in (which is last).\n", + "- The field `parent` returns the parent region id of each region in `id` (note these are the same as the values in `id`, shifted up by one level).\n", + "- The field `order` returns values used for plotting (Note: this is *not* the parent's index)\n", + "\n", + "For example, the last `parent` region is PPY (which is indeed the closest parent of PPYd):" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": null, + "outputs": [], + "source": [ + "index = 999\n", + "print(brain_regions.id[index])\n", + "print(brain_regions.acronym[index])" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%%\n" + } + } + }, + { + "cell_type": "markdown", + "source": [ + "### Descendants\n", + "To find the descendants of a region, use `brain_regions.descendants`.\n", + "\n", + "Let's use the region PPY as an example:" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": null, + "outputs": [], + "source": [ + "index = 999\n", + "brain_regions.descendants(ids=brain_regions.id[index])" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%%\n" + } + } + }, + { + "cell_type": "markdown", + "source": [ + "Note:\n", + "- The fields contain all the descendant regions, including the one passed in (which is first).\n", + "- The field `parent` returns the parent region id of the regions in `id`.\n", + "- The field `order` returns values used for plotting (Note: this is *not* the parent's index)\n", + "\n", + "Note also that the `descendants` method will return all descendants from all the different branches down, for example for PTLp:" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": null, + "outputs": [], + "source": [ + "atlas_id = brain_regions.acronym2id('PTLp')\n", + "# Print the acronyms of the descendants of this region\n", + "print(brain_regions.descendants(ids=atlas_id)['acronym'])\n", + "# Print the levels of the descendants of this region\n", + "print(brain_regions.descendants(ids=atlas_id)['level'])" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%%\n" + } + } + }, + { + "cell_type": "markdown", + "source": [ + "### Find region at a particular place in the hierarchy\n", + "\n", + "#### Leaf node\n", + "\n", + "If you need to check whether a region is a leaf node, i.e. that it has no descendants, you could use the `descendants` method and check that the returned length of the `id` is one (i.e. 
it only returns itself).\n", + "\n", + "For example, PPYd is a leaf node:" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": null, + "outputs": [], + "source": [ + "index = 1000\n", + "ppyd_desc = brain_regions.descendants(ids=brain_regions.id[index])\n", + "\n", + "len(ppyd_desc['id']) == 1" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%%\n" + } + } + }, + { + "cell_type": "markdown", + "source": [ + "However, there is a faster method.\n", + "To find all the regions that are leaf nodes, use `brain_regions.leaves`:" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": null, + "outputs": [], + "source": [ + "brain_regions.leaves()" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%%\n" + } + } + }, + { + "cell_type": "markdown", + "source": [ + "It is recommended you use this function to check whether a region is a leaf node:" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": null, + "outputs": [], + "source": [ + "index = 1000\n", + "brain_regions.id[index] in brain_regions.leaves().id" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%%\n" + } + } + }, + { + "cell_type": "markdown", + "source": [ + "#### Find region at a given hierarchy level\n", + "\n", + "To find all the regions that are on a given level of the hierarchy, use `brain_regions.level`:" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": null, + "outputs": [], + "source": [ + "print(f'brain_regions.level contains {brain_regions.level.size} values, which are either {np.unique(brain_regions.level)}')" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%%\n" + } + } + }, + { + "cell_type": "code", + "execution_count": null, + "outputs": [], + "source": [ + "# Example: find the index and acronyms of brain regions at level 0 (i.e. highest parents):\n", + "index = np.where(brain_regions.level == 0)[0]\n", + "print(index)\n", + "brain_regions.acronym[index] # Note that root appears twice because of the lateralisation" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%%\n" + } + } + }, + { + "cell_type": "markdown", + "id": "89d7f7d8", + "metadata": {}, + "source": [ + "## Coordinate systems" + ] + }, + { + "cell_type": "markdown", + "id": "0c47c5c7", + "metadata": {}, + "source": [ + "The voxels can be translated to 3D space.\n", + "In the IBL, all xyz coordinates are referenced from Bregma, which is set as the xyz coordinate [0, 0, 0].\n", + "\n", + "![IBL coordinate system](https://github.com/int-brain-lab/ibllib/blob/atlas_docs/examples/atlas/images/brain_xyz.png?raw=true)\n", + "\n", + "In contrast, in the Allen coordinate framework, the [0,0,0] point corresponds to one corner of the cubic volume." 
+ ] + }, + { + "cell_type": "markdown", + "source": [ + "Below we show the value of Bregma in the Allen CCF space (in micrometres, um):" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": null, + "outputs": [], + "source": [ + "from ibllib.atlas import ALLEN_CCF_LANDMARKS_MLAPDV_UM\n", + "print(ALLEN_CCF_LANDMARKS_MLAPDV_UM)" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%%\n" + } + } + }, + { + "cell_type": "markdown", + "source": [ + "To translate this into an index into the volume `brain_atlas`, you need to divide by the atlas resolution (also in micrometres):" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": null, + "outputs": [], + "source": [ + "# Find bregma position in indices\n", + "bregma_index = ALLEN_CCF_LANDMARKS_MLAPDV_UM['bregma'] / brain_atlas.res_um" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%%\n" + } + } + }, + { + "cell_type": "markdown", + "source": [ + "This index can be passed into `brain_atlas.bc.i2xyz`, which converts volume indices into IBL xyz coordinates (i.e. relative to Bregma):" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": null, + "outputs": [], + "source": [ + "# Find bregma position in xyz in m (expect this to be 0 0 0)\n", + "bregma_xyz = brain_atlas.bc.i2xyz(bregma_index)\n", + "print(bregma_xyz)" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%%\n" + } + } + }, + { + "cell_type": "markdown", + "source": [ + "Functions exist in both directions, i.e. from a volume index to IBL xyz, and from xyz to an index.\n", + "Note that these functions take and return values in *metres*, not micrometres." + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": null, + "outputs": [], + "source": [ + "# Convert from an arbitrary index to an xyz position (m) relative to Bregma\n", + "index = np.array([102, 234, 178]).astype(float)\n", + "xyz = brain_atlas.bc.i2xyz(index)\n", + "print(f'xyz values are in metres: {xyz}')\n", + "\n", + "# Convert from an xyz position (m) to an index in the atlas\n", + "xyz = np.array([-325, 4000, 250]) / 1e6\n", + "index = brain_atlas.bc.xyz2i(xyz)" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%%\n" + } + } + }, + { + "cell_type": "markdown", + "source": [ + "To know the sign and voxel resolution for each xyz axis, use:" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": null, + "outputs": [], + "source": [ + "# Find the resolution (in metres) of each axis\n", + "res_xyz = brain_atlas.bc.dxyz\n", + "\n", + "# Find the sign of each axis\n", + "sign_xyz = np.sign(res_xyz)\n", + "\n", + "print(f\"Resolution xyz: {res_xyz} in metres \\nSign xyz:\\t\\t{sign_xyz}\")" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%%\n" + } + } + }, + { + "cell_type": "markdown", + "source": [ + "To jump directly from an Allen xyz value to an IBL xyz value, use `brain_atlas.ccf2xyz`:" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": null, + "outputs": [], + "source": [ + "# Example: Where is the Allen 0 relative to IBL Bregma?\n", + "# This will give the Bregma value shown above (in metres), but with the opposite sign on each axis\n", + "brain_atlas.ccf2xyz(np.array([0, 0, 0]))" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%%\n" + } + } + },
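+ { + "cell_type": "markdown", + "source": [ + "The inverse method, `brain_atlas.xyz2ccf`, is also available. As a brief sketch (assuming the default ML-AP-DV ordering), converting the IBL origin back to CCF space should recover the bregma landmark value shown earlier (in micrometres):" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": null, + "outputs": [], + "source": [ + "# Example: Where is IBL Bregma in the Allen CCF space?\n", + "brain_atlas.xyz2ccf(np.array([0, 0, 0]))" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%%\n" + } + } + }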
"metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.16" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/examples/atlas/images/brain_xyz.png b/examples/atlas/images/brain_xyz.png new file mode 100644 index 000000000..5d707848e Binary files /dev/null and b/examples/atlas/images/brain_xyz.png differ diff --git a/examples/loading_data/loading_multi_photon_imaging_data.ipynb b/examples/loading_data/loading_multi_photon_imaging_data.ipynb new file mode 100644 index 000000000..d2bb08a1a --- /dev/null +++ b/examples/loading_data/loading_multi_photon_imaging_data.ipynb @@ -0,0 +1,424 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "source": [ + "# Loading Multi-photon Calcium Imaging Data\n", + "\n", + "Cellular Calcium activity recorded using a multi-photon imaging." + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%% md\n" + } + } + }, + { + "cell_type": "markdown", + "source": [ + "## Relevant ALF objects\n", + "* mpci\n", + "* mpciROIs\n", + "* mpciROITypes\n", + "* mpciMeanImage\n", + "* mpciStack\n", + "\n", + "## Terminology\n", + "* **ROI** - A region of interest, usually a neuron soma, detected using an algorithm such as Suite2P.\n", + "* **FOV** - A field of view is a plane or volume covering a region of the brain.\n", + "* **Imaging stack** - Multiple FOVs acquired at different depths along the same z axis.\n", + "\n", + "## Finding sessions with imaging data\n", + "Sessions that contain any form of imaging data have an 'Imaging' procedure. This includes sessions\n", + "photometry, mesoscope, 2P, and widefield data. To further filter by imaging modality you can query\n", + "the imaging type associated with a session's field of view." + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "markdown", + "source": [ + "```python\n", + "# Find mesoscope imaging sessions\n", + "import numpy as np\n", + "\n", + "from one.api import ONE\n", + "one = ONE()\n", + "assert not one.offline, 'ONE must be connect to Alyx for searching imaging sessions'\n", + "\n", + "query = 'field_of_view__imaging_type__name,mesoscope'\n", + "eids = one.search(procedures='Imaging', django=query, query_type='remote')\n", + "```" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "markdown", + "source": [ + "Sessions can be further filtered by brain region. You can filter with by Allen atlas name, acronym\n", + "or ID, for example:\n", + "\n", + "* `atlas_name='Primary visual area'`\n", + "* `atlas_acronym='VISp'`\n", + "* `atlas_id=385`" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%% md\n" + } + } + }, + { + "cell_type": "markdown", + "source": [ + "```python\n", + "# Find mesoscope imaging sessions in V1, layer 2/3\n", + "query = 'field_of_view__imaging_type__name,mesoscope'\n", + "eids = one.search(procedures='Imaging', django=query, query_type='remote', atlas_acronym='VISp2/3')\n", + "```" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "markdown", + "source": [ + "The 'details' flag will return the session details, including a `field_of_view` field which contains\n", + "a list of each field of view and its location. 
All preprocessed mpci imaging data is in `alf/FOV_XX`\n", + "where XX is the field of view number. The `FOV_XX` corresponds to a field of view name in Alyx." + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%% md\n" + } + } + }, + { + "cell_type": "markdown", + "source": [ + "```python\n", + "eids, det = one.search(procedures='Imaging', django=query, query_type='remote', atlas_acronym='VISp2/3', details=True)\n", + "FOVs = det[0]['field_of_view']\n", + "print(FOVs[0])\n", + "```" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "markdown", + "source": [ + "The ibllib AllenAtlas class allows you to search brain region descendants and ancestors in order to\n", + "find the IDs of brain regions at a certain granularity." + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "markdown", + "source": [ + "```python\n", + "# Search brain areas by name using Alyx\n", + "V1 = one.alyx.rest('brain-regions', 'list', name='Primary visual area')\n", + "for area in V1:\n", + " print('%s (%s: %i)' % (area['name'], area['acronym'], area['id']))\n", + "\n", + "\n", + "from ibllib.atlas import AllenAtlas\n", + "atlas = AllenAtlas()\n", + "\n", + "# Interconvert ID and acronym\n", + "V1_id = atlas.regions.acronym2id('VISp')\n", + "V1_acronym = atlas.regions.id2acronym(V1_id)\n", + "\n", + "# Show all descendants of primary visual area (i.e. all layers)\n", + "atlas.regions.descendants(V1_id)\n", + "```" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "markdown", + "source": [ + "For more information see \"[Working with ibllib atlas](../atlas_working_with_ibllib_atlas.html)\"." + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%% md\n" + } + } + }, + { + "cell_type": "markdown", + "source": [ + "## Loading imaging data for a given field of view\n", + "\n", + "For mesoscope sessions there is likely more than one field of view, not all of which cover the\n", + "area of interest. It is therefore more useful to search by field of view instead.\n", + "Each field of view returned contains a session eid for loading data with." + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%% md\n" + } + } + }, + { + "cell_type": "markdown", + "source": [ + "```python\n", + "# Search for all mesoscope fields of view containing V1\n", + "FOVs = one.alyx.rest('fields-of-view', 'list', imaging_type='mesoscope', atlas_acronym='VISp')\n", + "# Download all data for the first field of view\n", + "FOV_00 = one.load_collection(FOVs[0]['session'], '*' + FOVs[0]['name'])\n", + "\n", + "# Search the fields of view for a specific session that took place in a given brain region\n", + "eid = 'a5550a8e-2484-4539-b7f0-8e5f829d0ba7'\n", + "FOVs = one.alyx.rest('fields-of-view', 'list', imaging_type='mesoscope', atlas_id=187, session=eid)\n", + "```" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "markdown", + "source": [ + "## Loading imaging stacks\n", + "For mesoscope sessions the same region may be acquired at multiple depths. The plane at each depth\n", + "is considered a separate field of view; the fields of view in a stack are related to one another through the stack object.\n", + "If a field of view was acquired as part of a stack, the `stack` field will contain an ID. 
You can\n", + "find all fields of view in a given stack by querying the 'imaging-stack' endpoint:" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%% md\n" + } + } + }, + { + "cell_type": "markdown", + "source": [ + "```python\n", + "stack = one.alyx.rest('imaging-stack', 'read', id=FOVs[0]['stack'])\n", + "FOVs = stack['slices']\n", + "print('There were %i fields of view in stack %s' % (len(FOVs), stack['id']))\n", + "```" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "markdown", + "source": [ + "### List the number of fields of view (FOVs) recorded during a session" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "markdown", + "source": [ + "```python\n", + "from one.api import ONE\n", + "one = ONE()\n", + "eid = 'b1ca324f-5db7-4106-8be2-0dd9cce17648'\n", + "\n", + "fov_folders = one.list_collections(eid, collection='alf/FOV_*')\n", + "fovs = sorted(map(lambda x: int(x[-2:]), fov_folders))\n", + "nFOV = len(fovs)\n", + "```" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "markdown", + "source": [ + "## Loading ROI activity for a single session" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "markdown", + "source": [ + "```python\n", + "# Loading ROI activity for a single FOV\n", + "ROI_data_00 = one.load_collection(eid, 'alf/FOV_00', object=['mpci', 'mpciROIs', 'mpciROITypes', 'mpciStack'])\n", + "print(ROI_data_00.keys())\n", + "\n", + "# Loading ROI activity for all FOVs\n", + "all_ROI_data = one.load_collection(eid, 'alf/FOV_*', object=['mpci', 'mpciROIs', 'mpciROITypes', 'mpciStack'])\n", + "print(all_ROI_data.keys())\n", + "print(all_ROI_data.FOV_00.keys())\n", + "```" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "markdown", + "source": [ + "### Get the brain location of an ROI\n", + "The brain location of each ROI is first estimated using the surgical coordinates of the imaging window.\n", + "These datasets have an '_estimate' in the name. After histological alignment, datasets are created\n", + "without '_estimate' in the name. The histologically aligned locations are most accurate and should be\n", + "used where available." + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%% md\n" + } + } + }, + { + "cell_type": "markdown", + "source": [ + "```python\n", + "roi = 0 # The ROI index to lookup\n", + "final_alignment = 'brainLocationsIds_ccf_2017' in ROI_data_00['mpciROIs']\n", + "key = 'brainLocationsIds_ccf_2017' if final_alignment else 'brainLocationsIds_ccf_2017_estimate'\n", + "\n", + "atlas_id = ROI_data_00['mpciROIs'][key][roi]\n", + "print(f'ROI {roi} was located in {atlas.regions.id2acronym(atlas_id)}')\n", + "```" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "markdown", + "source": [ + "## Loading times\n", + "Timestamps for each frame are in seconds from session start and represent the time when frame acquisition started.\n", + "Typically a laser scans each voxel in the field of view in a line-by-line fashion (this may vary across apparatus and\n", + "in configurations such as dual plane mode). Thus there is a fixed time offset between regions of interest.\n", + "The offset can be found in the mpciStack.timeshift.npy dataset and, depending on its shape, may be per voxel or per\n", + "scan line." 
+ ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%% md\n" + } + } + }, + { + "cell_type": "markdown", + "source": [ + "```python\n", + "frame_times = ROI_data_00['mpci']['times']\n", + "roi_xyz = ROI_data_00['mpciROIs']['stackPos']\n", + "timeshift = ROI_data_00['mpciStack']['timeshift']\n", + "roi_offsets = timeshift[roi_xyz[:, len(timeshift.shape)]]\n", + "# An array of timestamps of shape (n_roi, n_frames)\n", + "roi_times = np.tile(frame_times, (roi_offsets.size, 1)) + roi_offsets[np.newaxis, :].T\n", + "\n", + "import matplotlib.pyplot as plt\n", + "roi_signal = ROI_data_00['mpci']['ROIActivityF'].T\n", + "roi = 2 # The ROI index to lookup\n", + "plt.plot(roi_times[roi], roi_signal[roi])\n", + "plt.xlabel('Timestamps / s'), plt.ylabel('ROI activity / photodetector units')\n", + "```" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "markdown", + "source": [ + "### Search for sessions with multi-depth fields of view (imaging stacks)" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%% md\n" + } + } + }, + { + "cell_type": "markdown", + "source": [ + "```python\n", + "query = 'field_of_view__stack__isnull,False'\n", + "eids, det = one.search(procedures='Imaging', django=query, query_type='remote', details=True)\n", + "```" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "markdown", + "source": [ + "### Search sessions with GCaMP mice\n", + "..." + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%% md\n" + } + } + }, + { + "cell_type": "markdown", + "source": [ + "## More details\n", + "* [Description of mesoscope datasets](https://docs.google.com/document/d/1OqIqqakPakHXRAwceYLwFY9gOrm8_P62XIfCTnHwstg/edit#heading=h.nvzaz0fozs8h)\n", + "* [Loading raw mesoscope data](./loading_raw_mesoscope_data.ipynb)" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%% md\n" + } + } + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/examples/loading_data/loading_raw_mesoscope_data.ipynb b/examples/loading_data/loading_raw_mesoscope_data.ipynb new file mode 100644 index 000000000..816b2c8b2 --- /dev/null +++ b/examples/loading_data/loading_raw_mesoscope_data.ipynb @@ -0,0 +1,116 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "collapsed": true, + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "# Loading Raw Mesoscope Imaging Data\n", + "\n", + "Cellular activity recorded using a 2 photon mesoscope.\n", + "\n", + "## Loading raw cell detection output\n", + "\n", + "Currently cell detection is done using [suite2p](https://suite2p.readthedocs.io/en/latest/). 
You\n", + "can load the raw output of suite2p for loading into their GUI:" + ] + }, + { + "cell_type": "markdown", + "source": [ + "```python\n", + "from one.api import ONE\n", + "one = ONE()\n", + "\n", + "eid = 'b1ca324f-5db7-4106-8be2-0dd9cce17648'\n", + "FOV = 0 # The field of view (FOV) to load\n", + "suite2p_zip = one.load_dataset(eid, '*ROIData.raw', collection=f'alf/FOV_{FOV:02}')\n", + "\n", + "# Unarchive zip file\n", + "import shutil\n", + "dst_dir = suite2p_zip.parent.joinpath('suite2p_output')\n", + "files = shutil.unpack_archive(suite2p_zip, dst_dir)\n", + "\n", + "# Run the suite2p GUI\n", + "import suite2p.gui\n", + "suite2p.gui.run(statfile=dst_dir / 'stat.npy')\n", + "```" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": null, + "outputs": [], + "source": [ + "# Downloading the raw images" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%%\n" + } + } + }, + { + "cell_type": "markdown", + "source": [ + "## Suite2P output vs ALF datasets\n", + "Below is a table compareing the raw output of Suite2P with the ALF datasets available through ONE.\n", + "\n", + "| Suite2P | ONE |\n", + "| --- | --- |\n", + "| **F.npy** [nROIs, nFrames] | **mpci.ROIActivityF.npy** [nFrames, nROIs] |\n", + "| **Fneu.npy** [nROIs, nFrames] | **mpci.ROIActivityFneu.npy** [nFrames, nROIs] |\n", + "| **spks.npy** [nROIs, nFrames] | **mpci.ROIActivityDeconvolved.npy** [nFrames, nROIs] |\n", + "| **ops.npy** (badframes) [nFrames] | **mpci.badFrames.npy** [nFrames] |\n", + "| **iscell.npy** [nROIs, 2] | **mpciROIs.included.npy** [nROIs] |\n", + "| **stat.npy** (med) [nROIs, 3] | **mpciROIs.stackPos.npy** [nROIs, 3] |" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%% md\n" + } + } + }, + { + "cell_type": "markdown", + "source": [ + "## More details\n", + "* [Description of mesoscope datasets](https://docs.google.com/document/d/1OqIqqakPakHXRAwceYLwFY9gOrm8_P62XIfCTnHwstg/edit#heading=h.nvzaz0fozs8h)\n", + "* [Loading multi-photon imaging data](./loading_multi_photon_imaging_data.ipynb)\n" + ], + "metadata": { + "collapsed": false + } + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/examples/loading_data/loading_spikesorting_data.ipynb b/examples/loading_data/loading_spikesorting_data.ipynb index 62c9851ba..d765642d0 100644 --- a/examples/loading_data/loading_spikesorting_data.ipynb +++ b/examples/loading_data/loading_spikesorting_data.ipynb @@ -213,7 +213,7 @@ "outputs": [], "source": [ "from brainbox.ephys_plots import image_fr_plot\n", - "from brainbox.processing import bincount2D\n", + "from iblutil.numerical import bincount2D\n", "import numpy as np\n", "\n", "time_bin = 0.05 # time bin in seconds\n", diff --git a/examples/loading_data/loading_trials_data.ipynb b/examples/loading_data/loading_trials_data.ipynb index 361b46cf1..398d32734 100644 --- a/examples/loading_data/loading_trials_data.ipynb +++ b/examples/loading_data/loading_trials_data.ipynb @@ -45,9 +45,7 @@ { "cell_type": "markdown", "source": [ - "## Loading a single session's trials\n", - "\n", - "If you want to load the trials data for a single session, we recommend 
+ { + "cell_type": "markdown", + "source": [ + "## More details\n", + "* [Description of mesoscope datasets](https://docs.google.com/document/d/1OqIqqakPakHXRAwceYLwFY9gOrm8_P62XIfCTnHwstg/edit#heading=h.nvzaz0fozs8h)\n", + "* [Loading multi-photon imaging data](./loading_multi_photon_imaging_data.ipynb)\n" + ], + "metadata": { + "collapsed": false + } + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/examples/loading_data/loading_spikesorting_data.ipynb b/examples/loading_data/loading_spikesorting_data.ipynb index 62c9851ba..d765642d0 100644 --- a/examples/loading_data/loading_spikesorting_data.ipynb +++ b/examples/loading_data/loading_spikesorting_data.ipynb @@ -213,7 +213,7 @@ "outputs": [], "source": [ "from brainbox.ephys_plots import image_fr_plot\n", - "from brainbox.processing import bincount2D\n", + "from iblutil.numerical import bincount2D\n", "import numpy as np\n", "\n", "time_bin = 0.05 # time bin in seconds\n", diff --git a/examples/loading_data/loading_trials_data.ipynb b/examples/loading_data/loading_trials_data.ipynb index 361b46cf1..398d32734 100644 --- a/examples/loading_data/loading_trials_data.ipynb +++ b/examples/loading_data/loading_trials_data.ipynb @@ -45,9 +45,7 @@ { "cell_type": "markdown", "source": [ - "## Loading a single session's trials\n", - "\n", - "If you want to load the trials data for a single session, we recommend you use the `SessionLoader`:" + "## Loading a single session's trials\n" ], "metadata": { "collapsed": false @@ -58,20 +56,10 @@ "execution_count": null, "outputs": [], "source": [ - "'''\n", - "RECOMMENDED\n", - "'''\n", - "from brainbox.io.one import SessionLoader\n", "from one.api import ONE\n", "one = ONE()\n", "eid = '4ecb5d24-f5cc-402c-be28-9d0f7cb14b3a'\n", - "sl = SessionLoader(eid=eid, one=one)\n", - "sl.load_trials()\n", - "\n", - "# The datasets are attributes of the sl.trials, for example probabilityLeft :\n", - "probabilityLeft = sl.trials['probabilityLeft']\n", - "# Find all of them using:\n", - "sl.trials.keys()" + "trials = one.load_object(eid, 'trials', collection='alf')" ], "metadata": { "collapsed": false, @@ -83,7 +71,7 @@ { "cell_type": "markdown", "source": [ - "For completeness, we present below how to load the trials object using the `one.load_object` method, however we recommend you use the code above and use the `SessionLoader` instead." + "For combining trials data with various recording modalities for a given session, the `SessionLoader` class is more convenient:" ], "metadata": { "collapsed": false @@ -94,13 +82,17 @@ "execution_count": null, "outputs": [], "source": [ - "'''\n", - "ALTERNATIVE - NOT RECOMMENDED\n", - "'''\n", + "from brainbox.io.one import SessionLoader\n", "from one.api import ONE\n", "one = ONE()\n", "eid = '4ecb5d24-f5cc-402c-be28-9d0f7cb14b3a'\n", - "trials = one.load_object(eid, 'trials', collection='alf')" + "sl = SessionLoader(eid=eid, one=one)\n", + "sl.load_trials()\n", + "\n", + "# The datasets are attributes of the sl.trials, for example probabilityLeft:\n", + "probabilityLeft = sl.trials['probabilityLeft']\n", + "# Find all of them using:\n", + "sl.trials.keys()" ], "metadata": { "collapsed": false, @@ -139,8 +131,14 @@ "trials = (trials\n", " .set_index('session')\n", " .join(training.set_index('session'))\n", - " .sort_values(by='session_start_time')\n", - " .fillna(method='ffill'))" + " .sort_values(by=['session_start_time', 'intervals_0']))\n", + "trials['training_status'] = trials.training_status.fillna(method='ffill')\n", + "\n", + "# Join sessions table for number, task_protocol, etc.\n", + "trials = one.load_aggregate('subjects', subject, '_ibl_subjectTrials.table')\n", + "if 'task_protocol' in trials:\n", + " trials = trials.drop('task_protocol', axis=1)\n", + "trials = trials.set_index('session').join(one._cache.sessions.drop('date', axis=1))" ] }, { @@ -150,7 +148,7 @@ "source": [ "## More details\n", "* [Description of trials datasets](https://docs.google.com/document/d/1OqIqqakPakHXRAwceYLwFY9gOrm8_P62XIfCTnHwstg/edit#heading=h.nvzaz0fozs8h)\n", - "* [Decsription of task QC metrics](https://int-brain-lab.github.io/iblenv/_autosummary/ibllib.qc.task_metrics.html)" + "* [Description of task QC metrics](../_autosummary/ibllib.qc.task_metrics.html)" ] }, { "cell_type": "markdown", "id": "2fcc52a0", "metadata": {}, "source": [ - "## Useful modules\n", - "* [brainbox.behavior.training](https://int-brain-lab.github.io/iblenv/_autosummary/brainbox.behavior.training.html)\n", - "* [brainbox.behavior.pyschofit](https://int-brain-lab.github.io/iblenv/_autosummary/brainbox.behavior.pyschofit.html)\n", - "* [brainbox.task.trials](https://int-brain-lab.github.io/iblenv/_autosummary/brainbox.task.trials.html)\n", - "* [ibllib.qc.task_metrics](https://int-brain-lab.github.io/iblenv/_autosummary/ibllib.qc.task_metrics.html)" + "## Useful modules, packages and functions\n", + "* 
[brainbox.behavior.training](../_autosummary/brainbox.behavior.training.html)\n", + "* [psychofit](https://github.com/cortex-lab/psychofit/blob/master/Examples.ipynb)\n", + "* [brainbox.task.trials](../_autosummary/brainbox.task.trials.html)\n", + "* [ibllib.qc.task_metrics](../_autosummary/ibllib.qc.task_metrics.html)\n", + "* [brainbox.io.one.load_iti](../_autosummary/brainbox.io.one.html#brainbox.io.one.load_iti)" ] }, { @@ -232,7 +231,7 @@ "id": "6a7aa771", "metadata": {}, "source": [ - "### Example 3. Plotting pyschometric curve" + "### Example 3. Plotting psychometric curve" ] }, { @@ -293,6 +292,35 @@ "help(task_metrics.check_errorCue_delays)" ] }, + { + "cell_type": "markdown", + "source": [ + "### Example 5: Computing the inter-trial interval (ITI)\n", + "The ITI is the period of open-loop grey screen commencing at stimulus off and lasting until the\n", + "quiescent period at the start of the following trial." + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": null, + "outputs": [], + "source": [ + "from brainbox.io.one import load_iti\n", + "eid = 'ae8787b1-4229-4d56-b0c2-566b61a25b77'\n", + "trials = one.load_object(eid, 'trials')\n", + "trials['iti'] = load_iti(trials)\n", + "print(trials.to_df().iloc[:5, -5:])" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%%\n" + } + } + }, { "cell_type": "markdown", "id": "5738f9fb", @@ -303,7 +331,7 @@ }, "source": [ "## Other relevant examples\n", - "* COMING SOON" + "* For examples of how to compute reaction time and/or response time, see [Working with wheel data](./docs_wheel_moves.html)." ] } ], diff --git a/examples/loading_data/loading_wheel_data.ipynb b/examples/loading_data/loading_wheel_data.ipynb index 2cbd669ab..053f72d2a 100644 --- a/examples/loading_data/loading_wheel_data.ipynb +++ b/examples/loading_data/loading_wheel_data.ipynb @@ -70,7 +70,8 @@ "metadata": {}, "source": [ "## More details\n", - "* [Description of wheel datasets](https://docs.google.com/document/d/1OqIqqakPakHXRAwceYLwFY9gOrm8_P62XIfCTnHwstg/edit#heading=h.hnjqyfnroyya)" + "* [Description of wheel datasets](https://docs.google.com/document/d/1OqIqqakPakHXRAwceYLwFY9gOrm8_P62XIfCTnHwstg/edit#heading=h.hnjqyfnroyya)\n", + "* [Working with wheel data](./docs_wheel_moves.html)" ] }, { @@ -78,10 +79,10 @@ "id": "357a860b", "metadata": {}, "source": [ - "## Useful modules\n", - "* [brainbox.behavior.wheel](https://int-brain-lab.github.io/iblenv/_autosummary/brainbox.behavior.wheel.html)\n", - "* [brainbox.io.one.load_wheel_reaction_times](https://int-brain-lab.github.io/iblenv/_autosummary/brainbox.io.one.html#brainbox.io.one.load_wheel_reaction_times)\n", - "* [ibllib.qc.task_metrics](https://int-brain-lab.github.io/iblenv/_autosummary/ibllib.qc.task_metrics.html)" + "## Useful modules and functions\n", + "* [brainbox.behavior.wheel](../_autosummary/brainbox.behavior.wheel.html)\n", + "* [brainbox.io.one.load_wheel_reaction_times](../_autosummary/brainbox.io.one.html#brainbox.io.one.load_wheel_reaction_times)\n", + "* [ibllib.qc.task_metrics](../_autosummary/ibllib.qc.task_metrics.html)" ] }, { @@ -94,42 +95,47 @@ }, { "cell_type": "markdown", - "id": "5a4b3e83", - "metadata": {}, "source": [ - "### Example 1: Extract wheel velocity" - ] + "### Example 1: Find linearly interpolated wheel position" + ], + "metadata": { + "collapsed": false + } }, { "cell_type": "code", "execution_count": null, - "id": "7a487944", - "metadata": {}, "outputs": [], "source": [ - "from brainbox.behavior.wheel 
import velocity\n", - "\n", - "wh_velocity = velocity(wheel['timestamps'], wheel['position'])" - ] + "from brainbox.behavior.wheel import interpolate_position\n", + "Fs = 1000\n", + "wh_pos_lin, wh_ts_lin = interpolate_position(wheel['timestamps'], wheel['position'], freq=Fs)" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%%\n" + } + } }, { "cell_type": "markdown", - "id": "cb128868", + "id": "5a4b3e83", "metadata": {}, "source": [ - "### Example 2: Find linearly interpolated wheel position" + "### Example 2: Extract wheel velocity" ] }, { "cell_type": "code", "execution_count": null, - "id": "a7492bfc", + "id": "7a487944", "metadata": {}, "outputs": [], "source": [ - "from brainbox.behavior.wheel import interpolate_position\n", + "from brainbox.behavior.wheel import velocity_filtered\n", "\n", - "wh_pos_lin, wh_ts_lin = interpolate_position(wheel['timestamps'], wheel['position'])" + "wh_velocity, wh_acc = velocity_filtered(wh_pos_lin, Fs)\n" ] }, { @@ -138,7 +144,7 @@ "metadata": {}, "source": [ "## Other relevant examples\n", - "* [Working with wheel data](https://int-brain-lab.github.io/iblenv/notebooks_external/docs_wheel_moves.html)" + "* [Working with wheel data](./docs_wheel_moves.html)" ] } ], @@ -163,4 +169,4 @@ }, "nbformat": 4, "nbformat_minor": 5 -} +} \ No newline at end of file diff --git a/ibllib/__init__.py b/ibllib/__init__.py index f02501713..3a1a43a09 100644 --- a/ibllib/__init__.py +++ b/ibllib/__init__.py @@ -2,7 +2,7 @@ import logging import warnings -__version__ = '2.23.1' +__version__ = '2.25.0' warnings.filterwarnings('always', category=DeprecationWarning, module='ibllib') # if this becomes a full-blown library we should let the logging configuration to the discretion of the dev @@ -12,13 +12,8 @@ USE_LOGGING = True #%(asctime)s,%(msecs)d if USE_LOGGING: - try: # TODO Remove after release of iblutil v1.4 - from iblutil.util import get_logger - get_logger(name='ibllib') - warnings.warn('Please run `pip install -U iblutil` to update to v1.4', category=DeprecationWarning) - except ImportError: - from iblutil.util import setup_logger - setup_logger(name='ibllib') + from iblutil.util import setup_logger + setup_logger(name='ibllib', level=logging.INFO) else: # deactivate all log calls for use as a library logging.getLogger('ibllib').addHandler(logging.NullHandler()) diff --git a/ibllib/atlas/__init__.py b/ibllib/atlas/__init__.py index 0a8bf6324..aaaaf24dd 100644 --- a/ibllib/atlas/__init__.py +++ b/ibllib/atlas/__init__.py @@ -1,2 +1,199 @@ +"""A package for working with brain atlases. + +For examples and tutorials on using the IBL atlas package, see +https://docs.internationalbrainlab.org/atlas_examples.html + +.. TODO Explain differences between this package and the Allen SDK. +Much of this was adapted from the `cortexlab allenCCF repository <https://github.com/cortex-lab/allenCCF>`_. + +Terminology +----------- +There are many terms used somewhat incoherently within this API and the community at large. Below +are some definitions of the most common terms. + +* **Atlas** - A set of serial sections along different anatomical planes of a brain where each relevant brain structure is + assigned a number of coordinates to define its outline or volume. An atlas essentially comprises a set of images, annotations, + and a coordinate system. +* **Annotation** - A set of identifiers assigned to different atlas regions. 
+* **Mapping** - A function that maps one ordered list of brain region IDs to another, allowing one to control annotation + granularity and brain region hierarchy, or to translate brain region names from one atlas to another. The default mapping is + identity. +* **Coordinate framework** - The way in which an atlas translates image coordinates (e.g. Cartesian or spherical coordinates) to + real-world anatomical coordinates (typically physical distance from a given landmark such as bregma, along three axes, + ML-AP-DV). +* **Reference space** - The coordinate system and annotations used by a given atlas. It is sometimes useful to compare anatomy + between atlases, which requires expressing one atlas in another's reference space. +* **Structure tree** - The hierarchy of brain regions, handled by the BrainRegions class. +* **Scaling** - Atlases typically comprise images averaged over a number of brains. Scaling allows one to account for any + consistent and measurable imaging or tissue distortion, or to better align to an individual brain of a specific size. The + default scaling is identity. +* **Flat map** - An annotated projection of the 3D brain to 2D. +* **Slice** - A 2D section of a brain atlas volume. Typically these are coronal (cut along the medio-lateral axis), sagittal + (along the dorso-ventral axis) or transverse a.k.a. axial, horizontal (along the rostro-caudal a.k.a. anterio-posterior axis). + + +Atlases +------- +There are two principal mouse brain atlases in this module: + +1. The Allen Common Coordinate Framework (CCF) [1]_. +2. The Mouse Brain in Stereotaxic Coordinates (MBSC) 4th Edition, by Paxinos G, and Franklin KBJ [2]_, matched + to the Allen Common Coordinate Framework by Chon et al. [3]_. + +The latter is referred to here as the 'Franklin-Paxinos atlas'. These atlases comprise a 3D array of voxels and their associated +brain region identifiers (labels) at a given resolution. The Allen Atlas can be instantiated in 10um, 25um or 50um resolution. +The Franklin-Paxinos atlas has a resolution of 10um in the ML and DV axis, and 100um in the AP axis. **TODO Mention flat maps.** + + +Scalings +-------- +Additionally there are two further atlases that apply some form of scaling to the Allen CCF atlas +to account for distortion that occurs during the imaging and tissue fixation process: + +1. The Needles atlas - 40 C57BL/6J (p84) mice underwent MRI imaging post-mortem while the brain was still in the skull, followed by + conventional Nissl histology [4]_. These mouse brain atlas images combined with segmentation (known as DSURQE) were manually + transformed onto the Allen CCF atlas to determine the scaling. +2. The MRI Toronto - 12 p65 mice MRI images were taken *in vivo* then averaged and transformed on the Allen CCF atlas to determine + the scaling [5]_. + +All scaling is currently linear. Scaling of this kind can be applied arbitrarily to better represent a specific mouse age and +sex [5]_. NB: In addition to distortions, the Allen CCF atlas is pitched down by about 5 degrees relative to a flat skull (where +bregma and lambda are at the same DV height) [6]_, however this is not currently accounted for. + + +Mappings +-------- +In addition to the atlases there are also multiple brain region mappings that serve one of two purposes: 1. control the +granularity of particular brain regions; 2. support differing anatomical sub-divisions or nomenclature. The two Allen atlas mappings +below were created somewhat arbitrarily by Nick Steinmetz to aid in analysis: + +1. 
Beryl - brain atlas annotations without layer sub-divisions or certain ganglial/nucleus sub-divisions (e.g. the core/shell + sub-division of the lateral geniculate nucleus). Fibre tracts, pia, etc. are also absent. The choice of which areas to combine + was guided partially by the computed volume of each area. This mapping is used in the brainwide map and prior papers [7]_, [8]_. +2. Cosmos - coarse brain atlas annotations, dividing the atlas into 10 broad areas: isocortex, olfactory areas, cortical subplate, + cerebral nuclei, thalamus, hypothalamus, midbrain, hindbrain, cerebellum and hippocampal formation. + +The names of these two mappings appear to be without meaning. + +Non-Allen mappings: + +3. Swanson - the brain atlas annotations from the Swanson rat brain flat map [9]_, mapped to the Allen atlas manually by Olivier + Winter. See `Fixtures`_ for details. + +Each mapping includes both a lateralized (suffix '-lr') and non-lateralized version. The lateralized mappings assign a different ID +to structures in the right side of the brain. The Allen atlas IDs are kept intact but lateralized as follows: labels are +duplicated and IDs multiplied by -1, with the understanding that left hemisphere regions have negative IDs. There is currently no +mapping between Franklin & Paxinos and the Allen atlases. + + +Notes +----- +The Allen atlas and the CCF annotations have different release dates and versions [10]_. The annotations used by IBL are the 2017 +version. + +The IBL uses the following conventions: + +- All atlas images have dimensions (AP, ML, DV). With C-ordering this makes coronal slicing most efficient. The origin is the top + left corner of the image. +- Coordinates are provided in the order (ML AP DV) and are in meters relative to bregma. +- Left hemisphere ML coordinates are -ve; right, +ve. +- AP coordinates anterior to bregma are +ve; posterior, -ve. +- DV coordinates ventral to bregma are -ve; dorsal, +ve. +- Bregma was determined by asking five experimentalists to pick the voxel containing bregma on the Allen atlas and taking the + average. NB: The midline appears slightly off-center in the Allen atlas image volume. +- All left hemisphere regions have negative region IDs in all lateralized mappings. + + +Examples +-------- +Below are some brief API examples. For in-depth tutorials on using the IBL atlas package, see +https://docs.internationalbrainlab.org/atlas_examples.html. + +Find bregma position in volume indices (the bregma coordinates in um divided by the atlas resolution in um) + +>>> ba = AllenAtlas() +>>> bregma_index = ALLEN_CCF_LANDMARKS_MLAPDV_UM['bregma'] / ba.res_um + +Find bregma position in xyz in m (expect this to be 0 0 0) + +>>> bregma_xyz = ba.bc.i2xyz(bregma_index) + + +Fixtures +-------- + +.. TODO List the data files in this package, their purpose, data types, shape, etc. +.. TODO List the remote files used by this package, e.g. annotations files, swansonpaths.json, etc. + +Local files +^^^^^^^^^^^ + +* **allen_structure_tree.csv** - TODO Document. Where does this come from? Is it modified from either structure_tree_safe.csv or + structure_tree_safe_2017.csv? +* **franklin_paxinos_structure_tree.csv** - Obtained from Supplementary Data 2 in reference [10]. +* **beryl.npy** - A 306 x 1 int32 array of Allen CCF brain region IDs generated in MATLAB [*]_. For more information see + `Mappings`_. +* **cosmos.npy** - A 10 x 1 int32 array of Allen CCF brain region IDs generated in MATLAB [*]_. For more information see + `Mappings`_. 
+* **swanson_regions.npy** - A 1D array of length 323 containing the Allen CCF brain region IDs. +* **mappings.pqt** - A table of mappings. Each column defines a mapping, with the '-lr' suffix indicating a lateralized version. + The rows contain the correspondence of each mapping to the int64 index of the lateralized Allen structure tree. The table is + generated by ibllib.atlas.regions.BrainRegions._compute_mappings. + +Remote files +^^^^^^^^^^^^ + +* **annotation_<res_um>.nrrd** - A 3D volume containing indices of the regions in the associated + structure tree. `res_um` indicates the isometric spacing in microns. These uint16 indices are + known as the region 'index' in the structure tree, i.e. the position of the region in the + flattened tree. +* **average_template_<res_um>.nrrd** - TODO Document +* **annotation__lut_.npz** - TODO Document +* **FranklinPaxinons/annotation_<res_um>.npz** - A 3D volume containing indices of the regions associated with the Franklin- + Paxinos structure tree. +* **FranklinPaxinons/average_template_<res_um>.npz** - A 3D volume containing the Allen dwi image slices corresponding to + the slices in the annotation volume [*]_. +* **swansonpaths.json** - The paths of a vectorized Swanson flatmap image [*]_. The vectorized version was generated + from the Swanson bitmap image using the matlab contour function to find the paths for each region. The paths for each + region were then simplified using the `Ramer-Douglas-Peucker algorithm <https://en.wikipedia.org/wiki/Ramer%E2%80%93Douglas%E2%80%93Peucker_algorithm>`_. +* **swanson2allen.npz** - TODO Document who made this, its contents, purpose and data type +* **_.nrrd** - TODO Document who made this, its contents, purpose and data type +* **gene-expression.pqt** - TODO Document who made this, its contents, purpose and data type +* **gene-expression.bin** - TODO Document who made this, its contents, purpose and data type. + +.. [*] The annotation and average template volumes were created from the images provided in Supplementary Data 4 of Chon et al. [3]_ + and stitched together as a single volume using SimpleITK. +.. [*] output of aggType 2 in https://github.com/cortex-lab/allenCCF/blob/master/Browsing%20Functions/aggregateAcr.m +.. [*] output of aggType 1 in https://github.com/cortex-lab/allenCCF/blob/master/Browsing%20Functions/aggregateAcr.m +.. [*] the paths were generated from a bitmap of the + `BM3 rat flatmap 3.0 foldout poster `_ + in `Swanson LW (2004) Brain Maps, 3rd ed. `_ TODO where is code for this? + + +References +---------- +.. [1] © 2015 Allen Institute for Brain Science. Allen Mouse Brain Atlas (2015) with region annotations (2017). + Available from: http://download.alleninstitute.org/informatics-archive/current-release/mouse_ccf/annotation/ +.. [2] Paxinos G, and Franklin KBJ (2012) The Mouse Brain in Stereotaxic Coordinates, 4th edition (Elsevier Academic Press) +.. [3] Chon U et al (2019) Enhanced and unified anatomical labeling for a common mouse brain atlas + [doi 10.1038/s41467-019-13057-w] +.. [4] Dorr AE, Lerch JP, Spring S, Kabani N, Henkelman RM (2008). High resolution three-dimensional brain atlas using an average + magnetic resonance image of 40 adult C57Bl/6J mice. Neuroimage 42(1):60-9. [doi 10.1016/j.neuroimage.2008.03.037] +.. [5] Qiu, LR, Fernandes, DJ, Szulc-Lerch, KU et al. (2018) Mouse MRI shows brain areas relatively larger + in males emerge before those larger in females. Nat Commun 9, 2615. [doi 10.1038/s41467-018-04921-2] +.. [6] International Brain Laboratory et al. (2022) Reproducibility of in-vivo electrophysiological measurements in mice. + bioRxiv. 
[doi 10.1101/2022.05.09.491042] +.. [7] International Brain Laboratory et al. (2023) A Brain-Wide Map of Neural Activity during Complex Behaviour. + bioRxiv. [doi 10.1101/2023.07.04.547681] +.. [8] Findling C et al. (2023) Brain-wide representations of prior information in mouse decision-making. + bioRxiv. [doi 10.1101/2023.07.04.547684] +.. [9] Swanson LW (2018) Brain maps 4.0—Structure of the rat brain: An open access atlas with global nervous system nomenclature + ontology and flatmaps. J Comp Neurol. [doi 10.1002/cne.24381] +.. [10] Allen Mouse Common Coordinate Framework Technical White Paper (October 2017 v3) + http://help.brain-map.org/download/attachments/8323525/Mouse_Common_Coordinate_Framework.pdf + +""" from .atlas import * # noqa from .regions import regions_from_allen_csv +from .flatmaps import FlatMap diff --git a/ibllib/atlas/atlas.py b/ibllib/atlas/atlas.py index 956d98201..d4ec0914b 100644 --- a/ibllib/atlas/atlas.py +++ b/ibllib/atlas/atlas.py @@ -1,29 +1,59 @@ +""" +Classes for manipulating brain atlases, insertions, and coordinates. +""" +from pathlib import Path, PurePosixPath from dataclasses import dataclass import logging + import matplotlib.pyplot as plt -from pathlib import Path, PurePosixPath import numpy as np import nrrd from one.webclient import http_download_file import one.params import one.remote.aws as aws - from iblutil.numerical import ismember from ibllib.atlas.regions import BrainRegions, FranklinPaxinosRegions - -_logger = logging.getLogger(__name__) ALLEN_CCF_LANDMARKS_MLAPDV_UM = {'bregma': np.array([5739, 5400, 332])} +"""dict: The ML AP DV voxel coordinates of brain landmarks in the Allen atlas.""" + PAXINOS_CCF_LANDMARKS_MLAPDV_UM = {'bregma': np.array([5700, 4300 + 160, 330])} +"""dict: The ML AP DV voxel coordinates of brain landmarks in the Franklin & Paxinos atlas.""" S3_BUCKET_IBL = 'ibl-brain-wide-map-public' +"""str: The name of the public IBL S3 bucket containing atlas data.""" + +_logger = logging.getLogger(__name__) def cart2sph(x, y, z): """ - Converts cartesian to spherical Coordinates - theta: polar angle, phi: azimuth + Converts cartesian to spherical coordinates. + + Returns spherical coordinates (r, theta, phi). + + Parameters + ---------- + x : numpy.array + A 1D array of x-axis coordinates. + y : numpy.array + A 1D array of y-axis coordinates. + z : numpy.array + A 1D array of z-axis coordinates. + + Returns + ------- + numpy.array + The radial distance of each point. + numpy.array + The polar angle. + numpy.array + The azimuthal angle. + + See Also + -------- + sph2cart """ r = np.sqrt(x ** 2 + y ** 2 + z ** 2) phi = np.arctan2(y, x) * 180 / np.pi @@ -37,8 +67,31 @@ def cart2sph(x, y, z): def sph2cart(r, theta, phi): """ - Converts Spherical to Cartesian coordinates - theta: polar angle, phi: azimuth + Converts Spherical to Cartesian coordinates. + + Returns Cartesian coordinates (x, y, z). + + Parameters + ---------- + r : numpy.array + A 1D array of radial distances. + theta : numpy.array + A 1D array of polar angles. + phi : numpy.array + A 1D array of azimuthal angles. + + Returns + ------- + x : numpy.array + A 1D array of x-axis coordinates. + y : numpy.array + A 1D array of y-axis coordinates. + z : numpy.array + A 1D array of z-axis coordinates. 
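+ + Examples + -------- + Convert a point with radius 1, a polar angle of 90 degrees and an azimuth of 0 degrees (a unit vector along the x-axis): + + >>> x, y, z = sph2cart(1, 90, 0)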
+ + See Also + -------- + cart2sph """ x = r * np.cos(phi / 180 * np.pi) * np.sin(theta / 180 * np.pi) y = r * np.sin(phi / 180 * np.pi) * np.sin(theta / 180 * np.pi) @@ -48,47 +101,85 @@ class BrainCoordinates: """ - Class for mapping and indexing a 3D array to real-world coordinates - x = ml, right positive - y = ap, anterior positive - z = dv, dorsal positive + Class for mapping and indexing a 3D array to real-world coordinates. + + * x = ml, right positive + * y = ap, anterior positive + * z = dv, dorsal positive The layout of the Atlas dimension is done according to the most used sections so they lay contiguous on disk assuming C-ordering: V[iap, iml, idv] - nxyz: number of elements along each cartesian axis (nx, ny, nz) = (nml, nap, ndv) - xyz0: coordinates of the element volume[0, 0, 0]] in the coordinate space - dxyz: spatial interval of the volume along the 3 dimensions + Parameters + ---------- + nxyz : array_like + Number of elements along each Cartesian axis (nx, ny, nz) = (nml, nap, ndv). + xyz0 : array_like + Coordinates of the element volume[0, 0, 0] in the coordinate space. + dxyz : array_like, float + Spatial interval of the volume along the 3 dimensions. + + Attributes + ---------- + xyz0 : numpy.array + The Cartesian coordinates of the element volume[0, 0, 0], i.e. the origin. + x0 : int + The x-axis origin coordinate of the element volume. + y0 : int + The y-axis origin coordinate of the element volume. + z0 : int + The z-axis origin coordinate of the element volume. """ - def __init__(self, nxyz, xyz0=[0, 0, 0], dxyz=[1, 1, 1]): + def __init__(self, nxyz, xyz0=(0, 0, 0), dxyz=(1, 1, 1)): if np.isscalar(dxyz): - dxyz = [dxyz for i in range(3)] + dxyz = [dxyz] * 3 self.x0, self.y0, self.z0 = list(xyz0) self.dx, self.dy, self.dz = list(dxyz) self.nx, self.ny, self.nz = list(nxyz) @property def dxyz(self): + """numpy.array: Spatial interval of the volume along the 3 dimensions.""" return np.array([self.dx, self.dy, self.dz]) @property def nxyz(self): + """numpy.array: Number of elements along each Cartesian axis (nx, ny, nz).""" return np.array([self.nx, self.ny, self.nz]) - """Methods ratios to indice""" + """Methods ratios to indices""" def r2ix(self, r): + # FIXME Document return int((self.nx - 1) * r) def r2iy(self, r): + # FIXME Document return int((self.nz - 1) * r) def r2iz(self, r): + # FIXME Document return int((self.nz - 1) * r) - """Methods distance to indice""" + """Methods distance to indices""" @staticmethod def _round(i, round=True): + """ + Round an input value to the nearest integer, replacing NaN values with 0. + + Parameters + ---------- + i : int, float, numpy.nan, numpy.array + A value or array of values to round. + round : bool + If false, this function is the identity. + + Returns + ------- + int, float, numpy.nan, numpy.array + If round is true, returns the nearest integer, replacing NaN values with 0, otherwise + returns the input unaffected. + """ nanval = 0 if round: ii = np.array(np.round(i)).astype(int) @@ -98,6 +189,31 @@ def _round(i, round=True): return i def x2i(self, x, round=True, mode='raise'): + """ + Find the nearest volume image index to a given x-axis coordinate. + + Parameters + ---------- + x : float, numpy.array + One or more x-axis coordinates, relative to the origin, x0. + round : bool + If true, round to the nearest index, replacing NaN values with 0. + mode : {'raise', 'clip', 'wrap'}, default='raise' + How to behave if the coordinate lies outside of the volume: raise (default) will raise + a ValueError; 'clip' will replace the index with the closest index inside the volume; + 'wrap' will return the index as is. + + Returns + ------- + numpy.array + The nearest indices of the image volume along the first dimension. + + Raises + ------ + ValueError + At least one x value lies outside of the atlas volume. Change 'mode' input to 'wrap' to + keep these values unchanged, or 'clip' to return the nearest valid indices.
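+ + Examples + -------- + A sketch on a small, hypothetical volume with unit spacing and origin at (-2, -2, -2): + + >>> bc = BrainCoordinates((5, 5, 5), xyz0=(-2, -2, -2), dxyz=1) + >>> int(bc.x2i(1)) + 3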
+ mode : {'raise', 'clip', 'wrap'}, default='raise' + How to behave if the coordinate lies outside of the volume: raise (default) will raise + a ValueError; 'clip' will replace the index with the closest index inside the volume; + 'wrap' will return the index as is. + + Returns + ------- + numpy.array + The nearest indices of the image volume along the first dimension. + + Raises + ------ + ValueError + At least one x value lies outside of the atlas volume. Change 'mode' input to 'wrap' to + keep these values unchanged, or 'clip' to return the nearest valid indices. + """ i = np.asarray(self._round((x - self.x0) / self.dx, round=round)) if np.any(i < 0) or np.any(i >= self.nx): if mode == 'clip': @@ -105,11 +221,36 @@ def x2i(self, x, round=True, mode='raise'): i[i >= self.nx] = self.nx - 1 elif mode == 'raise': raise ValueError("At least one x value lies outside of the atlas volume.") - elif mode == 'wrap': + elif mode == 'wrap': # This is only here for legacy reasons pass return i def y2i(self, y, round=True, mode='raise'): + """ + Find the nearest volume image index to a given y-axis coordinate. + + Parameters + ---------- + y : float, numpy.array + One or more y-axis coordinates, relative to the origin, y0. + round : bool + If true, round to the nearest index, replacing NaN values with 0. + mode : {'raise', 'clip', 'wrap'} + How to behave if the coordinate lies outside of the volume: raise (default) will raise + a ValueError; 'clip' will replace the index with the closest index inside the volume; + 'wrap' will return the index as is. + + Returns + ------- + numpy.array + The nearest indices of the image volume along the second dimension. + + Raises + ------ + ValueError + At least one y value lies outside of the atlas volume. Change 'mode' input to 'wrap' to + keep these values unchanged, or 'clip' to return the nearest valid indices. + """ i = np.asarray(self._round((y - self.y0) / self.dy, round=round)) if np.any(i < 0) or np.any(i >= self.ny): if mode == 'clip': @@ -117,11 +258,36 @@ def y2i(self, y, round=True, mode='raise'): i[i >= self.ny] = self.ny - 1 elif mode == 'raise': raise ValueError("At least one y value lies outside of the atlas volume.") - elif mode == 'wrap': + elif mode == 'wrap': # This is only here for legacy reasons pass return i def z2i(self, z, round=True, mode='raise'): + """ + Find the nearest volume image index to a given z-axis coordinate. + + Parameters + ---------- + z : float, numpy.array + One or more z-axis coordinates, relative to the origin, z0. + round : bool + If true, round to the nearest index, replacing NaN values with 0. + mode : {'raise', 'clip', 'wrap'} + How to behave if the coordinate lies outside of the volume: raise (default) will raise + a ValueError; 'clip' will replace the index with the closest index inside the volume; + 'wrap' will return the index as is. + + Returns + ------- + numpy.array + The nearest indices of the image volume along the third dimension. + + Raises + ------ + ValueError + At least one z value lies outside of the atlas volume. Change 'mode' input to 'wrap' to + keep these values unchanged, or 'clip' to return the nearest valid indices. 
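To make the coordinate-to-index behaviour documented above concrete, here is a small sketch against a toy volume; it is an editor's aside rather than part of the patch, and assumes `BrainCoordinates` is importable from `ibllib.atlas.atlas`:

```python
import numpy as np
from ibllib.atlas.atlas import BrainCoordinates

# A toy 10 x 10 x 10 volume with unit spacing and the origin at (0, 0, 0).
bc = BrainCoordinates(nxyz=(10, 10, 10), xyz0=(0, 0, 0), dxyz=(1, 1, 1))

bc.x2i(np.array([3.2]))               # -> array([3]); nearest index
bc.x2i(np.array([15.]), mode='clip')  # -> array([9]); clipped to the volume edge
bc.xyz2i(np.array([[1., 2., 3.]]))    # -> array([[1, 2, 3]])
# bc.x2i(np.array([15.]))             # raises ValueError with the default mode='raise'
```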
+ """ i = np.asarray(self._round((z - self.z0) / self.dz, round=round)) if np.any(i < 0) or np.any(i >= self.nz): if mode == 'clip': @@ -129,16 +295,35 @@ def z2i(self, z, round=True, mode='raise'): i[i >= self.nz] = self.nz - 1 elif mode == 'raise': raise ValueError("At least one z value lies outside of the atlas volume.") - elif mode == 'wrap': + elif mode == 'wrap': # This is only here for legacy reasons pass return i def xyz2i(self, xyz, round=True, mode='raise'): """ - :param mode: {‘raise’, 'clip', 'wrap'} determines what to do when determined index lies outside the atlas volume - 'raise' will raise a ValueError - 'clip' will replace the index with the closest index inside the volume - 'wrap' will wrap around to the other side of the volume. This is only here for legacy reasons + Find the nearest volume image indices to the given Cartesian coordinates. + + Parameters + ---------- + xyz : array_like + One or more Cartesian coordinates, relative to the origin, xyz0. + round : bool + If true, round to the nearest index, replacing NaN values with 0. + mode : {'raise', 'clip', 'wrap'} + How to behave if any coordinate lies outside of the volume: raise (default) will raise + a ValueError; 'clip' will replace the index with the closest index inside the volume; + 'wrap' will return the index as is. + + Returns + ------- + numpy.array + The nearest indices of the image volume. + + Raises + ------ + ValueError + At least one coordinate lies outside of the atlas volume. Change 'mode' input to 'wrap' + to keep these values unchanged, or 'clip' to return the nearest valid indices. """ xyz = np.array(xyz) dt = int if round else float @@ -150,15 +335,68 @@ def xyz2i(self, xyz, round=True, mode='raise'): """Methods indices to distance""" def i2x(self, ind): + """ + Return the x-axis coordinate of a given index. + + Parameters + ---------- + ind : int, numpy.array + One or more indices along the first dimension of the image volume. + + Returns + ------- + float, numpy.array + The corresponding x-axis coordinate(s), relative to the origin, x0. + """ return ind * self.dx + self.x0 def i2y(self, ind): + """ + Return the y-axis coordinate of a given index. + + Parameters + ---------- + ind : int, numpy.array + One or more indices along the second dimension of the image volume. + + Returns + ------- + float, numpy.array + The corresponding y-axis coordinate(s), relative to the origin, y0. + """ return ind * self.dy + self.y0 def i2z(self, ind): + """ + Return the z-axis coordinate of a given index. + + Parameters + ---------- + ind : int, numpy.array + One or more indices along the third dimension of the image volume. + + Returns + ------- + float, numpy.array + The corresponding z-axis coordinate(s), relative to the origin, z0. + """ return ind * self.dz + self.z0 def i2xyz(self, iii): + """ + Return the Cartesian coordinates of a given index. + + Parameters + ---------- + iii : array_like + One or more image volume indices. + + Returns + ------- + numpy.array + The corresponding xyz coordinates, relative to the origin, xyz0. 
+ """ + iii = np.array(iii, dtype=float) out = np.zeros_like(iii) out[..., 0] = self.i2x(iii[..., 0]) @@ -169,17 +407,21 @@ def i2xyz(self, iii): """Methods bounds""" @property def xlim(self): + # FIXME Document return self.i2x(np.array([0, self.nx - 1])) @property def ylim(self): + # FIXME Document return self.i2y(np.array([0, self.ny - 1])) @property def zlim(self): + # FIXME Document return self.i2z(np.array([0, self.nz - 1])) def lim(self, axis): + # FIXME Document if axis == 0: return self.xlim elif axis == 1: @@ -190,19 +432,23 @@ def lim(self, axis): """returns scales""" @property def xscale(self): + # FIXME Document return self.i2x(np.arange(self.nx)) @property def yscale(self): + # FIXME Document return self.i2y(np.arange(self.ny)) @property def zscale(self): + # FIXME Document return self.i2z(np.arange(self.nz)) """returns the 3d mgrid used for 3d visualization""" @property def mgrid(self): + # FIXME Document return np.meshgrid(self.xscale, self.yscale, self.zscale) @@ -212,6 +458,12 @@ class BrainAtlas: Currently this is designed for the AllenCCF at several resolutions, yet this class can be used for other atlases arises. """ + + """numpy.array: An image volume.""" + image = None + """numpy.array: An annotation label volume.""" + label = None + def __init__(self, image, label, dxyz, regions, iorigin=[0, 0, 0], dims2xyz=[0, 1, 2], xyz2dims=[0, 1, 2]): """ @@ -241,7 +493,7 @@ def __init__(self, image, label, dxyz, regions, iorigin=[0, 0, 0], @staticmethod def _get_cache_dir(): par = one.params.get(silent=True) - path_atlas = Path(par.CACHE_DIR).joinpath(PurePosixPath('histology', 'ATLAS', 'Needles', 'Allen', 'flatmaps')) + path_atlas = Path(par.CACHE_DIR).joinpath('histology', 'ATLAS', 'Needles', 'Allen', 'flatmaps') return path_atlas def compute_surface(self): @@ -280,10 +532,22 @@ def _lookup_inds(self, ixyz, mode='raise'): def _lookup(self, xyz, mode='raise'): """ - Performs a 3D lookup from real world coordinates to the flat indices in the volume - defined in the BrainCoordinates object - :param xyz: [n, 3] array of coordinates - :return: n array of flat indices + Performs a 3D lookup from real world coordinates to the flat indices in the volume, + defined in the BrainCoordinates object. + + Parameters + ---------- + xyz : numpy.array + An (n, 3) array of Cartesian coordinates. + mode : {'raise', 'clip', 'wrap'} + How to behave if any coordinate lies outside of the volume: raise (default) will raise + a ValueError; 'clip' will replace the index with the closest index inside the volume; + 'wrap' will return the index as is. + + Returns + ------- + numpy.array + A 1D array of flat indices. """ return self._lookup_inds(self.bc.xyz2i(xyz, mode=mode), mode=mode) @@ -295,6 +559,9 @@ def get_labels(self, xyz, mapping=None, radius_um=None, mode='raise'): :param mapping: brain region mapping (defaults to original Allen mapping) :param radius_um: if not null, returns a regions ids array and an array of proportion of regions in a sphere of size radius around the coordinates. 
+ :param mode: {‘raise’, 'clip'} determines what to do when determined index lies outside the atlas volume + 'raise' will raise a ValueError (default) + 'clip' will replace the index with the closest index inside the volume :return: n array of region ids """ mapping = mapping or self.regions.default_mapping @@ -304,9 +571,9 @@ def get_labels(self, xyz, mapping=None, radius_um=None, mode='raise'): nry = int(np.ceil(radius_um / abs(self.bc.dy) / 1e6)) nrz = int(np.ceil(radius_um / abs(self.bc.dz) / 1e6)) nr = [nrx, nry, nrz] - iii = self.bc.xyz2i(xyz) + iii = self.bc.xyz2i(xyz, mode=mode) # computing the cube radius and indices is more complicated as volume indices are not - # necessariy in ml, ap, dv order so the indices order is dynamic + # necessarily in ml, ap, dv order so the indices order is dynamic rcube = np.meshgrid(*tuple((np.arange( -nr[i], nr[i] + 1) * self.bc.dxyz[i]) ** 2 for i in self.xyz2dims)) rcube = np.sqrt(rcube[0] + rcube[1], rcube[2]) * 1e6 @@ -435,6 +702,32 @@ def plot_tilted_slice(self, xyz, axis, volume='image', cmap=None, ax=None, retur @staticmethod def _plot_slice(im, extent, ax=None, cmap=None, volume=None, **kwargs): + """ + Plot an atlas slice. + + Parameters + ---------- + im : numpy.array + A 2D image slice to plot. + extent : array_like + The bounding box in data coordinates that the image will fill specified as (left, + right, bottom, top) in data coordinates. + ax : matplotlib.pyplot.Axes + An optional Axes object to plot to. + cmap : str, matplotlib.colors.Colormap + The Colormap instance or registered colormap name used to map scalar data to colors. + Defaults to 'bone'. + volume : str + If 'boundary', assumes image is an outline of boundaries between all regions. + FIXME How does this affect the plot? + **kwargs + See matplotlib.pyplot.imshow. + + Returns + ------- + matplotlib.pyplot.Axes + The image axes. + """ if not ax: ax = plt.gca() ax.axis('equal') @@ -687,9 +980,13 @@ def plot_top(self, volume='annotation', mapping=None, region_values=None, ax=Non @dataclass class Trajectory: """ - 3D Trajectory (usually for a linear probe). Minimally defined by a vector and a point. - instantiate from a best fit from a n by 3 array containing xyz coordinates: - trj = Trajectory.fit(xyz) + 3D Trajectory (usually for a linear probe), minimally defined by a vector and a point. + + Examples + -------- + Instantiate from a best fit from an n by 3 array containing xyz coordinates: + + >>> trj = Trajectory.fit(xyz) """ vector: np.ndarray point: np.ndarray @@ -697,9 +994,17 @@ class Trajectory: @staticmethod def fit(xyz): """ - fits a line to a 3D cloud of points, returns a Trajectory object - :param xyz: n by 3 numpy array containing cloud of points - :returns: a Trajectory object + Fits a line to a 3D cloud of points. + + Parameters + ---------- + xyz : numpy.array + An n by 3 array containing a cloud of points to fit a line to. + + Returns + ------- + Trajectory + A new trajectory object. """ xyz_mean = np.mean(xyz, axis=0) return Trajectory(vector=np.linalg.svd(xyz - xyz_mean)[2][0], point=xyz_mean) @@ -793,9 +1098,8 @@ def exit_points(self, bc): class Insertion: """ Defines an ephys probe insertion in 3D coordinate. IBL conventions. - To instantiate, use the static methods: - Insertion.from_track - Insertion.from_dict + + To instantiate, use the static methods: `Insertion.from_track` and `Insertion.from_dict`. """ x: float y: float @@ -809,9 +1113,18 @@ class Insertion: @staticmethod def from_track(xyzs, brain_atlas=None): """ - :param brain_atlas: None. 
If provided, disregards the z coordinate and locks the insertion
-        point to the z of the brain surface
-        :return: Trajectory object
+        Define an insertion from a set of tracked xyz coordinates.
+
+        Parameters
+        ----------
+        xyzs : numpy.array
+            An n by 3 array of xyz coordinates representing an insertion trajectory.
+        brain_atlas : BrainAtlas
+            A brain atlas instance, used to determine the point of entry.
+
+        Returns
+        -------
+        Insertion
         """
         assert brain_atlas, 'Input argument brain_atlas must be defined'
         traj = Trajectory.fit(xyzs)
@@ -821,35 +1134,44 @@ def from_track(xyzs, brain_atlas=None):
         entry = Insertion.get_brain_entry(traj, brain_atlas)
         # convert to spherical system to store the insertion
         depth, theta, phi = cart2sph(*(entry - tip))
-        insertion_dict = {'x': entry[0], 'y': entry[1], 'z': entry[2],
-                          'phi': phi, 'theta': theta, 'depth': depth}
+        insertion_dict = {
+            'x': entry[0], 'y': entry[1], 'z': entry[2], 'phi': phi, 'theta': theta, 'depth': depth
+        }
         return Insertion(**insertion_dict)

     @staticmethod
     def from_dict(d, brain_atlas=None):
         """
-        Constructs an Insertion object from the json information stored in probes.description file
-        :param trj: dictionary containing at least the following keys, in um
-        {
-        'x': 544.0,
-        'y': 1285.0,
-        'z': 0.0,
-        'phi': 0.0,
-        'theta': 5.0,
-        'depth': 4501.0
-        }
-        :param brain_atlas: None. If provided, disregards the z coordinate and locks the insertion
-        point to the z of the brain surface
-        :return: Trajectory object
+        Constructs an Insertion object from the json information stored in the probes.description
+        file.
+
+        Parameters
+        ----------
+        d : dict
+            A dictionary containing at least the following keys {'x', 'y', 'z', 'phi', 'theta',
+            'depth'}. The depth and xyz coordinates must be in um.
+        brain_atlas : BrainAtlas, default=None
+            If provided, disregards the z coordinate and locks the insertion point to the z of the
+            brain surface.
+
+        Returns
+        -------
+        Insertion
+
+        Examples
+        --------
+        >>> tri = {'x': 544.0, 'y': 1285.0, 'z': 0.0, 'phi': 0.0, 'theta': 5.0, 'depth': 4501.0}
+        >>> ins = Insertion.from_dict(tri)
         """
+        assert brain_atlas, 'Input argument brain_atlas must be defined'
         z = d['z'] / 1e6
-        if brain_atlas:
-            iy = brain_atlas.bc.y2i(d['y'] / 1e6)
-            ix = brain_atlas.bc.x2i(d['x'] / 1e6)
-            # Only use the brain surface value as z if it isn't NaN (this happens when the surface touches the edges
-            # of the atlas volume
-            if not np.isnan(brain_atlas.top[iy, ix]):
-                z = brain_atlas.top[iy, ix]
+        if not hasattr(brain_atlas, 'top'):
+            brain_atlas.compute_surface()
+        iy = brain_atlas.bc.y2i(d['y'] / 1e6)
+        ix = brain_atlas.bc.x2i(d['x'] / 1e6)
+        # Only use the brain surface value as z if it isn't NaN (this happens when the surface
+        # touches the edges of the atlas volume)
+        if not np.isnan(brain_atlas.top[iy, ix]):
+            z = brain_atlas.top[iy, ix]
         return Insertion(x=d['x'] / 1e6, y=d['y'] / 1e6, z=z,
                          phi=d['phi'], theta=d['theta'], depth=d['depth'] / 1e6,
                          beta=d.get('beta', 0), label=d.get('label', ''))
@@ -876,7 +1198,19 @@ def tip(self):

     @staticmethod
     def _get_surface_intersection(traj, brain_atlas, surface='top'):
+        """
+        TODO Document!
+
+        Parameters
+        ----------
+        traj
+        brain_atlas
+        surface
+
+        Returns
+        -------
+
+        """
         brain_atlas.compute_surface()
         distance = traj.mindist(brain_atlas.srf_xyz)
@@ -926,6 +1260,8 @@ def get_brain_entry(traj, brain_atlas):

 class AllenAtlas(BrainAtlas):
     """
+    The Allen Common Coordinate Framework (CCF) brain atlas.
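For context, a hedged usage sketch of the patched `from_dict`, which now requires a brain atlas instance; the coordinate values are illustrative only, and the atlas volumes are downloaded on first use:

```python
from ibllib.atlas import AllenAtlas
from ibllib.atlas.atlas import Insertion

ba = AllenAtlas(res_um=25)  # fetches the 25 um volumes on first use
d = {'x': 544.0, 'y': 1285.0, 'z': 0.0, 'phi': 0.0, 'theta': 5.0, 'depth': 4501.0}
# z is locked to the brain surface where the surface is defined at (x, y).
ins = Insertion.from_dict(d, brain_atlas=ba)
print(ins.tip)  # xyz coordinates of the probe tip, in meters relative to bregma
```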
+
     Instantiates an atlas.BrainAtlas corresponding to the Allen CCF at the given resolution
     using the IBL Bregma and coordinate system.
     """
@@ -933,6 +1269,20 @@ class AllenAtlas(BrainAtlas):
     """pathlib.PurePosixPath: The default relative path of the Allen atlas file."""
     atlas_rel_path = PurePosixPath('histology', 'ATLAS', 'Needles', 'Allen')

+    """numpy.array: A diffusion weighted imaging (DWI) image volume.
+
+    The Allen atlas DWI average template volume has the shape (ap, ml, dv) and contains uint16
+    values.  FIXME What do the values represent?
+    """
+    image = None
+
+    """numpy.array: An annotation label volume.
+
+    The Allen atlas label volume has the shape (ap, ml, dv) and contains uint16 indices
+    of the Allen CCF brain regions to which each voxel belongs.
+    """
+    label = None
+
     def __init__(self, res_um=25, scaling=(1, 1, 1), mock=False, hist_path=None):
         """
         Instantiates an atlas.BrainAtlas corresponding to the Allen CCF at the given resolution
@@ -941,7 +1291,7 @@ def __init__(self, res_um=25, scaling=(1, 1, 1), mock=False, hist_path=None):
         Parameters
         ----------
         res_um : {10, 25, 50} int
-            The Atlas resolution in micometres; one of 10, 25 or 50um.
+            The Atlas resolution in micrometres; one of 10, 25 or 50um.
         scaling : float, numpy.array
             Scale factor along ml, ap, dv for squeeze and stretch (default: [1, 1, 1]).
         mock : bool
@@ -955,7 +1305,7 @@
         >>> target_dir = one.cache_dir / AllenAtlas.atlas_rel_path
         ... ba = AllenAtlas(hist_path=target_dir)
         """
-        LUT_VERSION = "v01"  # version 01 is the lateralized version
+        LUT_VERSION = 'v01'  # version 01 is the lateralized version
         regions = BrainRegions()
         xyz2dims = np.array([1, 0, 2])  # this is the c-contiguous ordering
         dims2xyz = np.array([1, 0, 2])
@@ -1025,16 +1375,27 @@ def _read_volume(file_volume):

     def xyz2ccf(self, xyz, ccf_order='mlapdv', mode='raise'):
         """
-        Converts coordinates to the CCF coordinates, which is assumed to be the cube indices
-        times the spacing.
-        :param xyz: mlapdv coordinates in meters, origin Bregma
-        :param ccf_order: order that you want values returned 'mlapdv' (ibl) or 'apdvml'
-        (Allen mcc vertices)
-        :param mode: {'raise', 'clip', 'wrap'} determines what to do when determined index lies outside the atlas volume
-        'raise' will raise a ValueError
-        'clip' will replace the index with the closest index inside the volume
-        'wrap' will wrap around to the other side of the volume. This is only here for legacy reasons
-        :return: coordinates in CCF space um, origin is the front left top corner of the data
+        Converts anatomical coordinates to CCF coordinates.
+
+        Anatomical coordinates are in meters, relative to bregma, while CCF coordinates are
+        assumed to be the volume indices multiplied by the spacing in micrometres.
+
+        Parameters
+        ----------
+        xyz : numpy.array
+            An N by 3 array of anatomical coordinates in meters, relative to bregma.
+        ccf_order : {'mlapdv', 'apdvml'}, default='mlapdv'
+            The order of the CCF coordinates returned. For IBL (the default) this is (ML, AP, DV),
+            for Allen MCC vertices, this is (AP, DV, ML).
+        mode : {'raise', 'clip', 'wrap'}, default='raise'
+            How to behave if the coordinate lies outside of the volume: raise (default) will raise
+            a ValueError; 'clip' will replace the index with the closest index inside the volume;
+            'wrap' will return the index as is.
+
+        Returns
+        -------
+        numpy.array
+            Coordinates in CCF space (in um). The origin is the front left top corner of the
+            data volume; the order is determined by ccf_order.
         """
         ordre = self._ccf_order(ccf_order)
@@ -1043,13 +1404,24 @@ def xyz2ccf(self, xyz, ccf_order='mlapdv', mode='raise'):

     def ccf2xyz(self, ccf, ccf_order='mlapdv'):
         """
-        Converts coordinates from the CCF coordinates, which is assumed to be the cube indices
-        times the spacing.
-        :param ccf coordinates in CCF space in um, origin is the front left top corner of the data
-        volume
-        :param ccf_order: order of ccf coordinates given 'mlapdv' (ibl) or 'apdvml'
-        (Allen mcc vertices)
-        :return: xyz: mlapdv coordinates in m, origin Bregma
+        Convert CCF coordinates to anatomical coordinates.
+
+        Anatomical coordinates are in meters, relative to bregma, while CCF coordinates are
+        assumed to be the volume indices multiplied by the spacing in micrometres.
+
+        Parameters
+        ----------
+        ccf : numpy.array
+            An N by 3 array of coordinates in CCF space (atlas volume indices * um resolution). The
+            origin is the front left top corner of the data volume.
+        ccf_order : {'mlapdv', 'apdvml'}, default='mlapdv'
+            The order of the CCF coordinates given. For IBL (the default) this is (ML, AP, DV),
+            for Allen MCC vertices, this is (AP, DV, ML).
+
+        Returns
+        -------
+        numpy.array
+            The MLAPDV coordinates in meters, relative to bregma.
         """
         ordre = self._ccf_order(ccf_order, reverse=True)
         return self.bc.i2xyz((ccf[..., ordre] / float(self.res_um)))
@@ -1074,21 +1446,25 @@ def _ccf_order(ccf_order, reverse=False):
         else:
             ValueError("ccf_order needs to be either 'mlapdv' or 'apdvml'")

-    def compute_regions_volume(self):
+    def compute_regions_volume(self, cumsum=False):
         """
         Sums the number of voxels in the labels volume for each region.
         Then compute volumes for all of the levels of hierarchy in cubic mm.
+        :param cumsum: computes the cumulative sum of the volume as per the hierarchy (defaults to False)
         :return:
         """
         nr = self.regions.id.shape[0]
         count = np.bincount(self.label.flatten(), minlength=nr)
-        self.regions.compute_hierarchy()
-        self.regions.volume = np.zeros_like(count)
-        for i in np.arange(nr):
-            if count[i] == 0:
-                continue
-            self.regions.volume[np.unique(self.regions.hierarchy[:, i])] += count[i]
-        self.regions.volume = self.regions.volume * (self.res_um / 1e3) ** 3
+        if not cumsum:
+            self.regions.volume = count * (self.res_um / 1e3) ** 3
+        else:
+            self.regions.compute_hierarchy()
+            self.regions.volume = np.zeros_like(count)
+            for i in np.arange(nr):
+                if count[i] == 0:
+                    continue
+                self.regions.volume[np.unique(self.regions.hierarchy[:, i])] += count[i]
+            self.regions.volume = self.regions.volume * (self.res_um / 1e3) ** 3


 def NeedlesAtlas(*args, **kwargs):
@@ -1096,8 +1472,33 @@
     Instantiates an atlas.BrainAtlas corresponding to the Allen CCF at the given resolution
     using the IBL Bregma and coordinate system. The Needles atlas defines a stretch along AP
     axis and a squeeze along the DV axis.
-    :param res_um: 10, 25 or 50 um
-    :return: atlas.AllenAtlas
+
+    Parameters
+    ----------
+    res_um : {10, 25, 50} int
+        The Atlas resolution in micrometres; one of 10, 25 or 50um.
+    **kwargs
+        See AllenAtlas.
+
+    Returns
+    -------
+    AllenAtlas
+        An Allen atlas object with MRI atlas scaling applied.
+
+    Notes
+    -----
+    The scaling was determined by manually transforming the DSURQE atlas [1]_ onto the Allen CCF.
+    The DSURQE atlas is an MRI atlas acquired from 40 C57BL/6J mice post-mortem, with 40um
+    isometric resolution.
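A quick numerical check of the bregma convention described in these conversion docstrings; this is a sketch outside the patch, assuming the 25 um Allen atlas is available locally or can be downloaded:

```python
import numpy as np
from ibllib.atlas import AllenAtlas

ba = AllenAtlas(res_um=25)

# Bregma (the xyz origin) should map onto the CCF landmark voxel, in um.
ccf = ba.xyz2ccf(np.array([[0., 0., 0.]]))  # approx. [[5739, 5400, 332]]

# ccf2xyz should invert xyz2ccf up to voxel rounding.
xyz = ba.ccf2xyz(ccf)
assert np.allclose(xyz, 0, atol=ba.res_um / 1e6)
```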
The alignment was performed by Mayo Faulkner. + The atlas data can be found `here `__. + More information on the dataset and segmentation can be found + `here `__. + + References + ---------- + .. [1] Dorr AE, Lerch JP, Spring S, Kabani N, Henkelman RM (2008). High resolution + three-dimensional brain atlas using an average magnetic resonance image of 40 adult C57Bl/6J + mice. Neuroimage 42(1):60-9. [doi 10.1016/j.neuroimage.2008.03.037] """ DV_SCALE = 0.952 # multiplicative factor on DV dimension, determined from MRI->CCF transform AP_SCALE = 1.087 # multiplicative factor on AP dimension @@ -1107,12 +1508,29 @@ def NeedlesAtlas(*args, **kwargs): def MRITorontoAtlas(*args, **kwargs): """ + The MRI Toronto brain atlas. + Instantiates an atlas.BrainAtlas corresponding to the Allen CCF at the given resolution using the IBL Bregma and coordinate system. The MRI Toronto atlas defines a stretch along AP - a squeeze along DV *and* a squeeze along ML. These are based on 12 p65 mice MRIs averaged. - See: https://www.nature.com/articles/s41467-018-04921-2 DB has access to the dataset. - :param res_um: 10, 25 or 50 um - :return: atlas.AllenAtlas + a squeeze along DV *and* a squeeze along ML. These are based on 12 p65 mice MRIs averaged [1]_. + + Parameters + ---------- + res_um : {10, 25, 50} int + The Atlas resolution in micrometres; one of 10, 25 or 50um. + **kwargs + See AllenAtlas. + + Returns + ------- + AllenAtlas + An Allen atlas object with MRI atlas scaling applied. + + References + ---------- + .. [1] Qiu, LR, Fernandes, DJ, Szulc-Lerch, KU et al. (2018) Mouse MRI shows brain areas + relatively larger in males emerge before those larger in females. Nat Commun 9, 2615. + [doi 10.1038/s41467-018-04921-2] """ ML_SCALE = 0.952 DV_SCALE = 0.885 # multiplicative factor on DV dimension, determined from MRI->CCF transform @@ -1142,8 +1560,7 @@ def _download_atlas_allen(target_file_image): - © 2015 Allen Institute for Brain Science. Allen Mouse Brain Atlas (2015) with region annotations (2017). 
- Available from: http://download.alleninstitute.org/informatics-archive/current-release/mouse_ccf/annotation/ - See Allen Mouse Common Coordinate Framework Technical White Paper for details - http://help.brain-map.org/download/attachments/8323525/ - Mouse_Common_Coordinate_Framework.pdf?version=3&modificationDate=1508178848279&api=v2 + http://help.brain-map.org/download/attachments/8323525/Mouse_Common_Coordinate_Framework.pdf """ (target_file_image := Path(target_file_image)).parent.mkdir(exist_ok=True, parents=True) @@ -1160,85 +1577,18 @@ def _download_atlas_allen(target_file_image): return Path(http_download_file(url, target_dir=target_file_image.parent)) -class FlatMap(AllenAtlas): - - def __init__(self, flatmap='dorsal_cortex', res_um=25): - """ - Avaiable flatmaps are currently 'dorsal_cortex', 'circles' and 'pyramid' - :param flatmap: - :param res_um: - """ - super().__init__(res_um=res_um) - self.name = flatmap - if flatmap == 'dorsal_cortex': - self._get_flatmap_from_file() - elif flatmap == 'circles': - from ibllib.atlas.flatmaps import circles - if res_um != 25: - raise NotImplementedError('Pyramid circles not implemented for resolution other than 25um') - self.flatmap, self.ml_scale, self.ap_scale = circles(N=5, atlas=self, display='flat') - elif flatmap == 'pyramid': - from ibllib.atlas.flatmaps import circles - if res_um != 25: - raise NotImplementedError('Pyramid circles not implemented for resolution other than 25um') - self.flatmap, self.ml_scale, self.ap_scale = circles(N=5, atlas=self, display='pyramid') - - def _get_flatmap_from_file(self): - # gets the file in the ONE cache for the flatmap name in the property, downloads it if needed - file_flatmap = self._get_cache_dir().joinpath(f'{self.name}_{self.res_um}.nrrd') - if not file_flatmap.exists(): - file_flatmap.parent.mkdir(exist_ok=True, parents=True) - aws.s3_download_file(f'atlas/{file_flatmap.name}', file_flatmap) - self.flatmap, _ = nrrd.read(file_flatmap) - - def plot_flatmap(self, depth=0, volume='annotation', mapping='Allen', region_values=None, ax=None, **kwargs): - """ - Displays the 2D image corresponding to the flatmap. If there are several depths, by default it - will display the first one - :param depth: index of the depth to display in the flatmap volume (the last dimension) - :param volume: - :param mapping: - :param region_values: - :param ax: - :param kwargs: - :return: - """ - if self.flatmap.ndim == 3: - inds = np.int32(self.flatmap[:, :, depth]) - else: - inds = np.int32(self.flatmap[:, :]) - regions = self._get_mapping(mapping=mapping)[self.label.flat[inds]] - if volume == 'annotation': - im = self._label2rgb(regions) - elif volume == 'value': - im = region_values[regions] - elif volume == 'boundary': - im = self.compute_boundaries(regions) - elif volume == 'image': - im = self.image.flat[inds] - if not ax: - ax = plt.gca() - - return self._plot_slice(im, self.extent_flmap(), ax=ax, volume=volume, **kwargs) - - def extent_flmap(self): - extent = np.r_[0, self.flatmap.shape[1], 0, self.flatmap.shape[0]] - return extent - - class FranklinPaxinosAtlas(BrainAtlas): - """ - Instantiates an atlas.BrainAtlas corresponding to the Franklin & Paxinos atlas at the given - resolution, using the IBL Bregma and coordinate system. 
-    """

     """pathlib.PurePosixPath: The default relative path of the atlas file."""
     atlas_rel_path = PurePosixPath('histology', 'ATLAS', 'Needles', 'FranklinPaxinos')

     def __init__(self, res_um=(10, 100, 10), scaling=(1, 1, 1), mock=False, hist_path=None):
-        """
-        Instantiates an atlas.BrainAtlas corresponding to the Franklin & Paxinos atlas at the given
-        resolution, using the IBL Bregma and coordinate system.
+        """The Franklin & Paxinos brain atlas.
+
+        Instantiates an atlas.BrainAtlas corresponding to the Franklin & Paxinos atlas [1]_ at the
+        given resolution, matched to the Allen Common Coordinate Framework [2]_ and using the IBL
+        Bregma and coordinate system. The Franklin & Paxinos volume has a resolution of 10 um
+        along the ML and DV axes and 100 um along the AP axis.

         Parameters
         ----------
@@ -1255,11 +1605,17 @@ def __init__(self, res_um=(10, 100, 10), scaling=(1, 1, 1), mock=False, hist_pat
         --------
         Instantiate Atlas from a non-default location, in this case the cache_dir of an ONE instance.
         >>> target_dir = one.cache_dir / AllenAtlas.atlas_rel_path
-        ... ba = AllenAtlas(hist_path=target_dir)
+        ... ba = FranklinPaxinosAtlas(hist_path=target_dir)
+
+        References
+        ----------
+        .. [1] Paxinos G, and Franklin KBJ (2012) The Mouse Brain in Stereotaxic Coordinates, 4th
+           edition (Elsevier Academic Press)
+        .. [2] Chon U et al (2019) Enhanced and unified anatomical labeling for a common mouse
+           brain atlas [doi 10.1038/s41467-019-13057-w]
         """
         # TODO interpolate?
-        LUT_VERSION = "v01"  # version 01 is the lateralized version
+        LUT_VERSION = 'v01'  # version 01 is the lateralized version
         regions = FranklinPaxinosRegions()
         xyz2dims = np.array([1, 0, 2])  # this is the c-contiguous ordering
         dims2xyz = np.array([1, 0, 2])
@@ -1310,10 +1666,31 @@ def __init__(self, res_um=(10, 100, 10), scaling=(1, 1, 1), mock=False, hist_pat

     @staticmethod
     def _read_volume(file_volume):
+        """
+        Loads an atlas image volume given a file path.
+
+        Parameters
+        ----------
+        file_volume : pathlib.Path
+            The file path of an image volume. Currently supports .nrrd and .npz files.
+
+        Returns
+        -------
+        numpy.array
+            The loaded image volume with dimensions (ap, ml, dv).
+
+        Raises
+        ------
+        ValueError
+            Unknown file extension, expects either '.nrrd' or '.npz'.
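For reference, instantiating the new atlas class is analogous to `AllenAtlas`; a hedged sketch, assuming the volumes are fetched from the IBL S3 bucket on first use:

```python
from ibllib.atlas.atlas import FranklinPaxinosAtlas

# Default res_um=(10, 100, 10): 10 um in ML and DV, 100 um in AP.
ba = FranklinPaxinosAtlas()
print(ba.image.shape)  # (ap, ml, dv); coronal slices are contiguous in memory
```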
+ """ if file_volume.suffix == '.nrrd': volume, _ = nrrd.read(file_volume, index_order='C') # ml, dv, ap # we want the coronal slice to be the most contiguous volume = np.transpose(volume, (2, 0, 1)) # image[iap, iml, idv] elif file_volume.suffix == '.npz': volume = np.load(file_volume)['arr_0'] + else: + raise ValueError( + f'"{file_volume.suffix}" files not supported, must be either ".nrrd" or ".npz"') return volume diff --git a/ibllib/atlas/flatmaps.py b/ibllib/atlas/flatmaps.py index 6d232bf15..782fba4c6 100644 --- a/ibllib/atlas/flatmaps.py +++ b/ibllib/atlas/flatmaps.py @@ -1,28 +1,121 @@ -""" -Module that hold techniques to project the brain volume onto 2D images for visualisation purposes -""" +"""Techniques to project the brain volume onto 2D images for visualisation purposes.""" from functools import lru_cache import logging import json +import nrrd import numpy as np from scipy.interpolate import interp1d import matplotlib.pyplot as plt -import matplotlib.colors -from matplotlib import cm -from iblutil.numerical import ismember from iblutil.util import Bunch from iblutil.io.hashfile import md5 import one.remote.aws as aws -from ibllib.atlas.atlas import AllenAtlas, BrainRegions -from ibllib.atlas.plots import plot_polygon, plot_polygon_with_hole, coords_for_poly_hole +from ibllib.atlas.atlas import AllenAtlas _logger = logging.getLogger(__name__) +class FlatMap(AllenAtlas): + """The Allen Atlas flatmap. + + FIXME Document! How are these flatmaps determined? Are they related to the Swansan atlas or is + that something else? + """ + + def __init__(self, flatmap='dorsal_cortex', res_um=25): + """ + Available flatmaps are currently 'dorsal_cortex', 'circles' and 'pyramid' + :param flatmap: + :param res_um: + """ + super().__init__(res_um=res_um) + self.name = flatmap + if flatmap == 'dorsal_cortex': + self._get_flatmap_from_file() + elif flatmap == 'circles': + if res_um != 25: + raise NotImplementedError('Pyramid circles not implemented for resolution other than 25um') + self.flatmap, self.ml_scale, self.ap_scale = circles(N=5, atlas=self, display='flat') + elif flatmap == 'pyramid': + if res_um != 25: + raise NotImplementedError('Pyramid circles not implemented for resolution other than 25um') + self.flatmap, self.ml_scale, self.ap_scale = circles(N=5, atlas=self, display='pyramid') + + def _get_flatmap_from_file(self): + # gets the file in the ONE cache for the flatmap name in the property, downloads it if needed + file_flatmap = self._get_cache_dir().joinpath(f'{self.name}_{self.res_um}.nrrd') + if not file_flatmap.exists(): + file_flatmap.parent.mkdir(exist_ok=True, parents=True) + aws.s3_download_file(f'atlas/{file_flatmap.name}', file_flatmap) + self.flatmap, _ = nrrd.read(file_flatmap) + + def plot_flatmap(self, depth=0, volume='annotation', mapping='Allen', region_values=None, ax=None, **kwargs): + """ + Displays the 2D image corresponding to the flatmap. + + If there are several depths, by default it will display the first one. + + Parameters + ---------- + depth : int + Index of the depth to display in the flatmap volume (the last dimension). + volume : {'image', 'annotation', 'boundary', 'value'} + - 'image' - Allen image volume. + - 'annotation' - Allen annotation volume. + - 'boundary' - outline of boundaries between all regions. + - 'volume' - custom volume, must pass in volume of shape BrainAtlas.image.shape as + regions_value argument. + mapping : str, default='Allen' + The brain region mapping to use. 
+ region_values : numpy.array + An array the shape of the brain atlas image containing custom region values. Used when + `volume` value is 'volume'. + ax : matplotlib.pyplot.Axes, optional + A set of axes to plot to. + **kwargs + See matplotlib.pyplot.imshow. + + Returns + ------- + matplotlib.pyplot.Axes + The plotted image axes. + """ + if self.flatmap.ndim == 3: + inds = np.int32(self.flatmap[:, :, depth]) + else: + inds = np.int32(self.flatmap[:, :]) + regions = self._get_mapping(mapping=mapping)[self.label.flat[inds]] + if volume == 'annotation': + im = self._label2rgb(regions) + elif volume == 'value': + im = region_values[regions] + elif volume == 'boundary': + im = self.compute_boundaries(regions) + elif volume == 'image': + im = self.image.flat[inds] + else: + raise ValueError(f'Volume type "{volume}" not supported') + if not ax: + ax = plt.gca() + + return self._plot_slice(im, self.extent_flmap(), ax=ax, volume=volume, **kwargs) + + def extent_flmap(self): + """ + Returns the boundary coordinates of the flat map. + + Returns + ------- + numpy.array + The bounding coordinates of the flat map image, specified as (left, right, bottom, top). + """ + extent = np.r_[0, self.flatmap.shape[1], 0, self.flatmap.shape[0]] + return extent + + @lru_cache(maxsize=1, typed=False) def circles(N=5, atlas=None, display='flat'): """ @@ -120,6 +213,18 @@ def circles(N=5, atlas=None, display='flat'): def swanson(filename="swanson2allen.npz"): + """ + FIXME Document! Which publication to reference? Are these specifically for flat maps? + Shouldn't this be made into an Atlas class with a mapping or scaling applied? + + Parameters + ---------- + filename + + Returns + ------- + + """ # filename could be "swanson2allen_original.npz", or "swanson2allen.npz" for remapped indices to match # existing labels in the brain atlas OLD_MD5 = [ @@ -135,8 +240,21 @@ def swanson(filename="swanson2allen.npz"): return s2a -def swanson_json(filename="swansonpaths.json"): +def swanson_json(filename="swansonpaths.json", remap=True): + """ + Vectorized version of the swanson bitmap file. The vectorized version was generated from swanson() using matlab + contour to find the paths for each region. The paths for each region were then simplified using the + Ramer Douglas Peucker algorithm https://rdp.readthedocs.io/en/latest/ + + Parameters + ---------- + filename + remap + + Returns + ------- + """ OLD_MD5 = ['97ccca2b675b28ba9b15ca8af5ba4111', # errored map with FOTU and CUL4, 5 mixed up '56daa7022b5e03080d8623814cda6f38', # old md5 of swanson json without CENT and PTLp # and CUL4 split (on s3 called swansonpaths_56daa.json) @@ -151,215 +269,57 @@ def swanson_json(filename="swansonpaths.json"): with open(json_file) as f: sw_json = json.load(f) - return sw_json - - -def plot_swanson_vector(acronyms=None, values=None, ax=None, hemisphere=None, br=None, orientation='landscape', - empty_color='silver', vmin=None, vmax=None, cmap='viridis', annotate=False, annotate_n=10, - annotate_order='top', annotate_list=None, mask=None, mask_color='w', fontsize=10, **kwargs): - - br = BrainRegions() if br is None else br - br.compute_hierarchy() - - if ax is None: - fig, ax = plt.subplots() - ax.set_axis_off() + # The swanson contains regions that are children of regions contained within the Allen + # annotation volume. 
Here we remap these regions to the parent that is contained with the + # annotation volume + if remap: + id_map = {391: [392, 393, 394, 395, 396], + 474: [483, 487], + 536: [537, 541], + 601: [602, 603, 604, 608], + 622: [624, 625, 626, 627, 628, 629, 630, 631, 632, 634, 635, 636, 637, 638], + 686: [687, 688, 689], + 708: [709, 710], + 721: [723, 724, 726, 727, 729, 730, 731], + 740: [741, 742, 743], + 758: [759, 760, 761, 762], + 771: [772, 773], + 777: [778, 779, 780], + 788: [789, 790, 791, 792], + 835: [836, 837, 838], + 891: [894, 895, 896, 897, 898, 900, 901, 902], + 926: [927, 928], + 949: [950, 951, 952, 953, 954], + 957: [958, 959, 960, 961, 962], + 999: [1000, 1001], + 578: [579, 580]} + + rev_map = {} + for k, vals in id_map.items(): + for v in vals: + rev_map[v] = k + + for sw in sw_json: + sw['thisID'] = rev_map.get(sw['thisID'], sw['thisID']) - if acronyms is not None: - ibr, vals = br.propagate_down(acronyms, values) - colormap = cm.get_cmap(cmap) - vmin = vmin or np.nanmin(vals) - vmax = vmax or np.nanmax(vals) - norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax) - rgba_color = colormap(norm(vals), bytes=True) - - if mask is not None: - imr, _ = br.propagate_down(mask, np.ones_like(mask)) - else: - imr = [] - - sw = swanson() - sw_json = swanson_json() - - plot_idx = [] - plot_val = [] - for i, reg in enumerate(sw_json): - - if acronyms is None: - color = br.rgba[br.mappings['Swanson'][reg['thisID']]] / 255 - else: - idx = np.where(ibr == reg['thisID'])[0] - if len(idx) > 0: - plot_idx.append(ibr[idx[0]]) - plot_val.append(vals[idx[0]]) - color = rgba_color[idx[0]] / 255 - else: - idx = np.where(imr == reg['thisID'])[0] - if len(idx) > 0: - color = mask_color - else: - color = empty_color - - coords = reg['coordsReg'] - reg_id = reg['thisID'] - - if reg['hole']: - vertices, codes = coords_for_poly_hole(coords) - if orientation == 'portrait': - vertices[:, [0, 1]] = vertices[:, [1, 0]] - plot_polygon_with_hole(ax, vertices, codes, color, reg_id, **kwargs) - if hemisphere is not None: - color_inv = color if hemisphere == 'mirror' else empty_color - vertices_inv = np.copy(vertices) - vertices_inv[:, 0] = -1 * vertices_inv[:, 0] + (sw.shape[0] * 2) - plot_polygon_with_hole(ax, vertices_inv, codes, color_inv, reg_id, **kwargs) - else: - plot_polygon_with_hole(ax, vertices, codes, color, reg_id, **kwargs) - if hemisphere is not None: - color_inv = color if hemisphere == 'mirror' else empty_color - vertices_inv = np.copy(vertices) - vertices_inv[:, 1] = -1 * vertices_inv[:, 1] + (sw.shape[0] * 2) - plot_polygon_with_hole(ax, vertices_inv, codes, color_inv, reg_id, **kwargs) - else: - coords = [coords] if type(coords) == dict else coords - for c in coords: - - if orientation == 'portrait': - xy = np.c_[c['y'], c['x']] - plot_polygon(ax, xy, color, reg_id, **kwargs) - if hemisphere is not None: - color_inv = color if hemisphere == 'mirror' else empty_color - xy_inv = np.copy(xy) - xy_inv[:, 0] = -1 * xy_inv[:, 0] + (sw.shape[0] * 2) - plot_polygon(ax, xy_inv, color_inv, reg_id, **kwargs) - else: - xy = np.c_[c['x'], c['y']] - plot_polygon(ax, xy, color, reg_id, **kwargs) - if hemisphere is not None: - color_inv = color if hemisphere == 'mirror' else empty_color - xy_inv = np.copy(xy) - xy_inv[:, 1] = -1 * xy_inv[:, 1] + (sw.shape[0] * 2) - plot_polygon(ax, xy_inv, color_inv, reg_id, **kwargs) - - if orientation == 'portrait': - ax.set_ylim(0, sw.shape[1]) - if hemisphere is None: - ax.set_xlim(0, sw.shape[0]) - else: - ax.set_xlim(0, 2 * sw.shape[0]) - else: - ax.set_xlim(0, 
sw.shape[1]) - if hemisphere is None: - ax.set_ylim(0, sw.shape[0]) - else: - ax.set_ylim(0, 2 * sw.shape[0]) - - if annotate: - if annotate_list is not None: - annotate_swanson(ax=ax, acronyms=annotate_list, orientation=orientation, br=br, thres=10, fontsize=fontsize) - elif acronyms is not None: - ids = br.index2id(np.array(plot_idx)) - _, indices, _ = np.intersect1d(br.id, br.remap(ids, 'Swanson-lr'), return_indices=True) - a, b = ismember(ids, br.id[indices]) - sorted_id = ids[a] - vals = np.array(plot_val)[a] - sort_vals = np.argsort(vals) if annotate_order == 'bottom' else np.argsort(vals)[::-1] - annotate_swanson(ax=ax, acronyms=sorted_id[sort_vals[:annotate_n]], orientation=orientation, br=br, - thres=10, fontsize=fontsize) - else: - annotate_swanson(ax=ax, orientation=orientation, br=br, fontsize=fontsize) - - def format_coord(x, y): - try: - ind = sw[int(y), int(x)] - ancestors = br.ancestors(br.id[ind])['acronym'] - return f'sw-{ind}, {ancestors}, aid={br.id[ind]}-{br.acronym[ind]} \n {br.name[ind]}' - except IndexError: - return '' - - ax.format_coord = format_coord - - ax.invert_yaxis() - ax.set_aspect('equal') - - -def plot_swanson(acronyms=None, values=None, ax=None, hemisphere=None, br=None, - orientation='landscape', annotate=False, empty_color='silver', **kwargs): - """ - Displays the 2D image corresponding to the swanson flatmap. - This case is different from the others in the sense that only a region maps to another regions, there - is no correspondency from the spatial 3D coordinates. - :param acronyms: - :param values: - :param hemisphere: hemisphere to display, options are 'left', 'right', 'both' or 'mirror' - :param br: ibllib.atlas.BrainRegions object - :param ax: matplotlib axis object to plot onto - :param orientation: 'landscape' (default) or 'portrait' - :param annotate: (False) if True, labels regions with acronyms - :param empty_color: (grey) matplotlib color code or rgb_a int8 tuple defining the filling - of brain regions not provided. 
Defaults to 'silver' - :param kwargs: arguments for imshow - :return: - """ - mapping = 'Swanson' - br = BrainRegions() if br is None else br - br.compute_hierarchy() - s2a = swanson() - # both hemishpere - if hemisphere == 'both': - _s2a = s2a + np.sum(br.id > 0) - _s2a[s2a == 0] = 0 - _s2a[s2a == 1] = 1 - s2a = np.r_[s2a, np.flipud(_s2a)] - mapping = 'Swanson-lr' - elif hemisphere == 'mirror': - s2a = np.r_[s2a, np.flipud(s2a)] - if orientation == 'portrait': - s2a = np.transpose(s2a) - if acronyms is None: - regions = br.mappings[mapping][s2a] - im = br.rgba[regions] - iswan = None - else: - ibr, vals = br.propagate_down(acronyms, values) - # we now have the mapped regions and aggregated values, map values onto swanson map - iswan, iv = ismember(s2a, ibr) - im = np.zeros_like(s2a, dtype=np.float32) - im[iswan] = vals[iv] - im[~iswan] = np.nan - if not ax: - ax = plt.gca() - ax.set_axis_off() # unless provided we don't need scales here - ax.imshow(im, **kwargs) - # overlay the boundaries if value plot - imb = np.zeros((*s2a.shape[:2], 4), dtype=np.uint8) - # fill in the empty regions with the blank regions colours if necessary - if iswan is not None: - imb[~iswan] = (np.array(matplotlib.colors.to_rgba(empty_color)) * 255).astype('uint8') - imb[s2a == 0] = 255 - # imb[s2a == 1] = np.array([167, 169, 172, 255]) - imb[s2a == 1] = np.array([0, 0, 0, 255]) - ax.imshow(imb) - if annotate: - annotate_swanson(ax=ax, orientation=orientation, br=br) - - # provides the mean to see the region on axis - def format_coord(x, y): - ind = s2a[int(y), int(x)] - ancestors = br.ancestors(br.id[ind])['acronym'] - return f'sw-{ind}, {ancestors}, aid={br.id[ind]}-{br.acronym[ind]} \n {br.name[ind]}' - - ax.format_coord = format_coord - return ax + return sw_json @lru_cache(maxsize=None) def _swanson_labels_positions(thres=20000): """ - This functions computes label positions to overlay on the Swanson flatmap - :return: dictionary where keys are acronyms + Computes label positions to overlay on the Swanson flatmap. + + Parameters + ---------- + thres : int, default=20000 + The number of pixels above which a region is labeled. + + Returns + ------- + dict of str + A map of brain acronym to a tuple of x y coordinates. """ - NPIX_THRESH = thres # number of pixels above which region is labeled s2a = swanson() iw, ih = np.meshgrid(np.arange(s2a.shape[1]), np.arange(s2a.shape[0])) # compute the center of mass of all regions (fast enough to do on the fly) @@ -371,7 +331,7 @@ def _swanson_labels_positions(thres=20000): NWH, NWW = (200, 600) h, w = s2a.shape labels = {} - for ilabel in np.where(bc > NPIX_THRESH)[0]: + for ilabel in np.where(bc > thres)[0]: x, y = (cmw[ilabel], cmh[ilabel]) # the polygon is convex and the label is outside. Dammit !!! if s2a[int(y), int(x)] != ilabel: @@ -390,29 +350,3 @@ def _swanson_labels_positions(thres=20000): # ax.plot(x, y, 'r+') labels[ilabel] = (x, y) return labels - - -def annotate_swanson(ax, acronyms=None, orientation='landscape', br=None, thres=20000, **kwargs): - """ - Display annotations on the flatmap - :param ax: - :param acronyms: (None) list or np.array of acronyms or allen region ids. If None plot all. 
- :param orientation: - :param br: BrainRegions object - :param kwargs: arguments for the annotate function - :return: - """ - br = br or BrainRegions() - if acronyms is None: - indices = np.arange(br.id.size) - else: # tech debt: here in fact we should remap and compute labels for hierarchical regions - aids = br.parse_acronyms_argument(acronyms) - _, indices, _ = np.intersect1d(br.id, br.remap(aids, 'Swanson-lr'), return_indices=True) - labels = _swanson_labels_positions(thres=thres) - for ilabel in labels: - # do not display uwanted labels - if ilabel not in indices: - continue - # rotate the labels if the dislay is in portrait mode - xy = np.flip(labels[ilabel]) if orientation == 'portrait' else labels[ilabel] - ax.annotate(br.acronym[ilabel], xy=xy, ha='center', va='center', **kwargs) diff --git a/ibllib/atlas/genes.py b/ibllib/atlas/genes.py index da0802a78..e3f32e378 100644 --- a/ibllib/atlas/genes.py +++ b/ibllib/atlas/genes.py @@ -1,3 +1,4 @@ +"""Gene expression maps.""" import logging from pathlib import Path @@ -17,7 +18,7 @@ def allen_gene_expression(filename='gene-expression.pqt', folder_cache=None): Reads in the Allen gene expression experiments binary data. :param filename: :param folder_cache: - :return: a dataframe of experiments, where each record correspond to a single gene expression + :return: a dataframe of experiments, where each record corresponds to a single gene expression and a memmap of all experiments volumes, size (4345, 58, 41, 67) corresponding to (nexperiments, ml, dv, ap). The spacing between slices is 200 um """ diff --git a/ibllib/atlas/mappings.pqt b/ibllib/atlas/mappings.pqt index c4b988444..fb5418773 100644 Binary files a/ibllib/atlas/mappings.pqt and b/ibllib/atlas/mappings.pqt differ diff --git a/ibllib/atlas/plots.py b/ibllib/atlas/plots.py index b9091cdfe..9c5926dcb 100644 --- a/ibllib/atlas/plots.py +++ b/ibllib/atlas/plots.py @@ -1,30 +1,36 @@ """ -Module that has convenience plotting functions for 2D atlas slices +Module that has convenience plotting functions for 2D atlas slices and flatmaps. """ -import matplotlib -import matplotlib.pyplot as plt -import numpy as np +import copy import logging -from iblutil.io.hashfile import md5 -import one.remote.aws as aws +import numpy as np from scipy.ndimage import gaussian_filter from scipy.stats import binned_statistic -from matplotlib import cm +import matplotlib.pyplot as plt +from matplotlib import cm, colors from matplotlib.patches import Polygon, PathPatch import matplotlib.path as mpath +from iblutil.io.hashfile import md5 +import one.remote.aws as aws -from ibllib.atlas import AllenAtlas, FlatMap +from ibllib.atlas import AllenAtlas +from ibllib.atlas.flatmaps import FlatMap, _swanson_labels_positions, swanson, swanson_json from ibllib.atlas.regions import BrainRegions from iblutil.numerical import ismember - from ibllib.atlas.atlas import BrainCoordinates, ALLEN_CCF_LANDMARKS_MLAPDV_UM _logger = logging.getLogger(__name__) def get_bc_10(): + """ + Get BrainCoordinates object for 10um Allen Atlas + Returns + ------- + BrainCoordinates object + """ dims2xyz = np.array([1, 0, 2]) res_um = 10 scaling = np.array([1, 1, 1]) @@ -40,17 +46,88 @@ def get_bc_10(): def plot_polygon(ax, xy, color, reg_id, edgecolor='k', linewidth=0.3, alpha=1): + """ + Function to plot matplotlib polygon on an axis + + Parameters + ---------- + ax : matplotlib.pyplot.Axes + An axis object to plot onto. 
+    xy: numpy.array
+        2D array of x and y coordinates of vertices of polygon
+    color: str, tuple of int
+        The color to fill the polygon
+    reg_id: str, int
+        An id to assign to the polygon
+    edgecolor: str, tuple of int
+        The color of the edge of the polygon
+    linewidth: float
+        The width of the edges of the polygon
+    alpha: float between 0 and 1
+        The opacity of the polygon
+
+    Returns
+    -------
+
+    """
     p = Polygon(xy, facecolor=color, edgecolor=edgecolor, linewidth=linewidth, alpha=alpha,
                 gid=f'region_{reg_id}')
     ax.add_patch(p)


 def plot_polygon_with_hole(ax, vertices, codes, color, reg_id, edgecolor='k', linewidth=0.3, alpha=1):
+    """
+    Function to plot a matplotlib polygon that contains a hole on an axis
+
+    Parameters
+    ----------
+    ax : matplotlib.pyplot.Axes
+        An axis object to plot onto.
+    vertices: numpy.array
+        2D array of x and y coordinates of vertices of polygon
+    codes: numpy.array
+        1D array of path codes used to link the vertices
+        (https://matplotlib.org/stable/tutorials/advanced/path_tutorial.html)
+    color: str, tuple of int
+        The color to fill the polygon
+    reg_id: str, int
+        An id to assign to the polygon
+    edgecolor: str, tuple of int
+        The color of the edge of the polygon
+    linewidth: float
+        The width of the edges of the polygon
+    alpha: float between 0 and 1
+        The opacity of the polygon
+
+    Returns
+    -------
+
+    """
+
     path = mpath.Path(vertices, codes)
     patch = PathPatch(path, facecolor=color, edgecolor=edgecolor, linewidth=linewidth, alpha=alpha,
                       gid=f'region_{reg_id}')
     ax.add_patch(patch)


 def coords_for_poly_hole(coords):
+    """
+    Function to convert polygon coordinates containing holes into the vertices and path codes
+    expected by matplotlib.
+
+    Parameters
+    ----------
+    coords : dict
+        Dictionary containing keys x, y and invert. x and y contain numpy.array of x coordinates,
+        y coordinates for the vertices of the polygon. The invert key is either 1 or -1 and
+        determines how to assign the paths. The value of invert for each polygon was assigned
+        manually after looking at the result.
+
+    Returns
+    -------
+    all_coords: numpy.array
+        2D array of x and y coordinates of vertices of polygon
+    all_codes: numpy.array
+        1D array of path codes used to link the vertices
+        (https://matplotlib.org/stable/tutorials/advanced/path_tutorial.html)
+
+    """
     for i, c in enumerate(coords):
         xy = np.c_[c['x'], c['y']]
         codes = np.ones(len(xy), dtype=mpath.Path.code_type) * mpath.Path.LINETO
@@ -91,11 +168,25 @@ def prepare_lr_data(acronyms_lh, values_lh, acronyms_rh, values_rh):

 def reorder_data(acronyms, values, brain_regions=None):
     """
-    Reorder list of acronyms and values to match the Allen ordering
-    :param acronyms: array of acronyms
-    :param values: array of values
-    :param brain_regions: BrainRegions object
-    :return: ordered array of acronyms and values
+    Reorder a list of acronyms and their values to match the Allen region ordering.
+
+    TODO Document more
+
+    Parameters
+    ----------
+    acronyms : array_like of str
+        An array of brain region acronyms to reorder.
+    values : array_like
+        An array of values, one per acronym.
+    brain_regions : ibllib.atlas.regions.BrainRegions
+        A brain regions object.
+
+    Returns
+    -------
+    numpy.array of str
+        An ordered array of acronyms.
+    numpy.array
+        The corresponding values, reordered to match the acronyms.
     """
     br = brain_regions or BrainRegions()
@@ -115,7 +206,25 @@ def reorder_data(acronyms, values, brain_regions=None):

 def load_slice_files(slice, mapping):
+    """
+    Function to load a set of vectorised atlas slices for a given atlas axis and mapping.
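To illustrate the polygon helpers documented above, a minimal sketch (not part of the patch) drawing one filled region outline:

```python
import numpy as np
import matplotlib.pyplot as plt
from ibllib.atlas.plots import plot_polygon

fig, ax = plt.subplots()
# A triangle standing in for a region outline; reg_id only sets the patch gid.
xy = np.array([[0., 0.], [1., 0.], [0.5, 1.]])
plot_polygon(ax, xy, color='tab:blue', reg_id=42, edgecolor='k')
ax.set_xlim(-0.5, 1.5)
ax.set_ylim(-0.5, 1.5)
plt.show()
```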
+ + If the data does not exist locally, it will download the files automatically stored in a AWS S3 + bucket. + + Parameters + ---------- + slice : {'coronal', 'sagittal', 'horizontal', 'top'} + The axis of the atlas to load. + mapping : {'Allen', 'Beryl', 'Cosmos'} + The mapping to load. + Returns + ------- + slice_data : numpy.array + A json containing the vertices to draw each region for each slice in the Allen annotation volume. + + """ OLD_MD5 = { 'coronal': [], 'sagittal': [], @@ -136,7 +245,48 @@ def load_slice_files(slice, mapping): def _plot_slice_vector(coords, slice, values, mapping, empty_color='silver', clevels=None, cmap='viridis', show_cbar=False, ba=None, ax=None, slice_json=None, **kwargs): + """ + Function to plot scalar value per allen region on vectorised version of histology slice. Do not use directly but use + through plot_scalar_on_slice function with vector=True. + + Parameters + ---------- + coords: float + Coordinate of slice in um (not needed when slice='top'). + slice: {'coronal', 'sagittal', 'horizontal', 'top'} + The axis through the atlas volume to display. + values: numpy.array + Array of values for each of the lateralised Allen regions found using BrainRegions().acronym. If no + value is assigned to the acronym, the value at corresponding to that index should be NaN. + mapping: {'Allen', 'Beryl', 'Cosmos'} + The mapping to use. + empty_color: str, tuple of int, default='silver' + The color used to fill the regions that do not have any values assigned (regions with NaN). + clevels: numpy.array, list or tuple + The min and max values to use for the colormap. + cmap: string + Colormap to use. + show_cbar: bool, default=False + Whether to display a colorbar. + ba : ibllib.atlas.AllenAtlas + A brain atlas object. + ax : matplotlib.pyplot.Axes + An axis object to plot onto. + slice_json: numpy.array + The set of vectorised slices for this slice, obtained using load_slice_files(slice, mapping). + **kwargs + Set of kwargs passed into matplotlib.patches.Polygon. + + Returns + ------- + fig: matplotlib.figure.Figure + The plotted figure. + ax: matplotlib.pyplot.Axes + The plotted axes. 
+ cbar: matplotlib.pyplot.colorbar, optional + matplotlib colorbar object, only returned if show_cbar=True + """ ba = ba or AllenAtlas() mapping = mapping.split('-')[0].lower() if clevels is None: @@ -154,7 +304,7 @@ def _plot_slice_vector(coords, slice, values, mapping, empty_color='silver', cle fig = ax.get_figure() colormap = cm.get_cmap(cmap) - norm = matplotlib.colors.Normalize(vmin=clevels[0], vmax=clevels[1]) + norm = colors.Normalize(vmin=clevels[0], vmax=clevels[1]) nan_vals = np.isnan(values) rgba_color = np.full((values.size, 4), fill_value=np.nan) rgba_color[~nan_vals] = colormap(norm(values[~nan_vals]), bytes=True) @@ -193,7 +343,7 @@ def _plot_slice_vector(coords, slice, values, mapping, empty_color='silver', cle if len(coords) == 0: continue - if type(coords) == list: + if isinstance(coords, (list, tuple)): vertices, codes = coords_for_poly_hole(coords) plot_polygon_with_hole(ax, vertices, codes, color, **kwargs) else: @@ -205,40 +355,71 @@ def _plot_slice_vector(coords, slice, values, mapping, empty_color='silver', cle ax.invert_yaxis() if show_cbar: - cbar = fig.colorbar(matplotlib.cm.ScalarMappable(norm=norm, cmap=cmap), ax=ax) + cbar = fig.colorbar(cm.ScalarMappable(norm=norm, cmap=cmap), ax=ax) return fig, ax, cbar else: return fig, ax -def plot_scalar_on_slice(regions, values, coord=-1000, slice='coronal', mapping='Allen', hemisphere='left', +def plot_scalar_on_slice(regions, values, coord=-1000, slice='coronal', mapping=None, hemisphere='left', background='image', cmap='viridis', clevels=None, show_cbar=False, empty_color='silver', brain_atlas=None, ax=None, vector=False, slice_files=None, **kwargs): """ - Function to plot scalar value per allen region on histology slice - - :param regions: array of acronyms of Allen regions - :param values: array of scalar value per acronym. If hemisphere is 'both' and different values want to be shown on each - hemispheres, values should contain 2 columns, 1st column for LH values, 2nd column for RH values - :param coord: coordinate of slice in um (not needed when slice='top') - :param slice: orientation of slice, options are 'coronal', 'sagittal', 'horizontal', 'top' (top view of brain) - :param mapping: atlas mapping to use, options are 'Allen', 'Beryl' or 'Cosmos' - :param hemisphere: hemisphere to display, options are 'left', 'right', 'both' - :param background: background slice to overlay onto, options are 'image' or 'boundary' (only used when vector = False) - :param cmap: colormap to use - :param clevels: min max color levels [cmin, cmax] - :param show_cbar: whether or not to add colorbar to axis - :param empty_color: color to use for regions without any values (only used when vector = True) - :param brain_atlas: AllenAtlas object - :param ax: optional axis object to plot on - :param vector: whether to show as bitmap of vector graphic - :param slice_files: slice files for - :param **kwargs: kwargs to pass to matplotlib polygon e.g linewidth=2, edgecolor='none' (only used when vector = True) - :return: + Function to plot scalar value per region on histology slice. + + Parameters + ---------- + regions : array_like + An array of brain region acronyms. + values : numpy.array + An array of scalar value per acronym. If hemisphere is 'both' and different values want to + be shown on each hemisphere, values should contain 2 columns, 1st column for LH values, 2nd + column for RH values. + coord : float + Coordinate of slice in um (not needed when slice='top'). 
+    slice : {'coronal', 'sagittal', 'horizontal', 'top'}, default='coronal'
+        Orientation of slice.
+    mapping : str, optional
+        Atlas mapping to use; the options depend on the atlas used (see `ibllib.atlas.BrainRegions`).
+        If None, the atlas default mapping is used.
+    hemisphere : {'left', 'right', 'both'}, default='left'
+        The hemisphere to display.
+    background : {'image', 'boundary'}, default='image'
+        Background slice to overlay onto, options are 'image' or 'boundary'. If `vector` is true,
+        this argument is ignored.
+    cmap: str, default='viridis'
+        Colormap to use.
+    clevels : array_like
+        The min and max color levels to use.
+    show_cbar: bool, default=False
+        Whether to display a colorbar.
+    empty_color : str, default='silver'
+        Color to use for regions without any values (only used when `vector` is true).
+    brain_atlas : ibllib.atlas.AllenAtlas
+        A brain atlas object.
+    ax : matplotlib.pyplot.Axes
+        An axis object to plot onto.
+    vector : bool, default=False
+        Whether to show as bitmap or vector graphic.
+    slice_files: numpy.array
+        The set of vectorised slices for this slice, obtained using `load_slice_files(slice, mapping)`.
+    **kwargs
+        Set of kwargs passed into matplotlib.patches.Polygon, e.g. linewidth=2, edgecolor='None'
+        (only used when `vector` is true).
+
+    Returns
+    -------
+    fig: matplotlib.figure.Figure
+        The plotted figure.
+    ax: matplotlib.pyplot.Axes
+        The plotted axes.
+    cbar: matplotlib.pyplot.colorbar, optional
+        matplotlib colorbar object, only returned if show_cbar=True.
     """
     ba = brain_atlas or AllenAtlas()
     br = ba.regions
+    mapping = mapping or br.default_mapping
 
     if clevels is None:
         clevels = (np.nanmin(values), np.nanmax(values))
@@ -340,7 +521,7 @@ def plot_scalar_on_flatmap(regions, values, depth=0, flatmap='dorsal_cortex', ma
     d_idx = int(np.round(depth / ba.res_um))  # need to find nearest to 25
 
     if background == 'boundary':
-        cmap_bound = matplotlib.cm.get_cmap("bone_r").copy()
+        cmap_bound = cm.get_cmap("bone_r").copy()
         cmap_bound.set_under([1, 1, 1], 0)
 
     if ax:
@@ -365,8 +546,8 @@ def plot_scalar_on_flatmap(regions, values, depth=0, flatmap='dorsal_cortex', ma
         ax.set_xlim(np.ceil(ba.flatmap.shape[1] / 2), ba.flatmap.shape[1])
 
     if show_cbar:
-        norm = matplotlib.colors.Normalize(vmin=clevels[0], vmax=clevels[1], clip=False)
-        cbar = fig.colorbar(matplotlib.cm.ScalarMappable(norm=norm, cmap=cmap), ax=ax)
+        norm = colors.Normalize(vmin=clevels[0], vmax=clevels[1], clip=False)
+        cbar = fig.colorbar(cm.ScalarMappable(norm=norm, cmap=cmap), ax=ax)
         return fig, ax, cbar
     else:
         return fig, ax
@@ -375,7 +556,7 @@ def plot_scalar_on_flatmap(regions, values, depth=0, flatmap='dorsal_cortex', ma
 def plot_volume_on_slice(volume, coord=-1000, slice='coronal', mapping='Allen', background='boundary', cmap='Reds',
                          clevels=None, show_cbar=False, brain_atlas=None, ax=None):
     """
-    Plot slice at through volume
+    Plot slice through a volume
 
     :param volume: 3D array of volume (must be same shape as brain_atlas object)
     :param coord: coordinate of slice in um
@@ -384,7 +565,7 @@ def plot_volume_on_slice(volume, coord=-1000, slice='coronal', mapping='Allen',
     :param background: background slice to overlay onto, options are 'image' or 'boundary'
     :param cmap: colormap to use
    :param clevels: min max color levels [cmin, cmax]
-    :param show_cbar: whether or not to add colorbar to axis
+    :param show_cbar: whether to add colorbar to axis
     :param brain_atlas: AllenAtlas object
     :param ax: optional axis object to plot on
     :return:
@@ -424,7 +605,7 @@ def plot_points_on_slice(xyz, values=None, coord=-1000,
slice='coronal', mapping
     :param background: background slice to overlay onto, options are 'image' or 'boundary'
     :param cmap: colormap to use
     :param clevels: min max color levels [cmin, cmax]
-    :param show_cbar: whether or not to add colorbar to axis
+    :param show_cbar: whether to add colorbar to axis
     :param aggr: aggregation method. Options are sum, count, mean, std, median, min and max.
         Can also give in custom function (https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.binned_statistic.html)
     :param fwhm: fwhm distance of gaussian kernel in um
@@ -498,7 +679,48 @@ def compute_volume_from_points(xyz, values=None, aggr='sum', fwhm=100, ba=None):
 
 def _plot_slice(coord, slice, region_values, vol_type, background='boundary', map='Allen', clevels=None, cmap='viridis',
                 show_cbar=False, ba=None, ax=None):
+    """
+    Function to plot a scalar value per Allen region on a histology slice.
+
+    Do not use directly but use through the plot_scalar_on_slice function.
+
+    Parameters
+    ----------
+    coord: float
+        Coordinate of slice in um (not needed when slice='top').
+    slice: {'coronal', 'sagittal', 'horizontal', 'top'}
+        The axis through the atlas volume to display.
+    region_values: numpy.array
+        Array of values for each of the lateralised Allen regions found using BrainRegions().acronym. If no
+        value is assigned to the acronym, the value corresponding to that index should be NaN.
+    vol_type: 'value'
+        The type of volume to be displayed; should always be 'value' when scalar values are to be displayed.
+    background: {'image', 'boundary'}
+        The background slice to overlay the values onto. When 'image' it uses the Allen dwi image, when
+        'boundary' it displays the boundaries between regions.
+    map: {'Allen', 'Beryl', 'Cosmos'}
+        The mapping to use.
+    clevels: numpy.array, list or tuple
+        The min and max values to use for the colormap.
+    cmap: str, default='viridis'
+        Colormap to use.
+    show_cbar: bool, default=False
+        Whether to display a colorbar.
+    ba : ibllib.atlas.AllenAtlas
+        A brain atlas object.
+    ax : matplotlib.pyplot.Axes
+        An axis object to plot onto.
+
+    Returns
+    -------
+    fig: matplotlib.figure.Figure
+        The plotted figure.
+    ax: matplotlib.pyplot.Axes
+        The plotted axes.
+    cbar: matplotlib.pyplot.colorbar
+        matplotlib colorbar object, only returned if show_cbar=True.
+    """
     ba = ba or AllenAtlas()
 
     if clevels is None:
@@ -550,14 +772,41 @@ def _plot_slice(coord, slice, region_values, vol_type, background='boundary', ma
         ba.plot_top(volume='boundary', mapping=map, ax=ax)
 
     if show_cbar:
-        norm = matplotlib.colors.Normalize(vmin=clevels[0], vmax=clevels[1], clip=False)
-        cbar = fig.colorbar(matplotlib.cm.ScalarMappable(norm=norm, cmap=cmap), ax=ax)
+        norm = colors.Normalize(vmin=clevels[0], vmax=clevels[1], clip=False)
+        cbar = fig.colorbar(cm.ScalarMappable(norm=norm, cmap=cmap), ax=ax)
         return fig, ax, cbar
     else:
         return fig, ax
 
 
-def plot_scalar_on_barplot(acronyms, values, errors=None, order=True, ylim=None, ax=None, brain_regions=None):
+def plot_scalar_on_barplot(acronyms, values, errors=None, order=True, ax=None, brain_regions=None):
+    """
+    Function to plot a scalar value per Allen region on a bar plot.
If order=True, the acronyms and values are reordered
+    according to the order defined in the Allen structure tree.
+
+    Parameters
+    ----------
+    acronyms: numpy.array
+        A 1D array of acronyms
+    values: numpy.array
+        A 1D array of values corresponding to each acronym in the acronyms array
+    errors: numpy.array
+        A 1D array of error values corresponding to each acronym in the acronyms array
+    order: bool, default=True
+        Whether to order the acronyms according to the order defined by the Allen structure tree
+    ax : matplotlib.pyplot.Axes
+        An axis object to plot onto.
+    brain_regions : ibllib.atlas.regions.BrainRegions
+        A brain regions object
+
+    Returns
+    -------
+    fig: matplotlib.figure.Figure
+        The plotted figure.
+    ax: matplotlib.pyplot.Axes
+        The plotted axes.
+
+    """
     br = brain_regions or BrainRegions()
 
     if order:
@@ -574,3 +823,352 @@ def plot_scalar_on_barplot(acronyms, values, errors=None, order=True, ylim=None,
         ax.bar(np.arange(acronyms.size), values, color=colours)
 
     return fig, ax
+
+
+def plot_swanson_vector(acronyms=None, values=None, ax=None, hemisphere=None, br=None, orientation='landscape',
+                        empty_color='silver', vmin=None, vmax=None, cmap='viridis', annotate=False, annotate_n=10,
+                        annotate_order='top', annotate_list=None, mask=None, mask_color='w', fontsize=10, **kwargs):
+    """
+    Function to plot a scalar value per Allen region on the Swanson projection. Plots on a vectorised version of
+    the Swanson projection.
+
+    Parameters
+    ----------
+    acronyms: numpy.array
+        A 1D array of acronyms or atlas ids
+    values: numpy.array
+        A 1D array of values corresponding to each acronym in the acronyms array
+    ax : matplotlib.pyplot.Axes
+        An axis object to plot onto.
+    hemisphere : {'left', 'right', 'both', 'mirror'}
+        The hemisphere to display.
+    br : ibllib.atlas.BrainRegions
+        A brain regions object.
+    orientation : {'landscape', 'portrait'}, default='landscape'
+        The plot orientation.
+    empty_color : str, tuple of int, default='silver'
+        The greyscale matplotlib color code or an RGBA int8 tuple defining the filling of brain
+        regions not provided.
+    vmin: float
+        Minimum value to restrict the colormap
+    vmax: float
+        Maximum value to restrict the colormap
+    cmap: string
+        matplotlib named colormap to use
+    annotate : bool, default=False
+        If true, labels the regions with acronyms.
+    annotate_n: int
+        The number of regions to annotate
+    annotate_order: {'top', 'bottom'}
+        If annotate_n is specified, whether to annotate the n regions with the highest (top) or lowest (bottom) values
+    annotate_list: numpy.array or list
+        List of regions to annotate; if this is provided, it overwrites annotate_n and annotate_order
+    mask: numpy.array or list
+        List of regions to apply a mask to (fill them with a specific color)
+    mask_color: string, tuple or list
+        Color for the mask
+    fontsize : int
+        The annotation font size in points.
+    **kwargs
+        See plot_polygon and plot_polygon_with_hole.
+
+    Returns
+    -------
+    matplotlib.pyplot.Axes
+        The plotted axes.
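+
+    Examples
+    --------
+    A minimal usage sketch; the acronyms and values below are hypothetical:
+
+    >>> import numpy as np
+    >>> acronyms = np.array(['MOp', 'VISp', 'CA1'])
+    >>> values = np.array([0.5, 1.0, 1.5])
+    >>> ax = plot_swanson_vector(acronyms, values, cmap='viridis', annotate=True)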
+ + """ + br = BrainRegions() if br is None else br + br.compute_hierarchy() + sw_shape = (2968, 6820) + + if ax is None: + fig, ax = plt.subplots() + ax.set_axis_off() + + if hemisphere != 'both' and acronyms is not None and not isinstance(acronyms[0], str): + # If negative atlas ids are passed in and we are not going to lateralise (e.g hemisphere='both') + # transfer them over to one hemisphere + acronyms = np.abs(acronyms) + + if acronyms is not None: + ibr, vals = br.propagate_down(acronyms, values) + colormap = cm.get_cmap(cmap) + vmin = vmin or np.nanmin(vals) + vmax = vmax or np.nanmax(vals) + norm = colors.Normalize(vmin=vmin, vmax=vmax) + rgba_color = colormap(norm(vals), bytes=True) + + if mask is not None: + imr, _ = br.propagate_down(mask, np.ones_like(mask)) + else: + imr = [] + + sw_json = swanson_json() + if hemisphere == 'both': + sw_rev = copy.deepcopy(sw_json) + for sw in sw_rev: + sw['thisID'] = sw['thisID'] + br.n_lr + sw_json = sw_json + sw_rev + + plot_idx = [] + plot_val = [] + for i, reg in enumerate(sw_json): + + coords = reg['coordsReg'] + reg_id = reg['thisID'] + + if acronyms is None: + color = br.rgba[br.mappings['Swanson'][reg['thisID']]] / 255 + if hemisphere is None: + col_l = None + col_r = color + elif hemisphere == 'left': + col_l = empty_color if orientation == 'portrait' else color + col_r = color if orientation == 'portrait' else empty_color + elif hemisphere == 'right': + col_l = color if orientation == 'portrait' else empty_color + col_r = empty_color if orientation == 'portrait' else color + elif hemisphere in ['both', 'mirror']: + col_l = color + col_r = color + else: + idx = np.where(ibr == reg['thisID'])[0] + idxm = np.where(imr == reg['thisID'])[0] + if len(idx) > 0: + plot_idx.append(ibr[idx[0]]) + plot_val.append(vals[idx[0]]) + color = rgba_color[idx[0]] / 255 + elif len(idxm) > 0: + color = mask_color + else: + color = empty_color + + if hemisphere is None: + col_l = None + col_r = color + elif hemisphere == 'left': + col_l = empty_color if orientation == 'portrait' else color + col_r = color if orientation == 'portrait' else empty_color + elif hemisphere == 'right': + col_l = color if orientation == 'portrait' else empty_color + col_r = empty_color if orientation == 'portrait' else color + elif hemisphere == 'mirror': + col_l = color + col_r = color + elif hemisphere == 'both': + if reg_id <= br.n_lr: + col_l = color if orientation == 'portrait' else None + col_r = None if orientation == 'portrait' else color + else: + col_l = None if orientation == 'portrait' else color + col_r = color if orientation == 'portrait' else None + + if reg['hole']: + vertices, codes = coords_for_poly_hole(coords) + if orientation == 'portrait': + vertices[:, [0, 1]] = vertices[:, [1, 0]] + if col_r is not None: + plot_polygon_with_hole(ax, vertices, codes, col_r, reg_id, **kwargs) + if col_l is not None: + vertices_inv = np.copy(vertices) + vertices_inv[:, 0] = -1 * vertices_inv[:, 0] + (sw_shape[0] * 2) + plot_polygon_with_hole(ax, vertices_inv, codes, col_l, reg_id, **kwargs) + else: + if col_r is not None: + plot_polygon_with_hole(ax, vertices, codes, col_r, reg_id, **kwargs) + if col_l is not None: + vertices_inv = np.copy(vertices) + vertices_inv[:, 1] = -1 * vertices_inv[:, 1] + (sw_shape[0] * 2) + plot_polygon_with_hole(ax, vertices_inv, codes, col_l, reg_id, **kwargs) + else: + coords = [coords] if isinstance(coords, dict) else coords + for c in coords: + if orientation == 'portrait': + xy = np.c_[c['y'], c['x']] + if col_r is not None: + 
plot_polygon(ax, xy, col_r, reg_id, **kwargs)
+                    if col_l is not None:
+                        xy_inv = np.copy(xy)
+                        xy_inv[:, 0] = -1 * xy_inv[:, 0] + (sw_shape[0] * 2)
+                        plot_polygon(ax, xy_inv, col_l, reg_id, **kwargs)
+                else:
+                    xy = np.c_[c['x'], c['y']]
+                    if col_r is not None:
+                        plot_polygon(ax, xy, col_r, reg_id, **kwargs)
+                    if col_l is not None:
+                        xy_inv = np.copy(xy)
+                        xy_inv[:, 1] = -1 * xy_inv[:, 1] + (sw_shape[0] * 2)
+                        plot_polygon(ax, xy_inv, col_l, reg_id, **kwargs)
+
+    if orientation == 'portrait':
+        ax.set_ylim(0, sw_shape[1])
+        if hemisphere is None:
+            ax.set_xlim(0, sw_shape[0])
+        else:
+            ax.set_xlim(0, 2 * sw_shape[0])
+    else:
+        ax.set_xlim(0, sw_shape[1])
+        if hemisphere is None:
+            ax.set_ylim(0, sw_shape[0])
+        else:
+            ax.set_ylim(0, 2 * sw_shape[0])
+
+    if annotate:
+        if annotate_list is not None:
+            annotate_swanson(ax=ax, acronyms=annotate_list, orientation=orientation, br=br, thres=10, fontsize=fontsize)
+        elif acronyms is not None:
+            ids = br.index2id(np.array(plot_idx))
+            _, indices, _ = np.intersect1d(br.id, br.remap(ids, 'Swanson-lr'), return_indices=True)
+            a, b = ismember(ids, br.id[indices])
+            sorted_id = ids[a]
+            vals = np.array(plot_val)[a]
+            sort_vals = np.argsort(vals) if annotate_order == 'bottom' else np.argsort(vals)[::-1]
+            annotate_swanson(ax=ax, acronyms=sorted_id[sort_vals[:annotate_n]], orientation=orientation, br=br,
+                             thres=10, fontsize=fontsize)
+        else:
+            annotate_swanson(ax=ax, orientation=orientation, br=br, fontsize=fontsize)
+
+    def format_coord(x, y):
+        patch = next((p for p in ax.patches if p.contains_point(p.get_transform().transform(np.r_[x, y]))), None)
+        if patch is not None:
+            ind = int(patch.get_gid().split('_')[1])
+            ancestors = br.ancestors(br.id[ind])['acronym']
+            return f'sw-{ind}, {ancestors}, aid={br.id[ind]}-{br.acronym[ind]} \n {br.name[ind]}'
+        else:
+            return ''
+
+    ax.format_coord = format_coord
+
+    ax.invert_yaxis()
+    ax.set_aspect('equal')
+    return ax
+
+
+def plot_swanson(acronyms=None, values=None, ax=None, hemisphere=None, br=None,
+                 orientation='landscape', annotate=False, empty_color='silver', **kwargs):
+    """
+    Displays the 2D image corresponding to the Swanson flatmap.
+
+    This case is different from the others in that regions simply map onto other regions;
+    there is no correspondence to the spatial 3D coordinates.
+
+    Parameters
+    ----------
+    acronyms: numpy.array
+        A 1D array of acronyms or atlas ids
+    values: numpy.array
+        A 1D array of values corresponding to each acronym in the acronyms array
+    ax : matplotlib.pyplot.Axes
+        An axis object to plot onto.
+    hemisphere : {'left', 'right', 'both', 'mirror'}
+        The hemisphere to display.
+    br : ibllib.atlas.BrainRegions
+        A brain regions object.
+    orientation : {'landscape', 'portrait'}, default='landscape'
+        The plot orientation.
+    empty_color : str, tuple of int, default='silver'
+        The greyscale matplotlib color code or an RGBA int8 tuple defining the filling of brain
+        regions not provided.
+    vmin: float
+        Minimum value to restrict the colormap
+    vmax: float
+        Maximum value to restrict the colormap
+    cmap: string
+        matplotlib named colormap to use
+    annotate : bool, default=False
+        If true, labels the regions with acronyms.
+    **kwargs
+        See matplotlib.pyplot.imshow.
+
+    Returns
+    -------
+    matplotlib.pyplot.Axes
+        The plotted axes.
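+
+    Examples
+    --------
+    A minimal usage sketch (the values are hypothetical):
+
+    >>> import numpy as np
+    >>> ax = plot_swanson(np.array(['MOp', 'VISp']), np.array([1., 2.]), cmap='Blues')
+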
+ """ + mapping = 'Swanson' + br = BrainRegions() if br is None else br + br.compute_hierarchy() + s2a = swanson() + # both hemispheres + if hemisphere == 'both': + _s2a = s2a + np.sum(br.id > 0) + _s2a[s2a == 0] = 0 + _s2a[s2a == 1] = 1 + s2a = np.r_[s2a, np.flipud(_s2a)] + mapping = 'Swanson-lr' + elif hemisphere == 'mirror': + s2a = np.r_[s2a, np.flipud(s2a)] + if orientation == 'portrait': + s2a = np.transpose(s2a) + if acronyms is None: + regions = br.mappings[mapping][s2a] + im = br.rgba[regions] + iswan = None + else: + ibr, vals = br.propagate_down(acronyms, values) + # we now have the mapped regions and aggregated values, map values onto swanson map + iswan, iv = ismember(s2a, ibr) + im = np.zeros_like(s2a, dtype=np.float32) + im[iswan] = vals[iv] + im[~iswan] = np.nan + if not ax: + ax = plt.gca() + ax.set_axis_off() # unless provided we don't need scales here + ax.imshow(im, **kwargs) + # overlay the boundaries if value plot + imb = np.zeros((*s2a.shape[:2], 4), dtype=np.uint8) + # fill in the empty regions with the blank regions colours if necessary + if iswan is not None: + imb[~iswan] = (np.array(colors.to_rgba(empty_color)) * 255).astype('uint8') + imb[s2a == 0] = 255 + # imb[s2a == 1] = np.array([167, 169, 172, 255]) + imb[s2a == 1] = np.array([0, 0, 0, 255]) + ax.imshow(imb) + if annotate: + annotate_swanson(ax=ax, orientation=orientation, br=br) + + # provides the mean to see the region on axis + def format_coord(x, y): + ind = s2a[int(y), int(x)] + ancestors = br.ancestors(br.id[ind])['acronym'] + return f'sw-{ind}, {ancestors}, aid={br.id[ind]}-{br.acronym[ind]} \n {br.name[ind]}' + + ax.format_coord = format_coord + return ax + + +def annotate_swanson(ax, acronyms=None, orientation='landscape', br=None, thres=20000, **kwargs): + """ + Display annotations on a Swanson flatmap. + + Parameters + ---------- + ax : matplotlib.pyplot.Axes + An axis object to plot onto. + acronyms : array_like + A list or numpy array of acronyms or Allen region IDs. If None plot all acronyms. + orientation : {landscape', 'portrait'}, default='landscape' + The plot orientation. + br : ibllib.atlas.BrainRegions + A brain regions object. + thres : int, default=20000 + The number of pixels above which a region is labelled. + **kwargs + See matplotlib.pyplot.Axes.annotate. + + """ + br = br or BrainRegions() + if acronyms is None: + indices = np.arange(br.id.size) + else: # TODO we should in fact remap and compute labels for hierarchical regions + aids = br.parse_acronyms_argument(acronyms) + _, indices, _ = np.intersect1d(br.id, br.remap(aids, 'Swanson-lr'), return_indices=True) + labels = _swanson_labels_positions(thres=thres) + for ilabel in labels: + # do not display unwanted labels + if ilabel not in indices: + continue + # rotate the labels if the display is in portrait mode + xy = np.flip(labels[ilabel]) if orientation == 'portrait' else labels[ilabel] + ax.annotate(br.acronym[ilabel], xy=xy, ha='center', va='center', **kwargs) diff --git a/ibllib/atlas/regions.py b/ibllib/atlas/regions.py index bab806639..85f94edbb 100644 --- a/ibllib/atlas/regions.py +++ b/ibllib/atlas/regions.py @@ -1,3 +1,31 @@ +"""Brain region mappings. + +Four mappings are currently available within the IBL, these are: + +* Allen Atlas - total of 1328 annotation regions provided by Allen Atlas. +* Beryl Atlas - total of 308 annotation regions determined by Nick Steinmetz for the brain wide map, mainly at the level of + major cortical areas, nuclei/ganglia. 
Thus annotations relating to layers and nuclear subregions are absent.
+* Cosmos Atlas - total of 10 annotation regions determined by Nick Steinmetz for coarse analysis. Annotations include the major
+  divisions of the brain only.
+* Swanson Atlas - total of 319 annotation regions provided by the Swanson atlas (FIXME which one?).
+
+Terminology
+-----------
+* **Name** - The full anatomical name of a brain region.
+* **Acronym** - A shortened version of a brain region name.
+* **Index** - The index of the brain region within the ordered list of brain regions.
+* **ID** - A unique numerical identifier of a brain region. These are typically integers that
+  therefore take up less space than storing the region names or acronyms.
+* **Mapping** - A function that maps one ordered list of brain region IDs to another, allowing one
+  to control annotation granularity and brain region hierarchy, or to translate brain region names
+  from one atlas to another. The default mapping is identity. See
+  [atlas package documentation](./ibllib.atlas.html#mappings) for other mappings.
+* **Order** - Each structure is assigned a consistent position within the flattened graph. This
+  value is known as the annotation index, i.e. the annotation volume contains the brain region
+  order at each point in the image.
+
+FIXME Document the two structure trees. Which website did they come from, and which publication/edition?
+"""
 from dataclasses import dataclass
 import logging
 from pathlib import Path
@@ -8,7 +36,6 @@ from iblutil.numerical import ismember
 
 _logger = logging.getLogger(__name__)
 
-# 'Beryl' is the name given to an atlas containing a subset of the most relevant allen annotations
 FILE_MAPPINGS = str(Path(__file__).parent.joinpath('mappings.pqt'))
 ALLEN_FILE_REGIONS = str(Path(__file__).parent.joinpath('allen_structure_tree.csv'))
 FRANKLIN_FILE_REGIONS = str(Path(__file__).parent.joinpath('franklin_paxinos_structure_tree.csv'))
@@ -16,34 +43,75 @@
 
 @dataclass
 class _BrainRegions:
+    """A struct of brain regions, their names, IDs, relationships and associated plot colours."""
+
+    """numpy.array: An integer array of unique brain region IDs."""
     id: np.ndarray
+    """numpy.array: A str array of verbose brain region names."""
     name: object
+    """numpy.array: A str array of brain region acronyms."""
     acronym: object
+    """numpy.array: An (n, 3) uint8 array of brain region RGB colour values."""
     rgb: np.uint8
+    """numpy.array: An unsigned integer array indicating the number of degrees removed from root."""
     level: np.ndarray
+    """numpy.array: An integer array of parent brain region IDs."""
     parent: np.ndarray
+    """numpy.array: The position within the flattened graph."""
     order: np.uint16
 
+    def __post_init__(self):
+        self._compute_mappings()
+
+    def _compute_mappings(self):
+        """Compute the default mapping for the structure tree.
+
+        The default mapping is identity. This method is intended to be overloaded by subclasses.
+        """
+        self.default_mapping = None
+        self.mappings = dict(default_mapping=self.order)
+        # the number of lateralized regions (typically half the number of regions in a lateralized structure tree)
+        self.n_lr = 0
+
+    def to_df(self):
+        """
+        Return the dataclass as a pandas DataFrame.
+
+        Returns
+        -------
+        pandas.DataFrame
+            The object as a pandas DataFrame with attributes as columns.
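+
+        Examples
+        --------
+        The column order follows the dataclass attributes:
+
+        >>> df = BrainRegions().to_df()
+        >>> list(df.columns)
+        ['id', 'name', 'acronym', 'hexcolor', 'level', 'parent', 'order']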
+ """ + attrs = ['id', 'name', 'acronym', 'hexcolor', 'level', 'parent', 'order'] + d = dict(zip(attrs, list(map(self.__getattribute__, attrs)))) + return pd.DataFrame(d) + @property def rgba(self): + """numpy.array: An (n, 4) uint8 array of RGBA values for all n brain regions.""" rgba = np.c_[self.rgb, self.rgb[:, 0] * 0 + 255] rgba[0, :] = 0 # set the void to transparent return rgba - def _compute_order(self): - """ - Compute the order of regions, per region order by left hemisphere and then right hemisphere - :return: - """ - orders = np.zeros_like(self.id) - # Left hemisphere first - orders[1::2] = np.arange(self.n_lr) + self.n_lr + 1 - # Then right hemisphere - orders[2::2] = np.arange(self.n_lr) + 1 + @property + def hexcolor(self): + """numpy.array of str: The RGB colour values as hexadecimal triplet strings.""" + return np.apply_along_axis(lambda x: "#{0:02x}{1:02x}{2:02x}".format(*x.astype(int)), 1, self.rgb) def get(self, ids) -> Bunch: """ - Get a bunch of the name/id + Return a map of id, name, acronym, etc. for the provided IDs. + + Parameters + ---------- + ids : int, tuple of ints, numpy.array + One or more brain region IDs to get information for. + + Returns + ------- + iblutil.util.Bunch[str, numpy.array] + A dict-like object containing the keys {'id', 'name', 'acronym', 'rgb', 'level', + 'parent', 'order'} with arrays the length of `ids`. """ uid, uind = np.unique(ids, return_inverse=True) a, iself, _ = np.intersect1d(self.id, uid, assume_unique=False, return_indices=True) @@ -54,13 +122,25 @@ def get(self, ids) -> Bunch: def _navigate_tree(self, ids, direction='down', return_indices=False): """ - Private method to navigate the tree and get all related objects either up, down or along the branch. - By convention the provided id is returned in the list of regions - :param ids: array or single allen id (int32) - :param direction: 'up' returns ancestors, 'down' descendants - :param return indices: Bool (False), if true returns a second argument with indices mapping - to the current br object - :return: Bunch + Navigate the tree and get all related objects either up, down or along the branch. + + By convention the provided id is returned in the list of regions. + + Parameters + ---------- + ids : int, array_like + One or more brain region IDs (int32). + direction : {'up', 'down'} + Whether to return ancestors ('up') or descendants ('down'). + return_indices : bool, default=False + If true returns a second argument with indices mapping to the current brain region + object. + + Returns + ------- + iblutil.util.Bunch[str, numpy.array] + A dict-like object containing the keys {'id', 'name', 'acronym', 'rgb', 'level', + 'parent', 'order'} with arrays the length of `ids`. """ indices = ismember(self.id, ids)[0] count = np.sum(indices) @@ -82,10 +162,21 @@ def _navigate_tree(self, ids, direction='down', return_indices=False): def subtree(self, scalar_id, return_indices=False): """ - Given a node, returns the subtree containing the node along with ancestors - :param return indices: Bool (False), if true returns a second argument with indices mapping - to the current br object - :return: Bunch + Given a node, returns the subtree containing the node along with ancestors. + + Parameters + ---------- + scalar_id : int + A brain region ID. + return_indices : bool, default=False + If true returns a second argument with indices mapping to the current brain region + object. 
+ + Returns + ------- + iblutil.util.Bunch[str, numpy.array] + A dict-like object containing the keys {'id', 'name', 'acronym', 'rgb', 'level', + 'parent', 'order'} with arrays the length of one. """ if not np.isscalar(scalar_id): assert scalar_id.size == 1 @@ -99,55 +190,80 @@ def subtree(self, scalar_id, return_indices=False): def descendants(self, ids, **kwargs): """ - Get descendants from one or an array of ids - :param ids: np.array or scalar representing the region primary key - :param return_indices: Bool (False) returns the indices in the current br obj - :return: Bunch + Get descendants from one or more IDs. + + Parameters + ---------- + ids : int, array_like + One or more brain region IDs. + return_indices : bool, default=False + If true returns a second argument with indices mapping to the current brain region + object. + + Returns + ------- + iblutil.util.Bunch[str, numpy.array] + A dict-like object containing the keys {'id', 'name', 'acronym', 'rgb', 'level', + 'parent', 'order'} with arrays the length of `ids`. """ return self._navigate_tree(ids, direction='down', **kwargs) def ancestors(self, ids, **kwargs): """ - Get ancestors from one or an array of ids - :param ids: np.array or scalar representing the region primary key - :param return_indices: Bool (False) returns the indices in the current br obj - :return: Bunch + Get ancestors from one or more IDs. + + Parameters + ---------- + ids : int, array_like + One or more brain region IDs. + return_indices : bool, default=False + If true returns a second argument with indices mapping to the current brain region + object. + + Returns + ------- + iblutil.util.Bunch[str, numpy.array] + A dict-like object containing the keys {'id', 'name', 'acronym', 'rgb', 'level', + 'parent', 'order'} with arrays the length of `ids`. """ return self._navigate_tree(ids, direction='up', **kwargs) def leaves(self): """ - Get all regions that do not have children - :return: + Get all regions that do not have children. + + Returns + ------- + iblutil.util.Bunch[str, numpy.array] + A dict-like object containing the keys {'id', 'name', 'acronym', 'rgb', 'level', + 'parent', 'order'} with arrays of matching length. """ leaves = np.setxor1d(self.id, self.parent) return self.get(np.int64(leaves[~np.isnan(leaves)])) - def propagate_down(self, acronyms, values): - """ - This function remaps a set of user specified acronyms and values to the - swanson map, by filling down the child nodes when higher up values are - provided. - :param acronyms: list or array of allen ids or acronyms - :param values: list or array of associated values - :return: - """ - user_aids = self.parse_acronyms_argument(acronyms) - _, user_indices = ismember(user_aids, self.id) - self.compute_hierarchy() - ia, ib = ismember(self.hierarchy, user_indices) - v = np.zeros_like(ia, dtype=np.float64) * np.NaN - v[ia] = values[ib] - all_values = np.nanmedian(v, axis=0) - indices = np.where(np.any(ia, axis=0))[0] - all_values = all_values[indices] - return indices, all_values - def _mapping_from_regions_list(self, new_map, lateralize=False): """ - From a vector of regions id, creates a mapping such as - newids = self.mapping - :param new_map: np.array: vector of regions id + From a vector of region IDs, creates a structure tree index mapping. + + For example, given a subset of brain region IDs, this returns an array the length of the + total number of brain regions, where each element is the structure tree index for that + region. 
The IDs in `new_map` and their descendants are given that ID's index and any
+        missing IDs are given the root index.
+
+        Parameters
+        ----------
+        new_map : array_like of int
+            An array of atlas brain region IDs.
+        lateralize : bool
+            If true, lateralized indices are assigned to all IDs. If false, IDs are assigned to a
+            non-lateralized index regardless of their sign.
+
+        Returns
+        -------
+        numpy.array
+            A vector of brain region indices representing the structure tree order corresponding to
+            each input ID and its descendants.
         """
         I_ROOT = 1
         I_VOID = 0
@@ -159,7 +275,7 @@ def _mapping_from_regions_list(self, new_map, lateralize=False):
         iid, inm = ismember(self.id, new_map)
         iid = np.where(iid)[0]
         mapind = np.zeros_like(self.id) + I_ROOT  # non assigned regions are root
-        # TO DO should root be lateralised?
+        # TODO should root be lateralised?
         mapind[iid] = iid  # regions present in the list have the same index
         # Starting by the higher up levels in the hierarchy, assign all descendants to the mapping
         for i in np.argsort(self.level[iid]):
@@ -219,7 +335,7 @@ def id2acronym(self, atlas_id, mapping=None):
         """
         Convert atlas id to acronym and remap
 
-        :param acronym: list or array of atlas ids
+        :param atlas_id: list or array of atlas ids
         :param mapping: target map to remap acronyms
         :return: array of remapped acronyms
         """
@@ -231,7 +347,7 @@ def id2id(self, atlas_id, mapping='Allen'):
         """
         Remap atlas id onto mapping
 
-        :param acronym: list or array of atlas ids
+        :param atlas_id: list or array of atlas ids
         :param mapping: target map to remap acronyms
         :return: array of remapped atlas ids
         """
@@ -303,7 +419,6 @@ def _filter_lr_index(self, values, hemisphere):
         Filter index values by those on left or right hemisphere
 
         :param values: array of index values
-        :param mapping: mapping to use
         :param hemisphere: hemisphere
         :return:
         """
@@ -322,12 +437,23 @@ def _find_inds(self, values, all_values):
         return inds
 
     def parse_acronyms_argument(self, acronyms, mode='raise'):
-        """
-        Parse an input acronym arguments: returns a numpy array of allen regions ids
-        regardless of the input: list of acronyms, np.array of acronyms strings or np aray of allen ids
-        To be used into functions to provide flexible input type
-        :param acronyms: List, np.array of acronym strings or np.array of allen ids
-        :return: np.array of int ids
+        """Parse input acronyms.
+
+        Returns a numpy array of region IDs regardless of the input: list of acronyms, array of
+        acronym strings or region IDs. To be used by functions to provide flexible input type.
+
+        Parameters
+        ----------
+        acronyms : array_like
+            An array of region acronyms to convert to IDs. An array of region IDs may also be
+            provided, in which case they are simply returned.
+        mode : str, optional
+            If 'raise', asserts that all acronyms exist in the structure tree.
+
+        Returns
+        -------
+        numpy.array of int
+            An array of brain region IDs corresponding to `acronyms`.
         """
         # first get the allen region ids regardless of the input type
         acronyms = np.array(acronyms)
@@ -335,13 +461,17 @@ def parse_acronyms_argument(self, acronyms, mode='raise'):
         if not np.issubdtype(acronyms.dtype, np.number):
             user_aids = self.acronym2id(acronyms)
             if mode == 'raise':
-                assert user_aids.size == acronyms.size, "All acronyms should exist in Allen ontology"
+                assert user_aids.size == acronyms.size, 'all acronyms must exist in the ontology'
         else:
             user_aids = acronyms
         return user_aids
 
 
 class FranklinPaxinosRegions(_BrainRegions):
+    """Mouse Brain in Stereotaxic Coordinates (MBSC).
+ + Paxinos G, and Franklin KBJ (2012). The Mouse Brain in Stereotaxic Coordinates, 4th edition (Elsevier Academic Press). + """ def __init__(self): df_regions = pd.read_csv(FRANKLIN_FILE_REGIONS) # get rid of nan values, there are rows that are in Allen but are not in the Franklin Paxinos atlas @@ -400,23 +530,31 @@ def __init__(self): parent=df_regions['Parent ID'].to_numpy(), order=df_regions['structure Order'].to_numpy().astype(np.uint16)) - self.n_lr = int((len(self.id) - 1) / 2) - self._compute_mappings() - self.default_mapping = 'FranklinPaxinos' - def _compute_mappings(self): + """ + Compute lateralized and non-lateralized mappings. + + This method is called by __post_init__. + """ self.mappings = { 'FranklinPaxinos': self._mapping_from_regions_list(np.unique(np.abs(self.id)), lateralize=False), 'FranklinPaxinos-lr': np.arange(self.id.size), } + self.default_mapping = 'FranklinPaxinos' + self.n_lr = int((len(self.id) - 1) / 2) # the number of lateralized regions class BrainRegions(_BrainRegions): """ + A struct of Allen brain regions, their names, IDs, relationships and associated plot colours. + ibllib.atlas.regions.BrainRegions(brainmap='Allen') - The Allen atlas ids are kept intact but lateralized as follows: labels are duplicated - and ids multiplied by -1, with the understanding that left hemisphere regions have negative - ids. + + Notes + ----- + The Allen atlas IDs are kept intact but lateralized as follows: labels are duplicated + and IDs multiplied by -1, with the understanding that left hemisphere regions have negative + IDs. """ def __init__(self): df_regions = pd.read_csv(ALLEN_FILE_REGIONS) @@ -431,6 +569,9 @@ def __init__(self): lambda x: int(x, 16) if isinstance(x, str) else 256 ** 3 - 1)) c = np.flip(np.reshape(c.view(np.uint8), (df_regions.id.size, 4))[:, :3], 1) c[0, :] = 0 # set the void region to black + # For void assign the depth and level to avoid warnings of nan being converted to int + df_regions.loc[0, 'depth'] = 0 + df_regions.loc[0, 'graph_order'] = 0 # creates the BrainRegion instance super().__init__(id=df_regions.id.to_numpy(), name=df_regions.name.to_numpy(), @@ -439,37 +580,46 @@ def __init__(self): level=df_regions.depth.to_numpy().astype(np.uint16), parent=df_regions.parent_structure_id.to_numpy(), order=df_regions.graph_order.to_numpy().astype(np.uint16)) - # mappings are indices not ids: they range from 0 to n regions -1 - mappings = pd.read_parquet(FILE_MAPPINGS) - self.mappings = {k: mappings[k].to_numpy() for k in mappings} - self.n_lr = int((len(self.id) - 1) / 2) - self.default_mapping = 'Allen' def _compute_mappings(self): """ - Recomputes the mapping indices for all mappings - This is left mainly as a reference for adding future mappings as this take a few seconds - to execute. In production,we use the MAPPING_FILES pqt to avoid recomputing at each \ - instantiation + Recomputes the mapping indices for all mappings. + + Attempts to load mappings from the FILE_MAPPINGS file, otherwise generates from arrays of + brain IDs. In production, we use the MAPPING_FILES pqt to avoid recomputing at each + instantiation as this take a few seconds to execute. + + Currently there are 8 available mappings (Allen, Beryl, Cosmos, and Swanson), lateralized + (with suffix -lr) and non-lateralized. Each row contains the correspondence to the Allen + CCF structure tree order (i.e. index) for each mapping. + + This method is called by __post_init__. 
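+
+        Examples
+        --------
+        A rough sketch of how the computed mappings are accessed:
+
+        >>> br = BrainRegions()
+        >>> beryl_indices = br.mappings['Beryl']  # structure tree index per Allen region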
""" - beryl = np.load(Path(__file__).parent.joinpath('beryl.npy')) - cosmos = np.load(Path(__file__).parent.joinpath('cosmos.npy')) - swanson = np.load(Path(__file__).parent.joinpath('swanson_regions.npy')) - self.mappings = { - 'Allen': self._mapping_from_regions_list(np.unique(np.abs(self.id)), lateralize=False), - 'Allen-lr': np.arange(self.id.size), - 'Beryl': self._mapping_from_regions_list(beryl, lateralize=False), - 'Beryl-lr': self._mapping_from_regions_list(beryl, lateralize=True), - 'Cosmos': self._mapping_from_regions_list(cosmos, lateralize=False), - 'Cosmos-lr': self._mapping_from_regions_list(cosmos, lateralize=True), - 'Swanson': self._mapping_from_regions_list(swanson, lateralize=False), - 'Swanson-lr': self._mapping_from_regions_list(swanson, lateralize=True), - } - pd.DataFrame(self.mappings).to_parquet(FILE_MAPPINGS) + # mappings are indices not ids: they range from 0 to n regions -1 + if Path(FILE_MAPPINGS).exists(): + mappings = pd.read_parquet(FILE_MAPPINGS) + self.mappings = {k: mappings[k].to_numpy() for k in mappings} + else: + beryl = np.load(Path(__file__).parent.joinpath('beryl.npy')) + cosmos = np.load(Path(__file__).parent.joinpath('cosmos.npy')) + swanson = np.load(Path(__file__).parent.joinpath('swanson_regions.npy')) + self.mappings = { + 'Allen': self._mapping_from_regions_list(np.unique(np.abs(self.id)), lateralize=False), + 'Allen-lr': np.arange(self.id.size), + 'Beryl': self._mapping_from_regions_list(beryl, lateralize=False), + 'Beryl-lr': self._mapping_from_regions_list(beryl, lateralize=True), + 'Cosmos': self._mapping_from_regions_list(cosmos, lateralize=False), + 'Cosmos-lr': self._mapping_from_regions_list(cosmos, lateralize=True), + 'Swanson': self._mapping_from_regions_list(swanson, lateralize=False), + 'Swanson-lr': self._mapping_from_regions_list(swanson, lateralize=True), + } + pd.DataFrame(self.mappings).to_parquet(FILE_MAPPINGS) + self.default_mapping = 'Allen' + self.n_lr = int((len(self.id) - 1) / 2) # the number of lateralized regions def compute_hierarchy(self): """ - Creates a self.hierarchy attributes that is a n_levels by n_region array + Creates a self.hierarchy attribute that is an n_levels by n_region array of indices. This is useful to perform fast vectorized computations of ancestors and descendants. :return: @@ -494,21 +644,67 @@ def compute_hierarchy(self): self.hierarchy[lev, sel] = np.where(sel)[0] _mask[sel] = True - def remap(self, region_ids, source_map='Allen', target_map='Beryl'): + def propagate_down(self, acronyms, values): """ - Remap atlas regions ids from source map to target map - :param region_ids: atlas ids to map - :param source_map: map name which original region_ids are in - :param target_map: map name onto which to map + This function remaps a set of user specified acronyms and values to the + swanson map, by filling down the child nodes when higher up values are + provided. + :param acronyms: list or array of allen ids or acronyms + :param values: list or array of associated values :return: + # FIXME Why only the swanson map? Also, how is this actually related to the Swanson map? 
+ """ + user_aids = self.parse_acronyms_argument(acronyms) + _, user_indices = ismember(user_aids, self.id) + self.compute_hierarchy() + ia, ib = ismember(self.hierarchy, user_indices) + v = np.zeros_like(ia, dtype=np.float64) * np.NaN + v[ia] = values[ib] + all_values = np.nanmedian(v, axis=0) + indices = np.where(np.any(ia, axis=0))[0] + all_values = all_values[indices] + return indices, all_values + + def remap(self, region_ids, source_map='Allen', target_map='Beryl'): """ - _, inds = ismember(region_ids, self.id[self.mappings[source_map]]) - return self.id[self.mappings[target_map][inds]] + Remap atlas regions IDs from source map to target map. + + Any NaNs in `region_ids` remain as NaN in the output array. + + Parameters + ---------- + region_ids : array_like of int + The region IDs to remap. + source_map : str + The source map name, in `self.mappings`. + target_map : str + The target map name, in `self.mappings`. + + Returns + ------- + numpy.array of int + The input IDs mapped to `target_map`. + """ + isnan = np.isnan(region_ids) + if np.sum(isnan) > 0: + # In case the user provides nans + nan_loc = np.where(isnan)[0] + _, inds = ismember(region_ids[~isnan], self.id[self.mappings[source_map]]) + mapped_ids = self.id[self.mappings[target_map][inds]].astype(float) + mapped_ids = np.insert(mapped_ids, nan_loc, np.full(nan_loc.shape, np.nan)) + else: + _, inds = ismember(region_ids, self.id[self.mappings[source_map]]) + mapped_ids = self.id[self.mappings[target_map][inds]] + + return mapped_ids def regions_from_allen_csv(): """ - Reads csv file containing the ALlen Ontology and instantiates a BrainRegions object + (DEPRECATED) Reads csv file containing the ALlen Ontology and instantiates a BrainRegions object. + + NB: Instantiate BrainRegions directly instead. + :return: BrainRegions object """ _logger.warning("ibllib.atlas.regions.regions_from_allen_csv() is deprecated. " diff --git a/ibllib/atlas/swanson_regions.npy b/ibllib/atlas/swanson_regions.npy index 92434207d..2475d392b 100644 Binary files a/ibllib/atlas/swanson_regions.npy and b/ibllib/atlas/swanson_regions.npy differ diff --git a/ibllib/ephys/ephysqc.py b/ibllib/ephys/ephysqc.py index 59f3c0a8b..e88250cb6 100644 --- a/ibllib/ephys/ephysqc.py +++ b/ibllib/ephys/ephysqc.py @@ -370,8 +370,9 @@ def _single_test(assertion, str_ok, str_ko): str_ok="PASS: Bpod", str_ko="FAILED: Bpod") try: # note: tried to depend as little as possible on the extraction code but for the valve... - behaviour = ephys_fpga.extract_behaviour_sync(rawsync, chmap=sync_map) - res = behaviour.valveOpen_times.size > 1 + bpod = ephys_fpga.get_sync_fronts(rawsync, sync_map['bpod']) + _, t_valve_open, _ = ephys_fpga._assign_events_bpod(bpod['times'], bpod['polarities']) + res = t_valve_open.size > 1 except AssertionError: res = False # check that the reward valve is actionned at least once diff --git a/ibllib/graphic.py b/ibllib/graphic.py deleted file mode 100644 index cdfafb971..000000000 --- a/ibllib/graphic.py +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env python -# -*- coding:utf-8 -*- -# @Author: Niccolò Bonacchi -# @Date: Sunday, February 3rd 2019, 11:59:56 am -import tkinter as tk -from tkinter import messagebox -import traceback -import warnings - -for line in traceback.format_stack(): - print(line.strip()) - -warnings.warn('ibllib.graphic has been deprecated. 
' - 'See stack above', DeprecationWarning) - - -def popup(title, msg): - root = tk.Tk() - root.withdraw() - messagebox.showinfo(title, msg) - root.quit() diff --git a/ibllib/io/extractors/base.py b/ibllib/io/extractors/base.py index 05e320a6f..c1b46b22e 100644 --- a/ibllib/io/extractors/base.py +++ b/ibllib/io/extractors/base.py @@ -30,7 +30,8 @@ class BaseExtractor(abc.ABC): session_path = None save_names = None - default_path = Path("alf") # relative to session + var_names = None + default_path = Path('alf') # relative to session def __init__(self, session_path=None): # If session_path is None Path(session_path) will fail @@ -38,8 +39,7 @@ def __init__(self, session_path=None): def extract(self, save=False, path_out=None, **kwargs): """ - :return: numpy.ndarray or list of ndarrays, list of filenames - :rtype: dtype('float64') + :return: dict of numpy.array, list of filenames """ out = self._extract(**kwargs) files = self._save(out, path_out=path_out) if save else None @@ -91,6 +91,13 @@ def _write_to_disk(file_path, data): elif isinstance(self.save_names, str): file_paths = path_out.joinpath(self.save_names) _write_to_disk(file_paths, data) + elif isinstance(data, dict): + file_paths = [] + for var, value in data.items(): + if fn := self.save_names[self.var_names.index(var)]: + fpath = path_out.joinpath(fn) + _write_to_disk(fpath, value) + file_paths.append(fpath) else: # Should be list or tuple... assert len(data) == len(self.save_names) file_paths = [] @@ -121,7 +128,7 @@ class BaseBpodTrialsExtractor(BaseExtractor): settings = None task_collection = None - def extract(self, task_collection='raw_behavior_data', bpod_trials=None, settings=None, **kwargs): + def extract(self, bpod_trials=None, settings=None, **kwargs): """ :param: bpod_trials (optional) bpod trials from jsonable in a dictionary :param: settings (optional) bpod iblrig settings json file in a dictionary @@ -132,14 +139,14 @@ def extract(self, task_collection='raw_behavior_data', bpod_trials=None, setting """ self.bpod_trials = bpod_trials self.settings = settings - self.task_collection = task_collection + self.task_collection = kwargs.pop('task_collection', 'raw_behavior_data') if self.bpod_trials is None: self.bpod_trials = raw.load_data(self.session_path, task_collection=self.task_collection) if not self.settings: self.settings = raw.load_settings(self.session_path, task_collection=self.task_collection) if self.settings is None: self.settings = {"IBLRIG_VERSION_TAG": "100.0.0"} - elif self.settings["IBLRIG_VERSION_TAG"] == "": + elif self.settings.get("IBLRIG_VERSION_TAG", "") == "": self.settings["IBLRIG_VERSION_TAG"] = "100.0.0" return super(BaseBpodTrialsExtractor, self).extract(**kwargs) @@ -168,7 +175,9 @@ def run_extractor_classes(classes, session_path=None, **kwargs): files.extend(fil) elif fil is not None: files.append(fil) - if isinstance(cls.var_names, str): + if isinstance(out, dict): + outputs.update(out) + elif isinstance(cls.var_names, str): outputs[cls.var_names] = out else: for i, k in enumerate(cls.var_names): @@ -191,11 +200,11 @@ def _get_task_types_json_config(): return task_types -def get_task_protocol(session_path): +def get_task_protocol(session_path, task_collection='raw_behavior_data'): try: - settings = load_settings(get_session_path(session_path)) + settings = load_settings(get_session_path(session_path), task_collection=task_collection) except json.decoder.JSONDecodeError: - _logger.error(f"Can't read settings for {session_path}") + _logger.error(f'Can\'t read settings for {session_path}') 
return if settings: return settings.get('PYBPOD_PROTOCOL', None) @@ -218,10 +227,10 @@ def get_task_extractor_type(task_name): task_types = _get_task_types_json_config() task_type = task_types.get(task_name, None) - if task_type is None: + if task_type is None: # Try lazy matching of name task_type = next((task_types[tt] for tt in task_types if tt in task_name), None) if task_type is None: - _logger.warning(f"No extractor type found for {task_name}") + _logger.warning(f'No extractor type found for {task_name}') return task_type @@ -234,7 +243,7 @@ def get_session_extractor_type(session_path, task_collection='raw_behavior_data' """ settings = load_settings(session_path, task_collection=task_collection) if settings is None: - _logger.error(f'ABORT: No data found in "raw_behavior_data" folder {session_path}') + _logger.error(f'ABORT: No data found in "{task_collection}" folder {session_path}') return False extractor_type = get_task_extractor_type(settings['PYBPOD_PROTOCOL']) if extractor_type: @@ -243,13 +252,13 @@ def get_session_extractor_type(session_path, task_collection='raw_behavior_data' return False -def get_pipeline(session_path): +def get_pipeline(session_path, task_collection='raw_behavior_data'): """ Get the pre-processing pipeline name from a session path :param session_path: :return: """ - stype = get_session_extractor_type(session_path) + stype = get_session_extractor_type(session_path, task_collection=task_collection) return _get_pipeline_from_task_type(stype) @@ -267,3 +276,80 @@ def _get_pipeline_from_task_type(stype): return 'widefield' else: return stype + + +def _get_task_extractor_map(): + """ + Load the task protocol extractor map. + + Returns + ------- + dict(str, str) + A map of task protocol to Bpod trials extractor class. + """ + FILENAME = 'task_extractor_map.json' + with open(Path(__file__).parent.joinpath(FILENAME)) as fp: + task_extractors = json.load(fp) + try: + # look if there are custom extractor types in the personal projects repo + import projects.base + custom_extractors = Path(projects.base.__file__).parent.joinpath(FILENAME) + with open(custom_extractors) as fp: + custom_task_types = json.load(fp) + task_extractors.update(custom_task_types) + except (ModuleNotFoundError, FileNotFoundError): + pass + return task_extractors + + +def get_bpod_extractor_class(session_path, task_collection='raw_behavior_data'): + """ + Get the Bpod trials extractor class associated with a given Bpod session. + + Parameters + ---------- + session_path : str, pathlib.Path + The session path containing Bpod behaviour data. + task_collection : str + The session_path subfolder containing the Bpod settings file. + + Returns + ------- + str + The extractor class name. + """ + # Attempt to load settings files + settings = load_settings(session_path, task_collection=task_collection) + if settings is None: + raise ValueError(f'No data found in "{task_collection}" folder {session_path}') + # Attempt to get task protocol + protocol = settings.get('PYBPOD_PROTOCOL') + if not protocol: + raise ValueError(f'No task protocol found in {session_path/task_collection}') + return protocol2extractor(protocol) + + +def protocol2extractor(protocol): + """ + Get the Bpod trials extractor class associated with a given Bpod task protocol. + + The Bpod task protocol can be found in the 'PYBPOD_PROTOCOL' field of _iblrig_taskSettings.raw.json. + + Parameters + ---------- + protocol : str + A Bpod task protocol name. + + Returns + ------- + str + The extractor class name. 
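+
+    Examples
+    --------
+    A sketch of the expected behaviour; the exact mapping lives in task_extractor_map.json, so
+    the protocol name and returned class below are illustrative only:
+
+    >>> protocol2extractor('_iblrig_tasks_trainingChoiceWorld6.4.2')  # doctest: +SKIP
+    'TrainingTrials'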
+ """ + # Attempt to get extractor class from protocol + extractor_map = _get_task_extractor_map() + extractor = extractor_map.get(protocol, None) + if extractor is None: # Try lazy matching of name + extractor = next((extractor_map[tt] for tt in extractor_map if tt in protocol), None) + if extractor is None: + raise ValueError(f'No extractor associated with "{protocol}"') + return extractor diff --git a/ibllib/io/extractors/biased_trials.py b/ibllib/io/extractors/biased_trials.py index 2d3e92e8e..c7c16d6c0 100644 --- a/ibllib/io/extractors/biased_trials.py +++ b/ibllib/io/extractors/biased_trials.py @@ -13,6 +13,8 @@ StimOffTriggerTimes, StimFreezeTriggerTimes, ErrorCueTriggerTimes, PhasePosQuiescence) from ibllib.io.extractors.training_wheel import Wheel +__all__ = ['extract_all', 'BiasedTrials', 'EphysTrials'] + class ContrastLR(BaseBpodTrialsExtractor): """ @@ -121,7 +123,8 @@ class TrialsTableEphys(BaseBpodTrialsExtractor): wheel_timestamps, wheel_position, wheel_moves_intervals, wheel_moves_peak_amplitude """ save_names = ('_ibl_trials.table.pqt', None, None, '_ibl_wheel.timestamps.npy', '_ibl_wheel.position.npy', - '_ibl_wheelMoves.intervals.npy', '_ibl_wheelMoves.peakAmplitude.npy', None, None, None, None, None) + '_ibl_wheelMoves.intervals.npy', '_ibl_wheelMoves.peakAmplitude.npy', None, + None, None, None, '_ibl_trials.quiescencePeriod.npy') var_names = ('table', 'stimOff_times', 'stimFreeze_times', 'wheel_timestamps', 'wheel_position', 'wheel_moves_intervals', 'wheel_moves_peak_amplitude', 'peakVelocity_times', 'is_final_movement', 'phase', 'position', 'quiescence') @@ -138,6 +141,54 @@ def _extract(self, extractor_classes=None, **kwargs): return table.to_df(), *(out.pop(x) for x in self.var_names if x != 'table') +class BiasedTrials(BaseBpodTrialsExtractor): + """ + Same as training_trials.TrainingTrials except... + - there is no RepNum + - ContrastLR is extracted differently + - IncludedTrials is only extracted for 5.0.0 or greater + """ + save_names = ('_ibl_trials.goCueTrigger_times.npy', '_ibl_trials.stimOnTrigger_times.npy', None, None, None, None, + '_ibl_trials.table.pqt', None, None, '_ibl_wheel.timestamps.npy', '_ibl_wheel.position.npy', + '_ibl_wheelMoves.intervals.npy', '_ibl_wheelMoves.peakAmplitude.npy', None, None, '_ibl_trials.included.npy', + None, None, '_ibl_trials.quiescencePeriod.npy') + var_names = ('goCueTrigger_times', 'stimOnTrigger_times', 'itiIn_times', 'stimOffTrigger_times', 'stimFreezeTrigger_times', + 'errorCueTrigger_times', 'table', 'stimOff_times', 'stimFreeze_times', 'wheel_timestamps', 'wheel_position', + 'wheel_moves_intervals', 'wheel_moves_peak_amplitude', 'peakVelocity_times', 'is_final_movement', 'included', + 'phase', 'position', 'quiescence') + + def _extract(self, extractor_classes=None, **kwargs): + base = [GoCueTriggerTimes, StimOnTriggerTimes, ItiInTimes, StimOffTriggerTimes, StimFreezeTriggerTimes, + ErrorCueTriggerTimes, TrialsTableBiased, IncludedTrials, PhasePosQuiescence] + # Exclude from trials table + out, _ = run_extractor_classes(base, session_path=self.session_path, bpod_trials=self.bpod_trials, settings=self.settings, + save=False, task_collection=self.task_collection) + return tuple(out.pop(x) for x in self.var_names) + + +class EphysTrials(BaseBpodTrialsExtractor): + """ + Same as BiasedTrials except... 
+ - Contrast, phase, position, probabilityLeft and quiescence is extracted differently + """ + save_names = ('_ibl_trials.goCueTrigger_times.npy', '_ibl_trials.stimOnTrigger_times.npy', None, None, None, None, + '_ibl_trials.table.pqt', None, None, '_ibl_wheel.timestamps.npy', '_ibl_wheel.position.npy', + '_ibl_wheelMoves.intervals.npy', '_ibl_wheelMoves.peakAmplitude.npy', None, None, + '_ibl_trials.included.npy', None, None, '_ibl_trials.quiescencePeriod.npy') + var_names = ('goCueTrigger_times', 'stimOnTrigger_times', 'itiIn_times', 'stimOffTrigger_times', 'stimFreezeTrigger_times', + 'errorCueTrigger_times', 'table', 'stimOff_times', 'stimFreeze_times', 'wheel_timestamps', 'wheel_position', + 'wheel_moves_intervals', 'wheel_moves_peak_amplitude', 'peakVelocity_times', 'is_final_movement', 'included', + 'phase', 'position', 'quiescence') + + def _extract(self, extractor_classes=None, **kwargs): + base = [GoCueTriggerTimes, StimOnTriggerTimes, ItiInTimes, StimOffTriggerTimes, StimFreezeTriggerTimes, + ErrorCueTriggerTimes, TrialsTableEphys, IncludedTrials, PhasePosQuiescence] + # Exclude from trials table + out, _ = run_extractor_classes(base, session_path=self.session_path, bpod_trials=self.bpod_trials, settings=self.settings, + save=False, task_collection=self.task_collection) + return tuple(out.pop(x) for x in self.var_names) + + def extract_all(session_path, save=False, bpod_trials=False, settings=False, extra_classes=None, task_collection='raw_behavior_data', save_path=None): """ @@ -163,19 +214,15 @@ def extract_all(session_path, save=False, bpod_trials=False, settings=False, ext if settings['IBLRIG_VERSION_TAG'] == '': settings['IBLRIG_VERSION_TAG'] = '100.0.0' - base = [GoCueTriggerTimes] # Version check if parse_version(settings['IBLRIG_VERSION_TAG']) >= parse_version('5.0.0'): # We now extract a single trials table - base.extend([ - StimOnTriggerTimes, ItiInTimes, StimOffTriggerTimes, StimFreezeTriggerTimes, ErrorCueTriggerTimes, - TrialsTableBiased, IncludedTrials, PhasePosQuiescence - ]) + base = [BiasedTrials] else: - base.extend([ - Intervals, Wheel, FeedbackType, ContrastLR, ProbabilityLeft, Choice, + base = [ + GoCueTriggerTimes, Intervals, Wheel, FeedbackType, ContrastLR, ProbabilityLeft, Choice, StimOnTimes_deprecated, RewardVolume, FeedbackTimes, ResponseTimes, GoCueTimes, PhasePosQuiescence - ]) + ] if extra_classes: base.extend(extra_classes) diff --git a/ibllib/io/extractors/bpod_trials.py b/ibllib/io/extractors/bpod_trials.py index 1b6e978e2..950797b88 100644 --- a/ibllib/io/extractors/bpod_trials.py +++ b/ibllib/io/extractors/bpod_trials.py @@ -3,30 +3,62 @@ i.e. habituation, training or biased. 
""" import logging +import importlib from collections import OrderedDict +import warnings from pkg_resources import parse_version from ibllib.io.extractors import habituation_trials, training_trials, biased_trials, opto_trials -import ibllib.io.extractors.base +from ibllib.io.extractors.base import get_bpod_extractor_class, protocol2extractor +from ibllib.io.extractors.habituation_trials import HabituationTrials +from ibllib.io.extractors.training_trials import TrainingTrials +from ibllib.io.extractors.biased_trials import BiasedTrials, EphysTrials +from ibllib.io.extractors.base import get_session_extractor_type, BaseBpodTrialsExtractor import ibllib.io.raw_data_loaders as rawio _logger = logging.getLogger(__name__) -def extract_all(session_path, save=True, bpod_trials=None, settings=None, task_collection='raw_behavior_data', save_path=None): +def extract_all(session_path, save=True, bpod_trials=None, settings=None, + task_collection='raw_behavior_data', extractor_type=None, save_path=None): """ Extracts a training session from its path. NB: Wheel must be extracted first in order to extract trials.firstMovement_times. - :param session_path: the path to the session to be extracted - :param save: if true a subset of the extracted data are saved as ALF - :param bpod_trials: list of Bpod trial data - :param settings: the Bpod session settings - :return: trials: Bunch/dict of trials - :return: wheel: Bunch/dict of wheel positions - :return: out_Files: list of output files + + Parameters + ---------- + session_path : str, pathlib.Path + The path to the session to be extracted. + task_collection : str + The subfolder containing the raw Bpod data files. + save : bool + If true, save the output files to save_path. + bpod_trials : list of dict + The loaded Bpod trial data. If None, attempts to load _iblrig_taskData.raw from + raw_task_collection. + settings : dict + The loaded Bpod settings. If None, attempts to load _iblrig_taskSettings.raw from + raw_task_collection. + extractor_type : str + The type of extraction. Supported types are {'ephys', 'biased', 'biased_opto', + 'ephys_biased_opto', 'training', 'ephys_training', 'habituation'}. If None, extractor type + determined from settings. + save_path : str, pathlib.Path + The location of the output files if save is true. Defaults to /alf. + + Returns + ------- + dict + The extracted trials data. + dict + The extracted wheel data. + list of pathlib.Path + The output files if save is true. 
""" - extractor_type = ibllib.io.extractors.base.get_session_extractor_type(session_path, task_collection=task_collection) - _logger.info(f"Extracting {session_path} as {extractor_type}") + warnings.warn('`extract_all` functions soon to be deprecated, use `bpod_trials.get_bpod_extractor` instead', FutureWarning) + if not extractor_type: + extractor_type = get_session_extractor_type(session_path, task_collection=task_collection) + _logger.info(f'Extracting {session_path} as {extractor_type}') bpod_trials = bpod_trials or rawio.load_data(session_path, task_collection=task_collection) settings = settings or rawio.load_settings(session_path, task_collection=task_collection) _logger.info(f'{extractor_type} session on {settings["PYBPOD_BOARD"]}') @@ -58,13 +90,56 @@ def extract_all(session_path, save=True, bpod_trials=None, settings=None, task_c elif extractor_type == 'habituation': if settings['IBLRIG_VERSION_TAG'] and \ parse_version(settings['IBLRIG_VERSION_TAG']) <= parse_version('5.0.0'): - _logger.warning("No extraction of legacy habituation sessions") + _logger.warning('No extraction of legacy habituation sessions') return None, None, None trials, files_trials = habituation_trials.extract_all(session_path, bpod_trials=bpod_trials, settings=settings, save=save, task_collection=task_collection, save_path=save_path) wheel = None files_wheel = [] else: - raise ValueError(f"No extractor for task {extractor_type}") + raise ValueError(f'No extractor for task {extractor_type}') _logger.info('session extracted \n') # timing info in log return trials, wheel, (files_trials + files_wheel) if save else None + + +def get_bpod_extractor(session_path, protocol=None, task_collection='raw_behavior_data') -> BaseBpodTrialsExtractor: + """ + Returns an extractor for a given session. + + Parameters + ---------- + session_path : str, pathlib.Path + The path to the session to be extracted. + protocol : str, optional + The protocol name, otherwise uses the PYBPOD_PROTOCOL key in iblrig task settings files. + task_collection : str + The folder within the session that contains the raw task data. + + Returns + ------- + BaseBpodTrialsExtractor + An instance of the task extractor class, instantiated with the session path. + """ + builtins = { + 'HabituationTrials': HabituationTrials, + 'TrainingTrials': TrainingTrials, + 'BiasedTrials': BiasedTrials, + 'EphysTrials': EphysTrials + } + if protocol: + class_name = protocol2extractor(protocol) + else: + class_name = get_bpod_extractor_class(session_path, task_collection=task_collection) + if class_name in builtins: + return builtins[class_name](session_path) + + # look if there are custom extractor types in the personal projects repo + if not class_name.startswith('projects.'): + class_name = 'projects.' + class_name + module, class_name = class_name.rsplit('.', 1) + mdl = importlib.import_module(module) + extractor_class = getattr(mdl, class_name, None) + if extractor_class: + return extractor_class(session_path) + else: + raise ValueError(f'extractor {class_name} not found') diff --git a/ibllib/io/extractors/camera.py b/ibllib/io/extractors/camera.py index 73b929b7e..f3e5dbd1d 100644 --- a/ibllib/io/extractors/camera.py +++ b/ibllib/io/extractors/camera.py @@ -1,5 +1,5 @@ -""" Camera extractor functions -This module handles extraction of camera timestamps for both Bpod and FPGA. +""" Camera extractor functions. +This module handles extraction of camera timestamps for both Bpod and DAQ. 
""" import logging from functools import partial @@ -35,12 +35,11 @@ def extract_camera_sync(sync, chmap=None): :return: dictionary containing camera timestamps """ assert chmap - sr = get_sync_fronts(sync, chmap['right_camera']) - sl = get_sync_fronts(sync, chmap['left_camera']) - sb = get_sync_fronts(sync, chmap['body_camera']) - return {'right': sr.times[::2], - 'left': sl.times[::2], - 'body': sb.times[::2]} + times = {} + for k in filter(lambda x: x.endswith('_camera'), chmap): + label, _ = k.rsplit('_', 1) + times[label] = get_sync_fronts(sync, chmap[k]).times[::2] + return times def get_video_length(video_path): @@ -58,6 +57,9 @@ def get_video_length(video_path): class CameraTimestampsFPGA(BaseExtractor): + """ + Extractor for videos using DAQ sync and channel map. + """ def __init__(self, label, session_path=None): super().__init__(session_path) @@ -70,24 +72,39 @@ def __init__(self, label, session_path=None): def __del__(self): _logger.setLevel(self._log_level) - def _extract(self, sync=None, chmap=None, video_path=None, + def _extract(self, sync=None, chmap=None, video_path=None, sync_label='audio', display=False, extrapolate_missing=True, **kwargs): """ - The raw timestamps are taken from the FPGA. These are the times of the camera's frame TTLs. - If the pin state file exists, these timestamps are aligned to the video frames using the - audio TTLs. Frames missing from the embedded frame count are removed from the timestamps - array. + The raw timestamps are taken from the DAQ. These are the times of the camera's frame TTLs. + If the pin state file exists, these timestamps are aligned to the video frames using + task TTLs (typically the audio TTLs). Frames missing from the embedded frame count are + removed from the timestamps array. If the pin state file does not exist, the left and right camera timestamps may be aligned using the wheel data. - :param sync: dictionary 'times', 'polarities' of fronts detected on sync trace. - :param chmap: dictionary containing channel indices. Default to constant. - :param video_path: an optional path for fetching the number of frames. If None, - the video is loaded from the session path. If an int is provided this is taken to be - the total number of frames. - :param display: if True, the audio and GPIO fronts are plotted. - :param extrapolate_missing: if True, any missing timestamps at the beginning and end of - the session are extrapolated based on the median frame rate, otherwise they will be NaNs. - :return: a numpy array of camera timestamps + + Parameters + ---------- + sync : dict + Dictionary 'times', 'polarities' of fronts detected on sync trace. + chmap : dict + Dictionary containing channel indices. Default to constant. + video_path : str, pathlib.Path, int + An optional path for fetching the number of frames. If None, the video is loaded from + the session path. If an int is provided this is taken to be the total number of frames. + sync_label : str + The sync label of the channel that's wired to the GPIO for synchronising the times. + display : bool + If true, the TTL and GPIO fronts are plotted. + extrapolate_missing : bool + If true, any missing timestamps at the beginning and end of the session are + extrapolated based on the median frame rate, otherwise they will be NaNs. + **kwargs + Extra keyword arguments (unused). + + Returns + ------- + numpy.array + The extracted camera timestamps. 
""" fpga_times = extract_camera_sync(sync=sync, chmap=chmap) count, (*_, gpio) = raw.load_embedded_frame_data(self.session_path, self.label) @@ -100,18 +117,18 @@ def _extract(self, sync=None, chmap=None, video_path=None, length = (video_path if isinstance(video_path, int) else get_video_length(video_path)) _logger.debug(f'Number of video frames = {length}') - if gpio is not None and gpio['indices'].size > 1: - _logger.info('Aligning to audio TTLs') - # Extract audio TTLs - audio = get_sync_fronts(sync, chmap['audio']) + if gpio is not None and gpio['indices'].size > 1 and sync_label is not None: + _logger.info(f'Aligning to {sync_label} TTLs') + # Extract sync TTLs + ttl = get_sync_fronts(sync, chmap[sync_label]) _, ts = raw.load_camera_ssv_times(self.session_path, self.label) try: """ - NB: Some of the audio TTLs occur very close together, and are therefore not + NB: Some of the sync TTLs occur very close together, and are therefore not reflected in the pin state. This function removes those. Also converts frame - times to FPGA time. + times to DAQ time. """ - gpio, audio, ts = groom_pin_state(gpio, audio, ts, display=display) + gpio, ttl, ts = groom_pin_state(gpio, ttl, ts, display=display) """ The length of the count and pin state are regularly longer than the length of the video file. Here we assert that the video is either shorter or the same @@ -123,16 +140,15 @@ def _extract(self, sync=None, chmap=None, video_path=None, count = count[:length] else: assert length == count.size, 'fewer counts than frames' - raw_ts = fpga_times[self.label] assert raw_ts.shape[0] > 0, 'no timestamps found in channel indicated for ' \ f'{self.label} camera' - return align_with_audio(raw_ts, audio, gpio, count, - display=display, - extrapolate_missing=extrapolate_missing) + return align_with_gpio(raw_ts, ttl, gpio, count, + display=display, + extrapolate_missing=extrapolate_missing) except AssertionError as ex: - _logger.critical('Failed to extract using audio: %s', ex) + _logger.critical('Failed to extract using %s: %s', sync_label, ex) - # If you reach here extracting using audio TTLs was not possible + # If you reach here extracting using sync TTLs was not possible _logger.warning('Alignment by wheel data not yet implemented') if length < raw_ts.size: df = raw_ts.size - length @@ -197,14 +213,14 @@ def _extract(self, video_path=None, display=False, extrapolate_missing=True, **k """ The raw timestamps are taken from the Bpod. These are the times of the camera's frame TTLs. If the pin state file exists, these timestamps are aligned to the video frames using the - audio TTLs. Frames missing from the embedded frame count are removed from the timestamps + sync TTLs. Frames missing from the embedded frame count are removed from the timestamps array. If the pin state file does not exist, the left camera timestamps may be aligned using the wheel data. :param video_path: an optional path for fetching the number of frames. If None, the video is loaded from the session path. If an int is provided this is taken to be the total number of frames. - :param display: if True, the audio and GPIO fronts are plotted. + :param display: if True, the TTL and GPIO fronts are plotted. :param extrapolate_missing: if True, any missing timestamps at the beginning and end of the session are extrapolated based on the median frame rate, otherwise they will be NaNs. 
:return: a numpy array of camera timestamps @@ -221,15 +237,15 @@ def _extract(self, video_path=None, display=False, extrapolate_missing=True, **k # Check if the GPIO is usable for extraction. GPIO is None if the file does not exist, # is empty, or contains only one value (i.e. doesn't change) if gpio is not None and gpio['indices'].size > 1: - _logger.info('Aligning to audio TTLs') - task_collection = kwargs.get('task_collection', 'raw_behavior_data') + _logger.info('Aligning to sync TTLs') # Extract audio TTLs - _, audio = raw.load_bpod_fronts(self.session_path, data=self.bpod_trials, task_collection=task_collection) + _, audio = raw.load_bpod_fronts(self.session_path, data=self.bpod_trials, + task_collection=self.task_collection) _, ts = raw.load_camera_ssv_times(self.session_path, 'left') """ - There are many audio TTLs that are for some reason missed by the GPIO. Conversely + There are many sync TTLs that are for some reason missed by the GPIO. Conversely the last GPIO doesn't often correspond to any audio TTL. These will be removed. - The drift appears to be less severe than the FPGA, so when assigning TTLs we'll take + The drift appears to be less severe than the DAQ, so when assigning TTLs we'll take the nearest TTL within 500ms. The go cue TTLs comprise two short pulses ~3ms apart. We will fuse any TTLs less than 5ms apart to make assignment more accurate. """ @@ -241,8 +257,8 @@ def _extract(self, video_path=None, display=False, extrapolate_missing=True, **k else: assert length == count.size, 'fewer counts than frames' - return align_with_audio(raw_ts, audio, gpio, count, - extrapolate_missing, display=display) + return align_with_gpio(raw_ts, audio, gpio, count, + extrapolate_missing, display=display) except AssertionError as ex: _logger.critical('Failed to extract using audio: %s', ex) @@ -333,33 +349,43 @@ def _times_from_bpod(self): return frame_times -def align_with_audio(timestamps, audio, pin_state, count, - extrapolate_missing=True, display=False): +def align_with_gpio(timestamps, ttl, pin_state, count, extrapolate_missing=True, display=False): """ - Groom the raw FPGA or Bpod camera timestamps using the frame embedded audio TTLs and frame - counter. - :param timestamps: An array of raw FPGA or Bpod camera timestamps - :param audio: An array of FPGA or Bpod audio TTL times - :param pin_state: An array of camera pin states - :param count: An array of frame numbers - :param extrapolate_missing: If true and the number of timestamps is fewer than the number of - frame counts, the remaining timestamps are extrapolated based on the frame rate, otherwise - they are NaNs - :param display: Plot the resulting timestamps - :return: The corrected frame timestamps + Groom the raw DAQ or Bpod camera timestamps using the frame embedded GPIO and frame counter. + + Parameters + ---------- + timestamps : numpy.array + An array of raw DAQ or Bpod camera timestamps. + ttl : dict + A dictionary of DAQ sync TTLs, with keys {'times', 'polarities'}. + pin_state : dict + A dictionary containing GPIO pin state values, with keys {'indices', 'polarities'}. + count : numpy.array + An array of frame numbers. + extrapolate_missing : bool + If true and the number of timestamps is fewer than the number of frame counts, the + remaining timestamps are extrapolated based on the frame rate, otherwise they are NaNs. + display : bool + Plot the resulting timestamps. + + Returns + ------- + numpy.array + The corrected frame timestamps. 
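+
+    Examples
+    --------
+    A sketch of the intended call order (`raw_ts`, `ttl`, `gpio`, `ts` and `count` are
+    assumed to be loaded elsewhere; see the extractor classes above for the full usage):
+
+    >>> gpio, ttl, ts = groom_pin_state(gpio, ttl, ts)  # remove TTLs missed by the camera
+    >>> frame_times = align_with_gpio(raw_ts, ttl, gpio, count)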
""" # Some assertions made on the raw data # assert count.size == pin_state.size, 'frame count and pin state size mismatch' assert all(np.diff(count) > 0), 'frame count not strictly increasing' - assert all(np.diff(timestamps) > 0), 'FPGA/Bpod camera times not strictly increasing' - same_n_ttl = pin_state['times'].size == audio['times'].size - assert same_n_ttl, 'more audio TTLs detected on camera than TTLs sent' + assert all(np.diff(timestamps) > 0), 'DAQ/Bpod camera times not strictly increasing' + same_n_ttl = pin_state['times'].size == ttl['times'].size + assert same_n_ttl, 'more ttl TTLs detected on camera than TTLs sent' - """Here we will ensure that the FPGA camera times match the number of video frames in + """Here we will ensure that the DAQ camera times match the number of video frames in length. We will make the following assumptions: - 1. The number of FPGA camera times is equal to or greater than the number of video frames. - 2. No TTLs were missed between the camera and FPGA. + 1. The number of DAQ camera times is equal to or greater than the number of video frames. + 2. No TTLs were missed between the camera and DAQ. 3. No pin states were missed by Bonsai. 4 No pixel count data was missed by Bonsai. @@ -377,23 +403,22 @@ def align_with_audio(timestamps, audio, pin_state, count, """ # Align on first pin state change first_uptick = pin_state['indices'][0] - first_ttl = np.searchsorted(timestamps, audio['times'][0]) - """Here we find up to which index in the FPGA times we discard by taking the difference - between the index of the first pin state change (when the audio TTL was reported by the - camera) and the index of the first audio TTL in FPGA time. We subtract the difference + first_ttl = np.searchsorted(timestamps, ttl['times'][0]) + """Here we find up to which index in the DAQ times we discard by taking the difference + between the index of the first pin state change (when the sync TTL was reported by the + camera) and the index of the first sync TTL in DAQ time. We subtract the difference between the frame count at the first pin state change and the index to account for any video frames that were not saved during this period (we will remove those from the - camera FPGA times later). + camera DAQ times later). """ - # Minus any frames that were dropped between the start of frame acquisition and the - # first TTL + # Minus any frames that were dropped between the start of frame acquisition and the first TTL start = first_ttl - first_uptick - (count[first_uptick] - first_uptick) # Get approximate frame rate for extrapolating timestamps (if required) frate = round(1 / np.nanmedian(np.diff(timestamps))) if start < 0: n_missing = abs(start) - _logger.warning(f'{n_missing} missing FPGA/Bpod timestamp(s) at start; ' + _logger.warning(f'{n_missing} missing DAQ/Bpod timestamp(s) at start; ' f'{"extrapolating" if extrapolate_missing else "prepending nans"}') to_app = (timestamps[0] - (np.arange(n_missing, 0, -1) + 1) / frate if extrapolate_missing @@ -404,15 +429,13 @@ def align_with_audio(timestamps, audio, pin_state, count, # Remove the extraneous timestamps from the beginning and end end = count[-1] + 1 + start ts = timestamps[start:end] - n_missing = count[-1] - ts.size + 1 - if n_missing > 0: - # if (n_missing := count[-1] - ts.size + 1) > 0: # py3.8 + if (n_missing := count[-1] - ts.size + 1) > 0: """ - For ephys sessions there may be fewer FPGA times than frame counts if SpikeGLX is turned - off before the video acquisition workflow. 
For Bpod this always occurs because Bpod + For ephys sessions there may be fewer DAQ times than frame counts if DAQ acquisition is + turned off before the video acquisition workflow. For Bpod this always occurs because Bpod finishes before the camera workflow. For Bpod the times are already extrapolated for these late frames.""" - _logger.warning(f'{n_missing} fewer FPGA/Bpod timestamps than frame counts; ' + _logger.warning(f'{n_missing} fewer DAQ/Bpod timestamps than frame counts; ' f'{"extrapolating" if extrapolate_missing else "appending nans"}') to_app = ((np.arange(n_missing, ) + 1) / frate + ts[-1] if extrapolate_missing @@ -423,8 +446,8 @@ def align_with_audio(timestamps, audio, pin_state, count, # Remove the rest of the dropped frames ts = ts[count] - assert np.searchsorted(ts, audio['times'][0]) == first_uptick,\ - 'time of first audio TTL doesn\'t match after alignment' + assert np.searchsorted(ts, ttl['times'][0]) == first_uptick, \ + 'time of first sync TTL doesn\'t match after alignment' if ts.size != count.size: _logger.error('number of timestamps and frames don\'t match after alignment') @@ -434,9 +457,9 @@ def align_with_audio(timestamps, audio, pin_state, count, y = within_ranges(np.arange(ts.size), pin_state['indices'].reshape(-1, 2)).astype(float) y *= 1e-5 # For scale when zoomed in axes.plot(ts, y, marker='d', color='blue', drawstyle='steps-pre', label='GPIO') - axes.plot(ts, np.zeros_like(ts), 'kx', label='FPGA timestamps') - vertical_lines(audio['times'], ymin=0, ymax=1e-5, - color='r', linestyle=':', ax=axes, label='audio TTL') + axes.plot(ts, np.zeros_like(ts), 'kx', label='DAQ timestamps') + vertical_lines(ttl['times'], ymin=0, ymax=1e-5, + color='r', linestyle=':', ax=axes, label='sync TTL') plt.legend() return ts @@ -451,69 +474,101 @@ def attribute_times(arr, events, tol=.1, injective=True, take='first'): second array, the absolute difference is taken and the index of either the first sufficiently close value, or simply the closest one, is assigned. - If injective is True, once a value has been assigned, to a value it can't be assigned to + If injective is True, once a value has been assigned to an event it can't be assigned to another. In other words there is a one-to-one mapping between the two arrays. - :param arr: An array of event times to attribute to those in `events` - :param events: An array of event times considered a subset of `arr` - :param tol: The max absolute difference between values in order to be considered a match - :param injective: If true, once a value has been assigned it will not be assigned again - :param take: If 'first' the first value within tolerance is assigned; if 'nearest' the - closest value is assigned - :returns Numpy array the same length as `values` + Parameters + ---------- + arr : numpy.array + An array of event times to attribute to those in `events`. + events : numpy.array + An array of event times considered a subset of `arr`. + tol : float + The max absolute difference between values in order to be considered a match. + injective : bool + If true, once a value has been assigned it will not be assigned again. + take : {'first', 'nearest', 'after'} + If 'first' the first value within tolerance is assigned; if 'nearest' the + closest value is assigned; if 'after' assign the first event after. + + Returns + ------- + numpy.array + An array the same length as `events`. 
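+
+    Examples
+    --------
+    With the default arguments (``tol=0.1``, ``take='first'``, injective):
+
+    >>> arr = np.array([0., 1., 2., 3.])
+    >>> attribute_times(arr, np.array([0.05, 1.95, 9.]))
+    array([ 0,  2, -1])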
""" - take = take.lower() - if take not in ('first', 'nearest'): - raise ValueError('Parameter `take` must be either "first" or "nearest"') + if (take := take.lower()) not in ('first', 'nearest', 'after'): + raise ValueError('Parameter `take` must be either "first", "nearest", or "after"') stack = np.ma.masked_invalid(arr, copy=False) stack.fill_value = np.inf assigned = np.full(events.shape, -1, dtype=int) # Initialize output array + min_tol = 0 if take == 'after' else -tol for i, x in enumerate(events): - dx = np.abs(stack.filled() - x) - if dx.min() < tol: # is any value within tolerance - idx = np.where(dx < tol)[0][0] if take == 'first' else dx.argmin() + dx = stack.filled() - x + candidates = np.logical_and(min_tol < dx, dx < tol) + if any(candidates): # is any value within tolerance + idx = np.abs(dx).argmin() if take == 'nearest' else np.where(candidates)[0][0] assigned[i] = idx stack.mask[idx] = injective # If one-to-one, remove the assigned value return assigned -def groom_pin_state(gpio, audio, ts, tolerance=2., display=False, take='first', min_diff=0.): +def groom_pin_state(gpio, ttl, ts, tolerance=2., display=False, take='first', min_diff=0.): """ - Align the GPIO pin state to the FPGA audio TTLs. Any audio TTLs not reflected in the pin - state are removed from the dict and the times of the detected fronts are converted to FPGA - time. At the end of this the number of GPIO fronts should equal the number of audio fronts. + Align the GPIO pin state to the DAQ sync TTLs. Any sync TTLs not reflected in the pin + state are removed from the dict and the times of the detected fronts are converted to DAQ + time. At the end of this the number of GPIO fronts should equal the number of TTLs. Note: - - This function is ultra safe: we probably don't need assign all the ups and down fronts + - This function is ultra safe: we probably don't need assign all the ups and down fronts. separately and could potentially even align the timestamps without removing the missed fronts - - The input gpio and audio dicts may be modified by this function + - The input gpio and TTL dicts may be modified by this function. - For training sessions the frame rate is only 30Hz and the TTLs tend to be broken up by small gaps. Setting the min_diff to 5ms helps the timestamp assignment accuracy. - :param gpio: array of GPIO pin state values - :param audio: dict of FPGA audio TTLs (see ibllib.io.extractors.ephys_fpga._get_sync_fronts) - :param ts: camera frame times - :param tolerance: two pulses need to be within this many seconds to be considered related - :param take: If 'first' the first value within tolerance is assigned; if 'nearest' the - closest value is assigned - :param display: If true, the resulting timestamps are plotted against the raw audio signal - :param min_diff: Audio TTL fronts less than min_diff seconds apart will be removed - :returns: dict of GPIO FPGA front indices, polarities and FPGA aligned times - :returns: audio times and polarities sans the TTLs not detected in the frame data - :returns: frame times in FPGA time + + Parameters + ---------- + gpio : dict + A dictionary containing GPIO pin state values, with keys {'indices', 'polarities'}. + ttl : dict + A dictionary of DAQ sync TTLs, with keys {'times', 'polarities'}. + ts : numpy.array + The camera frame times (the camera frame TTLs acquired by the main DAQ). + tolerance : float + Two pulses need to be within this many seconds to be considered related. 
+ display : bool + If true, the resulting timestamps are plotted against the raw audio signal. + take : {'first', 'nearest'} + If 'first' the first value within tolerance is assigned; if 'nearest' the + closest value is assigned. + min_diff : float + Sync TTL fronts less than min_diff seconds apart will be removed. + + Returns + ------- + dict + Dictionary of GPIO DAQ front indices, polarities and DAQ aligned times. + dict + Sync TTL times and polarities sans the TTLs not detected in the frame data. + numpy.array + Frame times in DAQ time. + + See Also + -------- + ibllib.io.extractors.ephys_fpga._get_sync_fronts """ # Check that the dimensions match if np.any(gpio['indices'] >= ts.size): _logger.warning('GPIO events occurring beyond timestamps array length') keep = gpio['indices'] < ts.size gpio = {k: gpio[k][keep] for k, v in gpio.items()} - assert audio and audio['times'].size > 0, 'no audio TTLs for session' - assert audio['times'].size == audio['polarities'].size, 'audio data dimension mismatch' + assert ttl and ttl['times'].size > 0, 'no sync TTLs for session' + assert ttl['times'].size == ttl['polarities'].size, 'sync TTL data dimension mismatch' # make sure that there are no 2 consecutive fall or consecutive rise events - assert np.all(np.abs(np.diff(audio['polarities'])) == 2), 'consecutive high/low audio events' + assert np.all(np.abs(np.diff(ttl['polarities'])) == 2), 'consecutive high/low sync TTL events' # make sure first TTL is high - assert audio['polarities'][0] == 1 - # make sure audio times in order - assert np.all(np.diff(audio['times']) > 0) + assert ttl['polarities'][0] == 1 + # make sure ttl times in order + assert np.all(np.diff(ttl['times']) > 0) # make sure raw timestamps increase assert np.all(np.diff(ts) > 0), 'timestamps must strictly increase' # make sure there are state changes @@ -521,22 +576,22 @@ def groom_pin_state(gpio, audio, ts, tolerance=2., display=False, take='first', # # make sure first GPIO state is high assert gpio['polarities'][0] == 1 """ - Some audio TTLs appear to be so short that they are not recorded by the camera. These can + Some sync TTLs appear to be so short that they are not recorded by the camera. These can be as short as a few microseconds. Applying a cutoff based on framerate was unsuccessful. - Assigning each audio TTL to each pin state change is not easy because some onsets occur very + Assigning each sync TTL to each pin state change is not easy because some onsets occur very close together (sometimes < 70ms), on the order of the delay between TTL and frame time. - Also, the two clocks have some degree of drift, so the delay between audio TTL and pin state + Also, the two clocks have some degree of drift, so the delay between sync TTL and pin state change may be zero or even negative. - Here we split the events into audio onsets (lo->hi) and audio offsets (hi->lo). For each - uptick in the GPIO pin state, we take the first audio onset time that was within 100ms of it. - We ensure that each audio TTL is assigned only once, so a TTL that is closer to frame 3 than + Here we split the events into sync TTL onsets (lo->hi) and TTL offsets (hi->lo). For each + uptick in the GPIO pin state, we take the first TTL onset time that was within 100ms of it. + We ensure that each sync TTL is assigned only once, so a TTL that is closer to frame 3 than frame 1 may still be assigned to frame 1. 
""" ifronts = gpio['indices'] # The pin state flips - audio_times = audio['times'] - if ifronts.size != audio['times'].size: - _logger.warning('more audio TTLs than GPIO state changes, assigning timestamps') + sync_times = ttl['times'] + if ifronts.size != ttl['times'].size: + _logger.warning('more sync TTLs than GPIO state changes, assigning timestamps') to_remove = np.zeros(ifronts.size, dtype=bool) # unassigned GPIO fronts to remove low2high = ifronts[gpio['polarities'] == 1] high2low = ifronts[gpio['polarities'] == -1] @@ -544,26 +599,23 @@ def groom_pin_state(gpio, audio, ts, tolerance=2., display=False, take='first', # Remove and/or fuse short TTLs if min_diff > 0: - short, = np.where(np.diff(audio['times']) < min_diff) - audio_times = np.delete(audio['times'], np.r_[short, short + 1]) + short, = np.where(np.diff(ttl['times']) < min_diff) + sync_times = np.delete(ttl['times'], np.r_[short, short + 1]) _logger.debug(f'Removed {short.size * 2} fronts TLLs less than ' f'{min_diff * 1e3:.0f}ms apart') - assert audio_times.size > 0, f'all audio TTLs less than {min_diff}s' + assert sync_times.size > 0, f'all sync TTLs less than {min_diff}s' # Onsets ups = ts[low2high] - ts[low2high][0] # times relative to first GPIO high - onsets = audio_times[::2] - audio_times[0] # audio times relative to first onset - # assign GPIO fronts to audio onset + onsets = sync_times[::2] - sync_times[0] # TTL times relative to first onset + # assign GPIO fronts to ttl onset assigned = attribute_times(onsets, ups, tol=tolerance, take=take) unassigned = np.setdiff1d(np.arange(onsets.size), assigned[assigned > -1]) if unassigned.size > 0: - _logger.debug(f'{unassigned.size} audio TTL rises were not detected by the camera') + _logger.debug(f'{unassigned.size} sync TTL rises were not detected by the camera') # Check that all pin state upticks could be attributed to an onset TTL - missed = assigned == -1 - if np.any(missed): - # if np.any(missed := assigned == -1): # py3.8 - _logger.warning(f'{sum(missed)} pin state rises could ' - f'not be attributed to an audio TTL') + if np.any(missed := assigned == -1): + _logger.warning(f'{sum(missed)} pin state rises could not be attributed to a sync TTL') if display: ax = plt.subplot() vertical_lines(ups[assigned > -1], @@ -574,44 +626,41 @@ def groom_pin_state(gpio, audio, ts, tolerance=2., display=False, take='first', label='unassigned GPIO up state') vertical_lines(onsets[unassigned], linestyle=':', color='k', ax=ax, - alpha=0.3, label='audio onset') + alpha=0.3, label='sync TTL onset') vertical_lines(onsets[assigned], - linestyle=':', color='b', ax=ax, label='assigned audio onset') + linestyle=':', color='b', ax=ax, label='assigned TTL onset') plt.legend() plt.show() # Remove the missed fronts to_remove = np.in1d(gpio['indices'], low2high[missed]) assigned = assigned[~missed] - onsets_ = audio_times[::2][assigned] + onsets_ = sync_times[::2][assigned] # Offsets downs = ts[high2low] - ts[high2low][0] - offsets = audio_times[1::2] - audio_times[1] + offsets = sync_times[1::2] - sync_times[1] assigned = attribute_times(offsets, downs, tol=tolerance, take=take) unassigned = np.setdiff1d(np.arange(offsets.size), assigned[assigned > -1]) if unassigned.size > 0: - _logger.debug(f'{unassigned.size} audio TTL falls were not detected by the camera') + _logger.debug(f'{unassigned.size} sync TTL falls were not detected by the camera') # Check that all pin state downticks could be attributed to an offset TTL - missed = assigned == -1 - if np.any(missed): - # if np.any(missed := 
assigned == -1):  # py3.8
-            _logger.warning(f'{sum(missed)} pin state falls could '
-                            f'not be attributed to an audio TTL')
+        if np.any(missed := assigned == -1):
+            _logger.warning(f'{sum(missed)} pin state falls could not be attributed to a sync TTL')
         # Remove the missed fronts
         to_remove |= np.in1d(gpio['indices'], high2low[missed])
         assigned = assigned[~missed]
-        offsets_ = audio_times[1::2][assigned]
+        offsets_ = sync_times[1::2][assigned]

-        # Audio groomed
+        # Sync TTLs groomed
         if np.any(to_remove):
             # Check for any orphaned fronts (only one pin state edge was assigned)
             to_remove = np.pad(to_remove, (0, to_remove.size % 2), 'edge')  # Ensure even size
             # Perform xor to find GPIOs where only onset or offset is marked for removal
             orphaned = to_remove.reshape(-1, 2).sum(axis=1) == 1
             if orphaned.any():
-                """If there are orphaned GPIO fronts (i.e. only one edge was assigned to an
-                audio front), remove the orphaned front its assigned audio TTL. In other words
-                if both edges cannot be assigned to an audio TTL, we ignore the TTL entirely.
+                """If there are orphaned GPIO fronts (i.e. only one edge was assigned to a sync
+                TTL front), remove the orphaned front and its assigned sync TTL. In other words
+                if both edges cannot be assigned to a sync TTL, we ignore the TTL entirely.
                 This is a sign that the assignment was bad and extraction may fail."""
                 _logger.warning('Some onsets but not offsets (or vice versa) were not assigned; '
                                 'this may be a sign of faulty wiring or clock drift')
@@ -625,7 +674,7 @@ def groom_pin_state(gpio, audio, ts, tolerance=2., display=False, take='first',
                 orphaned_offsets, = np.where(~to_remove.reshape(-1, 2)[:, 1] & orphaned)
                 for i, v in enumerate(orphaned_offsets):
                     orphaned_offsets[i] -= to_remove.reshape(-1, 2)[:v, 1].sum()
-                # Remove orphaned audio onsets and offsets
+                # Remove orphaned TTL onsets and offsets
                 onsets_ = np.delete(onsets_, orphaned_onsets[orphaned_onsets < onsets_.size])
                 offsets_ = np.delete(offsets_, orphaned_offsets[orphaned_offsets < offsets_.size])
                 _logger.debug(f'{orphaned.sum()} orphaned TTLs removed')
@@ -636,45 +685,44 @@ def groom_pin_state(gpio, audio, ts, tolerance=2., display=False, take='first',
         ifronts = gpio['indices']

         # Assert that we've removed discrete TTLs
-        # A failure means e.g. an up-going front of one TTL was missed
-        # but not the down-going one.
+        # A failure means e.g. an up-going front of one TTL was missed but not the down-going one.
assert np.all(np.abs(np.diff(gpio['polarities'])) == 2) assert gpio['polarities'][0] == 1 - audio_ = {'times': np.empty(ifronts.size), 'polarities': gpio['polarities']} - audio_['times'][::2] = onsets_ - audio_['times'][1::2] = offsets_ + ttl_ = {'times': np.empty(ifronts.size), 'polarities': gpio['polarities']} + ttl_['times'][::2] = onsets_ + ttl_['times'][1::2] = offsets_ else: - audio_ = audio + ttl_ = ttl.copy() - # Align the frame times to FPGA - fcn_a2b, drift_ppm = dsp.sync_timestamps(ts[ifronts], audio_['times']) - _logger.debug(f'frame audio alignment drift = {drift_ppm:.2f}ppm') + # Align the frame times to DAQ + fcn_a2b, drift_ppm = dsp.sync_timestamps(ts[ifronts], ttl_['times']) + _logger.debug(f'frame ttl alignment drift = {drift_ppm:.2f}ppm') # Add times to GPIO dict gpio['times'] = fcn_a2b(ts[ifronts]) if display: # Plot all the onsets and offsets ax = plt.subplot() - # All Audio TTLS - squares(audio['times'], audio['polarities'], - ax=ax, label='audio TTLs', linestyle=':', color='k', yrange=[0, 1], alpha=0.3) + # All sync TTLs + squares(ttl['times'], ttl['polarities'], + ax=ax, label='sync TTLs', linestyle=':', color='k', yrange=[0, 1], alpha=0.3) # GPIO x = np.insert(gpio['times'], 0, 0) y = np.arange(x.size) % 2 squares(x, y, ax=ax, label='GPIO') y = within_ranges(np.arange(ts.size), ifronts.reshape(-1, 2)) # 0 or 1 for each frame ax.plot(fcn_a2b(ts), y, 'kx', label='cam times') - # Assigned audio - squares(audio_['times'], audio_['polarities'], - ax=ax, label='assigned audio TTL', linestyle=':', color='g', yrange=[0, 1]) + # Assigned ttl + squares(ttl_['times'], ttl_['polarities'], + ax=ax, label='assigned sync TTL', linestyle=':', color='g', yrange=[0, 1]) ax.legend() - plt.xlabel('FPGA time (s)') + plt.xlabel('DAQ time (s)') ax.set_yticks([0, 1]) - ax.set_title('GPIO - audio TTL alignment') + ax.set_title('GPIO - sync TTL alignment') plt.show() - return gpio, audio_, fcn_a2b(ts) + return gpio, ttl_, fcn_a2b(ts) def extract_all(session_path, sync_type=None, save=True, **kwargs): diff --git a/ibllib/io/extractors/default_channel_maps.py b/ibllib/io/extractors/default_channel_maps.py index 481bcb2ce..0eaab331d 100644 --- a/ibllib/io/extractors/default_channel_maps.py +++ b/ibllib/io/extractors/default_channel_maps.py @@ -52,5 +52,37 @@ 'rotary_encoder_1': 6, 'audio': 7, 'bpod': 16} + }, + + 'mesoscope': + {'timeline': {'left_camera': 0, + 'right_camera': 1, + 'belly_camera': 2, + 'frame2ttl': 3, + 'audio': 4, + 'bpod': 5, + 'rotary_encoder': 6, + 'neural_frames': 7} } } + + +def all_default_labels(): + """ + Returns the set of channel map channel names. + + Returns + ------- + set of str + The channel names present throughout all default channel maps. 
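+
+    Examples
+    --------
+    >>> 'bpod' in all_default_labels()
+    True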
+ """ + keys = set() + + def _iter_map(d): + for k, v in d.items(): + if isinstance(v, dict): + _iter_map(v) + else: + keys.add(k) + _iter_map(DEFAULT_MAPS) + return keys diff --git a/ibllib/io/extractors/ephys_fpga.py b/ibllib/io/extractors/ephys_fpga.py index 7fe9e4758..98bdcdd25 100644 --- a/ibllib/io/extractors/ephys_fpga.py +++ b/ibllib/io/extractors/ephys_fpga.py @@ -5,6 +5,7 @@ import logging from pathlib import Path import uuid +import re import matplotlib.pyplot as plt import numpy as np @@ -18,7 +19,6 @@ import ibllib.exceptions as err from ibllib.io import raw_data_loaders, session_params from ibllib.io.extractors.bpod_trials import extract_all as bpod_extract_all -from ibllib.io.extractors.opto_trials import LaserBool import ibllib.io.extractors.base as extractors_base from ibllib.io.extractors.training_wheel import extract_wheel_moves import ibllib.plots as plots @@ -203,8 +203,7 @@ def _assign_events_bpod(bpod_t, bpod_polarities, ignore_first_valve=True): return t_trial_start, t_valve_open, t_iti_in -def _rotary_encoder_positions_from_fronts(ta, pa, tb, pb, ticks=WHEEL_TICKS, radius=1, - coding='x4'): +def _rotary_encoder_positions_from_fronts(ta, pa, tb, pb, ticks=WHEEL_TICKS, radius=WHEEL_RADIUS_CM, coding='x4'): """ Extracts the rotary encoder absolute position as function of time from fronts detected on the 2 channels. Outputs in units of radius parameters, by default radians @@ -361,14 +360,14 @@ def _clean_audio(audio, display=False): dd = np.diff(audio['times']) 1 / np.median(dd[::2]) # 2ms up 1 / np.median(dd[1::2]) # 4.666 ms down - 1 / (np.median(dd[::2]) + np.median(dd[1::2])) # both sum to 150 Hx + 1 / (np.median(dd[::2]) + np.median(dd[1::2])) # both sum to 150 Hz This only runs on sessions when the bug is detected and leaves others untouched """ DISCARD_THRESHOLD = 0.01 average_150_hz = np.mean(1 / np.diff(audio['times'][audio['polarities'] == 1]) > 140) naudio = audio['times'].size if average_150_hz > 0.7 and naudio > 100: - _logger.warning("Soundcard signal on FPGA seems to have been mixed with 150Hz camera") + _logger.warning('Soundcard signal on FPGA seems to have been mixed with 150Hz camera') keep_ind = np.r_[np.diff(audio['times']) > DISCARD_THRESHOLD, False] keep_ind = np.logical_and(keep_ind, audio['polarities'] == -1) keep_ind = np.where(keep_ind)[0] @@ -395,7 +394,7 @@ def _clean_frame2ttl(frame2ttl, display=False): frame2ttl_ = {'times': np.delete(frame2ttl['times'], iko), 'polarities': np.delete(frame2ttl['polarities'], iko)} if iko.size > (0.1 * frame2ttl['times'].size): - _logger.warning(f'{iko.size} ({iko.size / frame2ttl["times"].size:.2%} %) ' + _logger.warning(f'{iko.size} ({iko.size / frame2ttl["times"].size:.2%}) ' f'frame to TTL polarity switches below {F2TTL_THRESH} secs') if display: # pragma: no cover from ibllib.plots import squares @@ -431,13 +430,14 @@ def extract_wheel_sync(sync, chmap=None, tmin=None, tmax=None): np.array Wheel positions in radians. 
""" - wheel = {} + # Assume two separate edge count channels + assert chmap.keys() >= {'rotary_encoder_0', 'rotary_encoder_1'} channela = get_sync_fronts(sync, chmap['rotary_encoder_0'], tmin=tmin, tmax=tmax) channelb = get_sync_fronts(sync, chmap['rotary_encoder_1'], tmin=tmin, tmax=tmax) - wheel['re_ts'], wheel['re_pos'] = _rotary_encoder_positions_from_fronts( + re_ts, re_pos = _rotary_encoder_positions_from_fronts( channela['times'], channela['polarities'], channelb['times'], channelb['polarities'], - ticks=WHEEL_TICKS, radius=1, coding='x4') - return wheel['re_ts'], wheel['re_pos'] + ticks=WHEEL_TICKS, radius=WHEEL_RADIUS_CM, coding='x4') + return re_ts, re_pos def extract_behaviour_sync(sync, chmap=None, display=False, bpod_trials=None, tmin=None, tmax=None): @@ -474,9 +474,23 @@ def extract_behaviour_sync(sync, chmap=None, display=False, bpod_trials=None, tm audio = _clean_audio(audio) # extract events from the fronts for each trace t_trial_start, t_valve_open, t_iti_in = _assign_events_bpod(bpod['times'], bpod['polarities']) - # one issue is that sometimes bpod pulses may not have been detected, in this case - # perform the sync bpod/FPGA, and add the start that have not been detected - if bpod_trials: + if not bpod_trials: + raise ValueError('No Bpod trials to align') + # If there are no detected trial start times or more than double the trial end pulses, + # the trial start pulses may be too small to be detected, in which case, sync using the ini_in + if t_trial_start.size == 0 or (t_trial_start.size / t_iti_in.size) < .5: + _logger.info('Attempting to align on ITI in') + assert t_iti_in.size > 0, 'no detected ITI in TTLs on the DAQ to align' + bpod_end = bpod_trials['itiIn_times'] + fcn, drift = neurodsp.utils.sync_timestamps(bpod_end, t_iti_in) + # if it's drifting too much + if drift > 200 and bpod_end.size != t_iti_in.size: + raise err.SyncBpodFpgaException('sync cluster f*ck') + t_trial_start = fcn(bpod_trials['intervals_bpod'][:, 0]) + else: + # one issue is that sometimes bpod pulses may not have been detected, in this case + # perform the sync bpod/FPGA, and add the start that have not been detected + _logger.info('Attempting to align on trial start') bpod_start = bpod_trials['intervals_bpod'][:, 0] fcn, drift, ibpod, ifpga = neurodsp.utils.sync_timestamps( bpod_start, t_trial_start, return_indices=True) @@ -485,11 +499,8 @@ def extract_behaviour_sync(sync, chmap=None, display=False, bpod_trials=None, tm raise err.SyncBpodFpgaException('sync cluster f*ck') missing_bpod = fcn(bpod_start[np.setxor1d(ibpod, np.arange(len(bpod_start)))]) t_trial_start = np.sort(np.r_[t_trial_start, missing_bpod]) - else: - _logger.warning('Deprecation Warning: calling FPGA trials extraction without a bpod trials' - ' dictionary will result in an error.') - t_ready_tone_in, t_error_tone_in = _assign_events_audio( - audio['times'], audio['polarities']) + + t_ready_tone_in, t_error_tone_in = _assign_events_audio(audio['times'], audio['polarities']) trials = Bunch({ 'goCue_times': _assign_events_to_trial(t_trial_start, t_ready_tone_in, take='first'), 'errorCue_times': _assign_events_to_trial(t_trial_start, t_error_tone_in), @@ -706,19 +717,66 @@ class FpgaTrials(extractors_base.BaseExtractor): 'wheel_timestamps', 'wheel_position', 'wheelMoves_intervals', 'wheelMoves_peakAmplitude') - # Fields from bpod extractor that we want to resync to FPGA + # Fields from bpod extractor that we want to re-sync to FPGA bpod_rsync_fields = ('intervals', 'response_times', 'goCueTrigger_times', 
'stimOnTrigger_times', 'stimOffTrigger_times', 'stimFreezeTrigger_times', 'errorCueTrigger_times') # Fields from bpod extractor that we want to save - bpod_fields = ('feedbackType', 'choice', 'rewardVolume', 'contrastLeft', 'contrastRight', 'probabilityLeft', - 'intervals_bpod', 'phase', 'position', 'quiescence') + bpod_fields = ('feedbackType', 'choice', 'rewardVolume', 'contrastLeft', 'contrastRight', + 'probabilityLeft', 'intervals_bpod', 'phase', 'position', 'quiescence') - def __init__(self, *args, **kwargs): + """str: The Bpod events to synchronize (must be present in sync channel map).""" + sync_field = 'intervals' + + def __init__(self, *args, bpod_trials=None, bpod_extractor=None, **kwargs): """An extractor for all ephys trial data, in FPGA time""" super().__init__(*args, **kwargs) self.bpod2fpga = None + self.bpod_trials = bpod_trials + if bpod_extractor: + self.bpod_extractor = bpod_extractor + self._update_var_names() + + def _update_var_names(self, bpod_fields=None, bpod_rsync_fields=None): + """ + Updates this object's attributes based on the Bpod trials extractor. + + Fields updated: bpod_fields, bpod_rsync_fields, save_names, and var_names. + + Parameters + ---------- + bpod_fields : tuple + A set of Bpod trials fields to keep. + bpod_rsync_fields : tuple + A set of Bpod trials fields to sync to the DAQ times. + + TODO Turn into property getter; requires ensuring the output field are the same for legacy + """ + if self.bpod_extractor: + self.var_names = self.bpod_extractor.var_names + self.save_names = self.bpod_extractor.save_names + self.bpod_rsync_fields = bpod_rsync_fields or self._time_fields(self.bpod_extractor.var_names) + self.bpod_fields = bpod_fields or [x for x in self.bpod_extractor.var_names if x not in self.bpod_rsync_fields] + + @staticmethod + def _time_fields(trials_attr) -> set: + """ + Iterates over Bpod trials attributes returning those that correspond to times for syncing. + + Parameters + ---------- + trials_attr : iterable of str + The Bpod field names. + + Returns + ------- + set + The field names that contain timestamps. 
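+
+        Examples
+        --------
+        >>> sorted(FpgaTrials._time_fields(('feedback_times', 'choice', 'intervals', 'wheel_timestamps')))
+        ['feedback_times', 'intervals', 'wheel_timestamps']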
+ """ + FIELDS = ('times', 'timestamps', 'intervals') + pattern = re.compile(fr'^[_\w]*({"|".join(FIELDS)})[_\w]*$') + return set(filter(pattern.match, trials_attr)) def _extract(self, sync=None, chmap=None, sync_collection='raw_ephys_data', task_collection='raw_behavior_data', **kwargs): """Extracts ephys trials by combining Bpod and FPGA sync pulses""" @@ -727,18 +785,16 @@ def _extract(self, sync=None, chmap=None, sync_collection='raw_ephys_data', task _sync, _chmap = get_sync_and_chn_map(self.session_path, sync_collection) sync = sync or _sync chmap = chmap or _chmap - # load the bpod data and performs a biased choice world training extraction - # TODO these all need to pass in the collection so we can load for different protocols in different folders - bpod_raw = raw_data_loaders.load_data(self.session_path, task_collection=task_collection) - assert bpod_raw is not None, "No task trials data in raw_behavior_data - Exit" - bpod_trials = self._extract_bpod(bpod_raw, task_collection=task_collection, save=False) + if not self.bpod_trials: + self.bpod_trials, *_ = bpod_extract_all( + session_path=self.session_path, task_collection=task_collection, save=False, + extractor_type=kwargs.get('extractor_type')) # Explode trials table df - trials_table = alfio.AlfBunch.from_df(bpod_trials.pop('table')) + trials_table = alfio.AlfBunch.from_df(self.bpod_trials.pop('table')) table_columns = trials_table.keys() - bpod_trials.update(trials_table) - # synchronize - bpod_trials['intervals_bpod'] = np.copy(bpod_trials['intervals']) + self.bpod_trials.update(trials_table) + self.bpod_trials['intervals_bpod'] = np.copy(self.bpod_trials['intervals']) # Get the spacer times for this protocol if (protocol_number := kwargs.get('protocol_number')) is not None: # look for spacer @@ -749,24 +805,27 @@ def _extract(self, sync=None, chmap=None, sync_collection='raw_ephys_data', task tmin = tmax = None fpga_trials = extract_behaviour_sync( - sync=sync, chmap=chmap, bpod_trials=bpod_trials, tmin=tmin, tmax=tmax) + sync=sync, chmap=chmap, bpod_trials=self.bpod_trials, tmin=tmin, tmax=tmax) + assert self.sync_field in self.bpod_trials and self.sync_field in fpga_trials + self.bpod_trials[f'{self.sync_field}_bpod'] = np.copy(self.bpod_trials[self.sync_field]) + # checks consistency and compute dt with bpod self.bpod2fpga, drift_ppm, ibpod, ifpga = neurodsp.utils.sync_timestamps( - bpod_trials['intervals_bpod'][:, 0], fpga_trials.pop('intervals')[:, 0], + self.bpod_trials[f'{self.sync_field}_bpod'][:, 0], fpga_trials.pop(self.sync_field)[:, 0], return_indices=True) - nbpod = bpod_trials['intervals_bpod'].shape[0] + nbpod = self.bpod_trials[f'{self.sync_field}_bpod'].shape[0] npfga = fpga_trials['feedback_times'].shape[0] nsync = len(ibpod) - _logger.info(f"N trials: {nbpod} bpod, {npfga} FPGA, {nsync} merged, sync {drift_ppm} ppm") + _logger.info(f'N trials: {nbpod} bpod, {npfga} FPGA, {nsync} merged, sync {drift_ppm} ppm') if drift_ppm > BPOD_FPGA_DRIFT_THRESHOLD_PPM: _logger.warning('BPOD/FPGA synchronization shows values greater than %i ppm', BPOD_FPGA_DRIFT_THRESHOLD_PPM) out = OrderedDict() - out.update({k: bpod_trials[k][ibpod] for k in self.bpod_fields}) - out.update({k: self.bpod2fpga(bpod_trials[k][ibpod]) for k in self.bpod_rsync_fields}) + out.update({k: self.bpod_trials[k][ibpod] for k in self.bpod_fields}) + out.update({k: self.bpod2fpga(self.bpod_trials[k][ibpod]) for k in self.bpod_rsync_fields}) out.update({k: fpga_trials[k][ifpga] for k in sorted(fpga_trials.keys())}) # extract the wheel data - wheel, 
moves = get_wheel_positions(sync=sync, chmap=chmap, tmin=tmin, tmax=tmax) + wheel, moves = self.get_wheel_positions(sync=sync, chmap=chmap, tmin=tmin, tmax=tmax) from ibllib.io.extractors.training_wheel import extract_first_movement_times settings = raw_data_loaders.load_settings(session_path=self.session_path, task_collection=task_collection) min_qt = settings.get('QUIESCENT_PERIOD', None) @@ -781,15 +840,16 @@ def _extract(self, sync=None, chmap=None, sync_collection='raw_ephys_data', task return [out[k] for k in out] + [wheel['timestamps'], wheel['position'], moves['intervals'], moves['peakAmplitude']] - def _extract_bpod(self, bpod_trials, task_collection='raw_behavior_data', save=False): - bpod_trials, *_ = bpod_extract_all( - session_path=self.session_path, save=save, bpod_trials=bpod_trials, task_collection=task_collection) + def get_wheel_positions(self, *args, **kwargs): + """Extract wheel and wheelMoves objects. - return bpod_trials + This method is called by the main extract method and may be overloaded by subclasses. + """ + return get_wheel_positions(*args, **kwargs) -def extract_all(session_path, sync_collection='raw_ephys_data', save=True, task_collection='raw_behavior_data', save_path=None, - protocol_number=None, **kwargs): +def extract_all(session_path, sync_collection='raw_ephys_data', save=True, save_path=None, + task_collection='raw_behavior_data', protocol_number=None, **kwargs): """ For the IBL ephys task, reads ephys binary file and extract: - sync @@ -821,16 +881,22 @@ def extract_all(session_path, sync_collection='raw_ephys_data', save=True, task_ list of pathlib.Path, None If save is True, a list of file paths to the extracted data. """ - extractor_type = extractors_base.get_session_extractor_type(session_path, task_collection=task_collection) - _logger.info(f"Extracting {session_path} as {extractor_type}") + # Extract Bpod trials + bpod_raw = raw_data_loaders.load_data(session_path, task_collection=task_collection) + assert bpod_raw is not None, 'No task trials data in raw_behavior_data - Exit' + bpod_trials, *_ = bpod_extract_all( + session_path=session_path, bpod_trials=bpod_raw, task_collection=task_collection, + save=False, extractor_type=kwargs.get('extractor_type')) + + # Sync Bpod trials to FPGA sync, chmap = get_sync_and_chn_map(session_path, sync_collection) # sync, chmap = get_main_probe_sync(session_path, bin_exists=bin_exists) - base = [FpgaTrials] - if extractor_type == 'ephys_biased_opto': - base.append(LaserBool) - outputs, files = extractors_base.run_extractor_classes( - base, session_path=session_path, save=save, sync=sync, chmap=chmap, path_out=save_path, + trials = FpgaTrials(session_path, bpod_trials=bpod_trials) + outputs, files = trials.extract( + save=save, sync=sync, chmap=chmap, path_out=save_path, task_collection=task_collection, protocol_number=protocol_number, **kwargs) + if not isinstance(outputs, dict): + outputs = {k: v for k, v in zip(trials.var_names, outputs)} return outputs, files @@ -902,8 +968,8 @@ def load_channel_map(session_path, sync_collection): if data_for_keys(default_chmap.keys(), chmap): return chmap else: - _logger.warning("Keys missing from provided channel map, " - "setting missing keys from default channel map") + _logger.warning('Keys missing from provided channel map, ' + 'setting missing keys from default channel map') return {**default_chmap, **chmap} diff --git a/ibllib/io/extractors/fibrephotometry.py b/ibllib/io/extractors/fibrephotometry.py index 2f71bb78a..e9cb60321 100644 --- 
a/ibllib/io/extractors/fibrephotometry.py
+++ b/ibllib/io/extractors/fibrephotometry.py
@@ -46,15 +46,22 @@
 NEUROPHOTOMETRICS_LED_STATES = {
     'Condition': {
         0: 'No additional signal',
-        1: 'Output 0 signal HIGH + Stimulation',
-        2: 'Output 0 signal HIGH + Input 0 signal HIGH',
-        3: 'Input 0 signal HIGH + Stimulation',
-        4: 'Output 0 HIGH + Input 0 HIGH + Stimulation'
+        1: 'Output 1 signal HIGH',
+        2: 'Output 0 signal HIGH',
+        3: 'Stimulation ON',
+        4: 'GPIO Line 2 HIGH',
+        5: 'GPIO Line 3 HIGH',
+        6: 'Input 1 HIGH',
+        7: 'Input 0 HIGH',
+        8: 'Output 0 signal HIGH + Stimulation',
+        9: 'Output 0 signal HIGH + Input 0 signal HIGH',
+        10: 'Input 0 signal HIGH + Stimulation',
+        11: 'Output 0 HIGH + Input 0 HIGH + Stimulation',
     },
-    'No LED ON': {0: 0, 1: 48, 2: 528, 3: 544, 4: 560},
-    'L415': {0: 1, 1: 49, 2: 529, 3: 545, 4: 561},
-    'L470': {0: 2, 1: 50, 2: 530, 3: 546, 4: 562},
-    'L560': {0: 4, 1: 52, 2: 532, 3: 548, 4: 564}
+    'No LED ON': {0: 0, 1: 8, 2: 16, 3: 32, 4: 64, 5: 128, 6: 256, 7: 512, 8: 48, 9: 528, 10: 544, 11: 560},
+    'L415': {0: 1, 1: 9, 2: 17, 3: 33, 4: 65, 5: 129, 6: 257, 7: 513, 8: 49, 9: 529, 10: 545, 11: 561},
+    'L470': {0: 2, 1: 10, 2: 18, 3: 34, 4: 66, 5: 130, 6: 258, 7: 514, 8: 50, 9: 530, 10: 546, 11: 562},
+    'L560': {0: 4, 1: 12, 2: 20, 3: 36, 4: 68, 5: 132, 6: 260, 7: 516, 8: 52, 9: 532, 10: 548, 11: 564}
 }
@@ -111,7 +118,7 @@ def sync_photometry_to_daq(vdaq, fs, df_photometry, chmap=DAQ_CHMAP, v_threshold
 def read_daq_voltage(daq_file, chmap=DAQ_CHMAP):
     channel_names = [c.name for c in load_raw_daq_tdms(daq_file)['Analog'].channels()]
     assert all([v in channel_names for v in chmap.values()]), "Missing channel"
-    vdaq, fs = load_channels_tdms(daq_file, chmap=chmap, return_fs=True)
+    vdaq, fs = load_channels_tdms(daq_file, chmap=chmap)
     vdaq = {k: v - np.median(v) for k, v in vdaq.items()}
     return vdaq, fs
diff --git a/ibllib/io/extractors/mesoscope.py b/ibllib/io/extractors/mesoscope.py
new file mode 100644
index 000000000..93491945e
--- /dev/null
+++ b/ibllib/io/extractors/mesoscope.py
@@ -0,0 +1,601 @@
+"""Mesoscope (timeline) data extraction."""
+import logging
+
+import numpy as np
+import one.alf.io as alfio
+from one.util import ensure_list
+from one.alf.files import session_path_parts
+import matplotlib.pyplot as plt
+from neurodsp.utils import falls
+from pkg_resources import parse_version
+
+from ibllib.plots.misc import squares, vertical_lines
+from ibllib.io.raw_daq_loaders import (extract_sync_timeline, timeline_get_channel,
+                                       correct_counter_discontinuities, load_timeline_sync_and_chmap)
+import ibllib.io.extractors.base as extractors_base
+from ibllib.io.extractors.ephys_fpga import FpgaTrials, WHEEL_TICKS, WHEEL_RADIUS_CM, get_sync_fronts, get_protocol_period
+from ibllib.io.extractors.training_wheel import extract_wheel_moves
+from ibllib.io.extractors.camera import attribute_times
+from ibllib.io.extractors.ephys_fpga import _assign_events_bpod
+
+_logger = logging.getLogger(__name__)
+
+
+def patch_imaging_meta(meta: dict) -> dict:
+    """
+    Patch imaging meta data for compatibility across versions.
+
+    The dict is patched in place; a copy is NOT returned.
+
+    Parameters
+    ----------
+    meta : dict
+        The imaging meta data loaded from the rawImagingData.meta file.
+
+    Returns
+    -------
+    dict
+        The loaded meta data file, updated to the most recent version.
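+
+    Examples
+    --------
+    Patching an early, unversioned meta file (a minimal sketch of the input):
+
+    >>> meta = patch_imaging_meta({'FOV': [{'channelIdx': 2}]})
+    >>> meta['channelSaved']
+    2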
+ """ + # 2023-05-17 (unversioned) adds nFrames and channelSaved keys + if parse_version(meta.get('version') or '0.0.0') <= parse_version('0.0.0'): + if 'channelSaved' not in meta: + meta['channelSaved'] = next((x['channelIdx'] for x in meta['FOV'] if 'channelIdx' in x), []) + return meta + + +def plot_timeline(timeline, channels=None, raw=True): + """ + Plot the timeline data. + + Parameters + ---------- + timeline : one.alf.io.AlfBunch + The timeline data object. + channels : list of str + An iterable of channel names to plot. + raw : bool + If true, plot the raw DAQ samples; if false, apply TTL thresholds and plot changes. + + Returns + ------- + matplotlib.pyplot.Figure + The figure containing timeline subplots. + list of matplotlib.pyplot.Axes + The axes for each timeline channel plotted. + """ + meta = {x.copy().pop('name'): x for x in timeline['meta']['inputs']} + channels = channels or meta.keys() + fig, axes = plt.subplots(len(channels), 1, sharex=True) + axes = ensure_list(axes) + if not raw: + chmap = {ch: meta[ch]['arrayColumn'] for ch in channels} + sync = extract_sync_timeline(timeline, chmap=chmap) + for i, (ax, ch) in enumerate(zip(axes, channels)): + if raw: + # axesScale controls vertical scaling of each trace (multiplicative) + values = timeline['raw'][:, meta[ch]['arrayColumn'] - 1] * meta[ch]['axesScale'] + ax.plot(timeline['timestamps'], values) + elif np.any(idx := sync['channels'] == chmap[ch]): + squares(sync['times'][idx], sync['polarities'][idx], ax=ax) + ax.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False) + ax.spines['bottom'].set_visible(False), ax.spines['left'].set_visible(True) + ax.set_ylabel(ch, rotation=45, fontsize=8) + # Add back x-axis ticks to the last plot + axes[-1].tick_params(axis='x', which='both', bottom=True, labelbottom=True) + axes[-1].spines['bottom'].set_visible(True) + plt.get_current_fig_manager().window.showMaximized() # full screen + fig.tight_layout(h_pad=0) + return fig, axes + + +class TimelineTrials(FpgaTrials): + """Similar extraction to the FPGA, however counter and position channels are treated differently.""" + + """one.alf.io.AlfBunch: The timeline data object""" + timeline = None + + def __init__(self, *args, sync_collection='raw_sync_data', **kwargs): + """An extractor for all ephys trial data, in Timeline time""" + super().__init__(*args, **kwargs) + self.timeline = alfio.load_object(self.session_path / sync_collection, 'DAQdata', namespace='timeline') + + def _extract(self, sync=None, chmap=None, sync_collection='raw_sync_data', **kwargs): + if not (sync or chmap): + sync, chmap = load_timeline_sync_and_chmap( + self.session_path / sync_collection, timeline=self.timeline, chmap=chmap) + + if kwargs.get('display', False): + plot_timeline(self.timeline, channels=chmap.keys(), raw=True) + trials = super()._extract(sync, chmap, sync_collection, extractor_type='ephys', **kwargs) + + # If no protocol number is defined, trim timestamps based on Bpod trials intervals + trials_table = trials[self.var_names.index('table')] + bpod = get_sync_fronts(sync, chmap['bpod']) + if kwargs.get('protocol_number') is None: + tmin = trials_table.intervals_0.iloc[0] - 1 + tmax = trials_table.intervals_1.iloc[-1] + # Ensure wheel is cut off based on trials + wheel_ts_idx = self.var_names.index('wheel_timestamps') + mask = np.logical_and(tmin <= trials[wheel_ts_idx], trials[wheel_ts_idx] <= tmax) + trials[wheel_ts_idx] = trials[wheel_ts_idx][mask] + wheel_pos_idx = self.var_names.index('wheel_position') + 
trials[wheel_pos_idx] = trials[wheel_pos_idx][mask] + move_idx = self.var_names.index('wheelMoves_intervals') + mask = np.logical_and(trials[move_idx][:, 0] >= tmin, trials[move_idx][:, 0] <= tmax) + trials[move_idx] = trials[move_idx][mask, :] + else: + tmin, tmax = get_protocol_period(self.session_path, kwargs['protocol_number'], bpod) + bpod = get_sync_fronts(sync, chmap['bpod'], tmin, tmax) + + self.frame2ttl = get_sync_fronts(sync, chmap['frame2ttl'], tmin, tmax) # save for later access by QC + + # Replace valve open times with those extracted from the DAQ + # TODO Let's look at the expected open length based on calibration and reward volume + assert len(bpod['times']) > 0, 'No Bpod TTLs detected on DAQ' + _, driver_out, _, = _assign_events_bpod(bpod['times'], bpod['polarities'], False) + # Use the driver TTLs to find the valve open times that correspond to the valve opening + valve_open_times = self.get_valve_open_times(driver_ttls=driver_out) + assert len(valve_open_times) == sum(trials_table.feedbackType == 1) # TODO Relax assertion + correct = trials_table.feedbackType == 1 + trials[self.var_names.index('valveOpen_times')][correct] = valve_open_times + trials_table.feedback_times[correct] = valve_open_times + + # Replace audio events + self.audio = get_sync_fronts(sync, chmap['audio'], tmin, tmax) + # Attempt to assign the go cue and error tone onsets based on TTL length + go_cue, error_cue = self._assign_events_audio(self.audio['times'], self.audio['polarities']) + + assert error_cue.size == np.sum(~correct), 'N detected error tones does not match number of incorrect trials' + assert go_cue.size <= len(trials_table), 'More go cue tones detected than trials!' + + if go_cue.size < len(trials_table): + _logger.warning('%i go cue tones missed', len(trials_table) - go_cue.size) + """ + If the error cues are all assigned and some go cues are missed it may be that some + responses were so fast that the go cue and error tone merged. 
+ """ + err_trig = self.bpod2fpga(self.bpod_trials['errorCueTrigger_times']) + go_trig = self.bpod2fpga(self.bpod_trials['goCueTrigger_times']) + assert not np.any(np.isnan(go_trig)) + assert err_trig.size == go_trig.size + + def first_true(arr): + """Return the index of the first True value in an array.""" + indices = np.where(arr)[0] + return None if len(indices) == 0 else indices[0] + + # Find which trials are missing a go cue + _go_cue = np.full(len(trials_table), np.nan) + for i, intervals in enumerate(trials_table[['intervals_0', 'intervals_1']].values): + idx = first_true(np.logical_and(go_cue > intervals[0], go_cue < intervals[1])) + if idx is not None: + _go_cue[i] = go_cue[idx] + + # Get all the DAQ timestamps where audio channel was HIGH + raw = timeline_get_channel(self.timeline, 'audio') + raw = (raw - raw.min()) / (raw.max() - raw.min()) # min-max normalize + ups = self.timeline.timestamps[raw > .5] # timestamps where input HIGH + for i in np.where(np.isnan(_go_cue))[0]: + # Get the timestamp of the first HIGH after the trigger times + _go_cue[i] = ups[first_true(ups > go_trig[i])] + idx = first_true(np.logical_and( + error_cue > trials_table['intervals_0'][i], + error_cue < trials_table['intervals_1'][i])) + if np.isnan(err_trig[i]): + if idx is not None: + error_cue = np.delete(error_cue, idx) # Remove mis-assigned error tone time + else: + error_cue[idx] = ups[first_true(ups > err_trig[i])] + go_cue = _go_cue + + trials_table.feedback_times[~correct] = error_cue + trials_table.goCue_times = go_cue + return trials + + def extract_wheel_sync(self, ticks=WHEEL_TICKS, radius=WHEEL_RADIUS_CM, coding='x4', tmin=None, tmax=None): + """ + Gets the wheel position from Timeline counter channel. + + Parameters + ---------- + ticks : int + Number of ticks corresponding to a full revolution (1024 for IBL rotary encoder). + radius : float + Radius of the wheel. Defaults to 1 for an output in radians. + coding : str {'x1', 'x2', 'x4'} + Rotary encoder encoding (IBL default is x4). + tmin : float + The minimum time from which to extract the sync pulses. + tmax : float + The maximum time up to which we extract the sync pulses. + + Returns + ------- + np.array + Wheel timestamps in seconds. + np.array + Wheel positions in radians. + + See Also + -------- + ibllib.io.extractors.ephys_fpga.extract_wheel_sync + """ + if coding not in ('x1', 'x2', 'x4'): + raise ValueError('Unsupported coding; must be one of x1, x2 or x4') + raw = correct_counter_discontinuities(timeline_get_channel(self.timeline, 'rotary_encoder')) + + # Timeline evenly samples counter so we extract only change points + d = np.diff(raw) + ind, = np.where(d.astype(int)) + pos = raw[ind + 1] + pos -= pos[0] # Start from zero + pos = pos / ticks * np.pi * 2 * radius / int(coding[1]) # Convert to radians + + # Get timestamps of changes and trim based on protocol spacers + ts = self.timeline['timestamps'][ind + 1] + tmin = ts.min() if tmin is None else tmin + tmax = ts.max() if tmax is None else tmax + mask = np.logical_and(ts >= tmin, ts <= tmax) + return ts[mask], pos[mask] + + def get_wheel_positions(self, ticks=WHEEL_TICKS, radius=WHEEL_RADIUS_CM, coding='x4', + tmin=None, tmax=None, display=False, **kwargs): + """ + Gets the wheel position and detected movements from Timeline counter channel. + + Called by the super class extractor (FPGATrials._extract). + + Parameters + ---------- + ticks : int + Number of ticks corresponding to a full revolution (1024 for IBL rotary encoder). + radius : float + Radius of the wheel. 
Pass 1 for an output in radians (NB: the default, WHEEL_RADIUS_CM, gives the output in cm).
+        coding : str {'x1', 'x2', 'x4'}
+            Rotary encoder encoding (IBL default is x4).
+        tmin : float
+            The minimum time from which to extract the sync pulses.
+        tmax : float
+            The maximum time up to which we extract the sync pulses.
+        display : bool
+            If true, plot the wheel positions from bpod and the DAQ.
+
+        Returns
+        -------
+        dict
+            wheel object with keys ('timestamps', 'position').
+        dict
+            wheelMoves object with keys ('intervals', 'peakAmplitude').
+        """
+        wheel = self.extract_wheel_sync(ticks=ticks, radius=radius, coding=coding, tmin=tmin, tmax=tmax)
+        wheel = dict(zip(('timestamps', 'position'), wheel))
+        moves = extract_wheel_moves(wheel['timestamps'], wheel['position'])
+
+        if display:
+            fig, (ax0, ax1) = plt.subplots(nrows=2, sharex=True)
+            bpod_ts = self.bpod_trials['wheel_timestamps']
+            bpod_pos = self.bpod_trials['wheel_position']
+            ax0.plot(self.bpod2fpga(bpod_ts), bpod_pos)
+            ax0.set_ylabel('Bpod wheel position / rad')
+            ax1.plot(wheel['timestamps'], wheel['position'])
+            ax1.set_ylabel('DAQ wheel position / rad'), ax1.set_xlabel('Time / s')
+        return wheel, moves
+
+    def get_valve_open_times(self, display=False, threshold=-2.5, floor_percentile=10, driver_ttls=None):
+        """
+        Get the valve open times from the raw timeline voltage trace.
+
+        Parameters
+        ----------
+        display : bool
+            Plot detected times on the raw voltage trace.
+        threshold : float
+            The falling-edge step threshold to apply to the analogue trace.
+        floor_percentile : float
+            The percentile of the analogue trace to subtract as a floor before thresholding
+            (default 10%), to avoid DC offset drift.
+        driver_ttls : numpy.array
+            An optional array of driver TTL times; if provided, only valve openings that
+            immediately follow a TTL are kept.
+
+        Returns
+        -------
+        numpy.array
+            The detected valve open times.
+
+        TODO extract close times too
+        """
+        tl = self.timeline
+        info = next(x for x in tl['meta']['inputs'] if x['name'] == 'reward_valve')
+        values = tl['raw'][:, info['arrayColumn'] - 1]  # Timeline indices start from 1
+        offset = np.percentile(values, floor_percentile, axis=0)
+        idx = falls(values - offset, step=threshold)  # Voltage falls when valve opens
+        open_times = tl['timestamps'][idx]
+        # The closing of the valve is noisy. Keep only the falls that occur immediately after a Bpod TTL
+        if driver_ttls is not None:
+            # Returns an array of open_times indices, one for each driver TTL
+            ind = attribute_times(open_times, driver_ttls, tol=.1, take='after')
+            open_times = open_times[ind[ind >= 0]]
+            # TODO Log any > 40ms? Difficult to report missing valve times because of calibration
+
+        if display:
+            fig, (ax0, ax1) = plt.subplots(nrows=2, sharex=True)
+            ax0.plot(tl['timestamps'], timeline_get_channel(tl, 'bpod'), 'k-o')
+            if driver_ttls is not None:
+                vertical_lines(driver_ttls, ymax=5, ax=ax0, linestyle='--', color='b')
+            ax1.plot(tl['timestamps'], values - offset, 'k-o')
+            ax1.set_ylabel('Voltage / V'), ax1.set_xlabel('Time / s')
+            ax1.plot(tl['timestamps'][idx], np.zeros_like(idx), 'r*')
+            if driver_ttls is not None:
+                ax1.plot(open_times, np.zeros_like(open_times), 'g*')
+        return open_times
+
+    def _assign_events_audio(self, audio_times, audio_polarities, display=False):
+        """
+        This is identical to ephys_fpga._assign_events_audio, except for the ready tone threshold.
+
+        Parameters
+        ----------
+        audio_times : numpy.array
+            An array of audio TTL front times.
+        audio_polarities : numpy.array
+            An array of audio TTL front polarities (1 for rises, -1 for falls).
+        display : bool
+            If true, display audio pulses and the assigned onsets.
+ + Returns + ------- + numpy.array + The times of the go cue onsets. + numpy.array + The times of the error tone onsets. + """ + # make sure that there are no 2 consecutive fall or consecutive rise events + assert np.all(np.abs(np.diff(audio_polarities)) == 2) + # take only even time differences: ie. from rising to falling fronts + dt = np.diff(audio_times) + onsets = audio_polarities[:-1] == 1 + + # error tones are events lasting from 400ms to 1200ms + i_error_tone_in = np.where(np.logical_and(0.4 < dt, dt < 1.2) & onsets)[0] + t_error_tone_in = audio_times[i_error_tone_in] + + # detect ready tone by length below 300 ms + i_ready_tone_in = np.where(np.logical_and(dt <= 0.3, onsets))[0] + t_ready_tone_in = audio_times[i_ready_tone_in] + if display: # pragma: no cover + fig, ax = plt.subplots(nrows=2, sharex=True) + ax[0].plot(self.timeline.timestamps, timeline_get_channel(self.timeline, 'audio'), 'k-o') + ax[0].set_ylabel('Voltage / V') + squares(audio_times, audio_polarities, yrange=[-1, 1], ax=ax[1]) + vertical_lines(t_ready_tone_in, ymin=-.8, ymax=.8, ax=ax[1], label='go cue') + vertical_lines(t_error_tone_in, ymin=-.8, ymax=.8, ax=ax[1], label='error tone') + ax[1].set_xlabel('Time / s') + ax[1].legend() + + return t_ready_tone_in, t_error_tone_in + + +class MesoscopeSyncTimeline(extractors_base.BaseExtractor): + """Extraction of mesoscope imaging times.""" + + var_names = ('mpci_times', 'mpciStack_timeshift') + save_names = ('mpci.times.npy', 'mpciStack.timeshift.npy') + + """one.alf.io.AlfBunch: The raw imaging meta data and frame times""" + rawImagingData = None + + def __init__(self, session_path, n_FOVs): + """ + Extract the mesoscope frame times from DAQ data acquired through Timeline. + + Parameters + ---------- + session_path : str, pathlib.Path + The session path to extract times from. + n_FOVs : int + The number of fields of view acquired. + """ + super().__init__(session_path) + self.n_FOVs = n_FOVs + fov = list(map(lambda n: f'FOV_{n:02}', range(self.n_FOVs))) + self.var_names = [f'{x}_{y.lower()}' for x in self.var_names for y in fov] + self.save_names = [f'{y}/{x}' for x in self.save_names for y in fov] + + def _extract(self, sync=None, chmap=None, device_collection='raw_imaging_data', events=None): + """ + Extract the frame timestamps for each individual field of view (FOV) and the time offsets + for each line scan. + + The detected frame times from the 'neural_frames' channel of the DAQ are split into bouts + corresponding to the number of raw_imaging_data folders. These timestamps should match the + number of frame timestamps extracted from the image file headers (found in the + rawImagingData.times file). The field of view (FOV) shifts are then applied to these + timestamps for each field of view and provided together with the line shifts. + + Parameters + ---------- + sync : one.alf.io.AlfBunch + A dictionary with keys ('times', 'polarities', 'channels'), containing the sync pulses + and the corresponding channel numbers. + chmap : dict + A map of channel names and their corresponding indices. Only the 'neural_frames' + channel is required. + device_collection : str, iterable of str + The location of the raw imaging data. + events : pandas.DataFrame + A table of software events, with columns {'time_timeline' 'name_timeline', + 'event_timeline'}. + + Returns + ------- + list of numpy.array + A list of timestamps for each FOV and the time offsets for each line scan. 
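The tone-length classification in `_assign_events_audio` above boils down to thresholding pulse durations (ready tone at or under 0.3 s, error tone between 0.4 s and 1.2 s, per the comments in the hunk). A self-contained toy run of the same logic:

```python
import numpy as np

times = np.array([1.0, 1.1, 2.0, 2.6])   # rise, fall, rise, fall
polarities = np.array([1, -1, 1, -1])
dt = np.diff(times)                       # durations between consecutive fronts
onsets = polarities[:-1] == 1             # intervals starting on a rising front
go_cues = times[np.where((dt <= 0.3) & onsets)[0]]                  # short pulse
error_tones = times[np.where((0.4 < dt) & (dt < 1.2) & onsets)[0]]  # long pulse
assert go_cues.tolist() == [1.0] and error_tones.tolist() == [2.0]
```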
+ """ + frame_times = sync['times'][sync['channels'] == chmap['neural_frames']] + + # imaging_start_time = datetime.datetime(*map(round, self.rawImagingData.meta['acquisitionStartTime'])) + if isinstance(device_collection, str): + device_collection = [device_collection] + if events is not None: + events = events[events.name == 'mpepUDP'] + edges = self.get_bout_edges(frame_times, device_collection, events) + fov_times = [] + line_shifts = [] + for (tmin, tmax), collection in zip(edges, sorted(device_collection)): + imaging_data = alfio.load_object(self.session_path / collection, 'rawImagingData') + imaging_data['meta'] = patch_imaging_meta(imaging_data['meta']) + # Calculate line shifts + _, fov_time_shifts, line_time_shifts = self.get_timeshifts(imaging_data['meta']) + assert len(fov_time_shifts) == self.n_FOVs, f'unexpected number of FOVs for {collection}' + ts = frame_times[np.logical_and(frame_times >= tmin, frame_times <= tmax)] + assert ts.size >= imaging_data['times_scanImage'].size, f'fewer DAQ timestamps for {collection} than expected' + if ts.size > imaging_data['times_scanImage'].size: + _logger.warning( + 'More DAQ frame times detected for %s than were found in the raw image data.\n' + 'N DAQ frame times:\t%i\nN raw image data times:\t%i.\n' + 'This may occur if the bout detection fails (e.g. UDPs recorded late), ' + 'when image data is corrupt, or when frames are not written to file.', + collection, ts.size, imaging_data['times_scanImage'].size) + _logger.info('Dropping last %i frame times for %s', ts.size - imaging_data['times_scanImage'].size, collection) + ts = ts[:imaging_data['times_scanImage'].size] + fov_times.append([ts + offset for offset in fov_time_shifts]) + if not line_shifts: + line_shifts = line_time_shifts + else: # The line shifts should be the same across all imaging bouts + [np.testing.assert_array_equal(x, y) for x, y in zip(line_time_shifts, line_shifts)] + + # Concatenate imaging timestamps across all bouts for each field of view + fov_times = list(map(np.concatenate, zip(*fov_times))) + n_fov_times, = set(map(len, fov_times)) + if n_fov_times != frame_times.size: + # This may happen if an experimenter deletes a raw_imaging_data folder + _logger.debug('FOV timestamps length does not match neural frame count; imaging bout(s) likely missing') + return fov_times + line_shifts + + def get_bout_edges(self, frame_times, collections=None, events=None, min_gap=1., display=False): + """ + Return an array of edge times for each imaging bout corresponding to a raw_imaging_data + collection. + + Parameters + ---------- + frame_times : numpy.array + An array of all neural frame count times. + collections : iterable of str + A set of raw_imaging_data collections, used to extract selected imaging periods. + events : pandas.DataFrame + A table of UDP event times, corresponding to times when recordings start and end. + min_gap : float + If start or end events not present, split bouts by finding gaps larger than this value. + display : bool + If true, plot the detected bout edges and raw frame times. + + Returns + ------- + numpy.array + An array of imaging bout intervals. 
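When no UDP events are available, `get_bout_edges` (body below) splits imaging bouts purely on gaps in the frame clock. That branch reduces to a few NumPy lines, shown here on toy data with the default `min_gap` of one second:

```python
import numpy as np

frame_times = np.array([0.0, 0.1, 0.2, 5.0, 5.1, 5.2])  # two bouts, 4.8 s apart
min_gap = 1.
idx = np.where(np.diff(frame_times) > min_gap)[0]
starts = np.r_[frame_times[0], frame_times[idx + 1]]
ends = np.r_[frame_times[idx], frame_times[-1]]
edges = np.c_[starts, ends]
assert edges.tolist() == [[0.0, 0.2], [5.0, 5.2]]
```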
+ """ + if events is None or events.empty: + # No UDP events to mark blocks so separate based on gaps in frame rate + idx = np.where(np.diff(frame_times) > min_gap)[0] + starts = np.r_[frame_times[0], frame_times[idx + 1]] + ends = np.r_[frame_times[idx], frame_times[-1]] + else: + # Split using Exp/BlockStart and Exp/BlockEnd times + _, subject, date, _ = session_path_parts(self.session_path) + pattern = rf'(Exp|Block)%s\s{subject}\s{date.replace("-", "")}\s\d+' + + # Get start times + UDP_start = events[events['info'].str.match(pattern % 'Start')] + if len(UDP_start) > 1 and UDP_start.loc[0, 'info'].startswith('Exp'): + # Use ExpStart instead of first bout start + UDP_start = UDP_start.copy().drop(1) + # Use ExpStart/End instead of first/last BlockStart/End + starts = frame_times[[np.where(frame_times >= t)[0][0] for t in UDP_start.time]] + + # Get end times + UDP_end = events[events['info'].str.match(pattern % 'End')] + if len(UDP_end) > 1 and UDP_end['info'].values[-1].startswith('Exp'): + # Use last BlockEnd instead of ExpEnd + UDP_end = UDP_end.copy().drop(UDP_end.index[-1]) + if not UDP_end.empty: + ends = frame_times[[np.where(frame_times <= t)[0][-1] for t in UDP_end.time]] + else: + # Get index of last frame to occur within a second of the previous frame + consec = np.r_[np.diff(frame_times) > min_gap, True] + idx = [np.where(np.logical_and(frame_times > t, consec))[0][0] for t in starts] + ends = frame_times[idx] + + # Remove any missing imaging bout collections + edges = np.c_[starts, ends] + if collections: + if edges.shape[0] > len(collections): + # Remove any bouts that correspond to a skipped collection + # e.g. if {raw_imaging_data_00, raw_imaging_data_02}, remove middle bout + include = sorted(int(c.rsplit('_', 1)[-1]) for c in collections) + edges = edges[include, :] + elif edges.shape[0] < len(collections): + raise ValueError('More raw imaging folders than detected bouts') + + if display: + _, ax = plt.subplots(1) + ax.step(frame_times, np.arange(frame_times.size), label='frame times', color='k', ) + vertical_lines(edges[:, 0], ax=ax, ymin=0, ymax=frame_times.size, label='bout start', color='b') + vertical_lines(edges[:, 1], ax=ax, ymin=0, ymax=frame_times.size, label='bout end', color='orange') + if edges.shape[0] != len(starts): + vertical_lines(np.setdiff1d(starts, edges[:, 0]), ax=ax, ymin=0, ymax=frame_times.size, + label='missing bout start', linestyle=':', color='b') + vertical_lines(np.setdiff1d(ends, edges[:, 1]), ax=ax, ymin=0, ymax=frame_times.size, + label='missing bout end', linestyle=':', color='orange') + ax.set_xlabel('Time / s'), ax.set_ylabel('Frame #'), ax.legend(loc='lower right') + return edges + + @staticmethod + def get_timeshifts(raw_imaging_meta): + """ + Calculate the time shifts for each field of view (FOV) and the relative offsets for each + scan line. + + Parameters + ---------- + raw_imaging_meta : dict + Extracted ScanImage meta data (_ibl_rawImagingData.meta.json). + + Returns + ------- + list of numpy.array + A list of arrays, one per FOV, containing indices of each image scan line. + numpy.array + An array of FOV time offsets (one value per FOV) relative to each frame acquisition + time. + list of numpy.array + A list of arrays, one per FOV, containing the time offsets for each scan line, relative + to each FOV offset. 
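A worked toy example of the bookkeeping implemented below may help: with two 4-line FOVs in an 11-line raw frame there are 3 flyback lines between them, so the second FOV begins at raw line index 7 and is acquired 7 line periods after the frame timestamp. The numbers here are invented for illustration only:

```python
import numpy as np

n_lines = np.array([4, 4])       # lines (Y pixels) per FOV
height, line_period = 11, 1e-3   # raw frame height; 1 ms per line (hypothetical)
n_lines_per_gap = int((height - n_lines.sum()) / (len(n_lines) - 1))        # 3 flyback lines
fov_start_idx = np.insert(np.cumsum(n_lines[:-1] + n_lines_per_gap), 0, 0)  # [0, 7]
fov_time_shifts = fov_start_idx * line_period   # second FOV lags the frame time by 7 ms
line_time_shifts = [np.arange(0, n) * line_period for n in n_lines]  # within-FOV offsets
assert np.allclose(fov_time_shifts, [0, 0.007])
```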
+        """
+        FOVs = raw_imaging_meta['FOV']
+
+        # Double-check meta extracted properly
+        raw_meta = raw_imaging_meta['rawScanImageMeta']
+        artist = raw_meta['Artist']
+        assert sum(x['enable'] for x in artist['RoiGroups']['imagingRoiGroup']['rois']) == len(FOVs)
+
+        # Number of scan lines per FOV, i.e. number of Y pixels / image height
+        n_lines = np.array([x['nXnYnZ'][1] for x in FOVs])
+        n_valid_lines = np.sum(n_lines)  # Number of lines imaged excluding flybacks
+        # Number of lines during flyback
+        n_lines_per_gap = int((raw_meta['Height'] - n_valid_lines) / (len(FOVs) - 1))
+        # The start and end indices of each FOV in the raw images
+        fov_start_idx = np.insert(np.cumsum(n_lines[:-1] + n_lines_per_gap), 0, 0)
+        fov_end_idx = fov_start_idx + n_lines
+        line_period = raw_imaging_meta['scanImageParams']['hRoiManager']['linePeriod']
+
+        line_indices = []
+        fov_time_shifts = fov_start_idx * line_period
+        line_time_shifts = []
+
+        for ln, s, e in zip(n_lines, fov_start_idx, fov_end_idx):
+            line_indices.append(np.arange(s, e))
+            line_time_shifts.append(np.arange(0, ln) * line_period)
+
+        return line_indices, fov_time_shifts, line_time_shifts
diff --git a/ibllib/io/extractors/mesoscope/README.md b/ibllib/io/extractors/mesoscope/README.md
new file mode 100644
index 000000000..379a71bc1
--- /dev/null
+++ b/ibllib/io/extractors/mesoscope/README.md
@@ -0,0 +1,9 @@
+# File fixtures
+### surface_triangulation.npz
+A triangle mesh of the smoothed convex hull of the dorsal surface of the mouse brain, generated from
+the 2017 Allen 10um annotation volume.
+
+- **points** - An N by 3 integer array of x, y and z coordinates, defining the vertices of the triangle mesh. These are in um relative to the IBL bregma coordinates.
+- **connectivity_list** - An N by 3 integer array of vertex indices, each row defining the three vertices that form one triangle.
+
+This triangulation was generated in MATLAB.
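For reference, the two arrays documented in this README can be inspected directly with NumPy (a sketch; the relative path assumes a source checkout of the repository):

```python
import numpy as np

mesh = np.load('ibllib/io/extractors/mesoscope/surface_triangulation.npz')
points = mesh['points']                # (N, 3) vertex coordinates, um from bregma
triangles = mesh['connectivity_list']  # (M, 3) indices into `points`, one triangle per row
print(points.shape, triangles.shape)
```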
diff --git a/ibllib/io/extractors/mesoscope/surface_triangulation.npz b/ibllib/io/extractors/mesoscope/surface_triangulation.npz new file mode 100644 index 000000000..d2e2f7aee Binary files /dev/null and b/ibllib/io/extractors/mesoscope/surface_triangulation.npz differ diff --git a/ibllib/io/extractors/task_extractor_map.json b/ibllib/io/extractors/task_extractor_map.json new file mode 100644 index 000000000..22d0eebb0 --- /dev/null +++ b/ibllib/io/extractors/task_extractor_map.json @@ -0,0 +1,5 @@ +{"ephysChoiceWorld": "EphysTrials", + "_biasedChoiceWorld": "BiasedTrials", + "_habituationChoiceWorld": "HabituationTrials", + "_trainingChoiceWorld": "TrainingTrials" +} diff --git a/ibllib/io/extractors/training_trials.py b/ibllib/io/extractors/training_trials.py index e18f9e30a..dc13ed7dd 100644 --- a/ibllib/io/extractors/training_trials.py +++ b/ibllib/io/extractors/training_trials.py @@ -9,6 +9,7 @@ _logger = logging.getLogger(__name__) +__all__ = ['TrainingTrials', 'extract_all'] class FeedbackType(BaseBpodTrialsExtractor): @@ -51,10 +52,18 @@ class ContrastLR(BaseBpodTrialsExtractor): var_names = ('contrastLeft', 'contrastRight') def _extract(self): - contrastLeft = np.array([t['contrast']['value'] if np.sign( - t['position']) < 0 else np.nan for t in self.bpod_trials]) - contrastRight = np.array([t['contrast']['value'] if np.sign( - t['position']) > 0 else np.nan for t in self.bpod_trials]) + # iblrigv8 has only flat values in the trial table so we can switch to parquet table when times come + # and all the clutter here would fit in ~30 lines + if isinstance(self.bpod_trials[0]['contrast'], float): + contrastLeft = np.array([t['contrast'] if np.sign( + t['position']) < 0 else np.nan for t in self.bpod_trials]) + contrastRight = np.array([t['contrast'] if np.sign( + t['position']) > 0 else np.nan for t in self.bpod_trials]) + else: + contrastLeft = np.array([t['contrast']['value'] if np.sign( + t['position']) < 0 else np.nan for t in self.bpod_trials]) + contrastRight = np.array([t['contrast']['value'] if np.sign( + t['position']) > 0 else np.nan for t in self.bpod_trials]) return contrastLeft, contrastRight @@ -111,9 +120,13 @@ class RepNum(BaseBpodTrialsExtractor): var_names = 'repNum' def _extract(self): - trial_repeated = np.array( - [t['contrast']['type'] == 'RepeatContrast' for t in self.bpod_trials]) - trial_repeated = trial_repeated.astype(int) + def get_trial_repeat(trial): + if 'debias_trial' in trial: + return trial['debias_trial'] + else: + return trial['contrast']['type'] == 'RepeatContrast' + + trial_repeated = np.array(list(map(get_trial_repeat, self.bpod_trials))).astype(int) repNum = trial_repeated.copy() c = 0 for i in range(len(trial_repeated)): @@ -684,6 +697,24 @@ def _extract(self, extractor_classes=None, **kwargs): return table.to_df(), *(out.pop(x) for x in self.var_names if x != 'table') +class TrainingTrials(BaseBpodTrialsExtractor): + save_names = ('_ibl_trials.repNum.npy', '_ibl_trials.goCueTrigger_times.npy', '_ibl_trials.stimOnTrigger_times.npy', None, + None, None, None, '_ibl_trials.table.pqt', None, None, '_ibl_wheel.timestamps.npy', '_ibl_wheel.position.npy', + '_ibl_wheelMoves.intervals.npy', '_ibl_wheelMoves.peakAmplitude.npy', None, None, None, None, None) + var_names = ('repNum', 'goCueTrigger_times', 'stimOnTrigger_times', 'itiIn_times', 'stimOffTrigger_times', + 'stimFreezeTrigger_times', 'errorCueTrigger_times', 'table', 'stimOff_times', 'stimFreeze_times', + 'wheel_timestamps', 'wheel_position', 'wheel_moves_intervals', 
'wheel_moves_peak_amplitude', + 'peakVelocity_times', 'is_final_movement', 'phase', 'position', 'quiescence') + + def _extract(self): + base = [RepNum, GoCueTriggerTimes, StimOnTriggerTimes, ItiInTimes, StimOffTriggerTimes, StimFreezeTriggerTimes, + ErrorCueTriggerTimes, TrialsTable, PhasePosQuiescence] + out, _ = run_extractor_classes( + base, session_path=self.session_path, bpod_trials=self.bpod_trials, settings=self.settings, save=False, + task_collection=self.task_collection) + return tuple(out.pop(x) for x in self.var_names) + + def extract_all(session_path, save=False, bpod_trials=None, settings=None, task_collection='raw_behavior_data', save_path=None): """Extract trials and wheel data. @@ -711,19 +742,15 @@ def extract_all(session_path, save=False, bpod_trials=None, settings=None, task_ if settings is None or settings['IBLRIG_VERSION_TAG'] == '': settings = {'IBLRIG_VERSION_TAG': '100.0.0'} - base = [RepNum, GoCueTriggerTimes] # Version check if parse_version(settings['IBLRIG_VERSION_TAG']) >= parse_version('5.0.0'): # We now extract a single trials table - base.extend([ - StimOnTriggerTimes, ItiInTimes, StimOffTriggerTimes, StimFreezeTriggerTimes, - ErrorCueTriggerTimes, TrialsTable, PhasePosQuiescence - ]) + base = [TrainingTrials] else: - base.extend([ - Intervals, Wheel, FeedbackType, ContrastLR, ProbabilityLeft, Choice, IncludedTrials, + base = [ + RepNum, GoCueTriggerTimes, Intervals, Wheel, FeedbackType, ContrastLR, ProbabilityLeft, Choice, IncludedTrials, StimOnTimes_deprecated, RewardVolume, FeedbackTimes, ResponseTimes, GoCueTimes, PhasePosQuiescence - ]) + ] out, fil = run_extractor_classes(base, save=save, session_path=session_path, bpod_trials=bpod_trials, settings=settings, task_collection=task_collection, path_out=save_path) diff --git a/ibllib/io/extractors/training_wheel.py b/ibllib/io/extractors/training_wheel.py index b62b20469..617b5f1df 100644 --- a/ibllib/io/extractors/training_wheel.py +++ b/ibllib/io/extractors/training_wheel.py @@ -352,8 +352,7 @@ def extract_first_movement_times(wheel_moves, trials, min_qt=None): all_move_onsets = wheel_moves['intervals'][:, 0] # Iterate over trials, extracting onsets approx. 
within closed-loop period
     cwarn = 0
-    for i, (t1, t2) in enumerate(zip(trials['goCue_times'] - min_qt,
-                                     trials['feedback_times'])):
+    for i, (t1, t2) in enumerate(zip(trials['goCue_times'] - min_qt, trials['feedback_times'])):
         if ~np.isnan(t2 - t1):  # If both timestamps defined
             mask = (all_move_onsets > t1) & (all_move_onsets < t2)
             if np.any(mask):  # If any onsets for this trial
diff --git a/ibllib/io/extractors/video_motion.py b/ibllib/io/extractors/video_motion.py
index 0afb3dfa1..ef75187b5 100644
--- a/ibllib/io/extractors/video_motion.py
+++ b/ibllib/io/extractors/video_motion.py
@@ -193,7 +193,7 @@ def align_motion(self, period=(-np.inf, np.inf), side='left', sd_thresh=10, disp
         x = camera_times[cam_mask]
         Fs = 1000
         pos, t = wh.interpolate_position(wheel.timestamps, wheel.position, freq=Fs)
-        v, _ = wh.velocity_smoothed(pos, Fs)
+        v, _ = wh.velocity_filtered(pos, Fs)
         interp_mask = self.alignment.to_mask(t)
         # Convert to normalized speed
         xs = np.unique([find_nearest(t[interp_mask], ts) for ts in x])
diff --git a/ibllib/io/globus.py b/ibllib/io/globus.py
index b3da6f5a8..15bd8f9a1 100644
--- a/ibllib/io/globus.py
+++ b/ibllib/io/globus.py
@@ -1,3 +1,4 @@
+"""TODO: This entire module may be removed in favour of one.remote.globus"""
 import re
 import sys
 import os
@@ -27,6 +28,7 @@ def as_globus_path(path):
     # A globus path
     >>> as_globus_path('/E/FlatIron/integration')
     >>> '/E/FlatIron/integration'
+    TODO Remove in favour of one.remote.globus.as_globus_path
     """
     path = str(path)
     if (
@@ -42,7 +44,7 @@ def as_globus_path(path):
 def _login(globus_client_id, refresh_tokens=False):
-
+    # TODO Import from one.remote.globus
     client = globus.NativeAppAuthClient(globus_client_id)
     client.oauth2_start_flow(refresh_tokens=refresh_tokens)
@@ -62,6 +64,7 @@ def _login(globus_client_id, refresh_tokens=False):
 def login(globus_client_id):
+    # TODO Import from one.remote.globus
     token = _login(globus_client_id, refresh_tokens=False)
     authorizer = globus.AccessTokenAuthorizer(token['access_token'])
     tc = globus.TransferClient(authorizer=authorizer)
@@ -69,6 +72,7 @@ def login(globus_client_id):
 def setup(globus_client_id, str_app='globus/default'):
+    # TODO Import from one.remote.globus
     # Lookup and manage consents there
     # https://auth.globus.org/v2/web/consents
     gtok = _login(globus_client_id, refresh_tokens=True)
@@ -76,6 +80,7 @@ def setup(globus_client_id, str_app='globus/default'):
 def login_auto(globus_client_id, str_app='globus/default'):
+    # TODO Import from one.remote.globus
     token = params.read(str_app, {})
     required_fields = {'refresh_token', 'access_token', 'expires_at_seconds'}
     if not (token and required_fields.issubset(token.as_dict())):
@@ -87,6 +92,7 @@ def login_auto(globus_client_id, str_app='globus/default'):
 def get_local_endpoint():
+    # TODO Remove in favour of one.remote.globus.get_local_endpoint_id
     if sys.platform == 'win32' or sys.platform == 'cygwin':
         id_path = Path(os.environ['LOCALAPPDATA']).joinpath("Globus Connect")
     else:
diff --git a/ibllib/io/raw_daq_loaders.py b/ibllib/io/raw_daq_loaders.py
index 341838196..add980130 100644
--- a/ibllib/io/raw_daq_loaders.py
+++ b/ibllib/io/raw_daq_loaders.py
@@ -1,27 +1,34 @@
-"""Loader functions for various DAQ data formats"""
+"""Loader functions for various DAQ data formats."""
 from pathlib import Path
 import logging
-from collections import OrderedDict
+from collections import OrderedDict, defaultdict
+import json

 import nptdms
 import numpy as np
 import neurodsp.utils
+import one.alf.io as alfio
+import one.alf.exceptions as alferr
+from
one.alf.spec import to_alf
+
+from ibllib.io.extractors.default_channel_maps import all_default_labels

 logger = logging.getLogger(__name__)

 def load_raw_daq_tdms(path) -> 'nptdms.tdms.TdmsFile':
     """
-    Returns a dict of channel names and values from chmap
+    Load a raw DAQ TDMS file.

     Parameters
     ----------
-    path
-    chmap
+    path : str, pathlib.Path
+        The location of the .tdms file to load.

     Returns
     -------
-
+    nptdms.tdms.TdmsFile
+        The loaded TDMS object.
     """
     from nptdms import TdmsFile
     # If path is a directory, glob for a tdms file
@@ -35,14 +42,15 @@ def load_raw_daq_tdms(path) -> 'nptdms.tdms.TdmsFile':
     return TdmsFile.read(file_path)

-def load_channels_tdms(path, chmap=None, return_fs=False):
+def load_channels_tdms(path, chmap=None):
     """
     Note: This currently cannot deal with arbitrary groups.

     Parameters
     ----------
-    path
+    path : str, pathlib.Path
+        The file or folder path of the raw TDMS data file.
     chmap: dictionary mapping devices names to channel codes: example {"photometry": 'AI0', 'bpod': 'AI1'}
     if None, will read all of available channel from the DAQ
@@ -64,6 +72,7 @@ def _load_digital_channels(data_file, group='Digital', ch='AuxPort'):
     data_file = load_raw_daq_tdms(path)
     data = {}
     digital_channels = None
+    fs = np.nan
     if chmap:
         for name, ch in chmap.items():
             if ch.lower()[0] == 'a':
@@ -84,14 +93,12 @@ def _load_digital_channels(data_file, group='Digital', ch='AuxPort'):
             else:
                 data[ch] = data_file[group][ch.upper()].data
     fs = data_file[group].properties['ScanRate']  # from daqami it's unclear that fs could be set per channel
-    if return_fs:
-        return data, fs
-    else:
-        return data
+    return data, fs

 def load_sync_tdms(path, sync_map, fs=None, threshold=2.5, floor_percentile=10):
     """
+    Load sync channels from a raw DAQ TDMS file.

     Parameters
     ----------
@@ -109,7 +116,11 @@ def load_sync_tdms(path, sync_map, fs=None, threshold=2.5, floor_percentile=10):

     Returns
     -------
-
+    one.alf.io.AlfBunch
+        A dictionary with keys ('times', 'polarities', 'channels'), containing the sync pulses and
+        the corresponding channel numbers.
+    dict
+        A map of channel names and their corresponding indices.
     """
     data_file = load_raw_daq_tdms(path)
     sync = {}
@@ -142,3 +153,242 @@ def load_sync_tdms(path, sync_map, fs=None, threshold=2.5, floor_percentile=10):
     sync_map = {v.lower(): k for k, v in sync_map.items()}  # turn inside-out
     chmap = {sync_map[k]: v for k, v in channel_ids.items()}
     return sync, chmap
+
+
+def correct_counter_discontinuities(raw, overflow=2**32):
+    """
+    Correct over- and underflow wrap around values for DAQ counter channel.
+
+    Parameters
+    ----------
+    raw : numpy.array
+        An array of counts.
+    overflow : int
+        The maximum representable value of the data before it was cast to float64.
+
+    Returns
+    -------
+    numpy.array
+        An array of counts with the over- and underflow discontinuities removed.
+    """
+    flowmax = overflow - 1
+    d = np.diff(raw)
+    # correct for counter flow discontinuities
+    d[d >= flowmax] = d[d >= flowmax] - flowmax
+    d[d <= -flowmax] = d[d <= -flowmax] + flowmax
+    return np.cumsum(np.r_[0, d]) + raw[0]  # back to position
+
+
+def load_timeline_sync_and_chmap(alf_path, chmap=None, timeline=None, save=True):
+    """Load the sync and channel map from disk.
+
+    If the sync files do not exist, they are extracted from the raw DAQ data and saved.
+
+    Parameters
+    ----------
+    alf_path : str, pathlib.Path
+        The folder containing the sync file and raw DAQ data.
+ chmap : dict + An optional channel map, otherwise extracted based on the union of timeline meta data and + default extractor channel map names. + timeline : dict + An optional timeline object, otherwise is loaded from alf_path. + save : bool + If true, save the sync files if they don't already exist. + + Returns + ------- + one.alf.io.AlfBunch + A dictionary with keys ('times', 'polarities', 'channels'), containing the sync pulses and + the corresponding channel numbers. + dict, optional + A map of channel names and their corresponding indices for sync channels, if chmap is None. + """ + if not chmap: + if not timeline: + meta = alfio.load_object(alf_path, 'DAQdata', namespace='timeline', attribute='meta')['meta'] + else: + meta = timeline['meta'] + chmap = timeline_meta2chmap(meta, include_channels=all_default_labels()) + try: + sync = alfio.load_object(alf_path, 'sync') + except alferr.ALFObjectNotFound: + if not timeline: + timeline = alfio.load_object(alf_path, 'DAQdata') + sync = extract_sync_timeline(timeline, chmap=chmap) + if save: + alfio.save_object_npy(alf_path, sync, object='sync', namespace='timeline') + return sync, chmap + + +def extract_sync_timeline(timeline, chmap=None, floor_percentile=10, threshold=None): + """ + Load sync channels from a timeline object. + + Note: Because the scan frequency is typically faster than the sample rate, the position and + edge count channels may detect more than one front between samples. Therefore for these, the + raw data is more accurate than the extracted polarities. + + Parameters + ---------- + timeline : dict, str, pathlib.Path + A timeline object or the file or folder path of the _timeline_DAQdata files. + chmap : dict + A map of channel names and channel IDs. + floor_percentile : float + 10% removes the percentile value of the analog trace before thresholding. This is to avoid + DC offset drift. + threshold : float, dict of str: float + The threshold for applying to analogue channels. If None, take mean after subtracting + floor percentile offset. + + Returns + ------- + one.alf.io.AlfBunch + A dictionary with keys ('times', 'polarities', 'channels'), containing the sync pulses and + the corresponding channel numbers. + dict, optional + A map of channel names and their corresponding indices for sync channels, if chmap is None. + """ + if isinstance(timeline, (str, Path)): + timeline = alfio.load_object(timeline, 'DAQdata', namespace='timeline') + assert timeline.keys() >= {'timestamps', 'raw', 'meta'}, 'Timeline object missing attributes' + + # If no channel map was passed, load it from 'wiring' file, or extract from meta file + return_chmap = chmap is None + chmap = chmap or timeline.get('wiring') or timeline_meta2chmap(timeline['meta']) + + # Initialize sync object + sync = alfio.AlfBunch((k, np.array([], dtype=d)) for k, d in + (('times', 'f'), ('channels', 'u1'), ('polarities', 'i1'))) + for label, i in chmap.items(): + try: + info = next(x for x in timeline['meta']['inputs'] if x['name'].lower() == label.lower()) + except StopIteration: + logger.warning('sync channel "%s" not found', label) + continue + raw = timeline['raw'][:, info['arrayColumn'] - 1] # -1 because MATLAB indexes from 1 + if info['measurement'] == 'Voltage': + # Get TLLs by applying a threshold to the diff of voltage samples + offset = np.percentile(raw, floor_percentile, axis=0) + daqID = info['daqChannelID'] + logger.debug(f'{label} ({daqID}): estimated analogue channel DC Offset approx. 
{np.mean(offset):.2f}') + step = threshold.get(label) if isinstance(threshold, dict) else threshold + if step is None: + step = np.max(raw - offset) / 2 + iup = neurodsp.utils.rises(raw - offset, step=step, analog=True) + idown = neurodsp.utils.falls(raw - offset, step=step, analog=True) + pol = np.r_[np.ones_like(iup), -np.ones_like(idown)].astype('i1') + ind = np.r_[iup, idown] + + sync.polarities = np.concatenate((sync.polarities, pol)) + elif info['measurement'] == 'EdgeCount': + # Monotonically increasing values; extract indices where delta == 1 + raw = correct_counter_discontinuities(raw) + ind, = np.where(np.diff(raw)) + ind += 1 + sync.polarities = np.concatenate((sync.polarities, np.ones_like(ind, dtype='i1'))) + elif info['measurement'] == 'Position': + # Bidirectional; extract indices where delta != 0 + raw = correct_counter_discontinuities(raw) + d = np.diff(raw) + ind, = np.where(d.astype(int)) + sync.polarities = np.concatenate((sync.polarities, np.sign(d[ind]).astype('i1'))) + ind += 1 + else: + raise NotImplementedError(f'{info["measurement"]} sync extraction') + # Append timestamps of indices and channel index to sync arrays + sync.times = np.concatenate((sync.times, timeline['timestamps'][ind])) + sync.channels = np.concatenate((sync.channels, np.full(ind.shape, i, dtype='u1'))) + + # Sort arrays by time + assert sync.check_dimensions == 0 + t_ind = np.argsort(sync.times) + for k in sync: + sync[k] = sync[k][t_ind] + if return_chmap: + return sync, chmap + else: + return sync + + +def timeline_meta2wiring(path, save=False): + """ + Given a timeline meta data object, return a dictionary of wiring info. + + Parameters + ---------- + path : str, pathlib.Path + The path of the timeline meta file, _timeline_DAQdata.meta. + save : bool + If true, save the timeline wiring file in the same location as the meta file, + _timeline_DAQData.wiring.json. + + Returns + ------- + dict + A dictionary with base keys {'SYSTEM', 'SYNC_WIRING_DIGITAL', 'SYNC_WIRING_ANALOG'}, the + latter of which contain maps of channel names and their IDs. + pathlib.Path + If save=True, returns the path of the wiring file. + """ + meta = alfio.load_object(path, 'DAQdata', namespace='timeline', attribute='meta').get('meta') + assert meta, 'No meta data in timeline object' + wiring = defaultdict(dict, SYSTEM='timeline') + for input in meta['inputs']: + key = 'SYNC_WIRING_' + ('ANALOG' if input['measurement'] == 'Voltage' else 'DIGITAL') + wiring[key][input['daqChannelID']] = input['name'] + if save: + out_path = Path(path) / to_alf('DAQ data', 'wiring', 'json', namespace='timeline') + with open(out_path, 'w') as fp: + json.dump(wiring, fp) + return dict(wiring), out_path + return dict(wiring) + + +def timeline_meta2chmap(meta, exclude_channels=None, include_channels=None): + """ + Convert a timeline meta object to a sync channel map. + + Parameters + ---------- + meta : dict + A loaded timeline metadata file, i.e. _timeline_DAQdata.meta. + exclude_channels : list + An optional list of channels to exclude from the channel map. + include_channels : list + An optional list of channels to include from the channel map, takes priority over the + exclude list. + + Returns + ------- + dict + A map of channel names and their corresponding indices for sync channels. 
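A minimal usage sketch for `timeline_meta2chmap` (the body follows below), using a hand-written meta dict with the documented 'inputs' structure:

```python
from ibllib.io.raw_daq_loaders import timeline_meta2chmap

meta = {'inputs': [{'name': 'bpod', 'arrayColumn': 1},
                   {'name': 'audio', 'arrayColumn': 2},
                   {'name': 'rotary_encoder', 'arrayColumn': 3}]}
assert timeline_meta2chmap(meta, exclude_channels=['audio']) == {'bpod': 1, 'rotary_encoder': 3}
assert timeline_meta2chmap(meta, include_channels=['bpod']) == {'bpod': 1}
```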
+ """ + chmap = {} + for input in meta.get('inputs', []): + if (include_channels is not None and input['name'] not in include_channels) or \ + (exclude_channels and input['name'] in exclude_channels): + continue + chmap[input['name']] = input['arrayColumn'] + return chmap + + +def timeline_get_channel(timeline, channel_name): + """ + Given a timeline object, returns the vector of values recorded from a given channel name. + + Parameters + ---------- + timeline : one.alf.io.AlfBunch + A loaded timeline object. + channel_name : str + The name of a channel to extract. + + Returns + ------- + numpy.array + The channel data. + """ + idx = next(ch['arrayColumn'] for ch in timeline['meta']['inputs'] if ch['name'] == channel_name) + return timeline['raw'][:, idx - 1] # -1 because MATLAB indices start from 1, not 0 diff --git a/ibllib/io/raw_data_loaders.py b/ibllib/io/raw_data_loaders.py index f8359552c..200b8ca15 100644 --- a/ibllib/io/raw_data_loaders.py +++ b/ibllib/io/raw_data_loaders.py @@ -321,13 +321,19 @@ def _read_settings_json_compatibility_enforced(settings): else: with open(settings) as js: md = json.load(js) - if 'IS_MOCK' not in md.keys(): + if 'IS_MOCK' not in md: md['IS_MOCK'] = False if 'IBLRIG_VERSION_TAG' not in md.keys(): md['IBLRIG_VERSION_TAG'] = md.get('IBLRIG_VERSION', '') # 2018-12-05 Version 3.2.3 fixes (permanent fixes in IBL_RIG from 3.2.4 on) if md['IBLRIG_VERSION_TAG'] == '': pass + elif parse_version(md.get('IBLRIG_VERSION_TAG')) >= parse_version('8.0.0'): + md['SESSION_NUMBER'] = str(md['SESSION_NUMBER']).zfill(3) + md['PYBPOD_BOARD'] = md['RIG_NAME'] + md['PYBPOD_CREATOR'] = (md['ALYX_USER'], '') + md['SESSION_DATE'] = md['SESSION_START_TIME'][:10] + md['SESSION_DATETIME'] = md['SESSION_START_TIME'] elif parse_version(md.get('IBLRIG_VERSION_TAG')) <= parse_version('3.2.3'): if 'LAST_TRIAL_DATA' in md.keys(): md.pop('LAST_TRIAL_DATA') @@ -341,7 +347,7 @@ def _read_settings_json_compatibility_enforced(settings): dt = dateparser.parse(md['SESSION_DATETIME']) md['SESSION_DATETIME'] = date2isostr(dt) # add the weight key if it doesn't already exist - if 'SUBJECT_WEIGHT' not in md.keys(): + if 'SUBJECT_WEIGHT' not in md: md['SUBJECT_WEIGHT'] = None return md @@ -408,7 +414,7 @@ def load_encoder_events(session_path, task_collection='raw_behavior_data', setti path = next(path.glob("_iblrig_encoderEvents.raw*.ssv"), None) if not settings: settings = load_settings(session_path, task_collection=task_collection) - if settings is None or settings['IBLRIG_VERSION_TAG'] == '': + if settings is None or not settings.get('IBLRIG_VERSION_TAG'): settings = {'IBLRIG_VERSION_TAG': '100.0.0'} # auto-detect old files when version is not labeled with open(path) as fid: @@ -512,7 +518,7 @@ def load_encoder_positions(session_path, task_collection='raw_behavior_data', se path = next(path.glob("_iblrig_encoderPositions.raw*.ssv"), None) if not settings: settings = load_settings(session_path, task_collection=task_collection) - if settings is None or settings['IBLRIG_VERSION_TAG'] == '': + if settings is None or not settings.get('IBLRIG_VERSION_TAG'): settings = {'IBLRIG_VERSION_TAG': '100.0.0'} # auto-detect old files when version is not labeled with open(path) as fid: diff --git a/ibllib/io/session_params.py b/ibllib/io/session_params.py index d9ea52eae..a15802a3f 100644 --- a/ibllib/io/session_params.py +++ b/ibllib/io/session_params.py @@ -27,7 +27,7 @@ from datetime import datetime import logging from pathlib import Path -import warnings +from copy import deepcopy from one.converters import 
ConversionMixin from pkg_resources import parse_version @@ -52,7 +52,7 @@ def write_yaml(file_path, data): """ file_path.parent.mkdir(exist_ok=True, parents=True) - with open(file_path, 'w+') as fp: + with open(file_path, 'w') as fp: yaml.safe_dump(data, fp) @@ -75,7 +75,7 @@ def _patch_file(data: dict) -> dict: _logger.warning('Description file generated by more recent code') elif parse_version(v) <= parse_version('0.1.0'): # Change tasks key from dict to list of dicts - if 'tasks' in data and data['tasks']: + if 'tasks' in data and isinstance(data['tasks'], dict): data['tasks'] = [{k: v} for k, v in data['tasks'].copy().items()] data['version'] = SPEC_VERSION return data @@ -83,9 +83,19 @@ def _patch_file(data: dict) -> dict: def write_params(session_path, data) -> Path: """ - :param session_path : pathlib.Path, str - :param ad: - :return: pathlib.Path: yaml full file path + Write acquisition description data to the session path. + + Parameters + ---------- + session_path : str, pathlib.Path + A session path containing an _ibl_experiment.description.yaml file. + data : dict + The acquisition description data to save + + Returns + ------- + pathlib.Path + The full path to the saved acquisition description. """ yaml_file = Path(session_path).joinpath('_ibl_experiment.description.yaml') write_yaml(yaml_file, data) @@ -134,6 +144,41 @@ def read_params(path) -> dict: return data +def merge_params(a, b, copy=False): + """ + Given two experiment descriptions, update first with fields in second. + + Parameters + ---------- + a : dict + An experiment description dictionary to be updated with fields from `b`. + b : dict + An experiment description dictionary to update `a` with + copy : bool + If true, return a deep copy of `a` instead of updating directly. + + Returns + ------- + dict + A merged dictionary consisting of fields from `a` and `b`. + """ + if copy: + a = deepcopy(a) + for k in b: + if k == 'sync': + assert k not in a or a[k] == b[k], 'multiple sync fields defined' + if isinstance(b[k], list): + prev = a.get(k, []) + # For procedures and projects, remove duplicates + to_add = b[k] if k == 'tasks' else set(prev) ^ set(b[k]) + a[k] = prev + list(to_add) + elif isinstance(b[k], dict): + a[k] = {**a.get(k, {}), **b[k]} + else: # A string + a[k] = b[k] + return a + + def aggregate_device(file_device, file_acquisition_description, unlink=False): """ Add the contents of a device file to the main acquisition description file. 
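The merge semantics of `merge_params` above are worth pinning down: list fields are unioned (except 'tasks', which is concatenated as-is), dict fields are shallow-merged, and a 'sync' field may only be defined once. A toy example, with illustrative field values:

```python
from ibllib.io.session_params import merge_params

a = {'procedures': ['Imaging'], 'sync': {'nidq': {'collection': 'raw_sync_data'}}}
b = {'procedures': ['Imaging', 'Behavior training/tasks'],
     'tasks': [{'choiceWorld': {'collection': 'raw_task_data_00'}}]}
merged = merge_params(a, b, copy=True)
assert merged['procedures'] == ['Imaging', 'Behavior training/tasks']
assert len(merged['tasks']) == 1 and merged['sync'] == a['sync']
```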
@@ -192,19 +237,8 @@ def aggregate_device(file_device, file_acquisition_description, unlink=False): else: acq_desc = {} - # merge the dictionaries - for k in data_device: - if k == 'sync': - assert k not in acq_desc, 'multiple sync fields defined' - if isinstance(data_device[k], list): - prev = acq_desc.get(k, []) - # For procedures and projects, remove duplicates - to_add = data_device[k] if k == 'tasks' else set(prev) ^ set(data_device[k]) - acq_desc[k] = prev + list(to_add) - elif isinstance(data_device[k], dict): - acq_desc[k] = {**acq_desc.get(k, {}), **data_device[k]} - else: # A string - acq_desc[k] = data_device[k] + # merge the dictionaries (NB: acq_desc modified in place) + acq_desc = merge_params(acq_desc, data_device) with open(file_acquisition_description, 'w') as fp: yaml.safe_dump(acq_desc, fp) @@ -370,14 +404,12 @@ def get_collections(sess_params): def iter_dict(d): for k, v in d.items(): - if not v or isinstance(v, str): - continue if isinstance(v, list): for d in filter(lambda x: isinstance(x, dict), v): iter_dict(d) - elif 'collection' in v: + elif isinstance(v, dict) and 'collection' in v: collection_map[k] = v['collection'] - else: + elif isinstance(v, dict): iter_dict(v) iter_dict(sess_params) @@ -389,7 +421,7 @@ def get_video_compressed(sess_params): if not videos: return None - # This is all of nothing, assumes either all videos or not compressed + # This is all or nothing, assumes either all videos or not compressed for key, vals in videos.items(): compressed = vals.get('compressed', False) @@ -416,7 +448,7 @@ def get_remote_stub_name(session_path, device_id=None): Example ------- >>> get_remote_stub_name(Path.home().joinpath('subject', '2020-01-01', '001'), 'host-123') - Path.home() / 'subject/2020-01-01/001/_devices/2020-01-01_1_subject@host-123' + Path.home() / 'subject/2020-01-01/001/_devices/2020-01-01_1_subject@host-123.yaml' """ device_id = device_id or misc.create_basic_transfer_params()['TRANSFER_LABEL'] exp_ref = '{date}_{sequence:d}_{subject:s}'.format(**ConversionMixin.path2ref(session_path)) @@ -424,7 +456,7 @@ def get_remote_stub_name(session_path, device_id=None): return session_path / '_devices' / remote_filename -def prepare_experiment(session_path, acquisition_description=None, local=None, remote=None): +def prepare_experiment(session_path, acquisition_description=None, local=None, remote=None, device_id=None, overwrite=False): """ Copy acquisition description yaml to the server and local transfers folder. @@ -436,24 +468,46 @@ def prepare_experiment(session_path, acquisition_description=None, local=None, r The data to write to the experiment.description.yaml file. local : str, pathlib.Path The path to the local session folders. + >>> C:\iblrigv8_data\cortexlab\Subjects # noqa remote : str, pathlib.Path The path to the remote server session folders. + >>> Y:\Subjects # noqa + device_id : str, optional + A device name, if None the TRANSFER_LABEL parameter is used (defaults to this device's + hostname with a unique numeric ID) + overwrite : bool + If true, overwrite any existing file with the new one, otherwise, update the existing file. """ if not acquisition_description: return + # Determine if user passed in arg for local/remote subject folder locations or pull in from - # local param file or prompt user if missing - params = misc.create_basic_transfer_params(transfers_path=local, remote_data_path=remote) + # local param file or prompt user if missing data. 
+ if local is None or remote is None or device_id is None: + params = misc.create_basic_transfer_params(local_data_path=local, remote_data_path=remote, TRANSFER_LABEL=device_id) + local, device_id = (params['DATA_FOLDER_PATH'], params['TRANSFER_LABEL']) + # if the user provides False as an argument, it means the intent is to not copy anything, this + # won't be preserved by create_basic_transfer_params by default + remote = False if remote is False else params['REMOTE_DATA_FOLDER_PATH'] + + # THis is in the docstring but still, if the session Path is absolute, we need to make it relative + if Path(session_path).is_absolute(): + session_path = Path(*session_path.parts[-3:]) # First attempt to copy to server - remote_device_path = get_remote_stub_name(session_path, params['TRANSFER_LABEL']) - try: - write_yaml(remote_device_path, acquisition_description) - except Exception as ex: - warnings.warn(f'Failed to write data to {remote_device_path}: {ex}') - - # Now copy to local directory - local = params.get('TRANSFERS_PATH', params['DATA_FOLDER_PATH']) - local_device_path = Path(local).joinpath(session_path) - filename = f'_ibl_experiment.description_{params["TRANSFER_LABEL"]}.yaml' - write_yaml(local_device_path.joinpath(filename), acquisition_description) + if remote is not False: + remote_session_path = Path(remote).joinpath(session_path) + remote_device_path = get_remote_stub_name(remote_session_path, device_id=device_id) + previous_description = read_params(remote_device_path) if remote_device_path.exists() and not overwrite else {} + try: + write_yaml(remote_device_path, merge_params(previous_description, acquisition_description)) + _logger.info(f'Written data to remote device at: {remote_device_path}.') + except Exception as ex: + _logger.warning(f'Failed to write data to remote device at: {remote_device_path}. \n {ex}') + + # then create on the local machine + filename = f'_ibl_experiment.description_{device_id}.yaml' + local_device_path = Path(local).joinpath(session_path, filename) + previous_description = read_params(local_device_path) if local_device_path.exists() and not overwrite else {} + write_yaml(local_device_path, merge_params(previous_description, acquisition_description)) + _logger.info(f'Written data to local session at : {local_device_path}.') diff --git a/ibllib/io/video.py b/ibllib/io/video.py index a1835b722..3a60b794e 100644 --- a/ibllib/io/video.py +++ b/ibllib/io/video.py @@ -11,7 +11,7 @@ from one.api import ONE from one import params -VIDEO_LABELS = ('left', 'right', 'body') +VIDEO_LABELS = ('left', 'right', 'body', 'belly') class VideoStreamer: @@ -158,8 +158,8 @@ def url_from_eid(eid, label=None, one=None): :param one: An instance of ONE :return: The URL string if the label is a string, otherwise a dict of urls with labels as keys """ - valid_labels = ('left', 'right', 'body') - if not (label is None or np.isin(label, ('left', 'right', 'body')).all()): + valid_labels = VIDEO_LABELS + if not (label is None or np.isin(label, valid_labels).all()): raise ValueError('labels must be one of ("%s")' % '", "'.join(valid_labels)) one = one or ONE() session_path = one.eid2path(one.to_eid(eid)) @@ -187,7 +187,7 @@ def match(dataset): def label_from_path(video_name): """ - Return the video label, i.e. 'left', 'right' or 'body' + Return the video label, e.g.. 
'left', 'right' or 'body' :param video_name: A file path, URL or file name for the video :return: The string label or None if the video doesn't match """ diff --git a/ibllib/oneibl/registration.py b/ibllib/oneibl/registration.py index 357f9a7d6..0996f01e0 100644 --- a/ibllib/oneibl/registration.py +++ b/ibllib/oneibl/registration.py @@ -110,6 +110,8 @@ def register_session_raw_data(session_path, one=None, overwrite=False, **kwargs) list of dicts, dict A list of newly created Alyx dataset records or the registration data if dry. """ + # Clear rest cache to make sure we have the latest entries + one.alyx.clear_rest_cache() client = IBLRegistrationClient(one) session_path = Path(session_path) eid = one.path2eid(session_path, query_type='remote') # needs to make sure we're up to date @@ -121,7 +123,7 @@ def register_session_raw_data(session_path, one=None, overwrite=False, **kwargs) # unless overwrite is True, filter out the datasets that already exist if not overwrite: # query the database for existing datasets on the session and allowed dataset types - dsets = datasets2records(one.alyx.rest('datasets', 'list', session=eid)) + dsets = datasets2records(one.alyx.rest('datasets', 'list', session=eid, no_cache=True)) already_registered = list(map(session_path.joinpath, dsets['rel_path'])) file_list = list(filter(lambda f: f not in already_registered, file_list)) @@ -194,7 +196,7 @@ def register_session(self, ses_path, file_list=True, projects=None, procedures=N assert len({x['IS_MOCK'] for x in settings}) == 1 assert len({md['PYBPOD_BOARD'] for md in settings}) == 1 assert len({md.get('IBLRIG_VERSION') for md in settings}) == 1 - assert len({md['IBLRIG_VERSION_TAG'] for md in settings}) == 1 + # assert len({md['IBLRIG_VERSION_TAG'] for md in settings}) == 1 # query Alyx endpoints for subject, error if not found subject = self.assert_exists(subject, 'subjects') @@ -250,7 +252,10 @@ def register_session(self, ses_path, file_list=True, projects=None, procedures=N # Submit weights for md in filter(lambda md: md.get('SUBJECT_WEIGHT') is not None, settings): user = md.get('PYBPOD_CREATOR') - user = user[0] if user[0] in users else self.one.alyx.user + if isinstance(user, list): + user = user[0] + if user not in users: + user = self.one.alyx.user self.register_weight(subject['nickname'], md['SUBJECT_WEIGHT'], date_time=md['SESSION_DATETIME'], user=user) else: # if session exists update the JSON field @@ -260,13 +265,12 @@ def register_session(self, ses_path, file_list=True, projects=None, procedures=N _logger.info(session['url'] + ' ') # create associated water administration if not found if not session['wateradmin_session_related'] and any(task_data): - for md, d in zip(settings, task_data): - if d is None: - continue + for md, d in filter(all, zip(settings, task_data)): _, _end_time = _get_session_times(ses_path, md, d) user = md.get('PYBPOD_CREATOR') user = user[0] if user[0] in users else self.one.alyx.user - if (volume := d[-1]['water_delivered'] / 1000) > 0: + volume = d[-1].get('water_delivered', sum(x['reward_amount'] for x in d)) / 1000 + if volume > 0: self.register_water_administration( subject['nickname'], volume, date_time=_end_time or end_time, user=user, session=session['id'], water_type=md.get('REWARD_TYPE') or 'Water') @@ -419,6 +423,7 @@ def _get_session_performance(md, ses_data): int The total number of correct trials across protocols. 
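The rewritten loop in the hunk below normalises trial counts across protocols that number trials from 0 versus from 1; the adjustment is just a conditional +1:

```python
# Minimal check of the +1 normalisation used in the loop below.
data = [{'trial_num': 0}, {'trial_num': 1}, {'trial_num': 2}]   # numbered from 0
n = data[-1]['trial_num'] + int(data[0]['trial_num'] == 0)
assert n == len(data) == 3
data = [{'trial_num': 1}, {'trial_num': 2}, {'trial_num': 3}]   # numbered from 1
n = data[-1]['trial_num'] + int(data[0]['trial_num'] == 0)
assert n == len(data) == 3
```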
""" + if not any(filter(None, ses_data or None)): return None, None @@ -428,20 +433,21 @@ def _get_session_performance(md, ses_data): else: assert isinstance(ses_data, (list, tuple)) and len(ses_data) == len(md) - # For now just remove missing session data, long run move this function into extractors - ses_data = [sd for sd in ses_data if sd] - n_trials = [x[-1]['trial_num'] for x in ses_data] - # checks that the number of actual trials and labeled number of trials check out - assert all(len(x) == n for x, n in zip(ses_data, n_trials)) - # task specific logic - n_correct_trials = [] - for data, proc in zip(ses_data, map(lambda x: x.get('PYBPOD_PROTOCOL', ''), md)): - if 'habituationChoiceWorld' in proc: - n_correct_trials.append(0) + n_trials = [] + n_correct = [] + for data, settings in filter(all, zip(ses_data, md)): + # In some protocols trials start from 0, in others, from 1 + n = data[-1]['trial_num'] + int(data[0]['trial_num'] == 0) # +1 if starts from 0 + n_trials.append(n) + # checks that the number of actual trials and labeled number of trials check out + assert len(data) == n, f'{len(data)} trials in data, however last trial number was {n}' + # task specific logic + if 'habituationChoiceWorld' in settings.get('PYBPOD_PROTOCOL', ''): + n_correct.append(0) else: - n_correct_trials.append(data[-1]['ntrials_correct']) + n_correct.append(data[-1].get('ntrials_correct', sum(x['trial_correct'] for x in data))) - return sum(n_trials), sum(n_correct_trials) + return sum(n_trials), sum(n_correct) def get_local_data_repository(ac): diff --git a/ibllib/pipes/base_tasks.py b/ibllib/pipes/base_tasks.py index 0f04e16a8..9005e365d 100644 --- a/ibllib/pipes/base_tasks.py +++ b/ibllib/pipes/base_tasks.py @@ -1,9 +1,15 @@ +"""Abstract base classes for dynamic pipeline tasks.""" +import logging +from pathlib import Path + +from pkg_resources import parse_version from one.webclient import no_cache +from iblutil.util import flatten from ibllib.pipes.tasks import Task import ibllib.io.session_params as sess_params from ibllib.qc.base import sign_off_dict, SIGN_OFF_CATEGORIES -import logging +from ibllib.io.raw_daq_loaders import load_timeline_sync_and_chmap _logger = logging.getLogger(__name__) @@ -100,6 +106,25 @@ def get_protocol_number(self, number=None, task_protocol=None): assert number is None or isinstance(number, int) return number + @staticmethod + def _spacer_support(settings): + """ + Spacer support was introduced in v7.1 for iblrig v7 and v8.0.1 in v8. + + Parameters + ---------- + settings : dict + The task settings dict. + + Returns + ------- + bool + True if task spacers are to be expected. 
+ """ + v = parse_version + version = v(settings.get('IBLRIG_VERSION_TAG')) + return version not in (v('100.0.0'), v('8.0.0')) and version >= v('7.1.0') + class VideoTask(DynamicTask): @@ -149,7 +174,54 @@ def __init__(self, session_path, **kwargs): self.device_collection = self.get_device_collection('widefield', kwargs.get('device_collection', 'raw_widefield_data')) -class RegisterRawDataTask(DynamicTask): # TODO write test +class MesoscopeTask(DynamicTask): + def __init__(self, session_path, **kwargs): + super().__init__(session_path, **kwargs) + + self.device_collection = self.get_device_collection( + 'mesoscope', kwargs.get('device_collection', 'raw_imaging_data_[0-9]*')) + + def get_signatures(self, **kwargs): + """ + From the template signature of the task, create the exact list of inputs and outputs to expect based on the + available device collection folders + + Necessary because we don't know in advance how many device collection folders ("imaging bouts") to expect + """ + self.session_path = Path(self.session_path) + # Glob for all device collection (raw imaging data) folders + raw_imaging_folders = [p.name for p in self.session_path.glob(self.device_collection)] + # For all inputs and outputs that are part of the device collection, expand to one file per folder + # All others keep unchanged + self.input_files = [(sig[0], sig[1].replace(self.device_collection, folder), sig[2]) + for folder in raw_imaging_folders for sig in self.signature['input_files']] + self.output_files = [(sig[0], sig[1].replace(self.device_collection, folder), sig[2]) + for folder in raw_imaging_folders for sig in self.signature['output_files']] + + def load_sync(self): + """ + Load the sync and channel map. + + This method may be expanded to support other raw DAQ data formats. + + Returns + ------- + one.alf.io.AlfBunch + A dictionary with keys ('times', 'polarities', 'channels'), containing the sync pulses + and the corresponding channel numbers. + dict + A map of channel names and their corresponding indices. + """ + alf_path = self.session_path / self.sync_collection + if self.get_sync_namespace() == 'timeline': + # Load the sync and channel map from the raw DAQ data + sync, chmap = load_timeline_sync_and_chmap(alf_path) + else: + raise NotImplementedError + return sync, chmap + + +class RegisterRawDataTask(DynamicTask): """ Base register raw task. 
To rename files @@ -160,10 +232,10 @@ class RegisterRawDataTask(DynamicTask): # TODO write test priority = 100 job_size = 'small' - def rename_files(self, symlink_old=False, **kwargs): + def rename_files(self, symlink_old=False): - # If no inputs are given, we don't do any renaming - if len(self.input_files) == 0: + # If either no inputs or no outputs are given, we don't do any renaming + if not all(map(len, (self.input_files, self.output_files))): return # Otherwise we need to make sure there is one to one correspondence for renaming files @@ -174,16 +246,82 @@ def rename_files(self, symlink_old=False, **kwargs): old_path = self.session_path.joinpath(old_collection).glob(old_file) old_path = next(old_path, None) # if the file doesn't exist and it is not required we are okay to continue - if not old_path and not required: - continue + if not old_path: + if required: + raise FileNotFoundError(str(old_file)) + else: + continue new_file, new_collection, _ = after new_path = self.session_path.joinpath(new_collection, new_file) + if old_path == new_path: + continue new_path.parent.mkdir(parents=True, exist_ok=True) + _logger.debug('%s -> %s', old_path.relative_to(self.session_path), new_path.relative_to(self.session_path)) old_path.replace(new_path) if symlink_old: old_path.symlink_to(new_path) + def register_snapshots(self, unlink=False, collection=None): + """ + Register any photos in the snapshots folder to the session. Typically imaging users will + take numerous photos for reference. Supported extensions: .jpg, .jpeg, .png, .tif, .tiff + + If a .txt file with the same name exists in the same location, the contents will be added + to the note text. + + Parameters + ---------- + unlink : bool + If true, files are deleted after upload. + collection : str, list, optional + Location of 'snapshots' folder relative to the session path. If None, uses + 'device_collection' attribute (if exists) or root session path. + + Returns + ------- + list of dict + The newly registered Alyx notes. 
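+
+        Examples
+        --------
+        A hypothetical call on an instantiated task, checking the device collections
+        as well as the session root for snapshots folders:
+
+        >>> notes = task.register_snapshots(collection=['raw_imaging_data_*', ''])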
+ """ + collection = getattr(self, 'device_collection', None) if collection is None else collection + collection = collection or '' # If not defined, use no collection + if collection and '*' in collection: + collection = [p.name for p in self.session_path.glob(collection)] + # Check whether folders on disk contain '*'; this is to stop an infinite recursion + assert not any('*' in c for c in collection), 'folders containing asterisks not supported' + # If more that one collection exists, register snapshots in each collection + if collection and not isinstance(collection, str): + return flatten(filter(None, [self.register_snapshots(unlink, c) for c in collection])) + snapshots_path = self.session_path.joinpath(*filter(None, (collection, 'snapshots'))) + if not snapshots_path.exists(): + return + + eid = self.one.path2eid(self.session_path, query_type='remote') + if not eid: + _logger.warning('Failed to upload snapshots: session not found on Alyx') + return + note = dict(user=self.one.alyx.user, content_type='session', object_id=eid, text='') + + notes = [] + exts = ('.jpg', '.jpeg', '.png', '.tif', '.tiff') + for snapshot in filter(lambda x: x.suffix.lower() in exts, snapshots_path.glob('*.*')): + _logger.debug('Uploading "%s"...', snapshot.relative_to(self.session_path)) + if snapshot.with_suffix('.txt').exists(): + with open(snapshot.with_suffix('.txt'), 'r') as txt_file: + note['text'] = txt_file.read().strip() + else: + note['text'] = '' + with open(snapshot, 'rb') as img_file: + files = {'image': img_file} + notes.append(self.one.alyx.rest('notes', 'create', data=note, files=files)) + if unlink: + snapshot.unlink() + # If nothing else in the snapshots folder, delete the folder + if unlink and next(snapshots_path.rglob('*'), None) is None: + snapshots_path.rmdir() + _logger.info('%i snapshots uploaded to Alyx', len(notes)) + return notes + def _run(self, **kwargs): self.rename_files(**kwargs) out_files = [] diff --git a/ibllib/pipes/behavior_tasks.py b/ibllib/pipes/behavior_tasks.py index 1ab43a128..7cc317c28 100644 --- a/ibllib/pipes/behavior_tasks.py +++ b/ibllib/pipes/behavior_tasks.py @@ -1,17 +1,25 @@ +"""Standard task protocol extractor dynamic pipeline tasks.""" +import logging +import traceback + +from pkg_resources import parse_version +import one.alf.io as alfio +from one.alf.files import session_path_parts +from one.api import ONE + +from ibllib.oneibl.registration import get_lab from ibllib.pipes import base_tasks -from ibllib.io.extractors.ephys_passive import PassiveChoiceWorld -from ibllib.io.extractors import bpod_trials +from ibllib.io.raw_data_loaders import load_settings from ibllib.qc.task_extractors import TaskQCExtractor from ibllib.qc.task_metrics import HabituationQC, TaskQC +from ibllib.io.extractors.ephys_passive import PassiveChoiceWorld +from ibllib.io.extractors import bpod_trials from ibllib.io.extractors.base import get_session_extractor_type +from ibllib.io.extractors.bpod_trials import get_bpod_extractor from ibllib.io.extractors.ephys_fpga import extract_all +from ibllib.io.extractors.mesoscope import TimelineTrials from ibllib.pipes import training_status - -import one.alf.io as alfio -from one.alf.files import session_path_parts from ibllib.plots.figures import BehaviourPlots -import logging -import traceback _logger = logging.getLogger('ibllib') @@ -69,9 +77,8 @@ def _run(self, update=True): """ Extracts an iblrig training session """ - save_path = self.session_path.joinpath(self.output_collection) - trials, wheel, output_files = 
bpod_trials.extract_all( - self.session_path, save=True, task_collection=self.collection, save_path=save_path) + extractor = bpod_trials.get_bpod_extractor(self.session_path, task_collection=self.collection) + trials, output_files = extractor.extract(task_collection=self.collection, save=True) if trials is None: return None @@ -80,8 +87,8 @@ def _run(self, update=True): # Run the task QC # Compile task data for QC qc = HabituationQC(self.session_path, one=self.one) - qc.extractor = TaskQCExtractor(self.session_path, one=self.one, sync_collection=self.sync_collection, sync_type=self.sync, - task_collection=self.collection, save_path=save_path) + qc.extractor = TaskQCExtractor(self.session_path, sync_collection=self.sync_collection, + one=self.one, sync_type=self.sync, task_collection=self.collection) namespace = 'task' if self.protocol_number is None else f'task_{self.protocol_number:02}' qc.run(update=update, namespace=namespace) return output_files @@ -161,6 +168,48 @@ def _run(self, **kwargs): return paths +class PassiveTaskTimeline(base_tasks.BehaviourTask, base_tasks.MesoscopeTask): + """TODO should be mesoscope invariant, using wiring file""" + priority = 90 + job_size = 'small' + + @property + def signature(self): + signature = { + 'input_files': [('_iblrig_taskSettings.raw*', self.collection, True), + ('_iblrig_RFMapStim.raw*', self.collection, True), + (f'_{self.sync_namespace}_sync.channels.*', self.sync_collection, False), + (f'_{self.sync_namespace}_sync.polarities.*', self.sync_collection, False), + (f'_{self.sync_namespace}_sync.times.*', self.sync_collection, False)], + 'output_files': [('_ibl_passiveGabor.table.csv', self.output_collection, True), + ('_ibl_passivePeriods.intervalsTable.csv', self.output_collection, True), + ('_ibl_passiveRFM.times.npy', self.output_collection, True), + ('_ibl_passiveStims.table.csv', self.output_collection, True)] + } + return signature + + def _run(self, **kwargs): + """returns a list of pathlib.Paths. 
+ This class exists to load the sync file and set the protocol_number to None + """ + settings = load_settings(self.session_path, self.collection) + version = settings.get('IBLRIG_VERSION_TAG', '100.0.0') + if version == '100.0.0' or parse_version(version) <= parse_version('7.1.0'): + _logger.warning('Protocol spacers not supported; setting protocol_number to None') + self.protocol_number = None + + sync, chmap = self.load_sync() + data, paths = PassiveChoiceWorld(self.session_path).extract( + sync_collection=self.sync_collection, task_collection=self.collection, save=True, + path_out=self.session_path.joinpath(self.output_collection), + protocol_number=self.protocol_number, sync=sync, sync_map=chmap) + + if any(x is None for x in paths): + self.status = -1 + + return paths + + class ChoiceWorldTrialsBpod(base_tasks.BehaviourTask): priority = 90 job_size = 'small' @@ -189,9 +238,9 @@ def _run(self, update=True): """ Extracts an iblrig training session """ - save_path = self.session_path.joinpath(self.output_collection) - trials, wheel, output_files = bpod_trials.extract_all( - self.session_path, save=True, task_collection=self.collection, save_path=save_path) + extractor = bpod_trials.get_bpod_extractor(self.session_path, task_collection=self.collection) + extractor.default_path = self.output_collection + trials, output_files = extractor.extract(task_collection=self.collection, save=True) if trials is None: return None if self.one is None or self.one.offline: @@ -199,14 +248,15 @@ def _run(self, update=True): # Run the task QC # Compile task data for QC type = get_session_extractor_type(self.session_path, task_collection=self.collection) + # FIXME Task data should not need re-extracting if type == 'habituation': qc = HabituationQC(self.session_path, one=self.one) qc.extractor = TaskQCExtractor(self.session_path, one=self.one, sync_collection=self.sync_collection, - sync_type=self.sync, task_collection=self.collection, save_path=save_path) + sync_type=self.sync, task_collection=self.collection) else: # Update wheel data qc = TaskQC(self.session_path, one=self.one) qc.extractor = TaskQCExtractor(self.session_path, one=self.one, sync_collection=self.sync_collection, - sync_type=self.sync, task_collection=self.collection, save_path=save_path) + sync_type=self.sync, task_collection=self.collection) qc.extractor.wheel_encoding = 'X1' # Aggregate and update Alyx QC fields namespace = 'task' if self.protocol_number is None else f'task_{self.protocol_number:02}' @@ -251,7 +301,7 @@ def _behaviour_criterion(self, update=True): """ from brainbox.behavior import training - trials = alfio.load_object(self.session_path.joinpath(self.output_collection), "trials") + trials = alfio.load_object(self.session_path.joinpath(self.output_collection), 'trials') good_enough = training.criterion_delay( n_trials=trials["intervals"].shape[0], perf_easy=training.compute_performance_easy(trials), @@ -269,28 +319,21 @@ def _extract_behaviour(self): return dsets, out_files - def _run(self, update=True, plot_qc=True): - # TODO pass in protocol number for fpga trials - dsets, out_files = self._extract_behaviour() - - if not self.one or self.one.offline: - return out_files - - self._behaviour_criterion(update=update) + def _run_qc(self, trials_data, update=True, plot_qc=True): # Run the task QC qc = TaskQC(self.session_path, one=self.one, log=_logger) qc.extractor = TaskQCExtractor(self.session_path, lazy=True, one=qc.one, sync_collection=self.sync_collection, - sync_type=self.sync, task_collection=self.collection, - 
save_path=self.session_path.joinpath(self.output_collection)) + sync_type=self.sync, task_collection=self.collection) # Extract extra datasets required for QC - qc.extractor.data = dsets + qc.extractor.data = trials_data # FIXME This line is pointless qc.extractor.extract_data() + # Aggregate and update Alyx QC fields namespace = 'task' if self.protocol_number is None else f'task_{self.protocol_number:02}' qc.run(update=update, namespace=namespace) if plot_qc: - _logger.info("Creating Trials QC plots") + _logger.info('Creating Trials QC plots') try: # TODO needs to be adapted for chained protocols session_id = self.one.path2eid(self.session_path) @@ -303,9 +346,96 @@ def _run(self, update=True, plot_qc=True): _logger.error(traceback.format_exc()) self.status = -1 + def _run(self, update=True, plot_qc=True): + dsets, out_files = self._extract_behaviour() + + if not self.one or self.one.offline: + return out_files + + self._behaviour_criterion(update=update) + self._run_qc(dsets, update=update, plot_qc=plot_qc) return out_files +class ChoiceWorldTrialsTimeline(ChoiceWorldTrialsNidq): + """Behaviour task extractor with DAQdata.raw NPY datasets.""" + @property + def signature(self): + signature = super().signature + signature['input_files'] = [ + ('_iblrig_taskData.raw.*', self.collection, True), + ('_iblrig_taskSettings.raw.*', self.collection, True), + ('_iblrig_encoderEvents.raw*', self.collection, True), + ('_iblrig_encoderPositions.raw*', self.collection, True), + (f'_{self.sync_namespace}_DAQdata.raw.npy', self.sync_collection, True), + (f'_{self.sync_namespace}_DAQdata.timestamps.npy', self.sync_collection, True), + (f'_{self.sync_namespace}_DAQdata.meta.json', self.sync_collection, True), + ] + if self.protocol: + extractor = get_bpod_extractor(self.session_path, protocol=self.protocol) + if extractor.save_names: + signature['output_files'] = [(fn, self.output_collection, True) + for fn in filter(None, extractor.save_names)] + return signature + + def _extract_behaviour(self): + """Extract the Bpod trials data and Timeline acquired signals.""" + # First determine the extractor from the task protocol + extractor = get_bpod_extractor(self.session_path, self.protocol, self.collection) + ret, _ = extractor.extract(save=False, task_collection=self.collection) + bpod_trials = {k: v for k, v in zip(extractor.var_names, ret)} + + trials = TimelineTrials(self.session_path, bpod_trials=bpod_trials) + save_path = self.session_path / self.output_collection + if not self._spacer_support(extractor.settings): + _logger.warning('Protocol spacers not supported; setting protocol_number to None') + self.protocol_number = None + dsets, out_files = trials.extract( + save=True, path_out=save_path, sync_collection=self.sync_collection, + task_collection=self.collection, protocol_number=self.protocol_number) + + if not isinstance(dsets, dict): + dsets = {k: v for k, v in zip(trials.var_names, dsets)} + + self.timeline = trials.timeline # Store for QC later + self.frame2ttl = trials.frame2ttl + self.audio = trials.audio + + return dsets, out_files + + def _run_qc(self, trials_data, update=True, **kwargs): + """ + Run the task QC and update Alyx with results. + + Parameters + ---------- + trials_data : dict + The extracted trials data. + update : bool + If true, update Alyx with the result. + + Notes + ----- + - Unlike the super class, currently the QC plots are not generated. + - Expects the frame2ttl and audio attributes to be set from running _extract_behaviour. 
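+
+        Examples
+        --------
+        A hypothetical standalone call, mirroring the order of operations in ``_run``:
+
+        >>> trials_data, _ = task._extract_behaviour()  # sets frame2ttl/audio attributes
+        >>> task._run_qc(trials_data, update=False)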
+ """ + # TODO Task QC extractor for Timeline + qc = TaskQC(self.session_path, one=self.one, log=_logger) + qc.extractor = TaskQCExtractor(self.session_path, lazy=True, one=qc.one, sync_collection=self.sync_collection, + sync_type=self.sync, task_collection=self.collection) + # Extract extra datasets required for QC + qc.extractor.data = TaskQCExtractor.rename_data(trials_data.copy()) + qc.extractor.load_raw_data() + + qc.extractor.frame_ttls = self.frame2ttl + qc.extractor.audio_ttls = self.audio + # qc.extractor.bpod_ttls = channel_events('bpod') + + # Aggregate and update Alyx QC fields + namespace = 'task' if self.protocol_number is None else f'task_{self.protocol_number:02}' + qc.run(update=update, namespace=namespace) + + class TrainingStatus(base_tasks.BehaviourTask): priority = 90 job_size = 'small' @@ -325,23 +455,26 @@ def _run(self, upload=True): """ Extracts training status for subject """ - # TODO need to make compatible with chained protocol - df = training_status.get_latest_training_information(self.session_path, self.one) + + lab = get_lab(self.session_path, self.one.alyx) + if lab == 'cortexlab': + one = ONE(base_url='https://alyx.internationalbrainlab.org') + else: + one = self.one + + df = training_status.get_latest_training_information(self.session_path, one) if df is not None: - training_status.make_plots(self.session_path, self.one, df=df, save=True, upload=upload) + training_status.make_plots( + self.session_path, self.one, df=df, save=True, upload=upload, task_collection=self.collection) # Update status map in JSON field of subjects endpoint - # TODO This requires exposing the json field of the subjects endpoint if self.one and not self.one.offline: _logger.debug('Updating JSON field of subjects endpoint') - try: - status = (df.set_index('date')[['training_status', 'session_path']].drop_duplicates( - subset='training_status', keep='first').to_dict()) - date, sess = status.items() - data = {'trained_criteria': {v.replace(' ', '_'): (k, self.one.path2eid(sess[1][k])) for k, v - in date[1].items()}} - _, subject, *_ = session_path_parts(self.session_path) - self.one.alyx.json_field_update('subjects', subject, data=data) - except KeyError: - _logger.error('Failed to update subject training status on Alyx: json field not available') + status = (df.set_index('date')[['training_status', 'session_path']].drop_duplicates( + subset='training_status', keep='first').to_dict()) + date, sess = status.items() + data = {'trained_criteria': {v.replace(' ', '_'): (k, self.one.path2eid(sess[1][k])) + for k, v in date[1].items()}} + _, subject, *_ = session_path_parts(self.session_path) + self.one.alyx.json_field_update('subjects', subject, data=data) output_files = [] return output_files diff --git a/ibllib/pipes/dynamic_pipeline.py b/ibllib/pipes/dynamic_pipeline.py index 90f939d70..044e242a6 100644 --- a/ibllib/pipes/dynamic_pipeline.py +++ b/ibllib/pipes/dynamic_pipeline.py @@ -13,20 +13,33 @@ import ibllib.pipes.tasks as mtasks import ibllib.pipes.base_tasks as bstasks import ibllib.pipes.widefield_tasks as wtasks +import ibllib.pipes.mesoscope_tasks as mscope_tasks import ibllib.pipes.sync_tasks as stasks import ibllib.pipes.behavior_tasks as btasks import ibllib.pipes.video_tasks as vtasks import ibllib.pipes.ephys_tasks as etasks import ibllib.pipes.audio_tasks as atasks -from ibllib.pipes.photometry_tasks import TaskFibrePhotometryPreprocess, TaskFibrePhotometryRegisterRaw +import ibllib.pipes.photometry_tasks as ptasks +# from ibllib.pipes.photometry_tasks import 
FibrePhotometryPreprocess, FibrePhotometryRegisterRaw _logger = logging.getLogger(__name__) def acquisition_description_legacy_session(session_path, save=False): """ - From a legacy session create a dictionary corresponding to the acquisition description - :return: dict + From a legacy session create a dictionary corresponding to the acquisition description. + + Parameters + ---------- + session_path : str, pathlib.Path + A path to a session to describe. + save : bool + If true, saves the acquisition description file to _ibl_experiment.description.yaml. + + Returns + ------- + dict + The legacy acquisition description. """ extractor_type = ibllib.io.extractors.base.get_session_extractor_type(session_path=session_path) etype2protocol = dict(biased='choice_world_biased', habituation='choice_world_habituation', @@ -101,7 +114,7 @@ def get_acquisition_description(protocol): raise ValueError(f'Unknown protocol "{protocol}"') acquisition_description['tasks'] = [{key: { 'collection': 'raw_behavior_data', - 'sync_label': 'bpod', 'main': True + 'sync_label': 'bpod', 'main': True # FIXME: What is purpose of main key? }}] acquisition_description['version'] = '1.0.0' return acquisition_description @@ -150,6 +163,8 @@ def make_pipeline(session_path, **pkwargs): tasks[f'SyncPulses_{sync}'] = type(f'SyncPulses_{sync}', (etasks.EphysSyncPulses,), {})( **kwargs, **sync_kwargs, parents=[tasks['SyncRegisterRaw']]) sync_tasks = [tasks[f'SyncPulses_{sync}']] + elif sync_args['sync_namespace'] == 'timeline': + tasks['SyncRegisterRaw'] = type('SyncRegisterRaw', (stasks.SyncRegisterRaw,), {})(**kwargs, **sync_kwargs) elif sync == 'nidq': tasks['SyncRegisterRaw'] = type('SyncRegisterRaw', (stasks.SyncMtscomp,), {})(**kwargs, **sync_kwargs) tasks[f'SyncPulses_{sync}'] = type(f'SyncPulses_{sync}', (stasks.SyncPulses,), {})( @@ -158,8 +173,7 @@ def make_pipeline(session_path, **pkwargs): elif sync == 'tdms': tasks['SyncRegisterRaw'] = type('SyncRegisterRaw', (stasks.SyncRegisterRaw,), {})(**kwargs, **sync_kwargs) elif sync == 'bpod': - pass - # ATM we don't have anything for this not sure it will be needed in the future + pass # ATM we don't have anything for this not sure it will be needed in the future # Behavior tasks task_protocols = acquisition_description.get('tasks', []) @@ -175,17 +189,27 @@ def make_pipeline(session_path, **pkwargs): if extractors := task_info.get('extractors', False): extractors = (extractors,) if isinstance(extractors, str) else extractors task_name = None # to avoid unbound variable issue in the first round - for j, task in enumerate(extractors): + for j, extractor in enumerate(extractors): # Assume previous task in the list is parent parents = [] if j == 0 else [tasks[task_name]] # Make sure extractor and sync task don't collide for sync_option in ('nidq', 'bpod'): - if sync_option in task.lower() and not sync == sync_option: - raise ValueError(f'Extractor "{task}" and sync "{sync}" do not match') - try: - task = getattr(btasks, task) - except AttributeError: - raise NotImplementedError # TODO Attempt to import from personal project repo + if sync_option in extractor.lower() and not sync == sync_option: + raise ValueError(f'Extractor "{extractor}" and sync "{sync}" do not match') + # Look for the extractor in the behavior extractors module + if hasattr(btasks, extractor): + task = getattr(btasks, extractor) + # This may happen that the extractor is tied to a specific sync task: look for TrialsChoiceWorldBpod for # example + elif hasattr(btasks, extractor + sync.capitalize()): + task = 
getattr(btasks, extractor + sync.capitalize()) + else: + # lookup in the project extraction repo if we find an extractor class + import projects.extraction_tasks + if hasattr(projects.extraction_tasks, extractor): + task = getattr(projects.extraction_tasks, extractor) + else: + raise NotImplementedError( + f'Extractor "{extractor}" not found in main IBL pipeline nor in personal projects') # Rename the class to something more informative task_name = f'{task.__name__}_{i:02}' # For now we assume that the second task in the list is always the trials extractor, which is dependent @@ -305,9 +329,9 @@ def make_pipeline(session_path, **pkwargs): tasks[tn] = type((tn := f'VideoSyncQC_{sync}'), (vtasks.VideoSyncQcNidq,), {})( **kwargs, **video_kwargs, **sync_kwargs, parents=[tasks['VideoCompress']] + sync_tasks) - if len(video_kwargs['cameras']) == 3: - tasks[tn] = type((tn := 'DLC'), (epp.EphysDLC,), {})( - **kwargs, parents=[dlc_parent_task]) + if sync_kwargs['sync'] != 'bpod': + tasks[tn] = type((tn := 'DLC'), (vtasks.DLC,), {})( + **kwargs, **video_kwargs, parents=[dlc_parent_task]) tasks['PostDLC'] = type('PostDLC', (epp.EphysPostDLC,), {})( **kwargs, parents=[tasks['DLC'], tasks[f'VideoSyncQC_{sync}']]) @@ -337,14 +361,29 @@ def make_pipeline(session_path, **pkwargs): tasks['WidefieldFOV'] = type('WidefieldFOV', (wtasks.WidefieldFOV,), {})( **kwargs, **wfield_kwargs, parents=[tasks['WidefieldPreprocess']]) + # Mesoscope tasks + if 'mesoscope' in devices: + (_, mscope_kwargs), = devices['mesoscope'].items() + mscope_kwargs['device_collection'] = mscope_kwargs.pop('collection') + tasks['MesoscopeRegisterSnapshots'] = type('MesoscopeRegisterSnapshots', (mscope_tasks.MesoscopeRegisterSnapshots,), {})( + **kwargs, **mscope_kwargs) + tasks['MesoscopePreprocess'] = type('MesoscopePreprocess', (mscope_tasks.MesoscopePreprocess,), {})( + **kwargs, **mscope_kwargs) + tasks['MesoscopeFOV'] = type('MesoscopeFOV', (mscope_tasks.MesoscopeFOV,), {})( + **kwargs, **mscope_kwargs, parents=[tasks['MesoscopePreprocess']]) + tasks['MesoscopeSync'] = type('MesoscopeSync', (mscope_tasks.MesoscopeSync,), {})( + **kwargs, **mscope_kwargs, **sync_kwargs) + tasks['MesoscopeCompress'] = type('MesoscopeCompress', (mscope_tasks.MesoscopeCompress,), {})( + **kwargs, **mscope_kwargs, parents=[tasks['MesoscopePreprocess']]) + if 'photometry' in devices: # {'collection': 'raw_photometry_data', 'sync_label': 'frame_trigger', 'regions': ['Region1G', 'Region3G']} photometry_kwargs = devices['photometry'] - tasks['TaskFibrePhotometryRegisterRaw'] = type('TaskFibrePhotometryRegisterRaw', ( - TaskFibrePhotometryRegisterRaw,), {})(**kwargs, **photometry_kwargs) - tasks['TaskFibrePhotometryPreprocess'] = type('TaskFibrePhotometryPreprocess', ( - TaskFibrePhotometryPreprocess,), {})(**kwargs, **photometry_kwargs, **sync_kwargs, - parents=[tasks['TaskFibrePhotometryRegisterRaw']] + sync_tasks) + tasks['FibrePhotometryRegisterRaw'] = type('FibrePhotometryRegisterRaw', ( + ptasks.FibrePhotometryRegisterRaw,), {})(**kwargs, **photometry_kwargs) + tasks['FibrePhotometryPreprocess'] = type('FibrePhotometryPreprocess', ( + ptasks.FibrePhotometryPreprocess,), {})(**kwargs, **photometry_kwargs, **sync_kwargs, + parents=[tasks['FibrePhotometryRegisterRaw']] + sync_tasks) p = mtasks.Pipeline(session_path=session_path, **pkwargs) p.tasks = tasks diff --git a/ibllib/pipes/ephys_alignment.py b/ibllib/pipes/ephys_alignment.py index ba2ca254c..9080b9053 100644 --- a/ibllib/pipes/ephys_alignment.py +++ b/ibllib/pipes/ephys_alignment.py @@ -377,7 
+377,7 @@ def arrange_into_regions(depth_coords, region_ids, distance, region_colours): all_y.append(y) all_x.append(x) col = region_colours[bound[iB]] - if type(col) != str: + if not isinstance(col, str): col = '#FFFFFF' else: col = '#' + col diff --git a/ibllib/pipes/ephys_preprocessing.py b/ibllib/pipes/ephys_preprocessing.py index b0ef5328b..9cef81a34 100644 --- a/ibllib/pipes/ephys_preprocessing.py +++ b/ibllib/pipes/ephys_preprocessing.py @@ -5,11 +5,12 @@ from collections import OrderedDict import traceback from pathlib import Path -import packaging.version +import warnings import cv2 import numpy as np import pandas as pd +import packaging.version import one.alf.io as alfio from neurodsp.utils import rms @@ -21,8 +22,7 @@ from ibllib.io.video import label_from_path, assert_valid_label from ibllib.io.extractors import ephys_fpga, ephys_passive, camera from ibllib.pipes import tasks, base_tasks -from ibllib.pipes.training_preprocessing import TrainingRegisterRaw as EphysRegisterRaw -from ibllib.pipes.training_preprocessing import TrainingStatus as EphysTrainingStatus +import ibllib.pipes.training_preprocessing as tpp from ibllib.pipes.misc import create_alyx_probe_insertions from ibllib.qc.alignment_qc import get_aligned_channels from ibllib.qc.task_extractors import TaskQCExtractor @@ -35,6 +35,7 @@ from brainbox.behavior.dlc import likelihood_threshold, get_licks, get_pupil_diameter, get_smooth_pupil_diameter _logger = logging.getLogger("ibllib") +warnings.warn('`pipes.training_preprocessing` to be removed in favour of dynamic pipeline') # level 0 @@ -552,7 +553,7 @@ def _run(self, **kwargs): def get_signatures(self, **kwargs): # need to detect the number of cameras output_files = Path(self.session_path).joinpath('raw_video_data').glob('*') - labels = np.unique([label_from_path(x) for x in output_files]) + labels = {label_from_path(x) for x in output_files} full_input_files = [] for sig in self.signature['input_files']: @@ -664,6 +665,7 @@ class EphysTrials(tasks.Task): ('*trials.goCueTrigger_times.npy', 'alf', True), ('*trials.intervals_bpod.npy', 'alf', True), ('*trials.stimOff_times.npy', 'alf', True), + ('*trials.quiescencePeriod.npy', 'alf', True), ('*wheel.position.npy', 'alf', True), ('*wheel.timestamps.npy', 'alf', True), ('*wheelMoves.intervals.npy', 'alf', True), @@ -740,6 +742,23 @@ def get_signatures(self, **kwargs): self.output_files = self.signature['output_files'] +class LaserTrialsLegacy(EphysTrials): + """This is the legacy extractor for Guido's ephys optogenetic stimulation protocol. + + This is legacy because personal project extractors should be in a separate repository. 
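+
+    Examples
+    --------
+    A hypothetical invocation; the laser datasets are merged into the dict of
+    extracted trials datasets:
+
+    >>> dsets, out_files = LaserTrialsLegacy(session_path)._extract_behaviour()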
+ """ + def _extract_behaviour(self): + dsets, out_files = super()._extract_behaviour() + + # Re-extract the laser datasets as the above default extractor discards them + from ibllib.io.extractors import opto_trials + laser = opto_trials.LaserBool(self.session_path) + dsets_laser, out_files_laser = laser.extract(save=True) + dsets.update({k: v for k, v in zip(laser.var_names, dsets_laser)}) + out_files.extend(out_files_laser) + return dsets, out_files + + class EphysCellsQc(tasks.Task): priority = 90 level = 3 @@ -1306,7 +1325,7 @@ def __init__(self, session_path=None, **kwargs): self.session_path = session_path # level 0 tasks['ExperimentDescriptionRegisterRaw'] = base_tasks.ExperimentDescriptionRegisterRaw(self.session_path) - tasks["EphysRegisterRaw"] = EphysRegisterRaw(self.session_path) + tasks["EphysRegisterRaw"] = tpp.TrainingRegisterRaw(self.session_path) tasks["EphysPulses"] = EphysPulses(self.session_path) tasks["EphysRawQC"] = RawEphysQC(self.session_path) tasks["EphysAudio"] = EphysAudio(self.session_path) @@ -1323,7 +1342,7 @@ def __init__(self, session_path=None, **kwargs): self.session_path, parents=[tasks["EphysVideoCompress"], tasks["EphysPulses"], tasks["EphysTrials"]]) tasks["EphysCellsQc"] = EphysCellsQc(self.session_path, parents=[tasks["SpikeSorting"]]) tasks["EphysDLC"] = EphysDLC(self.session_path, parents=[tasks["EphysVideoCompress"]]) - tasks['EphysTrainingStatus'] = EphysTrainingStatus(self.session_path, parents=[tasks["EphysTrials"]]) + tasks['EphysTrainingStatus'] = tpp.TrainingStatus(self.session_path, parents=[tasks["EphysTrials"]]) # level 3 tasks["EphysPostDLC"] = EphysPostDLC(self.session_path, parents=[tasks["EphysDLC"], tasks["EphysTrials"], tasks["EphysVideoSyncQc"]]) diff --git a/ibllib/pipes/ephys_tasks.py b/ibllib/pipes/ephys_tasks.py index 692d03fda..7affc7139 100644 --- a/ibllib/pipes/ephys_tasks.py +++ b/ibllib/pipes/ephys_tasks.py @@ -3,15 +3,14 @@ from pathlib import Path import subprocess import re -import packaging import shutil + +import packaging.version import numpy as np import pandas as pd - import spikeglx import neuropixel from neurodsp.utils import rms - import one.alf.io as alfio from ibllib.misc import check_nvidia_driver @@ -289,7 +288,7 @@ def signature(self): } return signature - def _run(self): + def _run(self, delete_original=True): # Do we need the ability to register the files once it already been processed and original file deleted? 
@@ -297,8 +296,7 @@ def _run(self):
         assert len(files) == 1
         bin_file = files[0].get('ap', None)
-        np_conv = neuropixel.NP2Converter(bin_file, post_check=True, compress=True, delete_original=False)
-        # TODO once we happy change delete_original=True
+        np_conv = neuropixel.NP2Converter(bin_file, post_check=True, compress=True, delete_original=delete_original)
         np_conv_status = np_conv.process()
         out_files = np_conv.get_processed_files_NP24()
         np_conv.sr.close()
@@ -388,7 +386,7 @@ def __init__(self, *args, **kwargs):
         assert self.device_collection, "device_collection is a required argument"
         assert self.sync_collection, "sync_collection is a required argument"
         self.pname = [self.pname] if isinstance(self.pname, str) else self.pname
-        assert type(self.pname) == list, 'pname task argument should be a list or a string'
+        assert isinstance(self.pname, list), 'pname task argument should be a list or a string'
 
     @property
     def signature(self):
diff --git a/ibllib/pipes/histology.py b/ibllib/pipes/histology.py
index 99c2f2ef3..6081f6eaa 100644
--- a/ibllib/pipes/histology.py
+++ b/ibllib/pipes/histology.py
@@ -219,7 +219,48 @@ def get_brain_regions(xyz, channels_positions=None, brain_atlas=None):
     return brain_regions, insertion
 
 
-def register_track(probe_id, picks=None, one=None, overwrite=False, channels=True, brain_atlas=None):
+def register_chronic_track(chronic_id, picks=None, one=None, overwrite=False, channels=True, brain_atlas=None):
+    """
+    Register the user picks to a chronic insertion in Alyx.
+    Here we update the database in 4 steps:
+    1) The user picks converted to IBL coordinates will be stored in the json field of the
+    corresponding chronic insertion models
+    2) All associated probe insertions are identified and the user picks stored in their json fields too
+    3) The trajectory associated to the chronic insertion computed from the histology track is created or patched
+    4) Channel locations are set in the table
+    :param chronic_id:
+    :param picks:
+    :param one:
+    :param overwrite:
+    :param channels:
+    :param brain_atlas:
+    :return:
+    """
+    assert one
+    brain_locations, insertion_histology = register_track(chronic_id, picks=picks, one=one, overwrite=overwrite,
+                                                          channels=channels, brain_atlas=brain_atlas,
+                                                          endpoint='chronic-insertions')
+
+    # Update all the associated probe insertions with the relevant QC and xyz_picks
+    chronic = one.alyx.rest('chronic-insertions', 'list', id=chronic_id)[0]
+    for probe_id in chronic['probe_insertion']:
+        pid = probe_id['id']
+        if picks is None or picks.size == 0:
+            hist_qc = base.QC(pid, one=one, endpoint='insertions')
+            hist_qc.update_extended_qc({'tracing_exists': False})
+            hist_qc.update('CRITICAL', namespace='tracing')
+        else:
+            one.alyx.json_field_update(endpoint='insertions', uuid=pid, field_name='json',
+                                       data={'xyz_picks': np.int32(picks * 1e6).tolist()})
+            # Update the insertion QC to register that tracing exists
+            hist_qc = base.QC(pid, one=one, endpoint='insertions')
+            hist_qc.update_extended_qc({'tracing_exists': True})
+
+    return brain_locations, insertion_histology
+
+
+def register_track(probe_id, picks=None, one=None, overwrite=False, channels=True, brain_atlas=None,
+                   endpoint='insertions'):
     """
     Register the user picks to a probe in Alyx
     Here we update Alyx models on the database in 3 steps
@@ -232,15 +273,19 @@ def register_track(probe_id, picks=None, one=None, overwrite=False, channels=Tru
     brain_atlas = brain_atlas or atlas.AllenAtlas()
     # 0) if it's an empty track, create a null trajectory and exit
     if picks is None or picks.size == 0:
-        tdict =
{'probe_insertion': probe_id, - 'x': None, 'y': None, 'z': None, + tdict = {'x': None, 'y': None, 'z': None, 'phi': None, 'theta': None, 'depth': None, 'roll': None, 'provenance': 'Histology track', 'coordinate_system': 'IBL-Allen', } + if endpoint == 'chronic-insertions': + tdict['chronic_insertion'] = probe_id + else: + tdict['probe_insertion'] = probe_id + brain_locations = None # Update the insertion qc to CRITICAL - hist_qc = base.QC(probe_id, one=one, endpoint='insertions') + hist_qc = base.QC(probe_id, one=one, endpoint=endpoint) hist_qc.update_extended_qc({'tracing_exists': False}) hist_qc.update('CRITICAL', namespace='tracing') insertion_histology = None @@ -248,17 +293,18 @@ def register_track(probe_id, picks=None, one=None, overwrite=False, channels=Tru else: brain_locations, insertion_histology = get_brain_regions(picks, brain_atlas=brain_atlas) # 1) update the alyx models, first put the picked points in the insertion json - one.alyx.json_field_update(endpoint='insertions', uuid=probe_id, field_name='json', + one.alyx.json_field_update(endpoint=endpoint, uuid=probe_id, field_name='json', data={'xyz_picks': np.int32(picks * 1e6).tolist()}) # Update the insertion qc to register tracing exits - hist_qc = base.QC(probe_id, one=one, endpoint='insertions') + hist_qc = base.QC(probe_id, one=one, endpoint=endpoint) hist_qc.update_extended_qc({'tracing_exists': True}) # 2) patch or create the trajectory coming from histology track - tdict = create_trajectory_dict(probe_id, insertion_histology, provenance='Histology track') + tdict = create_trajectory_dict(probe_id, insertion_histology, provenance='Histology track', endpoint=endpoint) + alyx_end = 'chronic_insertion' if endpoint == 'chronic-insertions' else 'probe_insertion' hist_traj = one.alyx.get('/trajectories?' 
- f'&probe_insertion={probe_id}' + f'&{alyx_end}={probe_id}' '&provenance=Histology track', clobber=True) # if the trajectory exists, remove it, this will cascade delete existing channel locations if len(hist_traj): @@ -319,7 +365,7 @@ def register_aligned_track(probe_id, xyz_channels, chn_coords=None, one=None, ov one.alyx.rest('channels', 'create', data=channel_dict) -def create_trajectory_dict(probe_id, insertion, provenance): +def create_trajectory_dict(probe_id, insertion, provenance, endpoint='insertions'): """ Create trajectory dictionary in form to upload to alyx :param probe id: unique id of probe insertion @@ -328,11 +374,12 @@ def create_trajectory_dict(probe_id, insertion, provenance): :type insertion: object atlas.Insertion :param provenance: 'Histology track' or 'Ephys aligned histology track' :type provenance: string + :param endpoint: Alyx endpoint, either 'insertions', or 'chronic-insertions' + :type endpoint: string :return tdict: :type tdict: dict """ - tdict = {'probe_insertion': probe_id, - 'x': insertion.x * 1e6, + tdict = {'x': insertion.x * 1e6, 'y': insertion.y * 1e6, 'z': insertion.z * 1e6, 'phi': insertion.phi, @@ -342,6 +389,10 @@ def create_trajectory_dict(probe_id, insertion, provenance): 'provenance': provenance, 'coordinate_system': 'IBL-Allen', } + if endpoint == 'chronic-insertions': + tdict['chronic_insertion'] = probe_id + else: + tdict['probe_insertion'] = probe_id return tdict @@ -380,6 +431,54 @@ def _parse_filename(track_file): return search_filter +def register_chronic_track_files(path_tracks, one=None, overwrite=False, brain_atlas=None): + """ + Registers track files for chronic insertions + :param path_tracks: + :param one: + :param overwrite: + :param brain_atlas: + :return: + """ + + brain_atlas = brain_atlas or atlas.AllenAtlas() + glob_pattern = "*_probe*_pts*.csv" + path_tracks = Path(path_tracks) + + if not path_tracks.is_dir(): + track_files = [path_tracks] + else: + track_files = list(path_tracks.rglob(glob_pattern)) + track_files.sort() + + assert path_tracks.exists() + assert one + + ntracks = len(track_files) + for ind, track_file in enumerate(track_files): + # Nomenclature expected: + # '{yyyy-mm-dd}}_{nickname}_{session_number}_{probe_label}_pts.csv' + # beware: there may be underscores in the subject nickname + + search_filter = _parse_filename(track_file) + probe = one.alyx.rest('chronic-insertions', 'list', no_cache=True, **search_filter) + if len(probe) == 0: + raise ValueError(f"Could not find associated chronic insertion for {search_filter['subject']}," + f"{search_filter['name']}") + elif len(probe) == 1: + probe = probe[0] + else: + raise ValueError("Multiple chronic insertions found.") + chronic_id = probe['id'] + try: + xyz_picks = load_track_csv(track_file, brain_atlas=brain_atlas) + register_chronic_track(chronic_id, xyz_picks, one=one, overwrite=overwrite, brain_atlas=brain_atlas) + except Exception as e: + _logger.error(str(track_file)) + raise e + _logger.info(f"{ind + 1}/{ntracks}, {str(track_file)}") + + def register_track_files(path_tracks, one=None, overwrite=False, brain_atlas=None): """ :param path_tracks: path to directory containing tracks; also works with a single file name diff --git a/ibllib/pipes/local_server.py b/ibllib/pipes/local_server.py index 008deda50..895b0f20b 100644 --- a/ibllib/pipes/local_server.py +++ b/ibllib/pipes/local_server.py @@ -1,4 +1,3 @@ -import logging import time from datetime import datetime from pathlib import Path @@ -12,6 +11,7 @@ from one.api import ONE from one.webclient import 
AlyxClient from one.remote.globus import get_lab_from_endpoint_id +from iblutil.util import setup_logger from ibllib.io.extractors.base import get_pipeline, get_task_protocol, get_session_extractor_type from ibllib.pipes import tasks, training_preprocessing, ephys_preprocessing @@ -21,7 +21,7 @@ from ibllib.io.session_params import read_params from ibllib.pipes.dynamic_pipeline import make_pipeline, acquisition_description_legacy_session -_logger = logging.getLogger(__name__) +_logger = setup_logger(__name__, level='INFO') LARGE_TASKS = ['EphysVideoCompress', 'TrainingVideoCompress', 'SpikeSorting', 'EphysDLC'] @@ -109,6 +109,10 @@ def job_creator(root_path, one=None, dry=False, rerun=False, max_md5_size=None): list of dicts A list of any datasets registered (only for legacy sessions) """ + for _ in range(10): + _logger.info('#' * 110) + _logger.info('Start looking for new sessions...') + _logger.info('#' * 110) if not one: one = ONE(cache_rest=None) rc = IBLRegistrationClient(one=one) diff --git a/ibllib/pipes/mesoscope_tasks.py b/ibllib/pipes/mesoscope_tasks.py new file mode 100644 index 000000000..c379ca1e3 --- /dev/null +++ b/ibllib/pipes/mesoscope_tasks.py @@ -0,0 +1,1276 @@ +"""The mesoscope data extraction pipeline. + +The mesoscope pipeline currently comprises raw image registration and timestamps extraction. In +the future there will be compression (and potential cropping), FOV metadata extraction, and ROI +extraction. + +Pipeline: + 1. Register reference images and upload snapshots and notes to Alyx + 2. Run ROI cell detection + 3. Calculate the pixel and ROI brain locations and register fields of view to Alyx + 4. Compress the raw imaging data + 5. Extract the imaging times from the main DAQ +""" +import json +import logging +import subprocess +import shutil +from pathlib import Path +from itertools import chain +from collections import defaultdict, Counter +from fnmatch import fnmatch +import enum +import re +import time + +import numba as nb +import numpy as np +import pandas as pd +import sparse +from scipy.io import loadmat +from scipy.interpolate import interpn +import one.alf.io as alfio +from one.alf.spec import is_valid, to_alf +from one.alf.files import filename_parts, session_path_parts +import one.alf.exceptions as alferr + +from ibllib.pipes import base_tasks +from ibllib.io.extractors import mesoscope +from ibllib.atlas import ALLEN_CCF_LANDMARKS_MLAPDV_UM, MRITorontoAtlas + + +_logger = logging.getLogger(__name__) +Provenance = enum.Enum('Provenance', ['ESTIMATE', 'FUNCTIONAL', 'LANDMARK', 'HISTOLOGY']) # py3.11 make StrEnum + + +class MesoscopeRegisterSnapshots(base_tasks.MesoscopeTask, base_tasks.RegisterRawDataTask): + """Upload snapshots as Alyx notes and register the 2P reference image(s).""" + priority = 100 + job_size = 'small' + + @property + def signature(self): + signature = { + 'input_files': [('referenceImage.raw.tif', f'{self.device_collection}/reference', False), + ('referenceImage.stack.tif', f'{self.device_collection}/reference', False), + ('referenceImage.meta.json', f'{self.device_collection}/reference', False)], + 'output_files': [('referenceImage.raw.tif', f'{self.device_collection}/reference', False), + ('referenceImage.stack.tif', f'{self.device_collection}/reference', False), + ('referenceImage.meta.json', f'{self.device_collection}/reference', False)] + } + return signature + + def __init__(self, session_path, **kwargs): + super().__init__(session_path, **kwargs) + self.device_collection = self.get_device_collection('mesoscope', + 
kwargs.get('device_collection', 'raw_imaging_data_*')) + + def _run(self): + """ + Assert one reference image per collection and rename it. Register snapshots. + + Returns + ------- + list of pathlib.Path containing renamed reference image. + """ + # Assert that only one tif file exists per collection + file, collection, _ = self.signature['input_files'][0] + reference_images = list(self.session_path.rglob(f'{collection}/{file}')) + assert len(set(x.parent for x in reference_images)) == len(reference_images) + # Rename the reference images + out_files = super()._run() + # Register snapshots in base session folder and raw_imaging_data folders + self.register_snapshots(collection=[self.device_collection, '']) + return out_files + + +class MesoscopeCompress(base_tasks.MesoscopeTask): + """ Tar compress raw 2p tif files, optionally remove uncompressed data.""" + + priority = 90 + job_size = 'large' + _log_level = None + + @property + def signature(self): + signature = { + 'input_files': [('*.tif', self.device_collection, True)], + 'output_files': [('imaging.frames.tar.bz2', self.device_collection, True)] + } + return signature + + def setUp(self, **kwargs): + """Run at higher log level""" + self._log_level = _logger.level + _logger.setLevel(logging.DEBUG) + return super().setUp(**kwargs) + + def tearDown(self): + _logger.setLevel(self._log_level or logging.INFO) + return super().tearDown() + + def _run(self, remove_uncompressed=False, verify_output=True, clobber=False, **kwargs): + """ + Run tar compression on all tif files in the device collection. + + Parameters + ---------- + remove_uncompressed: bool + Whether to remove the original, uncompressed data. Default is False. + verify_output: bool + Whether to check that the compressed tar file can be uncompressed without errors. + Default is True. + + Returns + ------- + list of pathlib.Path + Path to compressed tar file. 
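+
+        Examples
+        --------
+        A hypothetical invocation that removes the tifs once the archive passes
+        verification (``run`` forwards its keyword arguments to ``_run``):
+
+        >>> task.run(remove_uncompressed=True, verify_output=True)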
+ """ + outfiles = [] # should be one per raw_imaging_data folder + input_files = defaultdict(list) + for file, in_dir, _ in self.input_files: + input_files[self.session_path.joinpath(in_dir)].append(file) + + for in_dir, files in input_files.items(): + outfile = in_dir / self.output_files[0][0] + if outfile.exists() and not clobber: + _logger.info('%s already exists; skipping...', outfile.relative_to(self.session_path)) + continue + # glob for all input patterns + infiles = list(chain(*map(lambda x: in_dir.glob(x), files))) + if not infiles: + _logger.info('No image files found in %s', in_dir.relative_to(self.session_path)) + continue + + _logger.debug( + 'Input files:\n\t%s', '\n\t'.join(map(Path.as_posix, (x.relative_to(self.session_path) for x in infiles))) + ) + + uncompressed_size = sum(x.stat().st_size for x in infiles) + _logger.info('Compressing %i file(s)', len(infiles)) + cmd = 'tar -cjvf "{output}" "{input}"'.format( + output=outfile.relative_to(in_dir), input='" "'.join(str(x.relative_to(in_dir)) for x in infiles)) + _logger.debug(cmd) + process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=in_dir) + info, error = process.communicate() # b'2023-02-17_2_test_2P_00001_00001.tif\n' + _logger.debug(info.decode()) + assert process.returncode == 0, f'compression failed: {error.decode()}' + + # Check the output + assert outfile.exists(), 'output file missing' + outfiles.append(outfile) + compressed_size = outfile.stat().st_size + min_size = kwargs.pop('verify_min_size', 1024) + assert compressed_size > int(min_size), f'Compressed file < {min_size / 1024:.0f}KB' + _logger.info('Compression ratio = %.3f, saving %.2f pct (%.2f MB)', + uncompressed_size / compressed_size, + round((1 - (compressed_size / uncompressed_size)) * 10000) / 100, + (uncompressed_size - compressed_size) / 1024 / 1024) + + if verify_output: + # Test bzip + cmd = f'bzip2 -tv {outfile.relative_to(in_dir)}' + process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=in_dir) + info, error = process.communicate() + _logger.debug(info.decode()) + assert process.returncode == 0, f'bzip compression test failed: {error}' + # Check tar + cmd = f'bunzip2 -dc {outfile.relative_to(in_dir)} | tar -tvf -' + process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=in_dir) + info, error = process.communicate() + _logger.debug(info.decode()) + assert process.returncode == 0, 'tarball decompression test failed' + compressed_files = set(x.split()[-1] for x in filter(None, info.decode().split('\n'))) + assert compressed_files == set(x.name for x in infiles) + + if remove_uncompressed: + _logger.info(f'Removing input files for {in_dir.relative_to(self.session_path)}') + for file in infiles: + file.unlink() + + return outfiles + + +class MesoscopePreprocess(base_tasks.MesoscopeTask): + """Run suite2p preprocessing on tif files""" + + priority = 80 + cpu = -1 + job_size = 'large' + + @property + def signature(self): + # The number of in and outputs will be dependent on the number of input raw imaging folders and output FOVs + signature = { + 'input_files': [('_ibl_rawImagingData.meta.json', self.device_collection, True), + ('*.tif', self.device_collection, True), + ('exptQC.mat', self.device_collection, False)], + 'output_files': [('mpci.ROIActivityF.npy', 'alf/FOV*', True), + ('mpci.ROINeuropilActivityF.npy', 'alf/FOV*', True), + ('mpci.ROIActivityDeconvolved.npy', 'alf/FOV*', True), + ('mpci.badFrames.npy', 'alf/FOV*', 
True), + ('mpci.mpciFrameQC.npy', 'alf/FOV*', True), + ('mpciFrameQC.names.tsv', 'alf/FOV*', True), + ('mpciMeanImage.images.npy', 'alf/FOV*', True), + ('mpciROIs.stackPos.npy', 'alf/FOV*', True), + ('mpciROIs.mpciROITypes.npy', 'alf/FOV*', True), + ('mpciROIs.cellClassifier.npy', 'alf/FOV*', True), + ('mpciROITypes.names.tsv', 'alf/FOV*', True), + ('mpciROIs.masks.npy', 'alf/FOV*', True), + ('mpciROIs.neuropilMasks.npy', 'alf/FOV*', True), + ('_suite2p_ROIData.raw.zip', self.device_collection, False)] + } + return signature + + @staticmethod + def _masks2sparse(stat, ops): + """ + Extract 3D sparse mask arrays from suit2p output. + + Parameters + ---------- + stat : numpy.array + The loaded stat.npy file. A structured array with fields ('lam', 'ypix', 'xpix', 'neuropil_mask'). + ops : numpy.array + The loaded ops.npy file. A structured array with fields ('Ly', 'Lx'). + + Returns + ------- + sparse.GCXS + A pydata sparse array of type float32, representing the ROI masks. + sparse.GCXS + A pydata sparse array of type float32, representing the neuropil ROI masks. + + Notes + ----- + Save using sparse.save_npz. + """ + shape = (stat.shape[0], ops['Ly'], ops['Lx']) + npx = np.prod(shape[1:]) # Number of pixels per time point + coords = [[], [], []] + data = [] + pil_coords = [] + for i, s in enumerate(stat): + coords[0].append(np.full(s['ypix'].shape, i)) + coords[1].append(s['ypix']) + coords[2].append(s['xpix']) + data.append(s['lam']) + pil_coords.append(s['neuropil_mask'] + i * npx) + roi_mask_sp = sparse.COO(list(map(np.concatenate, coords)), np.concatenate(data), shape=shape) + pil_mask_sp = sparse.COO(np.unravel_index(np.concatenate(pil_coords), shape), True, shape=shape) + return sparse.GCXS.from_coo(roi_mask_sp), sparse.GCXS.from_coo(pil_mask_sp) + + def _rename_outputs(self, suite2p_dir, frameQC_names, frameQC, rename_dict=None): + """ + Convert suite2p output files to ALF datasets. + + Parameters + ---------- + suite2p_dir : pathlib.Path + rename_dict : dict or None + The suite2p output filenames and the corresponding ALF name. NB: These files are saved + after transposition. Default is None, i.e. using the default mapping hardcoded in the function below. + + Returns + ------- + list of pathlib.Path + All paths found in FOV folders. 
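+
+        Examples
+        --------
+        The sparse ROI masks written here can be read back with pydata-sparse
+        (illustrative; ``fov_dir`` stands for one of the output FOV folders):
+
+        >>> import sparse
+        >>> masks = sparse.load_npz(str(fov_dir / 'mpciROIs.masks.sparse_npz'))
+        >>> dense = masks.todense()  # (nROIs, Ly, Lx) array of mask weights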
+ """ + if rename_dict is None: + rename_dict = { + 'F.npy': 'mpci.ROIActivityF.npy', + 'spks.npy': 'mpci.ROIActivityDeconvolved.npy', + 'Fneu.npy': 'mpci.ROINeuropilActivityF.npy' + } + # Rename the outputs, first the subdirectories + for plane_dir in suite2p_dir.iterdir(): + # ignore the combined dir + if plane_dir.name != 'combined': + n = int(plane_dir.name.split('plane')[1]) + fov_dir = plane_dir.parent.joinpath(f'FOV_{n:02}') + if fov_dir.exists(): + shutil.rmtree(str(fov_dir), ignore_errors=False, onerror=None) + plane_dir.rename(fov_dir) + # Now rename the content of the new directories and move them out of suite2p + for fov_dir in suite2p_dir.iterdir(): + # Compress suite2p output files + target = suite2p_dir.parent.joinpath(fov_dir.name) + target.mkdir(exist_ok=True) + # Move bin file out of the way first + if fov_dir.joinpath('data.bin').exists(): + dst = self.session_path.joinpath('raw_bin_files', fov_dir.name, 'data.bin') + dst.parent.mkdir(parents=True, exist_ok=True) + _logger.debug('Moving bin file to %s', dst.relative_to(self.session_path)) + fov_dir.joinpath('data.bin').replace(dst) + # Set logger to warning for the moment to not clutter the logs + prev_level = _logger.level + _logger.setLevel(logging.WARNING) + shutil.make_archive(str(target / '_suite2p_ROIData.raw'), 'zip', fov_dir, logger=_logger) + _logger.setLevel(prev_level) + if fov_dir != 'combined': + # save frameQC in each dir (for now, maybe there will be fov specific frame QC eventually) + if frameQC is not None and len(frameQC) > 0: + np.save(fov_dir.joinpath('mpci.mpciFrameQC.npy'), frameQC) + frameQC_names.to_csv(fov_dir.joinpath('mpciFrameQC.names.tsv'), sep='\t', index=False) + + # extract some other data from suite2p outputs + ops = np.load(fov_dir.joinpath('ops.npy'), allow_pickle=True).item() + stat = np.load(fov_dir.joinpath('stat.npy'), allow_pickle=True) + iscell = np.load(fov_dir.joinpath('iscell.npy')) + + # Save suite2p ROI activity outputs in transposed from (n_frames, n_ROI) + for k, v in rename_dict.items(): + np.save(fov_dir.joinpath(v), np.load(fov_dir.joinpath(k)).T) + # fov_dir.joinpath(k).unlink() # Keep original files for suite2P GUI + np.save(fov_dir.joinpath('mpci.badFrames.npy'), np.asarray(ops['badframes'], dtype=bool)) + np.save(fov_dir.joinpath('mpciMeanImage.images.npy'), np.asarray(ops['meanImg'], dtype=float)) + np.save(fov_dir.joinpath('mpciROIs.stackPos.npy'), np.asarray([(*s['med'], 0) for s in stat], dtype=int)) + np.save(fov_dir.joinpath('mpciROIs.cellClassifier.npy'), np.asarray(iscell[:, 1], dtype=float)) + np.save(fov_dir.joinpath('mpciROIs.mpciROITypes.npy'), np.asarray(iscell[:, 0], dtype=np.int16)) + pd.DataFrame([(0, 'no cell'), (1, 'cell')], columns=['roi_values', 'roi_labels'] + ).to_csv(fov_dir.joinpath('mpciROITypes.names.tsv'), sep='\t', index=False) + # ROI and neuropil masks + roi_mask, pil_mask = self._masks2sparse(stat, ops) + with open(fov_dir.joinpath('mpciROIs.masks.sparse_npz'), 'wb') as fp: + sparse.save_npz(fp, roi_mask) + with open(fov_dir.joinpath('mpciROIs.neuropilMasks.sparse_npz'), 'wb') as fp: + sparse.save_npz(fp, pil_mask) + # move folders out of suite2p dir + # We overwrite existing files + for file in filter(lambda x: is_valid(x.name), fov_dir.iterdir()): + target_file = target.joinpath(file.name) + if target_file.exists(): + target_file.unlink() + file.rename(target_file) + shutil.rmtree(str(suite2p_dir), ignore_errors=False, onerror=None) + # Collect all files in those directories + return list(suite2p_dir.parent.rglob('FOV*/*')) + + 
@staticmethod + def _check_meta_data(meta_data_all: list) -> dict: + """ + Check that the meta data is consistent across all raw imaging folders. + + Parameters + ---------- + meta_data_all: list of dicts + List of metadata dictionaries to be checked for consistency. + + Returns + ------- + dict + Single, consolidated dictionary containing metadata. + """ + # Ignore the things we don't expect to match + ignore = ('acquisitionStartTime', 'nFrames') + ignore_sub = {'rawScanImageMeta': ('ImageDescription', 'Software')} + + def equal_dicts(a, b, skip=None): + ka = set(a).difference(skip or ()) + kb = set(b).difference(skip or ()) + return ka == kb and all(a[key] == b[key] for key in ka) + + # Compare each dict with the first one in the list + for i, meta in enumerate(meta_data_all[1:]): + if meta != meta_data_all[0]: # compare entire object first + for k, v in meta_data_all[0].items(): # check key by key + if not (equal_dicts(v, meta[k], ignore_sub[k]) # compare sub-dicts... + if k in ignore_sub else # ... if we have keys to ignore in test + not (k in ignore or v == meta[k])): + _logger.warning(f'Mismatch in meta data between raw_imaging_data folders for key {k}. ' + f'Using meta_data from first folder!') + return meta_data_all[0] + + @staticmethod + def _consolidate_exptQC(exptQC): + """ + Consolidate exptQC.mat files into a single file. + + Parameters + ---------- + exptQC : list of pandas.DataFrame + The loaded 'exptQC.mat' files as squeezed and simplified data frames, with columns + {'frameQC_frames', 'frameQC_names'}. + + Returns + ------- + numpy.array + An array of uint8 where 0 indicates good frames, and other values correspond to + experimenter-defined issues (in 'qc_values' column of output data frame). + pandas.DataFrame + A data frame with columns {'qc_values', 'qc_labels'}, the former an unsigned int + corresponding to a QC code; the latter a human-readable QC explanation. + numpy.array + An array of frame indices where QC code != 0. + """ + + # Merge and make sure same indexes have same names across all files + frameQC_names_list = [e['frameQC_names'] for e in exptQC] + frameQC_names_list = [{f: 0} if isinstance(f, str) else {f[i]: i for i in range(len(f))} + for f in frameQC_names_list] + frameQC_names = {k: v for d in frameQC_names_list for k, v in d.items()} + for d in frameQC_names_list: + for k, v in d.items(): + if frameQC_names[k] != v: + raise IOError(f'exptQC.mat files have different values for name "{k}"') + + frameQC_names = pd.DataFrame(sorted([(v, k) for k, v in frameQC_names.items()]), + columns=['qc_values', 'qc_labels']) + + # Concatenate frames + frameQC = np.concatenate([e['frameQC_frames'] for e in exptQC], axis=0) + + # Transform to bad_frames as expected by suite2p + bad_frames = np.where(frameQC != 0)[0] + + return frameQC, frameQC_names, bad_frames + + def get_default_tau(self): + """ + Determine the tau (fluorescence decay) from the subject's genotype. + + Returns + ------- + float + The tau value to use. 
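+
+        Examples
+        --------
+        The allele suffix is matched against the subject's genotype; e.g. a
+        hypothetical allele name ending in '-G6f' maps to a tau of 0.7:
+
+        >>> import re
+        >>> re.match(r'.+-(G\d[fms])$', 'Thy1-G6f').groups()[0]
+        'G6f'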
+ + See Also + -------- + https://suite2p.readthedocs.io/en/latest/settings.html + """ + # These settings are from the suite2P documentation + TAU_MAP = {'G6s': 1.5, 'G6m': 1., 'G6f': .7, 'default': 1.5} + _, subject, *_ = session_path_parts(self.session_path) + genotype = self.one.alyx.rest('subjects', 'read', id=subject)['genotype'] + match = next(filter(None, (re.match(r'.+-(G\d[fms])$', g['allele']) for g in genotype)), None) + key = match.groups()[0] if match else 'default' + return TAU_MAP.get(key, TAU_MAP['default']) + + def _create_db(self, meta): + """ + Create the ops dictionary for suite2p based on metadata. + + Parameters + ---------- + meta: dict + Imaging metadata. + + Returns + ------- + dict + Inputs to suite2p run that deviate from default parameters. + """ + + # Currently only supporting single plane, assert that this is the case + # FIXME This checks for zstacks but not dual plane mode + if not isinstance(meta['scanImageParams']['hStackManager']['zs'], int): + raise NotImplementedError('Multi-plane imaging not yet supported, data seems to be multi-plane') + + # Computing dx and dy + cXY = np.array([fov['topLeftDeg'] for fov in meta['FOV']]) + cXY -= np.min(cXY, axis=0) + nXnYnZ = np.array([fov['nXnYnZ'] for fov in meta['FOV']]) + sW = np.sqrt(np.sum((np.array([fov['topRightDeg'] for fov in meta['FOV']]) - np.array( + [fov['topLeftDeg'] for fov in meta['FOV']])) ** 2, axis=1)) + sH = np.sqrt(np.sum((np.array([fov['bottomLeftDeg'] for fov in meta['FOV']]) - np.array( + [fov['topLeftDeg'] for fov in meta['FOV']])) ** 2, axis=1)) + pixSizeX = nXnYnZ[:, 0] / sW + pixSizeY = nXnYnZ[:, 1] / sH + dx = np.round(cXY[:, 0] * pixSizeX).astype(dtype=np.int32) + dy = np.round(cXY[:, 1] * pixSizeY).astype(dtype=np.int32) + nchannels = len(meta['channelSaved']) if isinstance(meta['channelSaved'], list) else 1 + + db = { + 'data_path': sorted(map(str, self.session_path.glob(f'{self.device_collection}'))), + 'save_path0': str(self.session_path.joinpath('alf')), + 'fast_disk': '', # TODO + 'look_one_level_down': False, # don't look in the children folders as that is where the reference data is + 'num_workers': self.cpu, # this selects number of cores to parallelize over for the registration step + 'num_workers_roi': -1, # for parallelization over FOVs during cell detection, for now don't + 'keep_movie_raw': False, + 'delete_bin': False, # TODO: delete this on the long run + 'batch_size': 500, # SP reduced this from 1000 + 'nimg_init': 400, + 'combined': False, + 'nonrigid': True, + 'maxregshift': 0.05, # default = 1 + 'denoise': 1, # whether binned movie should be denoised before cell detection + 'block_size': [128, 128], + 'save_mat': True, # save the data to Fall.mat + 'move_bin': True, # move the binary file to save_path + 'scalefactor': 1, # scale manually in x to account for overlap between adjacent ribbons UCL mesoscope + 'mesoscan': True, + 'nplanes': 1, + 'nrois': len(meta['FOV']), + 'nchannels': nchannels, + 'fs': meta['scanImageParams']['hRoiManager']['scanVolumeRate'], + 'lines': [list(np.asarray(fov['lineIdx']) - 1) for fov in meta['FOV']], # subtracting 1 to make 0-based + 'tau': self.get_default_tau(), # deduce the GCamp used from Alyx mouse line (defaults to 1.5; that of GCaMP6s) + 'functional_chan': 1, # for now, eventually find(ismember(meta.channelSaved == meta.channelID.green)) + 'align_by_chan': 1, # for now, eventually find(ismember(meta.channelSaved == meta.channelID.red)) + 'dx': dx, + 'dy': dy + } + + return db + + def _run(self, run_suite2p=True, rename_files=True, 
use_badframes=True, **kwargs):
+        """
+        Process inputs, run suite2p and make outputs alf compatible.
+
+        Parameters
+        ----------
+        run_suite2p: bool
+            Whether to run suite2p. Default is True.
+        rename_files: bool
+            Whether to rename and reorganize the suite2p outputs to be alf compatible. Default is True.
+        use_badframes: bool
+            Whether to exclude bad frames indicated by the experimenter in exptQC.mat. Default is True.
+
+        Returns
+        -------
+        list of pathlib.Path
+            All files created by the task.
+        """
+        import suite2p
+
+        """ Metadata and parameters """
+        # Load metadata and make sure all metadata is consistent across FOVs
+        meta_files = sorted(self.session_path.glob(f'{self.device_collection}/*rawImagingData.meta.*'))
+        collections = set(f.parts[-2] for f in meta_files)
+        # Check there is exactly 1 meta file per collection
+        assert len(meta_files) == len(list(self.session_path.glob(self.device_collection))) == len(collections)
+        rawImagingData = [mesoscope.patch_imaging_meta(alfio.load_file_content(filepath)) for filepath in meta_files]
+        if len(rawImagingData) > 1:
+            meta = self._check_meta_data(rawImagingData)
+        else:
+            meta = rawImagingData[0]
+        # Get default ops
+        ops = suite2p.default_ops()
+        # Create db which overwrites ops when passed to suite2p, with information from metadata and hardcoded values
+        db = self._create_db(meta)
+        # Anything can be overwritten by keyword arguments passed to the task's run() method
+        for k, v in kwargs.items():
+            if k in ops.keys() or k in db.keys():
+                # db overwrites ops when passed to run_s2p, so we only need to update / add it here
+                db[k] = v
+        # Update the task kwargs attribute as it will be stored in the arguments json field in Alyx
+        self.kwargs = {**self.kwargs, **db}
+
+        """ Bad frames """
+        qc_paths = (self.session_path.joinpath(f[1], 'exptQC.mat')
+                    for f in self.input_files if f[0] == 'exptQC.mat')
+        qc_paths = map(str, filter(Path.exists, qc_paths))
+        exptQC = [loadmat(p, squeeze_me=True, simplify_cells=True) for p in qc_paths]
+        if len(exptQC) > 0:
+            frameQC, frameQC_names, bad_frames = self._consolidate_exptQC(exptQC)
+        else:
+            _logger.warning('No frame QC (exptQC.mat) files found.')
+            frameQC, bad_frames = np.array([], dtype='u1'), np.array([], dtype='i8')
+            frameQC_names = pd.DataFrame(columns=['qc_values', 'qc_labels'])
+        # If applicable, save as bad_frames.npy in first raw_imaging_folder for suite2p
+        if len(bad_frames) > 0 and use_badframes is True:
+            np.save(Path(db['data_path'][0]).joinpath('bad_frames.npy'), bad_frames)
+
+        """ Suite2p """
+        # Create the alf folder if it doesn't exist
+        self.session_path.joinpath('alf').mkdir(exist_ok=True)
+        # Remove existing suite2p dir if it exists
+        suite2p_dir = Path(db['save_path0']).joinpath('suite2p')
+        if suite2p_dir.exists():
+            shutil.rmtree(str(suite2p_dir), ignore_errors=True, onerror=None)
+        # Run suite2p
+        if run_suite2p:
+            _ = suite2p.run_s2p(ops=ops, db=db)
+
+        """ Outputs """
+        # Save and rename other outputs
+        if rename_files:
+            out_files = self._rename_outputs(suite2p_dir, frameQC_names, frameQC)
+        else:
+            out_files = list(Path(db['save_path0']).joinpath('suite2p').rglob('*'))
+        # Only return output files that are in the signature (for registration)
+        out_files = [f for f in out_files if f.name in [f[0] for f in self.output_files]]
+        return out_files
+
+
+class MesoscopeSync(base_tasks.MesoscopeTask):
+    """Extract the frame times from the main DAQ."""
+
+    priority = 40
+    job_size = 'small'
+
+    @property
+    def signature(self):
+        signature = {
+            'input_files': [(f'_{self.sync_namespace}_DAQdata.raw.npy', self.sync_collection, True),
+                            (f'_{self.sync_namespace}_DAQdata.timestamps.npy', self.sync_collection, True),
+                            (f'_{self.sync_namespace}_DAQdata.meta.json', self.sync_collection, True),
+                            ('_ibl_rawImagingData.meta.json', self.device_collection, True),
+                            ('rawImagingData.times_scanImage.npy', self.device_collection, True),
+                            (f'_{self.sync_namespace}_softwareEvents.log.htsv', self.sync_collection, False), ],
+            'output_files': [('mpci.times.npy', 'alf/mesoscope/FOV*', True),
+                             ('mpciStack.timeshift.npy', 'alf/mesoscope/FOV*', True), ]
+        }
+        return signature
+
+    def _run(self):
+        """
+        Extract the imaging times for all FOVs.
+
+        Returns
+        -------
+        list of pathlib.Path
+            Files containing frame timestamps for individual FOVs and time offsets for each line scan.
+        """
+        # TODO function to determine nFOVs
+        try:
+            alf_path = self.session_path / self.sync_collection
+            events = alfio.load_object(alf_path, 'softwareEvents').get('log')
+        except alferr.ALFObjectNotFound:
+            events = None
+        if events is None or events.empty:
+            _logger.debug('No software events found for session %s', self.session_path)
+        collections = set(collection for _, collection, _ in self.input_files
+                          if fnmatch(collection, self.device_collection))
+        # Load the first meta data file to determine the number of FOVs
+        # Changing FOV between imaging bouts is not currently supported!
+        self.rawImagingData = alfio.load_object(self.session_path / next(iter(collections)), 'rawImagingData')
+        self.rawImagingData['meta'] = mesoscope.patch_imaging_meta(self.rawImagingData['meta'])
+        n_FOVs = len(self.rawImagingData['meta']['FOV'])
+        sync, chmap = self.load_sync()  # Extract sync data from raw DAQ data
+        mesosync = mesoscope.MesoscopeSyncTimeline(self.session_path, n_FOVs)
+        _, out_files = mesosync.extract(
+            save=True, sync=sync, chmap=chmap, device_collection=collections, events=events)
+        return out_files
+
+
+class MesoscopeFOV(base_tasks.MesoscopeTask):
+    """Create FOV and FOV location objects in Alyx from metadata."""
+
+    priority = 40
+    job_size = 'small'
+
+    @property
+    def signature(self):
+        signature = {
+            'input_files': [('_ibl_rawImagingData.meta.json', self.device_collection, True),
+                            ('mpciROIs.stackPos.npy', 'alf/FOV*', True)],
+            'output_files': [('mpciMeanImage.brainLocationIds*.npy', 'alf/FOV_*', True),
+                             ('mpciMeanImage.mlapdv*.npy', 'alf/FOV_*', True),
+                             ('mpciROIs.mlapdv*.npy', 'alf/FOV_*', True),
+                             ('mpciROIs.brainLocationIds*.npy', 'alf/FOV_*', True),
+                             ('_ibl_rawImagingData.meta.json', self.device_collection, True)]
+        }
+        return signature
+
+    def _run(self, *args, provenance=Provenance.ESTIMATE, **kwargs):
+        """
+        Register fields of view (FOV) to Alyx and extract the coordinates and IDs of each ROI.
+
+        Steps:
+        1. Save the mpciMeanImage.brainLocationIds_estimate and mlapdv datasets.
+        2. Use mean image coordinates and ROI stack position datasets to extract the brain location
+           of each ROI.
+        3. Register the location of each FOV in Alyx.
+
+        Parameters
+        ----------
+        provenance : Provenance
+            The provenance of the coordinates in the meta file. For all but 'HISTOLOGY', the
+            provenance is added as a dataset suffix. Defaults to ESTIMATE.
+
+        Returns
+        -------
+        list of pathlib.Path
+            The meta file and the newly created mean image and ROI MLAPDV and brain location
+            datasets.
+
+        Notes
+        -----
+        Once the FOVs have been registered they cannot be updated with this task. Rerunning this
+        task will result in an error.
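+
+        Examples
+        --------
+        An illustrative invocation (the session path and ONE instance are assumed):
+
+        >>> task = MesoscopeFOV(session_path, one=one)
+        >>> status = task.run(provenance=Provenance.ESTIMATE)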
+ """ + # Load necessary data + (filename, collection, _), *_ = self.signature['input_files'] + meta_file = next(self.session_path.glob(f'{collection}/{filename}'), None) + meta = alfio.load_file_content(meta_file) or {} + nFOV = len(meta.get('FOV', [])) + + suffix = None if provenance is Provenance.HISTOLOGY else provenance.name.lower() + _logger.info('Extracting %s MLAPDV datasets', suffix or 'final') + + # Extract mean image MLAPDV coordinates and brain location IDs + mean_image_mlapdv, mean_image_ids = self.project_mlapdv(meta) + + # Save the meta data file with new coordinate fields + with open(meta_file, 'w') as fp: + json.dump(meta, fp) + + # Save the mean image datasets + mean_image_files = [] + assert set(mean_image_mlapdv.keys()) == set(mean_image_ids.keys()) and len(mean_image_ids) == nFOV + for i in range(nFOV): + alf_path = self.session_path.joinpath('alf', f'FOV_{i:02}') + for attr, arr, sfx in (('mlapdv', mean_image_mlapdv[i], suffix), + ('brainLocationIds', mean_image_ids[i], ('ccf', '2017', suffix))): + mean_image_files.append(alf_path / to_alf('mpciMeanImage', attr, 'npy', timescale=sfx)) + np.save(mean_image_files[-1], arr) + + # Extract ROI MLAPDV coordinates and brain location IDs + roi_mlapdv, roi_brain_ids = self.roi_mlapdv(nFOV, suffix=suffix) + + # Write MLAPDV + brain location ID of ROIs to disk + roi_files = [] + assert set(roi_mlapdv.keys()) == set(roi_brain_ids.keys()) and len(roi_mlapdv) == nFOV + for i in range(nFOV): + alf_path = self.session_path.joinpath('alf', f'FOV_{i:02}') + for attr, arr, sfx in (('mlapdv', roi_mlapdv[i], suffix), + ('brainLocationIds', roi_brain_ids[i], ('ccf', '2017', suffix))): + roi_files.append(alf_path / to_alf('mpciROIs', attr, 'npy', timescale=sfx)) + np.save(roi_files[-1], arr) + + # Register FOVs in Alyx + self.register_fov(meta, suffix) + + return sorted([meta_file, *roi_files, *mean_image_files]) + + def roi_mlapdv(self, nFOV: int, suffix=None): + """ + Extract ROI MLAPDV coordinates and brain location IDs. + + MLAPDV coordinates are in um relative to bregma. Location IDs are from the 2017 Allen + common coordinate framework atlas. + + Parameters + ---------- + nFOV : int + The number of fields of view acquired. + suffix : {None, 'estimate'} + The attribute suffix of the mpciMeanImage datasets to load. If generating from + estimates, the suffix should be 'estimate'. + + Returns + ------- + dict of int: numpy.array + A map of field of view to ROI MLAPDV coordinates. + dict of int: numpy.array + A map of field of view to ROI brain location IDs. 
+ """ + all_mlapdv = {} + all_brain_ids = {} + for n in range(nFOV): + alf_path = self.session_path.joinpath('alf', f'FOV_{n:02}') + + # Load neuron centroids in pixel space + stack_pos_file = next(alf_path.glob('mpciROIs.stackPos*'), None) + if not stack_pos_file: + raise FileNotFoundError(alf_path / 'mpci.stackPos*') + stack_pos = alfio.load_file_content(stack_pos_file) + + # Load MLAPDV + brain location ID maps of pixels + mpciMeanImage = alfio.load_object( + alf_path, 'mpciMeanImage', attribute=['mlapdv', 'brainLocationIds']) + + # Get centroid MLAPDV + brainID by indexing pixel-map with centroid locations + mlapdv = np.full(stack_pos.shape, np.nan) + brain_ids = np.full(stack_pos.shape[0], np.nan) + for i in np.arange(stack_pos.shape[0]): + idx = (stack_pos[i, 0], stack_pos[i, 1]) + sfx = f'_{suffix}' if suffix else '' + mlapdv[i, :] = mpciMeanImage['mlapdv' + sfx][idx] + brain_ids[i] = mpciMeanImage['brainLocationIds_ccf_2017' + sfx][idx] + assert ~np.isnan(brain_ids).any() + all_brain_ids[n] = brain_ids.astype(int) + all_mlapdv[n] = mlapdv + + return all_mlapdv, all_brain_ids + + @staticmethod + def get_provenance(filename): + """ + Get the field of view provenance from a mpciMeanImage or mpciROIs dataset. + + Parameters + ---------- + filename : str, pathlib.Path + A filename to get the provenance from. + + Returns + ------- + Provenance + The provenance of the file. + """ + filename = Path(filename).name + timescale = (filename_parts(filename)[3] or '').split('_') + provenances = [i.name.lower() for i in Provenance] + provenance = (Provenance[x.upper()] for x in timescale if x in provenances) + return next(provenance, None) or Provenance.HISTOLOGY + + def register_fov(self, meta: dict, suffix: str = None) -> (list, list): + """ + Create FOV on Alyx. + + Assumes field of view recorded perpendicular to objective. + Assumes field of view is plane (negligible volume). + + Required Alyx fixtures: + - experiments.ImagingType(name='mesoscope') + - experiments.CoordinateSystem(name='IBL-Allen') + + Parameters + ---------- + meta : dict + The raw imaging meta data from _ibl_rawImagingData.meta.json. + suffix : str + The file attribute suffixes to load from the mpciMeanImage object. Either 'estimate' or + None. No suffix means the FOV location provenance will be L (Landmark). + + Returns + ------- + list of dict + A list registered of field of view entries from Alyx. + + TODO Determine dual plane ID for JSON field + """ + dry = self.one is None or self.one.offline + alyx_fovs = [] + # Count the number of slices per stack ID: only register stacks that contain more than one slice. + slice_counts = Counter(f['roiUUID'] for f in meta.get('FOV', [])) + # Create a new stack in Alyx for all stacks containing more than one slice. + # Map of ScanImage ROI UUID to Alyx ImageStack UUID. 
+ stack_ids = {i: self.one.alyx.rest('imaging-stack', 'create', data={'name': i})['id'] + for i in slice_counts if slice_counts[i] > 1} + + for i, fov in enumerate(meta.get('FOV', [])): + assert set(fov.keys()) >= {'MLAPDV', 'nXnYnZ', 'roiUUID'} + # Field of view + alyx_FOV = { + 'session': self.session_path.as_posix() if dry else self.path2eid(), + 'imaging_type': 'mesoscope', 'name': f'FOV_{i:02}', + 'stack': stack_ids.get(fov['roiUUID']) + } + if dry: + print(alyx_FOV) + alyx_FOV['location'] = [] + alyx_fovs.append(alyx_FOV) + else: + alyx_fovs.append(self.one.alyx.rest('fields-of-view', 'create', data=alyx_FOV)) + + # Field of view location + data = {'field_of_view': alyx_fovs[-1].get('id'), + 'default_provenance': True, + 'coordinate_system': 'IBL-Allen', + 'n_xyz': fov['nXnYnZ']} + if suffix: + data['provenance'] = suffix[0].upper() + + # Convert coordinates to 4 x 3 array (n corners by n dimensions) + # x1 = top left ml, y1 = top left ap, y2 = top right ap, etc. + coords = [fov['MLAPDV'][key] for key in ('topLeft', 'topRight', 'bottomLeft', 'bottomRight')] + coords = np.vstack(coords).T + data.update({k: arr.tolist() for k, arr in zip('xyz', coords)}) + + # Load MLAPDV + brain location ID maps of pixels + filename = 'mpciMeanImage.brainLocationIds_ccf_2017' + (f'_{suffix}' if suffix else '') + '.npy' + filepath = self.session_path.joinpath('alf', f'FOV_{i:02}', filename) + mean_image_ids = alfio.load_file_content(filepath) + + data['brain_region'] = np.unique(mean_image_ids).astype(int).tolist() + + if dry: + print(data) + alyx_FOV['location'].append(data) + else: + alyx_fovs[-1]['location'].append(self.one.alyx.rest('fov-location', 'create', data=data)) + return alyx_fovs + + def load_triangulation(self): + """ + Load the surface triangulation file. + + A triangle mesh of the smoothed convex hull of the dorsal surface of the mouse brain, + generated from the 2017 Allen 10um annotation volume. This triangulation was generated in + MATLAB. + + Returns + ------- + points : numpy.array + An N by 3 float array of x-y vertices, defining all points of the triangle mesh. These + are in um relative to the IBL bregma coordinates. + connectivity_list : numpy.array + An N by 3 integer array of vertex indices defining all points that form a triangle. + """ + fixture_path = Path(mesoscope.__file__).parent.joinpath('mesoscope') + surface_triangulation = np.load(fixture_path / 'surface_triangulation.npz') + points = surface_triangulation['points'].astype('f8') + connectivity_list = surface_triangulation['connectivity_list'] + surface_triangulation.close() + return points, connectivity_list + + def project_mlapdv(self, meta, atlas=None): + """ + Calculate the mean image pixel locations in MLAPDV coordinates and determine the brain + location IDs. + + MLAPDV coordinates are in um relative to bregma. Location IDs are from the 2017 Allen + common coordinate framework atlas. + + Parameters + ---------- + meta : dict + The raw imaging data meta file, containing coordinates for the centre of each field of + view. + atlas : ibllib.atlas.Atlas + An atlas instance. + + Returns + ------- + dict + A map of FOV number (int) to mean image MLAPDV coordinates as a 2D numpy array. + dict + A map of FOV number (int) to mean image brain location IDs as a 2D numpy int array. + """ + mlapdv = {} + location_id = {} + # Use the MRI atlas as this applies scaling, particularly along the DV axis to (hopefully) + # more accurately represent the living brain. 
+ atlas = atlas or MRITorontoAtlas(res_um=10) + # The centre of the craniotomy / imaging window + coord_ml = meta['centerMM']['ML'] * 1e3 # mm -> um + coord_ap = meta['centerMM']['AP'] * 1e3 # mm -> um + pt = np.array([coord_ml, coord_ap]) + + points, connectivity_list = self.load_triangulation() + # Only keep faces that have normals pointing up (positive DV value). + # Calculate the normal vector pointing out of the convex hull. + triangles = points[connectivity_list, :] + normals = surface_normal(triangles) + up_faces, = np.where(normals[:, -1] > 0) + # only keep triangles that have normal vector with positive DV component + dorsal_connectivity_list = connectivity_list[up_faces, :] + # Flatten triangulation by dropping the dorsal coordinates and find the location of the + # window center (we convert mm -> um here) + face_ind = find_triangle(pt * 1e-3, points[:, :2] * 1e-3, dorsal_connectivity_list.astype(np.intp)) + assert face_ind != -1 + + dorsal_triangle = points[dorsal_connectivity_list[face_ind, :], :] + + # Get the surface normal unit vector of dorsal triangle + normal_vector = surface_normal(dorsal_triangle) + + # find the coordDV that sits on the triangular face and had [coordML, coordAP] coordinates; + # the three vertices defining the triangle + face_vertices = points[dorsal_connectivity_list[face_ind, :], :] + + # all the vertices should be on the plane ax + by + cz = 1, so we can find + # the abc coefficients by inverting the three equations for the three vertices + abc, *_ = np.linalg.lstsq(face_vertices, np.ones(3), rcond=None) + + # and then find a point on that plane that corresponds to a given x-y + # coordinate (which is ML-AP coordinate) + coord_dv = (1 - pt @ abc[:2]) / abc[2] + + # We should not use the actual surface of the brain for this, as it might be in one of the sulci + # DO NOT USE THIS: + # coordDV = interp2(axisMLmm, axisAPmm, surfaceDV, coordML, coordAP) + + # Now we need to span the plane of the coverslip with two orthogonal unit vectors. + # We start with vY, because the order is important and we usually have less + # tilt along AP (pitch), which will cause less deviation in vX from pure ML. + vY = np.array([0, normal_vector[2], -normal_vector[1]]) # orthogonal to the normal of the plane + vX = np.cross(vY, normal_vector) # orthogonal to n and to vY + # normalize and flip the sign if necessary + vX = vX / np.sqrt(vX @ vX) * np.sign(vX[0]) # np.sqrt(vY @ vY) == LR norm of vX + vY = vY / np.sqrt(vY @ vY) * np.sign(vY[1]) + + # what are the dimensions of the data arrays (ap, ml, dv) + (nAP, nML, nDV) = atlas.image.shape + # Let's shift the coordinates relative to bregma + voxel_size = atlas.res_um # [um] resolution of the atlas + bregma_coords = ALLEN_CCF_LANDMARKS_MLAPDV_UM['bregma'] / voxel_size # (ml, ap, dv) + axis_ml_um = (np.arange(nML) - bregma_coords[0]) * voxel_size + axis_ap_um = (np.arange(nAP) - bregma_coords[1]) * voxel_size * -1. + axis_dv_um = (np.arange(nDV) - bregma_coords[2]) * voxel_size * -1. 
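+        # For illustration: with a 10 um atlas and bregma at voxel index b along ML,
+        # axis_ml_um = (np.arange(nML) - b) * 10 is zero at bregma; the AP and DV axes
+        # are additionally flipped (* -1.) so that anterior and dorsal are positive,
+        # matching the MLAPDV convention used throughout.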
+ + # projection of FOVs on the brain surface to get ML-AP-DV coordinates + _logger.info('Projecting in 3D') + for i, fov in enumerate(meta['FOV']): # i, fov = next(enumerate(meta['FOV'])) + start_time = time.time() + _logger.info(f'FOV {i + 1}/{len(meta["FOV"])}') + y_px_idx, x_px_idx = np.mgrid[0:fov['nXnYnZ'][0], 0:fov['nXnYnZ'][1]] + + # xx and yy are in mm in coverslip space + points = ((0, fov['nXnYnZ'][0] - 1), (0, fov['nXnYnZ'][1] - 1)) + if 'MM' not in fov: + fov['MM'] = { + 'topLeft': fov.pop('topLeftMM'), + 'topRight': fov.pop('topRightMM'), + 'bottomLeft': fov.pop('bottomLeftMM'), + 'bottomRight': fov.pop('bottomRightMM') + } + # The four corners of the FOV, determined by taking the center of the craniotomy in MM, + # the x-y coordinates of the imaging window center (from the tiled reference image) in + # galvanometer units, and the x-y coordinates of the FOV center in galvanometer units. + values = [[fov['MM']['topLeft'][0], fov['MM']['topRight'][0]], + [fov['MM']['bottomLeft'][0], fov['MM']['bottomRight'][0]]] + values = np.array(values) * 1e3 # mm -> um + xx = interpn(points, values, (y_px_idx, x_px_idx)) + + values = [[fov['MM']['topLeft'][1], fov['MM']['topRight'][1]], + [fov['MM']['bottomLeft'][1], fov['MM']['bottomRight'][1]]] + values = np.array(values) * 1e3 # mm -> um + yy = interpn(points, values, (y_px_idx, x_px_idx)) + + xx = xx.flatten() - coord_ml + yy = yy.flatten() - coord_ap + + # rotate xx and yy in 3D + # the coords are still on the coverslip, but now have 3D values + coords = np.outer(xx, vX) + np.outer(yy, vY) # (vX * xx) + (vY * yy) + coords = coords + [coord_ml, coord_ap, coord_dv] + + # for each point of the FOV create a line parametrization (trajectory normal to the coverslip plane). + # start just above the coverslip and go 3 mm down, should be enough to 'meet' the brain + t = np.arange(-voxel_size, 3e3, voxel_size) + + # Find the MLAPDV atlas coordinate and brain location of each pixel. + MLAPDV, annotation = _update_points( + t, normal_vector, coords, axis_ml_um, axis_ap_um, axis_dv_um, atlas.label) + annotation = atlas.regions.index2id(annotation) # convert annotation indices to IDs + + if np.any(np.isnan(MLAPDV)): + _logger.warning('Areas of FOV lie outside the brain') + _logger.info(f'done ({time.time() - start_time:3.1f} seconds)\n') + MLAPDV = np.reshape(MLAPDV, [*x_px_idx.shape, 3]) + annotation = np.reshape(annotation, x_px_idx.shape) + + fov['MLAPDV'] = { + 'topLeft': MLAPDV[0, 0, :].tolist(), + 'topRight': MLAPDV[0, -1, :].tolist(), + 'bottomLeft': MLAPDV[-1, 0, :].tolist(), + 'bottomRight': MLAPDV[-1, -1, :].tolist(), + 'center': MLAPDV[round(x_px_idx.shape[0] / 2) - 1, round(x_px_idx.shape[1] / 2) - 1, :].tolist() + } + + # Save the brain regions of the corners/centers of FOV (annotation field) + fov['brainLocationIds'] = { + 'topLeft': int(annotation[0, 0]), + 'topRight': int(annotation[0, -1]), + 'bottomLeft': int(annotation[-1, 0]), + 'bottomRight': int(annotation[-1, -1]), + 'center': int(annotation[round(x_px_idx.shape[0] / 2) - 1, round(x_px_idx.shape[1] / 2) - 1]) + } + + mlapdv[i] = MLAPDV + location_id[i] = annotation + return mlapdv, location_id + + +def surface_normal(triangle): + """ + Calculate the surface normal unit vector of one or more triangles. + + Parameters + ---------- + triangle : numpy.array + An array of shape (n_triangles, 3, 3) representing (Px Py Pz). + + Returns + ------- + numpy.array + The surface normal unit vector(s). 
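+
+    Examples
+    --------
+    A triangle lying flat in the x-y plane has its normal along +z:
+
+    >>> import numpy as np
+    >>> surface_normal(np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]]))
+    array([0., 0., 1.])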
+ """ + if triangle.shape == (3, 3): + triangle = triangle[np.newaxis, :, :] + if triangle.shape[1:] != (3, 3): + raise ValueError('expected array of shape (3, 3); 3 coordinates in x, y, and z') + V = triangle[:, 1, :] - triangle[:, 0, :] # V = P2 - P1 + W = triangle[:, 2, :] - triangle[:, 0, :] # W = P3 - P1 + + Nx = (V[:, 1] * W[:, 2]) - (V[:, 2] * W[:, 1]) # Nx = (Vy * Wz) - (Vz * Wy) + Ny = (V[:, 2] * W[:, 0]) - (V[:, 0] * W[:, 2]) # Ny = (Vz * Wx) - (Vx * Wz) + Nz = (V[:, 0] * W[:, 1]) - (V[:, 1] * W[:, 0]) # Nz = (Vx * Wy) - (Vy * Wx) + N = np.c_[Nx, Ny, Nz] + # Calculate unit vector. Transpose allows vectorized operation. + A = N / np.sqrt((Nx ** 2) + (Ny ** 2) + (Nz ** 2))[np.newaxis].T + return A.squeeze() + + +@nb.njit('b1(f8[:,:], f8[:])') +def in_triangle(triangle, point): + """ + Check whether `point` lies within `triangle`. + + Parameters + ---------- + triangle : numpy.array + A (2 x 3) array of x-y coordinates; A(x1, y1), B(x2, y2) and C(x3, y3). + point : numpy.array + A point, P(x, y). + + Returns + ------- + bool + True if coordinate lies within triangle. + """ + def area(x1, y1, x2, y2, x3, y3): + """Calculate the area of a triangle, given its vertices.""" + return abs((x1 * (y2 - y3) + x2 * (y3 - y1) + x3 * (y1 - y2)) / 2.) + + x1, y1, x2, y2, x3, y3 = triangle.flat + x, y = point + A = area(x1, y1, x2, y2, x3, y3) # area of triangle ABC + A1 = area(x, y, x2, y2, x3, y3) # area of triangle PBC + A2 = area(x1, y1, x, y, x3, y3) # area of triangle PAC + A3 = area(x1, y1, x2, y2, x, y) # area of triangle PAB + # Check if sum of A1, A2 and A3 equals that of A + diff = np.abs((A1 + A2 + A3) - A) + REL_TOL = 1e-9 + return diff <= np.abs(REL_TOL * A) # isclose not yet implemented in numba 0.57 + + +@nb.njit('i8(f8[:], f8[:,:], intp[:,:])', nogil=True) +def find_triangle(point, vertices, connectivity_list): + """ + Find which vertices contain a given point. + + Currently O(n) but could take advantage of connectivity order to be quicker. + + Parameters + ---------- + point : numpy.array + The (x, y) coordinate of a point to locate within one of the triangles. + vertices : numpy.array + An N x 3 array of vertices representing a triangle mesh. + connectivity_list : numpy.array + An N x 3 array of indices representing the connectivity of `points`. + + Returns + ------- + int + The index of the vertices containing `point`, or -1 if not within any triangle. + """ + face_ind = -1 + for i in nb.prange(connectivity_list.shape[0]): + triangle = vertices[connectivity_list[i, :], :] + if in_triangle(triangle, point): + face_ind = i + break + return face_ind + + +@nb.njit('Tuple((f8[:], intp[:]))(f8[:], f8[:])', nogil=True) +def _nearest_neighbour_1d(x, x_new): + """ + Nearest neighbour interpolation with extrapolation. + + This was adapted from scipy.interpolate.interp1d but returns the indices of each nearest x + value. Assumes x is not sorted. + + Parameters + ---------- + x : (N,) array_like + A 1-D array of real values. + x_new : (N,) array_like + A 1D array of values to apply function to. + + Returns + ------- + numpy.array + A 1D array of interpolated values. + numpy.array + A 1D array of indices. + """ + SIDE = 'left' # use 'right' to round up to nearest int instead of rounding down + # Sort values + ind = np.argsort(x, kind='mergesort') + x = x[ind] + x_bds = x / 2.0 # Do division before addition to prevent possible integer overflow + x_bds = x_bds[1:] + x_bds[:-1] + # Find where in the averaged data the values to interpolate would be inserted. 
+    x_new_indices = np.searchsorted(x_bds, x_new, side=SIDE)
+    # Clip x_new_indices so that they are within the range of x indices.
+    x_new_indices = x_new_indices.clip(0, len(x) - 1).astype(np.intp)
+    # Calculate the actual value for each entry in x_new.
+    y_new = x[x_new_indices]
+    return y_new, ind[x_new_indices]
+
+
+@nb.njit('Tuple((f8[:,:], u2[:]))(f8[:], f8[:], f8[:,:], f8[:], f8[:], f8[:], u2[:,:,:])', nogil=True)
+def _update_points(t, normal_vector, coords, axis_ml_um, axis_ap_um, axis_dv_um, atlas_labels):
+    """
+    Determine the MLAPDV coordinate and brain location index for each of the given coordinates.
+
+    This has been optimized in numba. The majority of the time savings come from replacing interp1d
+    and ismember with _nearest_neighbour_1d, which were extremely slow. Parallel iteration further
+    halved the time it took per 512x512 FOV.
+
+    Parameters
+    ----------
+    t : numpy.array
+        A 1D array of evenly spaced distances in um, defining points along a trajectory going down
+        from the coverslip towards the brain.
+    normal_vector : numpy.array
+        The unit vector of the face normal to the center of the window.
+    coords : numpy.array
+        A set of N x 3 coordinates representing the MLAPDV coordinates of each pixel relative to
+        the center of the window, in micrometers (um).
+    axis_ml_um : numpy.array
+        An evenly spaced array of medio-lateral brain coordinates relative to bregma in um, at the
+        resolution of the atlas image used.
+    axis_ap_um : numpy.array
+        An evenly spaced array of antero-posterior brain coordinates relative to bregma in um, at
+        the resolution of the atlas image used.
+    axis_dv_um : numpy.array
+        An evenly spaced array of dorso-ventral brain coordinates relative to bregma in um, at
+        the resolution of the atlas image used.
+    atlas_labels : numpy.array
+        A 3D array of integers representing the brain location index of each voxel of a given
+        atlas. The shape is expected to be (nAP, nML, nDV).
+
+    Returns
+    -------
+    numpy.array
+        An N by 3 array containing the MLAPDV coordinates in um of each pixel coordinate.
+        Coordinates outside of the brain are NaN.
+    numpy.array
+        A 1D array of atlas label indices the length of `coords`.
+    """
+    # Trajectory points passing through the center of the craniotomy/coverslip
+    traj_coords_centered = np.outer(t, -normal_vector)
+    MLAPDV = np.full_like(coords, np.nan)
+    annotation = np.zeros(coords.shape[0], dtype=np.uint16)
+    n_points = coords.shape[0]
+    for p in nb.prange(n_points):
+        # Shifted to the correct point on the coverslip, in true ML-AP-DV coords
+        traj_coords = traj_coords_centered + coords[p, :]
+
+        # Find intersection coordinate with the brain.
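+        # For illustration (made-up labels): if the atlas labels sampled along one
+        # trajectory read [0, 0, 1, 1, 527, ...], the search below records the first
+        # non-root voxel (label 527) as this pixel's intersection and annotation;
+        # trajectories that never leave void (0) or root (1) get NaN coordinates.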
+        # Only use coordinates that exist in the atlas (a kind of nearest neighbour interpolation)
+        ml, ml_idx = _nearest_neighbour_1d(axis_ml_um, traj_coords[:, 0])
+        ap, ap_idx = _nearest_neighbour_1d(axis_ap_um, traj_coords[:, 1])
+        dv, dv_idx = _nearest_neighbour_1d(axis_dv_um, traj_coords[:, 2])
+
+        # Iterate over coordinates to find the first (if any) that is within the brain
+        ind = -1
+        area = 0  # 0 = void; 1 = root
+        for i in nb.prange(traj_coords.shape[0]):
+            anno = atlas_labels[ap_idx[i], ml_idx[i], dv_idx[i]]
+            if anno > 0:  # first coordinate in the brain
+                ind = i
+                area = anno
+                if area > 1:  # non-root brain area; we're done
+                    break
+        if area > 1:
+            point = traj_coords[ind, :]
+            MLAPDV[p, :] = point  # in um
+            annotation[p] = area
+        else:
+            MLAPDV[p, :] = np.nan
+            annotation[p] = area  # root or void
+
+    return MLAPDV, annotation
diff --git a/ibllib/pipes/misc.py b/ibllib/pipes/misc.py
index 8f3c1db13..d3911533f 100644
--- a/ibllib/pipes/misc.py
+++ b/ibllib/pipes/misc.py
@@ -13,6 +13,7 @@ from inspect import signature
 import uuid
 import socket
+import traceback
 
 import spikeglx
 from iblutil.io import hashfile, params
@@ -79,10 +80,16 @@ def cli_ask_options(prompt: str, options: list, default_idx: int = 0) -> str:
     return ans
 
 
-def behavior_exists(session_path: str) -> bool:
+def behavior_exists(session_path: str, include_devices=False) -> bool:
+    """
+    Return True if the session has a task behaviour folder.
+
+    :param session_path: the full session path
+    :param include_devices: when True, a session containing a `_devices` folder also counts
+    :return: True if a behaviour data folder is present
+    """
     session_path = Path(session_path)
-    behavior_path = session_path / "raw_behavior_data"
-    if behavior_path.exists():
+    if include_devices and session_path.joinpath("_devices").exists():
+        return True
+    if session_path.joinpath("raw_behavior_data").exists():
         return True
     return any(session_path.glob('raw_task_data_*'))
@@ -379,8 +386,9 @@ def create_basic_transfer_params(param_str='transfer_params', local_data_path=No
         The name of the parameters to load/save.
     local_data_path : str, pathlib.Path
         The local root data path, stored with the DATA_FOLDER_PATH key. If None, user is prompted.
-    remote_data_path : str, pathlib.Path
+    remote_data_path : str, pathlib.Path, bool
        The remote root data path, stored with the REMOTE_DATA_FOLDER_PATH key. If None, user is prompted.
+        If False, the REMOTE_DATA_FOLDER_PATH key is not updated, or is set to False if clobber is True.
     clobber : bool
         If True, any parameters in existing parameter file not found as keyword args will be removed,
         otherwise the user is prompted for these also.
@@ -409,6 +417,11 @@ def create_basic_transfer_params(param_str='transfer_params', local_data_path=No
     >>> from functools import partial
     >>> par = create_basic_transfer_params(
     ...     custom_arg=partial(cli_ask_default, 'Please enter custom arg value'))
+
+    Set up with no remote path (NB: if not the first time, use clobber=True to save the param key)
+
+    >>> par = create_basic_transfer_params(remote_data_path=False)
+
     """
     parameters = params.as_dict(params.read(param_str, {})) or {}
     if local_data_path is None:
@@ -419,9 +432,12 @@ def create_basic_transfer_params(param_str='transfer_params', local_data_path=No
 
     if remote_data_path is None:
         remote_data_path = parameters.get('REMOTE_DATA_FOLDER_PATH')
-    if not remote_data_path or clobber:
+    if remote_data_path in (None, '') or clobber:
         remote_data_path = cli_ask_default("Where's your REMOTE 'Subjects' data folder?", remote_data_path)
-    parameters['REMOTE_DATA_FOLDER_PATH'] = remote_data_path
+    if remote_data_path is not False:
+        parameters['REMOTE_DATA_FOLDER_PATH'] = remote_data_path
+    elif 'REMOTE_DATA_FOLDER_PATH' not in parameters or clobber:
+        parameters['REMOTE_DATA_FOLDER_PATH'] = False  # Always assume no remote path
 
     # Deal with extraneous parameters
     for k, v in kwargs.items():
@@ -674,11 +690,20 @@ def rsync_paths(src: Path, dst: Path) -> bool:
     return True
 
 
-def confirm_ephys_remote_folder(
-    local_folder=False, remote_folder=False, force=False, iblscripts_folder=False
-):
+def confirm_ephys_remote_folder(local_folder=False, remote_folder=False, force=False, iblscripts_folder=False,
+                                session_path=None):
+    """
+    :param local_folder: the full path to the local Subjects folder
+    :param remote_folder: the full path to the remote Subjects folder
+    :param force:
+    :param iblscripts_folder:
+    :param session_path: an optional session path (or list of session paths) to transfer, instead
+        of searching for transfer_me.flag files
+    :return:
+    """
+    # FIXME: session_path can be relative
     pars = load_ephyspc_params()
-
+    if not iblscripts_folder:
+        import deploy
+        iblscripts_folder = Path(deploy.__file__).parent.parent
     if not local_folder:
         local_folder = pars["DATA_FOLDER_PATH"]
     if not remote_folder:
@@ -689,16 +714,22 @@ def confirm_ephys_remote_folder(
     local_folder = subjects_data_folder(local_folder, rglob=True)
     remote_folder = subjects_data_folder(remote_folder, rglob=True)
 
-    print("LOCAL:", local_folder)
-    print("REMOTE:", remote_folder)
-    src_session_paths = [x.parent for x in local_folder.rglob("transfer_me.flag")]
+    log.info(f"local folder: {local_folder}")
+    log.info(f"remote folder: {remote_folder}")
+    if session_path is None:
+        src_session_paths = [x.parent for x in local_folder.rglob("transfer_me.flag")]
+    else:
+        src_session_paths = session_path if isinstance(session_path, list) else [session_path]
 
     if not src_session_paths:
-        print("Nothing to transfer, exiting...")
+        log.info("Nothing to transfer, exiting...")
        return
 
+    for session_path in src_session_paths:
+        log.info(f"Found: {session_path}")
+    log.info(f"Found {len(src_session_paths)} sessions to transfer, starting transfer now")
+
     for session_path in src_session_paths:
-        print(f"\nFound session: {session_path}")
+        log.info(f"Transferring session: {session_path}")
         # Rename ephys files
         # FIXME: if transfer has failed and wiring file is there renaming will fail!
         rename_ephys_files(str(session_path))
@@ -708,16 +739,13 @@ def confirm_ephys_remote_folder(
         copy_wiring_files(str(session_path), iblscripts_folder)
         try:
             create_alyx_probe_insertions(str(session_path))
-        except BaseException as e:
-            print(
-                e,
-                "\nCreation failed, please create the probe insertions manually.",
-                "Continuing transfer...",
-            )
-        msg = f"Transfer to {remote_folder} with the same name?"
+        except BaseException:
+            log.error(traceback.format_exc())
+            log.info("Probe creation failed, please create the probe insertions manually. Continuing transfer...")
Continuing transfer...") + msg = f"Transfer {session_path }to {remote_folder} with the same name?" resp = input(msg + "\n[y]es/[r]ename/[s]kip/[e]xit\n ^\n> ") or "y" resp = resp.lower() - print(resp) + log.info(resp) if resp not in ["y", "r", "s", "e", "yes", "rename", "skip", "exit"]: return confirm_ephys_remote_folder( local_folder=local_folder, @@ -737,22 +765,21 @@ def confirm_ephys_remote_folder( return remote_session_path = remote_folder / Path(*session_path.parts[-3:]) - if not behavior_exists(remote_session_path): - print(f"No behavior folder found in {remote_session_path}: skipping session...") + if not behavior_exists(remote_session_path, include_devices=True): + log.error(f"No behavior folder found in {remote_session_path}: skipping session...") return # TODO: Check flagfiles on src.and dst + alf dir in session folder then remove # Try catch? wher catch condition is force transfer maybe - transfer_folder( - session_path / "raw_ephys_data", remote_session_path / "raw_ephys_data", force=force - ) + transfer_folder(session_path / "raw_ephys_data", remote_session_path / "raw_ephys_data", force=force) # if behavior extract_me.flag exists remove it, because of ephys flag flag_file = session_path / "transfer_me.flag" - flag_file.unlink() - if (remote_session_path / "extract_me.flag").exists(): - (remote_session_path / "extract_me.flag").unlink() - # Create remote flags - create_ephys_transfer_done_flag(remote_session_path) - check_create_raw_session_flag(remote_session_path) + if flag_file.exists(): # this file only exists for the iblrig v7 and lower + flag_file.unlink() + if (remote_session_path / "extract_me.flag").exists(): + (remote_session_path / "extract_me.flag").unlink() + # Create remote flags + create_ephys_transfer_done_flag(remote_session_path) + check_create_raw_session_flag(remote_session_path) def probe_labels_from_session_path(session_path: Union[str, Path]) -> List[str]: @@ -764,7 +791,7 @@ def probe_labels_from_session_path(session_path: Union[str, Path]) -> List[str]: :return: list of strings """ plabels = [] - raw_ephys_folder = session_path.joinpath('raw_ephys_data') + raw_ephys_folder = Path(session_path).joinpath('raw_ephys_data') for meta_file in raw_ephys_folder.rglob('*.ap.meta'): if meta_file.parents[1] != raw_ephys_folder: continue @@ -787,7 +814,7 @@ def create_alyx_probe_insertions( labels: list = None, ): if one is None: - one = ONE(cache_rest=None) + one = ONE(cache_rest=None, mode='local') eid = session_path if is_uuid_string(session_path) else one.path2eid(session_path) if eid is None: log.warning("Session not found on Alyx: please create session before creating insertions") diff --git a/ibllib/pipes/photometry_tasks.py b/ibllib/pipes/photometry_tasks.py index 425d5d7fd..a5de12717 100644 --- a/ibllib/pipes/photometry_tasks.py +++ b/ibllib/pipes/photometry_tasks.py @@ -4,14 +4,13 @@ from collections import OrderedDict from ibllib.pipes import tasks, base_tasks -from ibllib.pipes.training_preprocessing import ( - TrainingRegisterRaw, TrainingAudio, TrainingTrials, TrainingDLC, TrainingStatus, TrainingVideoCompress) +import ibllib.pipes.training_preprocessing as tpp from ibllib.io.extractors.fibrephotometry import FibrePhotometry _logger = logging.getLogger('ibllib') -class TaskFibrePhotometryRegisterRaw(base_tasks.RegisterRawDataTask): +class FibrePhotometryRegisterRaw(base_tasks.RegisterRawDataTask): priority = 100 job_size = 'small' @@ -19,22 +18,28 @@ class TaskFibrePhotometryRegisterRaw(base_tasks.RegisterRawDataTask): def __init__(self, *args, 
**kwargs): super().__init__(*args, **kwargs) self.collection = self.get_task_collection(kwargs.get('collection', None)) + self.device_collection = self.get_device_collection('photometry', device_collection='raw_photometry_data') @property def signature(self): signature = { 'input_files': [], - 'output_files': [('_mcc_DAQdata.raw.tdms', self.collection, True), - ('_neurophotometrics_fpData.raw.pqt', self.collection, True)] + 'output_files': [('_mcc_DAQdata.raw.tdms', self.device_collection, True), + ('_neurophotometrics_fpData.raw.pqt', self.device_collection, True)] } return signature -class TaskFibrePhotometryPreprocess(base_tasks.DynamicTask): - signature = { - 'input_files': [('*fpData.raw*', 'raw_photometry_data', True), ], - 'output_files': [('photometry.signal.pqt', 'alf', True), ] - } +class FibrePhotometryPreprocess(base_tasks.DynamicTask): + @property + def signature(self): + signature = { + 'input_files': [('_mcc_DAQdata.raw.tdms', self.device_collection, True), + ('_neurophotometrics_fpData.raw.pqt', self.device_collection, True)], + 'output_files': [('photometry.signal.pqt', 'alf/photometry', True)] + } + return signature + priority = 90 level = 1 @@ -42,11 +47,12 @@ def __init__(self, session_path, regions=None, **kwargs): super().__init__(session_path, **kwargs) # Task collection (this needs to be specified in the task kwargs) self.collection = self.get_task_collection(kwargs.get('collection', None)) + self.device_collection = self.get_device_collection('photometry', device_collection='raw_photometry_data') self.regions = regions def _run(self, **kwargs): - _, out_files = FibrePhotometry(self.session_path, collection=self.collection).extract( - regions=self.regions, save=True) + _, out_files = FibrePhotometry(self.session_path, collection=self.device_collection).extract( + regions=self.regions, path_out=self.session_path.joinpath('alf', 'photometry'), save=True) return out_files @@ -63,13 +69,13 @@ def __init__(self, session_path=None, **kwargs): tasks = OrderedDict() self.session_path = session_path # level 0 - tasks['TrainingRegisterRaw'] = TrainingRegisterRaw(self.session_path) - tasks['TrainingTrials'] = TrainingTrials(self.session_path) - tasks['TrainingVideoCompress'] = TrainingVideoCompress(self.session_path) - tasks['TrainingAudio'] = TrainingAudio(self.session_path) + tasks['TrainingRegisterRaw'] = tpp.TrainingRegisterRaw(self.session_path) + tasks['TrainingTrials'] = tpp.TrainingTrials(self.session_path) + tasks['TrainingVideoCompress'] = tpp.TrainingVideoCompress(self.session_path) + tasks['TrainingAudio'] = tpp.TrainingAudio(self.session_path) # level 1 - tasks['BiasedFibrePhotometry'] = TaskFibrePhotometryPreprocess(self.session_path, parents=[tasks['TrainingTrials']]) - tasks['TrainingStatus'] = TrainingStatus(self.session_path, parents=[tasks['TrainingTrials']]) - tasks['TrainingDLC'] = TrainingDLC( + tasks['BiasedFibrePhotometry'] = FibrePhotometryPreprocess(self.session_path, parents=[tasks['TrainingTrials']]) + tasks['TrainingStatus'] = tpp.TrainingStatus(self.session_path, parents=[tasks['TrainingTrials']]) + tasks['TrainingDLC'] = tpp.TrainingDLC( self.session_path, parents=[tasks['TrainingVideoCompress']]) self.tasks = tasks diff --git a/ibllib/pipes/sync_tasks.py b/ibllib/pipes/sync_tasks.py index f7cca9aff..12b48ff22 100644 --- a/ibllib/pipes/sync_tasks.py +++ b/ibllib/pipes/sync_tasks.py @@ -10,8 +10,17 @@ class SyncRegisterRaw(base_tasks.RegisterRawDataTask): - """ - Task to register raw daq data + """Task to register raw DAQ data. 
+ + Registers DAQ software output for a given device. The object should be _*_DAQdata, where the + namespace identifies the DAQ model or acquisition software, e.g. 'mcc', 'ni' or 'ni-usb-6211'. + At minimum there should be a raw data dataset of the form _*_DAQdata.raw*, e.g. + '_mc_DAQdata.raw.pqt'. The following are optional attribute datasets: + - _*_DAQdata.timestamps.npy: for timeline the timestamps array is separate from the samples. + - _*_DAQdata.meta.json: for timeline all acquisition meta data (e.g. sample rate, channel + names) are stored in a separate file. + - _*_DAQdata.wiring.json: for SpikeGLX the channel map is stored in this file. + _timeline_softwareEvents.log.htsv: UDP messages and other software events in DAQ time. """ priority = 100 job_size = 'small' @@ -21,7 +30,10 @@ def signature(self): signature = { 'input_files': [], 'output_files': [(f'*DAQdata.raw.{self.sync_ext}', self.sync_collection, True), - ('*DAQdata.wiring.json', self.sync_collection, True)] + ('*DAQdata.timestamps.npy', self.sync_collection, False), + ('*DAQdata.meta.json', self.sync_collection, False), + ('*DAQdata.wiring.json', self.sync_collection, False), + ('*softwareEvents.log.htsv', self.sync_collection, False)] } return signature diff --git a/ibllib/pipes/tasks.py b/ibllib/pipes/tasks.py index 89d6b229b..b6e632579 100644 --- a/ibllib/pipes/tasks.py +++ b/ibllib/pipes/tasks.py @@ -17,6 +17,7 @@ from iblutil.util import Bunch import one.params from one.api import ONE +from one import webclient _logger = logging.getLogger(__name__) TASK_STATUS_SET = {'Waiting', 'Held', 'Started', 'Errored', 'Empty', 'Complete', 'Incomplete', 'Abandoned'} @@ -73,6 +74,21 @@ def __init__(self, session_path, parents=None, taskid=None, one=None, def name(self): return self.__class__.__name__ + def path2eid(self): + """ + Fetch the experiment UUID from the Task session path, without using the REST cache. + + This method ensures that the eid will be returned for newly created sessions. + + Returns + ------- + str + The experiment UUID corresponding to the session path. 
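+
+        Examples
+        --------
+        An illustrative call from a task instance with an online ONE instance and a
+        session already registered on Alyx:
+
+        >>> eid = task.path2eid()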
+ """ + assert self.session_path and self.one and not self.one.offline + with webclient.no_cache(self.one.alyx): + return self.one.path2eid(self.session_path, query_type='remote') + def run(self, **kwargs): """ --- do not overload, see _run() below--- @@ -479,7 +495,7 @@ def create_alyx_tasks(self, rerun__status__in=None, tasks_list=None): for t in task_items: # get the parents' alyx ids to reference in the database - if type(t) == dict: + if isinstance(t, dict): t = Bunch(t) executable = t.executable arguments = t.arguments diff --git a/ibllib/pipes/training_preprocessing.py b/ibllib/pipes/training_preprocessing.py index 1b9e8da80..b47adcc65 100644 --- a/ibllib/pipes/training_preprocessing.py +++ b/ibllib/pipes/training_preprocessing.py @@ -1,6 +1,7 @@ import logging from collections import OrderedDict from one.alf.files import session_path_parts +import warnings from ibllib.pipes.base_tasks import ExperimentDescriptionRegisterRaw from ibllib.pipes import tasks, training_status @@ -10,18 +11,17 @@ from ibllib.qc.camera import CameraQC from ibllib.qc.task_metrics import TaskQC, HabituationQC from ibllib.qc.task_extractors import TaskQCExtractor -from ibllib.oneibl.registration import register_session_raw_data _logger = logging.getLogger(__name__) +warnings.warn('`pipes.training_preprocessing` to be removed in favour of dynamic pipeline') # level 0 class TrainingRegisterRaw(tasks.Task): priority = 100 - def _run(self, overwrite=False): - out_files, _ = register_session_raw_data(self.session_path, one=self.one, dry=True) - return out_files + def _run(self): + return [] class TrainingTrials(tasks.Task): diff --git a/ibllib/pipes/training_status.py b/ibllib/pipes/training_status.py index 6651fe58a..3c3f8bb4d 100644 --- a/ibllib/pipes/training_status.py +++ b/ibllib/pipes/training_status.py @@ -4,7 +4,10 @@ from ibllib.io.raw_data_loaders import load_bpod from ibllib.oneibl.registration import _get_session_times from ibllib.io.extractors.base import get_pipeline, get_session_extractor_type +from ibllib.io.session_params import read_params +import ibllib.pipes.dynamic_pipeline as dyn +from iblutil.util import setup_logger from ibllib.plots.snapshot import ReportSnapshot from iblutil.numerical import ismember from brainbox.behavior import training @@ -17,6 +20,10 @@ from matplotlib.lines import Line2D from datetime import datetime import seaborn as sns +import boto3 +from botocore.exceptions import ProfileNotFound, ClientError + +logger = setup_logger(__name__) TRAINING_STATUS = {'untrainable': (-4, (0, 0, 0, 0)), @@ -31,28 +38,90 @@ 'ready4recording': (5, (20, 255, 91, 255))} +def get_training_table_from_aws(lab, subject): + """ + If aws credentials exist on the local server download the latest training table from aws s3 private bucket + :param lab: + :param subject: + :return: + """ + try: + session = boto3.Session(profile_name='ibl_training') + except ProfileNotFound: + return + + local_file_path = f'/mnt/s0/Data/Subjects/{subject}/training.csv' + dst_bucket_name = 'ibl-brain-wide-map-private' + try: + s3 = session.resource('s3') + bucket = s3.Bucket(name=dst_bucket_name) + bucket.download_file(f'resources/training/{lab}/{subject}/training.csv', + local_file_path) + df = pd.read_csv(local_file_path) + except ClientError: + return + + return df + + +def upload_training_table_to_aws(lab, subject): + """ + If aws credentials exist on the local server upload the training table to aws s3 private bucket + :param lab: + :param subject: + :return: + """ + try: + session = 
boto3.Session(profile_name='ibl_training') + except ProfileNotFound: + return + + local_file_path = f'/mnt/s0/Data/Subjects/{subject}/training.csv' + dst_bucket_name = 'ibl-brain-wide-map-private' + try: + s3 = session.resource('s3') + bucket = s3.Bucket(name=dst_bucket_name) + bucket.upload_file(local_file_path, + f'resources/training/{lab}/{subject}/training.csv') + except (ClientError, FileNotFoundError): + return + + def get_trials_task(session_path, one): - # TODO this eventually needs to be updated for dynamic pipeline tasks - pipeline = get_pipeline(session_path) - if pipeline == 'training': - from ibllib.pipes.training_preprocessing import TrainingTrials - task = TrainingTrials(session_path, one=one) - elif pipeline == 'ephys': - from ibllib.pipes.ephys_preprocessing import EphysTrials - task = EphysTrials(session_path, one=one) + # If experiment description file then process this + experiment_description_file = read_params(session_path) + if experiment_description_file is not None: + tasks = [] + pipeline = dyn.make_pipeline(session_path) + trials_tasks = [t for t in pipeline.tasks if 'Trials' in t] + for task in trials_tasks: + t = pipeline.tasks.get(task) + t.__init__(session_path, **t.kwargs) + tasks.append(t) else: - try: - # try and look if there is a custom extractor in the personal projects extraction class - import projects.base - task_type = get_session_extractor_type(session_path) - PipelineClass = projects.base.get_pipeline(task_type) - pipeline = PipelineClass(session_path, one) - trials_task_name = next(task for task in pipeline.tasks if 'Trials' in task) - task = pipeline.tasks.get(trials_task_name) - except Exception: - task = None + # Otherwise default to old way of doing things + pipeline = get_pipeline(session_path) + if pipeline == 'training': + from ibllib.pipes.training_preprocessing import TrainingTrials + tasks = [TrainingTrials(session_path)] + elif pipeline == 'ephys': + from ibllib.pipes.ephys_preprocessing import EphysTrials + tasks = [EphysTrials(session_path)] + else: + try: + # try and look if there is a custom extractor in the personal projects extraction class + import projects.base + task_type = get_session_extractor_type(session_path) + PipelineClass = projects.base.get_pipeline(task_type) + pipeline = PipelineClass(session_path, one) + trials_task_name = next(task for task in pipeline.tasks if 'Trials' in task) + task = pipeline.tasks.get(trials_task_name) + task.__init__(session_path) + tasks = [task] + except Exception: + tasks = [] - return task + return tasks def save_path(subj_path): @@ -83,39 +152,80 @@ def load_existing_dataframe(subj_path): return None -def load_trials(sess_path, one, force=True): +def load_trials(sess_path, one, collections=None, force=True, mode='raise'): """ Load trials data for session. 
    First attempts to load from the local session path; if this fails, attempts to download via
    ONE; if this also fails, attempts to re-extract locally.
    :param sess_path: session path
    :param one: ONE instance
+    :param collections: the collections within the session path to search for trials data
+    :param force: when True and if the session trials can't be found, will attempt to re-extract from the disk
+    :param mode: 'raise' or 'warn', if 'raise', will error when forcing re-extraction of past sessions
    :return:
    """
-    # try and load trials locally
     try:
-        trials = alfio.load_object(sess_path.joinpath('alf'), 'trials', short_keys=True)
+        # try and load all trials that are found locally in the session path
+        if collections is None:
+            trial_locations = list(sess_path.rglob('_ibl_trials.goCueTrigger_times.npy'))
+        else:
+            trial_locations = [Path(sess_path).joinpath(c, '_ibl_trials.goCueTrigger_times.npy') for c in collections]
+
+        if len(trial_locations) > 1:
+            trial_dict = {}
+            for i, loc in enumerate(trial_locations):
+                trial_dict[i] = alfio.load_object(loc.parent, 'trials', short_keys=True)
+            trials = training.concatenate_trials(trial_dict)
+        elif len(trial_locations) == 1:
+            trials = alfio.load_object(trial_locations[0].parent, 'trials', short_keys=True)
+        else:
+            raise ALFObjectNotFound
+
         if 'probabilityLeft' not in trials.keys():
             raise ALFObjectNotFound
     except ALFObjectNotFound:
+        # Next try and load all trials data through ONE
         try:
             if not force:
                 return None
-            # attempt to download trials using ONE
-            trials = one.load_object(one.path2eid(sess_path), 'trials')
+            eid = one.path2eid(sess_path)
+            if collections is None:
+                trial_collections = one.list_datasets(eid, '_ibl_trials.goCueTrigger_times.npy')
+                if len(trial_collections) > 0:
+                    trial_collections = ['/'.join(c.split('/')[:-1]) for c in trial_collections]
+            else:
+                trial_collections = collections
+
+            if len(trial_collections) > 1:
+                trial_dict = {}
+                for i, collection in enumerate(trial_collections):
+                    trial_dict[i] = one.load_object(eid, 'trials', collection=collection)
+                trials = training.concatenate_trials(trial_dict)
+            elif len(trial_collections) == 1:
+                trials = one.load_object(eid, 'trials', collection=trial_collections[0])
+            else:
+                raise ALFObjectNotFound
+
             if 'probabilityLeft' not in trials.keys():
                 raise ALFObjectNotFound
         except Exception:
+            # Finally try to re-extract the trials data locally
             try:
-                task = get_trials_task(sess_path, one=one)
-                if task is not None:
-                    task.run()
-                    trials = alfio.load_object(sess_path.joinpath('alf'), 'trials')
-                    if 'probabilityLeft' not in trials.keys():
-                        raise ALFObjectNotFound
+                # Get the tasks that need to be run
+                tasks = get_trials_task(sess_path, one)
+                if len(tasks) > 0:
+                    for task in tasks:
+                        status = task.run()
+                        if status == 0:
+                            return load_trials(sess_path, collections=collections, one=one, force=False)
+                        else:
+                            return
                 else:
                     trials = None
-            except Exception:  # TODO how can i make this more specific
-                trials = None
+            except Exception as e:
+                if mode == 'raise':
+                    raise Exception(f'Exhausted all possibilities for loading trials for {sess_path}') from e
+                else:
+                    logger.warning(f'Exhausted all possibilities for loading trials for {sess_path}')
+                    return
 
     return trials
@@ -154,7 +264,15 @@ def get_latest_training_information(sess_path, one):
     """
     subj_path = sess_path.parent.parent
-    df = load_existing_dataframe(subj_path)
+    sub = subj_path.parts[-1]
+    if one.mode != 'local':
+        lab = one.alyx.rest('subjects', 'list', nickname=sub)[0]['lab']
+        df = get_training_table_from_aws(lab, sub)
+    else:
+        df = None
+
+    if df is None:
+        df = load_existing_dataframe(subj_path)
 
     # 
Find the dates and associated session paths where we don't have data stored in our dataframe missing_dates = check_up_to_date(subj_path, df) @@ -185,22 +303,28 @@ def get_latest_training_information(sess_path, one): df = compute_training_status(df, date, one) df_lim = df.drop_duplicates(subset='session_path', keep='first') + # Detect untrainable - un_df = df_lim[df_lim['training_status'] == 'in training'].sort_values('date') - if len(un_df) >= 40: - sess = un_df.iloc[39].session_path - df.loc[df['session_path'] == sess, 'training_status'] = 'untrainable' + if 'untrainable' not in df_lim.training_status.values: + un_df = df_lim[df_lim['training_status'] == 'in training'].sort_values('date') + if len(un_df) >= 40: + sess = un_df.iloc[39].session_path + df.loc[df['session_path'] == sess, 'training_status'] = 'untrainable' # Detect unbiasable - un_df = df_lim[df_lim['task_protocol'] == 'biased'].sort_values('date') - if len(un_df) >= 40: - tr_st = un_df[0:40].training_status.unique() - if 'ready4ephysrig' not in tr_st: - sess = un_df.iloc[39].session_path - df.loc[df['session_path'] == sess, 'training_status'] = 'unbiasable' + if 'unbiasable' not in df_lim.training_status.values: + un_df = df_lim[df_lim['task_protocol'] == 'biased'].sort_values('date') + if len(un_df) >= 40: + tr_st = un_df[0:40].training_status.unique() + if 'ready4ephysrig' not in tr_st: + sess = un_df.iloc[39].session_path + df.loc[df['session_path'] == sess, 'training_status'] = 'unbiasable' save_dataframe(df, subj_path) + if one.mode != 'local': + upload_training_table_to_aws(lab, sub) + return df @@ -220,7 +344,7 @@ def find_earliest_recompute_date(df): return df[first_index:].date.values -def compute_training_status(df, compute_date, one, force=True): +def compute_training_status(df, compute_date, one, force=True, task_collection='raw_behavior_data'): """ Compute the training status for compute date based on training from that session and two previous days :param df: training dataframe @@ -231,13 +355,16 @@ def compute_training_status(df, compute_date, one, force=True): # compute_date = str(one.path2ref(session_path)['date']) df_temp = df[df['date'] <= compute_date] - df_temp = df_temp.drop_duplicates('session_path') + df_temp = df_temp.drop_duplicates(subset=['session_path', 'task_protocol']) df_temp.sort_values('date') dates = df_temp.date.values - n_dates = np.min([3, len(dates)]).astype(int) + n_sess_for_date = len(np.where(dates == compute_date)[0]) + n_dates = np.min([2 + n_sess_for_date, len(dates)]).astype(int) compute_dates = dates[(-1 * n_dates):] + if n_sess_for_date > 1: + compute_dates = compute_dates[:(-1 * (n_sess_for_date - 1))] assert compute_dates[-1] == compute_date @@ -255,6 +382,8 @@ def compute_training_status(df, compute_date, one, force=True): # If habituation skip if df_date.iloc[-1]['task_protocol'] == 'habituation': continue + # Here we should split by protocol in an ideal world but that world isn't today. This is only really relevant for + # chained protocols trials[df_date.iloc[-1]['date']] = load_combined_trials(df_date.session_path.values, one, force=force) protocol.append(df_date.iloc[-1]['task_protocol']) status.append(df_date.iloc[-1]['training_status']) @@ -286,7 +415,7 @@ def pass_through_training_hierachy(status_new, status_old): return status_new -def compute_session_duration_delay_location(sess_path, **kwargs): +def compute_session_duration_delay_location(sess_path, collections=None, **kwargs): """ Get meta information about task. 
Extracts session duration, delay before session start and location of session

@@ -294,7 +423,7 @@
     ----------
     sess_path : pathlib.Path, str
         The session path with the pattern subject/yyyy-mm-dd/nnn.
-    task_collection : str
+    collections : list
         The location within the session path directory of task settings and data.

     Returns
@@ -306,18 +435,111 @@
     str {'ephys_rig', 'training_rig'}
         The location of the session.
     """
-    md, sess_data = load_bpod(sess_path, **kwargs)
-    start_time, end_time = _get_session_times(sess_path, md, sess_data)
-    session_duration = int((end_time - start_time).total_seconds() / 60)
+    if collections is None:
+        collections, _ = get_data_collection(sess_path)
+
+    session_duration = 0
+    session_delay = 0
+    session_location = 'training_rig'
+    for collection in collections:
+        md, sess_data = load_bpod(sess_path, task_collection=collection)
+        if md is None:
+            continue
+        try:
+            start_time, end_time = _get_session_times(sess_path, md, sess_data)
+            session_duration = session_duration + int((end_time - start_time).total_seconds() / 60)
+            session_delay = session_delay + md.get('SESSION_START_DELAY_SEC', 0)
+        except Exception:
+            session_duration = session_duration + 0
+            session_delay = session_delay + 0

-    session_delay = md.get('SESSION_START_DELAY_SEC', 0)
+        # NB: default '' rather than None so the membership test can't raise when the key is missing
+        if 'ephys' in md.get('PYBPOD_BOARD', ''):
+            session_location = 'ephys_rig'
+        else:
+            session_location = 'training_rig'
+
+    return session_duration, session_delay, session_location

-    if 'ephys' in md.get('PYBPOD_BOARD', None):
-        session_location = 'ephys_rig'
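Since durations and delays now accumulate over every Bpod protocol in a session, a short usage sketch may
help (the session path and collection names below are hypothetical):

from pathlib import Path

session_path = Path('/mnt/s0/Data/Subjects/SWC_001/2023-01-01/001')  # hypothetical
duration, delay, location = compute_session_duration_delay_location(
    session_path, collections=['raw_task_data_00', 'raw_task_data_01'])
# duration -> minutes, summed over both protocols
# delay    -> summed SESSION_START_DELAY_SEC values, in seconds
# location -> 'ephys_rig' if the last readable settings file names an ephys board

When `collections` is None they are looked up with `get_data_collection` (defined next), which for the
session above would return (['raw_task_data_00', 'raw_task_data_01'], ['alf/task_00', 'alf/task_01']),
and (['raw_behavior_data'], ['alf']) for a standard session without an experiment description file.

+
+
+def get_data_collection(session_path):
+    """
+    Returns the location of the raw behavioral data and extracted trials data for the session path.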
If there are multiple locations within one session (e.g. for a dynamic pipeline), returns all of them.
+
+    :param session_path: path of session
+    :return:
+    """
+    experiment_description_file = read_params(session_path)
+    if experiment_description_file is not None:
+        pipeline = dyn.make_pipeline(session_path)
+        trials_tasks = [t for t in pipeline.tasks if 'Trials' in t]
+        collections = [pipeline.tasks.get(task).kwargs['collection'] for task in trials_tasks]
+        if len(collections) == 1 and collections[0] == 'raw_behavior_data':
+            alf_collections = ['alf']
+        elif all(['raw_task_data' in c for c in collections]):
+            alf_collections = [f'alf/task_{c[-2:]}' for c in collections]
+        else:
+            alf_collections = None
     else:
-        session_location = 'training_rig'
+        collections = ['raw_behavior_data']
+        alf_collections = ['alf']
+
+    return collections, alf_collections
+
+
+def get_sess_dict(session_path, one, protocol, alf_collections=None, raw_collections=None, force=True):
+
+    sess_dict = {}
+    sess_dict['date'] = str(one.path2ref(session_path)['date'])
+    sess_dict['session_path'] = str(session_path)
+    sess_dict['task_protocol'] = protocol
+
+    if sess_dict['task_protocol'] == 'habituation':
+        nan_array = np.array([np.nan])
+        sess_dict['performance'], sess_dict['contrasts'], _ = (nan_array, nan_array, np.nan)
+        sess_dict['performance_easy'] = np.nan
+        sess_dict['reaction_time'] = np.nan
+        sess_dict['n_trials'] = np.nan
+        sess_dict['sess_duration'] = np.nan
+        sess_dict['n_delay'] = np.nan
+        sess_dict['location'] = np.nan
+        sess_dict['training_status'] = 'habituation'
+        sess_dict['bias_50'], sess_dict['thres_50'], sess_dict['lapsehigh_50'], sess_dict['lapselow_50'] = \
+            (np.nan, np.nan, np.nan, np.nan)
+        sess_dict['bias_20'], sess_dict['thres_20'], sess_dict['lapsehigh_20'], sess_dict['lapselow_20'] = \
+            (np.nan, np.nan, np.nan, np.nan)
+        sess_dict['bias_80'], sess_dict['thres_80'], sess_dict['lapsehigh_80'], sess_dict['lapselow_80'] = \
+            (np.nan, np.nan, np.nan, np.nan)

-    return session_duration, session_delay, session_location
+    else:
+        # if we can't compute trials then we need to pass
+        trials = load_trials(session_path, one, collections=alf_collections, force=force, mode='warn')
+        if trials is None:
+            return
+
+        sess_dict['performance'], sess_dict['contrasts'], _ = training.compute_performance(trials, prob_right=True)
+        if sess_dict['task_protocol'] == 'training':
+            sess_dict['bias_50'], sess_dict['thres_50'], sess_dict['lapsehigh_50'], sess_dict['lapselow_50'] = \
+                training.compute_psychometric(trials)
+            sess_dict['bias_20'], sess_dict['thres_20'], sess_dict['lapsehigh_20'], sess_dict['lapselow_20'] = \
+                (np.nan, np.nan, np.nan, np.nan)
+            sess_dict['bias_80'], sess_dict['thres_80'], sess_dict['lapsehigh_80'], sess_dict['lapselow_80'] = \
+                (np.nan, np.nan, np.nan, np.nan)
+        else:
+            sess_dict['bias_50'], sess_dict['thres_50'], sess_dict['lapsehigh_50'], sess_dict['lapselow_50'] = \
+                training.compute_psychometric(trials, block=0.5)
+            sess_dict['bias_20'], sess_dict['thres_20'], sess_dict['lapsehigh_20'], sess_dict['lapselow_20'] = \
+                training.compute_psychometric(trials, block=0.2)
+            sess_dict['bias_80'], sess_dict['thres_80'], sess_dict['lapsehigh_80'], sess_dict['lapselow_80'] = \
+                training.compute_psychometric(trials, block=0.8)
+
+        sess_dict['performance_easy'] = training.compute_performance_easy(trials)
+        sess_dict['reaction_time'] = training.compute_median_reaction_time(trials)
+        sess_dict['n_trials'] = training.compute_n_trials(trials)
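+        # The raw task collections found by get_data_collection are passed through here so that
+        # the duration, delay and location are accumulated over every protocol in the session
+        sess_dict['sess_duration'], sess_dict['n_delay'], sess_dict['location'] = \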
compute_session_duration_delay_location(session_path, collections=raw_collections) + sess_dict['training_status'] = 'not_computed' + + return sess_dict def get_training_info_for_session(session_paths, one, force=True): @@ -331,59 +553,33 @@ def get_training_info_for_session(session_paths, one, force=True): # return list of dicts to add sess_dicts = [] for session_path in session_paths: + collections, alf_collections = get_data_collection(session_path) session_path = Path(session_path) - sess_dict = {} - sess_dict['date'] = str(one.path2ref(session_path)['date']) - sess_dict['session_path'] = str(session_path) - sess_dict['task_protocol'] = get_session_extractor_type(session_path) - - if sess_dict['task_protocol'] == 'habituation': - nan_array = np.array([np.nan]) - sess_dict['performance'], sess_dict['contrasts'], _ = (nan_array, nan_array, np.nan) - sess_dict['performance_easy'] = np.nan - sess_dict['reaction_time'] = np.nan - sess_dict['n_trials'] = np.nan - sess_dict['sess_duration'] = np.nan - sess_dict['n_delay'] = np.nan - sess_dict['location'] = np.nan - sess_dict['training_status'] = 'habituation' - sess_dict['bias_50'], sess_dict['thres_50'], sess_dict['lapsehigh_50'], sess_dict['lapselow_50'] = \ - (np.nan, np.nan, np.nan, np.nan) - sess_dict['bias_20'], sess_dict['thres_20'], sess_dict['lapsehigh_20'], sess_dict['lapselow_20'] = \ - (np.nan, np.nan, np.nan, np.nan) - sess_dict['bias_80'], sess_dict['thres_80'], sess_dict['lapsehigh_80'], sess_dict['lapselow_80'] = \ - (np.nan, np.nan, np.nan, np.nan) - + protocols = [] + for c in collections: + protocols.append(get_session_extractor_type(session_path, task_collection=c)) + + un_protocols = np.unique(protocols) + # Example, training, training, biased - training would be combined, biased not + if len(un_protocols) != 1: + print(f'Different protocols in same session {session_path} : {protocols}') + for prot in un_protocols: + if prot is False: + continue + try: + alf = alf_collections[np.where(protocols == prot)[0]] + raw = collections[np.where(protocols == prot)[0]] + except TypeError: + alf = None + raw = None + sess_dict = get_sess_dict(session_path, one, prot, alf_collections=alf, raw_collections=raw, force=force) else: - # if we can't compute trials then we need to pass - trials = load_trials(session_path, one, force=force) - if trials is None: - continue - - sess_dict['performance'], sess_dict['contrasts'], _ = training.compute_performance(trials, prob_right=True) - if sess_dict['task_protocol'] == 'training': - sess_dict['bias_50'], sess_dict['thres_50'], sess_dict['lapsehigh_50'], sess_dict['lapselow_50'] = \ - training.compute_psychometric(trials) - sess_dict['bias_20'], sess_dict['thres_20'], sess_dict['lapsehigh_20'], sess_dict['lapselow_20'] = \ - (np.nan, np.nan, np.nan, np.nan) - sess_dict['bias_80'], sess_dict['thres_80'], sess_dict['lapsehigh_80'], sess_dict['lapselow_80'] = \ - (np.nan, np.nan, np.nan, np.nan) - else: - sess_dict['bias_50'], sess_dict['thres_50'], sess_dict['lapsehigh_50'], sess_dict['lapselow_50'] = \ - training.compute_psychometric(trials, block=0.5) - sess_dict['bias_20'], sess_dict['thres_20'], sess_dict['lapsehigh_20'], sess_dict['lapselow_20'] = \ - training.compute_psychometric(trials, block=0.2) - sess_dict['bias_80'], sess_dict['thres_80'], sess_dict['lapsehigh_80'], sess_dict['lapselow_80'] = \ - training.compute_psychometric(trials, block=0.8) - - sess_dict['performance_easy'] = training.compute_performance_easy(trials) - sess_dict['reaction_time'] = 
training.compute_median_reaction_time(trials) - sess_dict['n_trials'] = training.compute_n_trials(trials) - sess_dict['sess_duration'], sess_dict['n_delay'], sess_dict['location'] = \ - compute_session_duration_delay_location(session_path) - sess_dict['training_status'] = 'not_computed' - - sess_dicts.append(sess_dict) + prot = un_protocols[0] + sess_dict = get_sess_dict(session_path, one, prot, alf_collections=alf_collections, raw_collections=collections, + force=force) + + if sess_dict is not None: + sess_dicts.append(sess_dict) protocols = [s['task_protocol'] for s in sess_dicts] @@ -395,9 +591,9 @@ def get_training_info_for_session(session_paths, one, force=True): combined_trials = load_combined_trials(session_paths, one, force=force) performance, contrasts, _ = training.compute_performance(combined_trials, prob_right=True) psychs = {} - psychs['50'] = training.compute_psychometric(trials, block=0.5) - psychs['20'] = training.compute_psychometric(trials, block=0.2) - psychs['80'] = training.compute_psychometric(trials, block=0.8) + psychs['50'] = training.compute_psychometric(combined_trials, block=0.5) + psychs['20'] = training.compute_psychometric(combined_trials, block=0.2) + psychs['80'] = training.compute_psychometric(combined_trials, block=0.8) performance_easy = training.compute_performance_easy(combined_trials) reaction_time = training.compute_median_reaction_time(combined_trials) @@ -747,7 +943,7 @@ def plot_heatmap_performance_over_days(df, subject): return ax1 -def make_plots(session_path, one, df=None, save=False, upload=False): +def make_plots(session_path, one, df=None, save=False, upload=False, task_collection='raw_behavior_data'): subject = one.path2ref(session_path)['subject'] subj_path = session_path.parent.parent diff --git a/ibllib/pipes/video_tasks.py b/ibllib/pipes/video_tasks.py index fd97dad35..fcbef1d17 100644 --- a/ibllib/pipes/video_tasks.py +++ b/ibllib/pipes/video_tasks.py @@ -1,11 +1,16 @@ import logging import subprocess +import cv2 +import traceback +from pathlib import Path -from ibllib.io import ffmpeg +from ibllib.io import ffmpeg, raw_daq_loaders from ibllib.pipes import base_tasks -from ibllib.io.video import label_from_path, get_video_meta +from ibllib.io.video import get_video_meta from ibllib.io.extractors import camera from ibllib.qc.camera import run_all_qc as run_camera_qc +from ibllib.misc import check_nvidia_driver +from ibllib.io.video import label_from_path, assert_valid_label _logger = logging.getLogger('ibllib') @@ -223,7 +228,7 @@ def _run(self, **kwargs): # Video timestamps extraction output_files = [] data, files = camera.extract_all(self.session_path, sync_type=self.sync, sync_collection=self.sync_collection, - save=True, labels=labels) + save=True, labels=labels, task_collection=self.collection) output_files.extend(files) # Video QC @@ -266,10 +271,16 @@ def _run(self, **kwargs): mp4_files = self.session_path.joinpath(self.device_collection).glob('*.mp4') labels = [label_from_path(x) for x in mp4_files] + kwargs = {} + if self.sync_namespace == 'timeline': + # Load sync from timeline file + alf_path = self.session_path / self.sync_collection + kwargs['sync'], kwargs['chmap'] = raw_daq_loaders.load_timeline_sync_and_chmap(alf_path) + # Video timestamps extraction output_files = [] data, files = camera.extract_all(self.session_path, sync_type=self.sync, sync_collection=self.sync_collection, - save=True, labels=labels) + save=True, labels=labels, **kwargs) output_files.extend(files) # Video QC @@ -277,3 +288,164 @@ def _run(self, 
**kwargs):
                             sync_collection=self.sync_collection, sync_type=self.sync)
 
         return output_files
+
+
+class DLC(base_tasks.VideoTask):
+    """
+    This task relies on a correctly installed dlc environment as per
+    https://docs.google.com/document/d/1g0scP6_3EmaXCU4SsDNZWwDTaD9MG0es_grLA-d0gh0/edit#
+
+    If your environment is set up otherwise, make sure that you set the respective attributes:
+    t = DLC(session_path)
+    t.dlcenv = Path('/path/to/your/dlcenv/bin/activate')
+    t.scripts = Path('/path/to/your/iblscripts/deploy/serverpc/dlc')
+    """
+    gpu = 1
+    cpu = 4
+    io_charge = 100
+    level = 2
+    force = True
+    job_size = 'large'
+
+    dlcenv = Path.home().joinpath('Documents', 'PYTHON', 'envs', 'dlcenv', 'bin', 'activate')
+    scripts = Path.home().joinpath('Documents', 'PYTHON', 'iblscripts', 'deploy', 'serverpc', 'dlc')
+
+    @property
+    def signature(self):
+        signature = {
+            'input_files': [(f'_iblrig_{cam}Camera.raw.mp4', self.device_collection, True) for cam in self.cameras],
+            'output_files': [(f'_ibl_{cam}Camera.dlc.pqt', 'alf', True) for cam in self.cameras] +
+                            [(f'{cam}Camera.ROIMotionEnergy.npy', 'alf', True) for cam in self.cameras] +
+                            [(f'{cam}ROIMotionEnergy.position.npy', 'alf', True) for cam in self.cameras]
+        }
+
+        return signature
+
+    def _check_dlcenv(self):
+        """Check that scripts are present, dlcenv can be activated and get iblvideo version"""
+        assert len(list(self.scripts.rglob('run_dlc.*'))) == 2, \
+            f'Scripts run_dlc.sh and run_dlc.py do not exist in {self.scripts}'
+        assert len(list(self.scripts.rglob('run_motion.*'))) == 2, \
+            f'Scripts run_motion.sh and run_motion.py do not exist in {self.scripts}'
+        assert self.dlcenv.exists(), f"DLC environment does not exist in assumed location {self.dlcenv}"
+        command2run = f"source {self.dlcenv}; python -c 'import iblvideo; print(iblvideo.__version__)'"
+        process = subprocess.Popen(
+            command2run,
+            shell=True,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            executable="/bin/bash"
+        )
+        info, error = process.communicate()
+        if process.returncode != 0:
+            raise AssertionError(f"DLC environment check failed\n{error.decode('utf-8')}")
+        version = info.decode("utf-8").strip().split('\n')[-1]
+        return version
+
+    @staticmethod
+    def _video_intact(file_mp4):
+        """Checks that the downloaded video can be opened and is not empty"""
+        cap = cv2.VideoCapture(str(file_mp4))
+        frame_count = cap.get(cv2.CAP_PROP_FRAME_COUNT)
+        intact = frame_count > 0
+        cap.release()
+        return intact
+
+    def _run(self, cams=None, overwrite=False):
+        # Check that the cams are valid for DLC, remove the ones that aren't
+        candidate_cams = cams or self.cameras
+        cams = []
+        for cam in candidate_cams:
+            try:
+                cams.append(assert_valid_label(cam))
+            except ValueError:
+                _logger.warning(f'{cam} is not a valid video label, this video will be skipped')
+        # Set up
+        self.session_id = self.one.path2eid(self.session_path)
+        actual_outputs = []
+
+        # Loop through cams
+        for cam in cams:
+            # Catch exceptions so that following cameras can still run
+            try:
+                # If all results exist and overwrite is False, skip computation
+                expected_outputs_present, expected_outputs = self.assert_expected(self.output_files, silent=True)
+                if overwrite is False and expected_outputs_present is True:
+                    actual_outputs.extend(expected_outputs)
+                    return actual_outputs
+                else:
+                    file_mp4 = next(self.session_path.joinpath('raw_video_data').glob(f'_iblrig_{cam}Camera.raw*.mp4'))
+                    if not file_mp4.exists():
+                        # In this case we set the status to Incomplete.
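+                        # NB: within this task a status of -3 marks the run Incomplete (other
+                        # cameras may still produce outputs), whereas -1 marks it Errored; see
+                        # the check at the end of _run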
+                        _logger.error(f"No raw video file available for {cam}, skipping.")
+                        self.status = -3
+                        continue
+                    if not self._video_intact(file_mp4):
+                        _logger.error(f"Corrupt raw video file {file_mp4}")
+                        self.status = -1
+                        continue
+                    # Check that the dlc environment is ok and the shell scripts exist, get the iblvideo version,
+                    # and check the GPU is addressable
+                    self.version = self._check_dlcenv()
+                    _logger.info(f'iblvideo version {self.version}')
+                    check_nvidia_driver()
+
+                    _logger.info(f'Running DLC on {cam}Camera.')
+                    command2run = f"{self.scripts.joinpath('run_dlc.sh')} {str(self.dlcenv)} {file_mp4} {overwrite}"
+                    _logger.info(command2run)
+                    process = subprocess.Popen(
+                        command2run,
+                        shell=True,
+                        stdout=subprocess.PIPE,
+                        stderr=subprocess.PIPE,
+                        executable="/bin/bash",
+                    )
+                    info, error = process.communicate()
+                    # info_str = info.decode("utf-8").strip()
+                    # _logger.info(info_str)
+                    if process.returncode != 0:
+                        error_str = error.decode("utf-8").strip()
+                        _logger.error(f'DLC failed for {cam}Camera.\n\n'
+                                      f'++++++++ Output of subprocess for debugging ++++++++\n\n'
+                                      f'{error_str}\n'
+                                      f'++++++++++++++++++++++++++++++++++++++++++++\n')
+                        self.status = -1
+                        # We don't run motion energy or add any files if DLC failed to run
+                        continue
+                    dlc_result = next(self.session_path.joinpath('alf').glob(f'_ibl_{cam}Camera.dlc*.pqt'))
+                    actual_outputs.append(dlc_result)
+
+                    _logger.info(f'Computing motion energy for {cam}Camera')
+                    command2run = f"{self.scripts.joinpath('run_motion.sh')} {str(self.dlcenv)} {file_mp4} {dlc_result}"
+                    _logger.info(command2run)
+                    process = subprocess.Popen(
+                        command2run,
+                        shell=True,
+                        stdout=subprocess.PIPE,
+                        stderr=subprocess.PIPE,
+                        executable="/bin/bash",
+                    )
+                    info, error = process.communicate()
+                    # info_str = info.decode("utf-8").strip()
+                    # _logger.info(info_str)
+                    if process.returncode != 0:
+                        error_str = error.decode("utf-8").strip()
+                        _logger.error(f'Motion energy failed for {cam}Camera.\n\n'
+                                      f'++++++++ Output of subprocess for debugging ++++++++\n\n'
+                                      f'{error_str}\n'
+                                      f'++++++++++++++++++++++++++++++++++++++++++++\n')
+                        self.status = -1
+                        continue
+                    actual_outputs.append(next(self.session_path.joinpath('alf').glob(
+                        f'{cam}Camera.ROIMotionEnergy*.npy')))
+                    actual_outputs.append(next(self.session_path.joinpath('alf').glob(
+                        f'{cam}ROIMotionEnergy.position*.npy')))
+            except BaseException:
+                _logger.error(traceback.format_exc())
+                self.status = -1
+                continue
+        # If status is Incomplete, check that there is at least one output.
+        # Otherwise make sure it gets set to Empty (outputs = None), and set status to -1 so it doesn't slip through
+        if self.status == -3 and len(actual_outputs) == 0:
+            actual_outputs = None
+            self.status = -1
+        return actual_outputs
diff --git a/ibllib/pipes/widefield_tasks.py b/ibllib/pipes/widefield_tasks.py
index 1b48031a0..37a793135 100644
--- a/ibllib/pipes/widefield_tasks.py
+++ b/ibllib/pipes/widefield_tasks.py
@@ -40,39 +40,10 @@ def signature(self):
         return signature
 
     def _run(self, symlink_old=True):
-        out_files = super()._run(symlink_old=True)
+        out_files = super()._run(symlink_old=symlink_old)
         self.register_snapshots()
         return out_files
 
-    def register_snapshots(self, unlink=False):
-        """
-        Register any photos in the snapshots folder to the session.
Typically user will take photo of dorsal cortex before - and after session - - Returns - ------- - - """ - snapshots_path = self.session_path.joinpath('raw_widefield_data', 'snapshots') - if not snapshots_path.exists(): - return - - eid = self.one.path2eid(self.session_path, query_type='remote') - if not eid: - _logger.warning('Failed to upload snapshots: session not found on Alyx') - return - note = dict(user=self.one.alyx.user, content_type='session', object_id=eid, text='') - - notes = [] - for snapshot in snapshots_path.glob('*.tif'): - with open(snapshot, 'rb') as img_file: - files = {'image': img_file} - notes.append(self.one.alyx.rest('notes', 'create', data=note, files=files)) - if unlink: - snapshot.unlink() - if unlink and next(snapshots_path.rglob('*'), None) is None: - snapshots_path.rmdir() - class WidefieldCompress(base_tasks.WidefieldTask): diff --git a/ibllib/qc/camera.py b/ibllib/qc/camera.py index 76b02cbaf..8cf993573 100644 --- a/ibllib/qc/camera.py +++ b/ibllib/qc/camera.py @@ -53,11 +53,12 @@ from iblutil.numerical import within_ranges from ibllib.io.extractors.camera import extract_camera_sync, extract_all -from ibllib.io.extractors import ephys_fpga, training_wheel +from ibllib.io.extractors import ephys_fpga, training_wheel, mesoscope from ibllib.io.extractors.video_motion import MotionAlignment from ibllib.io.extractors.base import get_session_extractor_type from ibllib.io import raw_data_loaders as raw -from ibllib.io.session_params import read_params, get_sync +from ibllib.io.raw_daq_loaders import load_timeline_sync_and_chmap +from ibllib.io.session_params import read_params, get_sync, get_sync_namespace import brainbox.behavior.wheel as wh from ibllib.io.video import get_video_meta, get_video_frames_preload, assert_valid_label from . 
import base @@ -210,6 +211,7 @@ def load_data(self, download_data: bool = None, extract_times: bool = False, loa # If there is an experiment description and there are video parameters sess_params = read_params(self.session_path) or {} task_collection = get_task_collection(sess_params) + ns = get_sync_namespace(sess_params) self._set_sync(sess_params) if not self.sync: if not self.type: @@ -220,7 +222,13 @@ def load_data(self, download_data: bool = None, extract_times: bool = False, loa # Load the audio and raw FPGA times if self.sync != 'bpod' and self.sync is not None: self.sync_collection = self.sync_collection or 'raw_ephys_data' - sync, chmap = ephys_fpga.get_sync_and_chn_map(self.session_path, self.sync_collection) + ns = ns or 'spikeglx' + if ns == 'spikeglx': + sync, chmap = ephys_fpga.get_sync_and_chn_map(self.session_path, self.sync_collection) + elif ns == 'timeline': + sync, chmap = load_timeline_sync_and_chmap(self.session_path / self.sync_collection) + else: + raise NotImplementedError(f'Unknown namespace "{ns}"') audio_ttls = ephys_fpga.get_sync_fronts(sync, chmap['audio']) self.data['audio'] = audio_ttls['times'] # Get rises # Load raw FPGA times @@ -258,7 +266,13 @@ def load_data(self, download_data: bool = None, extract_times: bool = False, loa except (StopIteration, ALFObjectNotFound): # Extract from raw data if self.sync != 'bpod' and self.sync is not None: - wheel_data = ephys_fpga.extract_wheel_sync(sync, chmap) + if ns == 'spikeglx': + wheel_data = ephys_fpga.extract_wheel_sync(sync, chmap) + elif ns == 'timeline': + extractor = mesoscope.TimelineTrials(self.session_path, sync_collection=self.sync_collection) + wheel_data = extractor.extract_wheel_sync() + else: + raise NotImplementedError(f'Unknown namespace "{ns}"') else: wheel_data = training_wheel.get_wheel_position( self.session_path, task_collection=task_collection) @@ -304,7 +318,7 @@ def get_active_wheel_period(wheel, duration_range=(3., 20.), display=False): :return: 2-element array comprising the start and end times of the active period """ pos, ts = wh.interpolate_position(wheel.timestamps, wheel.position) - v, acc = wh.velocity_smoothed(pos, 1000) + v, acc = wh.velocity_filtered(pos, 1000) on, off, *_ = wh.movements(ts, acc, pos_thresh=.1, make_plots=False) edges = np.c_[on, off] indices, _ = np.where(np.logical_and( @@ -354,19 +368,23 @@ def ensure_required_data(self): dtypes = self.dstypes + self.dstypes_fpga if is_fpga else self.dstypes assert_unique = True # Check we have raw ephys data for session - if is_ephys and len(self.one.list_datasets(self.eid, collection='raw_ephys_data')) == 0: - # Assert 3A probe model; if so download all probe data - det = self.one.get_details(self.eid, full=True) - probe_model = next(x['model'] for x in det['probe_insertion']) - assert probe_model == '3A', 'raw ephys data missing' - collections += (self.sync_collection or 'raw_ephys_data',) - if sess_params: - probes = sess_params.get('devices', {}).get('neuropixel', {}) - probes = set(x.get('collection') for x in chain(*map(dict.values, probes))) - collections += tuple(probes) + if is_ephys: + if len(self.one.list_datasets(self.eid, collection='raw_ephys_data')) == 0: + # Assert 3A probe model; if so download all probe data + det = self.one.get_details(self.eid, full=True) + probe_model = next(x['model'] for x in det['probe_insertion']) + assert probe_model == '3A', 'raw ephys data missing' + collections += (self.sync_collection or 'raw_ephys_data',) + if sess_params: + probes = sess_params.get('devices', 
{}).get('neuropixel', {}) + probes = set(x.get('collection') for x in chain(*map(dict.values, probes))) + collections += tuple(probes) + else: + collections += ('raw_ephys_data/probe00', 'raw_ephys_data/probe01') + assert_unique = False else: - collections += ('raw_ephys_data/probe00', 'raw_ephys_data/probe01') - assert_unique = False + # 3B probes have data in root collection + collections += ('raw_ephys_data',) for dstype in dtypes: datasets = self.one.type2datasets(self.eid, dstype, details=True) if 'camera' in dstype.lower(): # Download individual camera file @@ -586,7 +604,7 @@ def check_pin_state(self, display=False): if not data_for_keys(('video', 'pin_state', 'audio'), self.data): return 'NOT_SET' size_diff = int(self.data['pin_state'].shape[0] - self.data['video']['length']) - # NB: The pin state to be high for 2 consecutive frames + # NB: The pin state can be high for 2 consecutive frames low2high = np.insert(np.diff(self.data['pin_state'][:, -1].astype(int)) == 1, 0, False) # NB: Time between two consecutive TTLs can be sub-frame, so this will fail ndiff_low2high = int(self.data['audio'][::2].size - sum(low2high)) diff --git a/ibllib/qc/task_extractors.py b/ibllib/qc/task_extractors.py index 72656c95d..f0d46ed02 100644 --- a/ibllib/qc/task_extractors.py +++ b/ibllib/qc/task_extractors.py @@ -13,7 +13,7 @@ from one.api import ONE -_logger = logging.getLogger("ibllib") +_logger = logging.getLogger('ibllib') REQUIRED_FIELDS = ['choice', 'contrastLeft', 'contrastRight', 'correct', 'errorCueTrigger_times', 'errorCue_times', 'feedbackType', 'feedback_times', @@ -28,7 +28,7 @@ class TaskQCExtractor(object): def __init__(self, session_path, lazy=False, one=None, download_data=False, bpod_only=False, - sync_collection=None, sync_type=None, task_collection=None, save_path=None): + sync_collection=None, sync_type=None, task_collection=None): """ A class for extracting the task data required to perform task quality control :param session_path: a valid session path @@ -53,7 +53,6 @@ def __init__(self, session_path, lazy=False, one=None, download_data=False, bpod self.sync_collection = sync_collection or 'raw_ephys_data' self.sync_type = sync_type self.task_collection = task_collection or 'raw_behavior_data' - self.save_path = save_path if download_data: self.one = one or ONE() @@ -70,19 +69,19 @@ def _ensure_required_data(self): :return: """ dstypes = [ - "_iblrig_taskData.raw", - "_iblrig_taskSettings.raw", - "_iblrig_encoderPositions.raw", - "_iblrig_encoderEvents.raw", - "_iblrig_stimPositionScreen.raw", - "_iblrig_syncSquareUpdate.raw", - "_iblrig_encoderTrialInfo.raw", - "_iblrig_ambientSensorData.raw", + '_iblrig_taskData.raw', + '_iblrig_taskSettings.raw', + '_iblrig_encoderPositions.raw', + '_iblrig_encoderEvents.raw', + '_iblrig_stimPositionScreen.raw', + '_iblrig_syncSquareUpdate.raw', + '_iblrig_encoderTrialInfo.raw', + '_iblrig_ambientSensorData.raw', ] eid = self.one.path2eid(self.session_path) - self.log.info(f"Downloading data for session {eid}") + self.log.info(f'Downloading data for session {eid}') # Ensure we have the settings - settings, _ = self.one.load_datasets(eid, ["_iblrig_taskSettings.raw.json"], + settings, _ = self.one.load_datasets(eid, ['_iblrig_taskSettings.raw.json'], collections=[self.task_collection], download_only=True, assert_present=False) @@ -111,10 +110,10 @@ def _ensure_required_data(self): missing = [True] * len(dstypes) if not files else [x is None for x in files] if self.session_path is None or all(missing): self.lazy = True - self.log.error("Data 
not found on server, can't calculate QC.") + self.log.error('Data not found on server, can\'t calculate QC.') elif any(missing): self.log.warning( - f"Missing some datasets for session {eid} in path {self.session_path}" + f'Missing some datasets for session {eid} in path {self.session_path}' ) def load_raw_data(self): @@ -122,7 +121,7 @@ def load_raw_data(self): Loads the TTLs, raw task data and task settings :return: """ - self.log.info(f"Loading raw data from {self.session_path}") + self.log.info(f'Loading raw data from {self.session_path}') self.type = self.type or get_session_extractor_type(self.session_path, task_collection=self.task_collection) # Finds the sync type when it isn't explicitly set, if ephys we assume nidq otherwise bpod self.sync_type = self.sync_type or 'nidq' if self.type == 'ephys' else 'bpod' @@ -152,7 +151,7 @@ def extract_data(self): intervals_bpod to be assigned to the data attribute before calling this function. :return: """ - self.log.info(f"Extracting session: {self.session_path}") + self.log.info(f'Extracting session: {self.session_path}') self.type = self.type or get_session_extractor_type(self.session_path, task_collection=self.task_collection) # Finds the sync type when it isn't explicitly set, if ephys we assume nidq otherwise bpod self.sync_type = self.sync_type or 'nidq' if self.type == 'ephys' else 'bpod' @@ -163,7 +162,7 @@ def extract_data(self): self.load_raw_data() # Run extractors if self.sync_type != 'bpod' and not self.bpod_only: - data, _ = ephys_fpga.extract_all(self.session_path, task_collection=self.task_collection, save_path=self.save_path) + data, _ = ephys_fpga.extract_all(self.session_path, save=False, task_collection=self.task_collection) bpod2fpga = interp1d(data['intervals_bpod'][:, 0], data['table']['intervals_0'], fill_value='extrapolate') # Add Bpod wheel data @@ -171,8 +170,7 @@ def extract_data(self): data['wheel_timestamps_bpod'] = bpod2fpga(re_ts) data['wheel_position_bpod'] = pos else: - kwargs = dict(save=False, bpod_trials=self.raw_data, settings=self.settings, - task_collection=self.task_collection, save_path=self.save_path) + kwargs = dict(save=False, bpod_trials=self.raw_data, settings=self.settings, task_collection=self.task_collection) trials, wheel, _ = bpod_trials.extract_all(self.session_path, **kwargs) n_trials = np.unique(list(map(lambda k: trials[k].shape[0], trials)))[0] if self.type == 'habituation': @@ -201,15 +199,19 @@ def rename_data(data): correct = data['feedbackType'] > 0 # get valve_time and errorCue_times from feedback_times if 'errorCue_times' not in data: - data['errorCue_times'] = data["feedback_times"].copy() + data['errorCue_times'] = data['feedback_times'].copy() data['errorCue_times'][correct] = np.nan if 'valveOpen_times' not in data: - data['valveOpen_times'] = data["feedback_times"].copy() + data['valveOpen_times'] = data['feedback_times'].copy() data['valveOpen_times'][~correct] = np.nan + if 'wheel_moves_intervals' not in data and 'wheelMoves_intervals' in data: + data['wheel_moves_intervals'] = data.pop('wheelMoves_intervals') + if 'wheel_moves_peak_amplitude' not in data and 'wheelMoves_peakAmplitude' in data: + data['wheel_moves_peak_amplitude'] = data.pop('wheelMoves_peakAmplitude') data['correct'] = correct diff_fields = list(set(REQUIRED_FIELDS).difference(set(data.keys()))) for miss_field in diff_fields: - data[miss_field] = data["feedback_times"] * np.nan + data[miss_field] = data['feedback_times'] * np.nan if len(diff_fields): - _logger.warning(f"QC extractor, missing fields filled 
with NaNs: {diff_fields}") + _logger.warning(f'QC extractor, missing fields filled with NaNs: {diff_fields}') return data diff --git a/ibllib/qc/task_metrics.py b/ibllib/qc/task_metrics.py index ddb580ba7..36f2b4806 100644 --- a/ibllib/qc/task_metrics.py +++ b/ibllib/qc/task_metrics.py @@ -1,44 +1,50 @@ """Behaviour QC This module runs a list of quality control metrics on the behaviour data. -Examples: - # Running on a rig computer and updating QC fields in Alyx: - from ibllib.qc.task_metrics import TaskQC - TaskQC('path/to/session').run(update=True) - - # Downloading the required data and inspecting the QC on a different computer: - from ibllib.qc.task_metrics import TaskQC - qc = TaskQC(eid) - outcome, results = qc.run() - - # Inspecting individual test outcomes - from ibllib.qc.task_metrics import TaskQC - qc = TaskQC(eid) - outcome, results, outcomes = qc.compute().compute_session_status() - - # Running bpod QC on ephys session - from ibllib.qc.task_metrics import TaskQC - qc = TaskQC(eid) - qc.load_data(bpod_only=True) # Extract without FPGA - bpod_qc = qc.run() - - # Running bpod QC only, from training rig PC - from ibllib.qc.task_metrics import TaskQC - from ibllib.qc.qcplots import plot_results - session_path = r'/home/nico/Downloads/FlatIron/mrsicflogellab/Subjects/SWC_023/2020-02-14/001' - qc = TaskQC(session_path) - qc.load_data(bpod_only=True, download_data=False) # Extract without FPGA - qc.run() - plot_results(qc, save_path=session_path) - - # Running ephys QC, from local server PC (after ephys + bpod data have been copied to a same - folder) - from ibllib.qc.task_metrics import TaskQC - from ibllib.qc.qcplots import plot_results - session_path = r'/home/nico/Downloads/FlatIron/mrsicflogellab/Subjects/SWC_023/2020-02-14/001' - qc = TaskQC(session_path) - qc.run() - plot_results(qc, save_path=session_path) +Examples +-------- +Running on a rig computer and updating QC fields in Alyx: + +>>> from ibllib.qc.task_metrics import TaskQC +>>> TaskQC('path/to/session').run(update=True) + +Downloading the required data and inspecting the QC on a different computer: + +>>> from ibllib.qc.task_metrics import TaskQC +>>> qc = TaskQC(eid) +>>> outcome, results = qc.run() + +Inspecting individual test outcomes + +>>> from ibllib.qc.task_metrics import TaskQC +>>> qc = TaskQC(eid) +>>> outcome, results, outcomes = qc.compute().compute_session_status() + +Running bpod QC on ephys session + +>>> from ibllib.qc.task_metrics import TaskQC +>>> qc = TaskQC(eid) +>>> qc.load_data(bpod_only=True) # Extract without FPGA +>>> bpod_qc = qc.run() + +Running bpod QC only, from training rig PC + +>>> from ibllib.qc.task_metrics import TaskQC +>>> from ibllib.qc.qcplots import plot_results +>>> session_path = r'/home/nico/Downloads/FlatIron/mrsicflogellab/Subjects/SWC_023/2020-02-14/001' +>>> qc = TaskQC(session_path) +>>> qc.load_data(bpod_only=True, download_data=False) # Extract without FPGA +>>> qc.run() +>>> plot_results(qc, save_path=session_path) + +Running ephys QC, from local server PC (after ephys + bpod data have been copied to a same folder) + +>>> from ibllib.qc.task_metrics import TaskQC +>>> from ibllib.qc.qcplots import plot_results +>>> session_path = r'/home/nico/Downloads/FlatIron/mrsicflogellab/Subjects/SWC_023/2020-02-14/001' +>>> qc = TaskQC(session_path) +>>> qc.run() +>>> plot_results(qc, save_path=session_path) """ import logging import sys @@ -148,7 +154,7 @@ def compute(self, **kwargs): self.log.info(f"Session {self.session_path}: Running QC on behavior data...") self.metrics, 
self.passed = get_bpodqc_metrics_frame( self.extractor.data, - wheel_gain=self.extractor.settings["STIM_GAIN"], # The wheel gain + wheel_gain=self.extractor.settings['STIM_GAIN'], # The wheel gain photodiode=self.extractor.frame_ttls, audio=self.extractor.audio_ttls, re_encoding=self.extractor.wheel_encoding or 'X1', @@ -379,7 +385,7 @@ def check_response_feedback_delays(data, **_): """ Checks that the time difference between the response and the feedback onset (error sound or valve) is positive and less than 10ms. - Metric: M = Feedback_time - response_time + Metric: M = feedback_time - response_time Criterion: 0 < M < 0.010 s Units: seconds [s] diff --git a/ibllib/tests/extractors/test_ephys_trials.py b/ibllib/tests/extractors/test_ephys_trials.py index d22665137..ba49d31bb 100644 --- a/ibllib/tests/extractors/test_ephys_trials.py +++ b/ibllib/tests/extractors/test_ephys_trials.py @@ -105,6 +105,12 @@ def test_wheel_trace_from_sync(self): t, pos = ephys_fpga._rotary_encoder_positions_from_fronts(ta, pa, tb, pb, coding='x2') self.assertTrue(np.all(np.isclose(pos_, pos))) + def test_time_fields(self): + """Test for FpgaTrials._time_fields static method.""" + expected = ('intervals', 'fooBar_times_bpod', 'spike_times', 'baz_timestamps') + fields = ephys_fpga.FpgaTrials._time_fields(expected + ('position', 'timebase', 'fooBaz')) + self.assertCountEqual(expected, fields) + class TestEphysBehaviorExtraction(unittest.TestCase): def setUp(self): diff --git a/ibllib/tests/extractors/test_extractors.py b/ibllib/tests/extractors/test_extractors.py index 463ff78ea..56a8de86d 100644 --- a/ibllib/tests/extractors/test_extractors.py +++ b/ibllib/tests/extractors/test_extractors.py @@ -735,6 +735,13 @@ def test_attribute_times(self, display=False): expected[np.r_[1:3]] = expected[1:3] + 1 np.testing.assert_array_equal(matches, expected) + # Taking first after should exclude many pulses + matches = camera.attribute_times(tsa, tsb, take='after') + missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, + 22, 23, 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43] + expected[missing] = -1 + np.testing.assert_array_equal(matches, expected) + # Lower tolerance matches = camera.attribute_times(tsa, tsb, tol=0.05) expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57]) diff --git a/ibllib/tests/qc/test_alignment_qc.py b/ibllib/tests/qc/test_alignment_qc.py index 75db7ca38..55eddb68c 100644 --- a/ibllib/tests/qc/test_alignment_qc.py +++ b/ibllib/tests/qc/test_alignment_qc.py @@ -16,7 +16,7 @@ from ibllib.atlas import AllenAtlas from ibllib.pipes.misc import create_alyx_probe_insertions from ibllib.qc.alignment_qc import AlignmentQC -from ibllib.pipes.histology import register_track +from ibllib.pipes.histology import register_track, register_chronic_track from one.registration import RegistrationClient @@ -66,6 +66,59 @@ def tearDownClass(cls) -> None: one.alyx.rest('insertions', 'delete', id=cls.probe00_id) +class TestChronicTracingQC(unittest.TestCase): + @classmethod + def setUpClass(cls) -> None: + probe = ''.join(random.choices(string.ascii_letters, k=5)) + serial = ''.join(random.choices(string.ascii_letters, k=10)) + + # Make a chronic insertions + ref = one.eid2ref(EPHYS_SESSION) + insdict = {"subject": ref['subject'], "name": probe, "model": '3B2', "serial": serial} + ins = one.alyx.rest('chronic-insertions', 'create', data=insdict) + cls.chronic_id = ins['id'] + # Make a probe insertions + insdict = {"session": EPHYS_SESSION, "name": probe, "model": '3B2', "serial": serial, + 
"chronic_insertion": cls.chronic_id} + ins = one.alyx.rest('insertions', 'create', data=insdict) + cls.probe_id = ins['id'] + + # Load in the tracing data + data = np.load(Path(Path(__file__).parent.parent. + joinpath('fixtures', 'qc', 'data_alignmentqc_existing.npz')), + allow_pickle=True) + cls.xyz_picks = np.array(data['xyz_picks']) / 1e6 + + def test_tracing_exists(self): + register_chronic_track(self.chronic_id, picks=self.xyz_picks, one=one, overwrite=True, + channels=False, brain_atlas=brain_atlas) + insertion = one.alyx.get('/insertions/' + self.probe_id, clobber=True) + + assert (insertion['json']['qc'] == 'NOT_SET') + assert (insertion['json']['extended_qc']['tracing_exists'] == 1) + + insertion = one.alyx.get('/chronic-insertions/' + self.chronic_id, clobber=True) + + assert (insertion['json']['qc'] == 'NOT_SET') + assert (insertion['json']['extended_qc']['tracing_exists'] == 1) + + def test_tracing_not_exists(self): + register_chronic_track(self.chronic_id, picks=None, one=one, overwrite=True, + channels=False, brain_atlas=brain_atlas) + insertion = one.alyx.get('/insertions/' + self.probe_id, clobber=True) + assert (insertion['json']['qc'] == 'CRITICAL') + assert (insertion['json']['extended_qc']['tracing_exists'] == 0) + + insertion = one.alyx.get('/chronic-insertions/' + self.chronic_id, clobber=True) + assert (insertion['json']['qc'] == 'CRITICAL') + assert (insertion['json']['extended_qc']['tracing_exists'] == 0) + + @classmethod + def tearDownClass(cls) -> None: + one.alyx.rest('insertions', 'delete', id=cls.probe_id) + one.alyx.rest('chronic-insertions', 'delete', id=cls.chronic_id) + + class TestAlignmentQcExisting(unittest.TestCase): probe_id = None prev_traj_id = None diff --git a/ibllib/tests/test_atlas.py b/ibllib/tests/test_atlas.py index 155759d8b..f3d147c26 100644 --- a/ibllib/tests/test_atlas.py +++ b/ibllib/tests/test_atlas.py @@ -28,6 +28,15 @@ class TestBrainRegions(unittest.TestCase): def setUpClass(self): self.brs = BrainRegions() + def test_to_df(self): + df = self.brs.to_df() + self.assertTrue(df.shape[0] == self.brs.acronym.shape[0]) + self.assertEqual( + set(['id', 'name', 'acronym', 'hexcolor', 'level', 'parent', 'order']), set(list(df.columns))) + + def test_hexcolor(self): + assert self.brs.hexcolor.shape == (self.brs.rgb.shape[0],) + def test_rgba(self): assert self.brs.rgba.shape == (self.brs.rgb.shape[0], 4) @@ -87,6 +96,12 @@ def test_remap(self): expected_cosmos_id = [1089, 549] # HPF and TH assert np.all(cosmos_id == expected_cosmos_id) + # Test remap when we have nans + atlas_id = np.array([463, np.nan, 685]) + cosmos_id = self.brs.remap(atlas_id, source_map='Allen', target_map='Cosmos') + expected_cosmos_id = np.array([1089, np.nan, 549], dtype=float) # HPF and TH + np.testing.assert_equal(cosmos_id, expected_cosmos_id) + def test_id2id(self): # Test remapping of atlas id to atlas id atlas_id = np.array([463, 685]) @@ -366,6 +381,12 @@ def test_allen_ba(self): self.assertTrue(np.allclose(self.ba.bc.xyz2i(np.array([0, 0, 0]), round=False), ALLEN_CCF_LANDMARKS_MLAPDV_UM['bregma'] / 25)) + def test_lookup_outside_the_brain(self): + xyz = [0, 0, 15687588] + with self.assertRaises(ValueError): + self.ba.get_labels(xyz) + self.assertEqual(self.ba.get_labels(xyz, mode='clip'), 0) + def test_lookups(self): # the get_labels lookup returns the regions ids (not the indices !!) 
assert self.ba.get_labels([0, 0, self.ba.bc.i2z(103)]) == 304325711 @@ -480,7 +501,11 @@ def test_init_from_dict(self): 'theta': 5.0, 'depth': 4501.0, 'beta': 0.0} - ins = Insertion.from_dict(d) + + brain_atlas = _create_mock_atlas() + brain_atlas.compute_surface() + brain_atlas.top = brain_atlas.top * np.NaN + ins = Insertion.from_dict(d, brain_atlas=brain_atlas) # eval the entry point, should be super close dxyz = ins.trajectory.eval_x(d['x'] / 1e6) - np.array((d['x'], d['y'], d['z'])) / 1e6 self.assertTrue(np.all(np.isclose(dxyz, 0))) diff --git a/ibllib/tests/test_base_tasks.py b/ibllib/tests/test_base_tasks.py new file mode 100644 index 000000000..e91d20450 --- /dev/null +++ b/ibllib/tests/test_base_tasks.py @@ -0,0 +1,96 @@ +import unittest +import tempfile +from pathlib import Path +from functools import partial + +import matplotlib.pyplot as plt +import numpy as np +from one.api import ONE +from one.registration import RegistrationClient + +from ibllib.pipes import base_tasks +from ibllib.tests import TEST_DB + + +class TestRegisterRawDataTask(unittest.TestCase): + tmpdir = None + one = None + session_path = None + eid = None + + @classmethod + def setUpClass(cls) -> None: + cls.tmpdir = tempfile.TemporaryDirectory() + cls.one = ONE(**TEST_DB, cache_rest=None) + ses_dict = { + 'subject': 'algernon', + 'start_time': RegistrationClient.ensure_ISO8601(None), + 'number': 1, + 'users': ['test_user']} + ses = cls.one.alyx.rest('sessions', 'create', data=ses_dict) + cls.session_path = Path(cls.tmpdir.name).joinpath( + ses['subject'], ses['start_time'][:10], str(ses['number']).zfill(3)) + cls.eid = ses['url'][-36:] + + # Add a couple of images + cls.session_path.joinpath('snapshots').mkdir(parents=True) + for ext in ('.PNG', '.tif'): + plt.imshow(np.random.random((7, 7))) + plt.savefig(cls.session_path.joinpath('snapshots', 'foo').with_suffix(ext)) + plt.close() + + def test_register_snapshots(self): + """Test ibllib.pipes.base_tasks.RegisterRawDataTask.register_snapshots. + + A more thorough test for this exists in ibllib.tests.test_pipes.TestRegisterRawDataTask. + This test does not mock REST (and therefore requires a test database), while the other does. + This test could be removed as it's rather redundant. 
+ """ + task = base_tasks.RegisterRawDataTask(self.session_path, one=self.one) + notes = task.register_snapshots() + self.assertEqual(2, len(notes)) + self.assertTrue(self.session_path.joinpath('snapshots').exists()) + task.register_snapshots(unlink=True) + self.assertFalse(self.session_path.joinpath('snapshots').exists()) + + def test_rename_files(self): + collection = 'raw_sync_data' + task = base_tasks.RegisterRawDataTask(self.session_path, one=self.one) + task.input_files = task.output_files = [] + task.rename_files() # Returns without raising + task.input_files = [('foo.*', collection, True), ] + task.output_files = [('_ns_DAQdata.raw.bar', collection, True), ] + self.session_path.joinpath(collection).mkdir() + self.session_path.joinpath(collection, 'foo.bar').touch() + task.rename_files() + self.assertTrue(self.session_path.joinpath(collection, '_ns_DAQdata.raw.bar').exists()) + self.assertFalse(self.session_path.joinpath(collection, 'foo.bar').exists()) + with self.assertRaises(FileNotFoundError): + task.rename_files() + # Check asserts number of inputs == number of outputs + task.output_files.append(('_ns_DAQdata.baz.bar', collection, True),) + with self.assertRaises(AssertionError): + task.rename_files() + + @classmethod + def tearDownClass(cls) -> None: + if cls.tmpdir: + cls.tmpdir.cleanup() + if cls.one and cls.eid: + cls.one.alyx.rest('sessions', 'delete', id=cls.eid) + + +class TestBehaviourTask(unittest.TestCase): + def test_spacer_support(self) -> None: + """Test for BehaviourTask._spacer_support method.""" + to_test = [('100.0.0', False), ('8.0.0', False), ('7.1.0', True), ('8.0.1', True), ('7.2.0', True)] + settings = {} + spacer_support = partial(base_tasks.BehaviourTask._spacer_support, settings) + for version, expected in to_test: + settings['IBLRIG_VERSION_TAG'] = version + with self.subTest(version): + self.assertIs(spacer_support(), expected) + + +if __name__ == '__main__': + unittest.main() diff --git a/ibllib/tests/test_io.py b/ibllib/tests/test_io.py index 234149701..967a88d32 100644 --- a/ibllib/tests/test_io.py +++ b/ibllib/tests/test_io.py @@ -6,8 +6,10 @@ from pathlib import Path import sys import logging +import json import numpy as np +import numpy.testing from one.api import ONE from iblutil.io import params import yaml @@ -15,6 +17,7 @@ from ibllib.tests import TEST_DB from ibllib.io import flags, misc, globus, video, session_params import ibllib.io.raw_data_loaders as raw +import ibllib.io.raw_daq_loaders as raw_daq class TestsParams(unittest.TestCase): @@ -462,7 +465,7 @@ def test_assert_valid_label(self): class TestSessionParams(unittest.TestCase): - """Tests for ibllib.io.session_params module""" + """Tests for ibllib.io.session_params module.""" def setUp(self) -> None: self.tmpdir = tempfile.TemporaryDirectory() @@ -475,17 +478,21 @@ def setUp(self) -> None: # save as individual files self.devices_path = Path(self.tmpdir.name).joinpath('_devices') + # a sync that's different to widefield and ephys + sync = {**self.fixture['sync']['nidq'].copy(), 'collection': 'raw_sync_data'} + computers_descriptions = { 'widefield': dict(devices={'widefield': self.fixture['devices']['widefield']}), 'video': '', 'ephys': dict(devices={'neuropixel': self.fixture['devices']['neuropixel']}), - 'behaviour': dict(devices={'microphone': self.fixture['devices']['microphone']}) + 'behaviour': dict(devices={'microphone': self.fixture['devices']['microphone']}), + 'sync': dict(sync={'nidq': sync}) } # the behaviour computer contains the task, project and procedure keys for k 
in filter(lambda x: x != 'devices', self.fixture): computers_descriptions['behaviour'][k] = self.fixture[k] - # the ephys computer contains another sync key! + # the ephys computer contains another identical sync key computers_descriptions['ephys']['sync'] = self.fixture['sync'] for label, data in computers_descriptions.items(): @@ -494,6 +501,7 @@ def setUp(self) -> None: @patch(session_params.__name__ + '.time.sleep') def test_aggregate(self, sleep_mock): + """A test for both aggregate_device and merge_params.""" fullfile = self.devices_path.parent.joinpath('_ibl_experiment.description.yaml') file_lock = fullfile.with_suffix('.lock') @@ -526,8 +534,12 @@ def test_aggregate(self, sleep_mock): self.assertCountEqual(data.keys(), expected_keys) self.assertTrue(len(data['devices'].keys()) > 1) - # A device with another sync key + # A device with another identical sync key file_device = self.devices_path.joinpath('ephys.yaml') + session_params.aggregate_device(file_device, fullfile, unlink=True) + + # A device with a different sync + file_device = self.devices_path.joinpath('sync.yaml') with self.assertRaises(AssertionError): session_params.aggregate_device(file_device, fullfile, unlink=True) @@ -548,7 +560,7 @@ def test_read_yaml(self): self.assertCountEqual(self.fixture.keys(), data_keys) def test_patch_data(self): - with patch(session_params.__name__ + '.SPEC_VERSION', '1.0.0'),\ + with patch(session_params.__name__ + '.SPEC_VERSION', '1.0.0'), \ self.assertLogs(session_params.__name__, logging.WARNING): data = session_params._patch_file({'version': '1.1.0'}) self.assertEqual(data, {'version': '1.0.0'}) @@ -567,5 +579,103 @@ def test_get_collections(self): self.assertCountEqual(expected, collections) -if __name__ == "__main__": +class TestRawDaqLoaders(unittest.TestCase): + """Tests for raw_daq_loaders module""" + def setUp(self): + self.tmpdir = tempfile.TemporaryDirectory() + self.addCleanup(self.tmpdir.cleanup) + # Create some toy DAQ data + N = 3000 + Fs = 1 + a0_clean = np.zeros(N) + self.n_ttl = 6 + pulse_width = int(np.floor(50 * Fs)) + for i in np.arange(1, N, int(np.floor(N / self.n_ttl))): + a0_clean[i:i + pulse_width] = 1 + a0 = (a0_clean * np.full(N, 5)) + np.random.rand(N) + 1 # 0 -> 5V w/ noise and 1V DC offset + ctr0 = np.cumsum(a0_clean) # Counter channel, e.g. [0, 0, 0, 1, 1, 2, 3, 3, 3, 3, [...] n] + ctr1 = np.cumsum(a0_clean * np.random.choice([1, -1], N)) # Position channel e.g. [0, 1, 2, 1, ...] 
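+        # To summarise the toy signals: a0 is a noisy analogue TTL train (0 -> 5V with a 1V DC
+        # offset), ctr0 a monotonically increasing edge-count channel and ctr1 a random-walk
+        # position channel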
+ + self.timeline = {'timestamps': np.arange(0, N, Fs), 'raw': np.vstack([a0, ctr0, ctr1]).T} + self.meta = {'daqSampleRate': Fs, 'inputs': [ + {'name': 'bpod', 'arrayColumn': 1, 'measurement': 'Voltage', 'daqChannelID': 'ai0'}, + {'name': 'neuralFrames', 'arrayColumn': 2, 'measurement': 'EdgeCount', 'daqChannelID': 'ctr0'}, + {'name': 'rotaryEncoder', 'arrayColumn': 3, 'measurement': 'Position', 'daqChannelID': 'ctr1'} + ]} + # FIXME Because of non-standard ALF naming we cannot use save_object_npy for this purpose + # alfio.save_object_npy(self.tmpdir.name, self.timeline, 'DAQ data', namespace='timeline') + for k, v in self.timeline.items(): + np.save(self.tmpdir.name + f'/_timeline_DAQdata.{k}.npy', v) + with open(self.tmpdir.name + '/_timeline_DAQdata.meta.json', 'w') as fp: + json.dump(self.meta, fp) + + def test_extract_sync_timeline(self): + """Test for extract_sync_timeline function.""" + chmap = {'bpod': 0, 'neuralFrames': 1, 'rotaryEncoder': 3} + sync = raw_daq.extract_sync_timeline(self.tmpdir.name, chmap) + self.assertCountEqual(('times', 'channels', 'polarities'), sync.keys()) + # Should be sorted by times + self.assertTrue(np.all(np.diff(sync['times']) >= 0)) + # Number of detected fronts should be correct + self.assertEqual(len(sync['times'][sync['channels'] == 0]), self.n_ttl * 2) + # Check polarities + fronts = sync['polarities'][sync['channels'] == 0] + self.assertEqual(1, fronts[0]) + # Check polarities alternate between 1 and -1 + self.assertTrue( + np.all(np.unique(np.cumsum(fronts)) == [0, 1]) and np.all(np.unique(fronts) == [-1, 1]) + ) + # Check edge count channel sync + fronts = sync['polarities'][sync['channels'] == 1] + # Check a few timestamps + times = sync['times'][sync['channels'] == 1] + np.testing.assert_array_almost_equal(times[:5], np.arange(5) + 1.) 
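+        # (the first pulse starts at sample 1 and ctr0 increments on every sample while the
+        # pulse is high, so with Fs = 1 the first five edge times are 1, 2, 3, 4 and 5 seconds)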
+ # Because of the way we made the data, the number of fronts should == pulse_width * n_ttl + # Minus one from unique values because one of those values will be zero + self.assertEqual(len(np.unique(self.timeline['raw'][:, 1])) - 1, len(fronts)) + self.assertTrue(np.all(fronts == 1)) + # Check position channel sync + fronts = sync['polarities'][sync['channels'] == 3] + self.assertEqual(len(np.unique(self.timeline['raw'][:, 1])) - 1, len(fronts)) + self.assertTrue(np.all(np.unique(fronts) == [-1, 1])) + + # Check for missing channel warnings + chmap['unknown'] = 2 # Add channel that's not in meta file + with self.assertLogs(logging.getLogger('ibllib.io.raw_daq_loaders'), logging.WARNING) as log: + raw_daq.extract_sync_timeline(self.tmpdir.name, chmap) + record, = log.records + self.assertIn('unknown', record.message) + + # Check measurement type validation + self.meta['inputs'][0]['measurement'] = 'FooBar' + with open(self.tmpdir.name + '/_timeline_DAQdata.meta.json', 'w') as fp: + json.dump(self.meta, fp) + self.assertRaises(NotImplementedError, raw_daq.extract_sync_timeline, self.tmpdir.name) + + def test_timeline_meta2wiring(self): + """Test for timeline_meta2wiring function.""" + wiring = raw_daq.timeline_meta2wiring(self.tmpdir.name, save=False) + expected = { + 'SYSTEM': 'timeline', + 'SYNC_WIRING_ANALOG': {'ai0': 'bpod'}, + 'SYNC_WIRING_DIGITAL': {'ctr0': 'neuralFrames', 'ctr1': 'rotaryEncoder'} + } + self.assertDictEqual(expected, wiring) + wiring, outpath = raw_daq.timeline_meta2wiring(self.tmpdir.name, save=True) + expected_path = Path(self.tmpdir.name, '_timeline_DAQData.wiring.json') + self.assertEqual(expected_path, outpath) + self.assertTrue(outpath.exists()) + + def test_timeline_meta2chmap(self): + """Test for timeline_meta2chmap function.""" + chmap = raw_daq.timeline_meta2chmap(self.meta) + expected = {'bpod': 1, 'neuralFrames': 2, 'rotaryEncoder': 3} + self.assertDictEqual(expected, chmap) + chmap = raw_daq.timeline_meta2chmap(self.meta, exclude_channels=('bpod', 'rotaryEncoder')) + self.assertDictEqual({'neuralFrames': expected.pop('neuralFrames')}, chmap) + chmap = raw_daq.timeline_meta2chmap(self.meta, include_channels=('bpod', 'rotaryEncoder')) + self.assertDictEqual(expected, chmap) + + +if __name__ == '__main__': unittest.main(exit=False, verbosity=2) diff --git a/ibllib/tests/test_mesoscope.py b/ibllib/tests/test_mesoscope.py new file mode 100644 index 000000000..4579d202b --- /dev/null +++ b/ibllib/tests/test_mesoscope.py @@ -0,0 +1,208 @@ +"""Tests for ibllib.pipes.mesoscope_tasks""" +import sys +import unittest +from unittest import mock +import tempfile +import json +from pathlib import Path + +from one.api import ONE +import numpy as np + +from ibllib.pipes.mesoscope_tasks import MesoscopePreprocess, MesoscopeFOV, \ + find_triangle, surface_normal, _nearest_neighbour_1d +from ibllib.tests import TEST_DB + +# Mock suit2p which is imported in MesoscopePreprocess +attrs = {'default_ops.return_value': {}} +sys.modules['suite2p'] = mock.MagicMock(**attrs) + + +class TestMesoscopePreprocess(unittest.TestCase): + """Test for MesoscopePreprocess task.""" + + def setUp(self) -> None: + self.td = tempfile.TemporaryDirectory() + self.session_path = Path(self.td.name).joinpath('subject', '2020-01-01', '001') + self.img_path = self.session_path.joinpath('raw_imaging_data_00') + self.img_path.mkdir(parents=True) + self.task = MesoscopePreprocess(self.session_path, one=ONE(**TEST_DB)) + + def test_meta(self): + """ + Test arguments that are overwritten by meta file and set 
+        expected = {
+            'data_path': [str(self.img_path)],
+            'fast_disk': '',
+            'num_workers': -1,
+            'save_path0': str(self.session_path.joinpath('alf')),
+            'move_bin': True,
+            'keep_movie_raw': False,
+            'delete_bin': False,
+            'batch_size': 500,
+            'combined': False,
+            'look_one_level_down': False,
+            'num_workers_roi': -1,
+            'nimg_init': 400,
+            'nonrigid': True,
+            'maxregshift': 0.05,
+            'denoise': 1,
+            'block_size': [128, 128],
+            'save_mat': True,
+            'scalefactor': 1,
+            'mesoscan': True,
+            'nplanes': 1,
+            'tau': 1.5,
+            'functional_chan': 1,
+            'align_by_chan': 1,
+            'nrois': 1,
+            'nchannels': 1,
+            'fs': 6.8,
+            'lines': [[3, 4, 5]],
+            'dx': np.array([0], dtype=int),
+            'dy': np.array([0], dtype=int),
+        }
+
+        meta = {
+            'scanImageParams': {'hStackManager': {'zs': 320},
+                                'hRoiManager': {'scanVolumeRate': 6.8}},
+            'FOV': [{'topLeftDeg': [-1, 1.3], 'topRightDeg': [3, 1.3], 'bottomLeftDeg': [-1, 5.2],
+                     'nXnYnZ': [512, 512, 1], 'channelIdx': 2, 'lineIdx': [4, 5, 6]}]
+        }
+        with open(self.img_path.joinpath('_ibl_rawImagingData.meta.json'), 'w') as f:
+            json.dump(meta, f)
+        self.img_path.joinpath('test.tif').touch()
+        with mock.patch.object(self.task, 'get_default_tau', return_value=1.5):
+            _ = self.task.run(run_suite2p=False, rename_files=False)
+        self.assertEqual(self.task.status, 0)
+        self.assertDictEqual(self.task.kwargs, expected)
+        # {k: v for k, v in self.task.kwargs.items() if expected[k] != v}
+        # Now overwrite a specific option with a task.run kwarg
+        with mock.patch.object(self.task, 'get_default_tau', return_value=1.5):
+            _ = self.task.run(run_suite2p=False, rename_files=False, nchannels=2, delete_bin=True)
+        self.assertEqual(self.task.status, 0)
+        self.assertEqual(self.task.kwargs['nchannels'], 2)
+        self.assertEqual(self.task.kwargs['delete_bin'], True)
+        with open(self.img_path.joinpath('_ibl_rawImagingData.meta.json'), 'w') as f:
+            json.dump({}, f)
+
+    def test_get_default_tau(self):
+        """Test for MesoscopePreprocess.get_default_tau method."""
+        subject_detail = {'genotype': [{'allele': 'Cdh23', 'zygosity': 1},
+                                       {'allele': 'Ai95-G6f', 'zygosity': 1},
+                                       {'allele': 'Camk2a-tTa', 'zygosity': 1}]}
+        with mock.patch.object(self.task.one.alyx, 'rest', return_value=subject_detail):
+            self.assertEqual(self.task.get_default_tau(), .7)
+            subject_detail['genotype'].pop(1)
+            self.assertEqual(self.task.get_default_tau(), 1.5)  # returns the default value
+
+    def tearDown(self) -> None:
+        self.td.cleanup()
+
+
+class TestMesoscopeFOV(unittest.TestCase):
+    """Test for MesoscopeFOV task and associated functions."""
+
+    def test_get_provenance(self):
+        """Test for MesoscopeFOV.get_provenance method."""
+        filename = 'mpciMeanImage.mlapdv_estimate.npy'
+        provenance = MesoscopeFOV.get_provenance(filename)
+        self.assertEqual('ESTIMATE', provenance.name)
+        filename = 'mpciROIs.brainLocation_ccf_2017.npy'
+        provenance = MesoscopeFOV.get_provenance(filename)
+        self.assertEqual('HISTOLOGY', provenance.name)
+
+    def test_find_triangle(self):
+        """Test for find_triangle function."""
+        points = np.array([[2.435, -3.37], [2.435, -1.82], [2.635, -2.], [2.535, -1.7]])
+        connectivity_list = np.array([[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5]], dtype=np.intp)
+        point = np.array([2.6, -1.9])
+        self.assertEqual(1, find_triangle(point, points, connectivity_list))
+        point = np.array([3., 1.])  # outside of defined vertices
+        self.assertEqual(-1, find_triangle(point, points, connectivity_list))
+
+    def test_surface_normal(self):
+        """Test for surface_normal function."""
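+        # A right triangle in the XY plane; its unit normal should point along +Z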
+        vertices = np.array([[0, 1, 0], [0, 0, 0], [1, 0, 0]])
+        expected = np.array([0, 0, 1])
+        np.testing.assert_almost_equal(surface_normal(vertices), expected)
+
+        # Test against multiple triangles
+        vertices = np.r_[vertices[np.newaxis, :, :], [[[0, 0, 0], [0, 2, 0], [2, 0, 0]]]]
+        expected = np.array([[0, 0, 1], [0, 0, -1]])
+        np.testing.assert_almost_equal(surface_normal(vertices), expected)
+
+        # Some real data
+        vertices = np.array([[2.435, -1.82, -0.53], [2.635, -2., -0.58], [2.535, -1.7, -0.58]])
+        expected = np.array([0.33424239, 0.11141413, 0.93587869])
+        np.testing.assert_almost_equal(surface_normal(vertices), expected)
+
+        # Test input validation
+        self.assertRaises(ValueError, surface_normal, np.array([[1, 2, 3, 4]]))
+
+    def test_nearest_neighbour_1d(self):
+        """Test for _nearest_neighbour_1d function."""
+        x = np.array([2., 1., 4., 5., 3.])
+        x_new = np.array([-3, 0, 1.2, 3, 3, 2.5, 4.7, 6])
+        val, ind = _nearest_neighbour_1d(x, x_new)
+        np.testing.assert_array_equal(val, [1., 1., 1., 3., 3., 2., 5., 5.])
+        np.testing.assert_array_equal(ind, [1, 1, 1, 4, 4, 0, 3, 3])
+
+
+class TestRegisterFOV(unittest.TestCase):
+    """Test for MesoscopeFOV.register_fov method."""
+
+    def setUp(self) -> None:
+        self.one = ONE(**TEST_DB)
+        tmpdir = tempfile.TemporaryDirectory()
+        self.addCleanup(tmpdir.cleanup)
+        self.session_path = Path(tmpdir.name, 'subject', '2020-01-01', '001')
+        self.session_path.joinpath('alf', 'FOV_00').mkdir(parents=True)
+        filename = self.session_path.joinpath('alf', 'FOV_00', 'mpciMeanImage.brainLocationIds_ccf_2017_estimate.npy')
+        np.save(filename, np.array([0, 1, 2, 2, 4, 7], dtype=int))
+
+    def test_register_fov(self):
+        """Test MesoscopeFOV.register_fov method.
+
+        Note that this test does not actually hit Alyx, nor does it test stack creation.
+        """
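+        # Patch out alyx.rest below so no actual requests are made; we only inspect the calls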
+        task = MesoscopeFOV(self.session_path, device_collection='raw_imaging_data', one=self.one)
+        mlapdv = {'topLeft': [2317.2, -1599.8, -535.5], 'topRight': [2862.7, -1625.2, -748.7],
+                  'bottomLeft': [2317.3, -2181.4, -466.3], 'bottomRight': [2862.7, -2206.9, -679.4],
+                  'center': [2596.1, -1900.5, -588.6]}
+        meta = {'FOV': [{'MLAPDV': mlapdv, 'nXnYnZ': [512, 512, 1], 'roiUUID': 0}]}
+        with unittest.mock.patch.object(self.one.alyx, 'rest') as mock_rest:
+            task.register_fov(meta, 'estimate')
+        calls = mock_rest.call_args_list
+        self.assertEqual(3, len(calls))
+
+        args, kwargs = calls[1]
+        self.assertEqual(('fields-of-view', 'create'), args)
+        expected = {'data': {'session': None, 'imaging_type': 'mesoscope', 'name': 'FOV_00', 'stack': None}}
+        self.assertEqual(expected, kwargs)
+
+        args, kwargs = calls[2]
+        self.assertEqual(('fov-location', 'create'), args)
+        expected = ['field_of_view', 'default_provenance', 'coordinate_system', 'n_xyz', 'provenance', 'x', 'y', 'z',
+                    'brain_region']
+        self.assertCountEqual(expected, kwargs.get('data', {}).keys())
+        self.assertEqual(5, len(kwargs['data']['brain_region']))
+        self.assertEqual([512, 512, 1], kwargs['data']['n_xyz'])
+        self.assertIs(kwargs['data']['field_of_view'], mock_rest().get('id'))
+        self.assertEqual('E', kwargs['data']['provenance'])
+        self.assertEqual([2317.2, 2862.7, 2317.3, 2862.7], kwargs['data']['x'])
+
+        # Check dry mode with suffix input = None
+        for file in self.session_path.joinpath('alf', 'FOV_00').glob('mpciMeanImage.*'):
+            file.replace(file.with_name(file.name.replace('_estimate', '')))
+        self.one.mode = 'local'
+        with unittest.mock.patch.object(self.one.alyx, 'rest') as mock_rest:
+            out = task.register_fov(meta, None)
+        mock_rest.assert_not_called()
+        self.assertEqual(1, len(out))
+        self.assertEqual('FOV_00', out[0].get('name'))
+        locations = out[0]['location']
+        self.assertEqual(1, len(locations))
+        self.assertEqual('L', locations[0].get('provenance', 'L'))
diff --git a/ibllib/tests/test_oneibl.py b/ibllib/tests/test_oneibl.py
index 3042fb98f..aa9483d6b 100644
--- a/ibllib/tests/test_oneibl.py
+++ b/ibllib/tests/test_oneibl.py
@@ -64,7 +64,7 @@ def test_setup(self, _):
         # Silent mode off
         self.reset_params()
         self.one.alyx.silent = False
-        with mock.patch('builtins.input', new=self.mock_input),\
+        with mock.patch('builtins.input', new=self.mock_input), \
                 mock.patch('ibllib.oneibl.patcher.getpass', return_value='foobar'):
             patcher.FTPPatcher(one=self.one)
         self.assertEqual(self.one.alyx._par.FTP_DATA_SERVER_LOGIN, 'usr')
diff --git a/ibllib/tests/test_pipes.py b/ibllib/tests/test_pipes.py
index e4361027a..ba5c282dd 100644
--- a/ibllib/tests/test_pipes.py
+++ b/ibllib/tests/test_pipes.py
@@ -12,6 +12,7 @@
 import datetime
 import random
 import string
+from uuid import uuid4
 
 from one.api import ONE
 import iblutil.io.params as iopar
@@ -22,6 +23,7 @@
 from ibllib.pipes import misc
 from ibllib.tests import TEST_DB
 import ibllib.pipes.scan_fix_passive_files as fix
+from ibllib.pipes.base_tasks import RegisterRawDataTask
 from ibllib.pipes.ephys_preprocessing import SpikeSorting
@@ -392,6 +394,15 @@ def test_create_basic_transfer_params(self):
         self.assertEqual(transfer_label, params.pop('TRANSFER_LABEL'))
         self.assertCountEqual(expected, params)
 
+        # Test remote as bool
+        with mock.patch('builtins.input', return_value='baz'):
+            params = misc.create_basic_transfer_params(PARAM_STR, remote_data_path=False)
+            self.assertEqual('~/remote_data', params.get('REMOTE_DATA_FOLDER_PATH'))
+            params = misc.create_basic_transfer_params(PARAM_STR, remote_data_path=False, clobber=True)
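+            # With clobber=True the stored path is overwritten by the value passed in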
+            self.assertIs(params.get('REMOTE_DATA_FOLDER_PATH'), False)
+            params = misc.create_basic_transfer_params(PARAM_STR)
+            self.assertIs(params.get('REMOTE_DATA_FOLDER_PATH'), False)
+
         # Test custom function and extra par delete
         with mock.patch('builtins.input', return_value='baz') as in_mock:
             params = misc.create_basic_transfer_params(
@@ -647,5 +658,45 @@ def test_parse_version(self):
             SpikeSorting.parse_version('version-twelve')
 
 
-if __name__ == "__main__":
+class TestRegisterRawDataTask(unittest.TestCase):
+    def setUp(self) -> None:
+        self.one = ONE(**TEST_DB)
+        self.tempdir = tempfile.TemporaryDirectory()
+        self.addCleanup(self.tempdir.cleanup)
+        self.session_path = Path(self.tempdir.name).joinpath('subject', '2023-01-01', '001')
+        self.session_path.mkdir(parents=True)
+
+    def test_rename_files(self):
+        """Test upload of snapshots.
+
+        Another test for this exists in ibllib.tests.test_base_tasks.TestRegisterRawDataTask.
+        """
+        # Add base dir snapshot
+        (folder := self.session_path.joinpath('snapshots')).mkdir()
+        folder.joinpath('snap.PNG').touch()
+        collection = 'raw_task_data'
+        for i, ext in enumerate(['tif', 'jpg']):
+            (p := self.session_path.joinpath(f'{collection}_{i:02}', 'snapshots')).mkdir(parents=True)
+            p.joinpath(f'snapshot.{ext}').touch()
+        # Stuff with text note
+        p = self.session_path.joinpath(f'{collection}_00', 'snapshots', 'pic.jpeg')
+        with open(p, 'wb') as fp:
+            fp.write('foo'.encode())
+        with open(p.with_name('pic.txt'), 'w') as fp:
+            fp.write('bar')
+
+        task = RegisterRawDataTask(self.session_path, one=self.one)
+        with mock.patch.object(self.one.alyx, 'rest') as rest, \
+                mock.patch.object(self.one, 'path2eid', return_value=str(uuid4())):
+            task.register_snapshots(collection=['', f'{collection}*'])
+        self.assertEqual(4, rest.call_count)
+        files = []
+        for args, kwargs in rest.call_args_list:
+            self.assertEqual(('notes', 'create'), args)
+            files.append(Path(kwargs['files']['image'].name).name)
+        expected = ('snap.PNG', 'pic.jpeg', 'snapshot.tif', 'snapshot.jpg')
+        self.assertCountEqual(expected, files)
+
+
+if __name__ == '__main__':
     unittest.main(exit=False, verbosity=2)
diff --git a/release_notes.md b/release_notes.md
index 45eb76fc6..16451bf49 100644
--- a/release_notes.md
+++ b/release_notes.md
@@ -1,3 +1,12 @@
+## Develop
+### features
+- Training status pipeline now compatible with dynamic pipeline
+- Dynamic DLC task using description file
+- Full photometry lookup table
+
+### bugfixes
+- Fix for untrainable/unbiasable status: don't repopulate if it already exists
+
 ## Release Notes 2.23
 ### Release Notes 2.23.1 2023-06-15
 ### features
diff --git a/requirements-analysis.txt b/requirements-analysis.txt
index 70e552c22..192b25cbe 100644
--- a/requirements-analysis.txt
+++ b/requirements-analysis.txt
@@ -21,3 +21,4 @@ pyqt5
 pyqtgraph
 ipython
 datajoint
+psychofit
diff --git a/requirements.txt b/requirements.txt
index a8d0eade1..544601f8a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -18,12 +18,15 @@ pytest
 requests>=2.22.0
 scikit-learn>=0.22.1
 scipy>=1.7.0
+scikit-image # a widefield requirement missing as of July 2023; we may remove it once wfield has this figured out
+sparse
 seaborn>=0.9.0
 tqdm>=4.32.1
 # ibl libraries
 ibl-neuropixel>=0.4.0
-iblutil>=1.5.0
+iblutil>=1.7.0
 labcams # widefield extractor
-ONE-api>=2.0
+ONE-api>=2.2
 slidingRP>=1.0.0 # steinmetz lab refractory period metrics
-wfield>=0.3.6 # widefield extractor
+wfield==0.3.7 # widefield extractor, frozen for now (2023/07/15) until Joao fixes the latest version
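+# psychofit: maximum likelihood fitting of psychometric functions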
+psychofit