diff --git a/MANIFEST.in b/MANIFEST.in index 0a9960553..2082b46c3 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,9 +1,3 @@ -include ibllib/atlas/allen_structure_tree.csv -include ibllib/atlas/franklin_paxinos_structure_tree.csv -include ibllib/atlas/beryl.npy -include ibllib/atlas/cosmos.npy -include ibllib/atlas/swanson.npy -include ibllib/atlas/mappings.pqt include ibllib/io/extractors/extractor_types.json include ibllib/io/extractors/task_extractor_map.json include brainbox/tests/wheel_test.p diff --git a/brainbox/atlas.py b/brainbox/atlas.py index 3c10ebdf1..8c28feb89 100644 --- a/brainbox/atlas.py +++ b/brainbox/atlas.py @@ -7,7 +7,7 @@ import numpy as np import seaborn as sns import matplotlib.pyplot as plt -from ibllib import atlas +from iblatlas import atlas def _label2values(imlabel, fill_values, ba): diff --git a/brainbox/ephys_plots.py b/brainbox/ephys_plots.py index 56ec34b40..66075cd26 100644 --- a/brainbox/ephys_plots.py +++ b/brainbox/ephys_plots.py @@ -5,7 +5,7 @@ plot_image, plot_probe, plot_scatter, arrange_channels2banks) from brainbox.processing import compute_cluster_average from iblutil.numerical import bincount2D -from ibllib.atlas.regions import BrainRegions +from iblatlas.regions import BrainRegions def image_lfp_spectrum_plot(lfp_power, lfp_freq, chn_coords=None, chn_inds=None, freq_range=(0, 300), diff --git a/brainbox/examples/best_available_channels_from_insertion_id.py b/brainbox/examples/best_available_channels_from_insertion_id.py index 390e9d41f..023aca30c 100644 --- a/brainbox/examples/best_available_channels_from_insertion_id.py +++ b/brainbox/examples/best_available_channels_from_insertion_id.py @@ -1,6 +1,6 @@ from one.api import ONE -from ibllib.atlas import atlas +from iblatlas import atlas from brainbox.io.one import load_channels_from_insertion pid = "8413c5c6-b42b-4ec6-b751-881a54413628" diff --git a/brainbox/examples/docs_load_spike_sorting.py b/brainbox/examples/docs_load_spike_sorting.py index 36611c79c..8a94ba0ad 
100644 --- a/brainbox/examples/docs_load_spike_sorting.py +++ b/brainbox/examples/docs_load_spike_sorting.py @@ -14,7 +14,7 @@ """ from one.api import ONE -from ibllib.atlas import AllenAtlas +from iblatlas.atlas import AllenAtlas from brainbox.io.one import SpikeSortingLoader diff --git a/brainbox/examples/plot_atlas_color_values.py b/brainbox/examples/plot_atlas_color_values.py index 61758aa7f..a62d7a31d 100755 --- a/brainbox/examples/plot_atlas_color_values.py +++ b/brainbox/examples/plot_atlas_color_values.py @@ -1,6 +1,6 @@ import numpy as np import matplotlib.pyplot as plt -from ibllib import atlas +from iblatlas import atlas from brainbox.atlas import plot_atlas diff --git a/brainbox/io/one.py b/brainbox/io/one.py index b63faebbe..73870391c 100644 --- a/brainbox/io/one.py +++ b/brainbox/io/one.py @@ -21,7 +21,8 @@ from iblutil.util import Bunch from ibllib.io.extractors.training_wheel import extract_wheel_moves, extract_first_movement_times -from ibllib.atlas import atlas, AllenAtlas, BrainRegions +from iblatlas.atlas import AllenAtlas, BrainRegions +from iblatlas import atlas from ibllib.pipes import histology from ibllib.pipes.ephys_alignment import EphysAlignment from ibllib.plots import vertical_lines @@ -243,7 +244,7 @@ def channel_locations_interpolation(channels_aligned, channels=None, brain_regio 'x', 'y', 'z', 'acronym', 'axial_um' those are the guide for the interpolation :param channels: Bunch or dictionary of aligned channels containing at least keys 'localCoordinates' - :param brain_regions: None (default) or ibllib.atlas.BrainRegions object + :param brain_regions: None (default) or iblatlas.regions.BrainRegions object if None will return a dict with keys 'localCoordinates', 'mlapdv', 'brainLocationIds_ccf_2017 if a brain region object is provided, outputts a dict with keys 'x', 'y', 'z', 'acronym', 'atlas_id', 'axial_um', 'lateral_um' @@ -372,7 +373,7 @@ def load_channel_locations(eid, probe=None, one=None, aligned=False, brain_atlas An 
instance of ONE (shouldn't be in 'local' mode) aligned : bool Whether to get the latest user aligned channel when not resolved or use histology track - brain_atlas : ibllib.atlas.BrainAtlas + brain_atlas : iblatlas.atlas.BrainAtlas Brain atlas object (default: Allen atlas) Returns ------- @@ -417,7 +418,7 @@ def load_spike_sorting_fast(eid, one=None, probe=None, dataset_types=None, spike :param dataset_types: additional spikes/clusters objects to add to the standard default list :param spike_sorter: name of the spike sorting you want to load (None for default) :param collection: name of the spike sorting collection to load - exclusive with spike sorter name ex: "alf/probe00" - :param brain_regions: ibllib.atlas.regions.BrainRegions object - will label acronyms if provided + :param brain_regions: iblatlas.regions.BrainRegions object - will label acronyms if provided :param nested: if a single probe is required, do not output a dictionary with the probe name as key :param return_collection: (False) if True, will return the collection used to load :return: spikes, clusters, channels (dict of bunch, 1 bunch per probe) @@ -458,7 +459,7 @@ def load_spike_sorting(eid, one=None, probe=None, dataset_types=None, spike_sort :param probe: name of probe to load in, if not given all probes for session will be loaded :param dataset_types: additional spikes/clusters objects to add to the standard default list :param spike_sorter: name of the spike sorting you want to load (None for default) - :param brain_regions: ibllib.atlas.regions.BrainRegions object - will label acronyms if provided + :param brain_regions: iblatlas.regions.BrainRegions object - will label acronyms if provided :param return_collection:(bool - False) if True, returns the collection for loading the data :return: spikes, clusters (dict of bunch, 1 bunch per probe) """ @@ -497,7 +498,7 @@ def load_spike_sorting_with_channel(eid, one=None, probe=None, aligned=False, da spike_sorter : str Name of the spike sorting you
want to load (None for default which is pykilosort if it's available otherwise the default MATLAB kilosort) - brain_atlas : ibllib.atlas.BrainAtlas + brain_atlas : iblatlas.atlas.BrainAtlas Brain atlas object (default: Allen atlas) return_collection: bool Returns an extra argument with the collection chosen @@ -1053,10 +1054,16 @@ def samples2times(self, values, direction='forward'): def pid2ref(self): return f"{self.one.eid2ref(self.eid, as_dict=False)}_{self.pname}" - def raster(self, spikes, channels, save_dir=None, br=None, label='raster', time_series=None): + def raster(self, spikes, channels, save_dir=None, br=None, label='raster', time_series=None, **kwargs): """ - :param spikes: spikes dictionary - :param save_dir: optional if specified + :param spikes: spikes dictionary or Bunch + :param channels: channels dictionary or Bunch. + :param save_dir: if specified save to this directory as "{pid}_{probe}_{label}.png". + Otherwise, plot. + :param br: brain regions object (optional) + :param label: label for saved image (optional, default="raster") + :param time_series: timeseries dictionary for behavioral event times (optional) + :param **kwargs: kwargs passed to `driftmap()` (optional) :return: """ br = br or BrainRegions() @@ -1065,7 +1072,10 @@ def raster(self, spikes, channels, save_dir=None, br=None, label='raster', time_ 'width_ratios': [.95, .05], 'height_ratios': [.1, .9]}, figsize=(16, 9), sharex='col') axs[0, 1].set_axis_off() # axs[0, 0].set_xticks([]) - brainbox.plot.driftmap(spikes['times'], spikes['depths'], t_bin=0.007, d_bin=10, vmax=0.5, ax=axs[1, 0]) + if not kwargs: + # set default raster plot parameters + kwargs = {"t_bin": 0.007, "d_bin": 10, "vmax": 0.5} + brainbox.plot.driftmap(spikes['times'], spikes['depths'], ax=axs[1, 0], **kwargs) title_str = f"{self.pid2ref}, {self.pid} \n" \ f"{spikes['clusters'].size:_} spikes, {np.unique(spikes['clusters']).size:_} clusters" axs[0, 0].title.set_text(title_str) diff --git
a/examples/atlas/Working with ibllib atlas.ipynb b/examples/atlas/Working with ibllib atlas.ipynb deleted file mode 100644 index 9e435aea7..000000000 --- a/examples/atlas/Working with ibllib atlas.ipynb +++ /dev/null @@ -1,365 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "b767b213", - "metadata": {}, - "source": [ - "# Working with IBL atlas object" - ] - }, - { - "cell_type": "markdown", - "id": "bba98311", - "metadata": {}, - "source": [ - "## Getting started" - ] - }, - { - "cell_type": "markdown", - "id": "461b8f34", - "metadata": {}, - "source": [ - "The Allen atlas image and annotation volumes can be accessed using the `ibllib.atlas.AllenAtlas` class. Upon instantiating the class for the first time, the relevant files will be downloaded from the Allen database." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "df873343", - "metadata": {}, - "outputs": [], - "source": [ - "from ibllib.atlas import AllenAtlas\n", - "\n", - "res = 25 # resolution of Atlas, available resolutions are 10, 25 (default) and 50\n", - "brain_atlas = AllenAtlas(res_um=res)" - ] - }, - { - "cell_type": "markdown", - "id": "95a8e4db", - "metadata": {}, - "source": [ - "## Exploring the volumes" - ] - }, - { - "cell_type": "markdown", - "id": "12f16b38", - "metadata": {}, - "source": [ - "The brain_atlas class contains two volumes, the dwi image volume and the annotation label volume" - ] - }, - { - "cell_type": "markdown", - "id": "5f34f56c", - "metadata": {}, - "source": [ - "### 1. 
Image Volume \n", - "Allen atlas dwi average template" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "769b4fd4", - "metadata": {}, - "outputs": [], - "source": [ - "# Access the image volume\n", - "im = brain_atlas.image\n", - "\n", - "# Explore the size of the image volume (ap, ml, dv)\n", - "print(f'Shape of image volume: {im.shape}')\n", - "\n", - "# Plot a coronal slice at ap = -1000um\n", - "ap = -1000 / 1e6 # input must be in metres\n", - "ax = brain_atlas.plot_cslice(ap, volume='image')\n" - ] - }, - { - "cell_type": "markdown", - "id": "1c46789b", - "metadata": {}, - "source": [ - "### Label Volume\n" - ] - }, - { - "cell_type": "markdown", - "id": "72bea21a", - "metadata": {}, - "source": [ - "The label volume contains information about which brain region each voxel in the volume belongs to." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ff7cb654", - "metadata": {}, - "outputs": [], - "source": [ - "# Access the image volume\n", - "lab = brain_atlas.label\n", - "\n", - "# Explore the size of the image volume (ap, ml, dv)\n", - "print(f'Shape of label volume: {lab.shape}')\n", - "\n", - "# Plot a coronal slice at ap = -1000um\n", - "ap = -1000 / 1e6 # input must be in metres\n", - "ax = brain_atlas.plot_cslice(ap, volume='annotation')" - ] - }, - { - "cell_type": "markdown", - "id": "8bd69066", - "metadata": {}, - "source": [ - "The label volume used in the IBL AllenAtlas class differs from the Allen annotation volume in two ways.\n", - "- Each voxel has information about the index of the Allen region rather than the Allen atlas id\n", - "- The volume has been lateralised to differentiate between the left and right hemisphere\n", - "\n", - "To understand this better let's explore the BrainRegions class that contains information about the Allen structure tree." 
- ] - }, - { - "cell_type": "markdown", - "id": "04f601ed", - "metadata": {}, - "source": [ - "## Exploring brain regions" - ] - }, - { - "cell_type": "markdown", - "id": "a1802136", - "metadata": {}, - "source": [ - "The Allen brain region structure tree can be accessed through the class `ibllib.atlas.regions.BrainRegions`. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9c2d097f", - "metadata": {}, - "outputs": [], - "source": [ - "from ibllib.atlas.regions import BrainRegions\n", - "\n", - "brain_regions = BrainRegions()\n", - "\n", - "# Alternatively if you already have the AllenAtlas instantiated you can access it as an attribute\n", - "brain_regions = brain_atlas.regions" - ] - }, - { - "cell_type": "markdown", - "id": "6cf9ab47", - "metadata": {}, - "source": [ - "The brain_regions class has the following data attributes" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1d078160", - "metadata": {}, - "outputs": [], - "source": [ - "brain_regions.__annotations__" - ] - }, - { - "cell_type": "markdown", - "id": "44339559", - "metadata": {}, - "source": [ - "These attributes are the same as the Allen structure tree and for example `id` corresponds to the Allen atlas id while the `name` represents the full anatomical brain region name." - ] - }, - { - "cell_type": "markdown", - "id": "fbe04558", - "metadata": {}, - "source": [ - "The index refers to the index in each of these attribute arrays. For example, index 1 corresponds to the `root` brain region with an atlas id of 977. 
" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0c1fdf7c", - "metadata": {}, - "outputs": [], - "source": [ - "index = 1\n", - "print(brain_regions.id[index])\n", - "print(brain_regions.acronym[index])" - ] - }, - { - "cell_type": "markdown", - "id": "fd8e542c", - "metadata": {}, - "source": [ - "Alternatively, index 1000 corresponds to `PPYd` with an atlas id of 185" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cf56d8d9", - "metadata": {}, - "outputs": [], - "source": [ - "index = 1000\n", - "print(brain_regions.id[index])\n", - "print(brain_regions.acronym[index])" - ] - }, - { - "cell_type": "markdown", - "id": "4c3acedd", - "metadata": {}, - "source": [ - "In the label volume we described above, it is these indices that we are referring to. Therefore, we know all voxels in the volume with a value of 0 will be voxels that lie in `root`, while the voxels that have a value of 1000 will be in `PPYd`" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b607f170", - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "root_voxels = np.where(brain_atlas.label == 1)\n", - "ppyd_voxels = np.where(brain_atlas.label == 1000)" - ] - }, - { - "cell_type": "markdown", - "id": "474bb26b", - "metadata": {}, - "source": [ - "An additional nuance is the lateralisation. If you compare the size of the brain_regions data class to the Allen structure tree. You will see that it has double the number of columms. This is because the IBL brain regions encodes both the left and right hemisphere. We can understand this better by exploring the `brain_regions.id` and `brain_regions.name` at the indices where it transitions between hemispheres." 
- ] - }, - { - "cell_type": "markdown", - "id": "861fef87", - "metadata": {}, - "source": [ - "The `brain_region.id` go from positive Allen atlas ids (right hemisphere) to negative Allen atlas ids (left hemisphere)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "31cceb95", - "metadata": {}, - "outputs": [], - "source": [ - "print(brain_regions.id[1320:1340])" - ] - }, - { - "cell_type": "markdown", - "id": "e2221959", - "metadata": {}, - "source": [ - "The `brain_region.name` go from right to left hemisphere descriptions" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "97079539", - "metadata": {}, - "outputs": [], - "source": [ - "print(brain_regions.name[1320:1340])" - ] - }, - { - "cell_type": "markdown", - "id": "7f35aa26", - "metadata": {}, - "source": [ - "In the label volume, we can therefore differentiate between left and right hemisphere voxels for the same brain region. First we will use a method in the brain_region class to find out the index of left and right `CA1`." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4c93c1a0", - "metadata": {}, - "outputs": [], - "source": [ - "brain_regions.acronym2index('CA1')" - ] - }, - { - "cell_type": "markdown", - "id": "d8bb5fc2", - "metadata": {}, - "source": [ - "The method `acronym2index` returns a tuple, with the first value being a list of acronyms passed in and the second value giving the indices in the array that correspond to the left and right hemispheres for this region. 
We can now use these indices to search in the label volume" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0680ca09", - "metadata": {}, - "outputs": [], - "source": [ - "CA1_right = np.where(brain_atlas.label == 458)\n", - "CA1_left = np.where(brain_atlas.label == 1785)" - ] - }, - { - "cell_type": "markdown", - "id": "42cc166b", - "metadata": {}, - "source": [ - "## Coordinate systems" - ] - }, - { - "cell_type": "markdown", - "id": "7ffcd53b", - "metadata": {}, - "source": [ - "The voxles can be translated to 3D space. In the IBL all xyz coordinates" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.16" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} \ No newline at end of file diff --git a/examples/atlas/atlas_circular_pyramidal_flatmap.ipynb b/examples/atlas/atlas_circular_pyramidal_flatmap.ipynb deleted file mode 100644 index cb84f8019..000000000 --- a/examples/atlas/atlas_circular_pyramidal_flatmap.ipynb +++ /dev/null @@ -1,248 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "f199bec2", - "metadata": {}, - "source": [ - "# Plotting brain region values on circular flatmap" - ] - }, - { - "cell_type": "markdown", - "id": "94c08e66", - "metadata": {}, - "source": [ - "This example walks through various ways to overlay brain region values on a circular flatmap" - ] - }, - { - "cell_type": "markdown", - "id": "17fd07ec", - "metadata": {}, - "source": [ - "## The circular flatmap" - ] - }, - { - "cell_type": "markdown", - "id": "3ca88864", - "metadata": {}, - "source": [ - "The circular flatmap is obtained by sampling the volume using concentric circles through the brain." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1178246b", - "metadata": {}, - "outputs": [], - "source": [ - "from ibllib.atlas import FlatMap\n", - "flmap_cr = FlatMap(flatmap='circles')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "490614c3", - "metadata": {}, - "outputs": [], - "source": [ - "# Display the concentric circles used in flatmap\n", - "ax = flmap_cr.plot_top(volume='image')\n", - "ax.plot(flmap_cr.ml_scale * 1e6, flmap_cr.ap_scale * 1e6)" - ] - }, - { - "cell_type": "markdown", - "id": "135dd187", - "metadata": {}, - "source": [ - "This results in a flatmap that can be displayed in the following way" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8b8c4223", - "metadata": {}, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\n", - "fig, ax = plt.subplots(figsize=(18,4))\n", - "flmap_cr.plot_flatmap(ax)" - ] - }, - { - "cell_type": "markdown", - "id": "ec15f88c", - "metadata": {}, - "source": [ - "It is also possible to display this flatmap such that each circle is stacked on top of eachother. For this, the **pyramid** flatmap should be used" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7461e3f8", - "metadata": {}, - "outputs": [], - "source": [ - "# Instantiate flatmap with circles arranged vetically on top of eachother\n", - "flmap_py = FlatMap(flatmap='pyramid')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1f78b2ab", - "metadata": {}, - "outputs": [], - "source": [ - "fig, ax = plt.subplots(figsize=(8, 8))\n", - "flmap_py.plot_flatmap(ax=ax)" - ] - }, - { - "cell_type": "markdown", - "id": "e7af738a", - "metadata": {}, - "source": [ - "## Data preparation" - ] - }, - { - "cell_type": "markdown", - "id": "40fa09d0", - "metadata": {}, - "source": [ - "In order to plot brain regions values on the flatmap an array of acronyms and an array of values corresponding to each acronym must be provided. 
A detailed overview of how to prepare your data can be found [here](https://int-brain-lab.github.io/iblenv/notebooks_external/atlas_plotting_scalar_on_slice.html#Data-preparation)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "20a1db83", - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "# prepare array of acronyms\n", - "acronyms = np.array(['VPM', 'PO', 'LP', 'CA1', 'DG-mo', 'VISa5', 'SSs5'])\n", - "# assign data to each acronym\n", - "values = np.arange(acronyms.size)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e6ae51d0", - "metadata": {}, - "outputs": [], - "source": [ - "from ibllib.atlas.regions import BrainRegions\n", - "br = BrainRegions()\n", - "# prepare array of acronyms with beryl mapping\n", - "acronyms_beryl = np.unique(br.acronym2acronym(acronyms, mapping='Beryl'))\n", - "values_beryl = np.arange(acronyms_beryl.size)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3724b968", - "metadata": {}, - "outputs": [], - "source": [ - "# prepare different values for left and right hemipshere for Beryl acronyms\n", - "values_beryl_lh = np.random.randint(0, 10, acronyms_beryl.size)\n", - "values_beryl_rh = np.random.randint(0, 10, acronyms_beryl.size)\n", - "values_beryl_lr = np.c_[values_beryl_lh, values_beryl_rh]" - ] - }, - { - "cell_type": "markdown", - "id": "74fe528a", - "metadata": {}, - "source": [ - "## Examples" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "dfa2d623", - "metadata": {}, - "outputs": [], - "source": [ - "from ibllib.atlas.plots import plot_scalar_on_flatmap\n", - "# Plot region values on the left hemisphere of circle flatmap overlaid on brain region boundaries using Allen mapping\n", - "fig, ax = plt.subplots(figsize=(18,4))\n", - "fig, ax = plot_scalar_on_flatmap(acronyms, values, hemisphere='left', mapping='Allen', flmap_atlas=flmap_cr, ax=ax)" - ] - }, - { - "cell_type": "code", - "execution_count": 
null, - "id": "cc78a1c7", - "metadata": {}, - "outputs": [], - "source": [ - "# Plot region values on the both hemispheres of circle flatmap overlaid on the dwi Allen image using Beryl mapping\n", - "fig, ax = plt.subplots(figsize=(18,4))\n", - "fig, ax = plot_scalar_on_flatmap(acronyms_beryl, values_beryl, hemisphere='both', mapping='Beryl', background='image', \n", - " cmap='Reds', flmap_atlas=flmap_cr, ax=ax)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "37bf7bd8", - "metadata": {}, - "outputs": [], - "source": [ - "# Plot region values on the right hemisphere of pyramidal flatmap overlaid on the dwi Allen image using Allen mapping\n", - "fig, ax = plt.subplots(figsize=(8,8))\n", - "fig, ax = plot_scalar_on_flatmap(acronyms, values, hemisphere='right', mapping='Allen', background='image', \n", - " cmap='Reds', flmap_atlas=flmap_py, ax=ax)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "28f7f30c", - "metadata": {}, - "outputs": [], - "source": [ - "# Plot two column region values on the both hemispheres of pyramidal flatmap overlaid on brain region boundaries \n", - "# using Beryl mapping\n", - "fig, ax = plt.subplots(figsize=(8,8))\n", - "fig, ax = plot_scalar_on_flatmap(acronyms_beryl, values_beryl_lr, hemisphere='both', mapping='Beryl', \n", - " background='boundary', cmap='Blues', flmap_atlas=flmap_py, ax=ax)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python [conda env:iblenv] *", - "language": "python", - "name": "conda-env-iblenv-py" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/examples/atlas/atlas_dorsal_cortex_flatmap.ipynb b/examples/atlas/atlas_dorsal_cortex_flatmap.ipynb deleted file mode 100644 index 
5ac2d0f69..000000000 --- a/examples/atlas/atlas_dorsal_cortex_flatmap.ipynb +++ /dev/null @@ -1,203 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "fdc19afd", - "metadata": {}, - "source": [ - "# Plotting brain region values on cortical flatmap" - ] - }, - { - "cell_type": "markdown", - "id": "328a2d74", - "metadata": {}, - "source": [ - "This example walks through various ways to overlay brain region values on a cortical flatmap" - ] - }, - { - "cell_type": "markdown", - "id": "bd3eecf6", - "metadata": {}, - "source": [ - "## The dorsal cortex flatmap" - ] - }, - { - "cell_type": "markdown", - "id": "8354c14e", - "metadata": {}, - "source": [ - "The **dorsal_cortex** flatmap comprises a flattened volume of cortex up to depth 2000 um" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "33fa3ec7", - "metadata": {}, - "outputs": [], - "source": [ - "from ibllib.atlas import FlatMap\n", - "\n", - "res = 25\n", - "flmap = FlatMap(flatmap='dorsal_cortex', res_um=res)\n", - "\n", - "# Plot flatmap at depth = 0 \n", - "flmap.plot_flatmap(int(0 / res))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "dfdd67e8", - "metadata": {}, - "outputs": [], - "source": [ - "# Plot flatmap at depth = 800 um\n", - "flmap.plot_flatmap(int(800 / res))" - ] - }, - { - "cell_type": "markdown", - "id": "6e4a49d8", - "metadata": {}, - "source": [ - "## Data preparation" - ] - }, - { - "cell_type": "markdown", - "id": "919372e5", - "metadata": {}, - "source": [ - "In order to plot brain regions values on the flatmap an array of acronyms and an array of values corresponding to each acronym must be provided. 
A detailed overview of how to prepare your data can be found [here](https://int-brain-lab.github.io/iblenv/notebooks_external/atlas_plotting_scalar_on_slice.html#Data-preparation)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0de1b18b", - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "# prepare array of acronyms\n", - "acronyms = np.array(['ACAd1', 'ACAv1', 'AId1', 'AIp1', 'AIv1', 'AUDd1', 'AUDp1','AUDpo1', 'AUDv1', \n", - " 'SSp-m1', 'SSp-n1', 'SSp-tr1', 'SSp-ul1','SSp-un1', 'SSs1', \n", - " 'VISC1', 'VISa1', 'VISal1', 'VISam1', 'VISl1', 'VISli1', 'VISp1', 'VISp2/3', 'VISpl1', 'VISpm1', \n", - " 'SSp-n2/3', 'SSp-tr2/3', 'SSp-ul2/3', 'SSp-un2/3', 'SSs2/3',\n", - " 'VISC2/3', 'VISa2/3', 'VISal2/3', 'VISam2/3', 'VISl2/3','VISli2/3', 'VISp2/3', 'VISpl1', 'VISpl2/3'])\n", - "# assign data to each acronym\n", - "values = np.arange(acronyms.size)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b9eb9b3a", - "metadata": {}, - "outputs": [], - "source": [ - "from ibllib.atlas.regions import BrainRegions\n", - "br = BrainRegions()\n", - "# prepare array of acronyms with beryl mapping\n", - "acronyms_beryl = np.unique(br.acronym2acronym(acronyms, mapping='Beryl'))\n", - "values_beryl = np.arange(acronyms_beryl.size)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "251385a1", - "metadata": {}, - "outputs": [], - "source": [ - "# prepare different values for left and right hemipshere for Beryl acronyms\n", - "values_beryl_lh = np.random.randint(0, 10, acronyms_beryl.size)\n", - "values_beryl_rh = np.random.randint(0, 10, acronyms_beryl.size)\n", - "values_beryl_lr = np.c_[values_beryl_lh, values_beryl_rh]" - ] - }, - { - "cell_type": "markdown", - "id": "6ad92aca", - "metadata": {}, - "source": [ - "## Examples" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e26dcf4c", - "metadata": {}, - "outputs": [], - "source": [ - "from ibllib.atlas.plots import 
plot_scalar_on_flatmap\n", - "\n", - "# Plot region values on the left hemisphere at depth=0um overlaid on boundary image using Allen mapping\n", - "fig, ax = plot_scalar_on_flatmap(acronyms, values, depth=0, mapping='Allen', hemisphere='left', background='boundary',\n", - " cmap='viridis', flmap_atlas=flmap)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "de4a63a0", - "metadata": {}, - "outputs": [], - "source": [ - "# Plot region values on the right hemisphere at depth=200um overlaid on boundary image using Allen mapping and show cbar\n", - "fig, ax, cbar = plot_scalar_on_flatmap(acronyms, values, depth=200, mapping='Allen', hemisphere='right', background='boundary',\n", - " cmap='Reds', flmap_atlas=flmap, show_cbar=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "da848f40", - "metadata": {}, - "outputs": [], - "source": [ - "# Plot single column region values on the both hemisphere at depth=800um overlaid dwi Allen image using Beryl mapping\n", - "fig, ax = plot_scalar_on_flatmap(acronyms_beryl, values_beryl, depth=800, mapping='Beryl', hemisphere='both', \n", - " background='image', cmap='plasma', flmap_atlas=flmap)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "322f3a04", - "metadata": {}, - "outputs": [], - "source": [ - "# Plot two column region values on the both hemispheres at depth=0um on boundary image using Allen mapping\n", - "fig, ax, cbar = plot_scalar_on_flatmap(acronyms_beryl, values_beryl_lr, depth=0, mapping='Beryl', hemisphere='both', \n", - " background='boundary', cmap='Blues', flmap_atlas=flmap, show_cbar=True)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python [conda env:iblenv] *", - "language": "python", - "name": "conda-env-iblenv-py" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - 
"pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/examples/atlas/atlas_mapping.ipynb b/examples/atlas/atlas_mapping.ipynb deleted file mode 100644 index 41a050f8a..000000000 --- a/examples/atlas/atlas_mapping.ipynb +++ /dev/null @@ -1,650 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "b8ea3853", - "metadata": {}, - "source": [ - "# Atlas mapping" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2dae63ed", - "metadata": {}, - "outputs": [], - "source": [ - "# import brain atlas and brain regions objects\n", - "from ibllib.atlas import AllenAtlas\n", - "from ibllib.atlas.regions import BrainRegions\n", - "ba = AllenAtlas()\n", - "br = BrainRegions() # br is also an attribute of ba so could to br = ba.regions" - ] - }, - { - "cell_type": "markdown", - "id": "279114b7", - "metadata": {}, - "source": [ - "## Available Mappings\n", - "Four mappings are currently available within the IBL, these are:\n", - "\n", - "1. Allen Atlas - total of 1328 annotation regions provided by Allen Atlas\n", - "2. Beryl Atlas - total of 308 annotation regions\n", - "3. Cosmos Atlas - total of 12 annotation regions\n", - "4. Swanson Atlas - total of 319 annotation regions (*)\n", - "\n", - "(*) Note: The dedicated mapping for plotting on Swanson flatmap is explained in this [webpage](https://int-brain-lab.github.io/iblenv/notebooks_external/atlas_swanson_flatmap.html)." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3c9d922c", - "metadata": { - "pycharm": { - "is_executing": true - } - }, - "outputs": [], - "source": [ - "# create figure\n", - "import matplotlib.pyplot as plt\n", - "fig, axs = plt.subplots(1, 3, figsize=(15, 18))\n", - "\n", - "# plot coronal slice at ap = -2000 um\n", - "ap = -2000 / 1e6\n", - "# Allen mapping\n", - "ba.plot_cslice(ap, volume='annotation', mapping='Allen', ax=axs[0])\n", - "_ = axs[0].set_title('Allen')\n", - "# Beryl mapping\n", - "ba.plot_cslice(ap, volume='annotation', mapping='Beryl', ax=axs[1])\n", - "_ = axs[1].set_title('Beryl')\n", - "# Cosmos mapping\n", - "ba.plot_cslice(ap, volume='annotation', mapping='Cosmos', ax=axs[2])\n", - "_ = axs[2].set_title('Cosmos')" - ] - }, - { - "cell_type": "markdown", - "source": [ - "The `br.mappings` contains the `index` of the region for a given mapping.\n", - "You can use these indices to find for example the acronyms contained in Cosmos:" - ], - "metadata": { - "collapsed": false - } - }, - { - "cell_type": "code", - "execution_count": null, - "outputs": [], - "source": [ - "import numpy as np\n", - "cosmos_indices = np.unique(br.mappings['Cosmos'])\n", - "br.acronym[cosmos_indices]" - ], - "metadata": { - "collapsed": false, - "pycharm": { - "name": "#%%\n" - } - } - }, - { - "cell_type": "markdown", - "source": [ - "You can check that all brain regions within these 4 mappings are contained in the Allen parcellation, for example for Beryl:" - ], - "metadata": { - "collapsed": false - } - }, - { - "cell_type": "code", - "execution_count": null, - "outputs": [], - "source": [ - "set(np.unique(br.mappings['Beryl'])).difference(set(np.unique(br.mappings['Allen'])))\n", - "# Expect to return an empty set" - ], - "metadata": { - "collapsed": false, - "pycharm": { - "name": "#%%\n" - } - } - }, - { - "cell_type": "markdown", - "id": "a8460f28", - "metadata": {}, - "source": [ - "## Understanding the mappings\n", - "The mappings 
store the highest level annotation (or parent node) that should be considered. Any regions that are children of these nodes are assigned the same annotation as their parent. \n", - "\n", - "For example, consider the region with the acronym **MDm** (Mediodorsal nucleus of the thalamus, medial part). Firstly, to navigate ourselves, we can find the acronyms of all the ancestors to this region," - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "08393219", - "metadata": {}, - "outputs": [], - "source": [ - "# First find the atlas_id associated with acronym MDm\n", - "atlas_id = br.acronym2id('MDm')\n", - "# Then find the acronyms of the ancestors of this region\n", - "print(br.ancestors(ids=atlas_id)['acronym'])" - ] - }, - { - "cell_type": "markdown", - "id": "9081a1b3", - "metadata": {}, - "source": [ - "We can then take a look at what acronym this region will be assigned under the different mappings. Under the Allen mapping we expect it to be assigned the same acronym as this is the lowest level region parcelation that we use." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ef838f27", - "metadata": {}, - "outputs": [], - "source": [ - "print(br.acronym2acronym('MDm', mapping='Allen'))" - ] - }, - { - "cell_type": "markdown", - "id": "2cc3d224", - "metadata": {}, - "source": [ - "Under the Beryl mapping, **MDm** is given the acronym of it's parent, **MD**" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3aa34ba5", - "metadata": {}, - "outputs": [], - "source": [ - "print(br.acronym2acronym('MDm', mapping='Beryl'))" - ] - }, - { - "cell_type": "markdown", - "id": "f574d115", - "metadata": {}, - "source": [ - "Under the Cosmos mapping, it is assigned to the region **TH**" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f74fa398", - "metadata": {}, - "outputs": [], - "source": [ - "print(br.acronym2acronym('MDm', mapping='Cosmos'))" - ] - }, - { - "cell_type": "markdown", - "source": [ - "Under the Swanson mapping, it is assigned to the region **MD**" - ], - "metadata": { - "collapsed": false - } - }, - { - "cell_type": "code", - "execution_count": null, - "outputs": [], - "source": [ - "print(br.acronym2acronym('MDm', mapping='Swanson'))" - ], - "metadata": { - "collapsed": false, - "pycharm": { - "name": "#%%\n" - } - } - }, - { - "cell_type": "markdown", - "id": "23ed91cb", - "metadata": {}, - "source": [ - "Therefore any clusters that are assigned an acronym **MDm** in the Allen mapping, will instead be assigned to the region **MD** in the Beryl or Swanson mapping, and **TH** in the Cosmos mapping." - ] - }, - { - "cell_type": "markdown", - "id": "509bcee9", - "metadata": {}, - "source": [ - "If a region is above (i.e. parent) of what is included in a mapping, the value for this region is set to root. 
For example **TH** is not included in the Beryl nor Swanson mapping" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "76962ae7", - "metadata": {}, - "outputs": [], - "source": [ - "print(br.acronym2acronym('TH', mapping='Beryl'))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "outputs": [], - "source": [ - "print(br.acronym2acronym('TH', mapping='Swanson'))" - ], - "metadata": { - "collapsed": false, - "pycharm": { - "name": "#%%\n" - } - } - }, - { - "cell_type": "markdown", - "source": [ - "However, a child region that is not included in the mapping will be returned as its closest parent in the mapping. For example, VISa is not included in Swanson, but its parent PLTp is:" - ], - "metadata": { - "collapsed": false - } - }, - { - "cell_type": "code", - "execution_count": null, - "outputs": [], - "source": [ - "print(br.acronym2acronym('VISa', mapping='Swanson'))" - ], - "metadata": { - "collapsed": false, - "pycharm": { - "name": "#%%\n" - } - } - }, - { - "cell_type": "markdown", - "source": [], - "metadata": { - "collapsed": false - } - }, - { - "cell_type": "markdown", - "id": "607f1e1b", - "metadata": {}, - "source": [ - "## Lateralisation\n", - "Lateralised versions of each of the three mappings are also available. This allow regions on the left and right hemispheres of the brain to be differentiated. \n", - "\n", - "The convention used is that regions in the left hemisphere have negative atlas ids whereas those on the right hemisphere have positive atlas ids.\n", - "\n", - "For the non lateralised mappings the atlas id is always positive regardless of whether the region lies in the left or right hemisphere. Aggregating values over regions could result, therefore, in values from different hemispheres being considered together.\n", - "\n", - "One thing to be aware of, is that while lateralised mappings return distinct atlas ids for the left and right hemispheres, the acronyms returned are not lateralised. 
\n", - "\n", - "For example consider finding the atlas id when mapping the acronym **MDm** onto the Beryl atlas. When specifying the left hemisphere, the returned atlas id is negative" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "30f5e956", - "metadata": {}, - "outputs": [], - "source": [ - "# Left hemisphere gives negative atlas id\n", - "print(br.acronym2id('MDm', mapping='Beryl-lr', hemisphere='left'))" - ] - }, - { - "cell_type": "markdown", - "id": "aa94a06e", - "metadata": {}, - "source": [ - "When specifying the right hemisphere, the returned atlas id is positive" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9125fc7c", - "metadata": {}, - "outputs": [], - "source": [ - "# Left hemisphere gives negative atlas id\n", - "print(br.acronym2id('MDm', mapping='Beryl-lr', hemisphere='right'))" - ] - }, - { - "cell_type": "markdown", - "id": "be5e0654", - "metadata": {}, - "source": [ - "However, when converting from atlas id to acronym, regardless of whether we specify a negative (left hemisphere) or positive (right hemisphere) value, the returned acronym is always the same" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "aaa8b3f5", - "metadata": {}, - "outputs": [], - "source": [ - "print(br.id2acronym(-362, mapping='Beryl-lr'))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "77291362", - "metadata": {}, - "outputs": [], - "source": [ - "print(br.id2acronym(362, mapping='Beryl-lr'))" - ] - }, - { - "cell_type": "markdown", - "id": "6dc83925", - "metadata": {}, - "source": [ - "## How to map your data" - ] - }, - { - "cell_type": "markdown", - "id": "9606cc0b", - "metadata": {}, - "source": [ - "### Mapping from mlapdv coordinates\n", - "The recommended and most versatile way to find the locations of clusters under different mappings is to use the mlapdv coordinates of the clusters. 
Given a probe insertion id, the clusters object can be loaded in using the following code" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "bc33122d", - "metadata": {}, - "outputs": [], - "source": [ - "from brainbox.io.one import SpikeSortingLoader\n", - "from one.api import ONE\n", - "one = ONE()\n", - "\n", - "pid = 'da8dfec1-d265-44e8-84ce-6ae9c109b8bd'\n", - "\n", - "sl = SpikeSortingLoader(pid=pid, one=one, atlas=ba)\n", - "spikes, clusters, channels = sl.load_spike_sorting()\n", - "clusters = sl.merge_clusters(spikes, clusters, channels)" - ] - }, - { - "cell_type": "markdown", - "id": "2a274d98", - "metadata": {}, - "source": [ - "You will find that the cluster object returned already contains an atlas_id attribute. These are the atlas ids that are obtained using the default mapping - **non lateralised Allen**. For this mapping regardless of whether the clusters lie in the right or left hemisphere the clusters are assigned positive atlas ids" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "03881f96", - "metadata": {}, - "outputs": [], - "source": [ - "print(clusters['atlas_id'][0:10])" - ] - }, - { - "cell_type": "markdown", - "id": "2170a215", - "metadata": {}, - "source": [ - "We can obtain the mlapdv coordinates of the clusters and explore in which hemisphere the clusters lie, clusters with negative x lie on the left hemisphere." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "74617c4b", - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "mlapdv = np.c_[clusters['x'], clusters['y'], clusters['z']] # x = ml, y = ap, z = dv\n", - "clus_LH = np.sum(mlapdv[:, 0] < 0)\n", - "clus_RH = np.sum(mlapdv[:, 0] > 0)\n", - "print(f'Total clusters = {len(mlapdv)}, LH clusters = {clus_LH}, RH clusters = {clus_RH}')" - ] - }, - { - "cell_type": "markdown", - "id": "ac3c4252", - "metadata": {}, - "source": [ - "To get a better understanding of the difference between using lateralised and non-lateralised mappings, let's also make a manipulated version of the mlapdv positions where the first 5 clusters have been moved into the right hemisphere and call this `mlapdv_rh`" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7e2c981f", - "metadata": {}, - "outputs": [], - "source": [ - "mlapdv_rh = np.copy(mlapdv)\n", - "mlapdv_rh[0:5, 0] = -1 * mlapdv_rh[0:5, 0]\n", - "clus_LH = np.sum(mlapdv_rh[:, 0] < 0)\n", - "clus_RH = np.sum(mlapdv_rh[:, 0] > 0)\n", - "print(f'Total clusters = {len(mlapdv_rh)}, LH clusters = {clus_LH}, RH clusters = {clus_RH}')" - ] - }, - { - "cell_type": "markdown", - "id": "a1323aed", - "metadata": {}, - "source": [ - "To find the locations of the clusters in the brain from the mlapdv position we can use the [get_labels](https://int-brain-lab.github.io/iblenv/_autosummary/ibllib.atlas.atlas.html#ibllib.atlas.atlas.BrainAtlas.get_labels) method in the AllenAtlas object. First let's explore the output of using a non lateralised mapping." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d332a29a", - "metadata": {}, - "outputs": [], - "source": [ - "atlas_id_Allen = ba.get_labels(mlapdv, mapping='Allen')\n", - "atlas_id_Allen_rh = ba.get_labels(mlapdv_rh, mapping='Allen')\n", - "print(f'Non-lateralised Allen mapping ids using mlapdv: {atlas_id_Allen[0:10]}')\n", - "print(f'Non-lateralised Allen mapping ids using mlapdv_rh: {atlas_id_Allen_rh[0:10]}')" - ] - }, - { - "cell_type": "markdown", - "id": "5a2a5225", - "metadata": {}, - "source": [ - "Notice that regardless of whether the clusters lie in the left or right hemisphere the sign of the atlas id is the same. The result of this mapping is also equivalent the default output from `clusters['atlas_id']`" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "76664a2d", - "metadata": {}, - "outputs": [], - "source": [ - "np.array_equal(clusters['atlas_id'], atlas_id_Allen)" - ] - }, - { - "cell_type": "markdown", - "id": "185f9f6b", - "metadata": {}, - "source": [ - "Now if we use the lateralised mapping, we notice that the clusters in the left hemisphere have been assigned negative atlas ids whereas those in the right hemisphere have positive atlas ids" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "af431f8d", - "metadata": {}, - "outputs": [], - "source": [ - "atlas_id_Allen = ba.get_labels(mlapdv, mapping='Allen-lr')\n", - "atlas_id_Allen_rh = ba.get_labels(mlapdv_rh, mapping='Allen-lr')\n", - "print(f'Lateralised Allen mapping ids using mlapdv: {atlas_id_Allen[0:10]}')\n", - "print(f'Lateralised Allen mapping ids using mlapdv_rh: {atlas_id_Allen_rh[0:10]}')" - ] - }, - { - "cell_type": "markdown", - "id": "b0a09001", - "metadata": {}, - "source": [ - "By changing the mapping argument that we pass in, we can also easily obtain the atlas ids for the Beryl and Cosmos mappings" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "91143d6a", - "metadata": {}, - 
"outputs": [], - "source": [ - "atlas_id_Beryl = ba.get_labels(mlapdv_rh, mapping='Beryl-lr')\n", - "atlas_id_Cosmos = ba.get_labels(mlapdv_rh, mapping='Cosmos')\n", - "print(f'Lateralised Beryl mapping ids using mlapdv_rh: {atlas_id_Beryl[0:10]}')\n", - "print(f'Non-lateralised Cosmos mapping ids using mlapdv_rh: {atlas_id_Cosmos[0:10]}')" - ] - }, - { - "cell_type": "markdown", - "id": "c229e12d", - "metadata": {}, - "source": [ - "### Mapping from atlas ids\n", - "Methods are available that allow you to translate atlas ids from one mapping to another. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8be221d9", - "metadata": {}, - "outputs": [], - "source": [ - "# map atlas ids from lateralised Allen to lateralised Beryl\n", - "atlas_id_Allen = ba.get_labels(mlapdv_rh, mapping='Allen-lr') # lateralised Allen\n", - "\n", - "remap_beryl = br.id2id(atlas_id_Allen, mapping='Beryl-lr')\n", - "print(f'Lateralised Beryl mapping ids using remap: {remap_beryl[0:10]}')\n", - "\n", - "# map atlas ids from lateralised Allen to non-lateralised Cosmos\n", - "remap_cosmos = br.id2id(atlas_id_Allen_rh, mapping='Cosmos')\n", - "print(f'Non-lateralised Cosmos mapping ids using remap: {remap_cosmos[0:10]}')" - ] - }, - { - "cell_type": "markdown", - "id": "2cf81730", - "metadata": {}, - "source": [ - "When remapping with atlas ids it is not possible to map from \n", - "\n", - "1. A non-lateralised to a lateralised mapping. \n", - "2. From a higher mapping to a lower one (e.g cannot map from Beryl to Allen, or Cosmos to Allen)\n", - "3. From Beryl to Cosmos\n", - "\n", - "This is why it is recommened to use mlapdv coordinates for remappings as it allows complete flexibility" - ] - }, - { - "cell_type": "markdown", - "id": "ec294cf4", - "metadata": {}, - "source": [ - "### Converting to acronyms\n", - "Methods are available to convert between atlas ids and acronyms. 
Note that when converting to acronyms, even if the atlas ids are lateralised the returned acronyms are not" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7f9428b9", - "metadata": {}, - "outputs": [], - "source": [ - "atlas_id_Allen = ba.get_labels(mlapdv_rh, mapping='Allen-lr') # lateralised Allen\n", - "acronym_Allen = br.id2acronym(atlas_id_Allen, mapping='Allen-lr')\n", - "print(f'Acronyms of lateralised Allen ids: {acronym_Allen[0:10]}')\n", - "\n", - "atlas_id_Allen = ba.get_labels(mlapdv_rh, mapping='Allen-lr') # Non-ateralised Allen\n", - "acronym_Allen = br.id2acronym(atlas_id_Allen)\n", - "print(f'Acronyms of non-lateralised Allen: {acronym_Allen[0:10]}')" - ] - }, - { - "cell_type": "markdown", - "id": "7cd9eb73", - "metadata": {}, - "source": [ - "It is also possible to simultaneously remap the acronyms with these methods" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0f82948f", - "metadata": {}, - "outputs": [], - "source": [ - "acronym_Cosmos = br.id2acronym(atlas_id_Allen, mapping='Cosmos-lr')\n", - "print(acronym_Cosmos[0:10])" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python [conda env:iblenv] *", - "language": "python", - "name": "conda-env-iblenv-py" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} \ No newline at end of file diff --git a/examples/atlas/atlas_plotting_points_on_slice.ipynb b/examples/atlas/atlas_plotting_points_on_slice.ipynb deleted file mode 100644 index d65e687f8..000000000 --- a/examples/atlas/atlas_plotting_points_on_slice.ipynb +++ /dev/null @@ -1,251 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "87d1873a", - "metadata": {}, - "source": [ - "# Plotting cluster locations on 
histology slices" - ] - }, - { - "cell_type": "markdown", - "id": "77834a05", - "metadata": {}, - "source": [ - "This example walks through various ways to display the 3D location of clusters on histology slices" - ] - }, - { - "cell_type": "markdown", - "id": "5011bf58", - "metadata": {}, - "source": [ - "## Data Preparation" - ] - }, - { - "cell_type": "markdown", - "id": "60f93667", - "metadata": {}, - "source": [ - "For all examples below the xyz coordinates of each point and an array of values must be provided. Here we load in the spikesorting data for an example probe insertion and set the xyz coordinates to the coordinates of the clusters and the array of values to the firing rate." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a5d22548", - "metadata": {}, - "outputs": [], - "source": [ - "from one.api import ONE\n", - "from brainbox.io.one import SpikeSortingLoader\n", - "from ibllib.atlas import AllenAtlas\n", - "import numpy as np\n", - "\n", - "one = ONE()\n", - "ba = AllenAtlas()\n", - "pid = 'da8dfec1-d265-44e8-84ce-6ae9c109b8bd'\n", - "sl = SpikeSortingLoader(pid=pid, one=one, atlas=ba)\n", - "spikes, clusters, channels = sl.load_spike_sorting()\n", - "clusters = sl.merge_clusters(spikes, clusters, channels)\n", - "\n", - "# Extract xyz coords from clusters dict\n", - "# Here we will set all ap values to a chosen value for visualisation purposes\n", - "xyz = np.c_[clusters['x'], np.ones_like(clusters['x']) * 400 / 1e6, clusters['z']]\n", - "values = clusters['firing_rate']" - ] - }, - { - "cell_type": "markdown", - "id": "c84d11bd", - "metadata": {}, - "source": [ - "## Example 1: Aggregation methods" - ] - }, - { - "cell_type": "markdown", - "id": "b9340c33", - "metadata": {}, - "source": [ - "The values of points that lie within the same voxel within the volume can be aggregated together in different ways by changing the `aggr` argument. 
Below shows an example where values in each voxel have been aggregated according to the average firing rate of the clusters (left) and according to the count of clusters in each voxel (right)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f35b8827", - "metadata": {}, - "outputs": [], - "source": [ - "from ibllib.atlas.plots import plot_points_on_slice\n", - "import matplotlib.pyplot as plt\n", - "\n", - "fig, axs = plt.subplots(1, 2, figsize=(16,4))\n", - "\n", - "# Plot points on the coronal slice at ap=400um overlaid overlaid on brain region boundaries using Allen mapping \n", - "# and a 3D gaussian kernel with fwhm=100um is applied\n", - "\n", - "# Values in the same voxel are aggregated by the mean\n", - "fig, ax = plot_points_on_slice(xyz, values=values, coord=400, slice='coronal', mapping='Allen', background='boundary', \n", - " cmap='Reds', aggr='mean', fwhm=100, brain_atlas=ba, ax=axs[0])\n", - "\n", - "# Values in the same voxel are aggregated by the count\n", - "# N.B. 
can also pass values=None in this case as they are not used in the computation\n", - "fig, ax, cbar = plot_points_on_slice(xyz, values=values, coord=400, slice='coronal', mapping='Allen', background='boundary', \n", - " cmap='Reds', aggr='count', fwhm=100, brain_atlas=ba, ax=axs[1], show_cbar=True)" - ] - }, - { - "cell_type": "markdown", - "id": "54650899", - "metadata": {}, - "source": [ - "The different options for aggregation are listed in the docstring of the function" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0b6ad58e", - "metadata": {}, - "outputs": [], - "source": [ - "help(plot_points_on_slice)" - ] - }, - { - "cell_type": "markdown", - "id": "ea0cf9a4", - "metadata": {}, - "source": [ - "## Example 2: Applying gaussian kernels with varying FWHM" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2ba3ca84", - "metadata": {}, - "outputs": [], - "source": [ - "# Plot points on the coronal slice at ap=400um overlaid overlaid on Allen dwi image using Cosmos mapping with\n", - "# values aggregated by max\n", - "\n", - "figs, axs = plt.subplots(1, 3, figsize=(18,4))\n", - "\n", - "# FWHM of 100 um\n", - "fig, ax = plot_points_on_slice(xyz, values=values, coord=400, slice='coronal', background='image', \n", - " cmap='Purples', aggr='max', fwhm=100, brain_atlas=ba, ax=axs[0])\n", - "\n", - "# FWHM of 300 um\n", - "fig, ax = plot_points_on_slice(xyz, values=values, coord=400, slice='coronal', background='image', \n", - " cmap='Purples', aggr='max', fwhm=300, brain_atlas=ba, ax=axs[1])\n", - "\n", - "# FWHM of 0 um\n", - "# if fwhm=0 no gaussian kernal applied\n", - "fig, ax = plot_points_on_slice(xyz, values=values, coord=400, slice='coronal', background='image', \n", - " cmap='Purples', aggr='max', fwhm=0, brain_atlas=ba, ax=axs[2])" - ] - }, - { - "cell_type": "markdown", - "id": "a0aca146", - "metadata": {}, - "source": [ - "## Example 3: Precomputing Volume" - ] - }, - { - "cell_type": "markdown", - "id": 
"19edcc5b", - "metadata": {}, - "source": [ - "Convolving the 3D volume with the gaussian kernal can take some time to compute, particularly when using a large fwhm value. When exploring the same volume at different coordinates and using different slices it is recommended to precompute the volume and then plot the slices. Below shows an example of how to do this." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "81f6919c", - "metadata": {}, - "outputs": [], - "source": [ - "from ibllib.atlas.plots import compute_volume_from_points, plot_volume_on_slice\n", - "\n", - "# Extract xyz coords from clusters dict\n", - "xyz = np.c_[clusters['x'], clusters['y'], clusters['z']]\n", - "values = clusters['amp_max']\n", - "\n", - "# Compute volume\n", - "volume = compute_volume_from_points(xyz, values=values, aggr='mean', fwhm=250, ba=ba)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9d938d24", - "metadata": {}, - "outputs": [], - "source": [ - "# Plot points on the coronal slices on brain region boundaries using Beryl maping\n", - "figs, axs = plt.subplots(1, 3, figsize=(18,4))\n", - "fig, ax = plot_volume_on_slice(volume, coord=300, slice='coronal', mapping='Beryl', background='boundary', \n", - " cmap='Oranges', brain_atlas=ba, ax=axs[0])\n", - "ax.set_title('ap = 300um')\n", - "fig, ax = plot_volume_on_slice(volume, coord=400, slice='coronal', mapping='Beryl', background='boundary', \n", - " cmap='Oranges', brain_atlas=ba, ax=axs[1])\n", - "ax.set_title('ap = 400um')\n", - "fig, ax = plot_volume_on_slice(volume, coord=500, slice='coronal', mapping='Beryl', background='boundary', \n", - " cmap='Oranges', brain_atlas=ba,ax=axs[2])\n", - "ax.set_title('ap = 500um')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0311b5ad", - "metadata": {}, - "outputs": [], - "source": [ - "# Plot points on the saggital slice at ap=-800um overlaid on brain region boundaries using Cosmos mapping\n", - "fig, ax, cbar = 
plot_volume_on_slice(volume, coord=-800, slice='sagittal', mapping='Cosmos', background='boundary', \n", - " cmap='Blues', brain_atlas=ba, clevels=[0, 2e-7], show_cbar=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7835fa36", - "metadata": {}, - "outputs": [], - "source": [ - "# Plot points on the horizontal slice at dv=-5000um overlaid on allen dwi image\n", - "fig, ax = plot_volume_on_slice(volume, coord=-5000, slice='horizontal', background='image', cmap='Greens', brain_atlas=ba)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python [conda env:iblenv] *", - "language": "python", - "name": "conda-env-iblenv-py" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} \ No newline at end of file diff --git a/examples/atlas/atlas_plotting_scalar_on_slice.ipynb b/examples/atlas/atlas_plotting_scalar_on_slice.ipynb deleted file mode 100644 index eddf024f1..000000000 --- a/examples/atlas/atlas_plotting_scalar_on_slice.ipynb +++ /dev/null @@ -1,277 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "12df3a26", - "metadata": {}, - "source": [ - "# Plotting brain region values on histology slices" - ] - }, - { - "cell_type": "markdown", - "id": "a056f78f", - "metadata": {}, - "source": [ - "This example walks through various ways to overlay brain region values on histology slices" - ] - }, - { - "cell_type": "markdown", - "id": "8cef5812", - "metadata": {}, - "source": [ - "## Data preparation\n", - "For all the examples below, an array of acronyms and an array of values corresponding to each acronym must be provided. 
" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "dd06e7ff", - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "# prepare array of acronyms\n", - "acronyms = np.array(['VPM', 'VPL', 'PO', 'LP', 'CA1', 'DG-mo', 'SSs5', 'VISa5', 'AUDv6a', 'MOp5', 'FRP5'])\n", - "# assign data to each acronym\n", - "values = np.arange(len(acronyms))\n", - "\n", - "# acronyms and values must have the same number of rows\n", - "assert (acronyms.size == values.size)" - ] - }, - { - "cell_type": "markdown", - "id": "e0b26454", - "metadata": {}, - "source": [ - "If different values for each acronym want to be shown on each hemisphere, the array of values must contain two columns, the first corresponding to values on the left hemisphere and the second corresponding to values on the right hemisphere" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "fa6bdd24", - "metadata": {}, - "outputs": [], - "source": [ - "# values to be used for left and right hemisphere\n", - "values_lh = np.random.randint(0, 10, acronyms.size)\n", - "values_rh = np.random.randint(0, 10, acronyms.size)\n", - "values_lr = np.c_[values_lh, values_rh]" - ] - }, - { - "cell_type": "markdown", - "id": "7115c423", - "metadata": {}, - "source": [ - "When providing values for each hemisphere, if a value for a given acronym has only been computed, for example, in the left hemisphere, the corresponding value in right hemisphere array should be set to NaN. 
A helper function is available that will prepare the array of values in the correct format" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "06c9e0c7", - "metadata": {}, - "outputs": [], - "source": [ - "from ibllib.atlas.plots import prepare_lr_data\n", - "\n", - "acronyms_lh = np.array(['VPM', 'VPL', 'PO', 'LP', 'CA1', 'DG-mo'])\n", - "values_lh = np.random.randint(0, 10, acronyms_lh.size)\n", - "\n", - "acronyms_rh = np.array(['VPM', 'PO', 'LP', 'CA1', 'DG-mo', 'VISa5', 'SSs5'])\n", - "values_rh = np.random.randint(0, 10, acronyms_rh.size)\n", - "\n", - "acronyms_lr, values_lr = prepare_lr_data(acronyms_lh, values_lh, acronyms_rh, values_rh)" - ] - }, - { - "cell_type": "markdown", - "id": "45aa4b07", - "metadata": {}, - "source": [ - "Different mappings can be used when plotting the images (see [here](https://int-brain-lab.github.io/iblenv/notebooks_external/atlas_mapping.html) for more information on atlas mappings). The acronyms provided must correspond to the specified map" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7afc0de4", - "metadata": {}, - "outputs": [], - "source": [ - "from ibllib.atlas.regions import BrainRegions\n", - "br = BrainRegions()\n", - "\n", - "acronyms_beryl = np.unique(br.acronym2acronym(acronyms, mapping='Beryl'))\n", - "values_beryl = np.arange(acronyms_beryl.size)\n", - "\n", - "acronyms_cosmos = np.unique(br.acronym2acronym(acronyms, mapping='Cosmos'))\n", - "values_cosmos = np.arange(acronyms_cosmos.size)" - ] - }, - { - "cell_type": "markdown", - "id": "9bcda7f3", - "metadata": {}, - "source": [ - "## Example 1: Coronal slices" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "33270b88", - "metadata": {}, - "outputs": [], - "source": [ - "from ibllib.atlas.plots import plot_scalar_on_slice\n", - "from ibllib.atlas import AllenAtlas\n", - "ba = AllenAtlas()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0731a71d", - "metadata": {}, - 
"outputs": [], - "source": [ - "# Plot region values on the left hemisphere of a coronal slice at ap=-2000um overlaid on the dwi Allen image\n", - "fig, ax = plot_scalar_on_slice(acronyms, values, coord=-2000, slice='coronal', mapping='Allen', hemisphere='left',\n", - " background='image', cmap='Reds', brain_atlas=ba)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "284efd17", - "metadata": {}, - "outputs": [], - "source": [ - "# Plot single column region values on both hemispheres of a coronal slice at ap=-2000um overlaid on brain region boundaries\n", - "# using Beryl mapping\n", - "# If values only contains one column, the values will be mirrored on each hemisphere\n", - "fig, ax = plot_scalar_on_slice(acronyms_beryl, values_beryl, coord=-2000, slice='coronal', mapping='Beryl', \n", - " hemisphere='both', background='boundary', cmap='Blues', brain_atlas=ba)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2df60008", - "metadata": {}, - "outputs": [], - "source": [ - "# Plot two column region values on both hemispheres of a coronal slice at ap=-1800um overlaid on brain region boundaries\n", - "# and display colorbar\n", - "# Values contains two columns, so each hemisphere has different values \n", - "fig, ax, cbar = plot_scalar_on_slice(acronyms_lr, values_lr, coord=-1800, slice='coronal', mapping='Allen', hemisphere='both', \n", - " background='boundary', cmap='viridis', brain_atlas=ba, show_cbar=True)" - ] - }, - { - "cell_type": "markdown", - "id": "cbdc5f5d", - "metadata": {}, - "source": [ - "## Example 2: Sagittal slices" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "25fec19d", - "metadata": {}, - "outputs": [], - "source": [ - "# Plot region values on the left hemisphere of a sagittal slice at ml=-2000um overlaid on the dwi Allen image \n", - "# using cosmos mapping\n", - "fig, ax = plot_scalar_on_slice(acronyms_cosmos, values_cosmos, coord=-2000, slice='sagittal', mapping='Cosmos', 
\n", - " hemisphere='left', background='image', cmap='Greens', brain_atlas=ba)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4ee0db12", - "metadata": {}, - "outputs": [], - "source": [ - "# Plot region values on the left hemisphere of a sagittal slice at ml=-1000um overlaid on brain region boundaries\n", - "# Create figure before and pass in axis on which to plot\n", - "import matplotlib.pyplot as plt\n", - "fig, ax = plt.subplots()\n", - "fig, ax = plot_scalar_on_slice(acronyms, values, coord=-1000, slice='sagittal', mapping='Allen', hemisphere='left', \n", - " background='boundary', cmap='plasma', brain_atlas=ba, ax=ax)" - ] - }, - { - "cell_type": "markdown", - "id": "b78a2be9", - "metadata": {}, - "source": [ - "## Example 3: Horizontal slices" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "11f7862e", - "metadata": {}, - "outputs": [], - "source": [ - "# Plot two column region values on the both hemispheres of a horizontal slice at dv=--2500um overlaid on the dwi Allen image\n", - "# Pass in clevels min max values to cap the colormap\n", - "fig, ax = plot_scalar_on_slice(acronyms_lr, values_lr, coord=-2500, slice='horizontal', mapping='Allen', hemisphere='both', \n", - " background='image', cmap='Reds', brain_atlas=ba, clevels=[0, 5])" - ] - }, - { - "cell_type": "markdown", - "id": "dbfc97d2", - "metadata": {}, - "source": [ - "## Example 4: Top view" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "238294a4", - "metadata": {}, - "outputs": [], - "source": [ - "# Plot region values on left hemisphere of a top view overlaid on brain region boundaries using Beryl mapping and show cbar\n", - "\n", - "fig, ax, cbar = plot_scalar_on_slice(acronyms_beryl, values_beryl, slice='top', mapping='Beryl', hemisphere='left', \n", - " background='boundary', cmap='Purples', brain_atlas=ba, show_cbar=True)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python [conda env:iblenv] *", - 
"language": "python", - "name": "conda-env-iblenv-py" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/examples/atlas/atlas_swanson_flatmap.ipynb b/examples/atlas/atlas_swanson_flatmap.ipynb deleted file mode 100644 index 4f92ee3ae..000000000 --- a/examples/atlas/atlas_swanson_flatmap.ipynb +++ /dev/null @@ -1,418 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Plotting brain region values on the Swanson flat map" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The Swanson flatmap is a 2D representation of the mouse brain to facilitate comparative analysis of brain data.\n", - "We extended the mouse atlas presented by [Hahn et al.](https://onlinelibrary.wiley.com/doi/full/10.1002/cne.24966?casa_token=kRb4fuUae6wAAAAA%3AHoiNx1MNVgZNUXT-MZN_mU6LAjKBiz5OE5cFj2Aj-GUE9l-oBllFUaM11XwCtEbpJyxKrwaMRnXC7MjY)\n", - "to interface programmatically with the Allen Atlas regions." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "from ibllib.atlas.plots import plot_swanson_vector\n", - "from ibllib.atlas import BrainRegions\n", - "\n", - "br = BrainRegions()\n", - "\n", - "# Plot Swanson map will default colors and acronyms\n", - "plot_swanson_vector(br=br, annotate=True)" - ] - }, - { - "cell_type": "markdown", - "source": [ - "### What regions are represented in the Swanson flatmap" - ], - "metadata": { - "collapsed": false - } - }, - { - "cell_type": "markdown", - "source": [ - "The Swanson map holds 323 brain region acronyms.\n", - "To find these acronyms, use the indices stored in the swanson mapping:" - ], - "metadata": { - "collapsed": false - } - }, - { - "cell_type": "code", - "execution_count": null, - "outputs": [], - "source": [ - "swanson_indices = np.unique(br.mappings['Swanson'])\n", - "swanson_ac = np.sort(br.acronym[swanson_indices])\n", - "swanson_ac.size" - ], - "metadata": { - "collapsed": false, - "pycharm": { - "name": "#%%\n" - } - } - }, - { - "cell_type": "markdown", - "source": [ - "Regions which are \"children\" or \"parents\" of a Swanson region will not be included in the acronyms. For example `VISa` is in Swanson, but its parent `PTLp` or child `VISa2/3` are not:" - ], - "metadata": { - "collapsed": false - } - }, - { - "cell_type": "code", - "execution_count": null, - "outputs": [], - "source": [ - "# Example: VISa is in Swanson\n", - "print(np.isin(['VISa'], swanson_ac))\n", - "\n", - "# Example child: VISa2/3 is not in Swanson\n", - "print(np.isin(['VISa2/3'], swanson_ac))\n", - "\n", - "# Example parent: PTLp is not in Swanson\n", - "print(np.isin(['PTLp'], swanson_ac))" - ], - "metadata": { - "collapsed": false, - "pycharm": { - "name": "#%%\n" - } - } - }, - { - "cell_type": "markdown", - "source": [ - "Also, only the indices corresponding to one hemisphere are represented in Swanson. 
For example, for VISa:" - ], - "metadata": { - "collapsed": false - } - }, - { - "cell_type": "code", - "execution_count": null, - "outputs": [], - "source": [ - "# indices of VISa\n", - "indices = br.acronym2index('VISa')[1][0]\n", - "print(f'Index {indices[0]} in swanson? {indices[0] in swanson_indices}')\n", - "print(f'Index {indices[1]} in swanson? {indices[1] in swanson_indices}')" - ], - "metadata": { - "collapsed": false, - "pycharm": { - "name": "#%%\n" - } - } - }, - { - "cell_type": "markdown", - "source": [ - "### Selecting the brain regions for plotting" - ], - "metadata": { - "collapsed": false - } - }, - { - "cell_type": "markdown", - "source": [ - "You can only plot value for a given region that is in Swanson, or a parent region (see below for detailed explanation on this latter point). You cannot plot value on children regions of those in the Swanson mapping. In other words, the brain regions contains in the Swanson mapping are the lowest hierarchical level you can plot onto.\n", - "\n", - "This was done to ensure there is no confusion about how data is aggregated and represented per region.\n", - "For example, if you were to input values for both `VISa1` and `VISa2/3`, it is unclear whether the mean, median or else should have been plotted onto the `VISa` area - instead, we ask you to do the aggregation yourself and pass this into the plotting function.\n", - "\n", - "For example," - ], - "metadata": { - "collapsed": false - } - }, - { - "cell_type": "code", - "execution_count": null, - "outputs": [], - "source": [ - "# 'VISa', 'CA1', 'VPM' are in Swanson and all 3 are plotted\n", - "acronyms = ['VISa', 'CA1', 'VPM']\n", - "values = np.array([1.5, 3, 4])\n", - "plot_swanson_vector(acronyms, values, annotate=True,\n", - " annotate_list=['VISa', 'CA1', 'VPM'],empty_color='silver')\n", - "\n", - "# 'VISa1','VISa2/3' are not in Swanson, only 'CA1', 'VPM' are plotted\n", - "acronyms = ['VISa1','VISa2/3', 'CA1', 'VPM']\n", - "values = np.array([1, 2, 3, 
4])\n", - "plot_swanson_vector(acronyms, values, annotate=True,\n", - " annotate_list=['VISa1','VISa2/3', 'CA1', 'VPM'],empty_color='silver')" - ], - "metadata": { - "collapsed": false, - "pycharm": { - "name": "#%%\n" - } - } - }, - { - "cell_type": "markdown", - "source": [ - "You can plot onto the parent of a region in the Swanson mapping, for example you can plot over `PTLp` (which is the parent of `VISa` and `VISrl`). This paints the same value across all regions of the Swanson mapping contained in the parent region." - ], - "metadata": { - "collapsed": false - } - }, - { - "cell_type": "code", - "execution_count": null, - "outputs": [], - "source": [ - "# Plotting over a parent region (PTLp) paints the same value across all children (VISa and VISrl)\n", - "acronyms = ['PTLp', 'CA1', 'VPM']\n", - "values = np.array([1.5, 3, 4])\n", - "plot_swanson_vector(acronyms, values, annotate=True,\n", - " annotate_list=['PTLp', 'CA1', 'VPM'],empty_color='silver')" - ], - "metadata": { - "collapsed": false, - "pycharm": { - "name": "#%%\n" - } - } - }, - { - "cell_type": "markdown", - "source": [ - "Plotting over a parent and child region simultaneously will overwrite the corresponding portion of the parent region:" - ], - "metadata": { - "collapsed": false - } - }, - { - "cell_type": "code", - "execution_count": null, - "outputs": [], - "source": [ - "# Plotting over 'PTLp' and overwriting the 'VISrl' value\n", - "acronyms = ['PTLp','VISrl', 'CA1', 'VPM']\n", - "values = np.array([1, 2, 3, 4])\n", - "plot_swanson_vector(acronyms, values, annotate=True,\n", - " annotate_list=['PTLp','VISrl', 'CA1', 'VPM'],empty_color='silver')" - ], - "metadata": { - "collapsed": false, - "pycharm": { - "name": "#%%\n" - } - } - }, - { - "cell_type": "markdown", - "source": [ - "As such, you can easily fill in a whole top-hierarchy region, supplemented by one particular region of interest." 
- ], - "metadata": { - "collapsed": false - } - }, - { - "cell_type": "code", - "execution_count": null, - "outputs": [], - "source": [ - "acronyms = ['Isocortex', 'VISa']\n", - "values = np.array([1, 2])\n", - "plot_swanson_vector(acronyms, values, annotate=True,\n", - " annotate_list=['Isocortex', 'VISa'],empty_color='silver')" - ], - "metadata": { - "collapsed": false, - "pycharm": { - "name": "#%%\n" - } - } - }, - { - "cell_type": "markdown", - "source": [ - "## Mapping to the swanson brain regions\n", - "\n", - "Similarly as explained in this [page](https://int-brain-lab.github.io/iblenv/notebooks_external/atlas_mapping.html), you can map brain regions to those found in Swanson using `br.acronym2acronym`:" - ], - "metadata": { - "collapsed": false - } - }, - { - "cell_type": "code", - "execution_count": null, - "outputs": [], - "source": [ - "br.acronym2acronym('MDm', mapping='Swanson')" - ], - "metadata": { - "collapsed": false, - "pycharm": { - "name": "#%%\n" - } - } - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Plotting values on the swanson flatmap\n", - "### Single hemisphere display\n", - "You simply need to provide an array of brain region acronyms, and an array of corresponding values." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# prepare array of acronyms\n", - "acronyms = np.array(\n", - " ['VPLpc', 'PO', 'LP', 'DG', 'CA1', 'PTLp', 'MRN', 'APN', 'POL',\n", - " 'VISam', 'MY', 'PGRNl', 'IRN', 'PARN', 'SPVI', 'NTS', 'SPIV',\n", - " 'NOD', 'IP', 'AON', 'ORBl', 'AId', 'MOs', 'GRN', 'P', 'CENT',\n", - " 'CUL', 'COApm', 'PA', 'CA2', 'CA3', 'HY', 'ZI', 'MGv', 'LGd',\n", - " 'LHA', 'SF', 'TRS', 'PVT', 'LSc', 'ACAv', 'ACAd', 'MDRNv', 'MDRNd',\n", - " 'COPY', 'PRM', 'DCO', 'DN', 'SIM', 'MEA', 'SI', 'RT', 'MOp', 'PCG',\n", - " 'ICd', 'CS', 'PAG', 'SCdg', 'SCiw', 'VCO', 'ANcr1', 'ENTm', 'ENTl',\n", - " 'NOT', 'VPM', 'VAL', 'VPL', 'CP', 'SSp-ul', 'MV', 'VISl', 'LGv',\n", - " 'SSp-bfd', 'ANcr2', 'DEC', 'LD', 'SSp-ll', 'V', 'SUT', 'PB', 'CUN',\n", - " 'ICc', 'PAA', 'EPv', 'BLAa', 'CEAl', 'GPe', 'PPN', 'SCig', 'SCop',\n", - " 'SCsg', 'RSPd', 'RSPagl', 'VISp', 'HPF', 'MGm', 'SGN', 'TTd', 'DP',\n", - " 'ILA', 'PL', 'RSPv', 'SSp-n', 'ORBm', 'ORBvl', 'PRNc', 'ACB',\n", - " 'SPFp', 'VM', 'SUV', 'OT', 'MA', 'BST', 'LSv', 'LSr', 'UVU',\n", - " 'SSp-m', 'LA', 'CM', 'MD', 'SMT', 'PFL', 'MARN', 'PRE', 'POST',\n", - " 'PRNr', 'SSp-tr', 'PIR', 'CTXsp', 'RN', 'PSV', 'SUB', 'LDT', 'PAR',\n", - " 'SPVO', 'TR', 'VISpm', 'MS', 'COApl', 'BMAp', 'AMd', 'ICe', 'TEa',\n", - " 'MOB', 'SNr', 'GU', 'VISC', 'SSs', 'AIp', 'NPC', 'BLAp', 'SPVC',\n", - " 'PYR', 'AV', 'EPd', 'NLL', 'AIv', 'CLA', 'AAA', 'AUDv', 'TRN'],\n", - " dtype='0)[0]\n", - "negative_id = np.where(brain_regions.id<0)[0]\n", - "void_id = np.where(brain_regions.id==0)[0]\n", - "\n", - "print(len(positive_id) + len(negative_id) + len(void_id))" - ], - "metadata": { - "collapsed": false, - "pycharm": { - "name": "#%%\n" - } - } - }, - { - "cell_type": "markdown", - "source": [ - "We can understand this better by exploring the `brain_regions.id` and `brain_regions.name` at the indices where it transitions between hemispheres." 
- ], - "metadata": { - "collapsed": false - } - }, - { - "cell_type": "markdown", - "source": [ - "The first value of `brain_region.id` is `void` (Allen id `0`):" - ], - "metadata": { - "collapsed": false - } - }, - { - "cell_type": "code", - "execution_count": null, - "outputs": [], - "source": [ - "print(brain_regions.id[index_void])" - ], - "metadata": { - "collapsed": false, - "pycharm": { - "name": "#%%\n" - } - } - }, - { - "cell_type": "markdown", - "source": [ - "The point of change between right and left hemisphere is at the index:" - ], - "metadata": { - "collapsed": false - } - }, - { - "cell_type": "code", - "execution_count": null, - "outputs": [], - "source": [ - "print(len(positive_id))" - ], - "metadata": { - "collapsed": false, - "pycharm": { - "name": "#%%\n" - } - } - }, - { - "cell_type": "markdown", - "source": [ - "Around this index, the `brain_region.id` go from positive Allen atlas ids (right hemisphere) to negative Allen atlas ids (left hemisphere)." - ], - "metadata": { - "collapsed": false - } - }, - { - "cell_type": "code", - "execution_count": null, - "outputs": [], - "source": [ - "print(brain_regions.id[1320:1340])" - ], - "metadata": { - "collapsed": false, - "pycharm": { - "name": "#%%\n" - } - } - }, - { - "cell_type": "markdown", - "source": [ - "Regions are organised following the same index ordering in left/right hemisphere.\n", - "For example, you will find the same acronym `PPYd` at the index 1000, and once you've passed the positive integers:" - ], - "metadata": { - "collapsed": false - } - }, - { - "cell_type": "code", - "execution_count": null, - "outputs": [], - "source": [ - "index = 1000\n", - "print(brain_regions.acronym[index])\n", - "print(brain_regions.acronym[index + len(positive_id)])\n", - "# Note: do not re-use this approach, this is for explanation only - you will see below a dedicated function" - ], - "metadata": { - "collapsed": false, - "pycharm": { - "name": "#%%\n" - } - } - }, - { - "cell_type": 
"markdown", - "id": "0a1af738", - "metadata": {}, - "source": [ - "The `brain_region.name` also go from right to left hemisphere:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c99a6e89", - "metadata": {}, - "outputs": [], - "source": [ - "print(brain_regions.name[1320:1340])" - ] - }, - { - "cell_type": "markdown", - "id": "c144bdd2", - "metadata": {}, - "source": [ - "In the label volume, we can therefore differentiate between left and right hemisphere voxels for the same brain region. First we will use a method in the brain_region class to find out the index of left and right `CA1`." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0b7d5209", - "metadata": {}, - "outputs": [], - "source": [ - "brain_regions.acronym2index('CA1')\n", - "# The first values are the acronyms, the second values are the indices" - ] - }, - { - "cell_type": "markdown", - "id": "607ae9b6", - "metadata": {}, - "source": [ - "The method `acronym2index` returns a tuple, with the first value being a list of acronyms passed in and the second value giving the indices in the array that correspond to the left and right hemispheres for this region. 
We can now use these indices to search in the label volume" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0680ca09", - "metadata": {}, - "outputs": [], - "source": [ - "CA1_right = np.where(brain_atlas.label == 458)\n", - "CA1_left = np.where(brain_atlas.label == 1785)" - ] - }, - { - "cell_type": "markdown", - "source": [ - "## Navigate the brain region hierarchy\n", - "The 1328 regions in the Allen parcelation are organised in a hierarchical tree.\n", - "For example, the region PPY encompasses both the regions PPYd and PPYs.\n", - "\n", - "You can visually explore the hierarchy through this [webpage](https://openalyx.internationalbrainlab.org/admin/experiments/brainregion/) (username: `intbrainlab`, password: `international`).\n", - "(TODO THIS IS NOT A GREAT WAY, CHANGE TO OTHER REFERENCE)\n" - ], - "metadata": { - "collapsed": false - } - }, - { - "cell_type": "markdown", - "source": [ - "### Ancestors\n", - "\n", - "To find ancestors of a region, i.e. regions that are higher in the hierarchy tree, use `brain_regions.ancestors`.\n", - "\n", - "Let's use the region PPYd as an example:" - ], - "metadata": { - "collapsed": false - } - }, - { - "cell_type": "code", - "execution_count": null, - "outputs": [], - "source": [ - "index = 1000 # Remember the Allen id at this index is 185\n", - "brain_regions.ancestors(ids=brain_regions.id[index])" - ], - "metadata": { - "collapsed": false, - "pycharm": { - "name": "#%%\n" - } - } - }, - { - "cell_type": "markdown", - "source": [ - "All parents along the hierarchy tree are returned.\n", - "The parents are organised in increasing order of `level` (0-1-2...), i.e. 
the highest, all-encompassing level is first (`root` in the example above).\n", - "Note:\n", - "- The fields contain all the parents regions, including the one passed in (which is last).\n", - "- The field `parent` returns the parent region id of the regions in `id` (you can notice they are the same as in `id` but incremented by one level).\n", - "- The field `order` returns values used for plotting (Note: this is *not* the parent's index)\n", - "\n", - "For example, the last `parent` region is PPY (which is indeed the closest parent of PPYd):" - ], - "metadata": { - "collapsed": false - } - }, - { - "cell_type": "code", - "execution_count": null, - "outputs": [], - "source": [ - "index = 999\n", - "print(brain_regions.id[index])\n", - "print(brain_regions.acronym[index])" - ], - "metadata": { - "collapsed": false, - "pycharm": { - "name": "#%%\n" - } - } - }, - { - "cell_type": "markdown", - "source": [ - "### Descendants\n", - "To find the descendants of a region, use `brain_regions.descendants`.\n", - "\n", - "Let's use the region PPY as an example:" - ], - "metadata": { - "collapsed": false - } - }, - { - "cell_type": "code", - "execution_count": null, - "outputs": [], - "source": [ - "index = 999\n", - "brain_regions.descendants(ids=brain_regions.id[index])" - ], - "metadata": { - "collapsed": false, - "pycharm": { - "name": "#%%\n" - } - } - }, - { - "cell_type": "markdown", - "source": [ - "Note:\n", - "- The fields contain all the descendant regions, including the one passed in (which is first).\n", - "- The field `parent` returns the parent region id of the regions in `id`.\n", - "- The field `order` returns values used for plotting (Note: this is *not* the parent's index)\n", - "\n", - "Note also that the `descendants` methods will return all descendants from all the different branches down, for example for PTLp :" - ], - "metadata": { - "collapsed": false - } - }, - { - "cell_type": "code", - "execution_count": null, - "outputs": [], - "source": [ - 
"atlas_id = brain_regions.acronym2id('PTLp')\n", - "# Print the acronyms of the descendants of this region\n", - "print(brain_regions.descendants(ids=atlas_id)['acronym'])\n", - "# Print the levels of the descendants of this region\n", - "print(brain_regions.descendants(ids=atlas_id)['level'])" - ], - "metadata": { - "collapsed": false, - "pycharm": { - "name": "#%%\n" - } - } - }, - { - "cell_type": "markdown", - "source": [ - "### Find region at a particular place in the hierarchy\n", - "\n", - "#### Leaf node\n", - "\n", - "If you need to check a region is a leaf node, i.e. that it has no descendant, you could use the `descendants` method and check that the returned length of the `id` is one (i.e. it only returns itself).\n", - "\n", - "For example, PPYd is a leaf node:" - ], - "metadata": { - "collapsed": false - } - }, - { - "cell_type": "code", - "execution_count": null, - "outputs": [], - "source": [ - "index = 1000\n", - "ppyd_desc = brain_regions.descendants(ids=brain_regions.id[index])\n", - "\n", - "len(ppyd_desc['id']) == 1" - ], - "metadata": { - "collapsed": false, - "pycharm": { - "name": "#%%\n" - } - } - }, - { - "cell_type": "markdown", - "source": [ - "However, there is a faster method.\n", - "To find all the regions that are leaf nodes, use `brain_regions.leaves`:" - ], - "metadata": { - "collapsed": false - } - }, - { - "cell_type": "code", - "execution_count": null, - "outputs": [], - "source": [ - "brain_regions.leaves()" - ], - "metadata": { - "collapsed": false, - "pycharm": { - "name": "#%%\n" - } - } - }, - { - "cell_type": "markdown", - "source": [ - "It is recommended you use this function to check whether a region is a leaf node:" - ], - "metadata": { - "collapsed": false - } - }, - { - "cell_type": "code", - "execution_count": null, - "outputs": [], - "source": [ - "index = 1000\n", - "brain_regions.id[index] in brain_regions.leaves().id" - ], - "metadata": { - "collapsed": false, - "pycharm": { - "name": "#%%\n" - } - } - }, - { - 
"cell_type": "markdown", - "source": [ - "#### Find region at a given hierarchy level\n", - "\n", - "To find all the regions that are on a given level of the hierarchy, use `brain_regions.level`:" - ], - "metadata": { - "collapsed": false - } - }, - { - "cell_type": "code", - "execution_count": null, - "outputs": [], - "source": [ - "print(f'brain_regions.level contains {brain_regions.level.size} values, which are either {np.unique(brain_regions.level)}')" - ], - "metadata": { - "collapsed": false, - "pycharm": { - "name": "#%%\n" - } - } - }, - { - "cell_type": "code", - "execution_count": null, - "outputs": [], - "source": [ - "# Example: find the index and acronyms of brain regions at level 0 (i.e. highest parents):\n", - "index = np.where(brain_regions.level == 0)[0]\n", - "print(index)\n", - "brain_regions.acronym[index] # Note that root appears twice because of the lateralisation" - ], - "metadata": { - "collapsed": false, - "pycharm": { - "name": "#%%\n" - } - } - }, - { - "cell_type": "markdown", - "id": "89d7f7d8", - "metadata": {}, - "source": [ - "## Coordinate systems" - ] - }, - { - "cell_type": "markdown", - "id": "0c47c5c7", - "metadata": {}, - "source": [ - "The voxels can be translated to 3D space.\n", - "In the IBL, all xyz coordinates are referenced from Bregma, which point is set as xyz coordinate [0,0,0].\n", - "\n", - "![IBL coordinate system](https://github.com/int-brain-lab/ibllib/blob/atlas_docs/examples/atlas/images/brain_xyz.png?raw=true)\n", - "\n", - "In contrast, in the Allen coordinate framework, the [0,0,0] point corresponds to one of the cubic volume edge." 
- ] - }, - { - "cell_type": "markdown", - "source": [ - "Below we show the value of Bregma in the Allen CCF space (in micrometer um):" - ], - "metadata": { - "collapsed": false - } - }, - { - "cell_type": "code", - "execution_count": null, - "outputs": [], - "source": [ - "from ibllib.atlas import ALLEN_CCF_LANDMARKS_MLAPDV_UM\n", - "print(ALLEN_CCF_LANDMARKS_MLAPDV_UM)" - ], - "metadata": { - "collapsed": false, - "pycharm": { - "name": "#%%\n" - } - } - }, - { - "cell_type": "markdown", - "source": [ - "To translate this into an index into the volume `brain_atlas`, you need to divide by the atlas resolution (also in micrometer):" - ], - "metadata": { - "collapsed": false - } - }, - { - "cell_type": "code", - "execution_count": null, - "outputs": [], - "source": [ - "# Find bregma position in indices\n", - "bregma_index = ALLEN_CCF_LANDMARKS_MLAPDV_UM['bregma'] / brain_atlas.res_um" - ], - "metadata": { - "collapsed": false, - "pycharm": { - "name": "#%%\n" - } - } - }, - { - "cell_type": "markdown", - "source": [ - "This index can be passed into `brain_atlas.bc.i2xyz` that converts volume indices into IBL xyz coordinates (i.e. relative to Bregma):" - ], - "metadata": { - "collapsed": false - } - }, - { - "cell_type": "code", - "execution_count": null, - "outputs": [], - "source": [ - "# Find bregma position in xyz in m (expect this to be 0 0 0)\n", - "bregma_xyz = brain_atlas.bc.i2xyz(bregma_index)\n", - "print(bregma_xyz)" - ], - "metadata": { - "collapsed": false, - "pycharm": { - "name": "#%%\n" - } - } - }, - { - "cell_type": "markdown", - "source": [ - "Functions exist in both direction, i.e. from a volume index to IBL xyz, and from xyz to an index.\n", - "Note that the functions return/input values are in *meters*, not micrometers." 
- ], - "metadata": { - "collapsed": false - } - }, - { - "cell_type": "code", - "execution_count": null, - "outputs": [], - "source": [ - "# Convert from arbitrary index to xyz position (m) position relative to Bregma\n", - "index = np.array([102, 234, 178]).astype(float)\n", - "xyz = brain_atlas.bc.i2xyz(index)\n", - "print(f'xyz values are in meters: {xyz}')\n", - "\n", - "# Convert from xyz position (m) to index in atlas\n", - "xyz = np.array([-325, 4000, 250]) / 1e6\n", - "index = brain_atlas.bc.xyz2i(xyz)" - ], - "metadata": { - "collapsed": false, - "pycharm": { - "name": "#%%\n" - } - } - }, - { - "cell_type": "markdown", - "source": [ - "To know the sign and voxel resolution for each xyz axis, use:" - ], - "metadata": { - "collapsed": false - } - }, - { - "cell_type": "code", - "execution_count": null, - "outputs": [], - "source": [ - "# Find the resolution (in meter) of each axis\n", - "res_xyz = brain_atlas.bc.dxyz\n", - "\n", - "# Find the sign of each axis\n", - "sign_xyz = np.sign(res_xyz)\n", - "\n", - "print(f\"Resolution xyz: {res_xyz} in meter \\nSign xyz:\\t\\t{sign_xyz}\")" - ], - "metadata": { - "collapsed": false, - "pycharm": { - "name": "#%%\n" - } - } - }, - { - "cell_type": "markdown", - "source": [ - "To jump directly from an Allen xyz value to an IBL xyz value, use `brain_atlas.ccf2xyz`:" - ], - "metadata": { - "collapsed": false - } - }, - { - "cell_type": "code", - "execution_count": null, - "outputs": [], - "source": [ - "# Example: Where is the Allen 0 relative to IBL Bregma?\n", - "# This will give the Bregma value shown above (in meters), but with opposite axis sign value\n", - "brain_atlas.ccf2xyz(np.array([0, 0, 0]))" - ], - "metadata": { - "collapsed": false, - "pycharm": { - "name": "#%%\n" - } - } - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - 
"file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.16" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} \ No newline at end of file diff --git a/examples/atlas/images/brain_xyz.png b/examples/atlas/images/brain_xyz.png deleted file mode 100644 index 5d707848e..000000000 Binary files a/examples/atlas/images/brain_xyz.png and /dev/null differ diff --git a/examples/data_release/data_release_brainwidemap.ipynb b/examples/data_release/data_release_brainwidemap.ipynb index 552571e54..0c3b0a402 100644 --- a/examples/data_release/data_release_brainwidemap.ipynb +++ b/examples/data_release/data_release_brainwidemap.ipynb @@ -18,7 +18,7 @@ "cell_type": "markdown", "source": [ "## Overview of the Data\n", - "We have released data from 354 Neuropixel recording sessions, which encompass 547 probe insertions, obtained in 115 subjects performing the IBL task across 12 different laboratories. As output of spike-sorting, there are 295501 units; of which 32766 are considered to be of good quality. These units were recorded in overall 194 different brain regions.\n", + "We have released data from 354 Neuropixel recording sessions, which encompass 547 probe insertions, obtained in 115 subjects performing the IBL task across 11 different laboratories. As output of spike-sorting, there are 295501 units; of which 32766 are considered to be of good quality. 
These units were recorded in overall 194 different brain regions.\n", "\n", "## Data structure and download\n", "The organisation of the data follows the standard IBL data structure.\n", @@ -60,4 +60,4 @@ }, "nbformat": 4, "nbformat_minor": 5 -} \ No newline at end of file +} diff --git a/examples/ephys/example_amp_depth_scatter.py b/examples/ephys/example_amp_depth_scatter.py index 89d08e34f..0fd79ef1b 100644 --- a/examples/ephys/example_amp_depth_scatter.py +++ b/examples/ephys/example_amp_depth_scatter.py @@ -6,7 +6,7 @@ from one.api import ONE from brainbox.io.one import SpikeSortingLoader -from ibllib.atlas import AllenAtlas +from iblatlas.atlas import AllenAtlas import matplotlib.pyplot as plt import numpy as np diff --git a/examples/ephys/example_single_cluster_stim_aligned_activity.py b/examples/ephys/example_single_cluster_stim_aligned_activity.py index 2628d3f65..5bfeb085e 100644 --- a/examples/ephys/example_single_cluster_stim_aligned_activity.py +++ b/examples/ephys/example_single_cluster_stim_aligned_activity.py @@ -6,7 +6,7 @@ from one.api import ONE from brainbox.io.one import SpikeSortingLoader -from ibllib.atlas import AllenAtlas +from iblatlas.atlas import AllenAtlas import matplotlib.pyplot as plt import numpy as np diff --git a/examples/ephys/example_stim_aligned_over_depth.py b/examples/ephys/example_stim_aligned_over_depth.py index 0ded0bd3b..0da94a237 100644 --- a/examples/ephys/example_stim_aligned_over_depth.py +++ b/examples/ephys/example_stim_aligned_over_depth.py @@ -8,7 +8,7 @@ import matplotlib.pyplot as plt from one.api import ONE -from ibllib.atlas import AllenAtlas +from iblatlas.atlas import AllenAtlas from brainbox.io.one import SpikeSortingLoader from brainbox.task.passive import get_stim_aligned_activity as stim_aligned_activity_over_depth diff --git a/examples/loading_data/loading_multi_photon_imaging_data.ipynb b/examples/loading_data/loading_multi_photon_imaging_data.ipynb index d2bb08a1a..d80aa5452 100644 --- 
a/examples/loading_data/loading_multi_photon_imaging_data.ipynb +++ b/examples/loading_data/loading_multi_photon_imaging_data.ipynb @@ -134,7 +134,7 @@ " print('%s (%s: %i)' % (area['name'], area['acronym'], area['id']))\n", "\n", "\n", - "from ibllib.atlas import AllenAtlas\n", + "from iblatlas.atlas import AllenAtlas\n", "atlas = AllenAtlas()\n", "\n", "# Interconvert ID and acronym\n", diff --git a/examples/loading_data/loading_passive_data.ipynb b/examples/loading_data/loading_passive_data.ipynb index f27ae5af9..8c49a186b 100644 --- a/examples/loading_data/loading_passive_data.ipynb +++ b/examples/loading_data/loading_passive_data.ipynb @@ -176,7 +176,7 @@ "pid = one.alyx.rest('insertions', 'list', session=eid)[0]['id']\n", "\n", "from brainbox.io.one import SpikeSortingLoader\n", - "from ibllib.atlas import AllenAtlas\n", + "from iblatlas.atlas import AllenAtlas\n", "import numpy as np\n", "ba = AllenAtlas()\n", "\n", diff --git a/examples/loading_data/loading_spike_waveforms.ipynb b/examples/loading_data/loading_spike_waveforms.ipynb index 6fa32b654..7009b7855 100644 --- a/examples/loading_data/loading_spike_waveforms.ipynb +++ b/examples/loading_data/loading_spike_waveforms.ipynb @@ -57,7 +57,7 @@ "source": [ "from one.api import ONE\n", "from brainbox.io.one import SpikeSortingLoader\n", - "from ibllib.atlas import AllenAtlas\n", + "from iblatlas.atlas import AllenAtlas\n", "\n", "one = ONE()\n", "ba = AllenAtlas()\n", diff --git a/examples/loading_data/loading_spikesorting_data.ipynb b/examples/loading_data/loading_spikesorting_data.ipynb index d765642d0..5f22cc3d2 100644 --- a/examples/loading_data/loading_spikesorting_data.ipynb +++ b/examples/loading_data/loading_spikesorting_data.ipynb @@ -59,7 +59,7 @@ "source": [ "from one.api import ONE\n", "from brainbox.io.one import SpikeSortingLoader\n", - "from ibllib.atlas import AllenAtlas\n", + "from iblatlas.atlas import AllenAtlas\n", "\n", "one = ONE()\n", "ba = AllenAtlas()\n", diff --git 
a/examples/one/ephys/docs_get_cluster_brain_locations.py b/examples/one/ephys/docs_get_cluster_brain_locations.py index b86e6f5b4..8ca1cdbb2 100644 --- a/examples/one/ephys/docs_get_cluster_brain_locations.py +++ b/examples/one/ephys/docs_get_cluster_brain_locations.py @@ -13,7 +13,7 @@ from one.api import ONE import numpy as np import matplotlib.pyplot as plt -from ibllib.atlas import BrainRegions +from iblatlas.regions import BrainRegions one = ONE(base_url='https://openalyx.internationalbrainlab.org', silent=True) diff --git a/examples/one/histology/brain_regions_navigation.py b/examples/one/histology/brain_regions_navigation.py index 9fb0043dd..2bc0e7b2d 100644 --- a/examples/one/histology/brain_regions_navigation.py +++ b/examples/one/histology/brain_regions_navigation.py @@ -1,4 +1,4 @@ -from ibllib.atlas import AllenAtlas +from iblatlas.atlas import AllenAtlas ba = AllenAtlas() diff --git a/examples/one/histology/coverage_map.py b/examples/one/histology/coverage_map.py index b7689eeb7..31d60aa74 100644 --- a/examples/one/histology/coverage_map.py +++ b/examples/one/histology/coverage_map.py @@ -3,7 +3,7 @@ from one.api import ONE from neurodsp.utils import fcn_cosine -import ibllib.atlas as atlas +import iblatlas.atlas as atlas from ibllib.pipes.histology import coverage ba = atlas.AllenAtlas() diff --git a/examples/one/histology/docs_find_dist_neighbouring_region.py b/examples/one/histology/docs_find_dist_neighbouring_region.py index dd46ee271..8f4e27bee 100644 --- a/examples/one/histology/docs_find_dist_neighbouring_region.py +++ b/examples/one/histology/docs_find_dist_neighbouring_region.py @@ -19,7 +19,7 @@ import one.alf.io as alfio from ibllib.pipes.ephys_alignment import EphysAlignment -from ibllib.atlas import atlas +from iblatlas import atlas # Instantiate brain atlas and one diff --git a/examples/one/histology/docs_find_nearby_trajectories.py b/examples/one/histology/docs_find_nearby_trajectories.py index e22ff6066..22f22fd3a 100644 --- 
a/examples/one/histology/docs_find_nearby_trajectories.py +++ b/examples/one/histology/docs_find_nearby_trajectories.py @@ -12,7 +12,7 @@ from mayavi import mlab import ibllib.pipes.histology as histology -import ibllib.atlas as atlas +import iblatlas.atlas as atlas from atlaselectrophysiology import rendering # Instantiate brain atlas and one diff --git a/examples/one/histology/docs_find_previous_alignments.py b/examples/one/histology/docs_find_previous_alignments.py index c57e02f1a..790e2b8bd 100644 --- a/examples/one/histology/docs_find_previous_alignments.py +++ b/examples/one/histology/docs_find_previous_alignments.py @@ -13,7 +13,7 @@ from one.api import ONE from ibllib.pipes.ephys_alignment import EphysAlignment -import ibllib.atlas as atlas +import iblatlas.atlas as atlas # Instantiate brain atlas and one brain_atlas = atlas.AllenAtlas(25) diff --git a/examples/one/histology/docs_visualization3D_subject_channels.py b/examples/one/histology/docs_visualization3D_subject_channels.py index 75c5f9cee..bda59251e 100644 --- a/examples/one/histology/docs_visualization3D_subject_channels.py +++ b/examples/one/histology/docs_visualization3D_subject_channels.py @@ -18,7 +18,7 @@ import ibllib.plots from atlaselectrophysiology import rendering -import ibllib.atlas as atlas +import iblatlas.atlas as atlas one = ONE(base_url='https://openalyx.internationalbrainlab.org') subject = 'KS023' diff --git a/examples/one/histology/docs_visualize_session_coronal_tilted.py b/examples/one/histology/docs_visualize_session_coronal_tilted.py index 5f3336930..570e975a3 100644 --- a/examples/one/histology/docs_visualize_session_coronal_tilted.py +++ b/examples/one/histology/docs_visualize_session_coronal_tilted.py @@ -10,7 +10,7 @@ import numpy as np from one.api import ONE -import ibllib.atlas as atlas +import iblatlas.atlas as atlas import brainbox.io.one as bbone # === Parameters section (edit) === diff --git a/examples/one/histology/register_lasagna_tracks_alyx.py 
b/examples/one/histology/register_lasagna_tracks_alyx.py index 6ab0977db..5f79a86fe 100644 --- a/examples/one/histology/register_lasagna_tracks_alyx.py +++ b/examples/one/histology/register_lasagna_tracks_alyx.py @@ -24,7 +24,7 @@ """ # Author: Olivier, Gaelle from pathlib import Path -from ibllib.atlas import AllenAtlas +from iblatlas.atlas import AllenAtlas from one.api import ONE from ibllib.pipes import histology diff --git a/examples/one/histology/visualization3D_alyx_traj_planned_histology.py b/examples/one/histology/visualization3D_alyx_traj_planned_histology.py index ee5c5e1f3..0dcbf4203 100644 --- a/examples/one/histology/visualization3D_alyx_traj_planned_histology.py +++ b/examples/one/histology/visualization3D_alyx_traj_planned_histology.py @@ -10,7 +10,7 @@ from mayavi import mlab from atlaselectrophysiology import rendering -import ibllib.atlas as atlas +import iblatlas.atlas as atlas # === Parameters section (edit) === diff --git a/examples/one/histology/visualization3D_repeated_site.py b/examples/one/histology/visualization3D_repeated_site.py index ec90b8b62..9646c2846 100644 --- a/examples/one/histology/visualization3D_repeated_site.py +++ b/examples/one/histology/visualization3D_repeated_site.py @@ -9,7 +9,7 @@ import ibllib.plots from iblutil.util import Bunch -import ibllib.atlas as atlas +import iblatlas.atlas as atlas from atlaselectrophysiology import rendering import brainbox.io.one as bbone diff --git a/examples/one/histology/visualization3D_rotating_gif_firstpassmap_plan.py b/examples/one/histology/visualization3D_rotating_gif_firstpassmap_plan.py index f5e63216a..085127c06 100644 --- a/examples/one/histology/visualization3D_rotating_gif_firstpassmap_plan.py +++ b/examples/one/histology/visualization3D_rotating_gif_firstpassmap_plan.py @@ -6,7 +6,7 @@ from mayavi import mlab from atlaselectrophysiology import rendering -import ibllib.atlas as atlas +import iblatlas.atlas as atlas # the csv file is available here: # 
https://github.com/int-brain-lab/ibllib-matlab/blob/master/needles/maps/first_pass_map.csv diff --git a/examples/one/histology/visualization3D_rotating_gif_selectedmice.py b/examples/one/histology/visualization3D_rotating_gif_selectedmice.py index 42bb7e4f7..88a17829c 100644 --- a/examples/one/histology/visualization3D_rotating_gif_selectedmice.py +++ b/examples/one/histology/visualization3D_rotating_gif_selectedmice.py @@ -15,7 +15,7 @@ import ibllib.plots from atlaselectrophysiology import rendering -import ibllib.atlas as atlas +import iblatlas.atlas as atlas from iblutil.util import Bunch one = ONE(base_url="https://alyx.internationalbrainlab.org") diff --git a/examples/one/histology/visualization3D_subject_histology.py b/examples/one/histology/visualization3D_subject_histology.py index 0e0aaa44f..64c02cfe9 100644 --- a/examples/one/histology/visualization3D_subject_histology.py +++ b/examples/one/histology/visualization3D_subject_histology.py @@ -13,7 +13,7 @@ from one.api import ONE from atlaselectrophysiology import rendering -import ibllib.atlas as atlas +import iblatlas.atlas as atlas one = ONE(base_url="https://alyx.internationalbrainlab.org") diff --git a/examples/one/histology/visualize_alyx_channels_coronal.py b/examples/one/histology/visualize_alyx_channels_coronal.py index 7768db53b..9a8cde4d3 100644 --- a/examples/one/histology/visualize_alyx_channels_coronal.py +++ b/examples/one/histology/visualize_alyx_channels_coronal.py @@ -8,7 +8,7 @@ import numpy as np from one.api import ONE -import ibllib.atlas as atlas +import iblatlas.atlas as atlas import brainbox.io.one as bbone # === Parameters section (edit) === diff --git a/examples/one/histology/visualize_alyx_traj_coronal_sagittal_raster.py b/examples/one/histology/visualize_alyx_traj_coronal_sagittal_raster.py index c02fe8b86..1cedfb606 100644 --- a/examples/one/histology/visualize_alyx_traj_coronal_sagittal_raster.py +++ b/examples/one/histology/visualize_alyx_traj_coronal_sagittal_raster.py @@ 
-5,7 +5,7 @@ import matplotlib.pyplot as plt from one.api import ONE -import ibllib.atlas as atlas +import iblatlas.atlas as atlas import brainbox.io.one as bbone import brainbox.plot as bbplot diff --git a/examples/one/histology/visualize_track_file_coronal_GUIoption.py b/examples/one/histology/visualize_track_file_coronal_GUIoption.py index f655647c1..03a2b5ac6 100644 --- a/examples/one/histology/visualize_track_file_coronal_GUIoption.py +++ b/examples/one/histology/visualize_track_file_coronal_GUIoption.py @@ -7,7 +7,7 @@ import numpy as np from ibllib.pipes import histology -import ibllib.atlas as atlas +import iblatlas.atlas as atlas # === Parameters section (edit) === track_file = "/Users/gaelle/Downloads/electrodetracks_lic3/2019-08-27_lic3_002_probe00_pts.csv" diff --git a/examples/one/histology/visualize_track_file_coronal_sagittal_slice.py b/examples/one/histology/visualize_track_file_coronal_sagittal_slice.py index 84ffacccd..02e22e656 100644 --- a/examples/one/histology/visualize_track_file_coronal_sagittal_slice.py +++ b/examples/one/histology/visualize_track_file_coronal_sagittal_slice.py @@ -10,7 +10,7 @@ import matplotlib.pyplot as plt from ibllib.pipes import histology -import ibllib.atlas as atlas +import iblatlas.atlas as atlas # === Parameters section (edit) === diff --git a/ibllib/__init__.py b/ibllib/__init__.py index 79d4d537a..4e328ee70 100644 --- a/ibllib/__init__.py +++ b/ibllib/__init__.py @@ -2,7 +2,7 @@ import logging import warnings -__version__ = '2.25.2' +__version__ = '2.26' warnings.filterwarnings('always', category=DeprecationWarning, module='ibllib') # if this becomes a full-blown library we should let the logging configuration to the discretion of the dev diff --git a/ibllib/atlas/__init__.py b/ibllib/atlas/__init__.py index aaaaf24dd..22c7720bd 100644 --- a/ibllib/atlas/__init__.py +++ b/ibllib/atlas/__init__.py @@ -197,3 +197,7 @@ from .atlas import * # noqa from .regions import regions_from_allen_csv from .flatmaps import 
FlatMap +import warnings + +warnings.warn('ibllib.atlas is deprecated. Please install iblatlas using "pip install iblatlas" and use ' + 'this module instead', DeprecationWarning) diff --git a/ibllib/atlas/atlas.py b/ibllib/atlas/atlas.py index d4ec0914b..428c2ca2a 100644 --- a/ibllib/atlas/atlas.py +++ b/ibllib/atlas/atlas.py @@ -1,984 +1,32 @@ """ Classes for manipulating brain atlases, insertions, and coordinates. """ -from pathlib import Path, PurePosixPath -from dataclasses import dataclass -import logging -import matplotlib.pyplot as plt -import numpy as np -import nrrd +import warnings +import iblatlas.atlas -from one.webclient import http_download_file -import one.params -import one.remote.aws as aws -from iblutil.numerical import ismember -from ibllib.atlas.regions import BrainRegions, FranklinPaxinosRegions -ALLEN_CCF_LANDMARKS_MLAPDV_UM = {'bregma': np.array([5739, 5400, 332])} -"""dict: The ML AP DV voxel coordinates of brain landmarks in the Allen atlas.""" +def deprecated_decorator(function): + def deprecated_function(*args, **kwargs): + warning_text = f"{function.__module__}.{function.__name__} is deprecated. " \ + f"Use iblatlas.{function.__module__.split('.')[-1]}.{function.__name__} instead" + warnings.warn(warning_text, DeprecationWarning) + return function(*args, **kwargs) -PAXINOS_CCF_LANDMARKS_MLAPDV_UM = {'bregma': np.array([5700, 4300 + 160, 330])} -"""dict: The ML AP DV voxel coordinates of brain landmarks in the Franklin & Paxinos atlas.""" + return deprecated_function -S3_BUCKET_IBL = 'ibl-brain-wide-map-public' -"""str: The name of the public IBL S3 bucket containing atlas data.""" -_logger = logging.getLogger(__name__) +@deprecated_decorator +def BrainCoordinates(*args, **kwargs): + return iblatlas.atlas.BrainCoordinates(*args, **kwargs) -def cart2sph(x, y, z): - """ - Converts cartesian to spherical coordinates. - - Returns spherical coordinates (r, theta, phi). 
- - Parameters - ---------- - x : numpy.array - A 1D array of x-axis coordinates. - y : numpy.array - A 1D array of y-axis coordinates. - z : numpy.array - A 1D array of z-axis coordinates. - - Returns - ------- - numpy.array - The radial distance of each point. - numpy.array - The polar angle. - numpy.array - The azimuthal angle. - - See Also - -------- - sph2cart - """ - r = np.sqrt(x ** 2 + y ** 2 + z ** 2) - phi = np.arctan2(y, x) * 180 / np.pi - theta = np.zeros_like(r) - iok = r != 0 - theta[iok] = np.arccos(z[iok] / r[iok]) * 180 / np.pi - if theta.size == 1: - theta = float(theta) - return r, theta, phi - - -def sph2cart(r, theta, phi): - """ - Converts Spherical to Cartesian coordinates. - - Returns Cartesian coordinates (x, y, z). - - Parameters - ---------- - r : numpy.array - A 1D array of radial distances. - theta : numpy.array - A 1D array of polar angles. - phi : numpy.array - A 1D array of azimuthal angles. - - Returns - ------- - x : numpy.array - A 1D array of x-axis coordinates. - y : numpy.array - A 1D array of y-axis coordinates. - z : numpy.array - A 1D array of z-axis coordinates. - - See Also - -------- - cart2sph - """ - x = r * np.cos(phi / 180 * np.pi) * np.sin(theta / 180 * np.pi) - y = r * np.sin(phi / 180 * np.pi) * np.sin(theta / 180 * np.pi) - z = r * np.cos(theta / 180 * np.pi) - return x, y, z - - -class BrainCoordinates: - """ - Class for mapping and indexing a 3D array to real-world coordinates. - - * x = ml, right positive - * y = ap, anterior positive - * z = dv, dorsal positive - - The layout of the Atlas dimension is done according to the most used sections so they lay - contiguous on disk assuming C-ordering: V[iap, iml, idv] - - Parameters - ---------- - nxyz : array_like - Number of elements along each Cartesian axis (nx, ny, nz) = (nml, nap, ndv). - xyz0 : array_like - Coordinates of the element volume[0, 0, 0] in the coordinate space. - dxyz : array_like, float - Spatial interval of the volume along the 3 dimensions. 
- - Attributes - ---------- - xyz0 : numpy.array - The Cartesian coordinates of the element volume[0, 0, 0], i.e. the origin. - x0 : int - The x-axis origin coordinate of the element volume. - y0 : int - The y-axis origin coordinate of the element volume. - z0 : int - The z-axis origin coordinate of the element volume. - """ - - def __init__(self, nxyz, xyz0=(0, 0, 0), dxyz=(1, 1, 1)): - if np.isscalar(dxyz): - dxyz = [dxyz] * 3 - self.x0, self.y0, self.z0 = list(xyz0) - self.dx, self.dy, self.dz = list(dxyz) - self.nx, self.ny, self.nz = list(nxyz) - - @property - def dxyz(self): - """numpy.array: Spatial interval of the volume along the 3 dimensions.""" - return np.array([self.dx, self.dy, self.dz]) - - @property - def nxyz(self): - """numpy.array: Coordinates of the element volume[0, 0, 0] in the coordinate space.""" - return np.array([self.nx, self.ny, self.nz]) - - """Methods ratios to indices""" - def r2ix(self, r): - # FIXME Document - return int((self.nx - 1) * r) - - def r2iy(self, r): - # FIXME Document - return int((self.nz - 1) * r) - - def r2iz(self, r): - # FIXME Document - return int((self.nz - 1) * r) - - """Methods distance to indices""" - @staticmethod - def _round(i, round=True): - """ - Round an input value to the nearest integer, replacing NaN values with 0. - - Parameters - ---------- - i : int, float, numpy.nan, numpy.array - A value or array of values to round. - round : bool - If false this function is identity. - - Returns - ------- - int, float, numpy.nan, numpy.array - If round is true, returns the nearest integer, replacing NaN values with 0, otherwise - returns the input unaffected. - """ - nanval = 0 - if round: - ii = np.array(np.round(i)).astype(int) - ii[np.isnan(i)] = nanval - return ii - else: - return i - - def x2i(self, x, round=True, mode='raise'): - """ - Find the nearest volume image index to a given x-axis coordinate. 
- - Parameters - ---------- - x : float, numpy.array - One or more x-axis coordinates, relative to the origin, x0. - round : bool - If true, round to the nearest index, replacing NaN values with 0. - mode : {'raise', 'clip', 'wrap'}, default='raise' - How to behave if the coordinate lies outside of the volume: raise (default) will raise - a ValueError; 'clip' will replace the index with the closest index inside the volume; - 'wrap' will return the index as is. - - Returns - ------- - numpy.array - The nearest indices of the image volume along the first dimension. - - Raises - ------ - ValueError - At least one x value lies outside of the atlas volume. Change 'mode' input to 'wrap' to - keep these values unchanged, or 'clip' to return the nearest valid indices. - """ - i = np.asarray(self._round((x - self.x0) / self.dx, round=round)) - if np.any(i < 0) or np.any(i >= self.nx): - if mode == 'clip': - i[i < 0] = 0 - i[i >= self.nx] = self.nx - 1 - elif mode == 'raise': - raise ValueError("At least one x value lies outside of the atlas volume.") - elif mode == 'wrap': # This is only here for legacy reasons - pass - return i - - def y2i(self, y, round=True, mode='raise'): - """ - Find the nearest volume image index to a given y-axis coordinate. - - Parameters - ---------- - y : float, numpy.array - One or more y-axis coordinates, relative to the origin, y0. - round : bool - If true, round to the nearest index, replacing NaN values with 0. - mode : {'raise', 'clip', 'wrap'} - How to behave if the coordinate lies outside of the volume: raise (default) will raise - a ValueError; 'clip' will replace the index with the closest index inside the volume; - 'wrap' will return the index as is. - - Returns - ------- - numpy.array - The nearest indices of the image volume along the second dimension. - - Raises - ------ - ValueError - At least one y value lies outside of the atlas volume. 
Change 'mode' input to 'wrap' to - keep these values unchanged, or 'clip' to return the nearest valid indices. - """ - i = np.asarray(self._round((y - self.y0) / self.dy, round=round)) - if np.any(i < 0) or np.any(i >= self.ny): - if mode == 'clip': - i[i < 0] = 0 - i[i >= self.ny] = self.ny - 1 - elif mode == 'raise': - raise ValueError("At least one y value lies outside of the atlas volume.") - elif mode == 'wrap': # This is only here for legacy reasons - pass - return i - - def z2i(self, z, round=True, mode='raise'): - """ - Find the nearest volume image index to a given z-axis coordinate. - - Parameters - ---------- - z : float, numpy.array - One or more z-axis coordinates, relative to the origin, z0. - round : bool - If true, round to the nearest index, replacing NaN values with 0. - mode : {'raise', 'clip', 'wrap'} - How to behave if the coordinate lies outside of the volume: raise (default) will raise - a ValueError; 'clip' will replace the index with the closest index inside the volume; - 'wrap' will return the index as is. - - Returns - ------- - numpy.array - The nearest indices of the image volume along the third dimension. - - Raises - ------ - ValueError - At least one z value lies outside of the atlas volume. Change 'mode' input to 'wrap' to - keep these values unchanged, or 'clip' to return the nearest valid indices. - """ - i = np.asarray(self._round((z - self.z0) / self.dz, round=round)) - if np.any(i < 0) or np.any(i >= self.nz): - if mode == 'clip': - i[i < 0] = 0 - i[i >= self.nz] = self.nz - 1 - elif mode == 'raise': - raise ValueError("At least one z value lies outside of the atlas volume.") - elif mode == 'wrap': # This is only here for legacy reasons - pass - return i +@deprecated_decorator +def BrainAtlas(*args, **kwargs): + return iblatlas.atlas.BrainAtlas(*args, **kwargs) - def xyz2i(self, xyz, round=True, mode='raise'): - """ - Find the nearest volume image indices to the given Cartesian coordinates. 
- Parameters - ---------- - xyz : array_like - One or more Cartesian coordinates, relative to the origin, xyz0. - round : bool - If true, round to the nearest index, replacing NaN values with 0. - mode : {'raise', 'clip', 'wrap'} - How to behave if any coordinate lies outside of the volume: raise (default) will raise - a ValueError; 'clip' will replace the index with the closest index inside the volume; - 'wrap' will return the index as is. - - Returns - ------- - numpy.array - The nearest indices of the image volume. - - Raises - ------ - ValueError - At least one coordinate lies outside of the atlas volume. Change 'mode' input to 'wrap' - to keep these values unchanged, or 'clip' to return the nearest valid indices. - """ - xyz = np.array(xyz) - dt = int if round else float - out = np.zeros_like(xyz, dtype=dt) - out[..., 0] = self.x2i(xyz[..., 0], round=round, mode=mode) - out[..., 1] = self.y2i(xyz[..., 1], round=round, mode=mode) - out[..., 2] = self.z2i(xyz[..., 2], round=round, mode=mode) - return out - - """Methods indices to distance""" - def i2x(self, ind): - """ - Return the x-axis coordinate of a given index. - - Parameters - ---------- - ind : int, numpy.array - One or more indices along the first dimension of the image volume. - - Returns - ------- - float, numpy.array - The corresponding x-axis coordinate(s), relative to the origin, x0. - """ - return ind * self.dx + self.x0 - - def i2y(self, ind): - """ - Return the y-axis coordinate of a given index. - - Parameters - ---------- - ind : int, numpy.array - One or more indices along the second dimension of the image volume. - - Returns - ------- - float, numpy.array - The corresponding y-axis coordinate(s), relative to the origin, y0. - """ - return ind * self.dy + self.y0 - - def i2z(self, ind): - """ - Return the z-axis coordinate of a given index. - - Parameters - ---------- - ind : int, numpy.array - One or more indices along the third dimension of the image volume. 
- - Returns - ------- - float, numpy.array - The corresponding z-axis coordinate(s), relative to the origin, z0. - """ - return ind * self.dz + self.z0 - - def i2xyz(self, iii): - """ - Return the Cartesian coordinates of a given index. - - Parameters - ---------- - iii : array_like - One or more image volume indices. - - Returns - ------- - numpy.array - The corresponding xyz coordinates, relative to the origin, xyz0. - """ - - iii = np.array(iii, dtype=float) - out = np.zeros_like(iii) - out[..., 0] = self.i2x(iii[..., 0]) - out[..., 1] = self.i2y(iii[..., 1]) - out[..., 2] = self.i2z(iii[..., 2]) - return out - - """Methods bounds""" - @property - def xlim(self): - # FIXME Document - return self.i2x(np.array([0, self.nx - 1])) - - @property - def ylim(self): - # FIXME Document - return self.i2y(np.array([0, self.ny - 1])) - - @property - def zlim(self): - # FIXME Document - return self.i2z(np.array([0, self.nz - 1])) - - def lim(self, axis): - # FIXME Document - if axis == 0: - return self.xlim - elif axis == 1: - return self.ylim - elif axis == 2: - return self.zlim - - """returns scales""" - @property - def xscale(self): - # FIXME Document - return self.i2x(np.arange(self.nx)) - - @property - def yscale(self): - # FIXME Document - return self.i2y(np.arange(self.ny)) - - @property - def zscale(self): - # FIXME Document - return self.i2z(np.arange(self.nz)) - - """returns the 3d mgrid used for 3d visualization""" - @property - def mgrid(self): - # FIXME Document - return np.meshgrid(self.xscale, self.yscale, self.zscale) - - -class BrainAtlas: - """ - Objects that holds image, labels and coordinate transforms for a brain Atlas. - Currently this is designed for the AllenCCF at several resolutions, - yet this class can be used for other atlases arises. 
- """ - - """numpy.array: An image volume.""" - image = None - """numpy.array: An annotation label volume.""" - label = None - - def __init__(self, image, label, dxyz, regions, iorigin=[0, 0, 0], - dims2xyz=[0, 1, 2], xyz2dims=[0, 1, 2]): - """ - self.image: image volume (ap, ml, dv) - self.label: label volume (ap, ml, dv) - self.bc: atlas.BrainCoordinate object - self.regions: atlas.BrainRegions object - self.top: 2d np array (ap, ml) containing the z-coordinate (m) of the surface of the brain - self.dims2xyz and self.zyz2dims: map image axis order to xyz coordinates order - """ - - self.image = image - self.label = label - self.regions = regions - self.dims2xyz = dims2xyz - self.xyz2dims = xyz2dims - assert np.all(self.dims2xyz[self.xyz2dims] == np.array([0, 1, 2])) - assert np.all(self.xyz2dims[self.dims2xyz] == np.array([0, 1, 2])) - # create the coordinate transform object that maps volume indices to real world coordinates - nxyz = np.array(self.image.shape)[self.dims2xyz] - bc = BrainCoordinates(nxyz=nxyz, xyz0=(0, 0, 0), dxyz=dxyz) - self.bc = BrainCoordinates(nxyz=nxyz, xyz0=-bc.i2xyz(iorigin), dxyz=dxyz) - - self.surface = None - self.boundary = None - - @staticmethod - def _get_cache_dir(): - par = one.params.get(silent=True) - path_atlas = Path(par.CACHE_DIR).joinpath('histology', 'ATLAS', 'Needles', 'Allen', 'flatmaps') - return path_atlas - - def compute_surface(self): - """ - Get the volume top, bottom, left and right surfaces, and from these the outer surface of - the image volume. This is needed to compute probe insertions intersections. - - NOTE: In places where the top or bottom surface touch the top or bottom of the atlas volume, the surface - will be set to np.nan. If you encounter issues working with these surfaces check if this might be the cause. 
- """ - if self.surface is None: # only compute if it hasn't already been computed - axz = self.xyz2dims[2] # this is the dv axis - _surface = (self.label == 0).astype(np.int8) * 2 - l0 = np.diff(_surface, axis=axz, append=2) - _top = np.argmax(l0 == -2, axis=axz).astype(float) - _top[_top == 0] = np.nan - _bottom = self.bc.nz - np.argmax(np.flip(l0, axis=axz) == 2, axis=axz).astype(float) - _bottom[_bottom == self.bc.nz] = np.nan - self.top = self.bc.i2z(_top + 1) - self.bottom = self.bc.i2z(_bottom - 1) - self.surface = np.diff(_surface, axis=self.xyz2dims[0], append=2) + l0 - idx_srf = np.where(self.surface != 0) - self.surface[idx_srf] = 1 - self.srf_xyz = self.bc.i2xyz(np.c_[idx_srf[self.xyz2dims[0]], idx_srf[self.xyz2dims[1]], - idx_srf[self.xyz2dims[2]]].astype(float)) - - def _lookup_inds(self, ixyz, mode='raise'): - """ - Performs a 3D lookup from volume indices ixyz to the image volume - :param ixyz: [n, 3] array of indices in the mlapdv order - :return: n array of flat indices - """ - idims = np.split(ixyz[..., self.xyz2dims], [1, 2], axis=-1) - inds = np.ravel_multi_index(idims, self.bc.nxyz[self.xyz2dims], mode=mode) - return inds.squeeze() - - def _lookup(self, xyz, mode='raise'): - """ - Performs a 3D lookup from real world coordinates to the flat indices in the volume, - defined in the BrainCoordinates object. - - Parameters - ---------- - xyz : numpy.array - An (n, 3) array of Cartesian coordinates. - mode : {'raise', 'clip', 'wrap'} - How to behave if any coordinate lies outside of the volume: raise (default) will raise - a ValueError; 'clip' will replace the index with the closest index inside the volume; - 'wrap' will return the index as is. - - Returns - ------- - numpy.array - A 1D array of flat indices. 
- """ - return self._lookup_inds(self.bc.xyz2i(xyz, mode=mode), mode=mode) - - def get_labels(self, xyz, mapping=None, radius_um=None, mode='raise'): - """ - Performs a 3D lookup from real world coordinates to the volume labels - and return the regions ids according to the mapping - :param xyz: [n, 3] array of coordinates - :param mapping: brain region mapping (defaults to original Allen mapping) - :param radius_um: if not null, returns a regions ids array and an array of proportion - of regions in a sphere of size radius around the coordinates. - :param mode: {‘raise’, 'clip'} determines what to do when determined index lies outside the atlas volume - 'raise' will raise a ValueError (default) - 'clip' will replace the index with the closest index inside the volume - :return: n array of region ids - """ - mapping = mapping or self.regions.default_mapping - - if radius_um: - nrx = int(np.ceil(radius_um / abs(self.bc.dx) / 1e6)) - nry = int(np.ceil(radius_um / abs(self.bc.dy) / 1e6)) - nrz = int(np.ceil(radius_um / abs(self.bc.dz) / 1e6)) - nr = [nrx, nry, nrz] - iii = self.bc.xyz2i(xyz, mode=mode) - # computing the cube radius and indices is more complicated as volume indices are not - # necessarily in ml, ap, dv order so the indices order is dynamic - rcube = np.meshgrid(*tuple((np.arange( - -nr[i], nr[i] + 1) * self.bc.dxyz[i]) ** 2 for i in self.xyz2dims)) - rcube = np.sqrt(rcube[0] + rcube[1], rcube[2]) * 1e6 - icube = tuple(slice(-nr[i] + iii[i], nr[i] + iii[i] + 1) for i in self.xyz2dims) - cube = self.regions.mappings[mapping][self.label[icube]] - ilabs, counts = np.unique(cube[rcube <= radius_um], return_counts=True) - return self.regions.id[ilabs], counts / np.sum(counts) - else: - regions_indices = self._get_mapping(mapping=mapping)[self.label.flat[self._lookup(xyz, mode=mode)]] - return self.regions.id[regions_indices] - - def _get_mapping(self, mapping=None): - """ - Safe way to get mappings if nothing defined in regions. 
- A mapping transforms from the full allen brain Atlas ids to the remapped ids - new_ids = ids[mapping] - """ - mapping = mapping or self.regions.default_mapping - if hasattr(self.regions, 'mappings'): - return self.regions.mappings[mapping] - else: - return np.arange(self.regions.id.size) - - def _label2rgb(self, imlabel): - """ - Converts a slice from the label volume to its RGB equivalent for display - :param imlabel: 2D np-array containing label ids (slice of the label volume) - :return: 3D np-array of the slice uint8 rgb values - """ - if getattr(self.regions, 'rgb', None) is None: - return self.regions.id[imlabel] - else: # if the regions exist and have the rgb attribute, do the rgb lookup - return self.regions.rgb[imlabel] - - def tilted_slice(self, xyz, axis, volume='image'): - """ - From line coordinates, extracts the tilted plane containing the line from the 3D volume - :param xyz: np.array: points defining a probe trajectory in 3D space (xyz triplets) - if more than 2 points are provided will take the best fit - :param axis: - 0: along ml = sagittal-slice - 1: along ap = coronal-slice - 2: along dv = horizontal-slice - :param volume: 'image' or 'annotation' - :return: np.array, abscissa extent (width), ordinate extent (height), - squeezed axis extent (depth) - """ - if axis == 0: # sagittal slice (squeeze/take along ml-axis) - wdim, hdim, ddim = (1, 2, 0) - elif axis == 1: # coronal slice (squeeze/take along ap-axis) - wdim, hdim, ddim = (0, 2, 1) - elif axis == 2: # horizontal slice (squeeze/take along dv-axis) - wdim, hdim, ddim = (0, 1, 2) - # get the best fit and find exit points of the volume along squeezed axis - trj = Trajectory.fit(xyz) - sub_volume = trj._eval(self.bc.lim(axis=hdim), axis=hdim) - sub_volume[:, wdim] = self.bc.lim(axis=wdim) - sub_volume_i = self.bc.xyz2i(sub_volume) - tile_shape = np.array([np.diff(sub_volume_i[:, hdim])[0] + 1, self.bc.nxyz[wdim]]) - # get indices along each dimension - indx = np.arange(tile_shape[1]) - indy = 
np.arange(tile_shape[0]) - inds = np.linspace(*sub_volume_i[:, ddim], tile_shape[0]) - # compute the slice indices and output the slice - _, INDS = np.meshgrid(indx, np.int64(np.around(inds))) - INDX, INDY = np.meshgrid(indx, indy) - indsl = [[INDX, INDY, INDS][i] for i in np.argsort([wdim, hdim, ddim])[self.xyz2dims]] - if isinstance(volume, np.ndarray): - tslice = volume[indsl[0], indsl[1], indsl[2]] - elif volume.lower() == 'annotation': - tslice = self._label2rgb(self.label[indsl[0], indsl[1], indsl[2]]) - elif volume.lower() == 'image': - tslice = self.image[indsl[0], indsl[1], indsl[2]] - elif volume.lower() == 'surface': - tslice = self.surface[indsl[0], indsl[1], indsl[2]] - - # get extents with correct convention NB: matplotlib flips the y-axis on imshow ! - width = np.sort(sub_volume[:, wdim])[np.argsort(self.bc.lim(axis=wdim))] - height = np.flipud(np.sort(sub_volume[:, hdim])[np.argsort(self.bc.lim(axis=hdim))]) - depth = np.flipud(np.sort(sub_volume[:, ddim])[np.argsort(self.bc.lim(axis=ddim))]) - return tslice, width, height, depth - - def plot_tilted_slice(self, xyz, axis, volume='image', cmap=None, ax=None, return_sec=False, **kwargs): - """ - From line coordinates, extracts the tilted plane containing the line from the 3D volume - :param xyz: np.array: points defining a probe trajectory in 3D space (xyz triplets) - if more than 2 points are provided will take the best fit - :param axis: - 0: along ml = sagittal-slice - 1: along ap = coronal-slice - 2: along dv = horizontal-slice - :param volume: 'image' or 'annotation' - :return: matplotlib axis - """ - if axis == 0: - axis_labels = np.array(['ap (um)', 'dv (um)', 'ml (um)']) - elif axis == 1: - axis_labels = np.array(['ml (um)', 'dv (um)', 'ap (um)']) - elif axis == 2: - axis_labels = np.array(['ml (um)', 'ap (um)', 'dv (um)']) - - tslice, width, height, depth = self.tilted_slice(xyz, axis, volume=volume) - width = width * 1e6 - height = height * 1e6 - depth = depth * 1e6 - if not ax: - 
plt.figure() - ax = plt.gca() - ax.axis('equal') - if not cmap: - cmap = plt.get_cmap('bone') - # get the transfer function from y-axis to squeezed axis for second axe - ab = np.linalg.solve(np.c_[height, height * 0 + 1], depth) - height * ab[0] + ab[1] - ax.imshow(tslice, extent=np.r_[width, height], cmap=cmap, **kwargs) - sec_ax = ax.secondary_yaxis('right', functions=( - lambda x: x * ab[0] + ab[1], - lambda y: (y - ab[1]) / ab[0])) - ax.set_xlabel(axis_labels[0]) - ax.set_ylabel(axis_labels[1]) - sec_ax.set_ylabel(axis_labels[2]) - if return_sec: - return ax, sec_ax - else: - return ax - - @staticmethod - def _plot_slice(im, extent, ax=None, cmap=None, volume=None, **kwargs): - """ - Plot an atlas slice. - - Parameters - ---------- - im : numpy.array - A 2D image slice to plot. - extent : array_like - The bounding box in data coordinates that the image will fill specified as (left, - right, bottom, top) in data coordinates. - ax : matplotlib.pyplot.Axes - An optional Axes object to plot to. - cmap : str, matplotlib.colors.Colormap - The Colormap instance or registered colormap name used to map scalar data to colors. - Defaults to 'bone'. - volume : str - If 'boundary', assumes image is an outline of boundaries between all regions. - FIXME How does this affect the plot? - **kwargs - See matplotlib.pyplot.imshow. - - Returns - ------- - matplotlib.pyplot.Axes - The image axes. 
- """ - if not ax: - ax = plt.gca() - ax.axis('equal') - if not cmap: - cmap = plt.get_cmap('bone') - - if volume == 'boundary': - imb = np.zeros((*im.shape[:2], 4), dtype=np.uint8) - imb[im == 1] = np.array([0, 0, 0, 255]) - im = imb - - ax.imshow(im, extent=extent, cmap=cmap, **kwargs) - return ax - - def extent(self, axis): - """ - :param axis: direction along which the volume is stacked: - (2 = z for horizontal slice) - (1 = y for coronal slice) - (0 = x for sagittal slice) - :return: - """ - - if axis == 0: - extent = np.r_[self.bc.ylim, np.flip(self.bc.zlim)] * 1e6 - elif axis == 1: - extent = np.r_[self.bc.xlim, np.flip(self.bc.zlim)] * 1e6 - elif axis == 2: - extent = np.r_[self.bc.xlim, np.flip(self.bc.ylim)] * 1e6 - return extent - - def slice(self, coordinate, axis, volume='image', mode='raise', region_values=None, - mapping=None, bc=None): - """ - Get slice through atlas - - :param coordinate: coordinate to slice in metres, float - :param axis: xyz convention: 0 for ml, 1 for ap, 2 for dv - - 0: sagittal slice (along ml axis) - - 1: coronal slice (along ap axis) - - 2: horizontal slice (along dv axis) - :param volume: - - 'image' - allen image volume - - 'annotation' - allen annotation volume - - 'surface' - outer surface of mesh - - 'boundary' - outline of boundaries between all regions - - 'volume' - custom volume, must pass in volume of shape ba.image.shape as regions_value argument - - 'value' - custom value per allen region, must pass in array of shape ba.regions.id as regions_value argument - :param mode: error mode for out of bounds coordinates - - 'raise' raise an error - - 'clip' gets the first or last index - :param region_values: custom values to plot - - if volume='volume', region_values must have shape ba.image.shape - - if volume='value', region_values must have shape ba.regions.id - :param mapping: mapping to use. 
Options can be found using ba.regions.mappings.keys() - :return: 2d array or 3d RGB numpy int8 array - """ - if axis == 0: - index = self.bc.x2i(np.array(coordinate), mode=mode) - elif axis == 1: - index = self.bc.y2i(np.array(coordinate), mode=mode) - elif axis == 2: - index = self.bc.z2i(np.array(coordinate), mode=mode) - - # np.take is 50 thousand times slower than straight slicing ! - def _take(vol, ind, axis): - if mode == 'clip': - ind = np.minimum(np.maximum(ind, 0), vol.shape[axis] - 1) - if axis == 0: - return vol[ind, :, :] - elif axis == 1: - return vol[:, ind, :] - elif axis == 2: - return vol[:, :, ind] - - def _take_remap(vol, ind, axis, mapping): - # For the labels, remap the regions indices according to the mapping - return self._get_mapping(mapping=mapping)[_take(vol, ind, axis)] - - if isinstance(volume, np.ndarray): - return _take(volume, index, axis=self.xyz2dims[axis]) - elif volume in 'annotation': - iregion = _take_remap(self.label, index, self.xyz2dims[axis], mapping) - return self._label2rgb(iregion) - elif volume == 'image': - return _take(self.image, index, axis=self.xyz2dims[axis]) - elif volume == 'value': - return region_values[_take_remap(self.label, index, self.xyz2dims[axis], mapping)] - elif volume == 'image': - return _take(self.image, index, axis=self.xyz2dims[axis]) - elif volume in ['surface', 'edges']: - self.compute_surface() - return _take(self.surface, index, axis=self.xyz2dims[axis]) - elif volume == 'boundary': - iregion = _take_remap(self.label, index, self.xyz2dims[axis], mapping) - return self.compute_boundaries(iregion) - - elif volume == 'volume': - if bc is not None: - index = bc.xyz2i(np.array([coordinate] * 3))[axis] - return _take(region_values, index, axis=self.xyz2dims[axis]) - - def compute_boundaries(self, values): - """ - Compute the boundaries between regions on slice - :param values: - :return: - """ - boundary = np.abs(np.diff(values, axis=0, prepend=0)) - boundary = boundary + np.abs(np.diff(values, 
axis=1, prepend=0)) - boundary = boundary + np.abs(np.diff(values, axis=1, append=0)) - boundary = boundary + np.abs(np.diff(values, axis=0, append=0)) - - boundary[boundary != 0] = 1 - - return boundary - - def plot_slices(self, xyz, *args, **kwargs): - """ - From a single coordinate, plots the 3 slices that intersect at this point in a single - matplotlib figure - :param xyz: mlapdv coordinate in m - :param args: arguments to be forwarded to plot slices - :param kwargs: keyword arguments to be forwarded to plot slices - :return: 2 by 2 array of axes - """ - fig, axs = plt.subplots(2, 2) - self.plot_cslice(xyz[1], *args, ax=axs[0, 0], **kwargs) - self.plot_sslice(xyz[0], *args, ax=axs[0, 1], **kwargs) - self.plot_hslice(xyz[2], *args, ax=axs[1, 0], **kwargs) - xyz_um = xyz * 1e6 - axs[0, 0].plot(xyz_um[0], xyz_um[2], 'g*') - axs[0, 1].plot(xyz_um[1], xyz_um[2], 'g*') - axs[1, 0].plot(xyz_um[0], xyz_um[1], 'g*') - return axs - - def plot_cslice(self, ap_coordinate, volume='image', mapping=None, region_values=None, **kwargs): - """ - Plot coronal slice through atlas at given ap_coordinate - - :param: ap_coordinate (m) - :param volume: - - 'image' - allen image volume - - 'annotation' - allen annotation volume - - 'surface' - outer surface of mesh - - 'boundary' - outline of boundaries between all regions - - 'volume' - custom volume, must pass in volume of shape ba.image.shape as regions_value argument - - 'value' - custom value per allen region, must pass in array of shape ba.regions.id as regions_value argument - :param mapping: mapping to use. Options can be found using ba.regions.mappings.keys() - :param region_values: custom values to plot - - if volume='volume', region_values must have shape ba.image.shape - - if volume='value', region_values must have shape ba.regions.id - :param mapping: mapping to use. 
Options can be found using ba.regions.mappings.keys() - :param **kwargs: matplotlib.pyplot.imshow kwarg arguments - :return: matplotlib ax object - """ - - cslice = self.slice(ap_coordinate, axis=1, volume=volume, mapping=mapping, region_values=region_values) - return self._plot_slice(np.moveaxis(cslice, 0, 1), extent=self.extent(axis=1), volume=volume, **kwargs) - - def plot_hslice(self, dv_coordinate, volume='image', mapping=None, region_values=None, **kwargs): - """ - Plot horizontal slice through atlas at given dv_coordinate - - :param: dv_coordinate (m) - :param volume: - - 'image' - allen image volume - - 'annotation' - allen annotation volume - - 'surface' - outer surface of mesh - - 'boundary' - outline of boundaries between all regions - - 'volume' - custom volume, must pass in volume of shape ba.image.shape as regions_value argument - - 'value' - custom value per allen region, must pass in array of shape ba.regions.id as regions_value argument - :param mapping: mapping to use. Options can be found using ba.regions.mappings.keys() - :param region_values: custom values to plot - - if volume='volume', region_values must have shape ba.image.shape - - if volume='value', region_values must have shape ba.regions.id - :param mapping: mapping to use. 
Options can be found using ba.regions.mappings.keys() - :param **kwargs: matplotlib.pyplot.imshow kwarg arguments - :return: matplotlib ax object - """ - - hslice = self.slice(dv_coordinate, axis=2, volume=volume, mapping=mapping, region_values=region_values) - return self._plot_slice(hslice, extent=self.extent(axis=2), volume=volume, **kwargs) - - def plot_sslice(self, ml_coordinate, volume='image', mapping=None, region_values=None, **kwargs): - """ - Plot sagittal slice through atlas at given ml_coordinate - - :param: ml_coordinate (m) - :param volume: - - 'image' - allen image volume - - 'annotation' - allen annotation volume - - 'surface' - outer surface of mesh - - 'boundary' - outline of boundaries between all regions - - 'volume' - custom volume, must pass in volume of shape ba.image.shape as regions_value argument - - 'value' - custom value per allen region, must pass in array of shape ba.regions.id as regions_value argument - :param mapping: mapping to use. Options can be found using ba.regions.mappings.keys() - :param region_values: custom values to plot - - if volume='volume', region_values must have shape ba.image.shape - - if volume='value', region_values must have shape ba.regions.id - :param mapping: mapping to use. 
Options can be found using ba.regions.mappings.keys() - :param **kwargs: matplotlib.pyplot.imshow kwarg arguments - :return: matplotlib ax object - """ - - sslice = self.slice(ml_coordinate, axis=0, volume=volume, mapping=mapping, region_values=region_values) - return self._plot_slice(np.swapaxes(sslice, 0, 1), extent=self.extent(axis=0), volume=volume, **kwargs) - - def plot_top(self, volume='annotation', mapping=None, region_values=None, ax=None, **kwargs): - """ - Plot top view of atlas - :param volume: - - 'image' - allen image volume - - 'annotation' - allen annotation volume - - 'boundary' - outline of boundaries between all regions - - 'volume' - custom volume, must pass in volume of shape ba.image.shape as regions_value argument - - 'value' - custom value per allen region, must pass in array of shape ba.regions.id as regions_value argument - - :param mapping: mapping to use. Options can be found using ba.regions.mappings.keys() - :param region_values: - :param ax: - :param kwargs: - :return: - """ - - self.compute_surface() - ix, iy = np.meshgrid(np.arange(self.bc.nx), np.arange(self.bc.ny)) - iz = self.bc.z2i(self.top) - inds = self._lookup_inds(np.stack((ix, iy, iz), axis=-1)) - - regions = self._get_mapping(mapping=mapping)[self.label.flat[inds]] - - if volume == 'annotation': - im = self._label2rgb(regions) - elif volume == 'image': - im = self.top - elif volume == 'value': - im = region_values[regions] - elif volume == 'volume': - im = np.zeros((iz.shape)) - for x in range(im.shape[0]): - for y in range(im.shape[1]): - im[x, y] = region_values[x, y, iz[x, y]] - elif volume == 'boundary': - im = self.compute_boundaries(regions) - - return self._plot_slice(im, self.extent(axis=2), ax=ax, volume=volume, **kwargs) - - -@dataclass -class Trajectory: +class Trajectory(iblatlas.atlas.Trajectory): """ 3D Trajectory (usually for a linear probe), minimally defined by a vector and a point. 
@@ -988,485 +36,22 @@ class Trajectory: >>> trj = Trajectory.fit(xyz) """ - vector: np.ndarray - point: np.ndarray - - @staticmethod - def fit(xyz): - """ - Fits a line to a 3D cloud of points. - - Parameters - ---------- - xyz : numpy.array - An n by 3 array containing a cloud of points to fit a line to. - - Returns - ------- - Trajectory - A new trajectory object. - """ - xyz_mean = np.mean(xyz, axis=0) - return Trajectory(vector=np.linalg.svd(xyz - xyz_mean)[2][0], point=xyz_mean) - - def eval_x(self, x): - """ - given an array of x coordinates, returns the xyz array of coordinates along the insertion - :param x: n by 1 or numpy array containing x-coordinates - :return: n by 3 numpy array containing xyz-coordinates - """ - return self._eval(x, axis=0) - def eval_y(self, y): - """ - given an array of y coordinates, returns the xyz array of coordinates along the insertion - :param y: n by 1 or numpy array containing y-coordinates - :return: n by 3 numpy array containing xyz-coordinates - """ - return self._eval(y, axis=1) - def eval_z(self, z): - """ - given an array of z coordinates, returns the xyz array of coordinates along the insertion - :param z: n by 1 or numpy array containing z-coordinates - :return: n by 3 numpy array containing xyz-coordinates - """ - return self._eval(z, axis=2) - - def project(self, point): - """ - projects a point onto the trajectory line - :param point: np.array(x, y, z) coordinates - :return: - """ - # https://mathworld.wolfram.com/Point-LineDistance3-Dimensional.html - if point.ndim == 1: - return self.project(point[np.newaxis])[0] - return (self.point + np.dot(point[:, np.newaxis] - self.point, self.vector) / - np.dot(self.vector, self.vector) * self.vector) - - def mindist(self, xyz, bounds=None): - """ - Computes the minimum distance to the trajectory line for one or a set of points. - If bounds are provided, computes the minimum distance to the segment instead of an - infinite line. 
- :param xyz: [..., 3] - :param bounds: defaults to None. np.array [2, 3]: segment boundaries, inf line if None - :return: minimum distance [...] - """ - proj = self.project(xyz) - d = np.sqrt(np.sum((proj - xyz) ** 2, axis=-1)) - if bounds is not None: - # project the boundaries and the points along the traj - b = np.dot(bounds, self.vector) - ob = np.argsort(b) - p = np.dot(xyz[:, np.newaxis], self.vector).squeeze() - # for points below and above boundaries, compute cartesian distance to the boundary - imin = p < np.min(b) - d[imin] = np.sqrt(np.sum((xyz[imin, :] - bounds[ob[0], :]) ** 2, axis=-1)) - imax = p > np.max(b) - d[imax] = np.sqrt(np.sum((xyz[imax, :] - bounds[ob[1], :]) ** 2, axis=-1)) - return d - - def _eval(self, c, axis): - # uses symmetric form of 3d line equation to get xyz coordinates given one coordinate - if not isinstance(c, np.ndarray): - c = np.array(c) - while c.ndim < 2: - c = c[..., np.newaxis] - # there are cases where it's impossible to project if a line is // to the axis - if self.vector[axis] == 0: - return np.nan * np.zeros((c.shape[0], 3)) - else: - return (c - self.point[axis]) * self.vector / self.vector[axis] + self.point - - def exit_points(self, bc): - """ - Given a Trajectory and a BrainCoordinates object, computes the intersection of the - trajectory with the brain coordinates bounding box - :param bc: BrainCoordinate objects - :return: np.ndarray 2 y 3 corresponding to exit points xyz coordinates - """ - bounds = np.c_[bc.xlim, bc.ylim, bc.zlim] - epoints = np.r_[self.eval_x(bc.xlim), self.eval_y(bc.ylim), self.eval_z(bc.zlim)] - epoints = epoints[~np.all(np.isnan(epoints), axis=1)] - ind = np.all(np.bitwise_and(bounds[0, :] <= epoints, epoints <= bounds[1, :]), axis=1) - return epoints[ind, :] - - -@dataclass -class Insertion: +class Insertion(iblatlas.atlas.Insertion): """ Defines an ephys probe insertion in 3D coordinate. IBL conventions. 
To instantiate, use the static methods: `Insertion.from_track` and `Insertion.from_dict`. """ - x: float - y: float - z: float - phi: float - theta: float - depth: float - label: str = '' - beta: float = 0 - - @staticmethod - def from_track(xyzs, brain_atlas=None): - """ - Define an insersion from one or more trajectory. - - Parameters - ---------- - xyzs : numpy.array - An n by 3 array xyz coordinates representing an insertion trajectory. - brain_atlas : BrainAtlas - A brain atlas instance, used to attain the point of entry. - - Returns - ------- - Insertion - """ - assert brain_atlas, 'Input argument brain_atlas must be defined' - traj = Trajectory.fit(xyzs) - # project the deepest point into the vector to get the tip coordinate - tip = traj.project(xyzs[np.argmin(xyzs[:, 2]), :]) - # get intersection with the brain surface as an entry point - entry = Insertion.get_brain_entry(traj, brain_atlas) - # convert to spherical system to store the insertion - depth, theta, phi = cart2sph(*(entry - tip)) - insertion_dict = { - 'x': entry[0], 'y': entry[1], 'z': entry[2], 'phi': phi, 'theta': theta, 'depth': depth - } - return Insertion(**insertion_dict) - - @staticmethod - def from_dict(d, brain_atlas=None): - """ - Constructs an Insertion object from the json information stored in probes.description file. - - Parameters - ---------- - d : dict - A dictionary containing at least the following keys {'x', 'y', 'z', 'phi', 'theta', - 'depth'}. The depth and xyz coordinates must be in um. - brain_atlas : BrainAtlas, default=None - If provided, disregards the z coordinate and locks the insertion point to the z of the - brain surface. 
- - Returns - ------- - Insertion - - Examples - -------- - >>> tri = {'x': 544.0, 'y': 1285.0, 'z': 0.0, 'phi': 0.0, 'theta': 5.0, 'depth': 4501.0} - >>> ins = Insertion.from_dict(tri) - """ - assert brain_atlas, 'Input argument brain_atlas must be defined' - z = d['z'] / 1e6 - if not hasattr(brain_atlas, 'top'): - brain_atlas.compute_surface() - iy = brain_atlas.bc.y2i(d['y'] / 1e6) - ix = brain_atlas.bc.x2i(d['x'] / 1e6) - # Only use the brain surface value as z if it isn't NaN (this happens when the surface touches the edges - # of the atlas volume - if not np.isnan(brain_atlas.top[iy, ix]): - z = brain_atlas.top[iy, ix] - return Insertion(x=d['x'] / 1e6, y=d['y'] / 1e6, z=z, - phi=d['phi'], theta=d['theta'], depth=d['depth'] / 1e6, - beta=d.get('beta', 0), label=d.get('label', '')) - - @property - def trajectory(self): - """ - Gets the trajectory object matching insertion coordinates - :return: atlas.Trajectory - """ - return Trajectory.fit(self.xyz) - @property - def xyz(self): - return np.c_[self.entry, self.tip].transpose() - @property - def entry(self): - return np.array((self.x, self.y, self.z)) - - @property - def tip(self): - return sph2cart(- self.depth, self.theta, self.phi) + np.array((self.x, self.y, self.z)) - - @staticmethod - def _get_surface_intersection(traj, brain_atlas, surface='top'): - """ - TODO Document! 
- - Parameters - ---------- - traj - brain_atlas - surface - - Returns - ------- - - """ - brain_atlas.compute_surface() - - distance = traj.mindist(brain_atlas.srf_xyz) - dist_sort = np.argsort(distance) - # In some cases the nearest two intersection points are not the top and bottom of brain - # So we find all intersection points that fall within one voxel and take the one with - # highest dV to be entry and lowest dV to be exit - idx_lim = np.sum(distance[dist_sort] * 1e6 < np.max(brain_atlas.res_um)) - dist_lim = dist_sort[0:idx_lim] - z_val = brain_atlas.srf_xyz[dist_lim, 2] - if surface == 'top': - ma = np.argmax(z_val) - _xyz = brain_atlas.srf_xyz[dist_lim[ma], :] - _ixyz = brain_atlas.bc.xyz2i(_xyz) - _ixyz[brain_atlas.xyz2dims[2]] += 1 - elif surface == 'bottom': - ma = np.argmin(z_val) - _xyz = brain_atlas.srf_xyz[dist_lim[ma], :] - _ixyz = brain_atlas.bc.xyz2i(_xyz) - - xyz = brain_atlas.bc.i2xyz(_ixyz.astype(float)) - - return xyz - - @staticmethod - def get_brain_exit(traj, brain_atlas): - """ - Given a Trajectory and a BrainAtlas object, computes the brain exit coordinate as the - intersection of the trajectory and the brain surface (brain_atlas.surface) - :param brain_atlas: - :return: 3 element array x,y,z - """ - # Find point where trajectory intersects with bottom of brain - return Insertion._get_surface_intersection(traj, brain_atlas, surface='bottom') - - @staticmethod - def get_brain_entry(traj, brain_atlas): - """ - Given a Trajectory and a BrainAtlas object, computes the brain entry coordinate as the - intersection of the trajectory and the brain surface (brain_atlas.surface) - :param brain_atlas: - :return: 3 element array x,y,z - """ - # Find point where trajectory intersects with top of brain - return Insertion._get_surface_intersection(traj, brain_atlas, surface='top') - - -class AllenAtlas(BrainAtlas): - """ - The Allan Common Coordinate Framework (CCF) brain atlas. 
- - Instantiates an atlas.BrainAtlas corresponding to the Allen CCF at the given resolution - using the IBL Bregma and coordinate system. - """ - - """pathlib.PurePosixPath: The default relative path of the Allen atlas file.""" - atlas_rel_path = PurePosixPath('histology', 'ATLAS', 'Needles', 'Allen') - - """numpy.array: A diffusion weighted imaging (DWI) image volume. - - The Allen atlas DWI average template volume has with the shape (ap, ml, dv) and contains uint16 - values. FIXME What do the values represent? - """ - image = None - - """numpy.array: An annotation label volume. - - The Allen atlas label volume has with the shape (ap, ml, dv) and contains uint16 indices - of the Allen CCF brain regions to which each voxel belongs. - """ - label = None - - def __init__(self, res_um=25, scaling=(1, 1, 1), mock=False, hist_path=None): - """ - Instantiates an atlas.BrainAtlas corresponding to the Allen CCF at the given resolution - using the IBL Bregma and coordinate system. - - Parameters - ---------- - res_um : {10, 25, 50} int - The Atlas resolution in micrometres; one of 10, 25 or 50um. - scaling : float, numpy.array - Scale factor along ml, ap, dv for squeeze and stretch (default: [1, 1, 1]). - mock : bool - For testing purposes, return atlas object with image comprising zeros. - hist_path : str, pathlib.Path - The location of the image volume. May be a full file path or a directory. - - Examples - -------- - Instantiate Atlas from a non-default location, in this case the cache_dir of an ONE instance. - >>> target_dir = one.cache_dir / AllenAtlas.atlas_rel_path - ... 
ba = AllenAtlas(hist_path=target_dir) - """ - LUT_VERSION = 'v01' # version 01 is the lateralized version - regions = BrainRegions() - xyz2dims = np.array([1, 0, 2]) # this is the c-contiguous ordering - dims2xyz = np.array([1, 0, 2]) - # we use Bregma as the origin - self.res_um = res_um - ibregma = (ALLEN_CCF_LANDMARKS_MLAPDV_UM['bregma'] / self.res_um) - dxyz = self.res_um * 1e-6 * np.array([1, -1, -1]) * scaling - if mock: - image, label = [np.zeros((528, 456, 320), dtype=np.int16) for _ in range(2)] - label[:, :, 100:105] = 1327 # lookup index for retina, id 304325711 (no id 1327) - else: - # Hist path may be a full path to an existing image file, or a path to a directory - cache_dir = Path(one.params.get(silent=True).CACHE_DIR) - hist_path = Path(hist_path or cache_dir.joinpath(self.atlas_rel_path)) - if not hist_path.suffix: # check if folder - hist_path /= f'average_template_{res_um}.nrrd' - # get the image volume - if not hist_path.exists(): - hist_path = _download_atlas_allen(hist_path) - # get the remapped label volume - file_label = hist_path.with_name(f'annotation_{res_um}.nrrd') - if not file_label.exists(): - file_label = _download_atlas_allen(file_label) - file_label_remap = hist_path.with_name(f'annotation_{res_um}_lut_{LUT_VERSION}.npz') - if not file_label_remap.exists(): - label = self._read_volume(file_label).astype(dtype=np.int32) - _logger.info("Computing brain atlas annotations lookup table") - # lateralize atlas: for this the regions of the left hemisphere have primary - # keys opposite to to the normal ones - lateral = np.zeros(label.shape[xyz2dims[0]]) - lateral[int(np.floor(ibregma[0]))] = 1 - lateral = np.sign(np.cumsum(lateral)[np.newaxis, :, np.newaxis] - 0.5) - label = label * lateral.astype(np.int32) - # the 10 um atlas is too big to fit in memory so work by chunks instead - if res_um == 10: - first, ncols = (0, 10) - while True: - last = np.minimum(first + ncols, label.shape[-1]) - _logger.info(f"Computing... 
{last} on {label.shape[-1]}") - _, im = ismember(label[:, :, first:last], regions.id) - label[:, :, first:last] = np.reshape(im, label[:, :, first:last].shape) - if last == label.shape[-1]: - break - first += ncols - label = label.astype(dtype=np.uint16) - _logger.info("Saving npz, this can take a long time") - else: - _, im = ismember(label, regions.id) - label = np.reshape(im.astype(np.uint16), label.shape) - np.savez_compressed(file_label_remap, label) - _logger.info(f"Cached remapping file {file_label_remap} ...") - # loads the files - label = self._read_volume(file_label_remap) - image = self._read_volume(hist_path) - - super().__init__(image, label, dxyz, regions, ibregma, dims2xyz=dims2xyz, xyz2dims=xyz2dims) - - @staticmethod - def _read_volume(file_volume): - if file_volume.suffix == '.nrrd': - volume, _ = nrrd.read(file_volume, index_order='C') # ml, dv, ap - # we want the coronal slice to be the most contiguous - volume = np.transpose(volume, (2, 0, 1)) # image[iap, iml, idv] - elif file_volume.suffix == '.npz': - volume = np.load(file_volume)['arr_0'] - return volume - - def xyz2ccf(self, xyz, ccf_order='mlapdv', mode='raise'): - """ - Converts anatomical coordinates to CCF coordinates. - - Anatomical coordinates are in meters, relative to bregma, which CFF coordinates are - assumed to be the volume indices multiplied by the spacing in micormeters. - - Parameters - ---------- - xyz : numpy.array - An N by 3 array of anatomical coordinates in meters, relative to bregma. - ccf_order : {'mlapdv', 'apdvml'}, default='mlapdv' - The order of the CCF coordinates returned. For IBL (the default) this is (ML, AP, DV), - for Allen MCC vertices, this is (AP, DV, ML). - mode : {'raise', 'clip', 'wrap'}, default='raise' - How to behave if the coordinate lies outside of the volume: raise (default) will raise - a ValueError; 'clip' will replace the index with the closest index inside the volume; - 'wrap' will return the index as is. 
- - Returns - ------- - numpy.array - Coordinates in CCF space (um, origin is the front left top corner of the data - volume, order determined by ccf_order - """ - ordre = self._ccf_order(ccf_order) - ccf = self.bc.xyz2i(xyz, round=False, mode=mode) * float(self.res_um) - return ccf[..., ordre] - - def ccf2xyz(self, ccf, ccf_order='mlapdv'): - """ - Convert anatomical coordinates from CCF coordinates. - - Anatomical coordinates are in meters, relative to bregma, which CFF coordinates are - assumed to be the volume indices multiplied by the spacing in micormeters. - - Parameters - ---------- - ccf : numpy.array - An N by 3 array of coordinates in CCF space (atlas volume indices * um resolution). The - origin is the front left top corner of the data volume. - ccf_order : {'mlapdv', 'apdvml'}, default='mlapdv' - The order of the CCF coordinates given. For IBL (the default) this is (ML, AP, DV), - for Allen MCC vertices, this is (AP, DV, ML). - - Returns - ------- - numpy.array - The MLAPDV coordinates in meters, relative to bregma. - """ - ordre = self._ccf_order(ccf_order, reverse=True) - return self.bc.i2xyz((ccf[..., ordre] / float(self.res_um))) - - @staticmethod - def _ccf_order(ccf_order, reverse=False): - """ - Returns the mapping to go from CCF coordinates order to the brain atlas xyz - :param ccf_order: 'mlapdv' or 'apdvml' - :param reverse: defaults to False. - If False, returns from CCF to brain atlas - If True, returns from brain atlas to CCF - :return: - """ - if ccf_order == 'mlapdv': - return [0, 1, 2] - elif ccf_order == 'apdvml': - if reverse: - return [2, 0, 1] - else: - return [1, 2, 0] - else: - ValueError("ccf_order needs to be either 'mlapdv' or 'apdvml'") - - def compute_regions_volume(self, cumsum=False): - """ - Sums the number of voxels in the labels volume for each region. - Then compute volumes for all of the levels of hierarchy in cubic mm. 
- :param: cumsum: computes the cumulative sum of the volume as per the hierarchy (defaults to False) - :return: - """ - nr = self.regions.id.shape[0] - count = np.bincount(self.label.flatten(), minlength=nr) - if not cumsum: - self.regions.volume = count * (self.res_um / 1e3) ** 3 - else: - self.regions.compute_hierarchy() - self.regions.volume = np.zeros_like(count) - for i in np.arange(nr): - if count[i] == 0: - continue - self.regions.volume[np.unique(self.regions.hierarchy[:, i])] += count[i] - self.regions.volume = self.regions.volume * (self.res_um / 1e3) ** 3 +@deprecated_decorator +def AllenAtlas(*args, **kwargs): + return iblatlas.atlas.AllenAtlas(*args, **kwargs) +@deprecated_decorator def NeedlesAtlas(*args, **kwargs): """ Instantiates an atlas.BrainAtlas corresponding to the Allen CCF at the given resolution @@ -1500,12 +85,11 @@ def NeedlesAtlas(*args, **kwargs): three-dimensional brain atlas using an average magnetic resonance image of 40 adult C57Bl/6J mice. Neuroimage 42(1):60-9. [doi 10.1016/j.neuroimage.2008.03.037] """ - DV_SCALE = 0.952 # multiplicative factor on DV dimension, determined from MRI->CCF transform - AP_SCALE = 1.087 # multiplicative factor on AP dimension - kwargs['scaling'] = np.array([1, AP_SCALE, DV_SCALE]) - return AllenAtlas(*args, **kwargs) + + return iblatlas.atlas.NeedlesAtlas(*args, **kwargs) +@deprecated_decorator def MRITorontoAtlas(*args, **kwargs): """ The MRI Toronto brain atlas. @@ -1532,165 +116,9 @@ def MRITorontoAtlas(*args, **kwargs): relatively larger in males emerge before those larger in females. Nat Commun 9, 2615. 
[doi 10.1038/s41467-018-04921-2] """ - ML_SCALE = 0.952 - DV_SCALE = 0.885 # multiplicative factor on DV dimension, determined from MRI->CCF transform - AP_SCALE = 1.031 # multiplicative factor on AP dimension - kwargs['scaling'] = np.array([ML_SCALE, AP_SCALE, DV_SCALE]) - return AllenAtlas(*args, **kwargs) - - -def _download_atlas_allen(target_file_image): - """ - Download the Allen Atlas from the alleninstitute.org Website. - - Parameters - ---------- - target_file_image : str, pathlib.Path - The full target file path to which to download the file. The name of the image file name - must be either `average_template_.nrrd` or `annotation_.nrrd`, where is - one of {10, 25, 50}. - - Returns - ------- - pathlib.Path - The full path to the downloaded file. - - Notes - ----- - - © 2015 Allen Institute for Brain Science. Allen Mouse Brain Atlas (2015) with region annotations (2017). - - Available from: http://download.alleninstitute.org/informatics-archive/current-release/mouse_ccf/annotation/ - - See Allen Mouse Common Coordinate Framework Technical White Paper for details - http://help.brain-map.org/download/attachments/8323525/Mouse_Common_Coordinate_Framework.pdf - - """ - (target_file_image := Path(target_file_image)).parent.mkdir(exist_ok=True, parents=True) - ROOT_URL = 'http://download.alleninstitute.org/informatics-archive/' - - if target_file_image.name.split('_')[0] == 'average': - url = f'{ROOT_URL}current-release/mouse_ccf/average_template/' - elif target_file_image.name.split('_')[0] == 'annotation': - url = f'{ROOT_URL}current-release/mouse_ccf/annotation/ccf_2017/' - else: - raise ValueError('Unrecognized file image') - url += target_file_image.name - - return Path(http_download_file(url, target_dir=target_file_image.parent)) - - -class FranklinPaxinosAtlas(BrainAtlas): - - """pathlib.PurePosixPath: The default relative path of the atlas file.""" - atlas_rel_path = PurePosixPath('histology', 'ATLAS', 'Needles', 'FranklinPaxinos') - - def __init__(self, 
res_um=(10, 100, 10), scaling=(1, 1, 1), mock=False, hist_path=None): - """The Franklin & Paxinos brain atlas. - - Instantiates an atlas.BrainAtlas corresponding to the Franklin & Paxinos atlas [1]_ at the - given resolution, matched to the Allen coordinate Framework [2]_ and using the IBL Bregma - and coordinate system. The Franklin Paxisnos volume has resolution of 10um in ML and DV - axis and 100 um in AP direction. - - Parameters - ---------- - res_um : list, numpy.array - The Atlas resolution in micometres in each dimension. - scaling : float, numpy.array - Scale factor along ml, ap, dv for squeeze and stretch (default: [1, 1, 1]). - mock : bool - For testing purposes, return atlas object with image comprising zeros. - hist_path : str, pathlib.Path - The location of the image volume. May be a full file path or a directory. - - Examples - -------- - Instantiate Atlas from a non-default location, in this case the cache_dir of an ONE instance. - >>> target_dir = one.cache_dir / AllenAtlas.atlas_rel_path - ... ba = FranklinPaxinosAtlas(hist_path=target_dir) - - References - ---------- - .. [1] Paxinos G, and Franklin KBJ (2012) The Mouse Brain in Stereotaxic Coordinates, 4th - edition (Elsevier Academic Press) - .. [2] Chon U et al (2019) Enhanced and unified anatomical labeling for a common mouse - brain atlas [doi 10.1038/s41467-019-13057-w] - """ - # TODO interpolate? 
- LUT_VERSION = 'v01' # version 01 is the lateralized version - regions = FranklinPaxinosRegions() - xyz2dims = np.array([1, 0, 2]) # this is the c-contiguous ordering - dims2xyz = np.array([1, 0, 2]) - # we use Bregma as the origin - self.res_um = np.asarray(res_um) - ibregma = (PAXINOS_CCF_LANDMARKS_MLAPDV_UM['bregma'] / self.res_um) - dxyz = self.res_um * 1e-6 * np.array([1, -1, -1]) * scaling - if mock: - image, label = [np.zeros((528, 456, 320), dtype=np.int16) for _ in range(2)] - label[:, :, 100:105] = 1327 # lookup index for retina, id 304325711 (no id 1327) - else: - # Hist path may be a full path to an existing image file, or a path to a directory - cache_dir = Path(one.params.get(silent=True).CACHE_DIR) - hist_path = Path(hist_path or cache_dir.joinpath(self.atlas_rel_path)) - if not hist_path.suffix: # check if folder - hist_path /= f'average_template_{res_um[0]}_{res_um[1]}_{res_um[2]}.npz' - - # get the image volume - if not hist_path.exists(): - hist_path.parent.mkdir(exist_ok=True, parents=True) - aws.s3_download_file(f'atlas/FranklinPaxinos/{hist_path.name}', str(hist_path)) - # get the remapped label volume - file_label = hist_path.with_name(f'annotation_{res_um[0]}_{res_um[1]}_{res_um[2]}.npz') - if not file_label.exists(): - file_label.parent.mkdir(exist_ok=True, parents=True) - aws.s3_download_file(f'atlas/FranklinPaxinos/{file_label.name}', str(file_label)) - - file_label_remap = hist_path.with_name(f'annotation_{res_um[0]}_{res_um[1]}_{res_um[2]}_lut_{LUT_VERSION}.npz') - - if not file_label_remap.exists(): - label = self._read_volume(file_label).astype(dtype=np.int32) - _logger.info("computing brain atlas annotations lookup table") - # lateralize atlas: for this the regions of the left hemisphere have primary - # keys opposite to to the normal ones - lateral = np.zeros(label.shape[xyz2dims[0]]) - lateral[int(np.floor(ibregma[0]))] = 1 - lateral = np.sign(np.cumsum(lateral)[np.newaxis, :, np.newaxis] - 0.5) - label = label * 
lateral.astype(np.int32) - _, im = ismember(label, regions.id) - label = np.reshape(im.astype(np.uint16), label.shape) - np.savez_compressed(file_label_remap, label) - _logger.info(f"Cached remapping file {file_label_remap} ...") - # loads the files - label = self._read_volume(file_label_remap) - image = self._read_volume(hist_path) - - super().__init__(image, label, dxyz, regions, ibregma, dims2xyz=dims2xyz, xyz2dims=xyz2dims) - - @staticmethod - def _read_volume(file_volume): - """ - Loads an atlas image volume given a file path. - - Parameters - ---------- - file_volume : pathlib.Path - The file path of an image volume. Currently supports .nrrd and .npz files. + return iblatlas.atlas.MRITorontoAtlas(*args, **kwargs) - Returns - ------- - numpy.array - The loaded image volume with dimensions (ap, ml, dv). - Raises - ------ - ValueError - Unknown file extension, expects either '.nrrd' or '.npz'. - """ - if file_volume.suffix == '.nrrd': - volume, _ = nrrd.read(file_volume, index_order='C') # ml, dv, ap - # we want the coronal slice to be the most contiguous - volume = np.transpose(volume, (2, 0, 1)) # image[iap, iml, idv] - elif file_volume.suffix == '.npz': - volume = np.load(file_volume)['arr_0'] - else: - raise ValueError( - f'"{file_volume.suffix}" files not supported, must be either ".nrrd" or ".npz"') - return volume +@deprecated_decorator +def FranklinPaxinosAtlas(*args, **kwargs): + return iblatlas.atlas.FranklinPaxinosAtlas(*args, **kwargs) diff --git a/ibllib/atlas/flatmaps.py b/ibllib/atlas/flatmaps.py index 782fba4c6..4dcedc764 100644 --- a/ibllib/atlas/flatmaps.py +++ b/ibllib/atlas/flatmaps.py @@ -1,122 +1,15 @@ """Techniques to project the brain volume onto 2D images for visualisation purposes.""" -from functools import lru_cache -import logging -import json -import nrrd -import numpy as np -from scipy.interpolate import interp1d -import matplotlib.pyplot as plt +from ibllib.atlas import deprecated_decorator +from iblatlas import flatmaps -from 
iblutil.util import Bunch -from iblutil.io.hashfile import md5 -import one.remote.aws as aws -from ibllib.atlas.atlas import AllenAtlas +@deprecated_decorator +def FlatMap(**kwargs): + return flatmaps.FlatMap(**kwargs) -_logger = logging.getLogger(__name__) - - -class FlatMap(AllenAtlas): - """The Allen Atlas flatmap. - - FIXME Document! How are these flatmaps determined? Are they related to the Swansan atlas or is - that something else? - """ - - def __init__(self, flatmap='dorsal_cortex', res_um=25): - """ - Available flatmaps are currently 'dorsal_cortex', 'circles' and 'pyramid' - :param flatmap: - :param res_um: - """ - super().__init__(res_um=res_um) - self.name = flatmap - if flatmap == 'dorsal_cortex': - self._get_flatmap_from_file() - elif flatmap == 'circles': - if res_um != 25: - raise NotImplementedError('Pyramid circles not implemented for resolution other than 25um') - self.flatmap, self.ml_scale, self.ap_scale = circles(N=5, atlas=self, display='flat') - elif flatmap == 'pyramid': - if res_um != 25: - raise NotImplementedError('Pyramid circles not implemented for resolution other than 25um') - self.flatmap, self.ml_scale, self.ap_scale = circles(N=5, atlas=self, display='pyramid') - - def _get_flatmap_from_file(self): - # gets the file in the ONE cache for the flatmap name in the property, downloads it if needed - file_flatmap = self._get_cache_dir().joinpath(f'{self.name}_{self.res_um}.nrrd') - if not file_flatmap.exists(): - file_flatmap.parent.mkdir(exist_ok=True, parents=True) - aws.s3_download_file(f'atlas/{file_flatmap.name}', file_flatmap) - self.flatmap, _ = nrrd.read(file_flatmap) - - def plot_flatmap(self, depth=0, volume='annotation', mapping='Allen', region_values=None, ax=None, **kwargs): - """ - Displays the 2D image corresponding to the flatmap. - - If there are several depths, by default it will display the first one. - - Parameters - ---------- - depth : int - Index of the depth to display in the flatmap volume (the last dimension). 
- volume : {'image', 'annotation', 'boundary', 'value'} - - 'image' - Allen image volume. - - 'annotation' - Allen annotation volume. - - 'boundary' - outline of boundaries between all regions. - - 'volume' - custom volume, must pass in volume of shape BrainAtlas.image.shape as - regions_value argument. - mapping : str, default='Allen' - The brain region mapping to use. - region_values : numpy.array - An array the shape of the brain atlas image containing custom region values. Used when - `volume` value is 'volume'. - ax : matplotlib.pyplot.Axes, optional - A set of axes to plot to. - **kwargs - See matplotlib.pyplot.imshow. - - Returns - ------- - matplotlib.pyplot.Axes - The plotted image axes. - """ - if self.flatmap.ndim == 3: - inds = np.int32(self.flatmap[:, :, depth]) - else: - inds = np.int32(self.flatmap[:, :]) - regions = self._get_mapping(mapping=mapping)[self.label.flat[inds]] - if volume == 'annotation': - im = self._label2rgb(regions) - elif volume == 'value': - im = region_values[regions] - elif volume == 'boundary': - im = self.compute_boundaries(regions) - elif volume == 'image': - im = self.image.flat[inds] - else: - raise ValueError(f'Volume type "{volume}" not supported') - if not ax: - ax = plt.gca() - - return self._plot_slice(im, self.extent_flmap(), ax=ax, volume=volume, **kwargs) - - def extent_flmap(self): - """ - Returns the boundary coordinates of the flat map. - - Returns - ------- - numpy.array - The bounding coordinates of the flat map image, specified as (left, right, bottom, top). 
- """ - extent = np.r_[0, self.flatmap.shape[1], 0, self.flatmap.shape[0]] - return extent - - -@lru_cache(maxsize=1, typed=False) +@deprecated_decorator def circles(N=5, atlas=None, display='flat'): """ :param N: number of circles @@ -124,94 +17,11 @@ def circles(N=5, atlas=None, display='flat'): :param display: "flat" or "pyramid" :return: 2D map of indices, ap_coordinate, ml_coordinate """ - atlas = atlas if atlas else AllenAtlas() - - sz = np.array([]) - level = np.array([]) - for k in np.arange(N): - nlast = 2000 # 25 um for 5mm diameter - n = int((k + 1) * nlast / N) - r = .4 * (k + 1) / N - theta = (np.linspace(0, 2 * np.pi, n) + np.pi / 2) - sz = np.r_[sz, r * np.exp(1j * theta)] - level = np.r_[level, theta * 0 + k] - - atlas.compute_surface() - iy, ix = np.where(~np.isnan(atlas.top)) - centroid = np.array([np.mean(iy), np.mean(ix)]) - xlim = np.array([np.min(ix), np.max(ix)]) - ylim = np.array([np.min(iy), np.max(iy)]) - - s = Bunch( - x=np.real(sz) * np.diff(xlim) + centroid[1], - y=np.imag(sz) * np.diff(ylim) + centroid[0], - level=level, - distance=level * 0, - ) - - # compute the overall linear distance for each circle - d0 = 0 - for lev in np.unique(s['level']): - ind = s['level'] == lev - diff = np.abs(np.diff(s['x'][ind] + 1j * s['y'][ind])) - s['distance'][ind] = np.cumsum(np.r_[0, diff]) + d0 - d0 = s['distance'][ind][-1] - - fcn = interp1d(s['distance'], s['x'] + 1j * s['y'], fill_value='extrap') - d = np.arange(0, np.ceil(s['distance'][-1])) - - s_ = Bunch({ - 'x': np.real(fcn(d)), - 'y': np.imag(fcn(d)), - 'level': interp1d(s['distance'], level, kind='nearest')(d), - 'distance': d - }) - if display == 'flat': - ih = np.arange(atlas.bc.nz) - iw = np.arange(s_['distance'].size) - image_map = np.zeros((ih.size, iw.size), dtype=np.int32) - iw, ih = np.meshgrid(iw, ih) - # i2d = np.ravel_multi_index((ih[:], iw[:]), image_map.shape) - iml, _ = np.meshgrid(np.round(s_.x).astype(np.int32), np.arange(atlas.bc.nz)) - iap, idv = 
np.meshgrid(np.round(s_.y).astype(np.int32), np.arange(atlas.bc.nz)) - i3d = atlas._lookup_inds(np.c_[iml.flat, iap.flat, idv.flat]) - i3d = np.reshape(i3d, [atlas.bc.nz, s_['x'].size]) - image_map[ih, iw] = i3d - - elif display == 'pyramid': - for i in np.flipud(np.arange(N)): - ind = s_['level'] == i - dtot = s_['distance'][ind] - dtot = dtot - np.mean(dtot) - if i == N - 1: - ipx = np.arange(np.floor(dtot[0]), np.ceil(dtot[-1]) + 1) - nh = atlas.bc.nz * N - X0 = int(ipx[-1]) - image_map = np.zeros((nh, ipx.size), dtype=np.int32) - - iw = np.arange(np.sum(ind)) - iw = np.int32(iw - np.mean(iw) + X0) - ih = atlas.bc.nz * i + np.arange(atlas.bc.nz) - - iw, ih = np.meshgrid(iw, ih) - iml, _ = np.meshgrid(np.round(s_.x[ind]).astype(np.int32), np.arange(atlas.bc.nz)) - iap, idv = np.meshgrid(np.round(s_.y[ind]).astype(np.int32), np.arange(atlas.bc.nz)) - i3d = atlas._lookup_inds(np.c_[iml.flat, iap.flat, idv.flat]) - i3d = np.reshape(i3d, [atlas.bc.nz, s_['x'][ind].size]) - image_map[ih, iw] = i3d - x, y = (atlas.bc.i2x(s.x), atlas.bc.i2y(s.y)) - return image_map, x, y - # if display == 'flat': - # fig, ax = plt.subplots(2, 1, figsize=(16, 5)) - # elif display == 'pyramid': - # fig, ax = plt.subplots(1, 2, figsize=(14, 12)) - # ax[0].imshow(ba._label2rgb(ba.label.flat[image_map]), origin='upper') - # ax[1].imshow(ba.top) - # ax[1].plot(centroid[1], centroid[0], '*') - # ax[1].plot(s.x, s.y) + return flatmaps.circles(N=N, atlas=atlas, display=display) +@deprecated_decorator def swanson(filename="swanson2allen.npz"): """ FIXME Document! Which publication to reference? Are these specifically for flat maps? 
@@ -225,21 +35,11 @@ def swanson(filename="swanson2allen.npz"): ------- """ - # filename could be "swanson2allen_original.npz", or "swanson2allen.npz" for remapped indices to match - # existing labels in the brain atlas - OLD_MD5 = [ - 'bb0554ecc704dd4b540151ab57f73822', # version 2022-05-02 (remapped) - '7722c1307cf9a6f291ad7632e5dcc88b', # version 2022-05-09 (removed wolf pixels and 2 artefact regions) - ] - npz_file = AllenAtlas._get_cache_dir().joinpath(filename) - if not npz_file.exists() or md5(npz_file) in OLD_MD5: - npz_file.parent.mkdir(exist_ok=True, parents=True) - _logger.info(f'downloading swanson image from {aws.S3_BUCKET_IBL} s3 bucket...') - aws.s3_download_file(f'atlas/{npz_file.name}', npz_file) - s2a = np.load(npz_file)['swanson2allen'] # inds contains regions ids - return s2a + return flatmaps.swanson(filename=filename) + +@deprecated_decorator def swanson_json(filename="swansonpaths.json", remap=True): """ Vectorized version of the swanson bitmap file. The vectorized version was generated from swanson() using matlab @@ -255,98 +55,4 @@ def swanson_json(filename="swansonpaths.json", remap=True): ------- """ - OLD_MD5 = ['97ccca2b675b28ba9b15ca8af5ba4111', # errored map with FOTU and CUL4, 5 mixed up - '56daa7022b5e03080d8623814cda6f38', # old md5 of swanson json without CENT and PTLp - # and CUL4 split (on s3 called swansonpaths_56daa.json) - 'f848783954883c606ca390ceda9e37d2'] - - json_file = AllenAtlas._get_cache_dir().joinpath(filename) - if not json_file.exists() or md5(json_file) in OLD_MD5: - json_file.parent.mkdir(exist_ok=True, parents=True) - _logger.info(f'downloading swanson paths from {aws.S3_BUCKET_IBL} s3 bucket...') - aws.s3_download_file(f'atlas/{json_file.name}', json_file, overwrite=True) - - with open(json_file) as f: - sw_json = json.load(f) - - # The swanson contains regions that are children of regions contained within the Allen - # annotation volume. 
Here we remap these regions to the parent that is contained with the - # annotation volume - if remap: - id_map = {391: [392, 393, 394, 395, 396], - 474: [483, 487], - 536: [537, 541], - 601: [602, 603, 604, 608], - 622: [624, 625, 626, 627, 628, 629, 630, 631, 632, 634, 635, 636, 637, 638], - 686: [687, 688, 689], - 708: [709, 710], - 721: [723, 724, 726, 727, 729, 730, 731], - 740: [741, 742, 743], - 758: [759, 760, 761, 762], - 771: [772, 773], - 777: [778, 779, 780], - 788: [789, 790, 791, 792], - 835: [836, 837, 838], - 891: [894, 895, 896, 897, 898, 900, 901, 902], - 926: [927, 928], - 949: [950, 951, 952, 953, 954], - 957: [958, 959, 960, 961, 962], - 999: [1000, 1001], - 578: [579, 580]} - - rev_map = {} - for k, vals in id_map.items(): - for v in vals: - rev_map[v] = k - - for sw in sw_json: - sw['thisID'] = rev_map.get(sw['thisID'], sw['thisID']) - - return sw_json - - -@lru_cache(maxsize=None) -def _swanson_labels_positions(thres=20000): - """ - Computes label positions to overlay on the Swanson flatmap. - - Parameters - ---------- - thres : int, default=20000 - The number of pixels above which a region is labeled. - - Returns - ------- - dict of str - A map of brain acronym to a tuple of x y coordinates. - """ - s2a = swanson() - iw, ih = np.meshgrid(np.arange(s2a.shape[1]), np.arange(s2a.shape[0])) - # compute the center of mass of all regions (fast enough to do on the fly) - bc = np.maximum(1, np.bincount(s2a.flatten())) - cmw = np.bincount(s2a.flatten(), weights=iw.flatten()) / bc - cmh = np.bincount(s2a.flatten(), weights=ih.flatten()) / bc - bc[0] = 1 - - NWH, NWW = (200, 600) - h, w = s2a.shape - labels = {} - for ilabel in np.where(bc > thres)[0]: - x, y = (cmw[ilabel], cmh[ilabel]) - # the polygon is convex and the label is outside. Dammit !!! 
- if s2a[int(y), int(x)] != ilabel: - # find the nearest point to the center of mass - ih, iw = np.where(s2a == ilabel) - iimin = np.argmin(np.abs((x - iw) + 1j * (y - ih))) - # get the center of mass of a window around this point - sh = np.arange(np.maximum(0, ih[iimin] - NWH), np.minimum(ih[iimin] + NWH, h)) - sw = np.arange(np.maximum(0, iw[iimin] - NWW), np.minimum(iw[iimin] + NWW, w)) - roi = s2a[sh][:, sw] == ilabel - roi = roi / np.sum(roi) - # ax.plot(x, y, 'k+') - # ax.plot(iw[iimin], ih[iimin], '*k') - x = sw[np.searchsorted(np.cumsum(np.sum(roi, axis=0)), .5) - 1] - y = sh[np.searchsorted(np.cumsum(np.sum(roi, axis=1)), .5) - 1] - # ax.plot(x, y, 'r+') - labels[ilabel] = (x, y) - return labels + return flatmaps.swanson_json(filename=filename, remap=remap) diff --git a/ibllib/atlas/genes.py b/ibllib/atlas/genes.py index e3f32e378..34ad6c73e 100644 --- a/ibllib/atlas/genes.py +++ b/ibllib/atlas/genes.py @@ -1,18 +1,10 @@ """Gene expression maps.""" -import logging -from pathlib import Path -import numpy as np -import pandas as pd - -from iblutil.io.hashfile import md5 -import one.remote.aws as aws - -from ibllib.atlas.atlas import AllenAtlas - -_logger = logging.getLogger(__name__) +from iblatlas import genes +from ibllib.atlas import deprecated_decorator +@deprecated_decorator def allen_gene_expression(filename='gene-expression.pqt', folder_cache=None): """ Reads in the Allen gene expression experiments binary data. @@ -22,17 +14,5 @@ def allen_gene_expression(filename='gene-expression.pqt', folder_cache=None): and a memmap of all experiments volumes, size (4345, 58, 41, 67) corresponding to (nexperiments, ml, dv, ap). 
The spacing between slices is 200 um """ - OLD_MD5 = [] - DIM_EXP = (4345, 58, 41, 67) - folder_cache = folder_cache or AllenAtlas._get_cache_dir().joinpath(filename) - file_parquet = Path(folder_cache).joinpath('gene-expression.pqt') - file_bin = file_parquet.with_suffix(".bin") - if not file_parquet.exists() or md5(file_parquet) in OLD_MD5: - file_parquet.parent.mkdir(exist_ok=True, parents=True) - _logger.info(f'downloading gene expression data from {aws.S3_BUCKET_IBL} s3 bucket...') - aws.s3_download_file(f'atlas/{file_parquet.name}', file_parquet) - aws.s3_download_file(f'atlas/{file_bin.name}', file_bin) - df_genes = pd.read_parquet(file_parquet) - gexp_all = np.memmap(file_bin, dtype=np.float16, mode='r', offset=0, shape=DIM_EXP) - return df_genes, gexp_all + return genes.allen_gene_expression(filename=filename, folder_cache=folder_cache) diff --git a/ibllib/atlas/plots.py b/ibllib/atlas/plots.py index 9c5926dcb..c6e691877 100644 --- a/ibllib/atlas/plots.py +++ b/ibllib/atlas/plots.py @@ -1,150 +1,12 @@ """ Module that has convenience plotting functions for 2D atlas slices and flatmaps. 
""" -import copy -import logging -import numpy as np -from scipy.ndimage import gaussian_filter -from scipy.stats import binned_statistic -import matplotlib.pyplot as plt -from matplotlib import cm, colors -from matplotlib.patches import Polygon, PathPatch -import matplotlib.path as mpath -from iblutil.io.hashfile import md5 -import one.remote.aws as aws - -from ibllib.atlas import AllenAtlas -from ibllib.atlas.flatmaps import FlatMap, _swanson_labels_positions, swanson, swanson_json -from ibllib.atlas.regions import BrainRegions -from iblutil.numerical import ismember -from ibllib.atlas.atlas import BrainCoordinates, ALLEN_CCF_LANDMARKS_MLAPDV_UM - -_logger = logging.getLogger(__name__) - - -def get_bc_10(): - """ - Get BrainCoordinates object for 10um Allen Atlas - - Returns - ------- - BrainCoordinates object - """ - dims2xyz = np.array([1, 0, 2]) - res_um = 10 - scaling = np.array([1, 1, 1]) - image_10 = np.array([1320, 1140, 800]) - - iorigin = (ALLEN_CCF_LANDMARKS_MLAPDV_UM['bregma'] / res_um) - dxyz = res_um * 1e-6 * np.array([1, -1, -1]) * scaling - nxyz = np.array(image_10)[dims2xyz] - bc = BrainCoordinates(nxyz=nxyz, xyz0=(0, 0, 0), dxyz=dxyz) - bc = BrainCoordinates(nxyz=nxyz, xyz0=-bc.i2xyz(iorigin), dxyz=dxyz) - - return bc - - -def plot_polygon(ax, xy, color, reg_id, edgecolor='k', linewidth=0.3, alpha=1): - """ - Function to plot matplotlib polygon on an axis - - Parameters - ---------- - ax : matplotlib.pyplot.Axes - An axis object to plot onto. 
- xy: numpy.array - 2D array of x and y coordinates of vertices of polygon - color: str, tuple of int - The color to fill the polygon - reg_id: str, int - An id to assign to the polygon - edgecolor: str, tuple of int - The color of the edge of the polgon - linewidth: int - The width of the edges of the polygon - alpha: float between 0 and 1 - The opacitiy of the polygon - - Returns - ------- - - """ - p = Polygon(xy, facecolor=color, edgecolor=edgecolor, linewidth=linewidth, alpha=alpha, gid=f'region_{reg_id}') - ax.add_patch(p) - - -def plot_polygon_with_hole(ax, vertices, codes, color, reg_id, edgecolor='k', linewidth=0.3, alpha=1): - """ - Function to plot matplotlib polygon that contains a hole on an axis - - Parameters - ---------- - ax : matplotlib.pyplot.Axes - An axis object to plot onto. - vertices: numpy.array - 2D array of x and y coordinates of vertices of polygon - codes: numpy.array - 1D array of path codes used to link the vertices - (https://matplotlib.org/stable/tutorials/advanced/path_tutorial.html) - color: str, tuple of int - The color to fill the polygon - reg_id: str, int - An id to assign to the polygon - edgecolor: str, tuple of int - The color of the edge of the polgon - linewidth: int - The width of the edges of the polygon - alpha: float between 0 and 1 - The opacitiy of the polygon - - Returns - ------- - - """ - - path = mpath.Path(vertices, codes) - patch = PathPatch(path, facecolor=color, edgecolor=edgecolor, linewidth=linewidth, alpha=alpha, gid=f'region_{reg_id}') - ax.add_patch(patch) - - -def coords_for_poly_hole(coords): - """ - Function to convert - - Parameters - ---------- - coords : dict - Dictionary containing keys x, y and invert. x and y contain numpy.array of x coordinates, y coordinates - for the vertices of the polgyon. The invert key is either 1 or -1 and deterimine how to assign the paths. 
- The value for invert for each polygon was assigned manually after looking at the result - - Returns - ------- - all_coords: numpy.array - 2D array of x and y coordinates of vertices of polygon - all_codes: numpy.array - 1D array of path codes used to link the vertices - (https://matplotlib.org/stable/tutorials/advanced/path_tutorial.html) - - """ - for i, c in enumerate(coords): - xy = np.c_[c['x'], c['y']] - codes = np.ones(len(xy), dtype=mpath.Path.code_type) * mpath.Path.LINETO - codes[0] = mpath.Path.MOVETO - if i == 0: - val = c.get('invert', 1) - all_coords = xy[::val] - all_codes = codes - else: - codes[-1] = mpath.Path.CLOSEPOLY - val = c.get('invert', -1) - all_coords = np.concatenate((all_coords, xy[::val])) - all_codes = np.concatenate((all_codes, codes)) - - return all_coords, all_codes +import iblatlas.plots as atlas_plots +from ibllib.atlas import deprecated_decorator +@deprecated_decorator def prepare_lr_data(acronyms_lh, values_lh, acronyms_rh, values_rh): """ Prepare data in format needed for plotting when providing different region values per hemisphere @@ -156,16 +18,10 @@ def prepare_lr_data(acronyms_lh, values_lh, acronyms_rh, values_rh): :return: combined acronyms and two column array of values """ - acronyms = np.unique(np.r_[acronyms_lh, acronyms_rh]) - values = np.nan * np.ones((acronyms.shape[0], 2)) - _, l_idx = ismember(acronyms_lh, acronyms) - _, r_idx = ismember(acronyms_rh, acronyms) - values[l_idx, 0] = values_lh - values[r_idx, 1] = values_rh - - return acronyms, values + return atlas_plots.prepare_lr_data(acronyms_lh, values_lh, acronyms_rh, values_rh) +@deprecated_decorator def reorder_data(acronyms, values, brain_regions=None): """ Reorder list of acronyms and values to match the Allen ordering. @@ -189,178 +45,10 @@ def reorder_data(acronyms, values, brain_regions=None): An ordered array of values. I don't know what those values are, not IDs, so maybe indices? 
""" - br = brain_regions or BrainRegions() - atlas_id = br.acronym2id(acronyms, hemisphere='right') - all_ids = br.id[br.order][:br.n_lr + 1] - ordered_ids = np.zeros_like(all_ids) * np.nan - ordered_values = np.zeros_like(all_ids) * np.nan - _, idx = ismember(atlas_id, all_ids) - ordered_ids[idx] = atlas_id - ordered_values[idx] = values - - ordered_ids = ordered_ids[~np.isnan(ordered_ids)] - ordered_values = ordered_values[~np.isnan(ordered_values)] - ordered_acronyms = br.id2acronym(ordered_ids) - - return ordered_acronyms, ordered_values - - -def load_slice_files(slice, mapping): - """ - Function to load in set of vectorised atlas slices for a given atlas axis and mapping. - - If the data does not exist locally, it will download the files automatically stored in a AWS S3 - bucket. - - Parameters - ---------- - slice : {'coronal', 'sagittal', 'horizontal', 'top'} - The axis of the atlas to load. - mapping : {'Allen', 'Beryl', 'Cosmos'} - The mapping to load. - - Returns - ------- - slice_data : numpy.array - A json containing the vertices to draw each region for each slice in the Allen annotation volume. - - """ - OLD_MD5 = { - 'coronal': [], - 'sagittal': [], - 'horizontal': [], - 'top': [] - } - - slice_file = AllenAtlas._get_cache_dir().parent.joinpath('svg', f'{slice}_{mapping}_paths.npy') - if not slice_file.exists() or md5(slice_file) in OLD_MD5[slice]: - slice_file.parent.mkdir(exist_ok=True, parents=True) - _logger.info(f'downloading swanson paths from {aws.S3_BUCKET_IBL} s3 bucket...') - aws.s3_download_file(f'atlas/{slice_file.name}', slice_file) - - slice_data = np.load(slice_file, allow_pickle=True) - - return slice_data - - -def _plot_slice_vector(coords, slice, values, mapping, empty_color='silver', clevels=None, cmap='viridis', show_cbar=False, - ba=None, ax=None, slice_json=None, **kwargs): - """ - Function to plot scalar value per allen region on vectorised version of histology slice. 
Do not use directly but use - through plot_scalar_on_slice function with vector=True. - - Parameters - ---------- - coords: float - Coordinate of slice in um (not needed when slice='top'). - slice: {'coronal', 'sagittal', 'horizontal', 'top'} - The axis through the atlas volume to display. - values: numpy.array - Array of values for each of the lateralised Allen regions found using BrainRegions().acronym. If no - value is assigned to the acronym, the value at corresponding to that index should be NaN. - mapping: {'Allen', 'Beryl', 'Cosmos'} - The mapping to use. - empty_color: str, tuple of int, default='silver' - The color used to fill the regions that do not have any values assigned (regions with NaN). - clevels: numpy.array, list or tuple - The min and max values to use for the colormap. - cmap: string - Colormap to use. - show_cbar: bool, default=False - Whether to display a colorbar. - ba : ibllib.atlas.AllenAtlas - A brain atlas object. - ax : matplotlib.pyplot.Axes - An axis object to plot onto. - slice_json: numpy.array - The set of vectorised slices for this slice, obtained using load_slice_files(slice, mapping). - **kwargs - Set of kwargs passed into matplotlib.patches.Polygon. - - Returns - ------- - fig: matplotlib.figure.Figure - The plotted figure. - ax: matplotlib.pyplot.Axes - The plotted axes. 
- cbar: matplotlib.pyplot.colorbar, optional - matplotlib colorbar object, only returned if show_cbar=True - - """ - ba = ba or AllenAtlas() - mapping = mapping.split('-')[0].lower() - if clevels is None: - clevels = (np.nanmin(values), np.nanmax(values)) - - if ba.res_um == 10: - bc10 = ba.bc - else: - bc10 = get_bc_10() - - if ax is None: - fig, ax = plt.subplots() - ax.set_axis_off() - else: - fig = ax.get_figure() - - colormap = cm.get_cmap(cmap) - norm = colors.Normalize(vmin=clevels[0], vmax=clevels[1]) - nan_vals = np.isnan(values) - rgba_color = np.full((values.size, 4), fill_value=np.nan) - rgba_color[~nan_vals] = colormap(norm(values[~nan_vals]), bytes=True) - - if slice_json is None: - slice_json = load_slice_files(slice, mapping) - - if slice == 'coronal': - idx = bc10.y2i(coords) - xlim = np.array([0, bc10.nx]) - ylim = np.array([0, bc10.nz]) - elif slice == 'sagittal': - idx = bc10.x2i(coords) - xlim = np.array([0, bc10.ny]) - ylim = np.array([0, bc10.nz]) - elif slice == 'horizontal': - idx = bc10.z2i(coords) - xlim = np.array([0, bc10.nx]) - ylim = np.array([0, bc10.ny]) - else: - # top case - xlim = np.array([0, bc10.nx]) - ylim = np.array([0, bc10.ny]) - - if slice != 'top': - slice_json = slice_json.item().get(str(int(idx))) - - for i, reg in enumerate(slice_json): - color = rgba_color[reg['thisID']] - if any(np.isnan(color)): - color = empty_color - else: - color = color / 255 - coords = reg['coordsReg'] - - if len(coords) == 0: - continue - - if isinstance(coords, (list, tuple)): - vertices, codes = coords_for_poly_hole(coords) - plot_polygon_with_hole(ax, vertices, codes, color, **kwargs) - else: - xy = np.c_[coords['x'], coords['y']] - plot_polygon(ax, xy, color, **kwargs) - - ax.set_xlim(xlim) - ax.set_ylim(ylim) - ax.invert_yaxis() - - if show_cbar: - cbar = fig.colorbar(cm.ScalarMappable(norm=norm, cmap=cmap), ax=ax) - return fig, ax, cbar - else: - return fig, ax + return atlas_plots.reorder_data(acronyms, values, 
brain_regions=brain_regions) +@deprecated_decorator def plot_scalar_on_slice(regions, values, coord=-1000, slice='coronal', mapping=None, hemisphere='left', background='image', cmap='viridis', clevels=None, show_cbar=False, empty_color='silver', brain_atlas=None, ax=None, vector=False, slice_files=None, **kwargs): @@ -417,56 +105,13 @@ def plot_scalar_on_slice(regions, values, coord=-1000, slice='coronal', mapping= matplotlib colorbar object, only returned if show_cbar=True. """ - ba = brain_atlas or AllenAtlas() - br = ba.regions - mapping = mapping or br.default_mapping - - if clevels is None: - clevels = (np.nanmin(values), np.nanmax(values)) - - # Find the mapping to use - if '-lr' in mapping: - map = mapping - else: - map = mapping + '-lr' - - region_values = np.zeros_like(br.id) * np.nan - - if len(values.shape) == 2: - for r, vL, vR in zip(regions, values[:, 0], values[:, 1]): - idx = np.where(br.acronym[br.mappings[map]] == r)[0] - idx_lh = idx[idx > br.n_lr] - idx_rh = idx[idx <= br.n_lr] - region_values[idx_rh] = vR - region_values[idx_lh] = vL - else: - for r, v in zip(regions, values): - region_values[np.where(br.acronym[br.mappings[map]] == r)[0]] = v - if hemisphere == 'left': - region_values[0:(br.n_lr + 1)] = np.nan - elif hemisphere == 'right': - region_values[br.n_lr:] = np.nan - region_values[0] = np.nan - - if show_cbar: - if vector: - fig, ax, cbar = _plot_slice_vector(coord / 1e6, slice, region_values, map, clevels=clevels, cmap=cmap, ba=ba, - ax=ax, empty_color=empty_color, show_cbar=show_cbar, slice_json=slice_files, - **kwargs) - else: - fig, ax, cbar = _plot_slice(coord / 1e6, slice, region_values, 'value', background=background, map=map, - clevels=clevels, cmap=cmap, ba=ba, ax=ax, show_cbar=show_cbar) - return fig, ax, cbar - else: - if vector: - fig, ax = _plot_slice_vector(coord / 1e6, slice, region_values, map, clevels=clevels, cmap=cmap, ba=ba, - ax=ax, empty_color=empty_color, show_cbar=show_cbar, slice_json=slice_files, **kwargs) - 
else: - fig, ax = _plot_slice(coord / 1e6, slice, region_values, 'value', background=background, map=map, clevels=clevels, - cmap=cmap, ba=ba, ax=ax, show_cbar=show_cbar) - return fig, ax + return (atlas_plots.plot_scalar_on_slice(regions, values, coord=coord, slice=slice, mapping=mapping, + hemisphere=hemisphere, background=background, cmap=cmap, clevels=clevels, + show_cbar=show_cbar, empty_color=empty_color, brain_atlas=brain_atlas, + ax=ax, vector=vector, slice_files=slice_files, **kwargs)) +@deprecated_decorator def plot_scalar_on_flatmap(regions, values, depth=0, flatmap='dorsal_cortex', mapping='Allen', hemisphere='left', background='boundary', cmap='viridis', clevels=None, show_cbar=False, flmap_atlas=None, ax=None): """ @@ -488,71 +133,12 @@ def plot_scalar_on_flatmap(regions, values, depth=0, flatmap='dorsal_cortex', ma :return: """ - if clevels is None: - clevels = (np.nanmin(values), np.nanmax(values)) - - ba = flmap_atlas or FlatMap(flatmap=flatmap) - br = ba.regions - - # Find the mapping to use - if '-lr' in mapping: - map = mapping - else: - map = mapping + '-lr' - - region_values = np.zeros_like(br.id) * np.nan - - if len(values.shape) == 2: - for r, vL, vR in zip(regions, values[:, 0], values[:, 1]): - idx = np.where(br.acronym[br.mappings[map]] == r)[0] - idx_lh = idx[idx > br.n_lr] - idx_rh = idx[idx <= br.n_lr] - region_values[idx_rh] = vR - region_values[idx_lh] = vL - else: - for r, v in zip(regions, values): - region_values[np.where(br.acronym[br.mappings[map]] == r)[0]] = v - if hemisphere == 'left': - region_values[0:(br.n_lr + 1)] = np.nan - elif hemisphere == 'right': - region_values[br.n_lr:] = np.nan - region_values[0] = np.nan - - d_idx = int(np.round(depth / ba.res_um)) # need to find nearest to 25 - - if background == 'boundary': - cmap_bound = cm.get_cmap("bone_r").copy() - cmap_bound.set_under([1, 1, 1], 0) - - if ax: - fig = ax.get_figure() - else: - fig, ax = plt.subplots() - - if background == 'image': - ba.plot_flatmap(d_idx, 
volume='image', mapping=map, ax=ax) - ba.plot_flatmap(d_idx, volume='value', region_values=region_values, mapping=map, cmap=cmap, vmin=clevels[0], - vmax=clevels[1], ax=ax) - else: - ba.plot_flatmap(d_idx, volume='value', region_values=region_values, mapping=map, cmap=cmap, vmin=clevels[0], - vmax=clevels[1], ax=ax) - ba.plot_flatmap(d_idx, volume='boundary', mapping=map, ax=ax, cmap=cmap_bound, vmin=0.01, vmax=0.8) - - # For circle flatmap we don't want to cut the axis - if ba.name != 'circles': - if hemisphere == 'left': - ax.set_xlim(0, np.ceil(ba.flatmap.shape[1] / 2)) - elif hemisphere == 'right': - ax.set_xlim(np.ceil(ba.flatmap.shape[1] / 2), ba.flatmap.shape[1]) - - if show_cbar: - norm = colors.Normalize(vmin=clevels[0], vmax=clevels[1], clip=False) - cbar = fig.colorbar(cm.ScalarMappable(norm=norm, cmap=cmap), ax=ax) - return fig, ax, cbar - else: - return fig, ax + return atlas_plots.plot_scalar_on_flatmap(regions, values, depth=depth, flatmap=flatmap, mapping=mapping, + hemisphere=hemisphere, background=background, cmap=cmap, clevels=clevels, + show_cbar=show_cbar, flmap_atlas=flmap_atlas, ax=ax) +@deprecated_decorator def plot_volume_on_slice(volume, coord=-1000, slice='coronal', mapping='Allen', background='boundary', cmap='Reds', clevels=None, show_cbar=False, brain_atlas=None, ax=None): """ @@ -571,25 +157,12 @@ :return: """ - ba = brain_atlas or AllenAtlas() - assert volume.shape == ba.image.shape, 'Volume must have same shape as ba' - - # Find the mapping to use - if '-lr' in mapping: - map = mapping - else: - map = mapping + '-lr' - - if show_cbar: - fig, ax, cbar = _plot_slice(coord / 1e6, slice, volume, 'volume', background=background, map=map, clevels=clevels, - cmap=cmap, ba=ba, ax=ax, show_cbar=show_cbar) - return fig, ax, cbar - else: - fig, ax = _plot_slice(coord / 1e6, slice, volume, 'volume', background=background, map=map, clevels=clevels, - cmap=cmap, ba=ba, 
ax=ax, show_cbar=show_cbar) - return fig, ax + return atlas_plots.plot_volume_on_slice(volume, coord=coord, slice=slice, mapping=mapping, background=background, + cmap=cmap, clevels=clevels, show_cbar=show_cbar, brain_atlas=brain_atlas, + ax=ax) +@deprecated_decorator def plot_points_on_slice(xyz, values=None, coord=-1000, slice='coronal', mapping='Allen', background='boundary', cmap='Reds', clevels=None, show_cbar=False, aggr='mean', fwhm=100, brain_atlas=None, ax=None): """ @@ -615,170 +188,12 @@ def plot_points_on_slice(xyz, values=None, coord=-1000, slice='coronal', mapping :return: """ - ba = brain_atlas or AllenAtlas() - - # Find the mapping to use - if '-lr' in mapping: - map = mapping - else: - map = mapping + '-lr' - - region_values = compute_volume_from_points(xyz, values, aggr=aggr, fwhm=fwhm, ba=ba) - - if show_cbar: - fig, ax, cbar = _plot_slice(coord / 1e6, slice, region_values, 'volume', background=background, map=map, clevels=clevels, - cmap=cmap, ba=ba, ax=ax, show_cbar=show_cbar) - return fig, ax, cbar - else: - fig, ax = _plot_slice(coord / 1e6, slice, region_values, 'volume', background=background, map=map, clevels=clevels, - cmap=cmap, ba=ba, ax=ax, show_cbar=show_cbar) - return fig, ax - - -def compute_volume_from_points(xyz, values=None, aggr='sum', fwhm=100, ba=None): - """ - Creates a 3D volume with xyz points placed in corresponding voxel in volume. Points that fall into the same voxel within the - volume are aggregated according to the method specified in aggr. Gaussian smoothing with a 3D kernel with distance specified - by fwhm (full width half max) argument is applied. If fwhm = 0, no gaussian smoothing is applied. - - :param xyz: 3 column array of xyz coordinates of points in metres - :param values: 1 column array of values per xyz coordinates, if no values are given the sum of xyz points in each voxel is - returned - :param aggr: aggregation method. Options are sum, count, mean, std, median, min and max. 
Can also give in custom function - (https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.binned_statistic.html) - :param fwhm: full width at half maximum of gaussian kernel in um - :param ba: AllenAtlas object - :return: - """ - - ba = ba or AllenAtlas() - - idx = ba._lookup(xyz) - ba_shape = ba.image.shape[0] * ba.image.shape[1] * ba.image.shape[2] - - if values is not None: - volume = binned_statistic(idx, values, range=[0, ba_shape], statistic=aggr, bins=ba_shape).statistic - volume[np.isnan(volume)] = 0 - else: - volume = np.bincount(idx, minlength=ba_shape, weights=values) - - volume = volume.reshape(ba.image.shape[0], ba.image.shape[1], ba.image.shape[2]).astype(np.float32) - - if fwhm > 0: - # Compute sigma used for gaussian kernel - fwhm_over_sigma_ratio = np.sqrt(8 * np.log(2)) - sigma = fwhm / (fwhm_over_sigma_ratio * ba.res_um) - # TODO to speed up only apply gaussian filter on slices within distance of chosen coordinate - volume = gaussian_filter(volume, sigma=sigma) - - # Mask so that outside of the brain is set to nan - volume[ba.label == 0] = np.nan - - return volume - - -def _plot_slice(coord, slice, region_values, vol_type, background='boundary', map='Allen', clevels=None, cmap='viridis', - show_cbar=False, ba=None, ax=None): - """ - Function to plot scalar value per allen region on histology slice. - - Do not use directly but use through plot_scalar_on_slice function. - - Parameters - ---------- - coord: float - coordinate of slice in um (not needed when slice='top'). - slice: {'coronal', 'sagittal', 'horizontal', 'top'} - the axis through the atlas volume to display. - region_values: numpy.array - Array of values for each of the lateralised Allen regions found using BrainRegions().acronym. If no - value is assigned to the acronym, the value at corresponding to that index should be nan. - vol_type: 'value' - The type of volume to be displayed, should always be 'value' if values want to be displayed. 
- background: {'image', 'boundary'} - The background slice to overlay the values onto. When 'image' it uses the Allen dwi image, when - 'boundary' it displays the boundaries between regions. - map: {'Allen', 'Beryl', 'Cosmos'} - the mapping to use. - clevels: numpy.array, list or tuple - The min and max values to use for the colormap. - cmap: str, default='viridis' - Colormap to use. - show_cbar: bool, default=False - Whether to display a colorbar. - ba : ibllib.atlas.AllenAtlas - A brain atlas object. - ax : matplotlib.pyplot.Axes - An axis object to plot onto. - - Returns - ------- - fig: matplotlib.figure.Figure - The plotted figure - ax: matplotlib.pyplot.Axes - The plotted axes. - cbar: matplotlib.pyplot.colorbar - matplotlib colorbar object, only returned if show_cbar=True. - - """ - ba = ba or AllenAtlas() - - if clevels is None: - clevels = (np.nanmin(region_values), np.nanmax(region_values)) - - if ax: - fig = ax.get_figure() - else: - fig, ax = plt.subplots() - - if slice == 'coronal': - if background == 'image': - ba.plot_cslice(coord, volume='image', mapping=map, ax=ax) - ba.plot_cslice(coord, volume=vol_type, region_values=region_values, mapping=map, cmap=cmap, vmin=clevels[0], - vmax=clevels[1], ax=ax) - else: - ba.plot_cslice(coord, volume=vol_type, region_values=region_values, mapping=map, cmap=cmap, vmin=clevels[0], - vmax=clevels[1], ax=ax) - ba.plot_cslice(coord, volume='boundary', mapping=map, ax=ax) - - elif slice == 'sagittal': - if background == 'image': - ba.plot_sslice(coord, volume='image', mapping=map, ax=ax) - ba.plot_sslice(coord, volume=vol_type, region_values=region_values, mapping=map, cmap=cmap, vmin=clevels[0], - vmax=clevels[1], ax=ax) - else: - ba.plot_sslice(coord, volume=vol_type, region_values=region_values, mapping=map, cmap=cmap, vmin=clevels[0], - vmax=clevels[1], ax=ax) - ba.plot_sslice(coord, volume='boundary', mapping=map, ax=ax) - - elif slice == 'horizontal': - if background == 'image': - ba.plot_hslice(coord, 
volume='image', mapping=map, ax=ax) - ba.plot_hslice(coord, volume=vol_type, region_values=region_values, mapping=map, cmap=cmap, vmin=clevels[0], - vmax=clevels[1], ax=ax) - else: - ba.plot_hslice(coord, volume=vol_type, region_values=region_values, mapping=map, cmap=cmap, vmin=clevels[0], - vmax=clevels[1], ax=ax) - ba.plot_hslice(coord, volume='boundary', mapping=map, ax=ax) - - elif slice == 'top': - if background == 'image': - ba.plot_top(volume='image', mapping=map, ax=ax) - ba.plot_top(volume=vol_type, region_values=region_values, mapping=map, cmap=cmap, vmin=clevels[0], - vmax=clevels[1], ax=ax) - else: - ba.plot_top(volume=vol_type, region_values=region_values, mapping=map, cmap=cmap, vmin=clevels[0], - vmax=clevels[1], ax=ax) - ba.plot_top(volume='boundary', mapping=map, ax=ax) - - if show_cbar: - norm = colors.Normalize(vmin=clevels[0], vmax=clevels[1], clip=False) - cbar = fig.colorbar(cm.ScalarMappable(norm=norm, cmap=cmap), ax=ax) - return fig, ax, cbar - else: - return fig, ax + return atlas_plots.plot_points_on_slice(xyz, values=values, coord=coord, slice=slice, mapping=mapping, + background=background, cmap=cmap, clevels=clevels, show_cbar=show_cbar, + aggr=aggr, fwhm=fwhm, brain_atlas=brain_atlas, ax=ax) +@deprecated_decorator def plot_scalar_on_barplot(acronyms, values, errors=None, order=True, ax=None, brain_regions=None): """ Function to plot scalar value per allen region on a bar plot. If order=True, the acronyms and values are reordered @@ -807,24 +222,12 @@ def plot_scalar_on_barplot(acronyms, values, errors=None, order=True, ax=None, b The plotted axes. 
""" - br = brain_regions or BrainRegions() - - if order: - acronyms, values = reorder_data(acronyms, values, brain_regions) - - _, idx = ismember(acronyms, br.acronym) - colours = br.rgb[idx] - - if ax: - fig = ax.get_figure() - else: - fig, ax = plt.subplots() - ax.bar(np.arange(acronyms.size), values, color=colours) - - return fig, ax + return atlas_plots.plot_scalar_on_barplot(acronyms, values, errors=errors, order=order, ax=ax, + brain_regions=brain_regions) +@deprecated_decorator def plot_swanson_vector(acronyms=None, values=None, ax=None, hemisphere=None, br=None, orientation='landscape', empty_color='silver', vmin=None, vmax=None, cmap='viridis', annotate=False, annotate_n=10, annotate_order='top', annotate_list=None, mask=None, mask_color='w', fontsize=10, **kwargs): @@ -878,173 +281,15 @@ def plot_swanson_vector(acronyms=None, values=None, ax=None, hemisphere=None, br The plotted axes. """ - br = BrainRegions() if br is None else br - br.compute_hierarchy() - sw_shape = (2968, 6820) - - if ax is None: - fig, ax = plt.subplots() - ax.set_axis_off() - - if hemisphere != 'both' and acronyms is not None and not isinstance(acronyms[0], str): - # If negative atlas ids are passed in and we are not going to lateralise (e.g hemisphere='both') - # transfer them over to one hemisphere - acronyms = np.abs(acronyms) - - if acronyms is not None: - ibr, vals = br.propagate_down(acronyms, values) - colormap = cm.get_cmap(cmap) - vmin = vmin or np.nanmin(vals) - vmax = vmax or np.nanmax(vals) - norm = colors.Normalize(vmin=vmin, vmax=vmax) - rgba_color = colormap(norm(vals), bytes=True) - - if mask is not None: - imr, _ = br.propagate_down(mask, np.ones_like(mask)) - else: - imr = [] - - sw_json = swanson_json() - if hemisphere == 'both': - sw_rev = copy.deepcopy(sw_json) - for sw in sw_rev: - sw['thisID'] = sw['thisID'] + br.n_lr - sw_json = sw_json + sw_rev - plot_idx = [] - plot_val = [] - for i, reg in enumerate(sw_json): - - coords = reg['coordsReg'] - reg_id = 
reg['thisID'] - - if acronyms is None: - color = br.rgba[br.mappings['Swanson'][reg['thisID']]] / 255 - if hemisphere is None: - col_l = None - col_r = color - elif hemisphere == 'left': - col_l = empty_color if orientation == 'portrait' else color - col_r = color if orientation == 'portrait' else empty_color - elif hemisphere == 'right': - col_l = color if orientation == 'portrait' else empty_color - col_r = empty_color if orientation == 'portrait' else color - elif hemisphere in ['both', 'mirror']: - col_l = color - col_r = color - else: - idx = np.where(ibr == reg['thisID'])[0] - idxm = np.where(imr == reg['thisID'])[0] - if len(idx) > 0: - plot_idx.append(ibr[idx[0]]) - plot_val.append(vals[idx[0]]) - color = rgba_color[idx[0]] / 255 - elif len(idxm) > 0: - color = mask_color - else: - color = empty_color - - if hemisphere is None: - col_l = None - col_r = color - elif hemisphere == 'left': - col_l = empty_color if orientation == 'portrait' else color - col_r = color if orientation == 'portrait' else empty_color - elif hemisphere == 'right': - col_l = color if orientation == 'portrait' else empty_color - col_r = empty_color if orientation == 'portrait' else color - elif hemisphere == 'mirror': - col_l = color - col_r = color - elif hemisphere == 'both': - if reg_id <= br.n_lr: - col_l = color if orientation == 'portrait' else None - col_r = None if orientation == 'portrait' else color - else: - col_l = None if orientation == 'portrait' else color - col_r = color if orientation == 'portrait' else None - - if reg['hole']: - vertices, codes = coords_for_poly_hole(coords) - if orientation == 'portrait': - vertices[:, [0, 1]] = vertices[:, [1, 0]] - if col_r is not None: - plot_polygon_with_hole(ax, vertices, codes, col_r, reg_id, **kwargs) - if col_l is not None: - vertices_inv = np.copy(vertices) - vertices_inv[:, 0] = -1 * vertices_inv[:, 0] + (sw_shape[0] * 2) - plot_polygon_with_hole(ax, vertices_inv, codes, col_l, reg_id, **kwargs) - else: - if col_r is not 
None: - plot_polygon_with_hole(ax, vertices, codes, col_r, reg_id, **kwargs) - if col_l is not None: - vertices_inv = np.copy(vertices) - vertices_inv[:, 1] = -1 * vertices_inv[:, 1] + (sw_shape[0] * 2) - plot_polygon_with_hole(ax, vertices_inv, codes, col_l, reg_id, **kwargs) - else: - coords = [coords] if isinstance(coords, dict) else coords - for c in coords: - if orientation == 'portrait': - xy = np.c_[c['y'], c['x']] - if col_r is not None: - plot_polygon(ax, xy, col_r, reg_id, **kwargs) - if col_l is not None: - xy_inv = np.copy(xy) - xy_inv[:, 0] = -1 * xy_inv[:, 0] + (sw_shape[0] * 2) - plot_polygon(ax, xy_inv, col_l, reg_id, **kwargs) - else: - xy = np.c_[c['x'], c['y']] - if col_r is not None: - plot_polygon(ax, xy, col_r, reg_id, **kwargs) - if col_l is not None: - xy_inv = np.copy(xy) - xy_inv[:, 1] = -1 * xy_inv[:, 1] + (sw_shape[0] * 2) - plot_polygon(ax, xy_inv, col_l, reg_id, **kwargs) - - if orientation == 'portrait': - ax.set_ylim(0, sw_shape[1]) - if hemisphere is None: - ax.set_xlim(0, sw_shape[0]) - else: - ax.set_xlim(0, 2 * sw_shape[0]) - else: - ax.set_xlim(0, sw_shape[1]) - if hemisphere is None: - ax.set_ylim(0, sw_shape[0]) - else: - ax.set_ylim(0, 2 * sw_shape[0]) - - if annotate: - if annotate_list is not None: - annotate_swanson(ax=ax, acronyms=annotate_list, orientation=orientation, br=br, thres=10, fontsize=fontsize) - elif acronyms is not None: - ids = br.index2id(np.array(plot_idx)) - _, indices, _ = np.intersect1d(br.id, br.remap(ids, 'Swanson-lr'), return_indices=True) - a, b = ismember(ids, br.id[indices]) - sorted_id = ids[a] - vals = np.array(plot_val)[a] - sort_vals = np.argsort(vals) if annotate_order == 'bottom' else np.argsort(vals)[::-1] - annotate_swanson(ax=ax, acronyms=sorted_id[sort_vals[:annotate_n]], orientation=orientation, br=br, - thres=10, fontsize=fontsize) - else: - annotate_swanson(ax=ax, orientation=orientation, br=br, fontsize=fontsize) - - def format_coord(x, y): - patch = next((p for p in ax.patches if 
p.contains_point(p.get_transform().transform(np.r_[x, y]))), None) - if patch is not None: - ind = int(patch.get_gid().split('_')[1]) - ancestors = br.ancestors(br.id[ind])['acronym'] - return f'sw-{ind}, {ancestors}, aid={br.id[ind]}-{br.acronym[ind]} \n {br.name[ind]}' - else: - return '' - - ax.format_coord = format_coord - - ax.invert_yaxis() - ax.set_aspect('equal') - return ax + return atlas_plots.plot_swanson_vector(acronyms=acronyms, values=values, ax=ax, hemisphere=hemisphere, br=br, + orientation=orientation, empty_color=empty_color, vmin=vmin, vmax=vmax, + cmap=cmap, annotate=annotate, annotate_n=annotate_n, + annotate_order=annotate_order, annotate_list=annotate_list, mask=mask, + mask_color=mask_color, fontsize=fontsize, **kwargs) +@deprecated_decorator def plot_swanson(acronyms=None, values=None, ax=None, hemisphere=None, br=None, orientation='landscape', annotate=False, empty_color='silver', **kwargs): """ @@ -1086,89 +331,6 @@ def plot_swanson(acronyms=None, values=None, ax=None, hemisphere=None, br=None, matplotlib.pyplot.Axes The plotted axes. 
""" - mapping = 'Swanson' - br = BrainRegions() if br is None else br - br.compute_hierarchy() - s2a = swanson() - # both hemispheres - if hemisphere == 'both': - _s2a = s2a + np.sum(br.id > 0) - _s2a[s2a == 0] = 0 - _s2a[s2a == 1] = 1 - s2a = np.r_[s2a, np.flipud(_s2a)] - mapping = 'Swanson-lr' - elif hemisphere == 'mirror': - s2a = np.r_[s2a, np.flipud(s2a)] - if orientation == 'portrait': - s2a = np.transpose(s2a) - if acronyms is None: - regions = br.mappings[mapping][s2a] - im = br.rgba[regions] - iswan = None - else: - ibr, vals = br.propagate_down(acronyms, values) - # we now have the mapped regions and aggregated values, map values onto swanson map - iswan, iv = ismember(s2a, ibr) - im = np.zeros_like(s2a, dtype=np.float32) - im[iswan] = vals[iv] - im[~iswan] = np.nan - if not ax: - ax = plt.gca() - ax.set_axis_off() # unless provided we don't need scales here - ax.imshow(im, **kwargs) - # overlay the boundaries if value plot - imb = np.zeros((*s2a.shape[:2], 4), dtype=np.uint8) - # fill in the empty regions with the blank regions colours if necessary - if iswan is not None: - imb[~iswan] = (np.array(colors.to_rgba(empty_color)) * 255).astype('uint8') - imb[s2a == 0] = 255 - # imb[s2a == 1] = np.array([167, 169, 172, 255]) - imb[s2a == 1] = np.array([0, 0, 0, 255]) - ax.imshow(imb) - if annotate: - annotate_swanson(ax=ax, orientation=orientation, br=br) - - # provides the mean to see the region on axis - def format_coord(x, y): - ind = s2a[int(y), int(x)] - ancestors = br.ancestors(br.id[ind])['acronym'] - return f'sw-{ind}, {ancestors}, aid={br.id[ind]}-{br.acronym[ind]} \n {br.name[ind]}' - ax.format_coord = format_coord - return ax - - -def annotate_swanson(ax, acronyms=None, orientation='landscape', br=None, thres=20000, **kwargs): - """ - Display annotations on a Swanson flatmap. - - Parameters - ---------- - ax : matplotlib.pyplot.Axes - An axis object to plot onto. - acronyms : array_like - A list or numpy array of acronyms or Allen region IDs. 
If None plot all acronyms. - orientation : {landscape', 'portrait'}, default='landscape' - The plot orientation. - br : ibllib.atlas.BrainRegions - A brain regions object. - thres : int, default=20000 - The number of pixels above which a region is labelled. - **kwargs - See matplotlib.pyplot.Axes.annotate. - - """ - br = br or BrainRegions() - if acronyms is None: - indices = np.arange(br.id.size) - else: # TODO we should in fact remap and compute labels for hierarchical regions - aids = br.parse_acronyms_argument(acronyms) - _, indices, _ = np.intersect1d(br.id, br.remap(aids, 'Swanson-lr'), return_indices=True) - labels = _swanson_labels_positions(thres=thres) - for ilabel in labels: - # do not display unwanted labels - if ilabel not in indices: - continue - # rotate the labels if the display is in portrait mode - xy = np.flip(labels[ilabel]) if orientation == 'portrait' else labels[ilabel] - ax.annotate(br.acronym[ilabel], xy=xy, ha='center', va='center', **kwargs) + return atlas_plots.plot_swanson(acronyms=acronyms, values=values, ax=ax, hemisphere=hemisphere, br=br, + orientation=orientation, annotate=annotate, empty_color=empty_color, **kwargs) diff --git a/ibllib/atlas/regions.py b/ibllib/atlas/regions.py index 85f94edbb..c5f3f609a 100644 --- a/ibllib/atlas/regions.py +++ b/ibllib/atlas/regions.py @@ -26,679 +26,24 @@ FIXME Document the two structure trees. Which Website did they come from, and which publication/edition? 
""" -from dataclasses import dataclass import logging -from pathlib import Path - -import numpy as np -import pandas as pd -from iblutil.util import Bunch -from iblutil.numerical import ismember +from iblatlas import regions +from ibllib.atlas import deprecated_decorator _logger = logging.getLogger(__name__) -FILE_MAPPINGS = str(Path(__file__).parent.joinpath('mappings.pqt')) -ALLEN_FILE_REGIONS = str(Path(__file__).parent.joinpath('allen_structure_tree.csv')) -FRANKLIN_FILE_REGIONS = str(Path(__file__).parent.joinpath('franklin_paxinos_structure_tree.csv')) - - -@dataclass -class _BrainRegions: - """A struct of brain regions, their names, IDs, relationships and associated plot colours.""" - - """numpy.array: An integer array of unique brain region IDs.""" - id: np.ndarray - """numpy.array: A str array of verbose brain region names.""" - name: object - """numpy.array: A str array of brain region acronyms.""" - acronym: object - """numpy.array: A, (n, 3) uint8 array of brain region RGB colour values.""" - rgb: np.uint8 - """numpy.array: An unsigned integer array indicating the number of degrees removed from root.""" - level: np.ndarray - """numpy.array: An integer array of parent brain region IDs.""" - parent: np.ndarray - """numpy.array: The position within the flattened graph.""" - order: np.uint16 - - def __post_init__(self): - self._compute_mappings() - - def _compute_mappings(self): - """Compute default mapping for the structure tree. - - Default mapping is identity. This method is intended to be overloaded by subclasses. - """ - self.default_mapping = None - self.mappings = dict(default_mapping=self.order) - # the number of lateralized regions (typically half the number of regions in a lateralized structure tree) - self.n_lr = 0 - - def to_df(self): - """ - Return dataclass as a pandas DataFrame. - - Returns - ------- - pandas.DataFrame - The object as a pandas DataFrame with attributes as columns. 
- """ - attrs = ['id', 'name', 'acronym', 'hexcolor', 'level', 'parent', 'order'] - d = dict(zip(attrs, list(map(self.__getattribute__, attrs)))) - return pd.DataFrame(d) - - @property - def rgba(self): - """numpy.array: An (n, 4) uint8 array of RGBA values for all n brain regions.""" - rgba = np.c_[self.rgb, self.rgb[:, 0] * 0 + 255] - rgba[0, :] = 0 # set the void to transparent - return rgba - - @property - def hexcolor(self): - """numpy.array of str: The RGB colour values as hexadecimal triplet strings.""" - return np.apply_along_axis(lambda x: "#{0:02x}{1:02x}{2:02x}".format(*x.astype(int)), 1, self.rgb) - - def get(self, ids) -> Bunch: - """ - Return a map of id, name, acronym, etc. for the provided IDs. - - Parameters - ---------- - ids : int, tuple of ints, numpy.array - One or more brain region IDs to get information for. - - Returns - ------- - iblutil.util.Bunch[str, numpy.array] - A dict-like object containing the keys {'id', 'name', 'acronym', 'rgb', 'level', - 'parent', 'order'} with arrays the length of `ids`. - """ - uid, uind = np.unique(ids, return_inverse=True) - a, iself, _ = np.intersect1d(self.id, uid, assume_unique=False, return_indices=True) - b = Bunch() - for k in self.__dataclass_fields__.keys(): - b[k] = self.__getattribute__(k)[iself[uind]] - return b - - def _navigate_tree(self, ids, direction='down', return_indices=False): - """ - Navigate the tree and get all related objects either up, down or along the branch. - - By convention the provided id is returned in the list of regions. - - Parameters - ---------- - ids : int, array_like - One or more brain region IDs (int32). - direction : {'up', 'down'} - Whether to return ancestors ('up') or descendants ('down'). - return_indices : bool, default=False - If true returns a second argument with indices mapping to the current brain region - object. 
- - Returns - ------- - iblutil.util.Bunch[str, numpy.array] - A dict-like object containing the keys {'id', 'name', 'acronym', 'rgb', 'level', - 'parent', 'order'} with arrays the length of `ids`. - """ - indices = ismember(self.id, ids)[0] - count = np.sum(indices) - while True: - if direction == 'down': - indices |= ismember(self.parent, self.id[indices])[0] - elif direction == 'up': - indices |= ismember(self.id, self.parent[indices])[0] - else: - raise ValueError("direction should be either 'up' or 'down'") - if count == np.sum(indices): # last iteration didn't find any match - break - else: - count = np.sum(indices) - if return_indices: - return self.get(self.id[indices]), np.where(indices)[0] - else: - return self.get(self.id[indices]) - - def subtree(self, scalar_id, return_indices=False): - """ - Given a node, returns the subtree containing the node along with ancestors. - - Parameters - ---------- - scalar_id : int - A brain region ID. - return_indices : bool, default=False - If true returns a second argument with indices mapping to the current brain region - object. - - Returns - ------- - iblutil.util.Bunch[str, numpy.array] - A dict-like object containing the keys {'id', 'name', 'acronym', 'rgb', 'level', - 'parent', 'order'} with arrays the length of one. - """ - if not np.isscalar(scalar_id): - assert scalar_id.size == 1 - _, idown = self._navigate_tree(scalar_id, direction='down', return_indices=True) - _, iup = self._navigate_tree(scalar_id, direction='up', return_indices=True) - indices = np.unique(np.r_[idown, iup]) - if return_indices: - return self.get(self.id[indices]), np.where(indices)[0] - else: - return self.get(self.id[indices]) - - def descendants(self, ids, **kwargs): - """ - Get descendants from one or more IDs. - - Parameters - ---------- - ids : int, array_like - One or more brain region IDs. - return_indices : bool, default=False - If true returns a second argument with indices mapping to the current brain region - object. 
- - Returns - ------- - iblutil.util.Bunch[str, numpy.array] - A dict-like object containing the keys {'id', 'name', 'acronym', 'rgb', 'level', - 'parent', 'order'} with arrays the length of `ids`. - """ - return self._navigate_tree(ids, direction='down', **kwargs) - - def ancestors(self, ids, **kwargs): - """ - Get ancestors from one or more IDs. - - Parameters - ---------- - ids : int, array_like - One or more brain region IDs. - return_indices : bool, default=False - If true returns a second argument with indices mapping to the current brain region - object. - - Returns - ------- - iblutil.util.Bunch[str, numpy.array] - A dict-like object containing the keys {'id', 'name', 'acronym', 'rgb', 'level', - 'parent', 'order'} with arrays the length of `ids`. - """ - return self._navigate_tree(ids, direction='up', **kwargs) - - def leaves(self): - """ - Get all regions that do not have children. - - Returns - ------- - iblutil.util.Bunch[str, numpy.array] - A dict-like object containing the keys {'id', 'name', 'acronym', 'rgb', 'level', - 'parent', 'order'} with arrays of matching length. - """ - leaves = np.setxor1d(self.id, self.parent) - return self.get(np.int64(leaves[~np.isnan(leaves)])) - - def _mapping_from_regions_list(self, new_map, lateralize=False): - """ - From a vector of region IDs, creates a structure tree index mapping. - - For example, given a subset of brain region IDs, this returns an array the length of the - total number of brain regions, where each element is the structure tree index for that - region. The IDs in `new_map` and their descendants are given that ID's index and any - missing IDs are given the root index. - - - Parameters - ---------- - new_map : array_like of int - An array of atlas brain region IDs. - lateralize : bool - If true, lateralized indices are assigned to all IDs. If false, IDs are assigned to q - non-lateralized index regardless of their sign. 
- - Returns - ------- - numpy.array - A vector of brain region indices representing the structure tree order corresponding to - each input ID and its descendants. - """ - I_ROOT = 1 - I_VOID = 0 - # to lateralize we make sure all regions are represented in + and - - new_map = np.unique(np.r_[-new_map, new_map]) - assert np.all(np.isin(new_map, self.id)), \ - "All mapping ids should be represented in the Allen ids" - # with the lateralization, self.id may have duplicate values so ismember is necessary - iid, inm = ismember(self.id, new_map) - iid = np.where(iid)[0] - mapind = np.zeros_like(self.id) + I_ROOT # non assigned regions are root - # TODO should root be lateralised? - mapind[iid] = iid # regions present in the list have the same index - # Starting by the higher up levels in the hierarchy, assign all descendants to the mapping - for i in np.argsort(self.level[iid]): - descendants = self.descendants(self.id[iid[i]]).id - _, idesc, _ = np.intersect1d(self.id, descendants, return_indices=True) - mapind[idesc] = iid[i] - mapind[0] = I_VOID # void stays void - # to delateralize the regions, assign the positive index to all mapind elements - if lateralize is False: - _, iregion = ismember(np.abs(self.id), self.id) - mapind = mapind[iregion] - return mapind - - def acronym2acronym(self, acronym, mapping=None): - """ - Remap acronyms onto mapping - - :param acronym: list or array of acronyms - :param mapping: target map to remap acronyms - :return: array of remapped acronyms - """ - mapping = mapping or self.default_mapping - inds = self._find_inds(acronym, self.acronym) - return self.acronym[self.mappings[mapping]][inds] - - def acronym2id(self, acronym, mapping=None, hemisphere=None): - """ - Convert acronyms to atlas ids and remap - - :param acronym: list or array of acronyms - :param mapping: target map to remap atlas_ids - :param hemisphere: which hemisphere to return atlas ids for, options left or right - :return: array of remapped atlas ids - """ - mapping = 
mapping or self.default_mapping - inds = self._find_inds(acronym, self.acronym) - return self.id[self.mappings[mapping]][self._filter_lr(inds, mapping, hemisphere)] - - def acronym2index(self, acronym, mapping=None, hemisphere=None): - """ - Convert acronym to index and remap - :param acronym: - :param mapping: - :param hemisphere: - :return: array of remapped acronyms and list of indexes for each acronnym - """ - mapping = mapping or self.default_mapping - acronym = self.acronym2acronym(acronym, mapping=mapping) - index = list() - for id in acronym: - inds = np.where(self.acronym[self.mappings[mapping]] == id)[0] - index.append(self._filter_lr_index(inds, hemisphere)) - - return acronym, index - - def id2acronym(self, atlas_id, mapping=None): - """ - Convert atlas id to acronym and remap - - :param atlas_id: list or array of atlas ids - :param mapping: target map to remap acronyms - :return: array of remapped acronyms - """ - mapping = mapping or self.default_mapping - inds = self._find_inds(atlas_id, self.id) - return self.acronym[self.mappings[mapping]][inds] - - def id2id(self, atlas_id, mapping='Allen'): - """ - Remap atlas id onto mapping - - :param atlas_id: list or array of atlas ids - :param mapping: target map to remap acronyms - :return: array of remapped atlas ids - """ - - inds = self._find_inds(atlas_id, self.id) - return self.id[self.mappings[mapping]][inds] - - def id2index(self, atlas_id, mapping='Allen'): - """ - Convert atlas id to index and remap - - :param atlas_id: list or array of atlas ids - :param mapping: mapping to use - :return: dict of indices for each atlas_id - """ - - atlas_id = self.id2id(atlas_id, mapping=mapping) - index = list() - for id in atlas_id: - inds = np.where(self.id[self.mappings[mapping]] == id)[0] - index.append(inds) - - return atlas_id, index - - def index2acronym(self, index, mapping=None): - """ - Convert index to acronym and remap - - :param index: - :param mapping: - :return: - """ - mapping = mapping or 
self.default_mapping - inds = self.acronym[self.mappings[mapping]][index] - return inds - - def index2id(self, index, mapping=None): - """ - Convert index to atlas id and remap - - :param index: - :param mapping: - :return: - """ - mapping = mapping or self.default_mapping - inds = self.id[self.mappings[mapping]][index] - return inds - - def _filter_lr(self, values, mapping, hemisphere): - """ - Filter values by those on left or right hemisphere - :param values: array of index values - :param mapping: mapping to use - :param hemisphere: hemisphere - :return: - """ - if 'lr' in mapping: - if hemisphere == 'left': - return values + self.n_lr - elif hemisphere == 'right': - return values - else: - return np.c_[values + self.n_lr, values] - else: - return values - - def _filter_lr_index(self, values, hemisphere): - """ - Filter index values by those on left or right hemisphere - - :param values: array of index values - :param hemisphere: hemisphere - :return: - """ - if hemisphere == 'left': - return values[values > self.n_lr] - elif hemisphere == 'right': - return values[values <= self.n_lr] - else: - return values - - def _find_inds(self, values, all_values): - if not isinstance(values, list) and not isinstance(values, np.ndarray): - values = np.array([values]) - _, inds = ismember(np.array(values), all_values) - - return inds - - def parse_acronyms_argument(self, acronyms, mode='raise'): - """Parse input acronyms. - - Returns a numpy array of region IDs regardless of the input: list of acronyms, array of - acronym strings or region IDs. To be used by functions to provide flexible input type. - - Parameters - ---------- - acronyms : array_like - An array of region acronyms to convert to IDs. An array of region IDs may also be - provided, in which case they are simply returned. - mode : str, optional - If 'raise', asserts that all acronyms exist in the structure tree. - - Returns - ------- - numpy.array of int - An array of brain regions corresponding to `acronyms`. 
- """ - # first get the allen region ids regardless of the input type - acronyms = np.array(acronyms) - # if the user provides acronyms they're not signed by definition - if not np.issubdtype(acronyms.dtype, np.number): - user_aids = self.acronym2id(acronyms) - if mode == 'raise': - assert user_aids.size == acronyms.size, 'all acronyms must exist in the ontology' - else: - user_aids = acronyms - return user_aids - - -class FranklinPaxinosRegions(_BrainRegions): - """Mouse Brain in Stereotaxic Coordinates (MBSC). - - Paxinos G, and Franklin KBJ (2012). The Mouse Brain in Stereotaxic Coordinates, 4th edition (Elsevier Academic Press). - """ - def __init__(self): - df_regions = pd.read_csv(FRANKLIN_FILE_REGIONS) - # get rid of nan values, there are rows that are in Allen but are not in the Franklin Paxinos atlas - df_regions = df_regions[~df_regions['Structural ID'].isna()] - # add in root - root = [{'Structural ID': int(997), 'Franklin-Paxinos Full name': 'root', 'Franklin-Paxinos abbreviation': 'root', - 'structure Order': 50, 'red': 255, 'green': 255, 'blue': 255, 'Allen Full name': 'root', - 'Allen abbreviation': 'root'}] - df_regions = pd.concat([pd.DataFrame(root), df_regions], ignore_index=True) - - allen_regions = pd.read_csv(ALLEN_FILE_REGIONS) - - # Find the level of acronyms that are the same as Allen - a, b = ismember(df_regions['Allen abbreviation'].values, allen_regions['acronym'].values) - level = allen_regions['depth'].values[b] - df_regions['level'] = np.full(len(df_regions), np.nan) - df_regions['allen level'] = np.full(len(df_regions), np.nan) - df_regions.loc[a, 'level'] = level - df_regions.loc[a, 'allen level'] = level - - nan_idx = np.where(df_regions['Allen abbreviation'].isna())[0] - df_regions.loc[nan_idx, 'Allen abbreviation'] = df_regions['Franklin-Paxinos abbreviation'].values[nan_idx] - df_regions.loc[nan_idx, 'Allen Full name'] = df_regions['Franklin-Paxinos Full name'].values[nan_idx] - - # Now fill in the nan values with one level up 
from their parents we need to this multiple times - while np.sum(np.isnan(df_regions['level'].values)) > 0: - nan_loc = np.isnan(df_regions['level'].values) - parent_level = df_regions['Parent ID'][nan_loc].values - a, b = ismember(parent_level, df_regions['Structural ID'].values) - assert len(a) == len(b) == np.sum(nan_loc) - level = df_regions['level'].values[b] + 1 - df_regions.loc[nan_loc, 'level'] = level - - # lateralize - df_regions_left = df_regions.iloc[np.array(df_regions['Structural ID'] > 0), :].copy() - df_regions_left['Structural ID'] = - df_regions_left['Structural ID'] - df_regions_left['Parent ID'] = - df_regions_left['Parent ID'] - df_regions_left['Allen Full name'] = \ - df_regions_left['Allen Full name'].apply(lambda x: x + ' (left)') - df_regions = pd.concat((df_regions, df_regions_left), axis=0) - - # insert void - void = [{'Structural ID': int(0), 'Franklin-Paxinos Full Name': 'void', 'Franklin-Paxinos abbreviation': 'void', - 'Parent ID': int(0), 'structure Order': 0, 'red': 0, 'green': 0, 'blue': 0, 'Allen Full name': 'void', - 'Allen abbreviation': 'void'}] - df_regions = pd.concat([pd.DataFrame(void), df_regions], ignore_index=True) - - # converts colors to RGB uint8 array - c = np.c_[df_regions['red'], df_regions['green'], df_regions['blue']].astype(np.uint32) - - super().__init__(id=df_regions['Structural ID'].to_numpy().astype(np.int64), - name=df_regions['Allen Full name'].to_numpy(), - acronym=df_regions['Allen abbreviation'].to_numpy(), - rgb=c, - level=df_regions['level'].to_numpy().astype(np.uint16), - parent=df_regions['Parent ID'].to_numpy(), - order=df_regions['structure Order'].to_numpy().astype(np.uint16)) - - def _compute_mappings(self): - """ - Compute lateralized and non-lateralized mappings. - - This method is called by __post_init__. 
- """ - self.mappings = { - 'FranklinPaxinos': self._mapping_from_regions_list(np.unique(np.abs(self.id)), lateralize=False), - 'FranklinPaxinos-lr': np.arange(self.id.size), - } - self.default_mapping = 'FranklinPaxinos' - self.n_lr = int((len(self.id) - 1) / 2) # the number of lateralized regions - - -class BrainRegions(_BrainRegions): - """ - A struct of Allen brain regions, their names, IDs, relationships and associated plot colours. - - ibllib.atlas.regions.BrainRegions(brainmap='Allen') - - Notes - ----- - The Allen atlas IDs are kept intact but lateralized as follows: labels are duplicated - and IDs multiplied by -1, with the understanding that left hemisphere regions have negative - IDs. - """ - def __init__(self): - df_regions = pd.read_csv(ALLEN_FILE_REGIONS) - # lateralize - df_regions_left = df_regions.iloc[np.array(df_regions.id > 0), :].copy() - df_regions_left['id'] = - df_regions_left['id'] - df_regions_left['parent_structure_id'] = - df_regions_left['parent_structure_id'] - df_regions_left['name'] = df_regions_left['name'].apply(lambda x: x + ' (left)') - df_regions = pd.concat((df_regions, df_regions_left), axis=0) - # converts colors to RGB uint8 array - c = np.uint32(df_regions.color_hex_triplet.map( - lambda x: int(x, 16) if isinstance(x, str) else 256 ** 3 - 1)) - c = np.flip(np.reshape(c.view(np.uint8), (df_regions.id.size, 4))[:, :3], 1) - c[0, :] = 0 # set the void region to black - # For void assign the depth and level to avoid warnings of nan being converted to int - df_regions.loc[0, 'depth'] = 0 - df_regions.loc[0, 'graph_order'] = 0 - # creates the BrainRegion instance - super().__init__(id=df_regions.id.to_numpy(), - name=df_regions.name.to_numpy(), - acronym=df_regions.acronym.to_numpy(), - rgb=c, - level=df_regions.depth.to_numpy().astype(np.uint16), - parent=df_regions.parent_structure_id.to_numpy(), - order=df_regions.graph_order.to_numpy().astype(np.uint16)) - - def _compute_mappings(self): - """ - Recomputes the mapping indices 
for all mappings. - - Attempts to load mappings from the FILE_MAPPINGS file, otherwise generates from arrays of - brain IDs. In production, we use the MAPPING_FILES pqt to avoid recomputing at each - instantiation as this take a few seconds to execute. - - Currently there are 8 available mappings (Allen, Beryl, Cosmos, and Swanson), lateralized - (with suffix -lr) and non-lateralized. Each row contains the correspondence to the Allen - CCF structure tree order (i.e. index) for each mapping. - - This method is called by __post_init__. - """ - # mappings are indices not ids: they range from 0 to n regions -1 - if Path(FILE_MAPPINGS).exists(): - mappings = pd.read_parquet(FILE_MAPPINGS) - self.mappings = {k: mappings[k].to_numpy() for k in mappings} - else: - beryl = np.load(Path(__file__).parent.joinpath('beryl.npy')) - cosmos = np.load(Path(__file__).parent.joinpath('cosmos.npy')) - swanson = np.load(Path(__file__).parent.joinpath('swanson_regions.npy')) - self.mappings = { - 'Allen': self._mapping_from_regions_list(np.unique(np.abs(self.id)), lateralize=False), - 'Allen-lr': np.arange(self.id.size), - 'Beryl': self._mapping_from_regions_list(beryl, lateralize=False), - 'Beryl-lr': self._mapping_from_regions_list(beryl, lateralize=True), - 'Cosmos': self._mapping_from_regions_list(cosmos, lateralize=False), - 'Cosmos-lr': self._mapping_from_regions_list(cosmos, lateralize=True), - 'Swanson': self._mapping_from_regions_list(swanson, lateralize=False), - 'Swanson-lr': self._mapping_from_regions_list(swanson, lateralize=True), - } - pd.DataFrame(self.mappings).to_parquet(FILE_MAPPINGS) - self.default_mapping = 'Allen' - self.n_lr = int((len(self.id) - 1) / 2) # the number of lateralized regions - - def compute_hierarchy(self): - """ - Creates a self.hierarchy attribute that is an n_levels by n_region array - of indices. This is useful to perform fast vectorized computations of - ancestors and descendants. 
- :return: - """ - if hasattr(self, 'hierarchy'): - return - n_levels = np.max(self.level) - n_regions = self.id.size - # creates the parent index. Void and root are omitted from intersection - # as they figure as NaN - pmask, i_p = ismember(self.parent, self.id) - self.iparent = np.arange(n_regions) - self.iparent[pmask] = i_p - # the last level of the hierarchy is the actual mapping, then going up level per level - # we assign the parend index - self.hierarchy = np.tile(np.arange(n_regions), (n_levels, 1)) - _mask = np.zeros(n_regions, bool) - for lev in np.flipud(np.arange(n_levels)): - if lev < (n_levels - 1): - self.hierarchy[lev, _mask] = self.iparent[self.hierarchy[lev + 1, _mask]] - sel = self.level == (lev + 1) - self.hierarchy[lev, sel] = np.where(sel)[0] - _mask[sel] = True - - def propagate_down(self, acronyms, values): - """ - This function remaps a set of user specified acronyms and values to the - swanson map, by filling down the child nodes when higher up values are - provided. - :param acronyms: list or array of allen ids or acronyms - :param values: list or array of associated values - :return: - # FIXME Why only the swanson map? Also, how is this actually related to the Swanson map? - """ - user_aids = self.parse_acronyms_argument(acronyms) - _, user_indices = ismember(user_aids, self.id) - self.compute_hierarchy() - ia, ib = ismember(self.hierarchy, user_indices) - v = np.zeros_like(ia, dtype=np.float64) * np.NaN - v[ia] = values[ib] - all_values = np.nanmedian(v, axis=0) - indices = np.where(np.any(ia, axis=0))[0] - all_values = all_values[indices] - return indices, all_values - - def remap(self, region_ids, source_map='Allen', target_map='Beryl'): - """ - Remap atlas regions IDs from source map to target map. - Any NaNs in `region_ids` remain as NaN in the output array. - Parameters - ---------- - region_ids : array_like of int - The region IDs to remap. - source_map : str - The source map name, in `self.mappings`. 
- target_map : str - The target map name, in `self.mappings`. +@deprecated_decorator +def BrainRegions(): + return regions.BrainRegions() - Returns - ------- - numpy.array of int - The input IDs mapped to `target_map`. - """ - isnan = np.isnan(region_ids) - if np.sum(isnan) > 0: - # In case the user provides nans - nan_loc = np.where(isnan)[0] - _, inds = ismember(region_ids[~isnan], self.id[self.mappings[source_map]]) - mapped_ids = self.id[self.mappings[target_map][inds]].astype(float) - mapped_ids = np.insert(mapped_ids, nan_loc, np.full(nan_loc.shape, np.nan)) - else: - _, inds = ismember(region_ids, self.id[self.mappings[source_map]]) - mapped_ids = self.id[self.mappings[target_map][inds]] - return mapped_ids +@deprecated_decorator +def FranklinPaxinosRegions(): + return regions.FranklinPaxinosRegions() +@deprecated_decorator def regions_from_allen_csv(): """ (DEPRECATED) Reads csv file containing the ALlen Ontology and instantiates a BrainRegions object. diff --git a/ibllib/io/extractors/ephys_passive.py b/ibllib/io/extractors/ephys_passive.py index fb1de6964..2dfcb34e2 100644 --- a/ibllib/io/extractors/ephys_passive.py +++ b/ibllib/io/extractors/ephys_passive.py @@ -227,7 +227,7 @@ def _get_passive_spacers(session_path, sync_collection='raw_ephys_data', f'trace ({int(np.size(spacer_times) / 2)})' ) - if tmax is None: + if tmax is None: # TODO THIS NEEDS CHANGING AS FOR DYNAMIC PIPELINE F2TTL slower than valve tmax = fttl['times'][-1] spacer_times = np.r_[spacer_times.flatten(), tmax] diff --git a/ibllib/oneibl/data_handlers.py b/ibllib/oneibl/data_handlers.py index de39f31b8..19c737e15 100644 --- a/ibllib/oneibl/data_handlers.py +++ b/ibllib/oneibl/data_handlers.py @@ -299,7 +299,7 @@ def uploadData(self, outputs, version, **kwargs): for collection, files in collections.items(): globus_files = self.globus.ls(f'flatiron_{self.lab}', collection, remove_uuid=True, return_size=True) - file_names = [gl[0] for gl in globus_files] + file_names = [str(gl[0]) for gl 
in globus_files] file_sizes = [gl[1] for gl in globus_files] for name, details in files.items(): diff --git a/ibllib/oneibl/patcher.py b/ibllib/oneibl/patcher.py index da97285d6..53b8c0b6f 100644 --- a/ibllib/oneibl/patcher.py +++ b/ibllib/oneibl/patcher.py @@ -498,7 +498,7 @@ def _scp(self, local_path, remote_path, dry=True): _logger.info(f"Copy {local_path} to {remote_path}") if not dry: if not Path(remote_path).parent.exists(): - Path(remote_path).parent.mkdir(exist_ok=True) + Path(remote_path).parent.mkdir(exist_ok=True, parents=True) shutil.copy(local_path, remote_path) return 0, '' diff --git a/ibllib/pipes/dynamic_pipeline.py b/ibllib/pipes/dynamic_pipeline.py index 044e242a6..7c8fd6065 100644 --- a/ibllib/pipes/dynamic_pipeline.py +++ b/ibllib/pipes/dynamic_pipeline.py @@ -305,8 +305,10 @@ def make_pipeline(session_path, **pkwargs): # Video tasks if 'cameras' in devices: + cams = list(devices['cameras'].keys()) + subset_cams = [c for c in cams if c in ('left', 'right', 'body', 'belly')] video_kwargs = {'device_collection': 'raw_video_data', - 'cameras': list(devices['cameras'].keys())} + 'cameras': cams} video_compressed = sess_params.get_video_compressed(acquisition_description) if video_compressed: @@ -326,10 +328,14 @@ def make_pipeline(session_path, **pkwargs): tasks[tn] = type((tn := f'VideoSyncQC_{sync}'), (vtasks.VideoSyncQcBpod,), {})( **kwargs, **video_kwargs, **sync_kwargs, parents=[tasks['VideoCompress']]) elif sync == 'nidq': + # Here we restrict to videos that we support (left, right or body) + video_kwargs['cameras'] = subset_cams tasks[tn] = type((tn := f'VideoSyncQC_{sync}'), (vtasks.VideoSyncQcNidq,), {})( **kwargs, **video_kwargs, **sync_kwargs, parents=[tasks['VideoCompress']] + sync_tasks) if sync_kwargs['sync'] != 'bpod': + # Here we restrict to videos that we support (left, right or body) + video_kwargs['cameras'] = subset_cams tasks[tn] = type((tn := 'DLC'), (vtasks.DLC,), {})( **kwargs, **video_kwargs, parents=[dlc_parent_task]) 
tasks['PostDLC'] = type('PostDLC', (epp.EphysPostDLC,), {})( diff --git a/ibllib/pipes/ephys_alignment.py b/ibllib/pipes/ephys_alignment.py index 9080b9053..cf99d3564 100644 --- a/ibllib/pipes/ephys_alignment.py +++ b/ibllib/pipes/ephys_alignment.py @@ -1,7 +1,7 @@ import scipy import numpy as np import ibllib.pipes.histology as histology -import ibllib.atlas as atlas +import iblatlas.atlas as atlas TIP_SIZE_UM = 200 diff --git a/ibllib/pipes/histology.py b/ibllib/pipes/histology.py index 6081f6eaa..ccf7ade22 100644 --- a/ibllib/pipes/histology.py +++ b/ibllib/pipes/histology.py @@ -7,7 +7,7 @@ import one.alf.io as alfio from neuropixel import TIP_SIZE_UM, trace_header -import ibllib.atlas as atlas +import iblatlas.atlas as atlas from ibllib.ephys.spikes import probes_description as extract_probes from ibllib.qc import base @@ -595,7 +595,7 @@ def coverage(trajs, ba=None, dist_fcn=[100, 150]): """ Computes a coverage volume from :param trajs: dictionary of trajectories from Alyx rest endpoint (one.alyx.rest...) - :param ba: ibllib.atlas.BrainAtlas instance + :param ba: iblatlas.atlas.BrainAtlas instance :return: 3D np.array the same size as the volume provided in the brain atlas """ # in um. 
Coverage = 1 below the first value, 0 after the second, cosine taper in between diff --git a/ibllib/pipes/mesoscope_tasks.py b/ibllib/pipes/mesoscope_tasks.py index c379ca1e3..e922eaefb 100644 --- a/ibllib/pipes/mesoscope_tasks.py +++ b/ibllib/pipes/mesoscope_tasks.py @@ -36,7 +36,7 @@ from ibllib.pipes import base_tasks from ibllib.io.extractors import mesoscope -from ibllib.atlas import ALLEN_CCF_LANDMARKS_MLAPDV_UM, MRITorontoAtlas +from iblatlas.atlas import ALLEN_CCF_LANDMARKS_MLAPDV_UM, MRITorontoAtlas _logger = logging.getLogger(__name__) diff --git a/ibllib/pipes/training_status.py b/ibllib/pipes/training_status.py index 3c3f8bb4d..fc73304c6 100644 --- a/ibllib/pipes/training_status.py +++ b/ibllib/pipes/training_status.py @@ -586,7 +586,8 @@ def get_training_info_for_session(session_paths, one, force=True): if len(protocols) > 0 and len(set(protocols)) != 1: print(f'Different protocols on same date {sess_dicts[0]["date"]} : {protocols}') - if len(sess_dicts) > 1 and len(set(protocols)) == 1: # Only if all protocols are the same + # Only if all protocols are the same and are not habituation + if len(sess_dicts) > 1 and len(set(protocols)) == 1 and protocols[0] != 'habituation': # Only if all protocols are the same print(f'{len(sess_dicts)} sessions being combined for date {sess_dicts[0]["date"]}') combined_trials = load_combined_trials(session_paths, one, force=force) performance, contrasts, _ = training.compute_performance(combined_trials, prob_right=True) diff --git a/ibllib/pipes/video_tasks.py b/ibllib/pipes/video_tasks.py index fcbef1d17..eaf00aaa0 100644 --- a/ibllib/pipes/video_tasks.py +++ b/ibllib/pipes/video_tasks.py @@ -270,6 +270,7 @@ def _run(self, **kwargs): mp4_files = self.session_path.joinpath(self.device_collection).glob('*.mp4') labels = [label_from_path(x) for x in mp4_files] + labels = [lab for lab in labels if lab in ('left', 'right', 'body', 'belly')] kwargs = {} if self.sync_namespace == 'timeline': diff --git 
a/ibllib/pipes/widefield_tasks.py b/ibllib/pipes/widefield_tasks.py index 37a793135..adf49f9eb 100644 --- a/ibllib/pipes/widefield_tasks.py +++ b/ibllib/pipes/widefield_tasks.py @@ -177,7 +177,7 @@ def _run(self): outfiles = [] # from wfield import load_allen_landmarks, SVDStack, atlas_from_landmarks_file - # from ibllib.atlas.regions import BrainRegions + # from iblatlas.regions import BrainRegions # from iblutil.numerical import ismember # import numpy as np # U = np.load(self.session_path.joinpath('alf/widefield', 'widefieldU.images.npy')) diff --git a/ibllib/plots/snapshot.py b/ibllib/plots/snapshot.py index da9f8cdd6..295ba7cc6 100644 --- a/ibllib/plots/snapshot.py +++ b/ibllib/plots/snapshot.py @@ -13,7 +13,7 @@ from ibllib import __version__ as ibllib_version from ibllib.pipes.ephys_alignment import EphysAlignment from ibllib.pipes.histology import interpolate_along_track -from ibllib.atlas import AllenAtlas +from iblatlas.atlas import AllenAtlas _logger = logging.getLogger(__name__) @@ -56,8 +56,8 @@ def __init__(self, pid, session_path=None, one=None, brain_regions=None, brain_a """ :param pid: probe insertion UUID from Alyx :param one: one instance - :param brain_regions: (optional) ibllib.atlas.BrainRegion object - :param brain_atlas: (optional) ibllib.atlas.AllenAtlas object + :param brain_regions: (optional) iblatlas.regions.BrainRegion object + :param brain_atlas: (optional) iblatlas.atlas.AllenAtlas object :param kwargs: """ assert one diff --git a/ibllib/qc/alignment_qc.py b/ibllib/qc/alignment_qc.py index 05d308427..20d686ec6 100644 --- a/ibllib/qc/alignment_qc.py +++ b/ibllib/qc/alignment_qc.py @@ -6,7 +6,7 @@ from neuropixel import trace_header import spikeglx -from ibllib.atlas import AllenAtlas +from iblatlas.atlas import AllenAtlas from ibllib.pipes import histology from ibllib.pipes.ephys_alignment import EphysAlignment from ibllib.qc import base diff --git a/ibllib/qc/critical_reasons.py b/ibllib/qc/critical_reasons.py index 
378db9571..bf50bda20 100644 --- a/ibllib/qc/critical_reasons.py +++ b/ibllib/qc/critical_reasons.py @@ -3,32 +3,44 @@ Choices are listed in the global variables. Multiple reasons can be selected. Places info in Alyx session note in a format that is machine retrievable (text->json) """ -# Author: Gaelle import abc import logging import json -from requests.exceptions import HTTPError +import warnings from datetime import datetime -from one.api import ONE +from one.api import OneAlyx +from one.webclient import AlyxClient -_logger = logging.getLogger("ibllib") +_logger = logging.getLogger('ibllib') -def main_gui(uuid, reasons_selected, one=None): +def main_gui(uuid, reasons_selected, one=None, alyx=None): """ - Main function to call to input a reason for marking an insertion as - CRITICAL from the alignment GUI. It will: - - create note text, after deleting any similar notes existing already - - :param: uuid: insertion id - :param: reasons_selected: list of str, str are picked within REASONS_INS_CRIT_GUI + Main function to call to input a reason for marking an insertion as CRITICAL from the alignment GUI. + + It will create note text, after deleting any similar notes existing already. + + Parameters + ---------- + uuid : uuid.UUID, str + An insertion ID. + reasons_selected : list of str + A subset of REASONS_INS_CRIT_GUI. + one : one.api.OneAlyx + (DEPRECATED) An instance of ONE. NB: Pass in an instance of AlyxClient instead. + alyx : one.webclient.AlyxClient + An AlyxClient instance. """ # hit the database to check if uuid is insertion uuid - ins_list = one.alyx.rest('insertions', 'list', id=uuid, no_cache=True) + if alyx is None and one is not None: + # Deprecate ONE in future because instantiating it takes longer and is unnecessary + warnings.warn('In future please pass in an AlyxClient instance (i.e.
`one.alyx`)', FutureWarning) + alyx = one if isinstance(one, AlyxClient) else one.alyx + ins_list = alyx.rest('insertions', 'list', id=uuid, no_cache=True) if len(ins_list) != 1: raise ValueError(f'N={len(ins_list)} insertion found, expected N=1. Check uuid provided.') - note = CriticalInsertionNote(uuid, one) + note = CriticalInsertionNote(uuid, alyx) # assert that reasons are all within REASONS_INS_CRIT_GUI for item_str in reasons_selected: @@ -39,44 +51,57 @@ note._upload_note(overwrite=True) -def main(uuid, one=None): +def main(uuid, one=None, alyx=None): """ - Main function to call to input a reason for marking a session/insertion - as CRITICAL programmatically. It will: + Main function to call to input a reason for marking a session/insertion as CRITICAL programmatically. + + It will: - ask reasons for selection of critical status - check if 'other' reason has been selected, inquire why (free text) - create note text, checking whether similar notes exist already - - upload note to Alyx if none exist previously or if overwrite is chosen - Q&A are prompted via the Python terminal. - - Example: - # Retrieve Alyx note to test - one = ONE(base_url='https://dev.alyx.internationalbrainlab.org') - uuid = '2ffd3ed5-477e-4153-9af7-7fdad3c6946b' - main(uuid=uuid, one=one) - - # Get notes with pattern - notes = one.alyx.rest('notes', 'list', - django=f'text__icontains,{STR_NOTES_STATIC},' - f'object_id,{uuid}') - test_json_read = json.loads(notes[0]['text']) - - :param uuid: session/insertion uuid - :param one: default: None -> ONE() - :return: + - upload note to Alyx if none exist previously or if overwrite is chosen. Q&A are prompted via the Python terminal. + + Parameters + ---------- + uuid : uuid.UUID, str + An experiment UUID or an insertion UUID. + one : one.api.OneAlyx + (DEPRECATED) An instance of ONE. NB: Pass in an instance of AlyxClient instead. + alyx : one.webclient.AlyxClient + An AlyxClient instance.
+ + Examples + -------- + Retrieve Alyx note to test + + >>> alyx = AlyxClient(base_url='https://dev.alyx.internationalbrainlab.org') + >>> uuid = '2ffd3ed5-477e-4153-9af7-7fdad3c6946b' + >>> main(uuid=uuid, alyx=alyx) + + Get notes with pattern + + >>> notes = alyx.rest('notes', 'list', + ... django=f'text__icontains,{STR_NOTES_STATIC},' + ... f'object_id,{uuid}') + >>> test_json_read = json.loads(notes[0]['text']) + """ - if one is None: - one = ONE() + if alyx is None and one is not None: + # Deprecate ONE in future because instantiating it takes longer and is unnecessary + warnings.warn('In future please pass in an AlyxClient instance (i.e. `one.alyx`)', FutureWarning) + alyx = one if isinstance(one, AlyxClient) else one.alyx + if not alyx: + alyx = AlyxClient() # ask reasons for selection of critical status # hit the database to know if uuid is insertion or session uuid - sess_list = one.alyx.get('/sessions?&django=pk,' + uuid, clobber=True) - ins_list = one.alyx.get('/insertions?&django=pk,' + uuid, clobber=True) + sess_list = alyx.get('/sessions?&django=pk,' + uuid, clobber=True) + ins_list = alyx.get('/insertions?&django=pk,' + uuid, clobber=True) if len(sess_list) > 0 and len(ins_list) == 0: # session - note = CriticalSessionNote(uuid, one) + note = CriticalSessionNote(uuid, alyx) elif len(ins_list) > 0 and len(sess_list) == 0: # insertion - note = CriticalInsertionNote(uuid, one) + note = CriticalInsertionNote(uuid, alyx) else: raise ValueError(f'Inadequate number of session (n={len(sess_list)}) ' f'or insertion (n={len(ins_list)}) found for uuid {uuid}.' @@ -104,17 +129,26 @@ def note_title(self): def n_description(self): return len(self.default_descriptions) - def __init__(self, uuid, one, content_type=None): + def __init__(self, uuid, alyx, content_type=None): """ Base class for attaching notes to an alyx endpoint. 
Do not use this class directly but use parent classes that inherit this base class - :param uuid: uuid of session/ insertion or other model to attach note to - :param one: ONE instance - :param content_type: alyx endpoint of uuid + Parameters + ---------- + uuid : uuid.UUID, str + A UUID of a session, insertion, or other Alyx model to attach note to. + alyx : one.webclient.AlyxClient + An AlyxClient instance. + content_type : str + The Alyx model name of the UUID. """ self.uuid = uuid - self.one = one + if isinstance(alyx, OneAlyx): + # Deprecate ONE in future because instantiating it takes longer and is unnecessary + warnings.warn('In future please pass in an AlyxClient instance (i.e. `one.alyx`)', FutureWarning) + alyx = alyx.alyx + self.alyx = alyx self.selected_reasons = [] self.other_reason = [] if content_type is not None: @@ -124,22 +158,24 @@ def __init__(self, uuid, one, content_type=None): def get_content_type(self): """ - Infer the content_type from the uuid. Only checks to see if uuid is a session or insertion. If not recognised will raise - an error and the content_type must be specified on note initialisation e.g Note(uuid, one, content_type='subject') - :return: + Infer the content_type from the uuid. Only checks to see if uuid is a session or insertion. + If not recognised will raise an error and the content_type must be specified on note + initialisation e.g. Note(uuid, alyx, content_type='subject') + + Returns + ------- + str + The Alyx model name, inferred from the UUID. """ # see if it as session or an insertion - if self.one.uuid2path(self.uuid): + if self.alyx.rest('sessions', 'list', id=self.uuid): content_type = 'session' + elif self.alyx.rest('insertions', 'list', id=self.uuid): + content_type = 'probeinsertion' else: - try: - _ = self.one.pid2uuid(self.uuid) - content_type = 'probeinsertion' - except HTTPError: - raise ValueError('Content type cannot be recognised from {uuid}. 
Specify on ' - 'initialistion e.g Note(uuid, one, content_type="subject"') - + raise ValueError(f'Content type cannot be recognised from {self.uuid}. ' + 'Specify on initialistion e.g Note(uuid, alyx, content_type="subject"') return content_type def describe(self): @@ -159,14 +195,21 @@ def numbered_descriptions(self): def upload_note(self, nums=None, other_reason=None, **kwargs): """ - Upload note to alyx. If no values for nums and other_reason are specified, user will receive a prompt in command line - asking them to choose from default list of reasons to add to note as well as option for free text. To upload without - receiving prompt a value for either nums or other_reason must be given + Upload note to Alyx. + + If no values for nums and other_reason are specified, user will receive a prompt in command + line asking them to choose from default list of reasons to add to note as well as option + for free text. To upload without receiving prompt a value for either `nums` or + `other_reason` must be given. + + Parameters + ---------- + nums : str + string of numbers matching those in default descriptions, e.g, '1,3'. Options can be + seen using note.describe(). + other_reason : str + Other comment or reason(s) to add to note. - :param nums: string of numbers matching those in default descrptions, e.g, '1,3'. 
Options can be see using note.describe() - :param other_reason: other comment or reasons to add to note (string) - :param kwargs: - :return: """ if nums is None and other_reason is None: @@ -188,30 +231,29 @@ def _upload_note(self, **kwargs): def _create_note(self, text): - data = {'user': self.one.alyx.user, + data = {'user': self.alyx.user, 'content_type': self.content_type, 'object_id': self.uuid, 'text': f'{text}'} - self.one.alyx.rest('notes', 'create', data=data) + self.alyx.rest('notes', 'create', data=data) def _update_note(self, note_id, text): - data = {'user': self.one.alyx.user, + data = {'user': self.alyx.user, 'content_type': self.content_type, 'object_id': self.uuid, 'text': f'{text}'} - self.one.alyx.rest('notes', 'partial_update', id=note_id, data=data) + self.alyx.rest('notes', 'partial_update', id=note_id, data=data) def _delete_note(self, note_id): - self.one.alyx.rest('notes', 'delete', id=note_id) + self.alyx.rest('notes', 'delete', id=note_id) def _delete_notes(self, notes): for note in notes: self._delete_note(note['id']) def _check_existing_note(self): - notes = self.one.alyx.rest('notes', 'list', django=f'text__icontains,{self.note_title},object_id,{self.uuid}', - no_cache=True) + notes = self.alyx.rest('notes', 'list', django=f'text__icontains,{self.note_title},object_id,{self.uuid}', no_cache=True) if len(notes) == 0: return False, None else: @@ -329,15 +371,21 @@ class CriticalInsertionNote(CriticalNote): """ Class for uploading a critical note to an insertion. 
- Example - ------- - note = CriticalInsertionNote(pid, one) - # print list of default reasons - note.describe() - # to receive a command line prompt to fill in note - note.upload_note() - # to upload note automatically without prompt - note.upload_note(nums='1,4', other_reason='lots of bad channels') + Examples + -------- + >>> note = CriticalInsertionNote(pid, AlyxClient()) + + Print list of default reasons + + >>> note.describe() + + To receive a command line prompt to fill in note + + >>> note.upload_note() + + To upload note automatically without prompt + + >>> note.upload_note(nums='1,4', other_reason='lots of bad channels') """ descriptions_gui = [ @@ -365,8 +413,8 @@ def extra_prompt(self): def note_title(self): return '=== EXPERIMENTER REASON(S) FOR MARKING THE INSERTION AS CRITICAL ===' - def __init__(self, uuid, one): - super(CriticalInsertionNote, self).__init__(uuid, one, content_type='probeinsertion') + def __init__(self, uuid, alyx): + super(CriticalInsertionNote, self).__init__(uuid, alyx, content_type='probeinsertion') class CriticalSessionNote(CriticalNote): @@ -375,13 +423,19 @@ class CriticalSessionNote(CriticalNote): Example ------- - note = CriticalInsertionNote(uuid, one) - # print list of default reasons - note.describe() - # to receive a command line prompt to fill in note - note.upload_note() - # to upload note automatically without prompt - note.upload_note(nums='1,4', other_reason='session with no ephys recording') + >>> note = CriticalSessionNote(uuid, AlyxClient()) + + Print list of default reasons + + >>> note.describe() + + To receive a command line prompt to fill in note + + >>> note.upload_note() + + To upload note automatically without prompt + + >>> note.upload_note(nums='1,4', other_reason='session with no ephys recording') """ descriptions = [ @@ -399,8 +453,8 @@ def extra_prompt(self): def note_title(self): return '=== EXPERIMENTER REASON(S) FOR MARKING THE SESSION AS CRITICAL ===' - def __init__(self, uuid, one): -
super(CriticalSessionNote, self).__init__(uuid, one, content_type='session') + def __init__(self, uuid, alyx): + super(CriticalSessionNote, self).__init__(uuid, alyx, content_type='session') class SignOffNote(Note): @@ -417,11 +471,11 @@ def extra_prompt(self): def note_title(self): return f'=== SIGN-OFF NOTE FOR {self.sign_off_key} ===' - def __init__(self, uuid, one, sign_off_key): + def __init__(self, uuid, alyx, sign_off_key): self.sign_off_key = sign_off_key - super(SignOffNote, self).__init__(uuid, one, content_type='session') + super(SignOffNote, self).__init__(uuid, alyx, content_type='session') self.datetime_key = self.get_datetime_key() - self.session = one.alyx.rest('sessions', 'read', id=self.uuid, no_cache=True) + self.session = self.alyx.rest('sessions', 'read', id=self.uuid, no_cache=True) def upload_note(self, nums=None, other_reason=None, **kwargs): super(SignOffNote, self).upload_note(nums=nums, other_reason=other_reason, **kwargs) @@ -430,10 +484,17 @@ def upload_note(self, nums=None, other_reason=None, **kwargs): def sign_off(self): json = self.session['json'] - json['sign_off_checklist'][self.sign_off_key] = {'date': self.datetime_key.split('_')[0], - 'user': self.datetime_key.split('_')[1]} + sign_off_checklist = json.get('sign_off_checklist', None) + if sign_off_checklist is None: + sign_off_checklist = {self.sign_off_key: {'date': self.datetime_key.split('_')[0], + 'user': self.datetime_key.split('_')[1]}} + else: + sign_off_checklist[self.sign_off_key] = {'date': self.datetime_key.split('_')[0], + 'user': self.datetime_key.split('_')[1]} - self.one.alyx.json_field_update("sessions", self.uuid, 'json', data=json) + json['sign_off_checklist'] = sign_off_checklist + + self.alyx.json_field_update("sessions", self.uuid, 'json', data=json) def format_note(self, **kwargs): @@ -464,7 +525,10 @@ def update_existing_note(self, notes): self._update_note(notes[0]['id'], text) def get_datetime_key(self): - user = self.one.alyx.user + if not 
self.alyx.is_logged_in: + self.alyx.authenticate() + assert self.alyx.is_logged_in, 'you must be logged in to the AlyxClient' + user = self.alyx.user date = datetime.now().date().isoformat() return date + '_' + user @@ -474,17 +538,25 @@ class TaskSignOffNote(SignOffNote): """ Class for signing off a task part of a session and optionally adding a related explanation note. - Example - ------- - note = TaskSignOffNote(uuid, one, '_ephysChoiceWorld_00') - # to sign off session without any note - note.sign_off() - # print list of default reasons - note.describe() - # to upload note and sign off with prompt - note.upload_note() - # to upload note automatically without prompt - note.upload_note(nums='1,4', other_reason='session with no ephys recording') + Examples + -------- + >>> note = TaskSignOffNote(eid, AlyxClient(), '_ephysChoiceWorld_00') + + To sign off session without any note + + >>> note.sign_off() + + Print list of default reasons + + >>> note.describe() + + To upload note and sign off with prompt + + >>> note.upload_note() + + To upload note automatically without prompt + + >>> note.upload_note(nums='1,4', other_reason='session with no ephys recording') """ descriptions = [ @@ -499,17 +571,25 @@ class PassiveSignOffNote(SignOffNote): """ Class for signing off a passive part of a session and optionally adding a related explanation note. 
- Example - ------- - note = PassiveSignOffNote(uuid, one, '_passiveChoiceWorld_00') - # to sign off session without any note - note.sign_off() - # print list of default reasons - note.describe() - # to upload note and sign off with prompt - note.upload_note() - # to upload note automatically without prompt - note.upload_note(nums='1,4', other_reason='session with no ephys recording') + Examples + -------- + >>> note = PassiveSignOffNote(eid, AlyxClient(), '_passiveChoiceWorld_00') + + To sign off session without any note + + >>> note.sign_off() + + Print list of default reasons + + >>> note.describe() + + To upload note and sign off with prompt + + >>> note.upload_note() + + To upload note automatically without prompt + + >>> note.upload_note(nums='1,4', other_reason='session with no ephys recording') """ descriptions = [ @@ -526,17 +606,25 @@ class VideoSignOffNote(SignOffNote): """ Class for signing off a video part of a session and optionally adding a related explanation note. - Example - ------- - note = VideoSignOffNote(uuid, one, '_camera_left') - # to sign off session without any note - note.sign_off() - # print list of default reasons - note.describe() - # to upload note and sign off with prompt - note.upload_note() - # to upload note automatically without prompt - note.upload_note(nums='1,4', other_reason='session with no ephys recording') + Examples + -------- + >>> note = VideoSignOffNote(eid, AlyxClient(), '_camera_left') + + To sign off session without any note + + >>> note.sign_off() + + Print list of default reasons + + >>> note.describe() + + To upload note and sign off with prompt + + >>> note.upload_note() + + To upload note automatically without prompt + + >>> note.upload_note(nums='1,4', other_reason='session with no ephys recording') """ descriptions = [ @@ -554,17 +642,25 @@ class RawEphysSignOffNote(SignOffNote): """ Class for signing off a raw ephys part of a session and optionally adding a related explanation note. 
- Example - ------- - note = RawEphysSignOffNote(uuid, one, '_neuropixel_raw_probe00') - # to sign off session without any note - note.sign_off() - # print list of default reasons - note.describe() - # to upload note and sign off with prompt - note.upload_note() - # to upload note automatically without prompt - note.upload_note(nums='1,4', other_reason='session with no ephys recording') + Examples + -------- + >>> note = RawEphysSignOffNote(eid, AlyxClient(), '_neuropixel_raw_probe00') + + To sign off session without any note + + >>> note.sign_off() + + Print list of default reasons + + >>> note.describe() + + To upload note and sign off with prompt + + >>> note.upload_note() + + To upload note automatically without prompt + + >>> note.upload_note(nums='1,4', other_reason='session with no ephys recording') """ descriptions = [ @@ -577,19 +673,27 @@ class RawEphysSignOffNote(SignOffNote): class SpikeSortingSignOffNote(SignOffNote): """ - Class for signing off a spikesorting part of a session and optionally adding a related explanation note. + Class for signing off a spike sorting part of a session and optionally adding a related explanation note. 
- Example - ------- - note = SpikeSortingSignOffNote(uuid, one, '_neuropixel_spike_sorting_probe00') - # to sign off session without any note - note.sign_off() - # print list of default reasons - note.describe() - # to upload note and sign off with prompt - note.upload_note() - # to upload note automatically without prompt - note.upload_note(nums='1,4', other_reason='session with no ephys recording') + Examples + -------- + >>> note = SpikeSortingSignOffNote(eid, AlyxClient(), '_neuropixel_spike_sorting_probe00') + + To sign off session without any note + + >>> note.sign_off() + + Print list of default reasons + + >>> note.describe() + + To upload note and sign off with prompt + + >>> note.upload_note() + + To upload note automatically without prompt + + >>> note.upload_note(nums='1,4', other_reason='session with no ephys recording') """ descriptions = [ @@ -603,17 +707,25 @@ class AlignmentSignOffNote(SignOffNote): """ Class for signing off a alignment part of a session and optionally adding a related explanation note. 
- Example - ------- - note = AlignmentSignOffNote(uuid, one, '_neuropixel_alignment_probe00') - # to sign off session without any note - note.sign_off() - # print list of default reasons - note.describe() - # to upload note and sign off with prompt - note.upload_note() - # to upload note automatically without prompt - note.upload_note(nums='1,4', other_reason='session with no ephys recording') + Examples + -------- + >>> note = AlignmentSignOffNote(eid, AlyxClient(), '_neuropixel_alignment_probe00') + + To sign off session without any note + + >>> note.sign_off() + + Print list of default reasons + + >>> note.describe() + + To upload note and sign off with prompt + + >>> note.upload_note() + + To upload note automatically without prompt + + >>> note.upload_note(nums='1,4', other_reason='session with no ephys recording') """ descriptions = [] diff --git a/ibllib/tests/qc/test_alignment_qc.py b/ibllib/tests/qc/test_alignment_qc.py index 55eddb68c..bb3e6a433 100644 --- a/ibllib/tests/qc/test_alignment_qc.py +++ b/ibllib/tests/qc/test_alignment_qc.py @@ -13,7 +13,7 @@ from neuropixel import trace_header from ibllib.tests import TEST_DB -from ibllib.atlas import AllenAtlas +from iblatlas.atlas import AllenAtlas from ibllib.pipes.misc import create_alyx_probe_insertions from ibllib.qc.alignment_qc import AlignmentQC from ibllib.pipes.histology import register_track, register_chronic_track diff --git a/ibllib/tests/qc/test_critical_reasons.py b/ibllib/tests/qc/test_critical_reasons.py index 9482710ff..1c6c44f00 100644 --- a/ibllib/tests/qc/test_critical_reasons.py +++ b/ibllib/tests/qc/test_critical_reasons.py @@ -170,7 +170,8 @@ def setUp(self) -> None: def test_sign_off(self): sess = one.alyx.rest('sessions', 'read', id=self.eid, no_cache=True) - note = usrpmt.TaskSignOffNote(self.eid, one, sign_off_key=self.sign_off_keys[0]) + with self.assertWarns(FutureWarning): + note = usrpmt.TaskSignOffNote(self.eid, one, sign_off_key=self.sign_off_keys[0]) note.sign_off() sess = 
one.alyx.rest('sessions', 'read', id=self.eid, no_cache=True) diff --git a/ibllib/tests/test_atlas.py b/ibllib/tests/test_atlas.py index f3d147c26..04273dc6e 100644 --- a/ibllib/tests/test_atlas.py +++ b/ibllib/tests/test_atlas.py @@ -2,9 +2,8 @@ import numpy as np import matplotlib.pyplot as plt - -from ibllib.atlas import (BrainCoordinates, cart2sph, sph2cart, Trajectory, - Insertion, ALLEN_CCF_LANDMARKS_MLAPDV_UM, AllenAtlas) +from iblatlas.atlas import cart2sph, sph2cart, ALLEN_CCF_LANDMARKS_MLAPDV_UM +from ibllib.atlas import (BrainCoordinates, Trajectory, Insertion, AllenAtlas) from ibllib.atlas.regions import BrainRegions, FranklinPaxinosRegions from ibllib.atlas.plots import prepare_lr_data, reorder_data from iblutil.numerical import ismember diff --git a/ibllib/tests/test_histology.py b/ibllib/tests/test_histology.py index acd26b5d2..dce24ed43 100644 --- a/ibllib/tests/test_histology.py +++ b/ibllib/tests/test_histology.py @@ -5,7 +5,7 @@ from ibllib.pipes import histology from ibllib.pipes.ephys_alignment import (EphysAlignment, TIP_SIZE_UM, _cumulative_distance) -import ibllib.atlas as atlas +import iblatlas.atlas as atlas # TODO Place this in setUpModule() brain_atlas = atlas.AllenAtlas(res_um=25) diff --git a/release_notes.md b/release_notes.md index 7e1552106..2f93201dc 100644 --- a/release_notes.md +++ b/release_notes.md @@ -1,3 +1,9 @@ +## Release Notes 2.26 + +### features +- Deprecate ibllib.atlas. Code now contained in package iblatlas + + ## Release Notes 2.25 ### features diff --git a/requirements.txt b/requirements.txt index ed29169af..2b16fae7d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -30,3 +30,4 @@ ONE-api>=2.2 slidingRP>=1.0.0 # steinmetz lab refractory period metrics wfield==0.3.7 # widefield extractor frozen for now (2023/07/15) until Joao fixes latest version psychofit +iblatlas