Commit 0c01ef8

Merge branch 'release/2.6.0'
juhuntenburg committed Dec 8, 2021
2 parents b2fb64a + f290617 commit 0c01ef8
Showing 12 changed files with 899 additions and 134 deletions.
336 changes: 329 additions & 7 deletions brainbox/behavior/dlc.py

Large diffs are not rendered by default.
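The dlc.py diff is not rendered here, but the pipeline changes below exercise its behaviour functions. A hypothetical usage sketch (file paths are placeholders, not from the diff):

import numpy as np
import pandas as pd
from brainbox.behavior.dlc import (likelihood_threshold, get_licks,
                                   get_pupil_diameter, get_smooth_pupil_diameter)

dlc = pd.read_parquet('_ibl_leftCamera.dlc.pqt')    # hypothetical DLC trace
dlc_t = np.load('_ibl_leftCamera.times.npy')        # hypothetical camera times
dlc_thresh = likelihood_threshold(dlc, 0.9)         # mask points below 0.9 likelihood
lick_times = get_licks(dlc_thresh, dlc_t)           # lick event timestamps
pupil_raw = get_pupil_diameter(dlc_thresh)
pupil_smooth = get_smooth_pupil_diameter(pupil_raw, 'left')  # camera label selects parameters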

12 changes: 7 additions & 5 deletions brainbox/io/one.py
@@ -12,7 +12,7 @@

from ibllib.io import spikeglx
from ibllib.io.extractors.training_wheel import extract_wheel_moves, extract_first_movement_times
from ibllib.ephys.neuropixel import SITES_COORDINATES, TIP_SIZE_UM
from ibllib.ephys.neuropixel import SITES_COORDINATES, TIP_SIZE_UM, trace_header
from ibllib.atlas import atlas
from ibllib.atlas import AllenAtlas
from ibllib.pipes import histology
@@ -221,7 +221,7 @@ def _load_channels_locations_from_disk(eid, collection=None, one=None, revision=
return channels


def channel_locations_interpolation(channels_aligned, channels, brain_regions=None):
def channel_locations_interpolation(channels_aligned, channels=None, brain_regions=None):
"""
oftentimes the channel map for different spike sorters may differ, so we interpolate the alignment onto the channel map at hand.
if there is no spike sorting in the base folder, the alignment doesn't have the localCoordinates field
@@ -238,16 +238,18 @@ def channel_locations_interpolation(channels_aligned, channels=No
'x', 'y', 'z', 'acronym', 'atlas_id', 'axial_um', 'lateral_um'
:return: Bunch or dictionary of channels with brain coordinates keys
"""
NEUROPIXEL_VERSION = 1
h = trace_header(version=NEUROPIXEL_VERSION)
if channels is None:
channels = {'localCoordinates': np.c_[h['x'], h['y']]}
nch = channels['localCoordinates'].shape[0]
if set(['x', 'y', 'z']).issubset(set(channels_aligned.keys())):
channels_aligned = _channels_bunch2alf(channels_aligned)
if 'localCoordinates' in channels_aligned.keys():
aligned_depths = channels_aligned['localCoordinates'][:, 1]
else: # this is an edge case for a few spike sorting sessions
assert channels_aligned['mlapdv'].shape[0] == 384
NEUROPIXEL_VERSION = 1
from ibllib.ephys.neuropixel import trace_header
aligned_depths = trace_header(version=NEUROPIXEL_VERSION)['y']
aligned_depths = h['y']
depth_aligned, ind_aligned = np.unique(aligned_depths, return_index=True)
depths, ind, iinv = np.unique(channels['localCoordinates'][:, 1], return_index=True, return_inverse=True)
channels['mlapdv'] = np.zeros((nch, 3))
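With channels now defaulting to None, the function can interpolate the alignment onto the stock Neuropixel 1.0 channel map when no spike-sorting channel map is at hand. A minimal sketch, assuming channels_aligned was loaded from an existing alignment:

from brainbox.io.one import channel_locations_interpolation

# channels_aligned: dict or Bunch with 'x', 'y', 'z', 'acronym', 'atlas_id',
# 'axial_um', 'lateral_um' (and optionally 'localCoordinates')
channels = channel_locations_interpolation(channels_aligned)  # channels=None -> default NP1 map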
49 changes: 43 additions & 6 deletions ibllib/dsp/voltage.py
@@ -109,6 +109,37 @@ def fk(x, si=.002, dx=1, vbounds=None, btype='highpass', ntr_pad=0, ntr_tap=None
return xf / gain


def car(x, collection=None, lagc=300, butter_kwargs=None):
"""
Applies common average referencing with optional automatic gain control
:param x: the input array to be filtered, shape (ntraces, ns); axis=0 is the spatial
dimension and axis=1 the temporal dimension
:param collection: (optional) vector of integers assigning each trace to a group; the referencing is applied separately per group
:param lagc: window size for time domain automatic gain control (no agc otherwise)
:param butter_kwargs: filtering parameters: defaults: {'N': 3, 'Wn': 0.1, 'btype': 'highpass'}
:return:
"""
if butter_kwargs is None:
butter_kwargs = {'N': 3, 'Wn': 0.1, 'btype': 'highpass'}
if collection is not None:
xout = np.zeros_like(x)
for c in np.unique(collection):
sel = collection == c
xout[sel, :] = car(x=x[sel, :], collection=None, lagc=lagc,
butter_kwargs=butter_kwargs)
return xout

# apply agc and keep the gain in handy
if not lagc:
xf = np.copy(x)
gain = 1
else:
xf, gain = agc(x, wl=lagc, si=1.0)
# apply CAR and then un-apply the gain
xf = xf - np.median(xf, axis=0)
return xf / gain
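Stripped of the optional AGC, the operative step of car() is a single median subtraction. A minimal sketch on synthetic data:

import numpy as np

x = np.random.randn(384, 30000).astype(np.float32)  # (ntraces, ns)
x_car = x - np.median(x, axis=0)                    # remove the reference common to all channels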


def kfilt(x, collection=None, ntr_pad=0, ntr_tap=None, lagc=300, butter_kwargs=None):
"""
Applies a butterworth filter on the 0-axis with tapering / padding
@@ -209,13 +240,19 @@ def destripe(x, fs, neuropixel_version=1, butter_kwargs=None, k_kwargs=None, cha
True: deduces the bad channels from the data provided
:param butter_kwargs: (optional, None) butterworth params, see the code for the defaults dict
:param k_kwargs: (optional, None) K-filter params, see the code for the defaults dict
can also be set to 'car', in which case the median across channels will be subtracted
:return: x, filtered array
"""
if butter_kwargs is None:
butter_kwargs = {'N': 3, 'Wn': 300 / fs * 2, 'btype': 'highpass'}
if k_kwargs is None:
k_kwargs = {'ntr_pad': 60, 'ntr_tap': 0, 'lagc': 3000,
'butter_kwargs': {'N': 3, 'Wn': 0.01, 'btype': 'highpass'}}
spatial_fcn = lambda dat: kfilt(dat, **k_kwargs) # noqa
elif isinstance(k_kwargs, dict):
spatial_fcn = lambda dat: kfilt(dat, **k_kwargs) # noqa
else:
spatial_fcn = lambda dat: car(dat, lagc=int(0.1 * fs)) # noqa
h = neuropixel.trace_header(version=neuropixel_version)
if channel_labels is True:
channel_labels, _ = detect_bad_channels(x, fs)
@@ -231,9 +268,9 @@
if channel_labels is not None:
x = interpolate_bad_channels(x, channel_labels, h)
inside_brain = np.where(channel_labels != 3)[0]
x[inside_brain, :] = kfilt(x[inside_brain, :], **k_kwargs) # apply the k-filter
x[inside_brain, :] = spatial_fcn(x[inside_brain, :]) # apply the spatial filter
else:
x = kfilt(x, **k_kwargs)
x = spatial_fcn(x)
return x
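Per the dispatch above, passing k_kwargs='car' (or any value that is neither None nor a dict) swaps the k-filter for common average referencing with a 0.1 s AGC window. A hedged call sketch, with raw assumed to be an in-memory (nc, ns) float array:

from ibllib.dsp.voltage import destripe

fs = 30000
x_clean = destripe(raw, fs=fs, k_kwargs='car')  # CAR instead of the default k-filter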


@@ -245,7 +282,7 @@ def decompress_destripe_cbin(sr_file, output_file=None, h=None, wrot=None, appen
Production version with optimized FFTs - requires pyfftw
:param sr_file: the compressed bin file to process (opened with spikeglx.Reader)
:param output_file: (optional, defaults to .bin extension of the compressed bin file)
:param h: (optional)
:param h: (optional) neuropixel trace header. Dictionary with key 'sample_shift'
:param wrot: (optional) whitening matrix [nc x nc] or amplitude scalar to apply to the output
:param append: (optional, False) for chronic recordings, append to end of file
:param nc_out: (optional, True) saves non selected channels (synchronisation trace) in output
@@ -273,7 +310,7 @@ def decompress_destripe_cbin(sr_file, output_file=None, h=None, wrot=None, appen
k_kwargs = {'ntr_pad': 60, 'ntr_tap': 0, 'lagc': 3000,
'butter_kwargs': {'N': 3, 'Wn': 0.01, 'btype': 'highpass'}}
h = neuropixel.trace_header(version=1) if h is None else h
ncv = h['x'].size # number of channels
ncv = h['sample_shift'].size # number of channels
output_file = sr.file_bin.with_suffix('.bin') if output_file is None else output_file
assert output_file != sr.file_bin
taper = np.r_[0, scipy.signal.windows.cosine((SAMPLES_TAPER - 1) * 2), 0]
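Reading the channel count from 'sample_shift' means a trace header without geometry still works here. A sketch under that assumption (paths hypothetical; pyfftw required per the docstring):

from ibllib.ephys import neuropixel
from ibllib.dsp.voltage import decompress_destripe_cbin

h = neuropixel.trace_header(version=1)  # any header dict with a 'sample_shift' key
decompress_destripe_cbin('/data/raw_ephys.ap.cbin', h=h)  # hypothetical file path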
@@ -504,7 +541,7 @@ def nxcor(x, ref):
xcor = channels_similarity(raw)
fscale, psd = scipy.signal.welch(raw * 1e6, fs=fs) # units; uV ** 2 / Hz

sos_hp = scipy.signal.butter(**{'N': 3, 'Wn': 1000 / fs / 2, 'btype': 'highpass'}, output='sos')
sos_hp = scipy.signal.butter(**{'N': 3, 'Wn': 300 / fs * 2, 'btype': 'highpass'}, output='sos')
hf = scipy.signal.sosfiltfilt(sos_hp, raw)
xcorf = channels_similarity(hf)

@@ … @@
'rms_raw': rms(raw), # very similar to the rms after the butterworth filter
'xcor_hf': detrend(xcor, 11),
'xcor_lf': xcorf - detrend(xcorf, 11) - 1,
'psd_hf': np.mean(psd[:, fscale > 12000], axis=-1),
'psd_hf': np.mean(psd[:, fscale > (fs / 2 * 0.8)], axis=-1), # 80% of Nyquist
})

# make recommendation
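The high-frequency PSD feature now tracks the sampling rate instead of a fixed 12 kHz cutoff. A sketch of the updated computation, assuming raw is a (nc, ns) array in volts:

import numpy as np
import scipy.signal

fs = 30000
fscale, psd = scipy.signal.welch(raw * 1e6, fs=fs)          # units: uV ** 2 / Hz
psd_hf = np.mean(psd[:, fscale > (fs / 2 * 0.8)], axis=-1)  # mean power above 80% of Nyquist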
2 changes: 1 addition & 1 deletion ibllib/io/extractors/camera.py
@@ -553,7 +553,7 @@ def groom_pin_state(gpio, audio, ts, tolerance=2., display=False, take='first',
downs = ts[high2low] - ts[high2low][0]
offsets = audio_times[1::2] - audio_times[1]
assigned = attribute_times(offsets, downs, tol=tolerance, take=take)
unassigned = np.setdiff1d(np.arange(onsets.size), assigned[assigned > -1])
unassigned = np.setdiff1d(np.arange(offsets.size), assigned[assigned > -1])
if unassigned.size > 0:
_logger.debug(f'{unassigned.size} audio TTL falls were not detected by the camera')
# Check that all pin state downticks could be attributed to an offset TTL
Expand Down
5 changes: 5 additions & 0 deletions ibllib/io/spikeglx.py
@@ -478,6 +478,11 @@ def _geometry_from_meta(meta_data):
"""
cm = _map_channels_from_meta(meta_data)
major_version = _get_neuropixel_major_version_from_meta(meta_data)
if cm is None:
_logger.warning("Meta data doesn't have geometry (snsShankMap field), returning defaults")
th = neuropixel.trace_header(version=major_version)
th['flag'] = th['x'] * 0 + 1.
return th
th = cm.copy()
if major_version == 1:
# the spike sorting channel maps have a flipped version of the channel map
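A sketch of the new fallback: without an snsShankMap, the default trace header for the probe's major version is returned, with every channel flagged as usable, mirroring the two lines added above:

from ibllib.ephys import neuropixel

th = neuropixel.trace_header(version=1)  # major version read from the meta data
th['flag'] = th['x'] * 0 + 1.            # all channels flagged valid by default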
152 changes: 87 additions & 65 deletions ibllib/pipes/ephys_preprocessing.py
@@ -9,6 +9,7 @@

import numpy as np
import pandas as pd

import one.alf.io as alfio

from ibllib.misc import check_nvidia_driver
@@ -24,6 +25,8 @@
from ibllib.qc.camera import run_all_qc as run_camera_qc
from ibllib.qc.dlc import DlcQC
from ibllib.dsp import rms
from ibllib.plots.figures import dlc_qc_plot
from ibllib.plots.snapshot import ReportSnapshot
from brainbox.behavior.dlc import likelihood_threshold, get_licks, get_pupil_diameter, get_smooth_pupil_diameter

_logger = logging.getLogger("ibllib")
@@ -871,85 +874,104 @@ class EphysPostDLC(tasks.Task):
('_ibl_rightCamera.times.npy', 'alf', True),
('_ibl_leftCamera.times.npy', 'alf', True),
('_ibl_bodyCamera.times.npy', 'alf', True)],
# More files are required for all panels of the DLC QC plot to function
'output_files': [('_ibl_leftCamera.features.pqt', 'alf', True),
('_ibl_rightCamera.features.pqt', 'alf', True),
('licks.times.npy', 'alf', True)]
}

def _run(self, overwrite=False, run_qc=True):
def _run(self, overwrite=False, run_qc=True, plot_qc=True):
# Check if output files exist locally
exist, output_files = self.assert_expected(self.signature['output_files'], silent=True)
if exist and not overwrite:
_logger.warning('EphysPostDLC outputs exist and overwrite=False, skipping.')
return output_files
if exist and overwrite:
_logger.warning('EphysPostDLC outputs exist and overwrite=True, overwriting existing outputs.')
# Find all available dlc traces and dlc times
dlc_files = list(Path(self.session_path).joinpath('alf').glob('_ibl_*Camera.dlc.*'))
for dlc_file in dlc_files:
_logger.debug(dlc_file)
output_files = []
combined_licks = []

for dlc_file in dlc_files:
# Catch unforeseen exceptions and move on to next cam
try:
cam = label_from_path(dlc_file)
# load dlc trace and camera times
dlc = pd.read_parquet(dlc_file)
dlc_thresh = likelihood_threshold(dlc, 0.9)
# try to load respective camera times
_logger.warning('EphysPostDLC outputs exist and overwrite=False, skipping computations of outputs.')
else:
if exist and overwrite:
_logger.warning('EphysPostDLC outputs exist and overwrite=True, overwriting existing outputs.')
# Find all available dlc traces and dlc times
dlc_files = list(Path(self.session_path).joinpath('alf').glob('_ibl_*Camera.dlc.*'))
for dlc_file in dlc_files:
_logger.debug(dlc_file)
output_files = []
combined_licks = []

for dlc_file in dlc_files:
# Catch unforeseen exceptions and move on to next cam
try:
dlc_t = np.load(next(Path(self.session_path).joinpath('alf').glob(f'_ibl_{cam}Camera.times.*npy')))
times = True
except StopIteration:
_logger.error(f'No camera.times found for {cam} camera. '
f'Computations using camera.times will be skipped')
self.status = -1
times = False

# These features are only computed from left and right cam
if cam in ('left', 'right'):
features = pd.DataFrame()
# If camera times are available, get the lick time stamps for combined array
if times:
_logger.info(f"Computing lick times for {cam} camera.")
combined_licks.append(get_licks(dlc_thresh, dlc_t))
cam = label_from_path(dlc_file)
# load dlc trace and camera times
dlc = pd.read_parquet(dlc_file)
dlc_thresh = likelihood_threshold(dlc, 0.9)
# try to load respective camera times
try:
dlc_t = np.load(next(Path(self.session_path).joinpath('alf').glob(f'_ibl_{cam}Camera.times.*npy')))
times = True
except StopIteration:
_logger.error(f'No camera.times found for {cam} camera. '
f'Computations using camera.times will be skipped')
self.status = -1
times = False

# These features are only computed from left and right cam
if cam in ('left', 'right'):
features = pd.DataFrame()
# If camera times are available, get the lick time stamps for combined array
if times:
_logger.info(f"Computing lick times for {cam} camera.")
combined_licks.append(get_licks(dlc_thresh, dlc_t))
else:
_logger.warning(f"Skipping lick times for {cam} camera as no camera.times available.")
# Compute pupil diameter, raw and smoothed
_logger.info(f"Computing raw pupil diameter for {cam} camera.")
features['pupilDiameter_raw'] = get_pupil_diameter(dlc_thresh)
_logger.info(f"Computing smooth pupil diameter for {cam} camera.")
features['pupilDiameter_smooth'] = get_smooth_pupil_diameter(features['pupilDiameter_raw'], cam)
# Save to pqt
features_file = Path(self.session_path).joinpath('alf', f'_ibl_{cam}Camera.features.pqt')
features.to_parquet(features_file)
output_files.append(features_file)

# For all cams, compute DLC qc if times available
if times and run_qc:
# Setting download_data to False because at this point the data should be there
qc = DlcQC(self.session_path, side=cam, one=self.one, download_data=False)
qc.run(update=True)
else:
_logger.warning(f"Skipping lick times for {cam} camera as no camera.times available.")
# Compute pupil diameter, raw and smoothed
_logger.info(f"Computing raw pupil diameter for {cam} camera.")
features['pupilDiameter_raw'] = get_pupil_diameter(dlc_thresh)
_logger.info(f"Computing smooth pupil diameter for {cam} camera.")
features['pupilDiameter_smooth'] = get_smooth_pupil_diameter(features['pupilDiameter_raw'], cam)
# Save to pqt
features_file = Path(self.session_path).joinpath('alf', f'_ibl_{cam}Camera.features.pqt')
features.to_parquet(features_file)
output_files.append(features_file)

# For all cams, compute DLC qc if times available
if times and run_qc:
# Setting download_data to False because at this point the data should be there
qc = DlcQC(self.session_path, side=cam, one=self.one, download_data=False)
qc.run(update=True)
else:
if not times:
_logger.warning(f"Skipping QC for {cam} camera as no camera.times available")
if not run_qc:
_logger.warning(f"Skipping QC for {cam} camera as run_qc=False")
if not times:
_logger.warning(f"Skipping QC for {cam} camera as no camera.times available")
if not run_qc:
_logger.warning(f"Skipping QC for {cam} camera as run_qc=False")

except BaseException:
_logger.error(traceback.format_exc())
self.status = -1
continue

# Combined lick times
if len(combined_licks) > 0:
lick_times_file = Path(self.session_path).joinpath('alf', 'licks.times.npy')
np.save(lick_times_file, sorted(np.concatenate(combined_licks)))
output_files.append(lick_times_file)
else:
_logger.warning("No lick times computed for this session.")

if plot_qc:
_logger.info("Creating DLC QC plot")
try:
session_id = self.one.path2eid(self.session_path)
fig_path = self.session_path.joinpath('snapshot', 'dlc_qc_plot.png')
if not fig_path.parent.exists():
fig_path.parent.mkdir(parents=True, exist_ok=True)
fig = dlc_qc_plot(self.one.path2eid(self.session_path), one=self.one)
fig.savefig(fig_path)
snp = ReportSnapshot(self.session_path, session_id, one=self.one)
snp.outputs = [fig_path]
snp.register_images(widths=['orig'],
function=str(dlc_qc_plot.__module__) + '.' + str(dlc_qc_plot.__name__))
except BaseException:
_logger.error('Could not create and/or upload DLC QC Plot')
_logger.error(traceback.format_exc())
self.status = -1
continue

# Combined lick times
if len(combined_licks) > 0:
lick_times_file = Path(self.session_path).joinpath('alf', 'licks.times.npy')
np.save(lick_times_file, sorted(np.concatenate(combined_licks)))
output_files.append(lick_times_file)
else:
_logger.warning("No lick times computed for this session.")

return output_files

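The new plot_qc branch wires the DLC QC figure into snapshot reporting. A condensed sketch of that flow, assuming a connected ONE instance named one and a local session path (both placeholders):

from pathlib import Path
from ibllib.plots.figures import dlc_qc_plot
from ibllib.plots.snapshot import ReportSnapshot

session_path = Path('/data/subjects/XX/2021-12-08/001')  # hypothetical
session_id = one.path2eid(session_path)
fig_path = session_path.joinpath('snapshot', 'dlc_qc_plot.png')
fig_path.parent.mkdir(parents=True, exist_ok=True)
fig = dlc_qc_plot(session_id, one=one)
fig.savefig(fig_path)
snp = ReportSnapshot(session_path, session_id, one=one)
snp.outputs = [fig_path]
snp.register_images(widths=['orig'],
                    function=str(dlc_qc_plot.__module__) + '.' + str(dlc_qc_plot.__name__))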
10 changes: 5 additions & 5 deletions ibllib/pipes/histology.py
@@ -347,11 +347,11 @@ def create_channel_dict(traj, brain_locations):
channel_dict = []
for i in np.arange(brain_locations.id.size):
channel_dict.append({
'x': brain_locations.xyz[i, 0] * 1e6,
'y': brain_locations.xyz[i, 1] * 1e6,
'z': brain_locations.xyz[i, 2] * 1e6,
'axial': brain_locations.axial[i],
'lateral': brain_locations.lateral[i],
'x': np.float64(brain_locations.xyz[i, 0] * 1e6),
'y': np.float64(brain_locations.xyz[i, 1] * 1e6),
'z': np.float64(brain_locations.xyz[i, 2] * 1e6),
'axial': np.float64(brain_locations.axial[i]),
'lateral': np.float64(brain_locations.lateral[i]),
'brain_region': int(brain_locations.id[i]),
'trajectory_estimate': traj['id']
})
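The explicit np.float64 casts plausibly exist because these dicts get serialized to JSON (np.float64 subclasses Python's float, np.float32 does not); that motivation is an assumption, not stated in the diff. A quick demonstration:

import json
import numpy as np

json.dumps({'x': np.float64(1500.0)})  # works: np.float64 is a subclass of float
try:
    json.dumps({'x': np.float32(1500.0)})
except TypeError as err:
    print(err)                          # float32 is not JSON serializable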
(Remaining changed files not rendered.)
