Merge branch 'develop' into qc_viewer
k1o0 committed Oct 16, 2024
2 parents 5e54694 + 136978f commit a2a5d96
Showing 27 changed files with 673 additions and 245 deletions.
2 changes: 1 addition & 1 deletion brainbox/metrics/single_units.py
@@ -990,7 +990,7 @@ def quick_unit_metrics(spike_clusters, spike_times, spike_amps, spike_depths,
r.slidingRP_viol_forced[ir] = srp['value_forced']
r.max_confidence[ir] = srp['max_confidence']
r.min_contamination[ir] = srp['min_contamination']
- r.n_spikes_below2 = srp['n_spikes_below2']
+ r.n_spikes_below2[ir] = srp['n_spikes_below2']

# loop over each cluster to compute the rest of the metrics
for ic in np.arange(nclust):
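The fix above replaces a scalar rebind with an indexed write: without `[ir]`, each pass through the loop replaced the whole per-cluster array with a single value. A minimal sketch of the difference, with invented sizes and values:

```python
import numpy as np

# Invented sizes/values; illustrates the indexing fix, not the real metrics code.
n_clusters = 3
n_spikes_below2 = np.zeros(n_clusters)

for ir, srp in enumerate(({'n_spikes_below2': 5}, {'n_spikes_below2': 8}, {'n_spikes_below2': 2})):
    # Buggy form: `n_spikes_below2 = srp['n_spikes_below2']` rebinds the name
    # to a scalar, discarding every other cluster's value.
    n_spikes_below2[ir] = srp['n_spikes_below2']  # fixed: write one cluster's slot

assert n_spikes_below2.tolist() == [5.0, 8.0, 2.0]
```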
2 changes: 1 addition & 1 deletion ibllib/__init__.py
@@ -2,7 +2,7 @@
import logging
import warnings

- __version__ = '2.38.0'
+ __version__ = '2.39.1'
warnings.filterwarnings('always', category=DeprecationWarning, module='ibllib')

# if this becomes a full-blown library we should let the logging configuration to the discretion of the dev
16 changes: 8 additions & 8 deletions ibllib/io/extractors/biased_trials.py
@@ -97,7 +97,7 @@ class TrialsTableBiased(BaseBpodTrialsExtractor):
save_names = ('_ibl_trials.table.pqt', None, None, '_ibl_wheel.timestamps.npy', '_ibl_wheel.position.npy',
'_ibl_wheelMoves.intervals.npy', '_ibl_wheelMoves.peakAmplitude.npy', None, None)
var_names = ('table', 'stimOff_times', 'stimFreeze_times', 'wheel_timestamps', 'wheel_position', 'wheelMoves_intervals',
- 'wheelMoves_peakAmplitude', 'peakVelocity_times', 'is_final_movement')
+ 'wheelMoves_peakAmplitude', 'wheelMoves_peakVelocity_times', 'is_final_movement')

def _extract(self, extractor_classes=None, **kwargs):
extractor_classes = extractor_classes or []
@@ -125,7 +125,7 @@ class TrialsTableEphys(BaseBpodTrialsExtractor):
'_ibl_wheelMoves.intervals.npy', '_ibl_wheelMoves.peakAmplitude.npy', None,
None, None, None, '_ibl_trials.quiescencePeriod.npy')
var_names = ('table', 'stimOff_times', 'stimFreeze_times', 'wheel_timestamps', 'wheel_position', 'wheelMoves_intervals',
- 'wheelMoves_peakAmplitude', 'peakVelocity_times', 'is_final_movement',
+ 'wheelMoves_peakAmplitude', 'wheelMoves_peakVelocity_times', 'is_final_movement',
'phase', 'position', 'quiescence')

def _extract(self, extractor_classes=None, **kwargs):
@@ -152,12 +152,12 @@ class BiasedTrials(BaseBpodTrialsExtractor):
save_names = ('_ibl_trials.goCueTrigger_times.npy', '_ibl_trials.stimOnTrigger_times.npy', None,
'_ibl_trials.stimOffTrigger_times.npy', None, None, '_ibl_trials.table.pqt',
'_ibl_trials.stimOff_times.npy', None, '_ibl_wheel.timestamps.npy', '_ibl_wheel.position.npy',
- '_ibl_wheelMoves.intervals.npy', '_ibl_wheelMoves.peakAmplitude.npy', None, None, '_ibl_trials.included.npy',
- None, None, '_ibl_trials.quiescencePeriod.npy')
+ '_ibl_wheelMoves.intervals.npy', '_ibl_wheelMoves.peakAmplitude.npy', None, None,
+ '_ibl_trials.included.npy', None, None, '_ibl_trials.quiescencePeriod.npy')
var_names = ('goCueTrigger_times', 'stimOnTrigger_times', 'itiIn_times', 'stimOffTrigger_times', 'stimFreezeTrigger_times',
'errorCueTrigger_times', 'table', 'stimOff_times', 'stimFreeze_times', 'wheel_timestamps', 'wheel_position',
- 'wheelMoves_intervals', 'wheelMoves_peakAmplitude', 'peakVelocity_times', 'is_final_movement', 'included',
- 'phase', 'position', 'quiescence')
+ 'wheelMoves_intervals', 'wheelMoves_peakAmplitude', 'wheelMoves_peakVelocity_times', 'is_final_movement',
+ 'included', 'phase', 'position', 'quiescence')

def _extract(self, extractor_classes=None, **kwargs) -> dict:
extractor_classes = extractor_classes or []
@@ -182,8 +182,8 @@ class EphysTrials(BaseBpodTrialsExtractor):
'_ibl_trials.included.npy', None, None, '_ibl_trials.quiescencePeriod.npy')
var_names = ('goCueTrigger_times', 'stimOnTrigger_times', 'itiIn_times', 'stimOffTrigger_times', 'stimFreezeTrigger_times',
'errorCueTrigger_times', 'table', 'stimOff_times', 'stimFreeze_times', 'wheel_timestamps', 'wheel_position',
- 'wheelMoves_intervals', 'wheelMoves_peakAmplitude', 'peakVelocity_times', 'is_final_movement', 'included',
- 'phase', 'position', 'quiescence')
+ 'wheelMoves_intervals', 'wheelMoves_peakAmplitude', 'wheelMoves_peakVelocity_times', 'is_final_movement',
+ 'included', 'phase', 'position', 'quiescence')

def _extract(self, extractor_classes=None, **kwargs) -> dict:
extractor_classes = extractor_classes or []
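Throughout these extractor classes, `save_names` and `var_names` are parallel tuples: `save_names[i]` is the file written for `var_names[i]`, and `None` marks a variable that is computed but never saved. The renames in this file (`peakVelocity_times` → `wheelMoves_peakVelocity_times`) keep that pairing intact; a hedged sketch of the invariant with abbreviated tuples (not the full ones from the diff):

```python
# Abbreviated tuples for illustration only.
save_names = ('_ibl_wheelMoves.intervals.npy', '_ibl_wheelMoves.peakAmplitude.npy', None)
var_names = ('wheelMoves_intervals', 'wheelMoves_peakAmplitude', 'wheelMoves_peakVelocity_times')

assert len(save_names) == len(var_names)  # the tuples must stay index-aligned
unsaved = [v for v, s in zip(var_names, save_names) if s is None]
assert unsaved == ['wheelMoves_peakVelocity_times']  # computed but not written to disk
```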
4 changes: 2 additions & 2 deletions ibllib/io/extractors/ephys_fpga.py
@@ -585,13 +585,13 @@ class FpgaTrials(extractors_base.BaseExtractor):
'_ibl_trials.stimOff_times.npy', None, None, None, '_ibl_trials.quiescencePeriod.npy',
'_ibl_trials.table.pqt', '_ibl_wheel.timestamps.npy',
'_ibl_wheel.position.npy', '_ibl_wheelMoves.intervals.npy',
- '_ibl_wheelMoves.peakAmplitude.npy')
+ '_ibl_wheelMoves.peakAmplitude.npy', None)
var_names = ('goCueTrigger_times', 'stimOnTrigger_times',
'stimOffTrigger_times', 'stimFreezeTrigger_times', 'errorCueTrigger_times',
'errorCue_times', 'itiIn_times', 'stimFreeze_times', 'stimOff_times',
'valveOpen_times', 'phase', 'position', 'quiescence', 'table',
'wheel_timestamps', 'wheel_position',
- 'wheelMoves_intervals', 'wheelMoves_peakAmplitude')
+ 'wheelMoves_intervals', 'wheelMoves_peakAmplitude', 'wheelMoves_peakVelocity_times')

bpod_rsync_fields = ('intervals', 'response_times', 'goCueTrigger_times',
'stimOnTrigger_times', 'stimOffTrigger_times',
2 changes: 1 addition & 1 deletion ibllib/io/extractors/mesoscope.py
@@ -4,8 +4,8 @@
import numpy as np
from scipy.signal import find_peaks
import one.alf.io as alfio
- from one.util import ensure_list
from one.alf.files import session_path_parts
+ from iblutil.util import ensure_list
import matplotlib.pyplot as plt
from packaging import version

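Several files in this merge move the `ensure_list` import from `one.util` to `iblutil.util`, where the helper now lives. Its behaviour, as assumed here from its usage in this codebase, is to wrap a lone value (including a string) in a list and pass sequences through unchanged:

```python
from iblutil.util import ensure_list

# Behaviour assumed from usage in this codebase.
assert ensure_list('probe00') == ['probe00']
assert ensure_list(['probe00', 'probe01']) == ['probe00', 'probe01']
```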
9 changes: 5 additions & 4 deletions ibllib/io/extractors/training_trials.py
@@ -127,9 +127,10 @@ def get_trial_repeat(trial):
elif 'contrast' in trial and isinstance(trial['contrast'], dict):
return trial['contrast']['type'] == 'RepeatContrast'
else:
- # For advanced choice world before version 8.19.0 there was no 'debias_trial' field
+ # For advanced choice world and its subclasses before version 8.19.0 there was no 'debias_trial' field
# and no debiasing protocol applied, so simply return False
- assert self.settings['PYBPOD_PROTOCOL'].startswith('_iblrig_tasks_advancedChoiceWorld')
+ assert (self.settings['PYBPOD_PROTOCOL'].startswith('_iblrig_tasks_advancedChoiceWorld') or
+         self.settings['PYBPOD_PROTOCOL'].startswith('ccu_neuromodulatorChoiceWorld'))
return False

trial_repeated = np.fromiter(map(get_trial_repeat, self.bpod_trials), int)
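The widened assertion accepts either protocol prefix. For reference, `str.startswith` also takes a tuple of prefixes, which expresses the same check more compactly; this is only an illustration with an invented settings dict, not a change made in this commit:

```python
settings = {'PYBPOD_PROTOCOL': 'ccu_neuromodulatorChoiceWorld6.4.2'}  # invented value
prefixes = ('_iblrig_tasks_advancedChoiceWorld', 'ccu_neuromodulatorChoiceWorld')
assert settings['PYBPOD_PROTOCOL'].startswith(prefixes)  # tuple form of the same check
```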
@@ -708,7 +709,7 @@ class TrialsTable(BaseBpodTrialsExtractor):
save_names = ('_ibl_trials.table.pqt', None, None, '_ibl_wheel.timestamps.npy', '_ibl_wheel.position.npy',
'_ibl_wheelMoves.intervals.npy', '_ibl_wheelMoves.peakAmplitude.npy', None, None)
var_names = ('table', 'stimOff_times', 'stimFreeze_times', 'wheel_timestamps', 'wheel_position', 'wheelMoves_intervals',
- 'wheelMoves_peakAmplitude', 'peakVelocity_times', 'is_final_movement')
+ 'wheelMoves_peakAmplitude', 'wheelMoves_peakVelocity_times', 'is_final_movement')

def _extract(self, extractor_classes=None, **kwargs):
base = [Intervals, GoCueTimes, ResponseTimes, Choice, StimOnOffFreezeTimes, ContrastLR, FeedbackTimes, FeedbackType,
@@ -731,7 +732,7 @@ class TrainingTrials(BaseBpodTrialsExtractor):
var_names = ('repNum', 'goCueTrigger_times', 'stimOnTrigger_times', 'itiIn_times', 'stimOffTrigger_times',
'stimFreezeTrigger_times', 'errorCueTrigger_times', 'table', 'stimOff_times', 'stimFreeze_times',
'wheel_timestamps', 'wheel_position', 'wheelMoves_intervals', 'wheelMoves_peakAmplitude',
- 'peakVelocity_times', 'is_final_movement', 'phase', 'position', 'quiescence', 'pause_duration')
+ 'wheelMoves_peakVelocity_times', 'is_final_movement', 'phase', 'position', 'quiescence', 'pause_duration')

def _extract(self) -> dict:
base = [RepNum, GoCueTriggerTimes, StimOnTriggerTimes, ItiInTimes, StimOffTriggerTimes, StimFreezeTriggerTimes,
14 changes: 4 additions & 10 deletions ibllib/io/extractors/training_wheel.py
@@ -330,11 +330,6 @@ def extract_first_movement_times(wheel_moves, trials, min_qt=None):
gap between quiescence end and cue start, or during the quiescence period but sub-
threshold). The movement is sufficiently large if it is greater than or equal to THRESH.
- :param wheel_moves:
- :param trials: dictionary of trial data
- :param min_qt:
- :return: numpy array of
Parameters
----------
wheel_moves : dict
@@ -407,9 +402,9 @@ class Wheel(BaseBpodTrialsExtractor):
save_names = ('_ibl_wheel.timestamps.npy', '_ibl_wheel.position.npy',
'_ibl_wheelMoves.intervals.npy', '_ibl_wheelMoves.peakAmplitude.npy', None,
'_ibl_trials.firstMovement_times.npy', None)
- var_names = ('wheel_timestamps', 'wheel_position', 'wheelMoves_intervals',
-              'wheelMoves_peakAmplitude', 'peakVelocity_times', 'firstMovement_times',
-              'is_final_movement')
+ var_names = ('wheel_timestamps', 'wheel_position',
+              'wheelMoves_intervals', 'wheelMoves_peakAmplitude', 'wheelMoves_peakVelocity_times',
+              'firstMovement_times', 'is_final_movement')

def _extract(self):
ts, pos = get_wheel_position(self.session_path, self.bpod_trials, task_collection=self.task_collection)
@@ -425,6 +420,5 @@ def _extract(self):
min_qt = self.settings.get('QUIESCENT_PERIOD', None)

first_moves, is_final, _ = extract_first_movement_times(moves, trials, min_qt=min_qt)
- output = (ts, pos, moves['intervals'], moves['peakAmplitude'],
-           moves['peakVelocity_times'], first_moves, is_final)
+ output = (ts, pos, moves['intervals'], moves['peakAmplitude'], moves['peakVelocity_times'], first_moves, is_final)
return output
9 changes: 8 additions & 1 deletion ibllib/io/extractors/video_motion.py
@@ -442,7 +442,6 @@ def fix_keys(alf_object):
# Compute wheel velocity
self.wheel_vel, _ = wh.velocity_filtered(wheel_pos, 1000)
# Load in original camera times
- self.camera_times = alfio.load_file_content(next(alf_path.rglob(f'_ibl_{self.label}Camera.times*.npy')))
self.camera_path = str(next(self.session_path.joinpath('raw_video_data').glob(f'_iblrig_{self.label}Camera.raw*.mp4')))
self.camera_meta = vidio.get_video_meta(self.camera_path)

@@ -461,17 +460,25 @@ def fix_keys(alf_object):
# Check if the ttl and video sizes match up
self.tdiff = self.ttls.size - self.camera_meta['length']

+ # Load in original camera times if available otherwise set to ttls
+ camera_times = next(alf_path.rglob(f'_ibl_{self.label}Camera.times*.npy'), None)
+ self.camera_times = alfio.load_file_content(camera_times) if camera_times else self.ttls

if self.tdiff < 0:
# In this case there are fewer ttls than camera frames. This is not ideal, for now we pad the ttls with
# nans but if this is too many we reject the wheel alignment based on the qc
self.ttl_times = self.ttls
self.times = np.r_[self.ttl_times, np.full((np.abs(self.tdiff)), np.nan)]
+ if self.camera_times.size != self.camera_meta['length']:
+     self.camera_times = np.r_[self.camera_times, np.full((np.abs(self.tdiff)), np.nan)]
self.short_flag = True
elif self.tdiff > 0:
# In this case there are more ttls than camera frames. This happens often, for now we remove the first
# tdiff ttls from the ttls
self.ttl_times = self.ttls[self.tdiff:]
self.times = self.ttls[self.tdiff:]
+ if self.camera_times.size != self.camera_meta['length']:
+     self.camera_times = self.camera_times[self.tdiff:]
self.short_flag = False

# Compute the frame rate of the camera
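The new branches keep `camera_times` the same length as the video: with fewer TTLs than frames the tail is padded with NaN, and with more TTLs the leading extras are dropped. A standalone sketch of that reconciliation logic (not the class's actual method):

```python
import numpy as np

def reconcile(times, n_frames):
    """Pad with trailing NaNs or trim leading samples so times matches n_frames."""
    tdiff = times.size - n_frames
    if tdiff < 0:   # fewer timestamps than frames: pad the tail with NaN
        return np.r_[times, np.full(-tdiff, np.nan)]
    if tdiff > 0:   # more timestamps than frames: drop the first tdiff samples
        return times[tdiff:]
    return times

assert reconcile(np.arange(5.), 7).size == 7   # padded
assert reconcile(np.arange(9.), 7).size == 7   # trimmed
```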
39 changes: 25 additions & 14 deletions ibllib/oneibl/data_handlers.py
@@ -15,10 +15,10 @@

from one.api import ONE
from one.webclient import AlyxClient
- from one.util import filter_datasets, ensure_list
+ from one.util import filter_datasets
from one.alf.files import add_uuid_string, session_path_parts
from one.alf.cache import _make_datasets_df
- from iblutil.util import flatten
+ from iblutil.util import flatten, ensure_list

from ibllib.oneibl.registration import register_dataset, get_lab, get_local_data_repository
from ibllib.oneibl.patcher import FTPPatcher, SDSCPatcher, SDSC_ROOT_PATH, SDSC_PATCH_PATH
@@ -79,9 +79,16 @@ def identifiers(self):
"""tuple: the identifying parts of the dataset.
If no operator is applied, the identifiers are (collection, revision, name).
- If an operator is applied, the identifiers are two instances of an ExpectedDataset.
+ If an operator is applied, a tuple of 3-element tuples is returned.
"""
- return self._identifiers if self.operator is None else tuple(x.identifiers for x in self._identifiers)
+ if self.operator is None:
+     return self._identifiers
+ # Flatten nested identifiers into tuple of 3-element tuples
+ identifiers = []
+ for x in self._identifiers:
+     add = identifiers.extend if x.operator else identifiers.append
+     add(x.identifiers)
+ return tuple(identifiers)

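The rewritten property splices each child's already-flat identifiers instead of nesting them, so chained operators always yield a flat tuple of `(collection, revision, name)` triples. A self-contained sketch of just that logic, using a simplified stand-in class rather than the real `ExpectedDataset`:

```python
class Node:
    """Simplified stand-in for ExpectedDataset; shows only the flattening."""
    def __init__(self, identifiers, operator=None):
        self._identifiers = identifiers  # a 3-tuple, or child Nodes when operator is set
        self.operator = operator

    @property
    def identifiers(self):
        if self.operator is None:
            return self._identifiers
        flat = []
        for x in self._identifiers:
            # splice a child's flat tuple; append a leaf's single triple
            (flat.extend if x.operator else flat.append)(x.identifiers)
        return tuple(flat)

a = Node(('alf', None, '_ibl_trials.table.pqt'))
b = Node(('alf', None, '_ibl_wheel.position.npy'))
c = Node(('alf', None, '_ibl_wheel.timestamps.npy'))
nested = Node([Node([a, b], operator='and'), c], operator='and')
# Previously this nested as ((a, b), c); now it comes back flat:
assert nested.identifiers == (a.identifiers, b.identifiers, c.identifiers)
```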
@property
def glob_pattern(self):
@@ -463,7 +470,6 @@ def dataset_from_name(name, datasets):
list of ExpectedDataset
The ExpectedDataset instances that match the given name.
- TODO Add tests
"""
matches = []
for dataset in datasets:
@@ -475,7 +481,7 @@
return matches


- def update_collections(dataset, new_collection, substring=None):
+ def update_collections(dataset, new_collection, substring=None, unique=None):
"""
Update the collection of a dataset.
@@ -496,23 +502,28 @@ def update_collections(dataset, new_collection, substring=None):
ExpectedDataset
A copy of the dataset with the updated collection(s).
- TODO Add tests
"""
after = ensure_list(new_collection)
D = ExpectedDataset.input if isinstance(dataset, Input) else ExpectedDataset.output
if dataset.operator is None:
- collection, revsion, name = dataset.identifiers
+ collection, revision, name = dataset.identifiers
+ if revision is not None:
+     raise NotImplementedError
if substring:
- after = [collection.replace(substring, x) for x in after]
- unique = not set(name).intersection('*[?')
+ after = [(collection or '').replace(substring, x) or None for x in after]
+ if unique is None:
+     unique = [not set(name + (x or '')).intersection('*[?') for x in after]
+ else:
+     unique = [unique] * len(after)
register = dataset.register
- updated = D(name, after[0], not isinstance(dataset, OptionalDataset), register, unique=unique)
+ updated = D(name, after[0], not isinstance(dataset, OptionalDataset), register, unique=unique[0])
if len(after) > 1:
- for folder in after[1:]:
-     updated &= D(name, folder, not isinstance(dataset, OptionalDataset), register, unique=unique)
+ for folder, unq in zip(after[1:], unique[1:]):
+     updated &= D(name, folder, not isinstance(dataset, OptionalDataset), register, unique=unq)
else:
updated = copy(dataset)
- updated._identifiers = [update_collections(dd, new_collection) for dd in updated._identifiers]
+ updated._identifiers = [update_collections(dd, new_collection, substring, unique)
+                         for dd in updated._identifiers]
return updated


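`update_collections` now infers uniqueness per collection when `unique` is not passed: a spec counts as unique only if neither its name nor its collection contains a glob character. A sketch of just that inference, extracted and simplified from the diff:

```python
def infer_unique(name, collections):
    """True per collection when name + collection contains no glob characters."""
    return [not set(name + (c or '')).intersection('*[?') for c in collections]

assert infer_unique('_ibl_trials.table.pqt', ['alf']) == [True]
assert infer_unique('*Camera.times.npy', ['alf', None]) == [False, False]
assert infer_unique('_ibl_trials.table.pqt', ['alf/task_*']) == [False]
```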
2 changes: 1 addition & 1 deletion ibllib/oneibl/patcher.py
@@ -33,14 +33,14 @@

import globus_sdk
import iblutil.io.params as iopar
+ from iblutil.util import ensure_list
from one.alf.files import get_session_path, add_uuid_string
from one.alf.spec import is_uuid_string, is_uuid
from one import params
from one.webclient import AlyxClient
from one.converters import path_from_dataset
from one.remote import globus
from one.remote.aws import url2uri
- from one.util import ensure_list

from ibllib.oneibl.registration import register_dataset

4 changes: 3 additions & 1 deletion ibllib/oneibl/registration.py
@@ -12,8 +12,9 @@
from one.webclient import AlyxClient, no_cache
from one.converters import ConversionMixin
import one.alf.exceptions as alferr
- from one.util import datasets2records, ensure_list
from one.api import ONE
+ from one.util import datasets2records
+ from iblutil.util import ensure_list

import ibllib
import ibllib.io.extractors.base
@@ -239,6 +240,7 @@ def register_session(self, ses_path, file_list=True, projects=None, procedures=N
task_protocols = task_data = settings = []
json_field = None
users = session_details['users']
+ n_trials, n_correct_trials = 0
else: # Get session info from task data
collections = ensure_list(collections)
# read meta data from the rig for the session from the task settings file
8 changes: 5 additions & 3 deletions ibllib/pipes/base_tasks.py
@@ -4,8 +4,7 @@

from packaging import version
from one.webclient import no_cache
- from one.util import ensure_list
- from iblutil.util import flatten
+ from iblutil.util import flatten, ensure_list
import matplotlib.image
from skimage.io import ImageCollection, imread

@@ -551,7 +550,7 @@ def register_snapshots(self, unlink=False, collection=None):
snapshot = self._save_as_png(snapshot_tif := snapshot)
if unlink:
snapshot_tif.unlink()
- _logger.debug('Uploading "%s"...', snapshot.relative_to(self.session_path))
+ _logger.info('Uploading "%s"...', snapshot.relative_to(self.session_path))
if snapshot.with_suffix('.txt').exists():
with open(snapshot.with_suffix('.txt'), 'r') as txt_file:
note['text'] = txt_file.read().strip()
@@ -571,6 +570,9 @@

def _run(self, **kwargs):
self.rename_files(**kwargs)
+ if not self.output_files:
+     return []

# FIXME Can be done with Task.assert_expected_outputs
ok, out_files, missing = map(flatten, zip(*map(lambda x: x.find_files(self.session_path), self.output_files)))
if not ok:
6 changes: 3 additions & 3 deletions ibllib/pipes/local_server.py
@@ -55,7 +55,7 @@ def _get_volume_usage(vol, label=''):
return {f"{label}_{k}": d[k] for k in d}


- def report_health(one):
+ def report_health(alyx):
"""
Get a few indicators and label the json field of the corresponding lab with them.
"""
@@ -66,10 +66,10 @@ def report_health(one):
status.update(_get_volume_usage('/mnt/s0/Data', 'raid'))
status.update(_get_volume_usage('/', 'system'))

- data_repos = one.alyx.rest('data-repository', 'list', globus_endpoint_id=get_local_endpoint_id())
+ data_repos = alyx.rest('data-repository', 'list', globus_endpoint_id=get_local_endpoint_id())

for dr in data_repos:
- one.alyx.json_field_update(endpoint='data-repository', uuid=dr['name'], field_name='json', data=status)
+ alyx.json_field_update(endpoint='data-repository', uuid=dr['name'], field_name='json', data=status)


def job_creator(root_path, one=None, dry=False, rerun=False):
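With the rename, `report_health` takes an `AlyxClient` directly instead of reaching through a ONE instance. A hedged usage sketch (assumes ONE credentials are already configured on the lab server):

```python
from one.api import ONE
from ibllib.pipes.local_server import report_health

one = ONE()              # assumes configured credentials
report_health(one.alyx)  # pass the AlyxClient; previously report_health(one)
```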