From d99f58215675b81fc54c9c0547440b1e974f8d1b Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Fri, 9 Aug 2019 14:58:50 -0400 Subject: [PATCH 1/2] yapf_2019_08_09 --- caiman/base/rois.py | 484 +++++++++++++---------- caiman/base/traces.py | 8 +- caiman/behavior/behavior.py | 106 +++-- caiman/external/cell_magic_wand.py | 103 ++--- caiman/external/houghvst/demo_VST.py | 7 +- caiman/external/houghvst/estimation.py | 37 +- caiman/external/houghvst/gat.py | 13 +- caiman/external/houghvst/measures.py | 14 +- caiman/external/houghvst/plotting.py | 95 +++-- caiman/external/houghvst/regions.py | 16 +- caiman/external/houghvst/stats.py | 9 +- caiman/gui/caiman_gui.py | 17 +- caiman/tests/comparison/comparison.py | 260 ++++++------ caiman/tests/comparison/create_gt.py | 164 +++++--- caiman/tests/comparison_general.py | 197 +++++---- caiman/tests/comparison_humans_online.py | 189 +++++---- caiman/tests/test_deconvolution.py | 10 +- caiman/tests/test_demo.py | 23 +- caiman/tests/test_import.py | 1 + caiman/tests/test_onacid.py | 58 +-- caiman/tests/test_pre_processing.py | 4 +- caiman/tests/test_temporal.py | 9 +- caiman/tests/test_toydata.py | 33 +- caiman/utils/stats.py | 99 ++--- caimanmanager.py | 12 +- 25 files changed, 1086 insertions(+), 882 deletions(-) diff --git a/caiman/base/rois.py b/caiman/base/rois.py index 0e6a2a7bb..dcefc0b2d 100644 --- a/caiman/base/rois.py +++ b/caiman/base/rois.py @@ -1,6 +1,5 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- - """ Created on Thu Oct 22 13:22:26 2015 @@ -42,7 +41,8 @@ except: pass -def com(A:np.ndarray, d1:int, d2:int, d3:Optional[int]=None) -> np.array: + +def com(A: np.ndarray, d1: int, d2: int, d3: Optional[int] = None) -> np.array: """Calculation of the center of mass for spatial components Args: @@ -68,17 +68,28 @@ def com(A:np.ndarray, d1:int, d2:int, d3:Optional[int]=None) -> np.array: if d3 is None: Coor = np.matrix([np.outer(np.ones(d2), np.arange(d1)).ravel(), - np.outer(np.arange(d2), np.ones(d1)).ravel()], dtype=A.dtype) + np.outer(np.arange(d2), np.ones(d1)).ravel()], + dtype=A.dtype) else: Coor = np.matrix([ - np.outer(np.ones(d3), np.outer(np.ones(d2), np.arange(d1)).ravel()).ravel(), - np.outer(np.ones(d3), np.outer(np.arange(d2), np.ones(d1)).ravel()).ravel(), - np.outer(np.arange(d3), np.outer(np.ones(d2), np.ones(d1)).ravel()).ravel()], - dtype=A.dtype) + np.outer(np.ones(d3), + np.outer(np.ones(d2), np.arange(d1)).ravel()).ravel(), + np.outer(np.ones(d3), + np.outer(np.arange(d2), np.ones(d1)).ravel()).ravel(), + np.outer(np.arange(d3), + np.outer(np.ones(d2), np.ones(d1)).ravel()).ravel() + ], + dtype=A.dtype) cm = (Coor * A / A.sum(axis=0)).T return np.array(cm) -def extract_binary_masks_from_structural_channel(Y, min_area_size:int=30, min_hole_size:int=15, gSig:int=5, expand_method:str='closing', selem:np.array=np.ones((3, 3))) -> Tuple[np.ndarray, np.array]: + +def extract_binary_masks_from_structural_channel(Y, + min_area_size: int = 30, + min_hole_size: int = 15, + gSig: int = 5, + expand_method: str = 'closing', + selem: np.array = np.ones((3, 3))) -> Tuple[np.ndarray, np.array]: """Extract binary masks by using adaptive thresholding on a structural channel Args: @@ -113,8 +124,7 @@ def extract_binary_masks_from_structural_channel(Y, min_area_size:int=30, min_ho img = (img - np.min(img)) / (np.max(img) - np.min(img)) * 255. 
img = img.astype(np.uint8) - th = cv2.adaptiveThreshold(img, np.max( - img), cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, gSig, 0) + th = cv2.adaptiveThreshold(img, np.max(img), cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, gSig, 0) th = remove_small_holes(th > 0, min_size=min_hole_size) th = remove_small_objects(th, min_size=min_area_size) areas = label(th) @@ -132,33 +142,55 @@ def extract_binary_masks_from_structural_channel(Y, min_area_size:int=30, min_ho return A, mR + def mask_to_2d(mask): # todo todocument if mask.ndim > 2: _, d1, d2 = np.shape(mask) dims = d1, d2 - return scipy.sparse.coo_matrix(np.reshape(mask[:].transpose([1, 2, 0]), (np.prod(dims), -1,), order='F')) + return scipy.sparse.coo_matrix(np.reshape(mask[:].transpose([1, 2, 0]), ( + np.prod(dims), + -1, + ), order='F')) else: dims = np.shape(mask) - return scipy.sparse.coo_matrix(np.reshape(mask, (np.prod(dims), -1,), order='F')) + return scipy.sparse.coo_matrix(np.reshape(mask, ( + np.prod(dims), + -1, + ), order='F')) + def get_distance_from_A(masks_gt, masks_comp, min_dist=10) -> List: # todo todocument _, d1, d2 = np.shape(masks_gt) dims = d1, d2 - A_ben = scipy.sparse.csc_matrix(np.reshape( - masks_gt[:].transpose([1, 2, 0]), (np.prod(dims), -1,), order='F')) - A_cnmf = scipy.sparse.csc_matrix(np.reshape( - masks_comp[:].transpose([1, 2, 0]), (np.prod(dims), -1,), order='F')) + A_ben = scipy.sparse.csc_matrix(np.reshape(masks_gt[:].transpose([1, 2, 0]), ( + np.prod(dims), + -1, + ), order='F')) + A_cnmf = scipy.sparse.csc_matrix(np.reshape(masks_comp[:].transpose([1, 2, 0]), ( + np.prod(dims), + -1, + ), order='F')) cm_ben = [scipy.ndimage.center_of_mass(mm) for mm in masks_gt] cm_cnmf = [scipy.ndimage.center_of_mass(mm) for mm in masks_comp] return distance_masks([A_ben, A_cnmf], [cm_ben, cm_cnmf], min_dist) -def nf_match_neurons_in_binary_masks(masks_gt, masks_comp, thresh_cost=.7, min_dist=10, print_assignment=False, - plot_results=False, Cn=None, labels=['Session 1','Session 2'], cmap='gray', D=None, enclosed_thr=None): + +def nf_match_neurons_in_binary_masks(masks_gt, + masks_comp, + thresh_cost=.7, + min_dist=10, + print_assignment=False, + plot_results=False, + Cn=None, + labels=['Session 1', 'Session 2'], + cmap='gray', + D=None, + enclosed_thr=None): """ Match neurons expressed as binary masks. 
Uses Hungarian matching algorithm @@ -207,10 +239,14 @@ def nf_match_neurons_in_binary_masks(masks_gt, masks_comp, thresh_cost=.7, min_d dims = d1, d2 # transpose to have a sparse list of components, then reshaping it to have a 1D matrix red in the Fortran style - A_ben = scipy.sparse.csc_matrix(np.reshape( - masks_gt[:].transpose([1, 2, 0]), (np.prod(dims), -1,), order='F')) - A_cnmf = scipy.sparse.csc_matrix(np.reshape( - masks_comp[:].transpose([1, 2, 0]), (np.prod(dims), -1,), order='F')) + A_ben = scipy.sparse.csc_matrix(np.reshape(masks_gt[:].transpose([1, 2, 0]), ( + np.prod(dims), + -1, + ), order='F')) + A_cnmf = scipy.sparse.csc_matrix(np.reshape(masks_comp[:].transpose([1, 2, 0]), ( + np.prod(dims), + -1, + ), order='F')) # have the center of mass of each element of the two masks cm_ben = [scipy.ndimage.center_of_mass(mm) for mm in masks_gt] @@ -219,8 +255,7 @@ def nf_match_neurons_in_binary_masks(masks_gt, masks_comp, thresh_cost=.7, min_d if D is None: #% find distances and matches # find the distance between each masks - D = distance_masks([A_ben, A_cnmf], [cm_ben, cm_cnmf], - min_dist, enclosed_thr=enclosed_thr) + D = distance_masks([A_ben, A_cnmf], [cm_ben, cm_cnmf], min_dist, enclosed_thr=enclosed_thr) level = 0.98 else: level = .98 @@ -246,32 +281,26 @@ def nf_match_neurons_in_binary_masks(masks_gt, masks_comp, thresh_cost=.7, min_d idx_tp_ben = matches[0][idx_tp] # ground truth idx_tp_cnmf = matches[1][idx_tp] # algorithm - comp - idx_fn = np.setdiff1d( - list(range(np.shape(masks_gt)[0])), matches[0][idx_tp]) + idx_fn = np.setdiff1d(list(range(np.shape(masks_gt)[0])), matches[0][idx_tp]) - idx_fp = np.setdiff1d( - list(range(np.shape(masks_comp)[0])), matches[1][idx_tp]) + idx_fp = np.setdiff1d(list(range(np.shape(masks_comp)[0])), matches[1][idx_tp]) idx_fp_cnmf = idx_fp idx_tp_gt, idx_tp_comp, idx_fn_gt, idx_fp_comp = idx_tp_ben, idx_tp_cnmf, idx_fn, idx_fp_cnmf if plot_results: - try: # Plotting function + try: # Plotting function pl.rcParams['pdf.fonttype'] = 42 - font = {'family': 'Myriad Pro', - 'weight': 'regular', - 'size': 10} + font = {'family': 'Myriad Pro', 'weight': 'regular', 'size': 10} pl.rc('font', **font) lp, hp = np.nanpercentile(Cn, [5, 95]) ses_1 = mpatches.Patch(color='red', label=labels[0]) ses_2 = mpatches.Patch(color='white', label=labels[1]) pl.subplot(1, 2, 1) pl.imshow(Cn, vmin=lp, vmax=hp, cmap=cmap) - [pl.contour(norm_nrg(mm), levels=[level], colors='w', linewidths=1) - for mm in masks_comp[idx_tp_comp]] - [pl.contour(norm_nrg(mm), levels=[level], colors='r', - linewidths=1) for mm in masks_gt[idx_tp_gt]] + [pl.contour(norm_nrg(mm), levels=[level], colors='w', linewidths=1) for mm in masks_comp[idx_tp_comp]] + [pl.contour(norm_nrg(mm), levels=[level], colors='r', linewidths=1) for mm in masks_gt[idx_tp_gt]] if labels is None: pl.title('MATCHES') else: @@ -281,10 +310,8 @@ def nf_match_neurons_in_binary_masks(masks_gt, masks_comp, thresh_cost=.7, min_d pl.axis('off') pl.subplot(1, 2, 2) pl.imshow(Cn, vmin=lp, vmax=hp, cmap=cmap) - [pl.contour(norm_nrg(mm), levels=[level], colors='w', linewidths=1) - for mm in masks_comp[idx_fp_comp]] - [pl.contour(norm_nrg(mm), levels=[level], colors='r', - linewidths=1) for mm in masks_gt[idx_fn_gt]] + [pl.contour(norm_nrg(mm), levels=[level], colors='w', linewidths=1) for mm in masks_comp[idx_fp_comp]] + [pl.contour(norm_nrg(mm), levels=[level], colors='r', linewidths=1) for mm in masks_gt[idx_fn_gt]] if labels is None: pl.title('FALSE POSITIVE (w), FALSE NEGATIVE (r)') else: @@ -297,10 +324,23 @@ def 
nf_match_neurons_in_binary_masks(masks_gt, masks_comp, thresh_cost=.7, min_d logging.warning(e) return idx_tp_gt, idx_tp_comp, idx_fn_gt, idx_fp_comp, performance -def register_ROIs(A1, A2, dims, template1=None, template2=None, align_flag=True, - D=None, max_thr = 0, use_opt_flow = True, thresh_cost=.7, - max_dist=10, enclosed_thr=None, print_assignment=False, - plot_results=False, Cn=None, cmap='viridis'): + +def register_ROIs(A1, + A2, + dims, + template1=None, + template2=None, + align_flag=True, + D=None, + max_thr=0, + use_opt_flow=True, + thresh_cost=.7, + max_dist=10, + enclosed_thr=None, + print_assignment=False, + plot_results=False, + Cn=None, + cmap='viridis'): """ Register ROIs across different sessions using an intersection over union metric and the Hungarian algorithm for optimal matching @@ -376,10 +416,10 @@ def register_ROIs(A1, A2, dims, template1=None, template2=None, align_flag=True, """ -# if 'csc_matrix' not in str(type(A1)): -# A1 = scipy.sparse.csc_matrix(A1) -# if 'csc_matrix' not in str(type(A2)): -# A2 = scipy.sparse.csc_matrix(A2) + # if 'csc_matrix' not in str(type(A1)): + # A1 = scipy.sparse.csc_matrix(A1) + # if 'csc_matrix' not in str(type(A2)): + # A2 = scipy.sparse.csc_matrix(A2) if 'ndarray' not in str(type(A1)): A1 = A1.toarray() @@ -389,49 +429,43 @@ def register_ROIs(A1, A2, dims, template1=None, template2=None, align_flag=True, if template1 is None or template2 is None: align_flag = False - x_grid, y_grid = np.meshgrid(np.arange(0., dims[1]).astype( - np.float32), np.arange(0., dims[0]).astype(np.float32)) + x_grid, y_grid = np.meshgrid(np.arange(0., dims[1]).astype(np.float32), np.arange(0., dims[0]).astype(np.float32)) - if align_flag: # first align ROIs from session 2 to the template from session 1 + if align_flag: # first align ROIs from session 2 to the template from session 1 template1 -= template1.min() template1 /= template1.max() template2 -= template2.min() template2 /= template2.max() if use_opt_flow: - template1_norm = np.uint8(template1*(template1 > 0)*255) - template2_norm = np.uint8(template2*(template2 > 0)*255) - flow = cv2.calcOpticalFlowFarneback(np.uint8(template1_norm*255), - np.uint8(template2_norm*255), - None,0.5,3,128,3,7,1.5,0) - x_remap = (flow[:,:,0] + x_grid).astype(np.float32) - y_remap = (flow[:,:,1] + y_grid).astype(np.float32) + template1_norm = np.uint8(template1 * (template1 > 0) * 255) + template2_norm = np.uint8(template2 * (template2 > 0) * 255) + flow = cv2.calcOpticalFlowFarneback(np.uint8(template1_norm * 255), np.uint8(template2_norm * 255), None, + 0.5, 3, 128, 3, 7, 1.5, 0) + x_remap = (flow[:, :, 0] + x_grid).astype(np.float32) + y_remap = (flow[:, :, 1] + y_grid).astype(np.float32) else: - template2, shifts, _, xy_grid = tile_and_correct(template2, template1 - template1.min(), - [int( - dims[0] / 4), int(dims[1] / 4)], [16, 16], [10, 10], - add_to_movie=template2.min(), shifts_opencv=True) + template2, shifts, _, xy_grid = tile_and_correct(template2, + template1 - template1.min(), + [int(dims[0] / 4), int(dims[1] / 4)], [16, 16], [10, 10], + add_to_movie=template2.min(), + shifts_opencv=True) - dims_grid = tuple(np.max(np.stack(xy_grid, axis=0), axis=0) - - np.min(np.stack(xy_grid, axis=0), axis=0) + 1) + dims_grid = tuple(np.max(np.stack(xy_grid, axis=0), axis=0) - np.min(np.stack(xy_grid, axis=0), axis=0) + 1) _sh_ = np.stack(shifts, axis=0) - shifts_x = np.reshape(_sh_[:, 1], dims_grid, - order='C').astype(np.float32) - shifts_y = np.reshape(_sh_[:, 0], dims_grid, - order='C').astype(np.float32) + 
shifts_x = np.reshape(_sh_[:, 1], dims_grid, order='C').astype(np.float32) + shifts_y = np.reshape(_sh_[:, 0], dims_grid, order='C').astype(np.float32) x_remap = (-np.resize(shifts_x, dims) + x_grid).astype(np.float32) y_remap = (-np.resize(shifts_y, dims) + y_grid).astype(np.float32) A_2t = np.reshape(A2, dims + (-1,), order='F').transpose(2, 0, 1) - A2 = np.stack([cv2.remap(img.astype(np.float32), x_remap, - y_remap, cv2.INTER_NEAREST) for img in A_2t], axis=0) - A2 = np.reshape(A2.transpose(1, 2, 0), - (A1.shape[0], A_2t.shape[0]), order='F') + A2 = np.stack([cv2.remap(img.astype(np.float32), x_remap, y_remap, cv2.INTER_NEAREST) for img in A_2t], axis=0) + A2 = np.reshape(A2.transpose(1, 2, 0), (A1.shape[0], A_2t.shape[0]), order='F') - A1 = np.stack([a*(a>max_thr*a.max()) for a in A1.T]).T - A2 = np.stack([a*(a>max_thr*a.max()) for a in A2.T]).T + A1 = np.stack([a * (a > max_thr * a.max()) for a in A1.T]).T + A2 = np.stack([a * (a > max_thr * a.max()) for a in A2.T]).T if D is None: if 'csc_matrix' not in str(type(A1)): @@ -443,8 +477,7 @@ def register_ROIs(A1, A2, dims, template1=None, template2=None, align_flag=True, cm_2 = com(A2, dims[0], dims[1]) A1_tr = (A1 > 0).astype(float) A2_tr = (A2 > 0).astype(float) - D = distance_masks([A1_tr, A2_tr], [cm_1, cm_2], - max_dist, enclosed_thr=enclosed_thr) + D = distance_masks([A1_tr, A2_tr], [cm_1, cm_2], max_dist, enclosed_thr=enclosed_thr) matches, costs = find_matches(D, print_assignment=print_assignment) matches = matches[0] @@ -454,12 +487,10 @@ def register_ROIs(A1, A2, dims, template1=None, template2=None, align_flag=True, idx_tp = np.where(np.array(costs) < thresh_cost)[0] if len(idx_tp) > 0: - matched_ROIs1 = matches[0][idx_tp] # ground truth - matched_ROIs2 = matches[1][idx_tp] # algorithm - comp - non_matched1 = np.setdiff1d( - list(range(D[0].shape[0])), matches[0][idx_tp]) - non_matched2 = np.setdiff1d( - list(range(D[0].shape[1])), matches[1][idx_tp]) + matched_ROIs1 = matches[0][idx_tp] # ground truth + matched_ROIs2 = matches[1][idx_tp] # algorithm - comp + non_matched1 = np.setdiff1d(list(range(D[0].shape[0])), matches[0][idx_tp]) + non_matched2 = np.setdiff1d(list(range(D[0].shape[1])), matches[1][idx_tp]) TP = np.sum(np.array(costs) < thresh_cost) * 1. else: TP = 0. 
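
Since the hunks above and below only re-wrap the body of `register_ROIs` without changing behavior, a minimal usage sketch may help when reviewing them side by side. This is a hypothetical sketch, not part of the patch: the file names, FOV size, and parameter values are assumptions for illustration; the keyword arguments and the six return values follow the signature and `return` statement shown in these hunks.

    # Hypothetical sketch: matching spatial footprints across two sessions.
    import numpy as np
    from caiman.base.rois import register_ROIs

    dims = (512, 512)                            # FOV height/width (assumed)
    A1 = np.load('footprints_session1.npy')      # (n_pixels, n_comps_1), placeholder file
    A2 = np.load('footprints_session2.npy')      # (n_pixels, n_comps_2), placeholder file
    template1 = np.load('template_session1.npy') # session summary images, placeholders
    template2 = np.load('template_session2.npy')

    matched1, matched2, unmatched1, unmatched2, performance, A2_aligned = register_ROIs(
        A1, A2, dims,
        template1=template1, template2=template2,
        align_flag=True,    # first align session 2 onto session 1's template
        use_opt_flow=True,  # dense optical flow rather than piecewise-rigid tiling
        thresh_cost=.7,     # discard matches whose IoU-based cost exceeds this
        max_dist=10)        # centers farther apart than this are never matched
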
@@ -491,43 +522,44 @@ def register_ROIs(A1, A2, dims, template1=None, template2=None, align_flag=True, else: Cn = np.reshape(A1.sum(1) + A2.sum(1), dims, order='F') - masks_1 = np.reshape(A1.toarray(), dims + (-1,), - order='F').transpose(2, 0, 1) - masks_2 = np.reshape(A2.toarray(), dims + (-1,), - order='F').transpose(2, 0, 1) -# try : #Plotting function + masks_1 = np.reshape(A1.toarray(), dims + (-1,), order='F').transpose(2, 0, 1) + masks_2 = np.reshape(A2.toarray(), dims + (-1,), order='F').transpose(2, 0, 1) + # try : #Plotting function level = 0.98 pl.rcParams['pdf.fonttype'] = 42 - font = {'family': 'Myriad Pro', - 'weight': 'regular', - 'size': 10} + font = {'family': 'Myriad Pro', 'weight': 'regular', 'size': 10} pl.rc('font', **font) lp, hp = np.nanpercentile(Cn, [5, 95]) pl.subplot(1, 2, 1) pl.imshow(Cn, vmin=lp, vmax=hp, cmap=cmap) - [pl.contour(norm_nrg(mm), levels=[level], colors='w', linewidths=1) - for mm in masks_1[matched_ROIs1]] - [pl.contour(norm_nrg(mm), levels=[level], colors='r', linewidths=1) - for mm in masks_2[matched_ROIs2]] + [pl.contour(norm_nrg(mm), levels=[level], colors='w', linewidths=1) for mm in masks_1[matched_ROIs1]] + [pl.contour(norm_nrg(mm), levels=[level], colors='r', linewidths=1) for mm in masks_2[matched_ROIs2]] pl.title('Matches') pl.axis('off') pl.subplot(1, 2, 2) pl.imshow(Cn, vmin=lp, vmax=hp, cmap=cmap) - [pl.contour(norm_nrg(mm), levels=[level], colors='w', linewidths=1) - for mm in masks_1[non_matched1]] - [pl.contour(norm_nrg(mm), levels=[level], colors='r', linewidths=1) - for mm in masks_2[non_matched2]] + [pl.contour(norm_nrg(mm), levels=[level], colors='w', linewidths=1) for mm in masks_1[non_matched1]] + [pl.contour(norm_nrg(mm), levels=[level], colors='r', linewidths=1) for mm in masks_2[non_matched2]] pl.title('Mismatches') pl.axis('off') + + # except Exception as e: # logging.warning("not able to plot precision recall usually because we are on travis") # logging.warning(e) return matched_ROIs1, matched_ROIs2, non_matched1, non_matched2, performance, A2 -def register_multisession(A, dims, templates = [None], align_flag=True, - max_thr = 0, use_opt_flow = True, thresh_cost=.7, - max_dist=10, enclosed_thr=None): + +def register_multisession(A, + dims, + templates=[None], + align_flag=True, + max_thr=0, + use_opt_flow=True, + thresh_cost=.7, + max_dist=10, + enclosed_thr=None): """ Register ROIs across multiple sessions using an intersection over union metric and the Hungarian algorithm for optimal matching. 
Registration occurs by @@ -580,7 +612,7 @@ def register_multisession(A, dims, templates = [None], align_flag=True, n_sessions = len(A) templates = list(templates) if len(templates) == 1: - templates = n_sessions*templates + templates = n_sessions * templates if n_sessions <= 1: raise Exception('number of sessions must be greater than 1') @@ -591,31 +623,37 @@ def register_multisession(A, dims, templates = [None], align_flag=True, matchings = [] matchings.append(list(range(A_union.shape[-1]))) - for sess in range(1,n_sessions): - reg_results = register_ROIs(A[sess], A_union, dims, - template1 = templates[sess], - template2 = templates[sess-1], align_flag=True, - max_thr = max_thr, use_opt_flow = use_opt_flow, - thresh_cost = thresh_cost, max_dist = max_dist, - enclosed_thr = enclosed_thr) + for sess in range(1, n_sessions): + reg_results = register_ROIs(A[sess], + A_union, + dims, + template1=templates[sess], + template2=templates[sess - 1], + align_flag=True, + max_thr=max_thr, + use_opt_flow=use_opt_flow, + thresh_cost=thresh_cost, + max_dist=max_dist, + enclosed_thr=enclosed_thr) mat_sess, mat_un, nm_sess, nm_un, _, A2 = reg_results logging.info(len(mat_sess)) A_union = A2.copy() - A_union[:,mat_un] = A[sess][:,mat_sess] - A_union = np.concatenate((A_union.toarray(), A[sess][:,nm_sess]), axis = 1) - new_match = np.zeros(A[sess].shape[-1], dtype = int) + A_union[:, mat_un] = A[sess][:, mat_sess] + A_union = np.concatenate((A_union.toarray(), A[sess][:, nm_sess]), axis=1) + new_match = np.zeros(A[sess].shape[-1], dtype=int) new_match[mat_sess] = mat_un - new_match[nm_sess] = range(A2.shape[-1],A_union.shape[-1]) + new_match[nm_sess] = range(A2.shape[-1], A_union.shape[-1]) matchings.append(new_match.tolist()) - assignments = np.empty((A_union.shape[-1], n_sessions))*np.nan + assignments = np.empty((A_union.shape[-1], n_sessions)) * np.nan for sess in range(n_sessions): - assignments[matchings[sess],sess] = range(len(matchings[sess])) + assignments[matchings[sess], sess] = range(len(matchings[sess])) return A_union, assignments, matchings -def extract_active_components(assignments, indices, only = True): + +def extract_active_components(assignments, indices, only=True): """ Computes the indices of components that were active in a specified set of sessions. @@ -639,16 +677,16 @@ def extract_active_components(assignments, indices, only = True): """ - components = np.where(np.isnan(assignments[:,indices]).sum(-1) == 0)[0] + components = np.where(np.isnan(assignments[:, indices]).sum(-1) == 0)[0] if only: not_inds = list(np.setdiff1d(range(assignments.shape[-1]), indices)) - not_comps = np.where(np.isnan(assignments[:,not_inds]).sum(-1) == - len(not_inds))[0] + not_comps = np.where(np.isnan(assignments[:, not_inds]).sum(-1) == len(not_inds))[0] components = np.intersect1d(components, not_comps) return components + def norm_nrg(a_): a = a_.copy() @@ -661,7 +699,8 @@ def norm_nrg(a_): a[indx] = cumEn return a.reshape(dims, order='F') -def distance_masks(M_s:List, cm_s:List[List], max_dist:float, enclosed_thr:Optional[float]=None) -> List: + +def distance_masks(M_s: List, cm_s: List[List], max_dist: float, enclosed_thr: Optional[float] = None) -> List: """ Compute distance matrix based on an intersection over union metric. 
Matrix are compared in order, with matrix i compared with matrix i+1 @@ -710,19 +749,18 @@ def distance_masks(M_s:List, cm_s:List[List], max_dist:float, enclosed_thr:Optio # for each components of gt k = gt_comp[:, np.repeat(i, nb_test)] + test_comp # k is correlation matrix of this neuron to every other of the test - for j in range(nb_test): # for each components on the tests + for j in range(nb_test): # for each components on the tests dist = np.linalg.norm(cmgt_comp[i] - cmtest_comp[j]) - # we compute the distance of this one to the other ones + # we compute the distance of this one to the other ones if dist < max_dist: - # union matrix of the i-th neuron to the jth one + # union matrix of the i-th neuron to the jth one union = k[:, j].sum() - # we could have used OR for union and AND for intersection while converting - # the matrice into real boolean before + # we could have used OR for union and AND for intersection while converting + # the matrice into real boolean before # product of the two elements' matrices # we multiply the boolean values from the jth omponent to the ith - intersection = np.array(gt_comp[:, i].T.dot( - test_comp[:, j]).todense()).squeeze() + intersection = np.array(gt_comp[:, i].T.dot(test_comp[:, j]).todense()).squeeze() # if we don't have even a union this is pointless if union > 0: @@ -745,7 +783,8 @@ def distance_masks(M_s:List, cm_s:List[List], max_dist:float, enclosed_thr:Optio D_s.append(D) return D_s -def find_matches(D_s, print_assignment:bool=False) -> Tuple[List, List]: + +def find_matches(D_s, print_assignment: bool = False) -> Tuple[List, List]: # todo todocument matches = [] @@ -770,14 +809,17 @@ def find_matches(D_s, print_assignment:bool=False) -> Tuple[List, List]: if print_assignment: logging.debug(('(%d, %d) -> %f' % (row, column, value))) total.append(value) - logging.debug(('FOV: %d, shape: %d,%d total cost: %f' % - (ii, DD.shape[0], DD.shape[1], np.sum(total)))) + logging.debug(('FOV: %d, shape: %d,%d total cost: %f' % (ii, DD.shape[0], DD.shape[1], np.sum(total)))) logging.debug((time.time() - t_start)) costs.append(total) # send back the results in the format we want return matches, costs -def link_neurons(matches:List[List[Tuple]], costs:List[List], max_cost:float=0.6, min_FOV_present:Optional[int]=None): + +def link_neurons(matches: List[List[Tuple]], + costs: List[List], + max_cost: float = 0.6, + min_FOV_present: Optional[int] = None): """ Link neurons from different FOVs given matches and costs obtained from the hungarian algorithm @@ -826,7 +868,8 @@ def link_neurons(matches:List[List[Tuple]], costs:List[List], max_cost:float=0.6 logging.info(('num_neurons:' + str(num_neurons))) return neurons -def nf_load_masks(file_name:str, dims:Tuple[int, ...]) -> np.array: + +def nf_load_masks(file_name: str, dims: Tuple[int, ...]) -> np.array: # todo todocument # load the regions (training data only) @@ -841,7 +884,8 @@ def tomask(coords): masks = np.array([tomask(s['coordinates']) for s in regions]) return masks -def nf_masks_to_json(binary_masks:np.ndarray, json_filename:str) -> List[Dict]: + +def nf_masks_to_json(binary_masks: np.ndarray, json_filename: str) -> List[Dict]: """ Take as input a tensor of binary mask and produces json format for neurofinder @@ -865,7 +909,8 @@ def nf_masks_to_json(binary_masks:np.ndarray, json_filename:str) -> List[Dict]: return regions -def nf_masks_to_neurof_dict(binary_masks:np.ndarray, dataset_name:str) -> Dict[str, Any]: + +def nf_masks_to_neurof_dict(binary_masks: np.ndarray, dataset_name: str) -> 
Dict[str, Any]: """ Take as input a tensor of binary mask and produces dict format for neurofinder @@ -887,6 +932,7 @@ def nf_masks_to_neurof_dict(binary_masks:np.ndarray, dataset_name:str) -> Dict[s return dset + def nf_read_roi(fileobj) -> np.ndarray: ''' points = read_roi(fileobj) @@ -894,9 +940,9 @@ def nf_read_roi(fileobj) -> np.ndarray: Adapted from https://gist.github.com/luispedro/3437255 ''' -# This is based on: -# http://rsbweb.nih.gov/ij/developer/source/ij/io/RoiDecoder.java.html -# http://rsbweb.nih.gov/ij/developer/source/ij/io/RoiEncoder.java.html + # This is based on: + # http://rsbweb.nih.gov/ij/developer/source/ij/io/RoiDecoder.java.html + # http://rsbweb.nih.gov/ij/developer/source/ij/io/RoiEncoder.java.html # TODO: Use an enum #SPLINE_FIT = 1 @@ -944,12 +990,12 @@ def getfloat(): # Discard second Byte: get8() -# if not (0 <= roi_type < 11): -# logging.error(('roireader: ROI type %s not supported' % roi_type)) -# -# if roi_type != 7: -# -# logging.error(('roireader: ROI type %s not supported (!= 7)' % roi_type)) + # if not (0 <= roi_type < 11): + # logging.error(('roireader: ROI type %s not supported' % roi_type)) + # + # if roi_type != 7: + # + # logging.error(('roireader: ROI type %s not supported (!= 7)' % roi_type)) top = get16() left = get16() @@ -967,8 +1013,7 @@ def getfloat(): fill_color = get32() subtype = get16() if subtype != 0: - raise ValueError( - 'roireader: ROI subtype %s not supported (!= 0)' % subtype) + raise ValueError('roireader: ROI subtype %s not supported (!= 0)' % subtype) options = get16() arrow_style = get8() arrow_head_size = get8() @@ -990,13 +1035,13 @@ def getfloat(): return points -def nf_read_roi_zip(fname:str, dims:Tuple[int, ...], return_names=False) -> np.array: + +def nf_read_roi_zip(fname: str, dims: Tuple[int, ...], return_names=False) -> np.array: # todo todocument with zipfile.ZipFile(fname) as zf: names = zf.namelist() - coords = [nf_read_roi(zf.open(n)) - for n in names] + coords = [nf_read_roi(zf.open(n)) for n in names] def tomask(coords): mask = np.zeros(dims) @@ -1012,7 +1057,8 @@ def tomask(coords): else: return masks -def nf_merge_roi_zip(fnames:List[str], idx_to_keep:List[List], new_fold:str): + +def nf_merge_roi_zip(fnames: List[str], idx_to_keep: List[List], new_fold: str): """ Create a zip file containing ROIs for ImageJ by combining elements from a list of ROI zip files @@ -1038,8 +1084,7 @@ def nf_merge_roi_zip(fnames:List[str], idx_to_keep:List[List], new_fold:str): logging.debug(len(name_rois)) zip_ref = zipfile.ZipFile(fn, 'r') zip_ref.extractall(dirpath) - files_to_keep.append([os.path.join(dirpath, ff) - for ff in np.array(name_rois)[idx]]) + files_to_keep.append([os.path.join(dirpath, ff) for ff in np.array(name_rois)[idx]]) zip_ref.close() os.makedirs(new_fold) @@ -1049,8 +1094,14 @@ def nf_merge_roi_zip(fnames:List[str], idx_to_keep:List[List], new_fold:str): shutil.make_archive(new_fold, 'zip', new_fold) shutil.rmtree(new_fold) -def extract_binary_masks_blob(A, neuron_radius:float, dims:Tuple[int, ...], num_std_threshold:int=1, minCircularity:float=0.5, - minInertiaRatio:float=0.2, minConvexity:float=.8) -> Tuple[np.array, np.array, np.array]: + +def extract_binary_masks_blob(A, + neuron_radius: float, + dims: Tuple[int, ...], + num_std_threshold: int = 1, + minCircularity: float = 0.5, + minInertiaRatio: float = 0.2, + minConvexity: float = .8) -> Tuple[np.array, np.array, np.array]: """ Function to extract masks from data. 
It will also perform a preliminary selectino of good masks based on criteria like shape and size @@ -1147,17 +1198,21 @@ def extract_binary_masks_blob(A, neuron_radius:float, dims:Tuple[int, ...], num_ return np.array(masks_ws), np.array(pos_examples), np.array(neg_examples) -def extract_binary_masks_blob_parallel(A, neuron_radius, dims, num_std_threshold=1, minCircularity=0.5, - minInertiaRatio=0.2, minConvexity=.8, dview=None) -> Tuple[List, List, List]: +def extract_binary_masks_blob_parallel(A, + neuron_radius, + dims, + num_std_threshold=1, + minCircularity=0.5, + minInertiaRatio=0.2, + minConvexity=.8, + dview=None) -> Tuple[List, List, List]: # todo todocument pars = [] for a in range(A.shape[-1]): - pars.append([A[:, a], neuron_radius, dims, num_std_threshold, - minCircularity, minInertiaRatio, minConvexity]) + pars.append([A[:, a], neuron_radius, dims, num_std_threshold, minCircularity, minInertiaRatio, minConvexity]) if dview is not None: - res = dview.map_sync( - extract_binary_masks_blob_parallel_place_holder, pars) + res = dview.map_sync(extract_binary_masks_blob_parallel_place_holder, pars) else: res = list(map(extract_binary_masks_blob_parallel_place_holder, pars)) @@ -1173,16 +1228,20 @@ def extract_binary_masks_blob_parallel(A, neuron_radius, dims, num_std_threshold return masks, is_pos, is_neg -def extract_binary_masks_blob_parallel_place_holder(pars:Tuple) -> Tuple[Any, Any, Any]: +def extract_binary_masks_blob_parallel_place_holder(pars: Tuple) -> Tuple[Any, Any, Any]: A, neuron_radius, dims, num_std_threshold, _, minInertiaRatio, minConvexity = pars - masks_ws, pos_examples, neg_examples = extract_binary_masks_blob(A, neuron_radius, - dims, num_std_threshold=num_std_threshold, minCircularity=0.5, - minInertiaRatio=minInertiaRatio, minConvexity=minConvexity) + masks_ws, pos_examples, neg_examples = extract_binary_masks_blob(A, + neuron_radius, + dims, + num_std_threshold=num_std_threshold, + minCircularity=0.5, + minInertiaRatio=minInertiaRatio, + minConvexity=minConvexity) return masks_ws, len(pos_examples), len(neg_examples) - -def extractROIsFromPCAICA(spcomps, numSTD=4, gaussiansigmax=2, gaussiansigmay=2, thresh=None) -> Tuple[List[np.array], List]: +def extractROIsFromPCAICA(spcomps, numSTD=4, gaussiansigmax=2, gaussiansigmay=2, + thresh=None) -> Tuple[List[np.array], List]: """ Given the spatial components output of the IPCA_stICA function extract possible regions of interest @@ -1222,18 +1281,22 @@ def extractROIsFromPCAICA(spcomps, numSTD=4, gaussiansigmax=2, gaussiansigmay=2, return allMasks, maskgrouped -def detect_duplicates_and_subsets(binary_masks, predictions=None, r_values=None, dist_thr:float=0.1, min_dist=10, - thresh_subset:float=0.8): + +def detect_duplicates_and_subsets(binary_masks, + predictions=None, + r_values=None, + dist_thr: float = 0.1, + min_dist=10, + thresh_subset: float = 0.8): cm = [scipy.ndimage.center_of_mass(mm) for mm in binary_masks] - sp_rois = scipy.sparse.csc_matrix( - np.reshape(binary_masks, (binary_masks.shape[0], -1)).T) + sp_rois = scipy.sparse.csc_matrix(np.reshape(binary_masks, (binary_masks.shape[0], -1)).T) D = distance_masks([sp_rois, sp_rois], [cm, cm], min_dist)[0] np.fill_diagonal(D, 1) overlap = sp_rois.T.dot(sp_rois).toarray() sz = np.array(sp_rois.sum(0)) logging.info(sz.shape) - overlap = overlap/sz.T + overlap = overlap / sz.T np.fill_diagonal(overlap, 0) # pairs of duplicate indices @@ -1250,26 +1313,26 @@ def detect_duplicates_and_subsets(binary_masks, predictions=None, r_values=None, logging.debug('***** 
USING MAX AREA BY DEFAULT') overlap_tmp = overlap.copy() >= thresh_subset - overlap_tmp = overlap_tmp*metric[:, None] + overlap_tmp = overlap_tmp * metric[:, None] max_idx = np.argmax(overlap_tmp) one, two = np.unravel_index(max_idx, overlap_tmp.shape) max_val = overlap_tmp[one, two] - indices_to_keep:List = [] + indices_to_keep: List = [] indices_to_remove = [] while max_val > 0: one, two = np.unravel_index(max_idx, overlap_tmp.shape) if metric[one] > metric[two]: #indices_to_keep.append(one) - overlap_tmp[:,two] = 0 - overlap_tmp[two,:] = 0 + overlap_tmp[:, two] = 0 + overlap_tmp[two, :] = 0 indices_to_remove.append(two) #if two in indices_to_keep: # indices_to_keep.remove(two) else: - overlap_tmp[:,one] = 0 - overlap_tmp[one,:] = 0 + overlap_tmp[:, one] = 0 + overlap_tmp[one, :] = 0 indices_to_remove.append(one) #indices_to_keep.append(two) #if one in indices_to_keep: @@ -1280,41 +1343,41 @@ def detect_duplicates_and_subsets(binary_masks, predictions=None, r_values=None, max_val = overlap_tmp[one, two] #indices_to_remove = np.setdiff1d(np.unique(indices_orig),indices_to_keep) - indices_to_keep = np.setdiff1d(np.unique(indices_orig),indices_to_remove) - -# if len(indices) > 0: -# if use_max_area: -# # if is to deal with tie breaks in case of same area -# indices_keep = np.argmax([[overlap[sec, frst], overlap[frst, sec]] -# for frst, sec in indices], 1) -# indices_remove = np.argmin([[overlap[sec, frst], overlap[frst, sec]] -# for frst, sec in indices], 1) -# -# -# else: #use CNN -# indices_keep = np.argmin([[metric[sec], metric[frst]] -# for frst, sec in indices], 1) -# indices_remove = np.argmax([[metric[sec], metric[frst]] -# for frst, sec in indices], 1) -# -# indices_keep = np.unique([elms[ik] for ik, elms in -# zip(indices_keep, indices)]) -# indices_remove = np.unique([elms[ik] for ik, elms in -# zip(indices_remove, indices)]) -# -# multiple_appearance = np.intersect1d(indices_keep,indices_remove) -# for mapp in multiple_appearance: -# indices_remove.remove(mapp) -# else: -# indices_keep = [] -# indices_remove = [] -# indices_keep = [] -# indices_remove = [] - + indices_to_keep = np.setdiff1d(np.unique(indices_orig), indices_to_remove) + + # if len(indices) > 0: + # if use_max_area: + # # if is to deal with tie breaks in case of same area + # indices_keep = np.argmax([[overlap[sec, frst], overlap[frst, sec]] + # for frst, sec in indices], 1) + # indices_remove = np.argmin([[overlap[sec, frst], overlap[frst, sec]] + # for frst, sec in indices], 1) + # + # + # else: #use CNN + # indices_keep = np.argmin([[metric[sec], metric[frst]] + # for frst, sec in indices], 1) + # indices_remove = np.argmax([[metric[sec], metric[frst]] + # for frst, sec in indices], 1) + # + # indices_keep = np.unique([elms[ik] for ik, elms in + # zip(indices_keep, indices)]) + # indices_remove = np.unique([elms[ik] for ik, elms in + # zip(indices_remove, indices)]) + # + # multiple_appearance = np.intersect1d(indices_keep,indices_remove) + # for mapp in multiple_appearance: + # indices_remove.remove(mapp) + # else: + # indices_keep = [] + # indices_remove = [] + # indices_keep = [] + # indices_remove = [] return indices_orig, indices_to_keep, indices_to_remove, D, overlap -def detect_duplicates(file_name:str, dist_thr:float=0.1, FOV:Tuple[int, ...]=(512, 512)) -> Tuple[List, List]: + +def detect_duplicates(file_name: str, dist_thr: float = 0.1, FOV: Tuple[int, ...] 
= (512, 512)) -> Tuple[List, List]: """ Removes duplicate ROIs from file file_name @@ -1333,11 +1396,10 @@ def detect_duplicates(file_name:str, dist_thr:float=0.1, FOV:Tuple[int, ...]=(51 """ rois = nf_read_roi_zip(file_name, FOV) cm = [scipy.ndimage.center_of_mass(mm) for mm in rois] - sp_rois = scipy.sparse.csc_matrix( - np.reshape(rois, (rois.shape[0], np.prod(FOV))).T) + sp_rois = scipy.sparse.csc_matrix(np.reshape(rois, (rois.shape[0], np.prod(FOV))).T) D = distance_masks([sp_rois, sp_rois], [cm, cm], 10)[0] np.fill_diagonal(D, 1) - indices = np.where(D < dist_thr) # pairs of duplicate indices + indices = np.where(D < dist_thr) # pairs of duplicate indices ind = list(np.unique(indices[1][indices[1] > indices[0]])) ind_keep = list(set(range(D.shape[0])) - set(ind)) diff --git a/caiman/base/traces.py b/caiman/base/traces.py index 9bec550f5..f37a10859 100644 --- a/caiman/base/traces.py +++ b/caiman/base/traces.py @@ -1,6 +1,5 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- - """ Spyder Editor @@ -74,14 +73,12 @@ def computeDFF(self, window_sec=5, minQuantile=20): window = int(window_sec * self.fr) logging.debug(window) if window >= T: - raise ValueError( - "The window must be shorter than the total length") + raise ValueError("The window must be shorter than the total length") tracesDFF = [] for tr in self.T: logging.debug("TR Shape is " + str(tr.shape)) - traceBL = [np.percentile(tr[i:i + window], minQuantile) - for i in range(1, len(tr) - window)] + traceBL = [np.percentile(tr[i:i + window], minQuantile) for i in range(1, len(tr) - window)] missing = np.percentile(tr[-window:], minQuantile) missing = np.repeat(missing, window + 1) traceBL = np.concatenate((traceBL, missing)) @@ -140,6 +137,7 @@ def plot(self, stacked=True, subtract_minimum=False, cmap=pl.cm.jet, **kwargs): def extract_epochs(self, trigs=None, tb=1, ta=1): raise Exception('Not Implemented. 
Look at movie resize') + if __name__ == "__main__": tracedata = trace(3 + np.random.random((2000, 4)), fr=30, start_time=0) tracedata_dff = tracedata.computeDFF() diff --git a/caiman/behavior/behavior.py b/caiman/behavior/behavior.py index 254ad7ce4..e3accd144 100644 --- a/caiman/behavior/behavior.py +++ b/caiman/behavior/behavior.py @@ -1,6 +1,5 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- - """ OPTICAL FLOW @@ -30,7 +29,8 @@ except: pass -def select_roi(img:np.ndarray, n_rois:int=1) -> List: + +def select_roi(img: np.ndarray, n_rois: int = 1) -> List: """ Create a mask from a the convex polygon enclosed between selected points @@ -63,13 +63,22 @@ def to_polar(x, y): mag, ang = cv2.cartToPolar(x, y) return mag, ang + def get_nonzero_subarray(arr, mask): x, y = mask.nonzero() return arr.toarray()[x.min():x.max() + 1, y.min():y.max() + 1] -def extract_motor_components_OF(m, n_components, mask=None, resize_fact:float=.5, only_magnitude:bool=False, max_iter:int=1000, - verbose:bool=False, method_factorization:str='nmf', max_iter_DL=-30) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: - # todo todocument + +def extract_motor_components_OF(m, + n_components, + mask=None, + resize_fact: float = .5, + only_magnitude: bool = False, + max_iter: int = 1000, + verbose: bool = False, + method_factorization: str = 'nmf', + max_iter_DL=-30) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + # todo todocument if mask is not None: mask = coo_matrix(np.array(mask).squeeze()) ms = [get_nonzero_subarray(mask.multiply(fr), mask) for fr in m] @@ -79,8 +88,11 @@ def extract_motor_components_OF(m, n_components, mask=None, resize_fact:float=.5 else: ms = m of_or = compute_optical_flow(ms, do_show=False, polar_coord=False) - of_or = np.concatenate([cm.movie(of_or[0]).resize(resize_fact, resize_fact, 1)[np.newaxis, :, :, :], - cm.movie(of_or[1]).resize(resize_fact, resize_fact, 1)[np.newaxis, :, :, :]], axis=0) + of_or = np.concatenate([ + cm.movie(of_or[0]).resize(resize_fact, resize_fact, 1)[np.newaxis, :, :, :], + cm.movie(of_or[1]).resize(resize_fact, resize_fact, 1)[np.newaxis, :, :, :] + ], + axis=0) if only_magnitude: of = np.sqrt(of[0]**2 + of[1]**2) @@ -91,15 +103,23 @@ def extract_motor_components_OF(m, n_components, mask=None, resize_fact:float=.5 else: of = of_or - spatial_filter_, time_trace_, _ = extract_components(of, n_components=n_components, verbose=verbose, - normalize_std=False, max_iter=max_iter, + spatial_filter_, time_trace_, _ = extract_components(of, + n_components=n_components, + verbose=verbose, + normalize_std=False, + max_iter=max_iter, method_factorization=method_factorization, max_iter_DL=max_iter_DL) return spatial_filter_, time_trace_, of_or -def extract_magnitude_and_angle_from_OF(spatial_filter_, time_trace_, of_or, - num_std_mag_for_angle=.6, sav_filter_size=3, only_magnitude=False) -> Tuple[List, List, List, List]: + +def extract_magnitude_and_angle_from_OF(spatial_filter_, + time_trace_, + of_or, + num_std_mag_for_angle=.6, + sav_filter_size=3, + only_magnitude=False) -> Tuple[List, List, List, List]: # todo todocument mags = [] @@ -124,11 +144,9 @@ def extract_magnitude_and_angle_from_OF(spatial_filter_, time_trace_, of_or, # normalize to pixel units spatial_mask = spatial_filter - spatial_mask[spatial_mask < (np.nanpercentile( - spatial_mask[0], 99.9) * .9)] = np.nan + spatial_mask[spatial_mask < (np.nanpercentile(spatial_mask[0], 99.9) * .9)] = np.nan ofmk = of_or * spatial_mask[None, None, :, :] - range_ = np.std(np.nanpercentile( - np.sqrt(ofmk[0]**2 + ofmk[1]**2), 95, (1, 
2))) + range_ = np.std(np.nanpercentile(np.sqrt(ofmk[0]**2 + ofmk[1]**2), 95, (1, 2))) mag = (mag / np.std(mag)) * range_ mag = scipy.signal.medfilt(mag.squeeze(), kernel_size=1) dirct_orig = dirct.copy() @@ -142,8 +160,21 @@ def extract_magnitude_and_angle_from_OF(spatial_filter_, time_trace_, of_or, return mags, dircts, dircts_thresh, spatial_masks_thr -def compute_optical_flow(m, mask=None, polar_coord:bool=True, do_show:bool=False, do_write:bool=False, file_name=None, gain_of=None, - frate:float=30.0, pyr_scale:float=.1, levels:int=3, winsize:int=25, iterations:int=3, poly_n=7, poly_sigma=1.5): + +def compute_optical_flow(m, + mask=None, + polar_coord: bool = True, + do_show: bool = False, + do_write: bool = False, + file_name=None, + gain_of=None, + frate: float = 30.0, + pyr_scale: float = .1, + levels: int = 3, + winsize: int = 25, + iterations: int = 3, + poly_n=7, + poly_sigma=1.5): """ This function compute the optical flow of behavioral movies using the opencv cv2.calcOpticalFlowFarneback function @@ -195,11 +226,9 @@ def compute_optical_flow(m, mask=None, polar_coord:bool=True, do_show:bool=False if do_write: if file_name is not None: - video = cv2.VideoWriter(file_name, cv2.VideoWriter_fourcc( - 'M', 'J', 'P', 'G'), frate, (d2 * 2, d1), 1) + video = cv2.VideoWriter(file_name, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), frate, (d2 * 2, d1), 1) else: - raise Exception( - 'You need to provide file name (.avi) when saving video') + raise Exception('You need to provide file name (.avi) when saving video') for counter, next_ in enumerate(m): if counter % 100 == 0: @@ -220,11 +249,9 @@ def compute_optical_flow(m, mask=None, polar_coord:bool=True, do_show:bool=False if polar_coord: hsv[..., 0] = coord_2 * 180 / np.pi / 2 else: - hsv[..., 0] = cv2.normalize( - coord_2, None, 0, 255, cv2.NORM_MINMAX) + hsv[..., 0] = cv2.normalize(coord_2, None, 0, 255, cv2.NORM_MINMAX) if gain_of is None: - hsv[..., 2] = cv2.normalize( - coord_1, None, 0, 255, cv2.NORM_MINMAX) + hsv[..., 2] = cv2.normalize(coord_1, None, 0, 255, cv2.NORM_MINMAX) else: hsv[..., 2] = gain_of * coord_1 rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR) @@ -254,7 +281,12 @@ def compute_optical_flow(m, mask=None, polar_coord:bool=True, do_show:bool=False #%% NMF -def extract_components(mov_tot, n_components:int=6, normalize_std:bool=True, max_iter_DL=-30, method_factorization:str='nmf', **kwargs) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: +def extract_components(mov_tot, + n_components: int = 6, + normalize_std: bool = True, + max_iter_DL=-30, + method_factorization: str = 'nmf', + **kwargs) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: """ From optical flow images can extract spatial and temporal components @@ -286,8 +318,7 @@ def extract_components(mov_tot, n_components:int=6, normalize_std:bool=True, max if mov_tot.ndim == 4: if normalize_std: norm_fact = np.nanstd(mov_tot, axis=(1, 2, 3)) - mov_tot = old_div( - mov_tot, norm_fact[:, np.newaxis, np.newaxis, np.newaxis]) + mov_tot = old_div(mov_tot, norm_fact[:, np.newaxis, np.newaxis, np.newaxis]) else: norm_fact = np.array([1., 1.]) c, T, d1, d2 = np.shape(mov_tot) @@ -305,20 +336,22 @@ def extract_components(mov_tot, n_components:int=6, normalize_std:bool=True, max time_trace = nmf.fit_transform(newm) spatial_filter = nmf.components_ - spatial_filter = np.concatenate( - [np.reshape(sp, (d1, d2))[np.newaxis, :, :] for sp in spatial_filter], axis=0) + spatial_filter = np.concatenate([np.reshape(sp, (d1, d2))[np.newaxis, :, :] for sp in spatial_filter], axis=0) elif 
method_factorization == 'dict_learn': import spams newm = np.asfortranarray(newm, dtype=np.float32) - time_trace = spams.trainDL( - newm, K=n_components, mode=0, lambda1=1, posAlpha=True, iter=max_iter_DL) + time_trace = spams.trainDL(newm, K=n_components, mode=0, lambda1=1, posAlpha=True, iter=max_iter_DL) - spatial_filter = spams.lasso(newm, D=time_trace, return_reg_path=False, lambda1=0.01, - mode=spams.spams_wrap.PENALTY, pos=True) + spatial_filter = spams.lasso(newm, + D=time_trace, + return_reg_path=False, + lambda1=0.01, + mode=spams.spams_wrap.PENALTY, + pos=True) - spatial_filter = np.concatenate([np.reshape( - sp, (d1, d2))[np.newaxis, :, :] for sp in spatial_filter.toarray()], axis=0) + spatial_filter = np.concatenate([np.reshape(sp, (d1, d2))[np.newaxis, :, :] for sp in spatial_filter.toarray()], + axis=0) time_trace = [np.reshape(ttr, (c, T)).T for ttr in time_trace.T] @@ -326,6 +359,7 @@ def extract_components(mov_tot, n_components:int=6, normalize_std:bool=True, max print(el_t) return spatial_filter, time_trace, norm_fact + def plot_components(sp_filt, t_trace) -> None: # todo: todocument pl.figure() diff --git a/caiman/external/cell_magic_wand.py b/caiman/external/cell_magic_wand.py index 009d17932..21524599e 100644 --- a/caiman/external/cell_magic_wand.py +++ b/caiman/external/cell_magic_wand.py @@ -35,8 +35,8 @@ def coord_polar_to_cart(r, theta, center): def coord_cart_to_polar(x, y, center): '''Converts Cartesian coordinates to polar''' - r = np.sqrt((x-center[0])**2 + (y-center[1])**2) - theta = np.arctan2((y-center[1]), (x-center[0])) + r = np.sqrt((x - center[0])**2 + (y - center[1])**2) + theta = np.arctan2((y - center[1]), (x - center[0])) return r, theta @@ -46,7 +46,7 @@ def image_cart_to_polar(image, center, min_radius, max_radius, phase_width, zoom # Upsample image if zoom_factor != 1: image = zoom(image, (zoom_factor, zoom_factor), order=4) - center = (center[0]*zoom_factor + zoom_factor/2, center[1]*zoom_factor + zoom_factor/2) + center = (center[0] * zoom_factor + zoom_factor / 2, center[1] * zoom_factor + zoom_factor / 2) min_radius = min_radius * zoom_factor max_radius = max_radius * zoom_factor @@ -59,16 +59,14 @@ def image_cart_to_polar(image, center, min_radius, max_radius, phase_width, zoom image = np.pad(image, pad_dist, 'constant') # coordinate conversion - theta, r = np.meshgrid(np.linspace(0, 2*np.pi, phase_width), - np.arange(min_radius, max_radius)) + theta, r = np.meshgrid(np.linspace(0, 2 * np.pi, phase_width), np.arange(min_radius, max_radius)) x, y = coord_polar_to_cart(r, theta, center) x, y = np.round(x), np.round(y) x, y = x.astype(int), y.astype(int) x = np.maximum(x, 0) y = np.maximum(y, 0) - x = np.minimum(x, max_x-1) - y = np.minimum(y, max_y-1) - + x = np.minimum(x, max_x - 1) + y = np.minimum(y, max_y - 1) polar = image[x, y] polar.reshape((max_radius - min_radius, phase_width)) @@ -81,7 +79,7 @@ def mask_polar_to_cart(mask, center, min_radius, max_radius, output_shape, zoom_ # Account for upsampling if zoom_factor != 1: - center = (center[0]*zoom_factor + zoom_factor/2, center[1]*zoom_factor + zoom_factor/2) + center = (center[0] * zoom_factor + zoom_factor / 2, center[1] * zoom_factor + zoom_factor / 2) min_radius = min_radius * zoom_factor max_radius = max_radius * zoom_factor output_shape = map(lambda a: a * zoom_factor, output_shape) @@ -90,20 +88,19 @@ def mask_polar_to_cart(mask, center, min_radius, max_radius, output_shape, zoom_ image = np.zeros(output_shape) # coordinate conversion - theta, r = np.meshgrid(np.linspace(0, 
2*np.pi, mask.shape[1]), - np.arange(0, max_radius)) + theta, r = np.meshgrid(np.linspace(0, 2 * np.pi, mask.shape[1]), np.arange(0, max_radius)) x, y = coord_polar_to_cart(r, theta, center) x, y = np.round(x), np.round(y) x, y = x.astype(int), y.astype(int) - x = np.clip(x, 0, image.shape[0]-1) - y = np.clip(y, 0, image.shape[1]-1) - ix,iy = np.meshgrid(np.arange(0,mask.shape[1]), np.arange(0,mask.shape[0])) - image[x,y] = mask + x = np.clip(x, 0, image.shape[0] - 1) + y = np.clip(y, 0, image.shape[1] - 1) + ix, iy = np.meshgrid(np.arange(0, mask.shape[1]), np.arange(0, mask.shape[0])) + image[x, y] = mask # downsample image if zoom_factor != 1: - zf = 1/float(zoom_factor) + zf = 1 / float(zoom_factor) image = zoom(image, (zf, zf), order=4) # ensure image remains a filled binary mask @@ -119,13 +116,13 @@ def find_edge_2d(polar, min_radius): # Dynamic programming phase values_right_shift = np.pad(polar, ((0, 0), (0, 1)), 'constant', constant_values=0)[:, 1:] - values_closeright_shift = np.pad(polar, ((1, 0),(0, 1)), 'constant', constant_values=0)[:-1, 1:] + values_closeright_shift = np.pad(polar, ((1, 0), (0, 1)), 'constant', constant_values=0)[:-1, 1:] values_awayright_shift = np.pad(polar, ((0, 1), (0, 1)), 'constant', constant_values=0)[1:, 1:] values_move = np.zeros((polar.shape[0], polar.shape[1], 3)) - values_move[:, :, 2] = np.add(polar, values_closeright_shift) # closeright - values_move[:, :, 1] = np.add(polar, values_right_shift) # right - values_move[:, :, 0] = np.add(polar, values_awayright_shift) # awayright + values_move[:, :, 2] = np.add(polar, values_closeright_shift) # closeright + values_move[:, :, 1] = np.add(polar, values_right_shift) # right + values_move[:, :, 0] = np.add(polar, values_awayright_shift) # awayright values = np.amax(values_move, axis=2) directions = np.argmax(values_move, axis=2) @@ -136,20 +133,20 @@ def find_edge_2d(polar, min_radius): edge = [] mask = np.zeros(polar.shape) r_max, r = 0, 0 - for i,v in enumerate(values[:,0]): + for i, v in enumerate(values[:, 0]): if v >= r_max: r, r_max = i, v - edge.append((r+min_radius, 0)) - mask[0:r+1, 0] = 1 - for t in range(1,polar.shape[1]): - r += directions[r, t-1] - if r >= directions.shape[0]: r = directions.shape[0]-1 + edge.append((r + min_radius, 0)) + mask[0:r + 1, 0] = 1 + for t in range(1, polar.shape[1]): + r += directions[r, t - 1] + if r >= directions.shape[0]: r = directions.shape[0] - 1 if r < 0: r = 0 - edge.append((r+min_radius, t)) - mask[0:r+1, t] = 1 + edge.append((r + min_radius, t)) + mask[0:r + 1, t] = 1 # add to inside of mask accounting for min_radius - new_mask = np.ones((min_radius+mask.shape[0], mask.shape[1])) + new_mask = np.ones((min_radius + mask.shape[0], mask.shape[1])) new_mask[min_radius:, :] = mask return np.array(edge), new_mask @@ -158,14 +155,13 @@ def find_edge_2d(polar, min_radius): def edge_polar_to_cart(edge, center): '''Converts a list of polar edge points to a list of cartesian edge points''' cart_edge = [] - for (r,t) in edge: + for (r, t) in edge: x, y = coord_polar_to_cart(r, t, center) cart_edge.append((round(x), round(y))) return cart_edge -def cell_magic_wand_single_point(image, center, min_radius, max_radius, - roughness=2, zoom_factor=1): +def cell_magic_wand_single_point(image, center, min_radius, max_radius, roughness=2, zoom_factor=1): '''Draws a border within a specified radius around a specified center "seed" point using a polar transform and a dynamic programming edge-following algorithm. 
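
The magic-wand hunks in this file are likewise pure re-wraps; a short sketch of the public entry point shows how the polar-transform helpers reformatted above fit together. This is a hypothetical sketch: the image file and seed coordinates are placeholders, and the keyword names mirror the `cell_magic_wand(image, center, min_radius, max_radius, roughness=2, zoom_factor=1, center_range=2)` signature visible in this diff.

    # Hypothetical sketch: segmenting one cell around a seed point.
    import numpy as np
    from caiman.external.cell_magic_wand import cell_magic_wand

    image = np.load('mean_image.npy')  # 2D summary image (placeholder file)
    seed = (120, 84)                   # point inside the target cell, in the
                                       # (first-axis, second-axis) order this module uses
    # min_radius/max_radius bound the polar transform around the seed; roughness
    # scales how many angular samples the dynamic-programming edge follower sees.
    mask = cell_magic_wand(image, seed, min_radius=3, max_radius=12,
                           roughness=2, zoom_factor=1, center_range=2)
    # mask is a binary array with image's shape: 1 inside the detected cell border.
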
@@ -184,17 +180,19 @@ def cell_magic_wand_single_point(image, center, min_radius, max_radius, zoom_factor = 1 print("negative zoom_factor not allowed, setting zoom_factor to 1") phase_width = int(2 * np.pi * max_radius * roughness) - polar_image = image_cart_to_polar(image, center, min_radius, max_radius, - phase_width=phase_width, zoom_factor=zoom_factor) + polar_image = image_cart_to_polar(image, + center, + min_radius, + max_radius, + phase_width=phase_width, + zoom_factor=zoom_factor) polar_edge, polar_mask = find_edge_2d(polar_image, min_radius) cart_edge = edge_polar_to_cart(polar_edge, center) - cart_mask = mask_polar_to_cart(polar_mask, center, min_radius, max_radius, - image.shape, zoom_factor=zoom_factor) + cart_mask = mask_polar_to_cart(polar_mask, center, min_radius, max_radius, image.shape, zoom_factor=zoom_factor) return cart_mask, cart_edge -def cell_magic_wand(image, center, min_radius, max_radius, - roughness=2, zoom_factor=1, center_range=2): +def cell_magic_wand(image, center, min_radius, max_radius, roughness=2, zoom_factor=1, center_range=2): '''Runs the cell magic wand tool on multiple points near the supplied center and combines the results for a more robust edge detection then provided by the vanilla wand tool. @@ -203,28 +201,35 @@ def cell_magic_wand(image, center, min_radius, max_radius, centers = [] for i in [-center_range, 0, center_range]: for j in [-center_range, 0, center_range]: - centers.append((center[0]+i, center[1]+j)) + centers.append((center[0] + i, center[1] + j)) masks = np.zeros((image.shape[0], image.shape[1], len(centers))) for i, c in enumerate(centers): - mask, edge = cell_magic_wand_single_point(image, c, min_radius, max_radius, - roughness=roughness, zoom_factor=zoom_factor) - masks[:,:,i] = mask + mask, edge = cell_magic_wand_single_point(image, + c, + min_radius, + max_radius, + roughness=roughness, + zoom_factor=zoom_factor) + masks[:, :, i] = mask mean_mask = np.mean(masks, axis=2) final_mask = (mean_mask > 0.5).astype(int) return final_mask -def cell_magic_wand_3d(image_3d, center, min_radius, max_radius, - roughness=2, zoom_factor=1, center_range=2, z_step=1): +def cell_magic_wand_3d(image_3d, center, min_radius, max_radius, roughness=2, zoom_factor=1, center_range=2, z_step=1): '''Robust cell magic wand tool for 3D images with dimensions (z, x, y) - default for tifffile.load. 
This functions runs the robust wand tool on each z slice in the image and returns the mean mask thresholded to 0.5''' - masks = np.zeros((int(image_3d.shape[0]/z_step), image_3d.shape[1], image_3d.shape[2])) - for s in range(int(image_3d.shape[0]/z_step)): - mask = cell_magic_wand(image_3d[s*z_step,:,:], center, min_radius, max_radius, - roughness=roughness, zoom_factor=zoom_factor, + masks = np.zeros((int(image_3d.shape[0] / z_step), image_3d.shape[1], image_3d.shape[2])) + for s in range(int(image_3d.shape[0] / z_step)): + mask = cell_magic_wand(image_3d[s * z_step, :, :], + center, + min_radius, + max_radius, + roughness=roughness, + zoom_factor=zoom_factor, center_range=center_range) - masks[s,:,:] = mask + masks[s, :, :] = mask mean_mask = np.mean(masks, axis=0) final_mask = (mean_mask > 0.5).astype(int) return final_mask diff --git a/caiman/external/houghvst/demo_VST.py b/caiman/external/houghvst/demo_VST.py index fb224080d..0902049ec 100644 --- a/caiman/external/houghvst/demo_VST.py +++ b/caiman/external/houghvst/demo_VST.py @@ -7,6 +7,7 @@ import caiman as cm from caiman.paths import caiman_datadir + #%% def main(): fnames = [os.path.join(caiman_datadir(), 'example_movies', 'demoMovie.tif')] @@ -33,13 +34,13 @@ def main(): movie_gat = compute_gat(movie, sigma_sq, alpha=alpha) # save movie_gat here - movie_gat_inv = compute_inverse_gat(movie_gat, sigma_sq, alpha=alpha, - method='asym') + movie_gat_inv = compute_inverse_gat(movie_gat, sigma_sq, alpha=alpha, method='asym') # save movie_gat_inv here return movie, movie_gat_inv + #%% movie, movie_gat_inv = main() #%% -cm.concatenate([movie,movie_gat_inv],axis=1).play(gain = 10, magnification=4) +cm.concatenate([movie, movie_gat_inv], axis=1).play(gain=10, magnification=4) diff --git a/caiman/external/houghvst/estimation.py b/caiman/external/houghvst/estimation.py index d5a7b34ae..d3331c1e8 100644 --- a/caiman/external/houghvst/estimation.py +++ b/caiman/external/houghvst/estimation.py @@ -6,7 +6,7 @@ def compute_score(sigmas, tol): x = (sigmas - 1) / tol - weights = np.exp(-(x ** 2)) + weights = np.exp(-(x**2)) score = np.sum(weights) if score > 0: sigma_est = np.sum(sigmas * weights) / score @@ -15,9 +15,7 @@ def compute_score(sigmas, tol): return sigma_est, score -AccumulatorSpace = namedtuple('AccumulatorSpace', ['score', 'sigma_sq_range', - 'alpha_range', 'sigma_sq', - 'alpha']) +AccumulatorSpace = namedtuple('AccumulatorSpace', ['score', 'sigma_sq_range', 'alpha_range', 'sigma_sq', 'alpha']) def hough_estimation(blocks, sigma_sq_range, alpha_range, tol=1e-2): @@ -39,8 +37,7 @@ def hough_estimation(blocks, sigma_sq_range, alpha_range, tol=1e-2): print('\tHighest score=', score[best_params[0], best_params[1]]) - acc = AccumulatorSpace(score, sigma_sq_range, alpha_range, sigma_sq_est, - alpha_est) + acc = AccumulatorSpace(score, sigma_sq_range, alpha_range, sigma_sq_est, alpha_est) return sigma_sq_est, alpha_est, acc @@ -71,9 +68,7 @@ def initial_estimate_sigma_alpha(blocks): EstimationResult = namedtuple('EstimationResult', - ['alpha_init', 'sigma_sq_init', - 'alpha', 'sigma_sq', - 'acc_space_init', 'acc_space']) + ['alpha_init', 'sigma_sq_init', 'alpha', 'sigma_sq', 'acc_space_init', 'acc_space']) def estimate_vst_movie(movie, block_size=8, stride=8): @@ -91,29 +86,21 @@ def estimate_vst_image(img, block_size=8, stride=8): def estimate_vst_blocks(blocks): sigma_sq_init, alpha_init = initial_estimate_sigma_alpha(blocks) - print('\tinitial alpha = {}; sigma^2 = {}'.format(alpha_init, - sigma_sq_init)) + print('\tinitial alpha = {}; 
sigma^2 = {}'.format(alpha_init, sigma_sq_init)) diff_s = np.maximum(2e3, np.abs(sigma_sq_init)) diff_a = alpha_init * 0.9 - sigma_sq_range = np.linspace(sigma_sq_init - diff_s, sigma_sq_init + diff_s, - num=100) - alpha_range = np.linspace(alpha_init - diff_a, alpha_init + diff_a, - num=100) - sigma_sq_mid, alpha_mid, acc_init = hough_estimation(blocks, sigma_sq_range, - alpha_range) + sigma_sq_range = np.linspace(sigma_sq_init - diff_s, sigma_sq_init + diff_s, num=100) + alpha_range = np.linspace(alpha_init - diff_a, alpha_init + diff_a, num=100) + sigma_sq_mid, alpha_mid, acc_init = hough_estimation(blocks, sigma_sq_range, alpha_range) print('\tmid alpha = {}; sigma^2 = {}'.format(alpha_mid, sigma_sq_mid)) diff_s /= 10 diff_a /= 4 - sigma_sq_range = np.linspace(sigma_sq_mid - diff_s, sigma_sq_mid + diff_s, - num=100) - alpha_range = np.linspace(alpha_mid - diff_a, alpha_mid + diff_a, - num=100) - sigma_sq_final, alpha_final, acc = hough_estimation(blocks, sigma_sq_range, - alpha_range) + sigma_sq_range = np.linspace(sigma_sq_mid - diff_s, sigma_sq_mid + diff_s, num=100) + alpha_range = np.linspace(alpha_mid - diff_a, alpha_mid + diff_a, num=100) + sigma_sq_final, alpha_final, acc = hough_estimation(blocks, sigma_sq_range, alpha_range) print('\talpha = {}; sigma^2 = {}'.format(alpha_final, sigma_sq_final)) - return EstimationResult(alpha_init, sigma_sq_init, alpha_final, - sigma_sq_final, acc_init, acc) + return EstimationResult(alpha_init, sigma_sq_init, alpha_final, sigma_sq_final, acc_init, acc) diff --git a/caiman/external/houghvst/gat.py b/caiman/external/houghvst/gat.py index 132b7512d..f0a07ec61 100644 --- a/caiman/external/houghvst/gat.py +++ b/caiman/external/houghvst/gat.py @@ -19,7 +19,7 @@ def compute_gat(arr, sigma_sq, alpha=1): :param alpha: scaling factor of the Poisson noise component :return: variance-stabilized array """ - v = np.maximum((arr / alpha) + (3. / 8.) + sigma_sq / (alpha ** 2), 0) + v = np.maximum((arr / alpha) + (3. / 8.) + sigma_sq / (alpha**2), 0) f = 2. * np.sqrt(v) return f @@ -47,19 +47,16 @@ def compute_inverse_gat(arr, sigma_sq, m=0, alpha=1, method='asym'): approximation of the exact unbiased inverse. :return: inverse variance-stabilized array """ - sigma_sq /= alpha ** 2 + sigma_sq /= alpha**2 if method == 'closed-form': # closed-form approximation of the exact unbiased inverse: arr_trunc = np.maximum(arr, 0.8) - inverse = ((arr_trunc / 2.) ** 2 - + 0.25 * np.sqrt(1.5) * arr_trunc ** -1 - - (11. / 8.) * arr_trunc ** -2 - + (5. / 8.) * np.sqrt(1.5) * arr_trunc ** -3 - - (1. / 8.) - sigma_sq) + inverse = ((arr_trunc / 2.)**2 + 0.25 * np.sqrt(1.5) * arr_trunc**-1 - (11. / 8.) * arr_trunc**-2 + + (5. / 8.) * np.sqrt(1.5) * arr_trunc**-3 - (1. / 8.) - sigma_sq) elif method == 'asym': # asymptotic approximation of the exact unbiased inverse: - inverse = (arr / 2.) ** 2 - 1. / 8 - sigma_sq + inverse = (arr / 2.)**2 - 1. 
/ 8 - sigma_sq # inverse = np.maximum(0, inverse) else: raise NotImplementedError('Only supports the closed-form') diff --git a/caiman/external/houghvst/measures.py b/caiman/external/houghvst/measures.py index e6412245e..467a799d7 100644 --- a/caiman/external/houghvst/measures.py +++ b/caiman/external/houghvst/measures.py @@ -2,15 +2,17 @@ from houghvst.estimation import gat -def compare_variance_stabilization(img, img_noisy, sigma_gt, alpha_gt, - sigma_est, alpha_est): - assess_variance_stabilization(img, img_noisy, sigma_gt, alpha_gt, - heading='Ground truth') +def compare_variance_stabilization(img, img_noisy, sigma_gt, alpha_gt, sigma_est, alpha_est): + assess_variance_stabilization(img, img_noisy, sigma_gt, alpha_gt, heading='Ground truth') assess_variance_stabilization(img, img_noisy, sigma_est, alpha_est) -def assess_variance_stabilization(img, img_noisy, sigma, alpha, - correct_noiseless=True, verbose=True, +def assess_variance_stabilization(img, + img_noisy, + sigma, + alpha, + correct_noiseless=True, + verbose=True, heading='Estimated'): if correct_noiseless: img = alpha * img diff --git a/caiman/external/houghvst/plotting.py b/caiman/external/houghvst/plotting.py index c1dee0747..0ea69e8d8 100644 --- a/caiman/external/houghvst/plotting.py +++ b/caiman/external/houghvst/plotting.py @@ -8,6 +8,7 @@ class Cut: + def __init__(self, orientation, idx, color): self.orientation = orientation self.idx = idx @@ -15,6 +16,7 @@ def __init__(self, orientation, idx, color): class PlotPatch: + def __init__(self, box, color): self.box = box self.color = color @@ -57,8 +59,7 @@ def test(x): ax.plot(curve, color=c.color, alpha=0.5) -def plot_patches_overlay(img, patches, selection=[], cmap='gray', - normalize=False): +def plot_patches_overlay(img, patches, selection=[], cmap='gray', normalize=False): if normalize: vmin, vmax = img.min(), img.max() else: @@ -69,12 +70,7 @@ def plot_patches_overlay(img, patches, selection=[], cmap='gray', else: subplot_idx = 111 fig = plt.gcf() - grid = ImageGrid(fig, subplot_idx, - nrows_ncols=(1, 1), - direction="row", - axes_pad=0.05, - add_all=True, - share_all=True) + grid = ImageGrid(fig, subplot_idx, nrows_ncols=(1, 1), direction="row", axes_pad=0.05, add_all=True, share_all=True) grid[0].imshow(img, vmin=vmin, vmax=vmax, cmap=cmap) grid[0].axis('off') for p in patches: @@ -82,28 +78,32 @@ def plot_patches_overlay(img, patches, selection=[], cmap='gray', zorder = 1 else: zorder = 2 - rect = mpatches.Rectangle((p.box[1], p.box[0]), p.box[2], p.box[3], - linewidth=2, edgecolor=p.color, - facecolor='none', zorder=zorder) + rect = mpatches.Rectangle((p.box[1], p.box[0]), + p.box[2], + p.box[3], + linewidth=2, + edgecolor=p.color, + facecolor='none', + zorder=zorder) grid[0].add_artist(rect) if len(selection) == 0: nrows = int(np.ceil(np.sqrt(len(selection)))) ncols = int(np.ceil(np.sqrt(len(selection)))) - grid = ImageGrid(fig, 122, - nrows_ncols=(nrows, ncols), - direction="row", - axes_pad=0.15, - add_all=True, - share_all=True) + grid = ImageGrid(fig, + 122, + nrows_ncols=(nrows, ncols), + direction="row", + axes_pad=0.15, + add_all=True, + share_all=True) for ax, idx in zip(grid, selection): p = patches[idx] - crop = img[p.box[0]: p.box[0] + p.box[2], p.box[1]: p.box[1] + p.box[3]] + crop = img[p.box[0]:p.box[0] + p.box[2], p.box[1]:p.box[1] + p.box[3]] - plot_patch(crop, edgecolor=p.color, ax=ax, vmin=vmin, vmax=vmax, - cmap=cmap) + plot_patch(crop, edgecolor=p.color, ax=ax, vmin=vmin, vmax=vmax, cmap=cmap) with warnings.catch_warnings(): 
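# What assess_variance_stabilization() in measures.py above is after can be
# stated in one line: with the right (alpha, sigma_sq) the stabilized noise
# has std ~1 at every intensity. A standalone empirical check of that
# property (the parameter values below are illustrative only, not taken from
# the code above):
import numpy as np

rng = np.random.default_rng(0)
alpha, sigma = 2.0, 3.0
counts = rng.poisson(50.0, 100000)                     # Poisson component
noisy = alpha * counts + rng.normal(0, sigma, 100000)  # plus Gaussian noise
v = np.maximum(noisy / alpha + 3. / 8. + sigma**2 / alpha**2, 0)
print(np.std(2. * np.sqrt(v)))                         # ~1.0 when the VST fits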
warnings.filterwarnings("ignore", category=UserWarning) @@ -121,15 +121,17 @@ def plot_patch(patch, edgecolor=None, ax=None, vmin=None, vmax=None, cmap=None): ax.spines[loc].set_linewidth(4) ax.tick_params(axis='both', which='both', - left='off', right='off', - top='off', bottom='off', - labelleft='off', labelbottom='off') + left='off', + right='off', + top='off', + bottom='off', + labelleft='off', + labelbottom='off') return im_plot -def plot_vst_accumulator_space(acc_space, cmap=cc.m_fire, ax=None, - plot_estimates=False, plot_focus=False): +def plot_vst_accumulator_space(acc_space, cmap=cc.m_fire, ax=None, plot_estimates=False, plot_focus=False): if ax is None: ax = plt.gca() @@ -137,12 +139,11 @@ def plot_vst_accumulator_space(acc_space, cmap=cc.m_fire, ax=None, alpha_step1 = acc_space.alpha_range[-1] - acc_space.alpha_range[-2] sigma_step0 = acc_space.sigma_sq_range[1] - acc_space.sigma_sq_range[0] sigma_step1 = acc_space.sigma_sq_range[-1] - acc_space.sigma_sq_range[-2] - im_plt = ax.imshow(acc_space.score, cmap=cmap, - extent=(acc_space.alpha_range[0] - alpha_step0 / 2, - acc_space.alpha_range[-1] + alpha_step1 / 2, + im_plt = ax.imshow(acc_space.score, + cmap=cmap, + extent=(acc_space.alpha_range[0] - alpha_step0 / 2, acc_space.alpha_range[-1] + alpha_step1 / 2, acc_space.sigma_sq_range[-1] + sigma_step1 / 2, - acc_space.sigma_sq_range[0] - sigma_step0 / 2) - ) + acc_space.sigma_sq_range[0] - sigma_step0 / 2)) ax.axis('tight') ax.set_xlabel(r'$\alpha$', fontsize='xx-large') ax.set_ylabel(r'$\beta$', fontsize='xx-large') @@ -150,11 +151,12 @@ def plot_vst_accumulator_space(acc_space, cmap=cc.m_fire, ax=None, if plot_focus: len_a = (acc_space.alpha_range[-1] - acc_space.alpha_range[0]) / 4 - len_s = (acc_space.sigma_sq_range[-1] - - acc_space.sigma_sq_range[0]) / 10 - rect = mpatches.Rectangle((acc_space.alpha - len_a / 2, - acc_space.sigma_sq - len_s / 2), - len_a, len_s, ec='#0000cd', fc='none', + len_s = (acc_space.sigma_sq_range[-1] - acc_space.sigma_sq_range[0]) / 10 + rect = mpatches.Rectangle((acc_space.alpha - len_a / 2, acc_space.sigma_sq - len_s / 2), + len_a, + len_s, + ec='#0000cd', + fc='none', linewidth=3) ax.add_artist(rect) @@ -162,11 +164,18 @@ def plot_vst_accumulator_space(acc_space, cmap=cc.m_fire, ax=None, tag_str = r'$\alpha={:.2f}$, $\beta={:.2f}$' bbox_props = dict(boxstyle='round', fc='w', ec='#0000cd', alpha=0.5) ax.annotate(tag_str.format(acc_space.alpha, acc_space.sigma_sq), - xy=(acc_space.alpha, acc_space.sigma_sq), xycoords='data', - xytext=(0.95, 0.05), textcoords='axes fraction', - va='bottom', ha='right', color='#0000cd', size='xx-large', - bbox=bbox_props, - arrowprops=dict(facecolor='#0000cd', edgecolor='none', - shrink=0., width=2, headwidth=3, - headlength=3) - ) + xy=(acc_space.alpha, acc_space.sigma_sq), + xycoords='data', + xytext=(0.95, 0.05), + textcoords='axes fraction', + va='bottom', + ha='right', + color='#0000cd', + size='xx-large', + bbox=bbox_props, + arrowprops=dict(facecolor='#0000cd', + edgecolor='none', + shrink=0., + width=2, + headwidth=3, + headlength=3)) diff --git a/caiman/external/houghvst/regions.py b/caiman/external/houghvst/regions.py index 978bd11b5..070799925 100644 --- a/caiman/external/houghvst/regions.py +++ b/caiman/external/houghvst/regions.py @@ -15,8 +15,7 @@ def patch_selection(img, n_patches, patch_size=32, overlapping=False): masks = [] while len(patches) < n_patches: if not np.any(starting_points): - print('Could only produce {} patches ' - 'out of the requested {}'.format(len(patches), n_patches)) + 
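# The non-overlapping branch of patch_selection() below works by erasing a
# (2*patch_size - 1)-wide neighborhood of candidate top-left corners around
# every accepted patch, so no later draw can overlap it. A minimal sketch of
# that exclusion step (the image shape and the accepted corner are made up):
import numpy as np

shape, patch_size = (64, 64), 16
starting_points = np.ones(shape, dtype=bool)
k0, k1 = 20, 30                                   # corner of an accepted patch
sl0 = slice(max(k0 - patch_size + 1, 0), min(k0 + patch_size, shape[0]))
sl1 = slice(max(k1 - patch_size + 1, 0), min(k1 + patch_size, shape[1]))
starting_points[sl0, sl1] = False                 # these corners would overlap
print(starting_points.sum(), 'valid corners left')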
print('Could only produce {} patches ' 'out of the requested {}'.format(len(patches), n_patches)) break k = np.random.choice(idx[starting_points.flat]) @@ -30,16 +29,14 @@ def patch_selection(img, n_patches, patch_size=32, overlapping=False): available[sl0, sl1] = False - sl0 = slice(np.maximum(k0 - patch_size + 1, 0), - np.minimum(k0 + patch_size, shape[0])) - sl1 = slice(np.maximum(k1 - patch_size + 1, 0), - np.minimum(k1 + patch_size, shape[1])) + sl0 = slice(np.maximum(k0 - patch_size + 1, 0), np.minimum(k0 + patch_size, shape[0])) + sl1 = slice(np.maximum(k1 - patch_size + 1, 0), np.minimum(k1 + patch_size, shape[1])) starting_points[sl0, sl1] = False patch_idx.append((k0, k1)) - patches.append(img[k0: k0 + patch_size, k1: k1 + patch_size]) + patches.append(img[k0:k0 + patch_size, k1:k1 + patch_size]) m = np.zeros(img.shape, dtype=np.bool) - m[k0: k0 + patch_size, k1: k1 + patch_size] = True + m[k0:k0 + patch_size, k1:k1 + patch_size] = True masks.append(m) patch_idx = np.array(patch_idx) @@ -50,8 +47,7 @@ def patch_selection(img, n_patches, patch_size=32, overlapping=False): else: k = np.random.choice(idx, size=n_patches) patch_idx = np.vstack(np.unravel_index(k, shape)).T - patches = [img[k0: k0 + patch_size, k1: k1 + patch_size] - for k0, k1 in patch_idx] + patches = [img[k0:k0 + patch_size, k1:k1 + patch_size] for k0, k1 in patch_idx] patches = np.array(patches) return patch_idx, patches diff --git a/caiman/external/houghvst/stats.py b/caiman/external/houghvst/stats.py index 35b1ff6d3..cb678af95 100644 --- a/caiman/external/houghvst/stats.py +++ b/caiman/external/houghvst/stats.py @@ -16,8 +16,7 @@ def gaussian_noise(arr, sigma): def std_mad(a, loc=None, axis=None): if loc is None: loc = np.median(a, axis=axis, keepdims=True) - return np.median(np.abs(a - loc), - axis=axis) / 0.6744897501960817 + return np.median(np.abs(a - loc), axis=axis) / 0.6744897501960817 def std_sn(a): @@ -57,7 +56,7 @@ def half_sample_mode(x, sort=True, axis=None): smallest_range_idx = np.argmin(ranges) # Now repeat the procedure on the half that spans the smallest range - x_subset = sorted_x[smallest_range_idx: (smallest_range_idx+half_idx)] + x_subset = sorted_x[smallest_range_idx:(smallest_range_idx + half_idx)] return half_sample_mode(x_subset, sort=False) @@ -90,5 +89,5 @@ def half_range_mode(x, sort=True): smallest_range_idx = np.argmin(ranges) # Now repeat the procedure on the half that spans the smallest range - x_subset = sorted_x[smallest_range_idx: (smallest_range_idx+half_idx)] - return half_sample_mode(x_subset, sort=False) \ No newline at end of file + x_subset = sorted_x[smallest_range_idx:(smallest_range_idx + half_idx)] + return half_sample_mode(x_subset, sort=False) diff --git a/caiman/gui/caiman_gui.py b/caiman/gui/caiman_gui.py index ebf80ddca..dfa4382f4 100644 --- a/caiman/gui/caiman_gui.py +++ b/caiman/gui/caiman_gui.py @@ -1,6 +1,5 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- - """ author: Pengcheng Zhou @@ -18,6 +17,7 @@ class FileOpen(QWidget): + def __init__(self, parent=None, pars=None, directory='.'): super(FileOpen, self).__init__(parent) @@ -85,9 +85,7 @@ def __init__(self, parent=None, pars=None, directory='.'): self.setWindowTitle("choose video data for processing") def load_from_file(self): - self.file_name, _ = QFileDialog.getOpenFileName(QFileDialog(), - "open file", - self.dir_folder) + self.file_name, _ = QFileDialog.getOpenFileName(QFileDialog(), "open file", self.dir_folder) self.dir_folder, file_name = os.path.split(self.file_name) self.name, self.type = 
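# std_mad() in stats.py above divides the median absolute deviation by
# 0.6744897501960817 = Phi^-1(3/4), the MAD of a standard normal, which makes
# it a consistent estimator of the Gaussian std that shrugs off outliers.
# A quick standalone demonstration:
import numpy as np

rng = np.random.default_rng(1)
a = rng.normal(0., 2., 10000)
a[:100] = 1e6                        # 1% gross outliers
mad_std = np.median(np.abs(a - np.median(a))) / 0.6744897501960817
print(np.std(a), mad_std)            # plain std blows up; MAD stays ~2.0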
os.path.splitext(file_name) self.dir_line.setText(self.dir_folder) @@ -96,8 +94,7 @@ def load_from_file(self): def done(self): self.fr = float(self.fr_line.text()) - self.pixel_size = [float(i) - for i in self.pixel_size_line.text().split(',')] + self.pixel_size = [float(i) for i in self.pixel_size_line.text().split(',')] self.close() @@ -114,12 +111,8 @@ def open_file(directory='.'): temp = file_ui.pixel_size_line.text() file_ui.pixel_size = [float(i) for i in temp.split(',')] - file_info = OrderedDict([('file_name', file_ui.file_name), - ('dir', file_ui.dir_folder), - ('name', file_ui.name), - ('type', file_ui.type), - ('Fs', file_ui.fr), - ('pixel_size', file_ui.pixel_size)]) + file_info = OrderedDict([('file_name', file_ui.file_name), ('dir', file_ui.dir_folder), ('name', file_ui.name), + ('type', file_ui.type), ('Fs', file_ui.fr), ('pixel_size', file_ui.pixel_size)]) return file_info diff --git a/caiman/tests/comparison/comparison.py b/caiman/tests/comparison/comparison.py index 5552aab14..9f882b4b7 100644 --- a/caiman/tests/comparison/comparison.py +++ b/caiman/tests/comparison/comparison.py @@ -1,5 +1,4 @@ #!/usr/bin/env python - """ compare how the elements behave We create a folder ground truth that possess the same thing than the other @@ -24,7 +23,6 @@ # # - import copy import datetime import logging @@ -38,10 +36,10 @@ # You can log to a file using the filename parameter, or make the output more or less # verbose by setting level to logging.DEBUG, logging.INFO, logging.WARNING, or logging.ERROR -logging.basicConfig(format= - "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s", - # filename="/tmp/caiman.log", - level=logging.DEBUG) +logging.basicConfig( + format="%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s", + # filename="/tmp/caiman.log", + level=logging.DEBUG) import caiman as cm from caiman.paths import caiman_datadir @@ -129,33 +127,26 @@ class you instanciate to compare the different parts of CaImAn def __init__(self): - self.comparison = {'rig_shifts': {}, - 'pwrig_shifts': {}, - 'cnmf_on_patch': {}, - 'cnmf_full_frame': {}, - } + self.comparison = { + 'rig_shifts': {}, + 'pwrig_shifts': {}, + 'cnmf_on_patch': {}, + 'cnmf_full_frame': {}, + } self.comparison['rig_shifts'] = { 'ourdata': None, 'timer': None, - 'sensitivity': 0.001 # the sensitivity USER TO CHOOSE - } - # apparently pwrig shift are not used any more and the comparison are useless - # self.comparison['pwrig_shifts']={ - # 'ourdata': None, - # 'timer': None, - # 'sensitivity': 0.001 - # } - self.comparison['cnmf_on_patch'] = { - 'ourdata': None, - 'timer': None, - 'sensitivity': 0.01 - } - self.comparison['cnmf_full_frame'] = { - 'ourdata': None, - 'timer': None, - 'sensitivity': 0.01 + 'sensitivity': 0.001 # the sensitivity USER TO CHOOSE } + # apparently pwrig shift are not used any more and the comparison are useless + # self.comparison['pwrig_shifts']={ + # 'ourdata': None, + # 'timer': None, + # 'sensitivity': 0.001 + # } + self.comparison['cnmf_on_patch'] = {'ourdata': None, 'timer': None, 'sensitivity': 0.01} + self.comparison['cnmf_full_frame'] = {'ourdata': None, 'timer': None, 'sensitivity': 0.01} self.cnmpatch = None self.information = None @@ -241,7 +232,6 @@ def save_with_compare(self, istruth=False, params=None, dview=None, Cn=None): 'cnmf_full_frame': self.comparison['cnmf_full_frame']['timer'], 'rig_shifts': self.comparison['rig_shifts']['timer'] } - } rootdir = os.path.abspath(cm.__path__[0])[:-7] @@ 
-250,18 +240,21 @@ def save_with_compare(self, istruth=False, params=None, dview=None, Cn=None):
         # OPENINGS
         # if we want to set this data as truth
         if istruth:
-            # we just save it
+            # we just save it
             if os._exists(file_path):
                 os.remove(file_path)
                 logging.debug("nothing to remove\n")
-            np.savez_compressed(file_path, information=information, A_full=self.comparison['cnmf_full_frame']['ourdata'][0],
-                                C_full=self.comparison['cnmf_full_frame']['ourdata'][
-                                    1], A_patch=self.comparison['cnmf_on_patch']['ourdata'][0],
-                                C_patch=self.comparison['cnmf_on_patch']['ourdata'][1], rig_shifts=self.comparison['rig_shifts']['ourdata'])
+            np.savez_compressed(file_path,
+                                information=information,
+                                A_full=self.comparison['cnmf_full_frame']['ourdata'][0],
+                                C_full=self.comparison['cnmf_full_frame']['ourdata'][1],
+                                A_patch=self.comparison['cnmf_on_patch']['ourdata'][0],
+                                C_patch=self.comparison['cnmf_on_patch']['ourdata'][1],
+                                rig_shifts=self.comparison['rig_shifts']['ourdata'])
             logging.info('we now have ground truth\n')
             return
-        else:  # if not we create a comparison first
+        else:  # if not, we create a comparison first
             try:
                 with np.load(file_path, encoding='latin1', allow_pickle=True) as dt:
                     rig_shifts = dt['rig_shifts'][()]
@@ -270,18 +263,21 @@ def save_with_compare(self, istruth=False, params=None, dview=None, Cn=None):
                     C_full = dt['C_full'][()]
                     C_patch = dt['C_patch'][()]
                     data = dt['information'][()]
-            # if we cannot manage to open it or it doesnt exist:
+            # if we cannot manage to open it or it doesn't exist:
             except (IOError, OSError):
-                # we save but we explain why there were a problem
+                # we save but we explain why there was a problem
                 logging.warning('we were not able to read the file ' + str(file_path) + ' to compare it\n')
                 file_path = os.path.join(caiman_datadir(), "testdata", "NC" + dt + ".npz")
-                np.savez_compressed(file_path, information=information, A_full=self.comparison['cnmf_full_frame']['ourdata'][0],
-                                    C_full=self.comparison['cnmf_full_frame']['ourdata'][
-                                        1], A_patch=self.comparison['cnmf_on_patch']['ourdata'][0],
-                                    C_patch=self.comparison['cnmf_on_patch']['ourdata'][1], rig_shifts=self.comparison['rig_shifts']['ourdata'])
+                np.savez_compressed(file_path,
+                                    information=information,
+                                    A_full=self.comparison['cnmf_full_frame']['ourdata'][0],
+                                    C_full=self.comparison['cnmf_full_frame']['ourdata'][1],
+                                    A_patch=self.comparison['cnmf_on_patch']['ourdata'][0],
+                                    C_patch=self.comparison['cnmf_on_patch']['ourdata'][1],
+                                    rig_shifts=self.comparison['rig_shifts']['ourdata'])
                 return
-        # creating the FOLDER to store our data
-        # XXX Is this still hooked up to anything?
+        # creating the FOLDER to store our data
+        # XXX Is this still hooked up to anything?
         i = 0
         dr = os.path.join(caiman_datadir(), "testdata")
         for name in os.listdir(dr):
@@ -290,38 +286,41 @@ def save_with_compare(self, istruth=False, params=None, dview=None, Cn=None):
         if not os.path.exists(dr + i):
             os.makedirs(dr + i)
         information.update({'diff': {}})
-        information.update({'differences': {
-            'proc': False,
-            'params_movie': False,
-            'params_cnm': False}})
+        information.update({'differences': {'proc': False, 'params_movie': False, 'params_cnm': False}})
-        # INFORMATION FOR THE USER
+        # INFORMATION FOR THE USER
         if data['processor'] != information['processor']:
             logging.info("you don't have the same processor as groundtruth.. the time difference can vary"
-                         " because of that\n try recreate your own groundtruth before testing.
Compare: " + str(data['processor']) + " to " + str(information['processor']) + "\n") + " because of that\n try recreate your own groundtruth before testing. Compare: " + + str(data['processor']) + " to " + str(information['processor']) + "\n") information['differences']['proc'] = True if data['params'] != information['params']: logging.warning("you are not using the same movie parameters... Things can go wrong") logging.warning('you must use the same parameters to compare your version of the code with ' - 'the groundtruth one. look for the groundtruth parameters with the see() method\n') + 'the groundtruth one. look for the groundtruth parameters with the see() method\n') information['differences']['params_movie'] = True - # We must cleanup some fields to permit an accurate comparison + # We must cleanup some fields to permit an accurate comparison if not normalised_compare_cnmpatches(data['cnmpatch'], cnmpatch): if data['cnmpatch'].keys() != cnmpatch.keys(): - logging.error('DIFFERENCES IN THE FIELDS OF CNMF') # TODO: Now that we have deeply nested data structures, find a module that gives you tight differences. - diffkeys = [k for k in data['cnmpatch'] - if data['cnmpatch'][k] != cnmpatch[k]] + logging.error( + 'DIFFERENCES IN THE FIELDS OF CNMF' + ) # TODO: Now that we have deeply nested data structures, find a module that gives you tight differences. + diffkeys = [k for k in data['cnmpatch'] if data['cnmpatch'][k] != cnmpatch[k]] for k in diffkeys: logging.info("{}:{}->{}".format(k, data['cnmpatch'][k], cnmpatch[k])) - logging.warning( - 'you are not using the same parameters in your cnmf on patches initialization\n') + logging.warning('you are not using the same parameters in your cnmf on patches initialization\n') information['differences']['params_cnm'] = True # for rigid # plotting part information['diff'].update({ - 'rig': plotrig(init=rig_shifts, curr=self.comparison['rig_shifts']['ourdata'], timer=self.comparison['rig_shifts']['timer'] - data['timer']['rig_shifts'], sensitivity=self.comparison['rig_shifts']['sensitivity'])}) + 'rig': + plotrig(init=rig_shifts, + curr=self.comparison['rig_shifts']['ourdata'], + timer=self.comparison['rig_shifts']['timer'] - data['timer']['rig_shifts'], + sensitivity=self.comparison['rig_shifts']['sensitivity']) + }) #try: # pl.gcf().savefig(dr + str(i) + '/' + 'rigidcorrection.pdf') # pl.close() @@ -330,49 +329,61 @@ def save_with_compare(self, istruth=False, params=None, dview=None, Cn=None): # for cnmf on patch information['diff'].update({ - 'cnmpatch': cnmf(Cn=Cn, A_gt=A_patch, - A_test=self.comparison['cnmf_on_patch']['ourdata'][0], - C_gt=C_patch, - C_test=self.comparison['cnmf_on_patch']['ourdata'][1], - dview=dview, sensitivity=self.comparison[ - 'cnmf_on_patch']['sensitivity'], - dims_test=dims_test, dims_gt=dims_gt, - timer=self.comparison['cnmf_on_patch']['timer'] - data['timer']['cnmf_on_patch'])}) + 'cnmpatch': + cnmf(Cn=Cn, + A_gt=A_patch, + A_test=self.comparison['cnmf_on_patch']['ourdata'][0], + C_gt=C_patch, + C_test=self.comparison['cnmf_on_patch']['ourdata'][1], + dview=dview, + sensitivity=self.comparison['cnmf_on_patch']['sensitivity'], + dims_test=dims_test, + dims_gt=dims_gt, + timer=self.comparison['cnmf_on_patch']['timer'] - data['timer']['cnmf_on_patch']) + }) #try: # pl.gcf().savefig(dr + i + '/' + 'onpatch.pdf') # pl.close() #except: # pass - -# CNMF FULL FRAME + # CNMF FULL FRAME information['diff'].update({ - 'cnmfull': cnmf(Cn=Cn, A_gt=A_full, - A_test=self.comparison['cnmf_full_frame']['ourdata'][0], - C_gt=C_full, 
- C_test=self.comparison['cnmf_full_frame']['ourdata'][1], - dview=dview, sensitivity=self.comparison[ - 'cnmf_full_frame']['sensitivity'], - dims_test=dims_test, dims_gt=dims_gt, - timer=self.comparison['cnmf_full_frame']['timer'] - data['timer']['cnmf_full_frame'])}) + 'cnmfull': + cnmf(Cn=Cn, + A_gt=A_full, + A_test=self.comparison['cnmf_full_frame']['ourdata'][0], + C_gt=C_full, + C_test=self.comparison['cnmf_full_frame']['ourdata'][1], + dview=dview, + sensitivity=self.comparison['cnmf_full_frame']['sensitivity'], + dims_test=dims_test, + dims_gt=dims_gt, + timer=self.comparison['cnmf_full_frame']['timer'] - data['timer']['cnmf_full_frame']) + }) #try: # pl.gcf().savefig(dr + i + '/' + 'cnmfull.pdf') # pl.close() #except: # pass -# Saving of everything + # Saving of everything target_dir = os.path.join(caiman_datadir(), "testdata", i) if not os.path.exists(target_dir): - os.makedirs(os.path.join(caiman_datadir(), "testdata", i)) # XXX If we ever go Python3, just use the exist_ok flag to os.makedirs + os.makedirs(os.path.join(caiman_datadir(), "testdata", + i)) # XXX If we ever go Python3, just use the exist_ok flag to os.makedirs file_path = os.path.join(target_dir, i + ".npz") - np.savez_compressed(file_path, information=information, A_full=self.comparison['cnmf_full_frame']['ourdata'][0], - C_full=self.comparison['cnmf_full_frame']['ourdata'][ - 1], A_patch=self.comparison['cnmf_on_patch']['ourdata'][0], - C_patch=self.comparison['cnmf_on_patch']['ourdata'][1], rig_shifts=self.comparison['rig_shifts']['ourdata']) + np.savez_compressed(file_path, + information=information, + A_full=self.comparison['cnmf_full_frame']['ourdata'][0], + C_full=self.comparison['cnmf_full_frame']['ourdata'][1], + A_patch=self.comparison['cnmf_on_patch']['ourdata'][0], + C_patch=self.comparison['cnmf_on_patch']['ourdata'][1], + rig_shifts=self.comparison['rig_shifts']['ourdata']) self.information = information + def see(filename=None): """shows you the important data about a certain test file ( just give the number or name) @@ -435,13 +446,27 @@ def cnmf(Cn, A_gt, A_test, C_gt, C_test, dims_gt, dims_test, dview=None, sensiti A_test = A_test.toarray() # coo sparse matrix A_gt = A_gt.toarray() - # proceed to a trhreshold - A_test_thr = cm.source_extraction.cnmf.spatial.threshold_components( - A_test, dims_test, medw=None, thr_method='max', maxthr=0.2, nrgthr=0.99, extract_cc=True, - se=None, ss=None, dview=dview) - A_gt_thr = cm.source_extraction.cnmf.spatial.threshold_components( - A_gt, dims_gt, medw=None, thr_method='max', maxthr=0.2, nrgthr=0.99, extract_cc=True, - se=None, ss=None, dview=dview) + # proceed to a trhreshold + A_test_thr = cm.source_extraction.cnmf.spatial.threshold_components(A_test, + dims_test, + medw=None, + thr_method='max', + maxthr=0.2, + nrgthr=0.99, + extract_cc=True, + se=None, + ss=None, + dview=dview) + A_gt_thr = cm.source_extraction.cnmf.spatial.threshold_components(A_gt, + dims_gt, + medw=None, + thr_method='max', + maxthr=0.2, + nrgthr=0.99, + extract_cc=True, + se=None, + ss=None, + dview=dview) # compute C using this A thr A_test_thr = A_test_thr.toarray() > 0 @@ -451,16 +476,12 @@ def cnmf(Cn, A_gt, A_test, C_gt, C_test, dims_gt, dims_test, dview=None, sensiti C_gt_thr = C_gt # we would also like the difference in the number of neurons diffneur = A_test_thr.shape[1] - A_gt_thr.shape[1] -# print(diffneur+1) + # print(diffneur+1) # computing the values - C_test_thr = np.array( - [CC.reshape([-1, n_frames_per_bin]).max(1) for CC in C_test_thr]) - C_gt_thr = 
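# The cnmf() comparison helper (continued just below) accepts a run using two
# numbers: one Pearson r per matched (ground truth, test) trace pair, and the
# F1 score of the binary-mask matching; it flags a difference when the norm of
# those correlations falls under the stored sensitivity or F1 dips below 0.98.
# A standalone sketch of that decision:
import numpy as np
from scipy.stats import pearsonr

def traces_differ(C_gt, C_test, pairs, f1, sensitivity=0.01):
    corrs = np.array([pearsonr(C_gt[g], C_test[t])[0] for g, t in pairs])
    return bool(np.linalg.norm(corrs) < sensitivity or f1 < 0.98)

C = np.random.rand(3, 100)
print(traces_differ(C, C + 0.01, [(0, 0), (1, 1), (2, 2)], f1=1.0))  # False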
np.array([CC.reshape([-1, n_frames_per_bin]).max(1) - for CC in C_gt_thr]) - maskgt = A_gt_thr[:, :].reshape([dims_gt[0], dims_gt[1], -1], - order='F').transpose([2, 0, 1]) * 1. - masktest = A_test_thr[:, :].reshape( - [dims_test[0], dims_test[1], -1], order='F').transpose([2, 0, 1]) * 1. + C_test_thr = np.array([CC.reshape([-1, n_frames_per_bin]).max(1) for CC in C_test_thr]) + C_gt_thr = np.array([CC.reshape([-1, n_frames_per_bin]).max(1) for CC in C_gt_thr]) + maskgt = A_gt_thr[:, :].reshape([dims_gt[0], dims_gt[1], -1], order='F').transpose([2, 0, 1]) * 1. + masktest = A_test_thr[:, :].reshape([dims_test[0], dims_test[1], -1], order='F').transpose([2, 0, 1]) * 1. idx_tp_gt, idx_tp_comp, idx_fn_gt, idx_fp_comp, performance_off_on = \ cm.base.rois.nf_match_neurons_in_binary_masks(masks_gt=maskgt, @@ -469,33 +490,33 @@ def cnmf(Cn, A_gt, A_test, C_gt, C_test, dims_gt, dims_test, dview=None, sensiti # the pearson's correlation coefficient of the two Calcium activities thresholded # comparing Calcium activities of all the components that are defined by - corrs = np.array([scipy.stats.pearsonr( - C_gt_thr[gt, :], C_test_thr[comp, :])[0] for gt, comp in zip(idx_tp_gt, idx_tp_comp)]) + corrs = np.array( + [scipy.stats.pearsonr(C_gt_thr[gt, :], C_test_thr[comp, :])[0] for gt, comp in zip(idx_tp_gt, idx_tp_comp)]) # todo, change this test when I will have found why I have one additionnal neuron - isdiff = True if ((np.linalg.norm(corrs) < sensitivity) or ( - performance_off_on['f1_score'] < 0.98)) else False - info = {'isdifferent': int(isdiff), - 'diff_data': {'performance': performance_off_on, - 'corelations': corrs.tolist(), - #performance = dict() - #performance['recall'] = old_div(TP,(TP+FN)) - #performance['precision'] = old_div(TP,(TP+FP)) - #performance['accuracy'] = old_div((TP+TN),(TP+FP+FN+TN)) - #performance['f1_score'] = 2*TP/(2*TP+FP+FN) - 'diffneur': diffneur}, - 'diff_timing': timer} + isdiff = True if ((np.linalg.norm(corrs) < sensitivity) or (performance_off_on['f1_score'] < 0.98)) else False + info = { + 'isdifferent': int(isdiff), + 'diff_data': { + 'performance': performance_off_on, + 'corelations': corrs.tolist(), + #performance = dict() + #performance['recall'] = old_div(TP,(TP+FN)) + #performance['precision'] = old_div(TP,(TP+FP)) + #performance['accuracy'] = old_div((TP+TN),(TP+FP+FN+TN)) + #performance['f1_score'] = 2*TP/(2*TP+FP+FN) + 'diffneur': diffneur + }, + 'diff_timing': timer + } return info def plotrig(init, curr, timer, sensitivity): - diff = np.linalg.norm(np.asarray( - init) - np.asarray(curr)) / np.linalg.norm(init) + diff = np.linalg.norm(np.asarray(init) - np.asarray(curr)) / np.linalg.norm(init) isdiff = diff > sensitivity - info = {'isdifferent': int(isdiff), - 'diff_data': diff, - 'diff_timing': timer} + info = {'isdifferent': int(isdiff), 'diff_data': diff, 'diff_timing': timer} curr = np.asarray(curr).transpose([1, 0]) init = init.transpose([1, 0]) xc = np.arange(curr.shape[1]) @@ -530,9 +551,10 @@ def normalised_compare_cnmpatches(a, b): params_b = mutable_b['params'] if hasattr(params_a, 'online') and hasattr(params_b, 'online'): if 'path_to_model' in params_a.online and 'path_to_model' in params_b.online: - _, params_a.online['path_to_model'] = os.path.split(params_a.online['path_to_model']) # Remove all but the last part + _, params_a.online['path_to_model'] = os.path.split( + params_a.online['path_to_model']) # Remove all but the last part _, params_b.online['path_to_model'] = os.path.split(params_b.online['path_to_model']) - # print("Normalised 
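# plotrig() above reduces the rigid-shift comparison to one relative error:
# ||init - curr|| / ||init||, flagged when it exceeds the stored sensitivity
# (0.001 for rigid shifts). A minimal sketch of that test:
import numpy as np

def shifts_differ(init, curr, sensitivity=0.001):
    init, curr = np.asarray(init, float), np.asarray(curr, float)
    rel_err = np.linalg.norm(init - curr) / np.linalg.norm(init)
    return rel_err > sensitivity, rel_err

print(shifts_differ([[1., 2.], [.5, .25]], [[1., 2.], [.5, .26]]))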
A: " + str(params_a.online['path_to_model'])) - # print("Normalised B: " + str(params_b.online['path_to_model'])) + # print("Normalised A: " + str(params_a.online['path_to_model'])) + # print("Normalised B: " + str(params_b.online['path_to_model'])) return mutable_a == mutable_b diff --git a/caiman/tests/comparison/create_gt.py b/caiman/tests/comparison/create_gt.py index 4080572dd..1f2f788ad 100644 --- a/caiman/tests/comparison/create_gt.py +++ b/caiman/tests/comparison/create_gt.py @@ -1,5 +1,4 @@ #!/usr/bin/env python - """ create a groundtruth for different videos use for nosetests and continuous integration development. @@ -16,7 +15,6 @@ # \date Created on june 2017 # \author: Jeremie KALFON - from builtins import str from builtins import range @@ -51,41 +49,39 @@ from caiman.utils.utils import download_demo # GLOBAL VAR -params_movie = {'fname': ['Sue_2x_3000_40_-46.tif'], - 'niter_rig': 1, - 'max_shifts': (3, 3), # maximum allow rigid shift - 'splits_rig': 20, # for parallelization split the movies in num_splits chuncks across time - # if none all the splits are processed and the movie is saved - 'num_splits_to_process_rig': None, - 'p': 1, # order of the autoregressive system - 'merge_thresh': 0.8, # merging threshold, max correlation allowed - 'rf': 15, # half-size of the patches in pixels. rf=25, patches are 50x50 - 'stride_cnmf': 6, # amounpl.it of overlap between the patches in pixels - 'K': 4, # number of components per patch - # if dendritic. In this case you need to set init_method to - # sparse_nmf - 'is_dendrites': False, - 'init_method': 'greedy_roi', - 'gSig': [4, 4], # expected half size of neurons - 'alpha_snmf': None, # this controls sparsity - 'final_frate': 30, - 'r_values_min_patch': .7, # threshold on space consistency - 'fitness_min_patch': -40, # threshold on time variability - # threshold on time variability (if nonsparse activity) - 'fitness_delta_min_patch': -40, - 'Npeaks': 10, - 'r_values_min_full': .85, - 'fitness_min_full': - 50, - 'fitness_delta_min_full': - 50, - 'only_init_patch': True, - 'gnb': 1, - 'memory_fact': 1, - 'n_chunks': 10 - } -params_display = { - 'downsample_ratio': .2, - 'thr_plot': 0.9 +params_movie = { + 'fname': ['Sue_2x_3000_40_-46.tif'], + 'niter_rig': 1, + 'max_shifts': (3, 3), # maximum allow rigid shift + 'splits_rig': 20, # for parallelization split the movies in num_splits chuncks across time + # if none all the splits are processed and the movie is saved + 'num_splits_to_process_rig': None, + 'p': 1, # order of the autoregressive system + 'merge_thresh': 0.8, # merging threshold, max correlation allowed + 'rf': 15, # half-size of the patches in pixels. rf=25, patches are 50x50 + 'stride_cnmf': 6, # amounpl.it of overlap between the patches in pixels + 'K': 4, # number of components per patch + # if dendritic. 
In this case you need to set init_method to + # sparse_nmf + 'is_dendrites': False, + 'init_method': 'greedy_roi', + 'gSig': [4, 4], # expected half size of neurons + 'alpha_snmf': None, # this controls sparsity + 'final_frate': 30, + 'r_values_min_patch': .7, # threshold on space consistency + 'fitness_min_patch': -40, # threshold on time variability + # threshold on time variability (if nonsparse activity) + 'fitness_delta_min_patch': -40, + 'Npeaks': 10, + 'r_values_min_full': .85, + 'fitness_min_full': -50, + 'fitness_delta_min_full': -50, + 'only_init_patch': True, + 'gnb': 1, + 'memory_fact': 1, + 'n_chunks': 10 } +params_display = {'downsample_ratio': .2, 'thr_plot': 0.9} # params_movie = {'fname': [u'./example_movies/demoMovieJ.tif'], # 'max_shifts': (2, 2), # maximum allow rigid shift (2,2) @@ -150,7 +146,7 @@ def create(): num_splits_to_process_rig = params_movie['num_splits_to_process_rig'] download_demo(fname[0]) - fname = os.path.join(caiman_datadir(),'example_movies', fname[0]) + fname = os.path.join(caiman_datadir(), 'example_movies', fname[0]) m_orig = cm.load(fname) min_mov = m_orig[:400].min() comp = comparison.Comparison() @@ -158,10 +154,14 @@ def create(): ################ RIG CORRECTION ################# t1 = time.time() - mc = MotionCorrect(fname, min_mov, - max_shifts=max_shifts, niter_rig=niter_rig, splits_rig=splits_rig, + mc = MotionCorrect(fname, + min_mov, + max_shifts=max_shifts, + niter_rig=niter_rig, + splits_rig=splits_rig, num_splits_to_process_rig=num_splits_to_process_rig, - shifts_opencv=True, nonneg_movie=True) + shifts_opencv=True, + nonneg_movie=True) mc.motion_correct_rigid(save_movie=True) m_rig = cm.load(mc.fname_tot_rig) bord_px_rig = np.ceil(np.max(mc.shifts_rig)).astype(np.int) @@ -172,24 +172,27 @@ def create(): if 'max_shifts' not in params_movie: fnames = params_movie['fname'] border_to_0 = 0 - else: # elif not params_movie.has_key('overlaps'): + else: # elif not params_movie.has_key('overlaps'): fnames = [mc.fname_tot_rig] border_to_0 = bord_px_rig m_els = m_rig idx_xy = None - add_to_movie = -np.nanmin(m_els) + 1 # movie must be positive + add_to_movie = -np.nanmin(m_els) + 1 # movie must be positive remove_init = 0 downsample_factor = 1 base_name = fname[0].split('/')[-1][:-4] - name_new = cm.save_memmap_each(fnames, base_name=base_name, resize_fact=( - 1, 1, downsample_factor), remove_init=remove_init, - idx_xy=idx_xy, add_to_movie=add_to_movie, border_to_0=border_to_0) + name_new = cm.save_memmap_each(fnames, + base_name=base_name, + resize_fact=(1, 1, downsample_factor), + remove_init=remove_init, + idx_xy=idx_xy, + add_to_movie=add_to_movie, + border_to_0=border_to_0) name_new.sort() if len(name_new) > 1: - fname_new = cm.save_memmap_join( - name_new, base_name='Yr', n_chunks=params_movie['n_chunks'], dview=None) + fname_new = cm.save_memmap_join(name_new, base_name='Yr', n_chunks=params_movie['n_chunks'], dview=None) else: print('One file only, not saving!') fname_new = name_new[0] @@ -203,8 +206,7 @@ def create(): raise Exception('Movie too negative, add_to_movie should be larger') if np.sum(np.isnan(images)) > 0: # TODO: same here - raise Exception( - 'Movie contains nan! You did not remove enough borders') + raise Exception('Movie contains nan! 
You did not remove enough borders') Cn = cm.local_correlations(Y) Cn[np.isnan(Cn)] = 0 @@ -224,11 +226,20 @@ def create(): ################ CNMF PART PATCH ################# t1 = time.time() - cnm = cnmf.CNMF(n_processes=1, k=K, gSig=gSig, merge_thresh=params_movie['merge_thresh'], p=params_movie['p'], - dview=None, rf=rf, stride=stride_cnmf, memory_fact=params_movie['memory_fact'], - method_init=init_method, alpha_snmf=alpha_snmf, only_init_patch=params_movie[ - 'only_init_patch'], - gnb=params_movie['gnb'], method_deconvolution='oasis') + cnm = cnmf.CNMF(n_processes=1, + k=K, + gSig=gSig, + merge_thresh=params_movie['merge_thresh'], + p=params_movie['p'], + dview=None, + rf=rf, + stride=stride_cnmf, + memory_fact=params_movie['memory_fact'], + method_init=init_method, + alpha_snmf=alpha_snmf, + only_init_patch=params_movie['only_init_patch'], + gnb=params_movie['gnb'], + method_deconvolution='oasis') comp.cnmpatch = copy.copy(cnm) comp.cnmpatch.estimates = None cnm = cnm.fit(images) @@ -247,10 +258,17 @@ def create(): fitness_delta_min = params_movie['fitness_delta_min_patch'] Npeaks = params_movie['Npeaks'] traces = C_tot + YrA_tot - idx_components, idx_components_bad = estimate_components_quality( - traces, Y, A_tot, C_tot, b_tot, f_tot, final_frate=final_frate, - Npeaks=Npeaks, r_values_min=r_values_min, fitness_min=fitness_min, - fitness_delta_min=fitness_delta_min) + idx_components, idx_components_bad = estimate_components_quality(traces, + Y, + A_tot, + C_tot, + b_tot, + f_tot, + final_frate=final_frate, + Npeaks=Npeaks, + r_values_min=r_values_min, + fitness_min=fitness_min, + fitness_delta_min=fitness_delta_min) ####### A_tot = A_tot.tocsc()[:, idx_components] C_tot = C_tot[idx_components] @@ -260,8 +278,17 @@ def create(): ################ CNMF PART FULL ################# t1 = time.time() - cnm = cnmf.CNMF(n_processes=1, k=A_tot.shape, gSig=gSig, merge_thresh=merge_thresh, p=p, Ain=A_tot, Cin=C_tot, - f_in=f_tot, rf=None, stride=None, method_deconvolution='oasis') + cnm = cnmf.CNMF(n_processes=1, + k=A_tot.shape, + gSig=gSig, + merge_thresh=merge_thresh, + p=p, + Ain=A_tot, + Cin=C_tot, + f_in=f_tot, + rf=None, + stride=None, + method_deconvolution='oasis') cnm = cnm.fit(images) # DISCARDING A, C, b, f, YrA, sn = cnm.estimates.A, cnm.estimates.C, cnm.estimates.b, cnm.estimates.f, cnm.estimates.YrA, cnm.estimates.sn @@ -274,14 +301,23 @@ def create(): Npeaks = params_movie['Npeaks'] traces = C + YrA idx_components, idx_components_bad, fitness_raw, fitness_delta, r_values = estimate_components_quality( - traces, Y, A, C, b, f, final_frate=final_frate, Npeaks=Npeaks, r_values_min=r_values_min, fitness_min=fitness_min, - fitness_delta_min=fitness_delta_min, return_all=True) + traces, + Y, + A, + C, + b, + f, + final_frate=final_frate, + Npeaks=Npeaks, + r_values_min=r_values_min, + fitness_min=fitness_min, + fitness_delta_min=fitness_delta_min, + return_all=True) ########### A_tot_full = A_tot.tocsc()[:, idx_components] C_tot_full = C_tot[idx_components] comp.comparison['cnmf_full_frame']['timer'] = time.time() - t1 - comp.comparison['cnmf_full_frame']['ourdata'] = [ - A_tot_full.copy(), C_tot_full.copy()] + comp.comparison['cnmf_full_frame']['ourdata'] = [A_tot_full.copy(), C_tot_full.copy()] #################### ######################## print(comp.dims) comp.save_with_compare(istruth=True, params=params_movie, Cn=Cn) diff --git a/caiman/tests/comparison_general.py b/caiman/tests/comparison_general.py index f273d38d1..640aec543 100644 --- a/caiman/tests/comparison_general.py +++ 
b/caiman/tests/comparison_general.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-
 """ test the principal functions of CaImAn
 use for nosetests and continuous integration development.
@@ -17,7 +16,6 @@
 #\date Created on june 2017
 #\author: Jremie KALFON
 
-
 from builtins import str
 from builtins import range
 
@@ -44,7 +42,6 @@
 except NameError:
     pass
 
-
 import caiman as cm
 from caiman.components_evaluation import estimate_components_quality
 from caiman.motion_correction import MotionCorrect
@@ -56,47 +53,45 @@
 # You can log to a file using the filename parameter, or make the output more or less
 # verbose by setting level to logging.DEBUG, logging.INFO, logging.WARNING, or logging.ERROR
 
-logging.basicConfig(format=
-                    "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s",
-                    # filename="/tmp/caiman.log",
-                    level=logging.DEBUG)
+logging.basicConfig(
+    format="%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s",
+    # filename="/tmp/caiman.log",
+    level=logging.DEBUG)
 
 # GLOBAL VAR
-params_movie = {'fname': ['Sue_2x_3000_40_-46.tif'],
-                'niter_rig': 1,
-                'max_shifts': (3, 3),  # maximum allow rigid shift
-                'splits_rig': 20,  # for parallelization split the movies in num_splits chuncks across time
-                # if none all the splits are processed and the movie is saved
-                'num_splits_to_process_rig': None,
-                # intervals at which patches are laid out for motion correction
-                'p': 1,  # order of the autoregressive system
-                'merge_thresh': 0.8,  # merging threshold, max correlation allowed
-                'rf': 15,  # half-size of the patches in pixels. rf=25, patches are 50x50
-                'stride_cnmf': 6,  # amounpl.it of overlap between the patches in pixels
-                'K': 4,  # number of components per patch
-                # if dendritic. In this case you need to set init_method to
-                # sparse_nmf
-                'is_dendrites': False,
-                'init_method': 'greedy_roi',
-                'gSig': [4, 4],  # expected half size of neurons
-                'final_frate': 30,
-                'r_values_min_patch': .7,  # threshold on space consistency
-                'fitness_min_patch': -40,  # threshold on time variability
-                # threshold on time variability (if nonsparse activity)
-                'fitness_delta_min_patch': -40,
-                'Npeaks': 10,
-                'r_values_min_full': .85,
-                'fitness_min_full': - 50,
-                'fitness_delta_min_full': - 50,
-                'only_init_patch': True,
-                'gnb': 1,
-                'memory_fact': 1,
-                'n_chunks': 10
-                }
-params_display = {
-    'downsample_ratio': .2,
-    'thr_plot': 0.9
+params_movie = {
+    'fname': ['Sue_2x_3000_40_-46.tif'],
+    'niter_rig': 1,
+    'max_shifts': (3, 3),  # maximum allowed rigid shift
+    'splits_rig': 20,  # for parallelization split the movies in num_splits chunks across time
+    # if none all the splits are processed and the movie is saved
+    'num_splits_to_process_rig': None,
+    # intervals at which patches are laid out for motion correction
+    'p': 1,  # order of the autoregressive system
+    'merge_thresh': 0.8,  # merging threshold, max correlation allowed
+    'rf': 15,  # half-size of the patches in pixels. rf=25, patches are 50x50
+    'stride_cnmf': 6,  # amount of overlap between the patches in pixels
+    'K': 4,  # number of components per patch
+    # if dendritic.
In this case you need to set init_method to + # sparse_nmf + 'is_dendrites': False, + 'init_method': 'greedy_roi', + 'gSig': [4, 4], # expected half size of neurons + 'final_frate': 30, + 'r_values_min_patch': .7, # threshold on space consistency + 'fitness_min_patch': -40, # threshold on time variability + # threshold on time variability (if nonsparse activity) + 'fitness_delta_min_patch': -40, + 'Npeaks': 10, + 'r_values_min_full': .85, + 'fitness_min_full': -50, + 'fitness_delta_min_full': -50, + 'only_init_patch': True, + 'gnb': 1, + 'memory_fact': 1, + 'n_chunks': 10 } +params_display = {'downsample_ratio': .2, 'thr_plot': 0.9} # params_movie = {'fname': [u'./example_movies/demoMovieJ.tif'], # 'max_shifts': (2, 2), # maximum allow rigid shift (2,2) @@ -153,8 +148,8 @@ def test_general(): """ -#\bug -#\warning + #\bug + #\warning global params_movie global params_diplay @@ -171,41 +166,47 @@ def test_general(): comp = comparison.Comparison() comp.dims = np.shape(m_orig)[1:] - -################ RIG CORRECTION ################# + ################ RIG CORRECTION ################# t1 = time.time() - mc = MotionCorrect(fname, min_mov, - max_shifts=max_shifts, niter_rig=niter_rig, splits_rig=splits_rig, + mc = MotionCorrect(fname, + min_mov, + max_shifts=max_shifts, + niter_rig=niter_rig, + splits_rig=splits_rig, num_splits_to_process_rig=num_splits_to_process_rig, - shifts_opencv=True, nonneg_movie=True) + shifts_opencv=True, + nonneg_movie=True) mc.motion_correct_rigid(save_movie=True) m_rig = cm.load(mc.fname_tot_rig) bord_px_rig = np.ceil(np.max(mc.shifts_rig)).astype(np.int) comp.comparison['rig_shifts']['timer'] = time.time() - t1 comp.comparison['rig_shifts']['ourdata'] = mc.shifts_rig -########################################### + ########################################### if 'max_shifts' not in params_movie: fnames = params_movie['fname'] border_to_0 = 0 - else: # elif not params_movie.has_key('overlaps'): + else: # elif not params_movie.has_key('overlaps'): fnames = mc.fname_tot_rig border_to_0 = bord_px_rig m_els = m_rig idx_xy = None - add_to_movie = -np.nanmin(m_els) + 1 # movie must be positive + add_to_movie = -np.nanmin(m_els) + 1 # movie must be positive remove_init = 0 downsample_factor = 1 base_name = fname[0].split('/')[-1][:-4] - name_new = cm.save_memmap_each(fnames, base_name=base_name, resize_fact=( - 1, 1, downsample_factor), remove_init=remove_init, - idx_xy=idx_xy, add_to_movie=add_to_movie, border_to_0=border_to_0) + name_new = cm.save_memmap_each(fnames, + base_name=base_name, + resize_fact=(1, 1, downsample_factor), + remove_init=remove_init, + idx_xy=idx_xy, + add_to_movie=add_to_movie, + border_to_0=border_to_0) name_new.sort() if len(name_new) > 1: - fname_new = cm.save_memmap_join( - name_new, base_name='Yr', n_chunks=params_movie['n_chunks'], dview=None) + fname_new = cm.save_memmap_join(name_new, base_name='Yr', n_chunks=params_movie['n_chunks'], dview=None) else: logging.warning('One file only, not saving!') fname_new = name_new[0] @@ -219,8 +220,7 @@ def test_general(): raise Exception('Movie too negative, add_to_movie should be larger') if np.sum(np.isnan(images)) > 0: # TODO: same here - raise Exception( - 'Movie contains nan! You did not remove enough borders') + raise Exception('Movie contains nan! 
You did not remove enough borders') Cn = cm.local_correlations(Y) Cn[np.isnan(Cn)] = 0 @@ -238,14 +238,22 @@ def test_general(): if params_movie['alpha_snmf'] is None: raise Exception('need to set a value for alpha_snmf') - ################ CNMF PART PATCH ################# t1 = time.time() - cnm = cnmf.CNMF(n_processes=1, k=K, gSig=gSig, merge_thresh=params_movie['merge_thresh'], p=params_movie['p'], - dview=None, rf=rf, stride=stride_cnmf, memory_fact=params_movie['memory_fact'], - method_init=init_method, alpha_snmf=100, only_init_patch=params_movie[ - 'only_init_patch'], - gnb=params_movie['gnb'], method_deconvolution='oasis') + cnm = cnmf.CNMF(n_processes=1, + k=K, + gSig=gSig, + merge_thresh=params_movie['merge_thresh'], + p=params_movie['p'], + dview=None, + rf=rf, + stride=stride_cnmf, + memory_fact=params_movie['memory_fact'], + method_init=init_method, + alpha_snmf=100, + only_init_patch=params_movie['only_init_patch'], + gnb=params_movie['gnb'], + method_deconvolution='oasis') comp.cnmpatch = copy.copy(cnm) comp.cnmpatch.estimates = None cnm = cnm.fit(images) @@ -264,22 +272,37 @@ def test_general(): fitness_delta_min = params_movie['fitness_delta_min_patch'] Npeaks = params_movie['Npeaks'] traces = C_tot + YrA_tot - idx_components, idx_components_bad = estimate_components_quality( - traces, Y, A_tot, C_tot, b_tot, f_tot, final_frate=final_frate, - Npeaks=Npeaks, r_values_min=r_values_min, fitness_min=fitness_min, - fitness_delta_min=fitness_delta_min) + idx_components, idx_components_bad = estimate_components_quality(traces, + Y, + A_tot, + C_tot, + b_tot, + f_tot, + final_frate=final_frate, + Npeaks=Npeaks, + r_values_min=r_values_min, + fitness_min=fitness_min, + fitness_delta_min=fitness_delta_min) ####### A_tot = A_tot.tocsc()[:, idx_components] C_tot = C_tot[idx_components] comp.comparison['cnmf_on_patch']['timer'] = time.time() - t1 comp.comparison['cnmf_on_patch']['ourdata'] = [A_tot.copy(), C_tot.copy()] -#################### ######################## - + #################### ######################## -################ CNMF PART FULL ################# + ################ CNMF PART FULL ################# t1 = time.time() - cnm = cnmf.CNMF(n_processes=1, k=A_tot.shape, gSig=gSig, merge_thresh=merge_thresh, p=p, Ain=A_tot, Cin=C_tot, - f_in=f_tot, rf=None, stride=None, method_deconvolution='oasis') + cnm = cnmf.CNMF(n_processes=1, + k=A_tot.shape, + gSig=gSig, + merge_thresh=merge_thresh, + p=p, + Ain=A_tot, + Cin=C_tot, + f_in=f_tot, + rf=None, + stride=None, + method_deconvolution='oasis') cnm = cnm.fit(images) # DISCARDING A, C, b, f, YrA, sn = cnm.estimates.A, cnm.estimates.C, cnm.estimates.b, cnm.estimates.f, cnm.estimates.YrA, cnm.estimates.sn @@ -292,16 +315,24 @@ def test_general(): Npeaks = params_movie['Npeaks'] traces = C + YrA idx_components, idx_components_bad, fitness_raw, fitness_delta, r_values = estimate_components_quality( - traces, Y, A, C, b, f, final_frate=final_frate, Npeaks=Npeaks, r_values_min=r_values_min, + traces, + Y, + A, + C, + b, + f, + final_frate=final_frate, + Npeaks=Npeaks, + r_values_min=r_values_min, fitness_min=fitness_min, - fitness_delta_min=fitness_delta_min, return_all=True) + fitness_delta_min=fitness_delta_min, + return_all=True) ########## A_tot_full = A_tot.tocsc()[:, idx_components] C_tot_full = C_tot[idx_components] comp.comparison['cnmf_full_frame']['timer'] = time.time() - t1 - comp.comparison['cnmf_full_frame']['ourdata'] = [ - A_tot_full.copy(), C_tot_full.copy()] -#################### ######################## + 
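# estimate_components_quality() is called twice above with the thresholds
# from params_movie: r_values (space consistency), plus fitness_raw and
# fitness_delta (log-likelihood style scores where more negative means
# stronger transients). In the classic CaImAn scheme a component is kept when
# it passes any of the tests, i.e. the accepted set is the union of the
# per-test survivors; the toy rule below sketches that combination and is not
# the library code itself:
import numpy as np

def accept(r_values, fitness_raw, fitness_delta, r_min=.85, fit_min=-50, fit_delta_min=-50):
    r_values, fitness_raw, fitness_delta = map(np.asarray, (r_values, fitness_raw, fitness_delta))
    return (r_values >= r_min) | (fitness_raw < fit_min) | (fitness_delta < fit_delta_min)

print(accept([0.9, 0.3], [-10., -20.], [-10., -20.]))   # -> [ True False]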
comp.comparison['cnmf_full_frame']['ourdata'] = [A_tot_full.copy(), C_tot_full.copy()] + #################### ######################## comp.save_with_compare(istruth=False, params=params_movie, Cn=Cn) log_files = glob.glob('*_LOG_*') try: @@ -309,13 +340,19 @@ def test_general(): os.remove(log_file) except: logging.warning('Cannot remove log files') + + ############ assertions ################## pb = False if (comp.information['differences']['params_movie']): - logging.error("you need to set the same movie parameters than the ground truth to have a real comparison (use the comp.see() function to explore it)") + logging.error( + "you need to set the same movie parameters than the ground truth to have a real comparison (use the comp.see() function to explore it)" + ) pb = True if (comp.information['differences']['params_cnm']): - logging.warning("you need to set the same cnmf parameters than the ground truth to have a real comparison (use the comp.see() function to explore it)") + logging.warning( + "you need to set the same cnmf parameters than the ground truth to have a real comparison (use the comp.see() function to explore it)" + ) # pb = True if (comp.information['diff']['rig']['isdifferent']): logging.error("the rigid shifts are different from the groundtruth ") diff --git a/caiman/tests/comparison_humans_online.py b/caiman/tests/comparison_humans_online.py index 1ae3b3d6b..428ab8d10 100644 --- a/caiman/tests/comparison_humans_online.py +++ b/caiman/tests/comparison_humans_online.py @@ -1,6 +1,5 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- - """ Complete pipeline for CaImAn online processing and comparison with consensus annotation. The script processes one (or more) of the provided datasets and @@ -58,7 +57,7 @@ # 6: sue_ann_k53_20160530 (K53) # 7: J115 (J115) # 8: J123 (J123) - + try: if 'pydevconsole' in sys.argv[0]: raise Exception() @@ -78,21 +77,22 @@ base_folder = '/mnt/ceph/neuro/DataForPublications/DATA_PAPER_ELIFE/' # %% set some global parameters here # 'use_cases/edge-cutter/binary_cross_bootstrapped.json' -global_params = {'min_SNR': 1.2, # minimum SNR when considering adding a new neuron - 'p': 1, # order of indicator dynamics - 'gnb': 2, # number of background components - 'epochs': 2, # number of passes over the data - 'rval_thr': 0.75, # spatial correlation threshold - 'max_thr': 0.15, # parameter for thresholding components when cleaning up shapes - 'mot_corr': False, # flag for motion correction (set to False to compare directly on the same FOV) - 'min_num_trial': 10, # maximum number of candidate components per frame - 'use_peak_max': True, - 'thresh_CNN_noisy': .65, # CNN threshold for accepting a candidate component - 'sniper_mode': True, # use the CNN for testing candidate components - 'update_freq': 500 - } - -params_movie = [{}] * 5 # set up list of dictionaries +global_params = { + 'min_SNR': 1.2, # minimum SNR when considering adding a new neuron + 'p': 1, # order of indicator dynamics + 'gnb': 2, # number of background components + 'epochs': 2, # number of passes over the data + 'rval_thr': 0.75, # spatial correlation threshold + 'max_thr': 0.15, # parameter for thresholding components when cleaning up shapes + 'mot_corr': False, # flag for motion correction (set to False to compare directly on the same FOV) + 'min_num_trial': 10, # maximum number of candidate components per frame + 'use_peak_max': True, + 'thresh_CNN_noisy': .65, # CNN threshold for accepting a candidate component + 'sniper_mode': True, # use the CNN for testing candidate components + 
'update_freq': 500 +} + +params_movie = [{}] * 5 # set up list of dictionaries # % neurofinder.00.00 params_movie[0] = { @@ -101,18 +101,11 @@ 'decay_time': 0.4, 'epochs': 3, 'fr': 16, - 'gSig': [8, 8], # expected half size of neurons + 'gSig': [8, 8], # expected half size of neurons 'gnb': 2, } -# % neurofinder.01.01 -params_movie[1] = { - 'folder_name': 'N.01.01/', - 'ds_factor': 1, - 'fr': 8, - 'gnb': 1, - 'decay_time': 1.4, - 'gSig': [6, 6] -} + # % neurofinder.01.01 +params_movie[1] = {'folder_name': 'N.01.01/', 'ds_factor': 1, 'fr': 8, 'gnb': 1, 'decay_time': 1.4, 'gSig': [6, 6]} # % neurofinder.03.00.test params_movie[2] = { @@ -120,7 +113,7 @@ 'ds_factor': 2, 'fr': 7, 'decay_time': 0.4, - 'gSig': [12, 12], # expected half size of neurons + 'gSig': [12, 12], # expected half size of neurons 'gnb': 3, } @@ -132,7 +125,7 @@ 'fr': 10, 'decay_time': .75, 'gnb': 3, - 'gSig': [6, 6], # expected half size of neurons + 'gSig': [6, 6], # expected half size of neurons } # % neurofinder.04.00.test @@ -141,8 +134,8 @@ 'epochs': 2, 'ds_factor': 1, 'fr': 8, - 'gSig': [7, 7], # expected half size of neurons - 'decay_time': 1.2, # rough length of a transient + 'gSig': [7, 7], # expected half size of neurons + 'decay_time': 1.2, # rough length of a transient 'gnb': 3, } @@ -164,20 +157,21 @@ print(fls) - Cn = np.array(cm.load(os.path.abspath( - base_folder + params_movie[ind_dataset]['folder_name']) + '/correlation_image.tif')).squeeze() + Cn = np.array( + cm.load(os.path.abspath(base_folder + params_movie[ind_dataset]['folder_name']) + + '/correlation_image.tif')).squeeze() # %% Set up some parameters ds_factor = params_movie[ind_dataset][ - 'ds_factor'] # spatial downsampling factor (increases speed but may lose some fine structure) + 'ds_factor'] # spatial downsampling factor (increases speed but may lose some fine structure) fr = params_movie[ind_dataset]['fr'] decay_time = params_movie[ind_dataset]['decay_time'] gSig = tuple(np.ceil(np.array(params_movie[ind_dataset]['gSig']) / ds_factor).astype( - np.int)) # expected half size of neurons - init_batch = 200 # number of frames for initialization (presumably from the first file) - expected_comps = 1000 # maximum number of expected components used for memory pre-allocation (exaggerate here) - K = 2 # initial number of components - rval_thr = global_params['rval_thr'] # correlation threshold for new component inclusion + np.int)) # expected half size of neurons + init_batch = 200 # number of frames for initialization (presumably from the first file) + expected_comps = 1000 # maximum number of expected components used for memory pre-allocation (exaggerate here) + K = 2 # initial number of components + rval_thr = global_params['rval_thr'] # correlation threshold for new component inclusion try: gnb = params_movie[ind_dataset]['gnb'] @@ -185,36 +179,37 @@ gnb = global_params['gnb'] try: - epochs = params_movie[ind_dataset]['epochs'] # number of background components + epochs = params_movie[ind_dataset]['epochs'] # number of background components except: - epochs = global_params['epochs'] # number of passes over the data + epochs = global_params['epochs'] # number of passes over the data # %% - params_dict = {'fnames': fls, - 'fr': fr, - 'decay_time': decay_time, - 'gSig': gSig, - 'p': global_params['p'], - 'min_SNR': global_params['min_SNR'], - 'rval_thr': global_params['rval_thr'], - 'ds_factor': ds_factor, - 'nb': gnb, - 'motion_correct': global_params['mot_corr'], - 'init_batch': init_batch, - 'init_method': 'bare', - 'normalize': True, - 
'expected_comps': expected_comps, - 'dist_shape_update': True, - 'K': K, - 'epochs': epochs, - 'show_movie': False, - 'min_num_trial': global_params['min_num_trial'], - 'use_peak_max': True, - 'thresh_CNN_noisy': global_params['thresh_CNN_noisy'], - 'sniper_mode': global_params['sniper_mode'], - 'use_dense': False, - 'update_freq' : global_params['update_freq'] - } + params_dict = { + 'fnames': fls, + 'fr': fr, + 'decay_time': decay_time, + 'gSig': gSig, + 'p': global_params['p'], + 'min_SNR': global_params['min_SNR'], + 'rval_thr': global_params['rval_thr'], + 'ds_factor': ds_factor, + 'nb': gnb, + 'motion_correct': global_params['mot_corr'], + 'init_batch': init_batch, + 'init_method': 'bare', + 'normalize': True, + 'expected_comps': expected_comps, + 'dist_shape_update': True, + 'K': K, + 'epochs': epochs, + 'show_movie': False, + 'min_num_trial': global_params['min_num_trial'], + 'use_peak_max': True, + 'thresh_CNN_noisy': global_params['thresh_CNN_noisy'], + 'sniper_mode': global_params['sniper_mode'], + 'use_dense': False, + 'update_freq': global_params['update_freq'] + } opts = cnmf.params.CNMFParams(params_dict=params_dict) if not reload: @@ -225,7 +220,7 @@ #cnm.save(fls[0][:-4]+'hdf5') else: print('***** reloading **********') - cnm = load_OnlineCNMF(fls[0][:-4]+'hdf5') + cnm = load_OnlineCNMF(fls[0][:-4] + 'hdf5') # %% filter results by using the batch CNN use_cnn = False @@ -236,10 +231,10 @@ # %% remove small and duplicate components - min_radius = max(cnm.params.init['gSig'][0]*ds_factor / 2., 2.) # minimum acceptable radius - max_radius = 2. * cnm.params.init['gSig'][0]*ds_factor # maximum acceptable radius - min_size_neuro = min_radius ** 2 * np.pi - max_size_neuro = max_radius ** 2 * np.pi + min_radius = max(cnm.params.init['gSig'][0] * ds_factor / 2., 2.) # minimum acceptable radius + max_radius = 2. 
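# The size filter being computed here keeps components whose area lies
# between the areas of a minimum and a maximum acceptable radius derived from
# gSig (area = pi * r**2). A standalone sketch of the bound computation,
# assuming an illustrative gSig half-size of 6 px and 2x spatial downsampling:
import numpy as np

gSig0, ds_factor = 6, 2
min_radius = max(gSig0 * ds_factor / 2., 2.)
max_radius = 2. * gSig0 * ds_factor
print(np.pi * min_radius**2, np.pi * max_radius**2)   # acceptable area range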
* cnm.params.init['gSig'][0] * ds_factor # maximum acceptable radius + min_size_neuro = min_radius**2 * np.pi + max_size_neuro = max_radius**2 * np.pi cnm.estimates.threshold_spatial_components(maxthr=global_params['max_thr'], dview=None) cnm.estimates.remove_small_large_neurons(min_size_neuro, max_size_neuro) @@ -270,28 +265,33 @@ print(gt_estimate.A.shape) # %% compute performance and plot against consensus annotations - tp_gt, tp_comp, fn_gt, fp_comp, performance_cons_off = compare_components(gt_estimate, cnm.estimates, - Cn=Cn_orig, thresh_cost=.8, + tp_gt, tp_comp, fn_gt, fp_comp, performance_cons_off = compare_components(gt_estimate, + cnm.estimates, + Cn=Cn_orig, + thresh_cost=.8, min_dist=10, print_assignment=False, labels=['CA', 'CMO'], plot_results=plot_results) plt.rcParams['pdf.fonttype'] = 42 - font = {'family': 'Arial', - 'weight': 'regular', - 'size': 20} + font = {'family': 'Arial', 'weight': 'regular', 'size': 20} # %% compute correlations plt.rc('font', **font) print(gt_file) - print(params_movie[ind_dataset]['folder_name']+ str({a: b.astype(np.float16) for a, b in performance_cons_off.items()})) + print(params_movie[ind_dataset]['folder_name'] + + str({a: b.astype(np.float16) for a, b in performance_cons_off.items()})) xcorrs = [scipy.stats.pearsonr(a, b)[0] for a, b in zip(gt_estimate.C[tp_gt], cnm.estimates.C[tp_comp])] xcorrs = [x for x in xcorrs if str(x) != 'nan'] if plot_results: - plt.figure(); - plt.subplot(1,2,1); plt.hist(xcorrs, 100); plt.title('Empirical PDF of trace correlation coefficients') - plt.subplot(1,2,2); plt.hist(xcorrs, 100, cumulative=True); plt.title('Empirical CDF of trace correlation coefficients') + plt.figure() + plt.subplot(1, 2, 1) + plt.hist(xcorrs, 100) + plt.title('Empirical PDF of trace correlation coefficients') + plt.subplot(1, 2, 2) + plt.hist(xcorrs, 100, cumulative=True) + plt.title('Empirical CDF of trace correlation coefficients') # %% Save results performance_tmp = performance_cons_off.copy() @@ -320,36 +320,33 @@ # %% Plot Timing performance if plot_results: - plt.figure(); - plt.stackplot(np.arange(len(cnm.t_detect)), 1e3*np.array(cnm.t_online)-np.array(cnm.t_detect) - np.array(cnm.t_shapes), - 1e3*np.array(cnm.t_detect), 1e3*np.array(cnm.t_shapes)) + plt.figure() + plt.stackplot(np.arange(len(cnm.t_detect)), + 1e3 * np.array(cnm.t_online) - np.array(cnm.t_detect) - np.array(cnm.t_shapes), + 1e3 * np.array(cnm.t_detect), 1e3 * np.array(cnm.t_shapes)) plt.title('Processing time per frame') plt.xlabel('Frame #') plt.ylabel('Processing time [ms]') - plt.ylim([0,100]) - plt.legend(labels=['process','detect','shapes']) + plt.ylim([0, 100]) + plt.legend(labels=['process', 'detect', 'shapes']) if save_results: - path_save_file = os.path.join(base_folder, 'results_CaImAn_Online_'+ str(ID[0])+'.npz') + path_save_file = os.path.join(base_folder, 'results_CaImAn_Online_' + str(ID[0]) + '.npz') np.savez(path_save_file, all_results=all_results) # %% The variables ALL_CCs and all_results contain all the info necessary to create the figures -results_old = {'N.00.00': 0.692, - 'N.01.01': 0.75, - 'N.03.00.t': 0.742, - 'N.04.00.t': 0.7, - 'YST': 0.775} +results_old = {'N.00.00': 0.692, 'N.01.01': 0.75, 'N.03.00.t': 0.742, 'N.04.00.t': 0.7, 'YST': 0.775} results_holding = True improvement = False for key in results_old.keys(): - print(key + ' f1_score_new : ' + str(all_results[key+'/']['f1_score']) + ',f1_score_old:' + str(results_old[key]), - ',delta:' + str(results_old[key] - all_results[key+'/']['f1_score'])) - if (results_old[key] - 
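# The loop being entered here is a plain regression gate on F1 scores: fail
# when any dataset drops more than 0.01 below its recorded baseline, and note
# an improvement when it gains more than 0.01. A standalone sketch of the gate:
def f1_gate(baseline, new, tol=0.01):
    holding, improved = True, False
    for key, old in baseline.items():
        delta = old - new[key]
        if delta > tol:
            holding = False        # measurable regression
        elif delta < -tol:
            improved = True        # measurable improvement
    return holding, improved

print(f1_gate({'N.00.00': 0.692}, {'N.00.00': 0.700}))   # (True, False)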
diff --git a/caiman/tests/test_deconvolution.py b/caiman/tests/test_deconvolution.py
index 4d955e9a5..2e8a70fb5 100644
--- a/caiman/tests/test_deconvolution.py
+++ b/caiman/tests/test_deconvolution.py
@@ -11,10 +11,11 @@
 
 # You can log to a file using the filename parameter, or make the output more or less
 # verbose by setting level to logging.DEBUG, logging.INFO, logging.WARNING, or logging.ERROR
-logging.basicConfig(format=
-                    "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s",
-                    # filename="/tmp/caiman.log",
-                    level=logging.DEBUG)
+logging.basicConfig(
+    format="%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s",
+    # filename="/tmp/caiman.log",
+    level=logging.DEBUG)
+
 
 def gen_data(g=[.95], sn=.2, T=1000, framerate=30, firerate=.5, b=10, N=1, seed=0):
     """
@@ -76,4 +77,3 @@ def foo(method, p):
 def test_oasis():
     foo('oasis', 1)
     foo('oasis', 2)
-
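The comment kept in the hunk above documents the logging knobs; as a sketch of the variant it describes (file output instead of the console, reduced verbosity; the filename is the illustrative one from the commented-out line):

import logging

logging.basicConfig(
    format="%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s",
    filename="/tmp/caiman.log",    # log to a file rather than stderr
    level=logging.INFO)            # less verbose than logging.DEBUG
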
diff --git a/caiman/tests/test_demo.py b/caiman/tests/test_demo.py
index 1befced4d..0a3b5dd55 100644
--- a/caiman/tests/test_demo.py
+++ b/caiman/tests/test_demo.py
@@ -7,23 +7,30 @@
 from caiman.source_extraction import cnmf
 from caiman.paths import caiman_datadir
 
+
 def demo(parallel=False):
-    p = 2  # order of the AR model (in general 1 or 2)
+    p = 2    # order of the AR model (in general 1 or 2)
     if parallel:
-        c, dview, n_processes = cm.cluster.setup_cluster(
-            backend='local', n_processes=None, single_thread=False)
+        c, dview, n_processes = cm.cluster.setup_cluster(backend='local', n_processes=None, single_thread=False)
     else:
         n_processes, dview = 2, None
 
     # LOAD MOVIE AND MEMORYMAP
     fname_new = cm.save_memmap([os.path.join(caiman_datadir(), 'example_movies', 'demoMovie.tif')],
-                               base_name='Yr',
-                               order = 'C')
+                               base_name='Yr',
+                               order='C')
     Yr, dims, T = cm.load_memmap(fname_new)
     # INIT
-    cnm = cnmf.CNMF(n_processes, method_init='greedy_roi', k=30, gSig=[4, 4], merge_thresh=.8,
-                    p=p, dview=dview, Ain=None, method_deconvolution='oasis')
+    cnm = cnmf.CNMF(n_processes,
+                    method_init='greedy_roi',
+                    k=30,
+                    gSig=[4, 4],
+                    merge_thresh=.8,
+                    p=p,
+                    dview=dview,
+                    Ain=None,
+                    method_deconvolution='oasis')
     # FIT
     images = np.reshape(Yr.T, [T] + list(dims), order='F')
     cnm = cnm.fit(images)
@@ -39,10 +46,12 @@ def demo(parallel=False):
     except:
         pass
 
+
 def test_single_thread():
     demo()
     pass
 
+
 def test_parallel():
     demo(True)
     pass
diff --git a/caiman/tests/test_import.py b/caiman/tests/test_import.py
index 5109e23ed..e87ce15f4 100644
--- a/caiman/tests/test_import.py
+++ b/caiman/tests/test_import.py
@@ -1,4 +1,5 @@
 #!/usr/bin/env python
 
+
 def test_base():
     return True
diff --git a/caiman/tests/test_onacid.py b/caiman/tests/test_onacid.py
index a62385d4f..7c1363319 100644
--- a/caiman/tests/test_onacid.py
+++ b/caiman/tests/test_onacid.py
@@ -8,37 +8,39 @@
 
 def demo():
     fname = [os.path.join(caiman_datadir(), 'example_movies', 'demoMovie.tif')]
-    fr = 10  # frame rate (Hz)
-    decay_time = .75  # approximate length of transient event in seconds
-    gSig = [6, 6]  # expected half size of neurons
-    p = 1  # order of AR indicator dynamics
-    min_SNR = 1  # minimum SNR for accepting candidate components
-    thresh_CNN_noisy = 0.65  # CNN threshold for candidate components
-    gnb = 2  # number of background components
-    init_method = 'cnmf'  # initialization method
+    fr = 10                     # frame rate (Hz)
+    decay_time = .75            # approximate length of transient event in seconds
+    gSig = [6, 6]               # expected half size of neurons
+    p = 1                       # order of AR indicator dynamics
+    min_SNR = 1                 # minimum SNR for accepting candidate components
+    thresh_CNN_noisy = 0.65     # CNN threshold for candidate components
+    gnb = 2                     # number of background components
+    init_method = 'cnmf'        # initialization method
 
     # set up CNMF initialization parameters
-    init_batch = 400  # number of frames for initialization
-    patch_size = 32  # size of patch
-    stride = 3  # amount of overlap between patches
-    K = 4  # max number of components in each patch
-
-    params_dict = {'fr': fr,
-                   'fnames': fname,
-                   'decay_time': decay_time,
-                   'gSig': gSig,
-                   'p': p,
-                   'motion_correct': False,
-                   'min_SNR': min_SNR,
-                   'nb': gnb,
-                   'init_batch': init_batch,
-                   'init_method': init_method,
-                   'rf': patch_size//2,
-                   'stride': stride,
-                   'sniper_mode': True,
-                   'thresh_CNN_noisy': thresh_CNN_noisy,
-                   'K': K}
+    init_batch = 400            # number of frames for initialization
+    patch_size = 32             # size of patch
+    stride = 3                  # amount of overlap between patches
+    K = 4                       # max number of components in each patch
+
+    params_dict = {
+        'fr': fr,
+        'fnames': fname,
+        'decay_time': decay_time,
+        'gSig': gSig,
+        'p': p,
+        'motion_correct': False,
+        'min_SNR': min_SNR,
+        'nb': gnb,
+        'init_batch': init_batch,
+        'init_method': init_method,
+        'rf': patch_size // 2,
+        'stride': stride,
+        'sniper_mode': True,
+        'thresh_CNN_noisy': thresh_CNN_noisy,
+        'K': K
+    }
     opts = cnmf.params.CNMFParams(params_dict=params_dict)
     cnm = cnmf.online_cnmf.OnACID(params=opts)
     cnm.fit_online()
diff --git a/caiman/tests/test_pre_processing.py b/caiman/tests/test_pre_processing.py
index ca89ede70..b31ec95ea 100644
--- a/caiman/tests/test_pre_processing.py
+++ b/caiman/tests/test_pre_processing.py
@@ -4,11 +4,11 @@
 import numpy as np
 from caiman.source_extraction import cnmf as cnmf
 
+
 def test_axcov():
     data = np.random.randn(1000)
     maxlag = 5
     C = cnmf.pre_processing.axcov(data, maxlag)
     print(C)
-    npt.assert_allclose(C, np.concatenate(
-        (np.zeros(maxlag), np.array([1]), np.zeros(maxlag))), atol=1)
+    npt.assert_allclose(C, np.concatenate((np.zeros(maxlag), np.array([1]), np.zeros(maxlag))), atol=1)
diff --git a/caiman/tests/test_temporal.py b/caiman/tests/test_temporal.py
index 4e199ecca..74179f1b1 100644
--- a/caiman/tests/test_temporal.py
+++ b/caiman/tests/test_temporal.py
@@ -10,12 +10,7 @@ def test_make_G_matrix():
     T = 6
     G = cnmf.temporal.make_G_matrix(T, g)
     G = G.todense()
-    true_G = np.matrix(
-        [[1., 0., 0., 0., 0., 0.],
-         [-1., 1., 0., 0., 0., 0.],
-         [-2., -1., 1., 0., 0., 0.],
-         [-3., -2., -1., 1., 0., 0.],
-         [0., -3., -2., -1., 1., 0.],
-         [0., 0., -3., -2., -1., 1.]])
+    true_G = np.matrix([[1., 0., 0., 0., 0., 0.], [-1., 1., 0., 0., 0., 0.], [-2., -1., 1., 0., 0., 0.],
+                        [-3., -2., -1., 1., 0., 0.], [0., -3., -2., -1., 1., 0.], [0., 0., -3., -2., -1., 1.]])
 
     npt.assert_allclose(G, true_G)
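For context on test_make_G_matrix above: G encodes the autoregressive model c_t = sum_i g_i * c_{t-i} + s_t (ones on the diagonal, -g on the subdiagonals, as in true_G), so multiplying a calcium trace by G recovers the spike signal. A self-contained numpy sketch of that identity, independent of the caiman implementation:

import numpy as np

g = np.array([1., 2., 3.])    # AR coefficients, as in the test
T = 6
s = np.zeros(T)
s[1] = 1.                     # a single spike
c = np.zeros(T)
for t in range(T):            # simulate c_t = sum_i g_i * c_{t-1-i} + s_t
    c[t] = s[t] + sum(g[i] * c[t - 1 - i] for i in range(len(g)) if t - 1 - i >= 0)

G = np.eye(T)                 # ones on the diagonal, -g on the subdiagonals
for i, gi in enumerate(g):
    G -= gi * np.eye(T, k=-(i + 1))
assert np.allclose(G @ c, s)  # applying G deconvolves the trace
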
diff --git a/caiman/tests/test_toydata.py b/caiman/tests/test_toydata.py
index c76c8a7a1..f55316f83 100644
--- a/caiman/tests/test_toydata.py
+++ b/caiman/tests/test_toydata.py
@@ -7,16 +7,16 @@ import caiman.source_extraction.cnmf.params
 from caiman.source_extraction import cnmf as cnmf
 
+
 #%%
 def gen_data(D=3, noise=.5, T=300, framerate=30, firerate=2.):
-    N = 4  # number of neurons
-    dims = [(20, 30), (12, 14, 16)][D - 2]  # size of image
-    sig = (2, 2, 2)[:D]  # neurons size
-    bkgrd = 10  # fluorescence baseline
-    gamma = .9  # calcium decay time constant
+    N = 4                                     # number of neurons
+    dims = [(20, 30), (12, 14, 16)][D - 2]    # size of image
+    sig = (2, 2, 2)[:D]                       # neurons size
+    bkgrd = 10                                # fluorescence baseline
+    gamma = .9                                # calcium decay time constant
     np.random.seed(5)
-    centers = np.asarray([[np.random.randint(4, x - 4)
-                           for x in dims] for i in range(N)])
+    centers = np.asarray([[np.random.randint(4, x - 4) for x in dims] for i in range(N)])
     trueA = np.zeros(dims + (N,), dtype=np.float32)
     trueS = np.random.rand(N, T) < firerate / float(framerate)
     trueS[:, 0] = 0
@@ -39,9 +39,13 @@ def pipeline(D):
     Yr, trueC, trueS, trueA, centers, dims = gen_data(D)
     N, T = trueC.shape
     # INIT
-    params = caiman.source_extraction.cnmf.params.CNMFParams(
-        dims=dims, k=4, gSig=[2, 2, 2][:D], p=1,
-        n_pixels_per_process=np.prod(dims), block_size_spat=np.prod(dims), block_size_temp=np.prod(dims))
+    params = caiman.source_extraction.cnmf.params.CNMFParams(dims=dims,
+                                                             k=4,
+                                                             gSig=[2, 2, 2][:D],
+                                                             p=1,
+                                                             n_pixels_per_process=np.prod(dims),
+                                                             block_size_spat=np.prod(dims),
+                                                             block_size_temp=np.prod(dims))
     params.spatial['thr_method'] = 'nrg'
     params.spatial['extract_cc'] = False
     cnm = cnmf.CNMF(2, params=params)
@@ -50,14 +54,15 @@ def pipeline(D):
     cnm = cnm.fit(images)
 
     # VERIFY HIGH CORRELATION WITH GROUND TRUTH
-    sorting = [np.argmax([np.corrcoef(tc, c)[0, 1]
-                          for tc in trueC]) for c in cnm.estimates.C]
+    sorting = [np.argmax([np.corrcoef(tc, c)[0, 1] for tc in trueC]) for c in cnm.estimates.C]
     # verifying the temporal components
     corr = [np.corrcoef(trueC[sorting[i]], cnm.estimates.C[i])[0, 1] for i in range(N)]
     npt.assert_allclose(corr, 1, .05)
     # verifying the spatial components
-    corr = [np.corrcoef(np.reshape(trueA, (-1, 4), order='F')[:, sorting[i]],
-                        cnm.estimates.A.toarray()[:, i])[0, 1] for i in range(N)]
+    corr = [
+        np.corrcoef(np.reshape(trueA, (-1, 4), order='F')[:, sorting[i]],
+                    cnm.estimates.A.toarray()[:, i])[0, 1] for i in range(N)
+    ]
     npt.assert_allclose(corr, 1, .05)
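One note on the assertions above: the third positional argument of npt.assert_allclose is the relative tolerance, so each correlation with ground truth has to land within 5% of 1. Spelled out with the keyword for clarity:

import numpy.testing as npt

npt.assert_allclose([0.97, 0.99], 1, rtol=.05)    # passes: both within 5% of 1
# npt.assert_allclose([0.94], 1, rtol=.05)        # would raise: 0.94 is 6% away
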
diff --git a/caiman/utils/stats.py b/caiman/utils/stats.py
index 94b8a904b..36c0d3e4a 100644
--- a/caiman/utils/stats.py
+++ b/caiman/utils/stats.py
@@ -1,22 +1,23 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
-
 """
 Created on Thu Oct 20 13:49:57 2016
 
 @author: agiovann
 """
-import logging
+
 from builtins import range
 from past.utils import old_div
+
+import logging
+import numpy as np
+import scipy
+
 try:
     import numba
 except:
     pass
 
-import numpy as np
-import scipy
-
 #%%
@@ -29,7 +30,9 @@ def mode_robust_fast(inputData, axis=None):
 
     if axis is not None:
-        def fnc(x): return mode_robust_fast(x)
+
+        def fnc(x):
+            return mode_robust_fast(x)
+
         dataMode = np.apply_along_axis(fnc, axis, inputData)
     else:
         # Create the function that we can use for the half-sample mode
@@ -40,6 +43,8 @@ def fnc(x): return mode_robust_fast(x)
         dataMode = _hsm(data)
 
     return dataMode
+
+
 #%%
@@ -50,7 +55,10 @@ def mode_robust(inputData, axis=None, dtype=None):
     .. versionadded: 1.0.3
     """
     if axis is not None:
-        def fnc(x): return mode_robust(x, dtype=dtype)
+
+        def fnc(x):
+            return mode_robust(x, dtype=dtype)
+
         dataMode = np.apply_along_axis(fnc, axis, inputData)
     else:
         # Create the function that we can use for the half-sample mode
@@ -95,6 +103,7 @@ def _hsm(data):
 
     return dataMode
 
+
 #%%
 
 #@numba.jit("void(f4[:])")
@@ -128,14 +137,17 @@ def _hsm(data):
 
 #%% kernel density estimation
 
-
-def mode_robust_kde(inputData, axis = None):
+
+
+def mode_robust_kde(inputData, axis=None):
     """
     Extracting the dataset of the mode using kernel density estimation
     """
     if axis is not None:
 
-        def fnc(x): return mode_robust_kde(x)
+        def fnc(x):
+            return mode_robust_kde(x)
+
         dataMode = np.apply_along_axis(fnc, axis, inputData)
     else:
         # Create the function that we can use for the half-sample mode
@@ -144,7 +156,8 @@ def fnc(x): return mode_robust_kde(x)
 
     return dataMode
 
-def df_percentile(inputData, axis = None):
+
+def df_percentile(inputData, axis=None):
     """
     Extracting the percentile of the data where the mode occurs and its value.
     Used to determine the filtering level for DF/F extraction. Note that
@@ -152,7 +165,9 @@ def df_percentile(inputData, axis = None):
     """
     if axis is not None:
 
-        def fnc(x): return df_percentile(x)
+        def fnc(x):
+            return df_percentile(x)
+
         result = np.apply_along_axis(fnc, axis, inputData)
         data_prct = result[:, 0]
         val = result[:, 1]
@@ -164,17 +179,15 @@ def fnc(x): return df_percentile(x)
                 bandwidth, mesh, density, cdf = kde(inputData)
                 err = False
             except:
-                logging.warning('Percentile computation failed. Duplicating ' +
-                                'and trying again.')
+                logging.warning('Percentile computation failed. Duplicating ' + 'and trying again.')
                 if type(inputData) is not list:
                     inputData = inputData.tolist()
                 inputData += inputData
 
-        data_prct = cdf[np.argmax(density)]*100
+        data_prct = cdf[np.argmax(density)] * 100
         val = mesh[np.argmax(density)]
 
         if data_prct >= 100 or data_prct < 0:
-            logging.warning('Invalid percentile computed possibly due ' +
-                            'short trace. Duplicating and recomuputing.')
+            logging.warning('Invalid percentile computed possibly due to ' + 'short trace. Duplicating and recomputing.')
             if type(inputData) is not list:
                 inputData = inputData.tolist()
             inputData *= 2
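Given the docstring above, a hedged usage sketch for df_percentile, assuming (as the variable names in this hunk suggest) that it returns the percentile at which the KDE mode occurs together with the mode's value:

import numpy as np
from caiman.utils.stats import df_percentile

trace = 5.0 + 0.1 * np.random.randn(10000)    # synthetic baseline-dominated trace
data_prct, val = df_percentile(trace)
print(data_prct, val)                         # percentile of the mode, and the mode itself
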
@@ -196,6 +209,7 @@ def fnc(x): return df_percentile(x)
 
     Updated 1-23-2013
     """
+
 
 def kde(data, N=None, MIN=None, MAX=None):
 
     # Parameters to set up the mesh on which to calculate
@@ -204,55 +218,54 @@ def kde(data, N=None, MIN=None, MAX=None):
     minimum = min(data)
     maximum = max(data)
     Range = maximum - minimum
-    MIN = minimum - Range/10 if MIN is None else MIN
-    MAX = maximum + Range/10 if MAX is None else MAX
+    MIN = minimum - Range / 10 if MIN is None else MIN
+    MAX = maximum + Range / 10 if MAX is None else MAX
 
     # Range of the data
-    R = MAX-MIN
+    R = MAX - MIN
 
     # Histogram the data to get a crude first approximation of the density
     M = len(data)
     DataHist, bins = scipy.histogram(data, bins=N, range=(MIN, MAX))
-    DataHist = DataHist/M
+    DataHist = DataHist / M
     DCTData = scipy.fftpack.dct(DataHist, norm=None)
 
-    I = [iN*iN for iN in range(1, N)]
-    SqDCTData = (DCTData[1:]/2)**2
+    I = [iN * iN for iN in range(1, N)]
+    SqDCTData = (DCTData[1:] / 2)**2
 
     # The fixed point calculation finds the bandwidth = t_star
     guess = 0.1
     try:
-        t_star = scipy.optimize.brentq(fixed_point, 0, guess,
-                                       args=(M, I, SqDCTData))
+        t_star = scipy.optimize.brentq(fixed_point, 0, guess, args=(M, I, SqDCTData))
     except ValueError:
         print('Oops!')
         return None
 
     # Smooth the DCTransformed data using t_star
-    SmDCTData = DCTData*scipy.exp(-scipy.arange(N)**2*scipy.pi**2*t_star/2)
+    SmDCTData = DCTData * scipy.exp(-scipy.arange(N)**2 * scipy.pi**2 * t_star / 2)
     # Inverse DCT to get density
-    density = scipy.fftpack.idct(SmDCTData, norm=None)*N/R
-    mesh = [(bins[i]+bins[i+1])/2 for i in range(N)]
-    bandwidth = scipy.sqrt(t_star)*R
+    density = scipy.fftpack.idct(SmDCTData, norm=None) * N / R
+    mesh = [(bins[i] + bins[i + 1]) / 2 for i in range(N)]
+    bandwidth = scipy.sqrt(t_star) * R
 
-    density = density/scipy.trapz(density, mesh)
-    cdf = np.cumsum(density)*(mesh[1]-mesh[0])
+    density = density / scipy.trapz(density, mesh)
+    cdf = np.cumsum(density) * (mesh[1] - mesh[0])
 
     return bandwidth, mesh, density, cdf
 
 
 def fixed_point(t, M, I, a2):
-    l=7
+    l = 7
     I = scipy.float64(I)
     M = scipy.float64(M)
     a2 = scipy.float64(a2)
-    f = 2*scipy.pi**(2*l)*scipy.sum(I**l*a2*scipy.exp(-I*scipy.pi**2*t))
+    f = 2 * scipy.pi**(2 * l) * scipy.sum(I**l * a2 * scipy.exp(-I * scipy.pi**2 * t))
     for s in range(l, 1, -1):
-        K0 = scipy.prod(range(1, 2*s, 2))/scipy.sqrt(2*scipy.pi)
-        const = (1 + (1/2)**(s + 1/2))/3
-        time=(2*const*K0/M/f)**(2/(3+2*s))
-        f=2*scipy.pi**(2*s)*scipy.sum(I**s*a2*scipy.exp(-I*scipy.pi**2*time))
-    return t-(2*M*scipy.sqrt(scipy.pi)*f)**(-2/5)
+        K0 = scipy.prod(range(1, 2 * s, 2)) / scipy.sqrt(2 * scipy.pi)
+        const = (1 + (1 / 2)**(s + 1 / 2)) / 3
+        time = (2 * const * K0 / M / f)**(2 / (3 + 2 * s))
+        f = 2 * scipy.pi**(2 * s) * scipy.sum(I**s * a2 * scipy.exp(-I * scipy.pi**2 * time))
+    return t - (2 * M * scipy.sqrt(scipy.pi) * f)**(-2 / 5)
 
 
 def csc_column_remove(A, ind):
@@ -265,14 +278,13 @@ def csc_column_remove(A, ind):
     """
     d1, d2 = A.shape
     if 'csc_matrix' not in str(type(A)):
-        logging.warning("Original matrix not in csc_format. Converting it" +
-                        " anyway.")
+        logging.warning("Original matrix not in csc_format. Converting it" + " anyway.")
         A = scipy.sparse.csc_matrix(A)
     indptr = A.indptr
     ind_diff = np.diff(A.indptr).tolist()
     ind_sort = sorted(ind, reverse=True)
-    data_list = [A.data[indptr[i]:indptr[i+1]] for i in range(d2)]
-    indices_list = [A.indices[indptr[i]:indptr[i+1]] for i in range(d2)]
+    data_list = [A.data[indptr[i]:indptr[i + 1]] for i in range(d2)]
+    indices_list = [A.indices[indptr[i]:indptr[i + 1]] for i in range(d2)]
     for i in ind_sort:
         del data_list[i]
         del indices_list[i]
@@ -280,6 +292,5 @@ def csc_column_remove(A, ind):
     indptr_final = np.cumsum([0] + ind_diff)
     data_final = [item for sublist in data_list for item in sublist]
     indices_final = [item for sublist in indices_list for item in sublist]
-    A = scipy.sparse.csc_matrix((data_final, indices_final, indptr_final),
-                                shape=[d1, d2 - len(ind)])
+    A = scipy.sparse.csc_matrix((data_final, indices_final, indptr_final), shape=[d1, d2 - len(ind)])
     return A
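csc_column_remove, reformatted above, drops the listed columns from a CSC matrix by splicing its data/indices/indptr arrays. A minimal usage sketch with illustrative values:

import numpy as np
import scipy.sparse
from caiman.utils.stats import csc_column_remove

A = scipy.sparse.csc_matrix(np.arange(12).reshape(3, 4))
B = csc_column_remove(A, [1, 3])    # drop columns 1 and 3
assert B.shape == (3, 2)
assert np.array_equal(B.toarray(), A.toarray()[:, [0, 2]])
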
diff --git a/caimanmanager.py b/caimanmanager.py
index 0eb963617..539cd809f 100755
--- a/caimanmanager.py
+++ b/caimanmanager.py
@@ -156,18 +156,24 @@ def comparitor_all_left_only_files(comparitor, path_prepend: str):
             ret.append(*to_append)
     return ret
 
+
 ###############
 #
 
+
 def system_diagnose() -> None:
     # Print out some diagnostic information useful for tickets.
-    platstring = platform.platform()  # Do not try to parse this. Format is variable by platform.
+    platstring = platform.platform()                 # Do not try to parse this. Format is variable by platform.
     py_version = platform.python_version()
-    py_family = platform.python_implementation()  # Probably CPython, but if not that's fun.
+    py_family = platform.python_implementation()     # Probably CPython, but if not that's fun.
     memory = psutil.virtual_memory()
     pcpu = psutil.cpu_count(logical=False)
     swap = psutil.swap_memory()
-    print("\n==============\n".join([f"Platform: {platstring}", f"Python version: {py_version}", f"Python Family: {py_family}", f"Memory: {memory}", f"Physical CPUs: {pcpu}", f"Swap: {swap}"]))
+    print("\n==============\n".join([
+        f"Platform: {platstring}", f"Python version: {py_version}", f"Python Family: {py_family}", f"Memory: {memory}",
+        f"Physical CPUs: {pcpu}", f"Swap: {swap}"
+    ]))
+
 
 ###############

From 162712a1e115e6fc0e477b89f2b07317b3e0f1eb Mon Sep 17 00:00:00 2001
From: Pat Gunn
Date: Fri, 9 Aug 2019 17:06:31 -0400
Subject: [PATCH 2/2] Prevent some reformatting we don't like

---
 caiman/tests/comparison_humans_online.py | 25 +++++++++++++++++-------
 caiman/tests/test_temporal.py            | 11 +++++++++--
 2 files changed, 27 insertions(+), 9 deletions(-)

diff --git a/caiman/tests/comparison_humans_online.py b/caiman/tests/comparison_humans_online.py
index 428ab8d10..7fad4a378 100644
--- a/caiman/tests/comparison_humans_online.py
+++ b/caiman/tests/comparison_humans_online.py
@@ -94,6 +94,8 @@
 
 params_movie = [{}] * 5    # set up list of dictionaries
 
+# yapf: disable
+
 # % neurofinder.00.00
 params_movie[0] = {
     'folder_name': 'N.00.00/',
@@ -101,11 +103,18 @@
     'decay_time': 0.4,
     'epochs': 3,
     'fr': 16,
-    'gSig': [8, 8],  # expected half size of neurons
+    'gSig': [8, 8],        # expected half size of neurons
     'gnb': 2,
 }
-  # % neurofinder.01.01
-params_movie[1] = {'folder_name': 'N.01.01/', 'ds_factor': 1, 'fr': 8, 'gnb': 1, 'decay_time': 1.4, 'gSig': [6, 6]}
+# % neurofinder.01.01
+params_movie[1] = {
+    'folder_name': 'N.01.01/',
+    'ds_factor': 1,
+    'fr': 8,
+    'gnb': 1,
+    'decay_time': 1.4,
+    'gSig': [6, 6]
+}
 
 # % neurofinder.03.00.test
 params_movie[2] = {
@@ -113,7 +122,7 @@
     'ds_factor': 2,
     'fr': 7,
     'decay_time': 0.4,
-    'gSig': [12, 12],  # expected half size of neurons
+    'gSig': [12, 12],      # expected half size of neurons
     'gnb': 3,
 }
 
@@ -125,7 +134,7 @@
     'fr': 10,
     'decay_time': .75,
     'gnb': 3,
-    'gSig': [6, 6],  # expected half size of neurons
+    'gSig': [6, 6],        # expected half size of neurons
 }
 
 # % neurofinder.04.00.test
@@ -134,11 +143,13 @@
     'epochs': 2,
     'ds_factor': 1,
     'fr': 8,
-    'gSig': [7, 7],  # expected half size of neurons
-    'decay_time': 1.2,  # rough length of a transient
+    'gSig': [7, 7],        # expected half size of neurons
+    'decay_time': 1.2,     # rough length of a transient
     'gnb': 3,
 }
 
+# yapf: enable
+
 all_results = dict()
 
 # iterate over all datasets to be processed
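For readers unfamiliar with the markers added above: # yapf: disable and # yapf: enable are standard yapf directives, and everything between them is left exactly as written on later formatting passes. The same trick protects any hand-aligned literal, for example:

# yapf: disable
lookup = {'short':      1,
          'longer_key': 2}
# yapf: enable
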
diff --git a/caiman/tests/test_temporal.py b/caiman/tests/test_temporal.py
index 74179f1b1..5049bf6a4 100644
--- a/caiman/tests/test_temporal.py
+++ b/caiman/tests/test_temporal.py
@@ -10,7 +10,14 @@ def test_make_G_matrix():
     T = 6
     G = cnmf.temporal.make_G_matrix(T, g)
     G = G.todense()
-    true_G = np.matrix([[1., 0., 0., 0., 0., 0.], [-1., 1., 0., 0., 0., 0.], [-2., -1., 1., 0., 0., 0.],
-                        [-3., -2., -1., 1., 0., 0.], [0., -3., -2., -1., 1., 0.], [0., 0., -3., -2., -1., 1.]])
+    # yapf: disable
+    true_G = np.matrix(
+        [[1., 0., 0., 0., 0., 0.],
+         [-1., 1., 0., 0., 0., 0.],
+         [-2., -1., 1., 0., 0., 0.],
+         [-3., -2., -1., 1., 0., 0.],
+         [0., -3., -2., -1., 1., 0.],
+         [0., 0., -3., -2., -1., 1.]])
+    # yapf: enable
 
     npt.assert_allclose(G, true_G)
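Finally, a note on reproducing a pass like PATCH 1/2: yapf can be driven from Python as well as from the command line. A hedged sketch using yapf's FormatCode API (the style configuration actually used for this patch is not recorded here, so the default style is assumed):

from yapf.yapflib.yapf_api import FormatCode

source = "x = {'a':1,'b':2}\n"
formatted, changed = FormatCode(source)    # returns (new_text, did_anything_change)
print(formatted)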