diff --git a/caiman/cluster.py b/caiman/cluster.py
index 46a2b2352..2eb97a39c 100644
--- a/caiman/cluster.py
+++ b/caiman/cluster.py
@@ -1,6 +1,5 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
-
 """
 functions related to the creation and management of the cluster
 We put arrays on disk as raw bytes, extending along the first dimension.
@@ -40,7 +39,12 @@
 logger = logging.getLogger(__name__)
 
-def extract_patch_coordinates(dims:Tuple, rf:Union[List,Tuple], stride:Union[List[int],Tuple], border_pix:int=0, indices=[slice(None)]*2) -> Tuple[List, List]:
+
+def extract_patch_coordinates(dims: Tuple,
+                              rf: Union[List, Tuple],
+                              stride: Union[List[int], Tuple],
+                              border_pix: int = 0,
+                              indices=[slice(None)] * 2) -> Tuple[List, List]:
     """
     Partition the FOV in patches and return the indexed in 2D and 1D (flatten, order='F') formats
 
@@ -58,25 +62,24 @@ def extract_patch_coordinates(dims:Tuple, rf:Union[List,Tuple], stride:Union[Lis
     sl_start = [0 if sl.start is None else sl.start for sl in indices]
     sl_stop = [dim if sl.stop is None else sl.stop for (sl, dim) in zip(indices, dims)]
-    sl_step = [1 for sl in indices] # not used
+    sl_step = [1 for sl in indices]    # not used
     dims_large = dims
     dims = np.minimum(np.array(dims) - border_pix, sl_stop) - np.maximum(border_pix, sl_start)
 
     coords_flat = []
     shapes = []
-    iters = [list(range(rf[i], dims[i] - rf[i], 2 * rf[i] - stride[i])) + [dims[i] - rf[i]]
-             for i in range(len(dims))]
+    iters = [list(range(rf[i], dims[i] - rf[i], 2 * rf[i] - stride[i])) + [dims[i] - rf[i]] for i in range(len(dims))]
 
     coords = np.empty(list(map(len, iters)) + [len(dims)], dtype=np.object)
     for count_0, xx in enumerate(iters[0]):
         coords_x = np.arange(xx - rf[0], xx + rf[0] + 1)
         coords_x = coords_x[(coords_x >= 0) & (coords_x < dims[0])]
-        coords_x += border_pix*0 + np.maximum(sl_start[0], border_pix)
+        coords_x += border_pix * 0 + np.maximum(sl_start[0], border_pix)
 
         for count_1, yy in enumerate(iters[1]):
             coords_y = np.arange(yy - rf[1], yy + rf[1] + 1)
             coords_y = coords_y[(coords_y >= 0) & (coords_y < dims[1])]
-            coords_y += border_pix*0 + np.maximum(sl_start[1], border_pix)
+            coords_y += border_pix * 0 + np.maximum(sl_start[1], border_pix)
 
             if len(dims) == 2:
                 idxs = np.meshgrid(coords_x, coords_y)
@@ -86,7 +89,7 @@ def extract_patch_coordinates(dims:Tuple, rf:Union[List,Tuple], stride:Union[Lis
                 coords_ = np.ravel_multi_index(idxs, dims_large, order='F')
                 coords_flat.append(coords_.flatten())
-            else: # 3D data
+            else:    # 3D data
 
                 if border_pix > 0:
                     raise Exception(
@@ -109,7 +112,8 @@ def extract_patch_coordinates(dims:Tuple, rf:Union[List,Tuple], stride:Union[Lis
 
 #%%
-def apply_to_patch(mmap_file, shape:Tuple[Any,Any,Any], dview, rf, stride, function, *args, **kwargs) -> Tuple[List,Any,Tuple]:
+def apply_to_patch(mmap_file, shape: Tuple[Any, Any, Any], dview, rf, stride, function, *args,
+                   **kwargs) -> Tuple[List, Any, Tuple]:
     """
     apply function to patches in parallel or not
 
@@ -152,11 +156,9 @@ def apply_to_patch(mmap_file, shape:Tuple[Any,Any,Any], dview, rf, stride, funct
         stride1 = stride
         stride2 = stride
 
-    idx_flat, idx_2d = extract_patch_coordinates(
-        (d1, d2), rf=(rf1, rf2), stride=(stride1, stride2))
+    idx_flat, idx_2d = extract_patch_coordinates((d1, d2), rf=(rf1, rf2), stride=(stride1, stride2))
 
-    shape_grid = tuple(np.ceil(
-        (d1 * 1. / (rf1 * 2 - stride1), d2 * 1. / (rf2 * 2 - stride2))).astype(np.int))
+    shape_grid = tuple(np.ceil((d1 * 1. / (rf1 * 2 - stride1), d2 * 1. / (rf2 * 2 - stride2))).astype(np.int))
     if d1 <= rf1 * 2:
         shape_grid = (1, shape_grid[1])
     if d2 <= rf2 * 2:
@@ -167,8 +169,7 @@ def apply_to_patch(mmap_file, shape:Tuple[Any,Any,Any], dview, rf, stride, funct
 
     args_in = []
     for id_f, id_2d in zip(idx_flat[:], idx_2d[:]):
-        args_in.append((mmap_file.filename, id_f,
-                        id_2d, function, args, kwargs))
+        args_in.append((mmap_file.filename, id_f, id_2d, function, args, kwargs))
 
     logger.debug("Flat index is of length " + str(len(idx_flat)))
     if dview is not None:
@@ -184,10 +185,12 @@ def apply_to_patch(mmap_file, shape:Tuple[Any,Any,Any], dview, rf, stride, funct
         file_res = list(map(function_place_holder, args_in))
 
     return file_res, idx_flat, shape_grid
+
+
 #%%
 
-def function_place_holder(args_in:Tuple) -> np.ndarray:
+def function_place_holder(args_in: Tuple) -> np.ndarray:
     #todo: todocument
 
     file_name, idx_, shapes, function, args, kwargs = args_in
@@ -195,8 +198,7 @@ def function_place_holder(args_in:Tuple) -> np.ndarray:
     Yr = Yr[idx_, :]
     Yr.filename = file_name
     _, T = Yr.shape
-    Y = np.reshape(Yr, (shapes[1], shapes[0], T),
-                   order='F').transpose([2, 0, 1])
+    Y = np.reshape(Yr, (shapes[1], shapes[0], T), order='F').transpose([2, 0, 1])
     [T, d1, d2] = Y.shape
 
     res_fun = function(Y, *args, **kwargs)
@@ -208,10 +210,11 @@ def function_place_holder(args_in:Tuple) -> np.ndarray:
 
     return res_fun
 
+
 #%%
 
 
-def start_server(slurm_script:str=None, ipcluster:str="ipcluster", ncpus:int=None) -> None:
+def start_server(slurm_script: str = None, ipcluster: str = "ipcluster", ncpus: int = None) -> None:
     """
     programmatically start the ipyparallel server
 
@@ -220,7 +223,7 @@ def start_server(slurm_script:str=None, ipcluster:str="ipcluster", ncpus:int=Non
         number of processors
 
     ipcluster : str
-        ipcluster binary file name; requires 4 path separators on Windows. ipcluster="C:\\\\Anaconda2\\\\Scripts\\\\ipcluster.exe"
+        ipcluster binary file name; requires 4 path separators on Windows. ipcluster="C:\\\\Anaconda3\\\\Scripts\\\\ipcluster.exe"
         Default: "ipcluster"
     """
     logger.info("Starting cluster...")
@@ -230,21 +233,21 @@ def start_server(slurm_script:str=None, ipcluster:str="ipcluster", ncpus:int=Non
     if slurm_script is None:
 
         if ipcluster == "ipcluster":
-            subprocess.Popen(
-                "ipcluster start -n {0}".format(ncpus), shell=True, close_fds=(os.name != 'nt'))
+            subprocess.Popen("ipcluster start -n {0}".format(ncpus), shell=True, close_fds=(os.name != 'nt'))
        else:
-            subprocess.Popen(shlex.split(
-                "{0} start -n {1}".format(ipcluster, ncpus)), shell=True, close_fds=(os.name != 'nt'))
+            subprocess.Popen(shlex.split("{0} start -n {1}".format(ipcluster, ncpus)),
+                             shell=True,
+                             close_fds=(os.name != 'nt'))
         time.sleep(1.5)
         # Check that all processes have started
         client = ipyparallel.Client()
         time.sleep(1.5)
         while len(client) < ncpus:
-            sys.stdout.write(".") # Give some visual feedback of things starting
-            sys.stdout.flush() # (de-buffered)
+            sys.stdout.write(".")    # Give some visual feedback of things starting
+            sys.stdout.flush()    # (de-buffered)
             time.sleep(0.5)
         logger.debug('Making sure everything is up and running')
-        client.direct_view().execute('__a=1', block=True) # when done on all, we're set to go
+        client.direct_view().execute('__a=1', block=True)    # when done on all, we're set to go
     else:
         shell_source(slurm_script)
         pdir, profile = os.environ['IPPPDIR'], os.environ['IPPPROFILE']
@@ -256,18 +259,18 @@ def start_server(slurm_script:str=None, ipcluster:str="ipcluster", ncpus:int=Non
     c.close()
     sys.stdout.write("start_server: done\n")
 
-def shell_source(script:str) -> None:
+
+def shell_source(script: str) -> None:
     """ Run a source-style bash script, copy resulting env vars to current process. """
     # XXX This function is weird and maybe not a good idea. People easily might expect
     # it to handle conditionals. Maybe just make them provide a key-value file
     #introduce echo to indicate the end of the output
-    pipe = subprocess.Popen(". %s; env; echo 'FINISHED_CLUSTER'" %
-                            script, stdout=subprocess.PIPE, shell=True)
+    pipe = subprocess.Popen(". %s; env; echo 'FINISHED_CLUSTER'" % script, stdout=subprocess.PIPE, shell=True)
 
     env = dict()
     while True:
         line = pipe.stdout.readline().decode('utf-8').rstrip()
-        if 'FINISHED_CLUSTER' in line: # find the keyword set above to determine the end of the output stream
+        if 'FINISHED_CLUSTER' in line:    # find the keyword set above to determine the end of the output stream
             break
         logger.debug("shell_source parsing line[" + str(line) + "]")
         lsp = str(line).split("=", 1)
@@ -277,7 +280,8 @@ def shell_source(script:str) -> None:
     os.environ.update(env)
     pipe.stdout.close()
 
-def stop_server(ipcluster:str='ipcluster', pdir:str=None, profile:str=None, dview=None) -> None:
+
+def stop_server(ipcluster: str = 'ipcluster', pdir: str = None, profile: str = None, dview=None) -> None:
     """
     programmatically stops the ipyparallel server
 
@@ -315,7 +319,7 @@ def stop_server(ipcluster:str='ipcluster', pdir:str=None, profile:str=None, dvie
         try:
             shutil.rmtree('./log/')
         except:
-            logger.info('creating log folder') # FIXME Not what this means
+            logger.info('creating log folder')    # FIXME Not what this means
 
         files = glob.glob('*.log')
         os.mkdir('./log')
@@ -325,11 +329,15 @@ def stop_server(ipcluster:str='ipcluster', pdir:str=None, profile:str=None, dvie
     else:
 
         if ipcluster == "ipcluster":
-            proc = subprocess.Popen(
-                "ipcluster stop", shell=True, stderr=subprocess.PIPE, close_fds=(os.name != 'nt'))
+            proc = subprocess.Popen("ipcluster stop",
+                                    shell=True,
+                                    stderr=subprocess.PIPE,
+                                    close_fds=(os.name != 'nt'))
         else:
             proc = subprocess.Popen(shlex.split(ipcluster + " stop"),
-                                    shell=True, stderr=subprocess.PIPE, close_fds=(os.name != 'nt'))
+                                    shell=True,
+                                    stderr=subprocess.PIPE,
+                                    close_fds=(os.name != 'nt'))
 
         line_out = proc.stderr.readline()
         if b'CRITICAL' in line_out:
@@ -348,10 +356,15 @@ def stop_server(ipcluster:str='ipcluster', pdir:str=None, profile:str=None, dvie
         proc.stderr.close()
 
     logger.info("stop_cluster(): done")
+
+
 #%%
 
 
-def setup_cluster(backend:str='multiprocessing', n_processes:int=None, single_thread:bool=False, ignore_preexisting:bool=False) -> Tuple[Any, Any, Optional[int]]:
+def setup_cluster(backend: str = 'multiprocessing',
+                  n_processes: int = None,
+                  single_thread: bool = False,
+                  ignore_preexisting: bool = False) -> Tuple[Any, Any, Optional[int]]:
     """Setup and/or restart a parallel cluster.
     Args:
         backend: str
@@ -386,7 +399,7 @@ def setup_cluster(backend:str='multiprocessing', n_processes:int=None, single_th
             stop_server()
         except:
             logger.debug('Nothing to stop')
-        slurm_script = '/mnt/home/agiovann/SOFTWARE/CaImAn/SLURM/slurmStart.sh'
+        slurm_script = '/mnt/home/agiovann/SOFTWARE/CaImAn/SLURM/slurmStart.sh'    # FIXME: Make this a documented environment variable
         logger.info([str(n_processes), slurm_script])
         start_server(slurm_script=slurm_script, ncpus=n_processes)
         pdir, profile = os.environ['IPPPDIR'], os.environ['IPPPROFILE']
@@ -397,7 +410,7 @@ def setup_cluster(backend:str='multiprocessing', n_processes:int=None, single_th
         stop_server()
         start_server(ncpus=n_processes)
         c = Client()
-        logger.info('Started ipyparallel cluster: Using ' + str(len(c)) + ' processes')
+        logger.info(f'Started ipyparallel cluster: Using {len(c)} processes')
         dview = c[:len(c)]
 
     elif (backend == 'multiprocessing') or (backend == 'local'):
@@ -411,16 +424,16 @@ def setup_cluster(backend:str='multiprocessing', n_processes:int=None, single_th
                 'A cluster is already runnning. Terminate with dview.terminate() if you want to restart.')
         if (platform.system() == 'Darwin') and (sys.version_info > (3, 0)):
             try:
-                if 'kernel' in get_ipython().trait_names(): # type: ignore
-                    # If you're on OSX and you're running under Jupyter or Spyder,
-                    # which already run the code in a forkserver-friendly way, this
-                    # can eliminate some setup and make this a reasonable approach.
-                    # Otherwise, seting VECLIB_MAXIMUM_THREADS=1 or using a different
-                    # blas/lapack is the way to avoid the issues.
-                    # See https://github.com/flatironinstitute/CaImAn/issues/206 for more
-                    # info on why we're doing this (for now).
+                if 'kernel' in get_ipython().trait_names():    # type: ignore
+                    # If you're on OSX and you're running under Jupyter or Spyder,
+                    # which already run the code in a forkserver-friendly way, this
+                    # can eliminate some setup and make this a reasonable approach.
+                    # Otherwise, seting VECLIB_MAXIMUM_THREADS=1 or using a different
+                    # blas/lapack is the way to avoid the issues.
+                    # See https://github.com/flatironinstitute/CaImAn/issues/206 for more
+                    # info on why we're doing this (for now).
                     multiprocessing.set_start_method('forkserver', force=True)
-            except: # If we're not running under ipython, don't do anything.
+            except:    # If we're not running under ipython, don't do anything.
                 pass
 
         c = None
diff --git a/caiman/mmapping.py b/caiman/mmapping.py
index 8160bccf0..56a4d41c4 100644
--- a/caiman/mmapping.py
+++ b/caiman/mmapping.py
@@ -1,6 +1,5 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
-
 """
 Created on Thu Oct 20 11:33:35 2016
 
@@ -26,15 +25,17 @@ import caiman as cm
 from caiman.paths import memmap_frames_filename
 
-def prepare_shape(mytuple:Tuple) -> Tuple:
+
+def prepare_shape(mytuple: Tuple) -> Tuple:
     """ This promotes the elements inside a shape into np.uint64. It is intended to prevent overflows
         with some numpy operations that are sensitive to it, e.g. np.memmap """
     if not isinstance(mytuple, tuple):
         raise Exception("Internal error: prepare_shape() passed a non-tuple")
     return tuple(map(lambda x: np.uint64(x), mytuple))
 
+
 #%%
-def load_memmap(filename:str, mode:str='r') -> Tuple[Any,Tuple,int]:
+def load_memmap(filename: str, mode: str = 'r') -> Tuple[Any, Tuple, int]:
     """ Load a memory mapped file created by the function save_memmap
 
     Args:
@@ -63,11 +64,9 @@ def load_memmap(filename:str, mode:str='r') -> Tuple[Any,Tuple,int]:
         # TODO: Eventually get the code to save these in a different dir
         file_to_load = filename
         filename = os.path.split(filename)[-1]
-        fpart = filename.split('_')[1:-1] # The filename encodes the structure of the map
-        d1, d2, d3, T, order = int(fpart[-9]), int(fpart[-7]
-                                   ), int(fpart[-5]), int(fpart[-1]), fpart[-3]
-        Yr = np.memmap(file_to_load, mode=mode, shape=prepare_shape((
-            d1 * d2 * d3, T)), dtype=np.float32, order=order)
+        fpart = filename.split('_')[1:-1]    # The filename encodes the structure of the map
+        d1, d2, d3, T, order = int(fpart[-9]), int(fpart[-7]), int(fpart[-5]), int(fpart[-1]), fpart[-3]
+        Yr = np.memmap(file_to_load, mode=mode, shape=prepare_shape((d1 * d2 * d3, T)), dtype=np.float32, order=order)
         if d3 == 1:
             return (Yr, (d1, d2), T)
         else:
@@ -76,9 +75,19 @@ def load_memmap(filename:str, mode:str='r') -> Tuple[Any,Tuple,int]:
         logging.error("Unknown extension for file " + str(filename))
         raise Exception('Unknown file extension (should be .mmap)')
 
+
 #%%
-def save_memmap_each(fnames:List[str], dview=None, base_name:str=None, resize_fact=(1, 1, 1), remove_init:int=0,
-                     idx_xy=None, xy_shifts=None, add_to_movie:float=0, border_to_0:int=0, order:str='C', slices=None) -> List[str]:
+def save_memmap_each(fnames: List[str],
+                     dview=None,
+                     base_name: str = None,
+                     resize_fact=(1, 1, 1),
+                     remove_init: int = 0,
+                     idx_xy=None,
+                     xy_shifts=None,
+                     add_to_movie: float = 0,
+                     border_to_0: int = 0,
+                     order: str = 'C',
+                     slices=None) -> List[str]:
     """
     Create several memory mapped files using parallel processing
 
@@ -129,11 +138,16 @@ def save_memmap_each(fnames:List[str], dview=None, base_name:str=None, resize_fa
 
     for idx, f in enumerate(fnames):
         if base_name is not None:
-            pars.append([f, base_name + '{:04d}'.format(idx), resize_fact[idx], remove_init,
-                         idx_xy, order, xy_shifts[idx], add_to_movie, border_to_0, slices])
+            pars.append([
+                f, base_name + '{:04d}'.format(idx), resize_fact[idx], remove_init, idx_xy, order, xy_shifts[idx],
+                add_to_movie, border_to_0, slices
+            ])
         else:
-            pars.append([f, os.path.splitext(f)[0], resize_fact[idx], remove_init, idx_xy, order,
-                         xy_shifts[idx], add_to_movie, border_to_0, slices])
+            pars.append([
+                f,
+                os.path.splitext(f)[0], resize_fact[idx], remove_init, idx_xy, order, xy_shifts[idx], add_to_movie,
+                border_to_0, slices
+            ])
 
     # Perform the job using whatever computing framework we're set to use
     if dview is not None:
@@ -148,7 +162,8 @@ def save_memmap_each(fnames:List[str], dview=None, base_name:str=None, resize_fa
 
 
 #%%
-def save_memmap_join(mmap_fnames:List[str], base_name:str=None, n_chunks:int=20, dview=None, add_to_mov=0) -> str:
+def save_memmap_join(mmap_fnames: List[str], base_name: str = None, n_chunks: int = 20, dview=None,
+                     add_to_mov=0) -> str:
     """
     Makes a large file memmap from a number of smaller files
 
@@ -169,7 +184,7 @@ def save_memmap_join(mmap_fnames:List[str], base_name:str=None, n_chunks:int=20,
     order = 'C'
     for f in mmap_fnames:
         Yr, dims, T = load_memmap(f)
-        logging.debug((f, T)) # TODO: Add a text header so this isn't just numeric output, but what to say?
+        logging.debug((f, T))    # TODO: Add a text header so this isn't just numeric output, but what to say?
         tot_frames += T
         del Yr
 
@@ -178,15 +193,13 @@ def save_memmap_join(mmap_fnames:List[str], base_name:str=None, n_chunks:int=20,
     if base_name is None:
 
         base_name = mmap_fnames[0]
-        base_name = base_name[:base_name.find(
-            '_d1_')] + '-#-' + str(len(mmap_fnames))
+        base_name = base_name[:base_name.find('_d1_')] + '-#-' + str(len(mmap_fnames))
 
     fname_tot = memmap_frames_filename(base_name, dims, tot_frames, order)
     fname_tot = os.path.join(os.path.split(mmap_fnames[0])[0], fname_tot)
     logging.info("Memmap file for fname_tot: " + str(fname_tot))
 
-    big_mov = np.memmap(fname_tot, mode='w+', dtype=np.float32,
-                        shape=prepare_shape((d, tot_frames)), order='C')
+    big_mov = np.memmap(fname_tot, mode='w+', dtype=np.float32, shape=prepare_shape((d, tot_frames)), order='C')
 
     step = np.int(old_div(d, n_chunks))
     pars = []
@@ -194,7 +207,8 @@ def save_memmap_join(mmap_fnames:List[str], base_name:str=None, n_chunks:int=20,
         pars.append([fname_tot, d, tot_frames, mmap_fnames, ref, ref + step, add_to_mov])
 
     if len(pars[-1]) != 7:
-        raise Exception('You cannot change the number of element in list without changing the statement below (pars[]..)')
+        raise Exception(
+            'You cannot change the number of element in list without changing the statement below (pars[]..)')
     else:
         # last batch should include the leftover pixels
         pars[-1][-2] = d
@@ -215,6 +229,7 @@ def save_memmap_join(mmap_fnames:List[str], base_name:str=None, n_chunks:int=20,
     sys.stdout.flush()
     return fname_tot
 
+
 def my_map(dv, func, args) -> List:
     v = dv
     rc = v.client
@@ -224,7 +239,7 @@ def my_map(dv, func, args) -> List:
     amr = v.map(func, args)
 
     pending = set(amr.msg_ids)
-    results_all:Dict = dict()
+    results_all: Dict = dict()
     counter = 0
     while pending:
         try:
@@ -236,18 +251,18 @@ def my_map(dv, func, args) -> List:
         finished = pending.difference(rc.outstanding)
         # update pending to exclude those that just finished
         pending = pending.difference(finished)
-        if counter%10 == 0:
+        if counter % 10 == 0:
             logging.debug(amr.progress)
         for msg_id in finished:
             # we know these are done, so don't worry about blocking
             ar = rc.get_result(msg_id)
-            logging.debug("job id %s finished on engine %i" % (msg_id, ar.engine_id)) 
+            logging.debug("job id %s finished on engine %i" % (msg_id, ar.engine_id))
             # TODO: Abstract out the ugly bits
             logging.debug("with stdout:")
             logging.debug('    ' + ar.stdout.replace('\n', '\n    ').rstrip())
             logging.debug("and errors:")
             logging.debug('    ' + ar.stderr.replace('\n', '\n    ').rstrip())
-            # note that each job in a map always returns a list of length chunksize
-            # even if chunksize == 1
+        # note that each job in a map always returns a list of length chunksize
+        # even if chunksize == 1
             results_all.update(ar.get_dict())
         counter += 1
@@ -255,25 +270,26 @@ def my_map(dv, func, args) -> List:
     del results_all
     return result_ordered
 
+
 def save_portion(pars) -> int:
     # todo: todocument
     use_mmap_save = False
     big_mov, d, tot_frames, fnames, idx_start, idx_end, add_to_mov = pars
     Ttot = 0
-    Yr_tot = np.zeros((idx_end - idx_start, tot_frames), dtype = np.float32)
+    Yr_tot = np.zeros((idx_end - idx_start, tot_frames), dtype=np.float32)
     logging.debug("Shape of Yr_tot is " + str(Yr_tot.shape))
     for f in fnames:
         logging.debug("Saving portion to " + str(f))
         Yr, _, T = load_memmap(f)
-        Yr_tot[:, Ttot:Ttot + T] = np.ascontiguousarray(Yr[idx_start:idx_end] , dtype = np.float32) + np.float32(add_to_mov)
+        Yr_tot[:, Ttot:Ttot +
+               T] = np.ascontiguousarray(Yr[idx_start:idx_end], dtype=np.float32) + np.float32(add_to_mov)
         Ttot = Ttot + T
         del Yr
 
     logging.debug("Index start and end are " + str(idx_start) + " and " + str(idx_end))
 
     if use_mmap_save:
-        big_mov = np.memmap(big_mov, mode='r+', dtype=np.float32,
-                            shape=prepare_shape((d, tot_frames)), order='C')
+        big_mov = np.memmap(big_mov, mode='r+', dtype=np.float32, shape=prepare_shape((d, tot_frames)), order='C')
         big_mov[idx_start:idx_end, :] = Yr_tot
         del big_mov
     else:
@@ -282,10 +298,12 @@ def save_portion(pars) -> int:
             f.write(Yr_tot)
             computed_position = np.uint64(idx_end * Yr_tot.dtype.itemsize * tot_frames)
             if f.tell() != computed_position:
-            logging.critical(f"Error in mmap portion write: at position {f.tell()}")
-            logging.critical(f"But should be at position {idx_end} * {Yr_tot.dtype.itemsize} * {tot_frames} = {computed_position}")
-            f.close()
-            raise Exception('Internal error in mmapping: Actual position does not match computed position')
+                logging.critical(f"Error in mmap portion write: at position {f.tell()}")
+                logging.critical(
+                    f"But should be at position {idx_end} * {Yr_tot.dtype.itemsize} * {tot_frames} = {computed_position}"
+                )
+                f.close()
+                raise Exception('Internal error in mmapping: Actual position does not match computed position')
 
     del Yr_tot
     logging.debug('done')
@@ -293,24 +311,39 @@ def save_portion(pars) -> int:
 
 #%%
 
-def save_place_holder(pars:List) -> str:
+def save_place_holder(pars: List) -> str:
     """ To use map reduce
     """
     # todo: todocument
 
-    (f, base_name, resize_fact, remove_init, idx_xy, order,
-     xy_shifts, add_to_movie, border_to_0, slices) = pars
+    (f, base_name, resize_fact, remove_init, idx_xy, order, xy_shifts, add_to_movie, border_to_0, slices) = pars
 
-    return save_memmap([f], base_name=base_name, resize_fact=resize_fact, remove_init=remove_init,
-                       idx_xy=idx_xy, order=order, xy_shifts=xy_shifts,
-                       add_to_movie=add_to_movie, border_to_0=border_to_0, slices=slices)
+    return save_memmap([f],
+                       base_name=base_name,
+                       resize_fact=resize_fact,
+                       remove_init=remove_init,
+                       idx_xy=idx_xy,
+                       order=order,
+                       xy_shifts=xy_shifts,
+                       add_to_movie=add_to_movie,
+                       border_to_0=border_to_0,
+                       slices=slices)
 
 
 #%%
-def save_memmap(filenames:List[str], base_name:str='Yr', resize_fact:Tuple=(1, 1, 1), remove_init:int=0, idx_xy:Tuple=None,
-                order:str='F', xy_shifts:Optional[List]=None, is_3D:bool=False, add_to_movie:float=0, border_to_0=0, dview = None,
-                n_chunks:int=100, slices=None) -> str:
-
+def save_memmap(filenames: List[str],
+                base_name: str = 'Yr',
+                resize_fact: Tuple = (1, 1, 1),
+                remove_init: int = 0,
+                idx_xy: Tuple = None,
+                order: str = 'F',
+                xy_shifts: Optional[List] = None,
+                is_3D: bool = False,
+                add_to_movie: float = 0,
+                border_to_0=0,
+                dview=None,
+                n_chunks: int = 100,
+                slices=None) -> str:
     """ Efficiently write data from a list of tif files into a memory mappable file
     Args:
@@ -369,7 +402,7 @@ def save_memmap(filenames:List[str], base_name:str='Yr', resize_fact:Tuple=(1, 1
     recompute_each_memmap = False
     for file__ in filenames:
         if ('order_' + order not in file__) or ('.mmap' not in file__):
-            recompute_each_memmap = True 
+            recompute_each_memmap = True
 
 
     if recompute_each_memmap or (remove_init>0) or (idx_xy is not None)\
@@ -379,16 +412,16 @@ def save_memmap(filenames:List[str], base_name:str='Yr', resize_fact:Tuple=(1, 1
         logging.debug('Distributing memory map over many files')
         # Here we make a bunch of memmap files in the right order. Same parameters
         fname_parts = cm.save_memmap_each(filenames,
-                                          base_name = base_name,
-                                          order = order,
-                                          border_to_0 = border_to_0,
-                                          dview = dview,
-                                          resize_fact = resize_fact,
-                                          remove_init = remove_init,
-                                          idx_xy = idx_xy,
-                                          xy_shifts = xy_shifts,
-                                          slices = slices,
-                                          add_to_movie = add_to_movie)
+                                          base_name=base_name,
+                                          order=order,
+                                          border_to_0=border_to_0,
+                                          dview=dview,
+                                          resize_fact=resize_fact,
+                                          remove_init=remove_init,
+                                          idx_xy=idx_xy,
+                                          xy_shifts=xy_shifts,
+                                          slices=slices,
+                                          add_to_movie=add_to_movie)
     else:
         fname_parts = filenames
 
@@ -396,26 +429,25 @@ def save_memmap(filenames:List[str], base_name:str='Yr', resize_fact:Tuple=(1, 1
         if order == 'F':
             raise Exception('You cannot merge files in F order, they must be in C order for CaImAn')
-
         fname_new = cm.save_memmap_join(fname_parts, base_name=base_name, dview=dview, n_chunks=n_chunks)
     else:
-        # TODO: can be done online
+        # TODO: can be done online
         Ttot = 0
         for idx, f in enumerate(filenames):
-            if isinstance(f, str): # Might not always be filenames.
+            if isinstance(f, str):    # Might not always be filenames.
                 logging.debug(f)
 
             if is_3D:
-                Yr = f if not(isinstance(f, basestring)) else tifffile.imread(f)
+                Yr = f if not (isinstance(f, basestring)) else tifffile.imread(f)
                 if slices is not None:
                     Yr = Yr[slices]
                 else:
-                    if idx_xy is None: #todo remove if not used, superceded by the slices parameter
+                    if idx_xy is None:    #todo remove if not used, superceded by the slices parameter
                         Yr = Yr[remove_init:]
-                    elif len(idx_xy) == 2: #todo remove if not used, superceded by the slices parameter
+                    elif len(idx_xy) == 2:    #todo remove if not used, superceded by the slices parameter
                         Yr = Yr[remove_init:, idx_xy[0], idx_xy[1]]
-                    else: #todo remove if not used, superceded by the slices parameter
+                    else:    #todo remove if not used, superceded by the slices parameter
                         Yr = Yr[remove_init:, idx_xy[0], idx_xy[1], idx_xy[2]]
 
             else:
@@ -439,9 +471,11 @@ def save_memmap(filenames:List[str], base_name:str='Yr', resize_fact:Tuple=(1, 1
                         Yr = np.array(Yr)[remove_init:, idx_xy[0], idx_xy[1], idx_xy[2]]
 
             if border_to_0 > 0:
-                if slices is not None:
+                if slices is not None:
                     if type(slices) is list:
-                        raise Exception('You cannot slice in x and y and then use add_to_movie: if you only want to slice in time do not pass in a list but just a slice object')
+                        raise Exception(
+                            'You cannot slice in x and y and then use add_to_movie: if you only want to slice in time do not pass in a list but just a slice object'
+                        )
 
                 min_mov = Yr.calc_min()
                 Yr[:, :border_to_0, :] = min_mov
@@ -461,21 +495,28 @@ def save_memmap(filenames:List[str], base_name:str='Yr', resize_fact:Tuple=(1, 1
             Yr = np.ascontiguousarray(Yr, dtype=np.float32) + np.float32(0.0001) + np.float32(add_to_movie)
 
             if idx == 0:
-                fname_tot = base_name + '_d1_' + str(dims[0]) + '_d2_' + str(dims[1]) + '_d3_' + str(
-                    1 if len(dims) == 2 else dims[2]) + '_order_' + str(order) # TODO: Rewrite more legibly
+                fname_tot = base_name + '_d1_' + str(
+                    dims[0]) + '_d2_' + str(dims[1]) + '_d3_' + str(1 if len(dims) == 2 else dims[2]) + '_order_' + str(
+                        order)    # TODO: Rewrite more legibly
                 if isinstance(f, str):
                     fname_tot = os.path.join(os.path.split(f)[0], fname_tot)
                 if len(filenames) > 1:
-                    big_mov = np.memmap(fname_tot, mode='w+', dtype=np.float32,
-                                        shape=prepare_shape((np.prod(dims), T)), order=order)
+                    big_mov = np.memmap(fname_tot,
+                                        mode='w+',
+                                        dtype=np.float32,
+                                        shape=prepare_shape((np.prod(dims), T)),
+                                        order=order)
                     big_mov[:, Ttot:Ttot + T] = Yr
                     del big_mov
                 else:
                     logging.debug('SAVING WITH numpy.tofile()')
                     Yr.tofile(fname_tot)
             else:
-                big_mov = np.memmap(fname_tot, dtype=np.float32, mode='r+',
-                                    shape=prepare_shape((np.prod(dims), Ttot + T)), order=order)
+                big_mov = np.memmap(fname_tot,
+                                    dtype=np.float32,
+                                    mode='r+',
+                                    shape=prepare_shape((np.prod(dims), Ttot + T)),
+                                    order=order)
 
                 big_mov[:, Ttot:Ttot + T] = Yr
                 del big_mov
@@ -493,10 +534,12 @@ def save_memmap(filenames:List[str], base_name:str='Yr', resize_fact:Tuple=(1, 1
 
     return fname_new
 
+
 #%%
 
 
-def parallel_dot_product(A:np.ndarray, b, block_size:int=5000, dview=None, transpose=False, num_blocks_per_run=20) -> np.ndarray:
+def parallel_dot_product(A: np.ndarray, b, block_size: int = 5000, dview=None, transpose=False,
+                         num_blocks_per_run=20) -> np.ndarray:
     # todo: todocument
     """ Chunk matrix product between matrix and column vectors
 
@@ -550,11 +593,9 @@ def parallel_dot_product(A:np.ndarray, b, block_size:int=5000, dview=None, trans
     for itera in range(0, len(pars), num_blocks_per_run):
 
         if 'multiprocessing' in str(type(dview)):
-            results = dview.map_async(
-                dot_place_holder, pars[itera:itera + num_blocks_per_run]).get(4294967)
+            results = dview.map_async(dot_place_holder, pars[itera:itera + num_blocks_per_run]).get(4294967)
         else:
-            results = dview.map_sync(
-                dot_place_holder, pars[itera:itera + num_blocks_per_run])
+            results = dview.map_sync(dot_place_holder, pars[itera:itera + num_blocks_per_run])
 
         logging.debug('Processed:' + str([itera, itera + len(results)]))
 
@@ -576,7 +617,7 @@ def parallel_dot_product(A:np.ndarray, b, block_size:int=5000, dview=None, trans
 #%%
 
 
-def dot_place_holder(par:List) -> Tuple:
+def dot_place_holder(par: List) -> Tuple:
     # todo: todocument
 
     A_name, idx_to_pass, b_, transpose = par
@@ -586,10 +627,9 @@ def dot_place_holder(par:List) -> Tuple:
     logging.debug((idx_to_pass[-1]))
     if 'sparse' in str(type(b_)):
         if transpose:
-#            outp = (b_.tocsr()[idx_to_pass].T.dot(
-#                A_[idx_to_pass])).T.astype(np.float32)
-            outp = (b_.T.tocsc()[:, idx_to_pass].dot(
-                A_[idx_to_pass])).T.astype(np.float32)
+            # outp = (b_.tocsr()[idx_to_pass].T.dot(
+            #     A_[idx_to_pass])).T.astype(np.float32)
+            outp = (b_.T.tocsc()[:, idx_to_pass].dot(A_[idx_to_pass])).T.astype(np.float32)
         else:
             outp = (b_.T.dot(A_[idx_to_pass].T)).T.astype(np.float32)
     else:
@@ -603,23 +643,25 @@ def dot_place_holder(par:List) -> Tuple:
 
 #%%
 
-def save_tif_to_mmap_online(movie_iterable, save_base_name='YrOL_', order='C',
-                            add_to_movie=0, border_to_0=0) -> str:
+def save_tif_to_mmap_online(movie_iterable, save_base_name='YrOL_', order='C', add_to_movie=0, border_to_0=0) -> str:
     # todo: todocument
 
-    if isinstance(movie_iterable, basestring): # Allow specifying a filename rather than its data rep
-        with tifffile.TiffFile(movie_iterable) as tf: # And load it if that happens
+    if isinstance(movie_iterable, basestring):    # Allow specifying a filename rather than its data rep
+        with tifffile.TiffFile(movie_iterable) as tf:    # And load it if that happens
             movie_iterable = cm.movie(tf)
 
     count = 0
     new_mov = []
 
-    dims = (len(movie_iterable),) + movie_iterable[0].shape # TODO: Don't pack length into dims
+    dims = (len(movie_iterable),) + movie_iterable[0].shape    # TODO: Don't pack length into dims
     fname_tot = (memmap_frames_filename(save_base_name, dims[1:], dims[0], order))
 
-    big_mov = np.memmap(fname_tot, mode='w+', dtype=np.float32,
-                        shape=prepare_shape((np.prod(dims[1:]), dims[0])), order=order)
+    big_mov = np.memmap(fname_tot,
+                        mode='w+',
+                        dtype=np.float32,
+                        shape=prepare_shape((np.prod(dims[1:]), dims[0])),
+                        order=order)
 
     for page in movie_iterable:
         if count % 100 == 0:
@@ -631,8 +673,7 @@ def save_tif_to_mmap_online(movie_iterable, save_base_name='YrOL_', order='C',
         img = np.array(page, dtype=np.float32)
         new_img = img
         if save_base_name is not None:
-            big_mov[:, count] = np.reshape(
-                new_img, np.prod(dims[1:]), order='F')
+            big_mov[:, count] = np.reshape(new_img, np.prod(dims[1:]), order='F')
         else:
             new_mov.append(new_img)
 
@@ -642,8 +683,7 @@ def save_tif_to_mmap_online(movie_iterable, save_base_name='YrOL_', order='C',
             img[:, -border_to_0:] = 0
             img[-border_to_0:, :] = 0
 
-        big_mov[:, count] = np.reshape(
-            img + add_to_movie, np.prod(dims[1:]), order='F')
+        big_mov[:, count] = np.reshape(img + add_to_movie, np.prod(dims[1:]), order='F')
 
         count += 1
     big_mov.flush()
diff --git a/caiman/paths.py b/caiman/paths.py
index 92b2de63b..d50cb3360 100644
--- a/caiman/paths.py
+++ b/caiman/paths.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-
 """ Utilities for retrieving paths around the caiman package and its datadirs
 """
 
@@ -7,22 +6,25 @@
 import os
 from typing import Tuple
 
+
 #######
 # datadir
 def caiman_datadir() -> str:
-	"""
+    """
 	The datadir is a user-configurable place which holds a user-modifiable copy of
 	data the Caiman libraries need to function, alongside code demos and other things.
 	This is meant to be separate from the library install of Caiman, which may be installed
 	into the global python library path (or into a conda path or somewhere else messy).
 	"""
-	if "CAIMAN_DATA" in os.environ:
-		return os.environ["CAIMAN_DATA"]
-	else:
-		return os.path.join(os.path.expanduser("~"), "caiman_data")
+    if "CAIMAN_DATA" in os.environ:
+        return os.environ["CAIMAN_DATA"]
+    else:
+        return os.path.join(os.path.expanduser("~"), "caiman_data")
+
 
 def caiman_datadir_exists() -> bool:
-	return os.path.isdir(caiman_datadir())
+    return os.path.isdir(caiman_datadir())
+
 
 ######
 # memmap files
@@ -30,14 +32,14 @@ def caiman_datadir_exists() -> bool:
 # Right now these are usually stored in the cwd of the script, although the basename could change that
 # In the future we may consistently store these somewhere under the caiman_datadir
 
-def memmap_frames_filename(basename:str, dims:Tuple, frames:int, order:str='F') -> str:
-	# Some functions calling this have the first part of *their* dims Tuple be the number of frames.
-	# They *must* pass a slice to this so dims is only X, Y, and optionally Z. Frames is passed separately.
-	dimfield_0 = dims[0]
-	dimfield_1 = dims[1]
-	if len(dims) == 3:
-		dimfield_2 = dims[2]
-	else:
-		dimfield_2 = 1
-	return f"{basename}_d1_{dimfield_0}_d2_{dimfield_1}_d3_{dimfield_2}_order_{order}_frames_{frames}_.mmap"
+def memmap_frames_filename(basename: str, dims: Tuple, frames: int, order: str = 'F') -> str:
+    # Some functions calling this have the first part of *their* dims Tuple be the number of frames.
+    # They *must* pass a slice to this so dims is only X, Y, and optionally Z. Frames is passed separately.
+    dimfield_0 = dims[0]
+    dimfield_1 = dims[1]
+    if len(dims) == 3:
+        dimfield_2 = dims[2]
+    else:
+        dimfield_2 = 1
+    return f"{basename}_d1_{dimfield_0}_d2_{dimfield_1}_d3_{dimfield_2}_order_{order}_frames_{frames}_.mmap"
diff --git a/caiman/summary_images.py b/caiman/summary_images.py
index a7fd1399f..d888897c9 100644
--- a/caiman/summary_images.py
+++ b/caiman/summary_images.py
@@ -1,6 +1,5 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
-
 """ functions that creates image from a video file
 
 Primarily intended for plotting, returns correlation images ( local or max )
@@ -8,17 +7,9 @@
 See Also:
 ------------
 
-@url
-.. image::
 @author andrea giovannucci
 """
-# \package caiman
-# \version 1.0
-# \copyright GNU General Public License v2.0
-# \date Created on Thu Oct 20 11:41:21 2016
-
-
 from builtins import range
 
 import cv2
@@ -31,14 +22,8 @@
 import caiman as cm
 from caiman.source_extraction.cnmf.pre_processing import get_noise_fft
 
 
-#try:
-#    cv2.setNumThreads(0)
-#except:
-#    pass
-#%%
-
-def max_correlation_image(Y, bin_size:int=1000, eight_neighbours:bool=True, swap_dim:bool=True) -> np.ndarray:
+def max_correlation_image(Y, bin_size: int = 1000, eight_neighbours: bool = True, swap_dim: bool = True) -> np.ndarray:
     """Computes the max-correlation image for the input dataset Y with bin_size
 
     Args:
@@ -62,13 +47,11 @@ def max_correlation_image(Y, bin_size:int=1000, eight_neighbours:bool=True, swap
     """
     if swap_dim:
-        Y = np.transpose(
-            Y, tuple(np.hstack((Y.ndim - 1, list(range(Y.ndim))[:-1]))))
+        Y = np.transpose(Y, tuple(np.hstack((Y.ndim - 1, list(range(Y.ndim))[:-1]))))
 
     T = Y.shape[0]
     if T <= bin_size:
-        Cn_bins = local_correlations_fft(
-            Y, eight_neighbours=eight_neighbours, swap_dim=False)
+        Cn_bins = local_correlations_fft(Y, eight_neighbours=eight_neighbours, swap_dim=False)
         return Cn_bins
     else:
         if T % bin_size < bin_size / 2.:
@@ -78,7 +61,8 @@ def max_correlation_image(Y, bin_size:int=1000, eight_neighbours:bool=True, swap
         Cn_bins = np.zeros(((n_bins,) + Y.shape[1:]))
         for i in range(n_bins):
             Cn_bins[i] = local_correlations_fft(Y[i * bin_size:(i + 1) * bin_size],
-                                                eight_neighbours=eight_neighbours, swap_dim=False)
+                                                eight_neighbours=eight_neighbours,
+                                                swap_dim=False)
             logging.debug(i * bin_size)
 
         Cn = np.max(Cn_bins, axis=0)
@@ -86,7 +70,11 @@ def max_correlation_image(Y, bin_size:int=1000, eight_neighbours:bool=True, swap
 
 #%%
-def local_correlations_fft(Y, eight_neighbours:bool=True, swap_dim:bool=True, opencv:bool=True, rolling_window = None) -> np.ndarray:
+def local_correlations_fft(Y,
+                           eight_neighbours: bool = True,
+                           swap_dim: bool = True,
+                           opencv: bool = True,
+                           rolling_window=None) -> np.ndarray:
     """Computes the correlation image for the input dataset Y using a faster FFT based method
 
     Args:
@@ -111,8 +99,7 @@ def local_correlations_fft(Y, eight_neighbours:bool=True, swap_dim:bool=True, op
     """
     if swap_dim:
-        Y = np.transpose(
-            Y, tuple(np.hstack((Y.ndim - 1, list(range(Y.ndim))[:-1]))))
+        Y = np.transpose(Y, tuple(np.hstack((Y.ndim - 1, list(range(Y.ndim))[:-1]))))
 
     Y = Y.astype('float32')
     if rolling_window is None:
@@ -122,12 +109,12 @@ def local_correlations_fft(Y, eight_neighbours:bool=True, swap_dim:bool=True, op
         Y /= Ystd
     else:
         Ysum = np.cumsum(Y, axis=0)
-        Yrm = (Ysum[rolling_window:] - Ysum[:-rolling_window])/rolling_window
+        Yrm = (Ysum[rolling_window:] - Ysum[:-rolling_window]) / rolling_window
         Y[:rolling_window] -= Yrm[0]
         Y[rolling_window:] -= Yrm
         del Yrm, Ysum
         Ystd = np.cumsum(Y**2, axis=0)
-        Yrst = np.sqrt((Ystd[rolling_window:] - Ystd[:-rolling_window])/rolling_window)
+        Yrst = np.sqrt((Ystd[rolling_window:] - Ystd[:-rolling_window]) / rolling_window)
         Yrst[Yrst == 0] = np.inf
         Y[:rolling_window] /= Yrst[0]
         Y[rolling_window:] /= Yrst
@@ -138,9 +125,12 @@ def local_correlations_fft(Y, eight_neighbours:bool=True, swap_dim:bool=True, op
             sz = np.ones((3, 3, 3), dtype='float32')
             sz[1, 1, 1] = 0
         else:
+            # yapf: disable
             sz = np.array([[[0, 0, 0], [0, 1, 0], [0, 0, 0]],
                            [[0, 1, 0], [1, 0, 1], [0, 1, 0]],
-                           [[0, 0, 0], [0, 1, 0], [0, 0, 0]]], dtype='float32')
+                           [[0, 0, 0], [0, 1, 0], [0, 0, 0]]],
+                          dtype='float32')
+            # yapf: enable
     else:
         if eight_neighbours:
             sz = np.ones((3, 3), dtype='float32')
@@ -150,27 +140,26 @@ def local_correlations_fft(Y, eight_neighbours:bool=True, swap_dim:bool=True, op
 
     if opencv and Y.ndim == 3:
         Yconv = np.stack([cv2.filter2D(img, -1, sz, borderType=0) for img in Y])
-        MASK = cv2.filter2D(
-            np.ones(Y.shape[1:], dtype='float32'), -1, sz, borderType=0)
+        MASK = cv2.filter2D(np.ones(Y.shape[1:], dtype='float32'), -1, sz, borderType=0)
     else:
         Yconv = convolve(Y, sz[np.newaxis, :], mode='constant')
-        MASK = convolve(
-            np.ones(Y.shape[1:], dtype='float32'), sz, mode='constant')
+        MASK = convolve(np.ones(Y.shape[1:], dtype='float32'), sz, mode='constant')
 
-    YYconv = Yconv*Y
+    YYconv = Yconv * Y
     del Y, Yconv
     if rolling_window is None:
         Cn = np.mean(YYconv, axis=0) / MASK
     else:
         YYconv_cs = np.cumsum(YYconv, axis=0)
         del YYconv
-        YYconv_rm = (YYconv_cs[rolling_window:] - YYconv_cs[:-rolling_window])/rolling_window
+        YYconv_rm = (YYconv_cs[rolling_window:] - YYconv_cs[:-rolling_window]) / rolling_window
         del YYconv_cs
         Cn = YYconv_rm / MASK
 
     return Cn
 
-def local_correlations_multicolor(Y, swap_dim:bool=True) -> np.ndarray:
+
+def local_correlations_multicolor(Y, swap_dim: bool = True) -> np.ndarray:
     """Computes the correlation image with color depending on orientation
 
     Args:
@@ -187,22 +176,20 @@ def local_correlations_multicolor(Y, swap_dim:bool=True) -> np.ndarray:
     if Y.ndim == 4:
         raise Exception('Not Implemented')
-
     if swap_dim:
-        Y = np.transpose(
-            Y, tuple(np.hstack((Y.ndim - 1, list(range(Y.ndim))[:-1]))))
+        Y = np.transpose(Y, tuple(np.hstack((Y.ndim - 1, list(range(Y.ndim))[:-1]))))
 
     w_mov = (Y - np.mean(Y, axis=0)) / np.std(Y, axis=0)
 
     rho_h = np.mean(np.multiply(w_mov[:, :-1, :], w_mov[:, 1:, :]), axis=0)
     rho_w = np.mean(np.multiply(w_mov[:, :, :-1], w_mov[:, :, 1:]), axis=0)
-    rho_d1 = np.mean(np.multiply(w_mov[:, 1:, :-1], w_mov[:, :-1, 1:, ]), axis=0)
-    rho_d2 = np.mean(np.multiply(w_mov[:, :-1, :-1], w_mov[:, 1:, 1:, ]), axis=0)
+    rho_d1 = np.mean(np.multiply(w_mov[:, 1:, :-1], w_mov[:, :-1, 1:,]), axis=0)
+    rho_d2 = np.mean(np.multiply(w_mov[:, :-1, :-1], w_mov[:, 1:, 1:,]), axis=0)
 
-    return np.dstack([rho_h[:,1:]/2, rho_d1/2, rho_d2/2])
+    return np.dstack([rho_h[:, 1:] / 2, rho_d1 / 2, rho_d2 / 2])
 
 
-def local_correlations(Y, eight_neighbours:bool=True, swap_dim:bool=True, order_mean=1) -> np.ndarray:
+def local_correlations(Y, eight_neighbours: bool = True, swap_dim: bool = True, order_mean=1) -> np.ndarray:
     """Computes the correlation image for the input dataset Y
 
     Args:
@@ -224,8 +211,7 @@ def local_correlations(Y, eight_neighbours:bool=True, swap_dim:bool=True, order_
     """
 
     if swap_dim:
-        Y = np.transpose(
-            Y, tuple(np.hstack((Y.ndim - 1, list(range(Y.ndim))[:-1]))))
+        Y = np.transpose(Y, tuple(np.hstack((Y.ndim - 1, list(range(Y.ndim))[:-1]))))
 
     rho = np.zeros(np.shape(Y)[1:])
     w_mov = (Y - np.mean(Y, axis=0)) / np.std(Y, axis=0)
@@ -233,79 +219,79 @@ def local_correlations(Y, eight_neighbours:bool=True, swap_dim:bool=True, order_
     rho_h = np.mean(np.multiply(w_mov[:, :-1, :], w_mov[:, 1:, :]), axis=0)
     rho_w = np.mean(np.multiply(w_mov[:, :, :-1], w_mov[:, :, 1:]), axis=0)
 
+    # yapf: disable
     if order_mean == 0:
         rho = np.ones(np.shape(Y)[1:])
         rho_h = rho_h
         rho_w = rho_w
-        rho[:-1, :] = rho[:-1, :]*rho_h
-        rho[1:, :] = rho[1:, :]*rho_h
-        rho[:, :-1] = rho[:, :-1]*rho_w
-        rho[:, 1:] = rho[:, 1:]*rho_w
+        rho[:-1, :] = rho[:-1, :] * rho_h
+        rho[1:, :]  = rho[1:, :] * rho_h
+        rho[:, :-1] = rho[:, :-1] * rho_w
+        rho[:, 1:]  = rho[:, 1:] * rho_w
     else:
         rho[:-1, :] = rho[:-1, :] + rho_h**(order_mean)
-        rho[1:, :] = rho[1:, :] + rho_h**(order_mean)
+        rho[1:, :]  = rho[1:, :] + rho_h**(order_mean)
         rho[:, :-1] = rho[:, :-1] + rho_w**(order_mean)
-        rho[:, 1:] = rho[:, 1:] + rho_w**(order_mean)
+        rho[:, 1:]  = rho[:, 1:] + rho_w**(order_mean)
 
     if Y.ndim == 4:
-        rho_d = np.mean(np.multiply(
-            w_mov[:, :, :, :-1], w_mov[:, :, :, 1:]), axis=0)
+        rho_d = np.mean(np.multiply(w_mov[:, :, :, :-1], w_mov[:, :, :, 1:]), axis=0)
         rho[:, :, :-1] = rho[:, :, :-1] + rho_d
         rho[:, :, 1:] = rho[:, :, 1:] + rho_d
         neighbors = 6 * np.ones(np.shape(Y)[1:])
-        neighbors[0] = neighbors[0] - 1
-        neighbors[-1] = neighbors[-1] - 1
-        neighbors[:, 0] = neighbors[:, 0] - 1
-        neighbors[:, -1] = neighbors[:, -1] - 1
-        neighbors[:, :, 0] = neighbors[:, :, 0] - 1
+        neighbors[0]        = neighbors[0] - 1
+        neighbors[-1]       = neighbors[-1] - 1
+        neighbors[:, 0]     = neighbors[:, 0] - 1
+        neighbors[:, -1]    = neighbors[:, -1] - 1
+        neighbors[:, :, 0]  = neighbors[:, :, 0] - 1
         neighbors[:, :, -1] = neighbors[:, :, -1] - 1
     else:
         if eight_neighbours:
-            rho_d1 = np.mean(np.multiply(
-                w_mov[:, 1:, :-1], w_mov[:, :-1, 1:, ]), axis=0)
-            rho_d2 = np.mean(np.multiply(
-                w_mov[:, :-1, :-1], w_mov[:, 1:, 1:, ]), axis=0)
+            rho_d1 = np.mean(np.multiply(w_mov[:, 1:, :-1], w_mov[:, :-1, 1:,]), axis=0)
+            rho_d2 = np.mean(np.multiply(w_mov[:, :-1, :-1], w_mov[:, 1:, 1:,]), axis=0)
 
             if order_mean == 0:
                 rho_d1 = rho_d1
                 rho_d2 = rho_d2
-                rho[:-1, :-1] = rho[:-1, :-1]*rho_d2
-                rho[1:, 1:] = rho[1:, 1:]*rho_d1
-                rho[1:, :-1] = rho[1:, :-1]*rho_d1
-                rho[:-1, 1:] = rho[:-1, 1:]*rho_d2
+                rho[:-1, :-1] = rho[:-1, :-1] * rho_d2
+                rho[1:, 1:]   = rho[1:, 1:] * rho_d1
+                rho[1:, :-1]  = rho[1:, :-1] * rho_d1
+                rho[:-1, 1:]  = rho[:-1, 1:] * rho_d2
             else:
                 rho[:-1, :-1] = rho[:-1, :-1] + rho_d2**(order_mean)
-                rho[1:, 1:] = rho[1:, 1:] + rho_d1**(order_mean)
-                rho[1:, :-1] = rho[1:, :-1] + rho_d1**(order_mean)
-                rho[:-1, 1:] = rho[:-1, 1:] + rho_d2**(order_mean)
+                rho[1:, 1:]   = rho[1:, 1:] + rho_d1**(order_mean)
+                rho[1:, :-1]  = rho[1:, :-1] + rho_d1**(order_mean)
+                rho[:-1, 1:]  = rho[:-1, 1:] + rho_d2**(order_mean)
 
             neighbors = 8 * np.ones(np.shape(Y)[1:3])
-            neighbors[0, :] = neighbors[0, :] - 3
-            neighbors[-1, :] = neighbors[-1, :] - 3
-            neighbors[:, 0] = neighbors[:, 0] - 3
-            neighbors[:, -1] = neighbors[:, -1] - 3
-            neighbors[0, 0] = neighbors[0, 0] + 1
+            neighbors[0, :]   = neighbors[0, :] - 3
+            neighbors[-1, :]  = neighbors[-1, :] - 3
+            neighbors[:, 0]   = neighbors[:, 0] - 3
+            neighbors[:, -1]  = neighbors[:, -1] - 3
+            neighbors[0, 0]   = neighbors[0, 0] + 1
             neighbors[-1, -1] = neighbors[-1, -1] + 1
-            neighbors[-1, 0] = neighbors[-1, 0] + 1
-            neighbors[0, -1] = neighbors[0, -1] + 1
+            neighbors[-1, 0]  = neighbors[-1, 0] + 1
+            neighbors[0, -1]  = neighbors[0, -1] + 1
         else:
             neighbors = 4 * np.ones(np.shape(Y)[1:3])
-            neighbors[0, :] = neighbors[0, :] - 1
-            neighbors[-1, :] = neighbors[-1, :] - 1
-            neighbors[:, 0] = neighbors[:, 0] - 1
-            neighbors[:, -1] = neighbors[:, -1] - 1
+            neighbors[0, :]  = neighbors[0, :] - 1
+            neighbors[-1, :] = neighbors[-1, :] - 1
+            neighbors[:, 0]  = neighbors[:, 0] - 1
+            neighbors[:, -1] = neighbors[:, -1] - 1
+    # yapf: enable
 
     if order_mean == 0:
-        rho = np.power(rho, 1./neighbors)
+        rho = np.power(rho, 1. / neighbors)
     else:
-        rho = np.power(np.divide(rho, neighbors),1/order_mean)
+        rho = np.power(np.divide(rho, neighbors), 1 / order_mean)
 
     return rho
 
 
-def correlation_pnr(Y, gSig=None, center_psf:bool=True, swap_dim:bool=True, background_filter:str='disk') -> Tuple[np.ndarray, np.ndarray]:
+def correlation_pnr(Y, gSig=None, center_psf: bool = True, swap_dim: bool = True,
+                    background_filter: str = 'disk') -> Tuple[np.ndarray, np.ndarray]:
     """
     compute the correlation image and the peak-to-noise ratio (PNR) image.
     If gSig is provided, then spatially filtered the video.
@@ -332,8 +318,7 @@ def correlation_pnr(Y, gSig=None, center_psf:bool=True, swap_dim:bool=True, back
     """
     if swap_dim:
-        Y = np.transpose(
-            Y, tuple(np.hstack((Y.ndim - 1, list(range(Y.ndim))[:-1]))))
+        Y = np.transpose(Y, tuple(np.hstack((Y.ndim - 1, list(range(Y.ndim))[:-1]))))
 
     # parameters
     _, d1, d2 = Y.shape
@@ -353,19 +338,18 @@ def correlation_pnr(Y, gSig=None, center_psf:bool=True, swap_dim:bool=True, back
                     img, ksize=ksize, sigmaX=gSig[0], sigmaY=gSig[1], borderType=1) \
                     - cv2.boxFilter(img, ddepth=-1, ksize=ksize, borderType=1)
         else:
-            psf = cv2.getGaussianKernel(ksize[0], gSig[0], cv2.CV_32F).dot(
-                cv2.getGaussianKernel(ksize[1], gSig[1], cv2.CV_32F).T)
+            psf = cv2.getGaussianKernel(ksize[0], gSig[0],
+                                        cv2.CV_32F).dot(cv2.getGaussianKernel(ksize[1], gSig[1], cv2.CV_32F).T)
             ind_nonzero = psf >= psf[0].max()
             psf -= psf[ind_nonzero].mean()
             psf[~ind_nonzero] = 0
             for idx, img in enumerate(data_filtered):
-                data_filtered[idx, ] = cv2.filter2D(img, -1, psf, borderType=1)
+                data_filtered[idx,] = cv2.filter2D(img, -1, psf, borderType=1)
             # data_filtered[idx, ] = cv2.filter2D(img, -1, psf, borderType=1)
     else:
         for idx, img in enumerate(data_filtered):
-            data_filtered[idx, ] = cv2.GaussianBlur(
-                img, ksize=ksize, sigmaX=gSig[0], sigmaY=gSig[1], borderType=1)
+            data_filtered[idx,] = cv2.GaussianBlur(img, ksize=ksize, sigmaX=gSig[0], sigmaY=gSig[1], borderType=1)
 
     # compute peak-to-noise ratio
     data_filtered -= data_filtered.mean(axis=0)
@@ -384,7 +368,7 @@ def correlation_pnr(Y, gSig=None, center_psf:bool=True, swap_dim:bool=True, back
     return cn, pnr
 
 
-def iter_chunk_array(arr:np.array, chunk_size:int):
+def iter_chunk_array(arr: np.array, chunk_size: int):
     if ((arr.shape[0] // chunk_size) - 1) > 0:
         for i in range((arr.shape[0] // chunk_size) - 1):
             yield arr[chunk_size * i:chunk_size * (i + 1)]
@@ -393,7 +377,7 @@ def iter_chunk_array(arr:np.array, chunk_size:int):
         yield arr
 
 
-def correlation_image_ecobost(mov, chunk_size:int=1000, dview=None):
+def correlation_image_ecobost(mov, chunk_size: int = 1000, dview=None):
     """ Compute correlation image as Erick. Removes the mean from each chunk before computing the correlation
 
     Args:
@@ -416,9 +400,8 @@ def correlation_image_ecobost(mov, chunk_size:int=1000, dview=None):
         num_frames = scan.shape[0]
     res = map(map_corr, iter_chunk_array(scan, chunk_size))
 
-    sum_x, sum_sqx, sum_xy, num_frames = [np.sum(np.array(a), 0)
-                                          for a in zip(*res)]
-    denom_factor = np.sqrt(num_frames * sum_sqx - sum_x ** 2)
+    sum_x, sum_sqx, sum_xy, num_frames = [np.sum(np.array(a), 0) for a in zip(*res)]
+    denom_factor = np.sqrt(num_frames * sum_sqx - sum_x**2)
     corrs = np.zeros(sum_xy.shape)
     for k in [0, 1, 2, 3]:
         rotated_corrs = np.rot90(corrs, k=k)
@@ -451,6 +434,7 @@ def correlation_image_ecobost(mov, chunk_size:int=1000, dview=None):
 
     return correlation_image
 
+
 def map_corr(scan) -> Tuple[Any, Any, Any, int]:
     '''This part of the code is in a mapping function that's run over different movies in parallel
 
@@ -476,10 +460,8 @@ def map_corr(scan) -> Tuple[Any, Any, Any, int]:
         rotated_xysum = np.rot90(chunk_xysum, k=k)
 
         # Multiply each pixel by one above and by one above to the left
-        rotated_xysum[1:, :, k] = np.sum(rotated_chunk[1:] * rotated_chunk[:-1],
-                                         axis=-1, dtype=float)
-        rotated_xysum[1:, 1:, 4 + k] = np.sum(rotated_chunk[1:, 1:] *
-                                              rotated_chunk[:-1, :-1], axis=-1, dtype=float)
+        rotated_xysum[1:, :, k] = np.sum(rotated_chunk[1:] * rotated_chunk[:-1], axis=-1, dtype=float)
+        rotated_xysum[1:, 1:, 4 + k] = np.sum(rotated_chunk[1:, 1:] * rotated_chunk[:-1, :-1], axis=-1, dtype=float)
 
         # Return back to original orientation
         chunk = np.rot90(rotated_chunk, k=4 - k)
@@ -490,7 +472,8 @@ def map_corr(scan) -> Tuple[Any, Any, Any, int]:
     return chunk_sum, chunk_sqsum, chunk_xysum, num_frames
 
 
-def prepare_local_correlations(Y, swap_dim:bool=False, eight_neighbours:bool=False) -> Tuple[Any, Any, Any, Any, Any, Any, Any, Any]:
+def prepare_local_correlations(Y, swap_dim: bool = False,
+                               eight_neighbours: bool = False) -> Tuple[Any, Any, Any, Any, Any, Any, Any, Any]:
     """Computes the correlation image and some statistics to update it online
 
     Args:
@@ -538,6 +521,7 @@ def get_indices_of_neighbors(pixel):
         else:
             inside = (x >= 0) * (x < d1) * (y >= 0) * (y < d2)
         return np.ravel_multi_index((x[inside], y[inside]), dims, order='F')
+
     # more compact but slower code
     # idx = np.asarray([i - 1 for i in np.nonzero(sz)])
     # def get_indices_of_neighbors(pixel):
@@ -559,16 +543,26 @@ def get_indices_of_neighbors(pixel):
     #              for (r_, c_) in zip(row_ind, col_ind)]) / Yr.shape[1]
 
     sig = np.sqrt(second_moment - first_moment**2)
-    M = coo_matrix(((crosscorr - first_moment[row_ind] * first_moment[col_ind]) /
-                    (sig[row_ind] * sig[col_ind]) / num_neigbors,
-                    (row_ind, col_ind)), dtype=Yr.dtype)
+    M = coo_matrix(
+        ((crosscorr - first_moment[row_ind] * first_moment[col_ind]) / (sig[row_ind] * sig[col_ind]) / num_neigbors,
+         (row_ind, col_ind)),
+        dtype=Yr.dtype)
     cn = M.dot(np.ones(M.shape[1], dtype=M.dtype)).reshape(dims, order='F')
 
     return first_moment, second_moment, crosscorr, col_ind, row_ind, num_neigbors, M, cn
 
 
-def update_local_correlations(t, frames, first_moment, second_moment, crosscorr,
-                              col_ind, row_ind, num_neigbors, M, cn, del_frames=None) -> np.ndarray:
+def update_local_correlations(t,
+                              frames,
+                              first_moment,
+                              second_moment,
+                              crosscorr,
+                              col_ind,
+                              row_ind,
+                              num_neigbors,
+                              M,
+                              cn,
+                              del_frames=None) -> np.ndarray:
     """Updates sufficient statistics in place and returns correlation image"""
     dims = frames.shape[1:]
     stride = len(frames)
@@ -584,7 +578,7 @@ def update_local_correlations(t, frames, first_moment, second_moment, crosscorr,
             first_moment -= del_frames.sum(0) / t
             second_moment -= (del_frames**2).sum(0) / t
             crosscorr -= np.sum(del_frames[:, row_ind] * del_frames[:, col_ind], 0) / t
-        else: # loop is faster
+        else:    # loop is faster
             for f in del_frames:
                 f = f.ravel(order='F')
                 first_moment -= f / t
@@ -595,21 +589,27 @@ def update_local_correlations(t, frames, first_moment, second_moment, crosscorr,
         first_moment += frames.sum(0) / t
         second_moment += (frames**2).sum(0) / t
         crosscorr += np.sum(frames[:, row_ind] * frames[:, col_ind], 0) / t
-    else: # loop is faster
+    else:    # loop is faster
        for f in frames:
            f = f.ravel(order='F')
            first_moment += f / t
            second_moment += (f**2) / t
            crosscorr += (f[row_ind] * f[col_ind]) / t
    sig = np.sqrt(second_moment - first_moment**2)
-    M.data = ((crosscorr - first_moment[row_ind] * first_moment[col_ind]) /
-              (sig[row_ind] * sig[col_ind]) / num_neigbors)
+    M.data = ((crosscorr - first_moment[row_ind] * first_moment[col_ind]) / (sig[row_ind] * sig[col_ind]) /
+              num_neigbors)
    cn = M.dot(np.ones(M.shape[1], dtype=M.dtype)).reshape(dims, order='F')
    return cn


-def local_correlations_movie(file_name, tot_frames:Optional[int]=None, fr:int=30, window:int=30, stride:int=1,
-                             swap_dim:bool=False, eight_neighbours:bool=True, mode:str='simple'):
+def local_correlations_movie(file_name,
+                             tot_frames: Optional[int] = None,
+                             fr: int = 30,
+                             window: int = 30,
+                             stride: int = 1,
+                             swap_dim: bool = False,
+                             eight_neighbours: bool = True,
+                             mode: str = 'simple'):
     """
     Compute an online correlation image as moving average
 
@@ -651,50 +651,56 @@ def local_correlations_movie(file_name, tot_frames:Optional[int]=None, fr:int=30
     corr_movie[0] = cn
     if mode == 'simple':
         for tt in range((T - window) // stride):
-            corr_movie[tt + 1] = update_local_correlations(
-                window, Y[tt * stride + window:(tt + 1) * stride + window],
-                first_moment, second_moment, crosscorr,
-                col_ind, row_ind, num_neigbors, M, cn, Y[tt * stride:(tt + 1) * stride])
+            corr_movie[tt + 1] = update_local_correlations(window, Y[tt * stride + window:(tt + 1) * stride + window],
+                                                           first_moment, second_moment, crosscorr, col_ind, row_ind,
+                                                           num_neigbors, M, cn, Y[tt * stride:(tt + 1) * stride])
     elif mode == 'exponential':
-        for tt, frames in enumerate(Y[window:window + (T - window) // stride * stride]
-                                    .reshape((-1, stride) + dims)):
-            corr_movie[tt + 1] = update_local_correlations(
-                window, frames, first_moment, second_moment, crosscorr,
-                col_ind, row_ind, num_neigbors, M, cn)
+        for tt, frames in enumerate(Y[window:window + (T - window) // stride * stride].reshape((-1, stride) + dims)):
+            corr_movie[tt + 1] = update_local_correlations(window, frames, first_moment, second_moment, crosscorr,
+                                                           col_ind, row_ind, num_neigbors, M, cn)
     elif mode == 'cumulative':
-        for tt, frames in enumerate(Y[window:window + (T - window) // stride * stride]
-                                    .reshape((-1, stride) + dims)):
-            corr_movie[tt + 1] = update_local_correlations(
-                tt + window + 1, frames, first_moment, second_moment, crosscorr,
-                col_ind, row_ind, num_neigbors, M, cn)
+        for tt, frames in enumerate(Y[window:window + (T - window) // stride * stride].reshape((-1, stride) + dims)):
+            corr_movie[tt + 1] = update_local_correlations(tt + window + 1, frames, first_moment, second_moment,
+                                                           crosscorr, col_ind, row_ind, num_neigbors, M, cn)
     else:
         raise Exception('mode of the moving average must be simple, exponential or cumulative')
     return cm.movie(corr_movie, fr=fr)
 
 
-def local_correlations_movie_offline(file_name, Tot_frames = None, fr:int=10, window:int=30, stride:int=3, swap_dim:bool=True, eight_neighbours:bool=True, order_mean:int=1, ismulticolor:bool=False, dview=None):
-    if Tot_frames is None:
-        Tot_frames = cm.load(file_name).shape[0]
-
-    params:List = [[file_name,range(j,j + window), eight_neighbours, swap_dim, order_mean, ismulticolor] for j in range(0,Tot_frames - window,stride)]
-    if dview is None:
-        parallel_result = list(map(local_correlations_movie_parallel,params))
-    else:
-        if 'multiprocessing' in str(type(dview)):
-            parallel_result = dview.map_async(
-                local_correlations_movie_parallel, params).get(4294967)
-        else:
-            parallel_result = dview.map_sync(
-                local_correlations_movie_parallel, params)
-            dview.results.clear()
-
-    mm = cm.movie(np.concatenate(parallel_result, axis=0),fr=fr)
-    return mm
-
-def local_correlations_movie_parallel(params:Tuple) -> np.ndarray:
-    mv_name, idx, eight_neighbours, swap_dim, order_mean, ismulticolor = params
-    mv = cm.load(mv_name,subindices=idx)
-    if ismulticolor:
-        return local_correlations_multicolor(mv,swap_dim=swap_dim)[None,:,:].astype(np.float32)
-    else:
-        return local_correlations(mv, eight_neighbours=eight_neighbours, swap_dim=swap_dim, order_mean=order_mean)[None,:,:].astype(np.float32)
+def local_correlations_movie_offline(file_name,
+                                     Tot_frames=None,
+                                     fr: int = 10,
+                                     window: int = 30,
+                                     stride: int = 3,
+                                     swap_dim: bool = True,
+                                     eight_neighbours: bool = True,
+                                     order_mean: int = 1,
+                                     ismulticolor: bool = False,
+                                     dview=None):
+
+    if Tot_frames is None:
+        Tot_frames = cm.load(file_name).shape[0]
+
+    params: List = [[file_name, range(j, j + window), eight_neighbours, swap_dim, order_mean, ismulticolor]
+                    for j in range(0, Tot_frames - window, stride)]
+    if dview is None:
+        parallel_result = list(map(local_correlations_movie_parallel, params))
+    else:
+        if 'multiprocessing' in str(type(dview)):
+            parallel_result = dview.map_async(local_correlations_movie_parallel, params).get(4294967)
+        else:
+            parallel_result = dview.map_sync(local_correlations_movie_parallel, params)
+        dview.results.clear()
+
+    mm = cm.movie(np.concatenate(parallel_result, axis=0), fr=fr)
+    return mm
+
+
+def local_correlations_movie_parallel(params: Tuple) -> np.ndarray:
+    mv_name, idx, eight_neighbours, swap_dim, order_mean, ismulticolor = params
+    mv = cm.load(mv_name, subindices=idx)
+    if ismulticolor:
+        return local_correlations_multicolor(mv, swap_dim=swap_dim)[None, :, :].astype(np.float32)
+    else:
+        return local_correlations(mv, eight_neighbours=eight_neighbours, swap_dim=swap_dim,
+                                  order_mean=order_mean)[None, :, :].astype(np.float32)
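
Editor's notes (appended after the patch, not part of it):

The naming convention written by `memmap_frames_filename` (paths.py) and parsed back by `load_memmap` (mmapping.py) is easy to get subtly wrong. The sketch below round-trips it; `parse_memmap_filename` is a hypothetical helper for illustration only and mirrors the negative-index slicing that `load_memmap` uses.

```python
import os

def parse_memmap_filename(filename: str):
    # Hypothetical helper (not in caiman): recover (d1, d2, d3, T, order) from a
    # name of the form <base>_d1_<d1>_d2_<d2>_d3_<d3>_order_<order>_frames_<T>_.mmap.
    # Indexing from the end, as load_memmap() does, keeps the parse robust to
    # underscores inside the basename itself.
    fpart = os.path.split(filename)[-1].split('_')[1:-1]
    return int(fpart[-9]), int(fpart[-7]), int(fpart[-5]), int(fpart[-1]), fpart[-3]

name = "Yr_d1_512_d2_512_d3_1_order_C_frames_3000_.mmap"
assert parse_memmap_filename(name) == (512, 512, 1, 3000, 'C')
```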
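`prepare_shape` exists because the element count of a large movie can overflow 32-bit integer arithmetic before the shape ever reaches `np.memmap`. A minimal sketch of the failure mode, with illustrative sizes that are not taken from the patch:

```python
import numpy as np

d1, d2, d3, T = 1024, 1024, 1, 4000            # ~4.19e9 elements in total
n_pixels = d1 * d2 * d3                        # 1048576, fits comfortably in int32
wrapped = np.int32(n_pixels) * np.int32(T)     # exceeds 2**31 - 1: wraps (numpy may warn)
safe = np.uint64(n_pixels) * np.uint64(T)      # 4194304000, the intended value
print(wrapped, safe)
```

Promoting every entry with `np.uint64`, as `prepare_shape` does, keeps downstream size computations in 64-bit unsigned arithmetic.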
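In `extract_patch_coordinates`, patch centers advance by `2*rf - stride` (the patch width minus the requested overlap) and a final patch is pinned flush against the far border so the whole FOV is covered. A small self-contained sketch of that layout rule for one dimension; `patch_slices` is an illustrative name, not a caiman function, and border/index handling is omitted:

```python
def patch_slices(dim: int, rf: int, stride: int):
    # Same layout rule as the `iters` expression in extract_patch_coordinates():
    # centers at rf, rf + (2*rf - stride), ... plus one final center at dim - rf.
    centers = list(range(rf, dim - rf, 2 * rf - stride)) + [dim - rf]
    # each patch spans center - rf .. center + rf, clipped to the FOV
    return [(max(c - rf, 0), min(c + rf + 1, dim)) for c in centers]

print(patch_slices(dim=128, rf=16, stride=4))
# [(0, 33), (28, 61), (56, 89), (84, 117), (96, 128)]
```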
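The `rolling_window` branch of `local_correlations_fft` computes running means with a cumulative-sum difference rather than a per-window loop. A quick numpy-only check of that identity under toy sizes; note the windows produced this way are trailing windows offset by one frame:

```python
import numpy as np

T, rolling_window = 20, 5
Y = np.random.rand(T, 4, 4).astype('float32')

# cumsum trick as in local_correlations_fft()
Ysum = np.cumsum(Y, axis=0)
Yrm = (Ysum[rolling_window:] - Ysum[:-rolling_window]) / rolling_window

# Yrm[i] equals the mean of the window Y[i + 1 : i + 1 + rolling_window]
for i in range(T - rolling_window):
    assert np.allclose(Yrm[i], Y[i + 1:i + 1 + rolling_window].mean(0), atol=1e-5)
```

The same difference-of-cumsums pattern is reused for the rolling standard deviation (`Yrst`) and for the rolling mean of `YYconv` later in the function.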