diff --git a/caiman/base/movies.py b/caiman/base/movies.py
index 4b47ef2c1..809af8355 100644
--- a/caiman/base/movies.py
+++ b/caiman/base/movies.py
@@ -923,7 +923,25 @@ def extract_traces_from_masks(self, masks:np.ndarray) -> trace:
         return traces
 
     def resize(self, fx=1, fy=1, fz=1, interpolation=cv2.INTER_AREA):
-        # todo: todocument
+        """
+        Resize the movie in space (fx, fy) and time (fz). fx, fy
+        and fz are magnification factors; the temporal dimension
+        is controlled by fz. For example, to downsample in time
+        by a factor of 2, set fz = 0.5.
+
+        Args:
+            fx (float):
+                Magnification factor along x-dimension
+
+            fy (float):
+                Magnification factor along y-dimension
+
+            fz (float):
+                Magnification factor along temporal dimension
+
+        Returns:
+            self (caiman movie)
+        """
         T, d1, d2 = self.shape
         d = d1 * d2
         elm = d * T
diff --git a/caiman/source_extraction/cnmf/initialization.py b/caiman/source_extraction/cnmf/initialization.py
index 0fda2318e..84adc81af 100644
--- a/caiman/source_extraction/cnmf/initialization.py
+++ b/caiman/source_extraction/cnmf/initialization.py
@@ -79,8 +79,8 @@ def downscale(Y, ds, opencv=False):
             Y = Y[..., None]
             ds = tuple(ds) + (1,)
         else:
-            Y_ds = movie(Y).resize(fx=1. / ds[0], fy=1. / ds[1], fz=1. / ds[2],
-                                   interpolation=cv2.INTER_AREA)
+            Y_ds = movie(Y.transpose(2, 0, 1)).resize(fx=1. / ds[0], fy=1. / ds[1], fz=1. / ds[2],
+                                                      interpolation=cv2.INTER_AREA).transpose(1, 2, 0)
         logging.info('Downscaling using OpenCV')
     else:
         if d > 3:
@@ -298,15 +298,15 @@ def initialize_components(Y, K=30, gSig=[5, 5], gSiz=None, ssub=1, tsub=1, nIter
 
     if ssub != 1 or tsub != 1:
 
         if method == 'corr_pnr':
-            logging.info("Spatial downsampling 1-photon")
-            # this icrements the performance against ground truth and solves border problems
+            logging.info("Spatial/Temporal downsampling 1-photon")
+            # this improves performance against ground truth and solves border problems
             Y_ds = downscale(Y, tuple([ssub] * len(d) + [tsub]), opencv=False)
         else:
-            logging.info("Spatial downsampling 2-photon")
-            # this icrements the performance against ground truth and solves border problems
+            logging.info("Spatial/Temporal downsampling 2-photon")
+            # this improves performance against ground truth and solves border problems
             Y_ds = downscale(Y, tuple([ssub] * len(d) + [tsub]), opencv=True)
 # mean_val = np.mean(Y)
-# Y_ds = downscale_local_mean(Y, tuple([ssub] * len(d) + [tsub]), cval=mean_val) # this gives better results against ground truth for 2-photon datasets
+# Y_ds = downscale_local_mean(Y, tuple([ssub] * len(d) + [tsub]), cval=mean_val)
     else:
         Y_ds = Y
@@ -416,7 +416,6 @@ def initialize_components(Y, K=30, gSig=[5, 5], gSiz=None, ssub=1, tsub=1, nIter
 
     if Ain.size > 0:
         Cin = resize(Cin, [K, T])
-
         center = np.asarray(
             [center_of_mass(a.reshape(d, order='F')) for a in Ain.T])
     else:
diff --git a/docs/source/Handling_Movies.rst b/docs/source/Handling_Movies.rst
index a5d4a93a2..f2c2601c4 100644
--- a/docs/source/Handling_Movies.rst
+++ b/docs/source/Handling_Movies.rst
@@ -121,7 +121,7 @@ the opencv library. Below an example putting it all together:
 
    movies_chained = cm.concatenate([movie1, movie2] , axis = 1).resize(1,1,.5).play(magnification=2, fr=50)
 
 This command will concatenate `movie1` and `movie2` along axis `x`, then it will
-downsample the resulting movie along the axis y by a factor of 2, and finally it
-will play the resulting movie magnified by a factor of 2. Noe that unlike
+downsample the resulting movie along the temporal axis by a factor of 2, and finally it
+will play the resulting movie magnified by a factor of 2. Note that unlike
 `cm.concatenate`, for `movie.resize` the axis ordering is
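For reviewers, a minimal usage sketch of the `resize` semantics documented above. This assumes `caiman` is importable as `cm` and that movies use the time-first layout `(T, d1, d2)` that `movies.py` expects; the shapes and values are illustrative only, not part of the patch:

```python
import numpy as np
import caiman as cm

# Synthetic movie: 100 frames of 64x64 pixels, time-first layout (T, d1, d2).
m = cm.movie(np.random.randn(100, 64, 64).astype(np.float32), fr=30)

# fx/fy scale space, fz scales time: fz=0.5 halves the frame count.
m_ds = m.resize(fx=0.5, fy=0.5, fz=0.5)
print(m_ds.shape)  # expected: (50, 32, 32)
```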
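The `downscale` fix above hinges on an axis-order mismatch: inside `initialization.py`, `Y` arrives space-first as `(d1, d2, T)`, while the `movie` class is time-first `(T, d1, d2)`. A numpy-only sketch of that bookkeeping, with assumed illustrative shapes:

```python
import numpy as np

# downscale() receives Y space-first: (d1, d2, T).
Y = np.zeros((64, 48, 100))

# movie() wants time first, so move the last axis to the front...
Y_time_first = Y.transpose(2, 0, 1)       # -> (100, 64, 48)

# ...and after resizing, transpose back so callers still see (d1, d2, T).
Y_back = Y_time_first.transpose(1, 2, 0)  # -> (64, 48, 100)
assert Y_back.shape == Y.shape
```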