fixing resizing bug, issue flatironinstitute#651
epnev committed Oct 9, 2019
1 parent 52b69d2 commit ef5228e
Showing 3 changed files with 25 additions and 8 deletions.
20 changes: 19 additions & 1 deletion caiman/base/movies.py
@@ -923,7 +923,25 @@ def extract_traces_from_masks(self, masks:np.ndarray) -> trace:
return traces

def resize(self, fx=1, fy=1, fz=1, interpolation=cv2.INTER_AREA):
- # todo: todocument
+ """
+ Resize the caiman movie into a new one. fx, fy, and fz are magnification
+ factors along the x, y, and temporal dimensions, respectively; the temporal
+ dimension is controlled by fz. For example, to downsample in time by a
+ factor of 2, set fz = 0.5.
+ Args:
+ fx (float):
+ Magnification factor along the x-dimension
+ fy (float):
+ Magnification factor along the y-dimension
+ fz (float):
+ Magnification factor along the temporal dimension
+ Returns:
+ self (caiman movie)
+ """
T, d1, d2 = self.shape
d = d1 * d2
elm = d * T
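As a usage sketch of the documented behavior (the file name and shapes are hypothetical, not part of this commit), downsampling a movie in time by a factor of 2 while leaving the spatial dimensions untouched:

import caiman as cm

m = cm.load('example_movie.tif')     # hypothetical file; movies are indexed (T, d1, d2), e.g. (1000, 64, 64)
m_ds = m.resize(fx=1, fy=1, fz=0.5)  # fz=0.5 halves the number of frames -> roughly (500, 64, 64)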
11 changes: 5 additions & 6 deletions caiman/source_extraction/cnmf/initialization.py
@@ -79,8 +79,8 @@ def downscale(Y, ds, opencv=False):
Y = Y[..., None]
ds = tuple(ds) + (1,)
else:
- Y_ds = movie(Y).resize(fx=1. / ds[0], fy=1. / ds[1], fz=1. / ds[2],
- interpolation=cv2.INTER_AREA)
+ Y_ds = movie(Y.transpose(2, 0, 1)).resize(fx=1. / ds[0], fy=1. / ds[1], fz=1. / ds[2],
+ interpolation=cv2.INTER_AREA).transpose(1, 2, 0)
logging.info('Downscaling using OpenCV')
else:
if d > 3:
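The essence of the fix above, as a hedged sketch (array sizes are made up): in downscale the data Y is ordered (d1, d2, T), while the movie class indexes frames first, so the array is transposed to (T, d1, d2) before resizing and transposed back afterwards.

import numpy as np
from caiman.base.movies import movie

Y = np.random.rand(64, 64, 400).astype(np.float32)   # (d1, d2, T), sizes illustrative
ds = (2, 2, 2)                                        # downscaling factors for (x, y, t)

# transpose to time-first, resize, then restore the original (d1, d2, T) ordering
Y_ds = movie(Y.transpose(2, 0, 1)).resize(fx=1. / ds[0], fy=1. / ds[1],
                                          fz=1. / ds[2]).transpose(1, 2, 0)
print(Y_ds.shape)   # roughly (32, 32, 200)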
@@ -298,15 +298,15 @@ def initialize_components(Y, K=30, gSig=[5, 5], gSiz=None, ssub=1, tsub=1, nIter
if ssub != 1 or tsub != 1:

if method == 'corr_pnr':
logging.info("Spatial downsampling 1-photon")
logging.info("Spatial/Temporal downsampling 1-photon")
# this icrements the performance against ground truth and solves border problems
Y_ds = downscale(Y, tuple([ssub] * len(d) + [tsub]), opencv=False)
else:
logging.info("Spatial downsampling 2-photon")
logging.info("Spatial/Temporal downsampling 2-photon")
# this icrements the performance against ground truth and solves border problems
Y_ds = downscale(Y, tuple([ssub] * len(d) + [tsub]), opencv=True)
# mean_val = np.mean(Y)
- # Y_ds = downscale_local_mean(Y, tuple([ssub] * len(d) + [tsub]), cval=mean_val) # this gives better results against ground truth for 2-photon datasets
+ # Y_ds = downscale_local_mean(Y, tuple([ssub] * len(d) + [tsub]), cval=mean_val)
else:
Y_ds = Y
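For illustration only (data shape and factors are made up), the downsampling tuple built above pairs the spatial factor ssub with each spatial dimension and appends the temporal factor tsub:

import numpy as np
from caiman.source_extraction.cnmf.initialization import downscale

Y = np.random.rand(128, 128, 500).astype(np.float32)   # (x, y, t), sizes illustrative
d = Y.shape[:-1]                                        # spatial dimensions
ssub, tsub = 2, 2
Y_ds = downscale(Y, tuple([ssub] * len(d) + [tsub]), opencv=True)
print(Y_ds.shape)   # roughly (64, 64, 250)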

@@ -416,7 +416,6 @@ def initialize_components(Y, K=30, gSig=[5, 5], gSiz=None, ssub=1, tsub=1, nIter

if Ain.size > 0:
Cin = resize(Cin, [K, T])

center = np.asarray(
[center_of_mass(a.reshape(d, order='F')) for a in Ain.T])
else:
2 changes: 1 addition & 1 deletion docs/source/Handling_Movies.rst
@@ -121,7 +121,7 @@ the opencv library. Below an example putting it all together:
movies_chained = cm.concatenate([movie1, movie2] , axis = 1).resize(1,1,.5).play(magnification=2, fr=50)
This command will concatenate `movie1` and `movie2` along axis `x`, then it will
- downsample the resulting movie along the axis y by a factor of 2, and finally it
+ downsample the resulting movie along the temporal axis by a factor of 2, and finally it
will play the resulting movie magnified by a factor of 2.

Note that unlike `cm.concatenate`, for `movie.resize` the axis ordering is
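A minimal sketch of the ordering point noted above (file names are placeholders): `cm.concatenate` takes a numpy-style axis index on the time-first (T, d1, d2) array, whereas `movie.resize` takes magnification factors ordered (x, y, temporal).

import caiman as cm

movie1 = cm.load('movie1.tif')   # placeholder files; movies are indexed (T, d1, d2)
movie2 = cm.load('movie2.tif')
chained = cm.concatenate([movie1, movie2], axis=0)   # axis=0 appends in time
smaller = chained.resize(fx=0.5, fy=0.5, fz=1)       # halves both spatial dimensions, keeps all frames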
