diff --git a/foundation/stimulus/compute/video.py b/foundation/stimulus/compute/video.py
index 5b13a0c..caf7d70 100644
--- a/foundation/stimulus/compute/video.py
+++ b/foundation/stimulus/compute/video.py
@@ -364,6 +364,83 @@ def video(self):
             current_time = times[-1]
         return video.Video(images, times=times)
 
 
+@keys
+class Frame2List(VideoType):
+    """A video composed of an ordered list of stimulus.Frame2"""
+
+    @property
+    def keys(self):
+        return [
+            stimulus.Frame2List,
+        ]
+
+    @rowproperty
+    def video(self):
+        members = stimulus.Frame2List.Member & self.item
+        if len(members) != (stimulus.Frame2List & self.item).fetch1('members'):
+            raise MissingError(f"Frame2List {self.item} is missing members")
+
+        tups = merge(
+            members,
+            pipe_stim.StaticImage.Image,
+            pipe_stim.Frame2,
+        )
+
+        images = []
+        times = []
+        current_time = 0
+        def mask_image(cond, frame):
+            frame = frame.astype(float)
+            frame_size = frame.T.shape  # frame is (height, width), so frame_size is (width, height)
+            radius = float(cond['aperture_r']) * frame_size[0]
+            transition = float(cond['aperture_transition']) * frame_size[0]
+            x_, y_ = float(cond['aperture_x']), float(cond['aperture_y'])
+            sz = frame_size
+            x = np.linspace(-sz[1] / 2, sz[1] / 2, sz[1]) - y_ * sz[0]
+            y = np.linspace(-sz[0] / 2, sz[0] / 2, sz[0]) - x_ * sz[0]
+            [X, Y] = np.meshgrid(x, y)
+            rr = np.sqrt(X * X + Y * Y)
+            fxn = lambda r: 0.5 * (1 + np.cos(np.pi * r)) * (r < 1) * (r > 0) + (r < 0)
+            alpha_mask = fxn((rr - radius) / transition + 1)
+            bg = cond['background_value']
+            img = (frame - bg) * alpha_mask.T + bg
+            return img.astype(np.uint8)
+
+        for image, pre_blank, duration, r, x, y, trans, bg in zip(
+            *tups.fetch(
+                "image",
+                "pre_blank_period",
+                "presentation_time",
+                "aperture_r",
+                "aperture_x",
+                "aperture_y",
+                "aperture_transition",
+                "background_value",
+                order_by="frame2list_index",
+            )
+        ):
+            cond = dict(aperture_x=x, aperture_y=y, aperture_r=r, aperture_transition=trans, background_value=bg)
+            image = video.Frame.fromarray(mask_image(cond, image))
+
+            if image.mode == "L":
+                blank = np.full([image.height, image.width], 128, dtype=np.uint8)
+                blank = video.Frame.fromarray(blank)
+            else:
+                raise NotImplementedError(f"Frame mode {image.mode} not implemented")
+
+            if pre_blank > 0 and current_time == 0:
+                images += [blank, image, blank]
+                times += [
+                    current_time,
+                    current_time + pre_blank,
+                    current_time + pre_blank + duration,
+                ]
+            else:
+                images += [image, blank]
+                times += [current_time + pre_blank, current_time + pre_blank + duration]
+            current_time = times[-1]
+        return video.Video(images, times=times)
+
 
 # -- Video Sets --
@@ -466,3 +543,4 @@ def df(self):
             rows.append(row)
 
         return pd.DataFrame(rows).sort_values(by=["spatial_type", "video_id", "onset"]).reset_index(drop=True)
+
diff --git a/foundation/stimulus/fill/scan.py b/foundation/stimulus/fill/scan.py
index e26287c..167246b 100644
--- a/foundation/stimulus/fill/scan.py
+++ b/foundation/stimulus/fill/scan.py
@@ -67,4 +67,34 @@ def fill(self):
                 f"{self.item['animal_id']}-{self.item['session']}-{self.item['scan_idx']}, "\
                 "ordered by the trial_idx of the first repetition.",
             ),
-        )
\ No newline at end of file
+        )
+
+@keys
+class VisualScanFrame2List:
+    """Visual scan with stimulus.Frame2 (static image) conditions presented"""
+
+    @property
+    def keys(self):
+        return [
+            pipe_exp.Scan & (
+                pipe_stim.Trial * pipe_stim.Condition & pipe_stim.Frame2
+            ),
+        ]
+
+    def fill(self):
+        from foundation.stimulus.video import Frame2List
+
+        keys = U('condition_hash').aggr(
+            (pipe_stim.Frame2 * pipe_stim.Condition * pipe_stim.Trial & self.key),
+            trial_idx='MIN(trial_idx)'
+        ).fetch(
"KEY", order_by="trial_idx ASC" + ) + return Frame2List.fill( + restrictions=keys, + note=( + "All unique stimulus.Frame2 conditions presented in "\ + f"{self.item['animal_id']}-{self.item['session']}-{self.item['scan_idx']}, "\ + "ordered by the trial_idx of the first repetition.", + ), ) \ No newline at end of file diff --git a/foundation/stimulus/video.py b/foundation/stimulus/video.py index dfba7e4..379c034 100644 --- a/foundation/stimulus/video.py +++ b/foundation/stimulus/video.py @@ -142,6 +142,19 @@ def compute(self): return FrameList & self +@schema.list +class Frame2List(VideoType): + keys = [pipe_stim.Frame2] + name = "frame2list" + comment = "an ordered list of frame2 stimuli" + + @rowproperty + def compute(self): + from foundation.stimulus.compute.video import Frame2List + + return Frame2List & self + + # -- Video --