-
Notifications
You must be signed in to change notification settings - Fork 48
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Server responding to MIT1003 stimulus
- Loading branch information
1 parent
43d8543
commit a37a645
Showing
3 changed files
with
140 additions
and
111 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,50 +1,79 @@ | ||
import numpy as np | ||
# import pysaliency | ||
import sys | ||
from typing import Union | ||
from scipy.ndimage import gaussian_filter | ||
sys.path.insert(0, '..') | ||
import pysaliency | ||
|
||
class SampleScanpathModel:
    """Minimal example scanpath model.

    Treats the raw stimulus values themselves as (unnormalized) densities:
    the scanpath history is ignored entirely.
    """

    def __init__(self):
        super().__init__()

    def conditional_log_density(self, stimulus, x_hist, y_hist, t_hist, attributes=None, out=None):
        """Return the elementwise natural log of the stimulus array.

        The fixation history (x_hist, y_hist, t_hist) and attributes are
        accepted for interface compatibility but not used.
        """
        log_density = np.log(stimulus)
        return log_density
class LocalContrastModel(pysaliency.Model):
    """Saliency model that scores each pixel by its local contrast.

    The prediction is the squared difference between the grayscale image and
    a Gaussian-blurred copy of it, smoothed again with the same kernel and
    normalized into a probability distribution over pixels.

    Parameters
    ----------
    bandwidth : float
        Blur kernel size, expressed as a fraction of the longer image side.
    """

    def __init__(self, bandwidth=0.05, **kwargs):
        super().__init__(**kwargs)
        self.bandwidth = bandwidth

    def _log_density(self, stimulus: Union[pysaliency.datasets.Stimulus, np.ndarray]):
        """Return the per-pixel log density as an array of shape (height, width)."""
        # _log_density can either take pysaliency Stimulus objects, or, for
        # convenience, plain numpy arrays; `as_stimulus` ensures that we have
        # a Stimulus object.
        stimulus_object = pysaliency.datasets.as_stimulus(stimulus)

        # Convert to grayscale. Fix: the original unconditionally averaged over
        # axis 2 and crashed on stimuli that are already 2-D (no color channel).
        stimulus_data = stimulus_object.stimulus_data
        if stimulus_data.ndim == 3:
            gray_stimulus = np.mean(stimulus_data, axis=2)
        else:
            gray_stimulus = stimulus_data.astype(float)

        # size contains the height and width of the image, but not potential
        # color channels
        height, width = stimulus_object.size

        # Define the kernel size relative to the image size; sigma is chosen so
        # the kernel spans roughly +/- 3 standard deviations.
        kernel_size = np.round(self.bandwidth * max(width, height)).astype(int)
        sigma = (kernel_size - 1) / 6

        # Apply Gaussian blur and calculate the squared difference between the
        # blurred and the original image, smoothed once more.
        blurred_stimulus = gaussian_filter(gray_stimulus, sigma)
        prediction = gaussian_filter((gray_stimulus - blurred_stimulus) ** 2, sigma)

        # Fix: for a constant image the contrast map is all zeros and the
        # original divided by zero (yielding NaN log-densities); fall back to a
        # uniform distribution instead.
        max_value = prediction.max()
        if max_value == 0:
            return np.full((height, width), -np.log(height * width))

        # Normalize to integer values in [1, 255] so that every pixel keeps a
        # strictly positive probability (log stays finite).
        prediction = (254 * (prediction / max_value)).astype(int) + 1

        density = prediction / prediction.sum()
        return np.log(density)
|
||
class MySimpleScanpathModel(pysaliency.ScanpathModel):
    """Scanpath model combining an image-driven spatial prior with a
    Gaussian saccade bias centered on the most recent fixation.

    The spatial prior comes from a LocalContrastModel; the saccade bias
    down-weights pixels far from the last fixation location.
    """

    def __init__(self, spatial_model_bandwidth: float=0.05, saccade_width: float=0.1):
        self.spatial_model_bandwidth = spatial_model_bandwidth
        self.saccade_width = saccade_width
        self.spatial_model = LocalContrastModel(spatial_model_bandwidth)

    def conditional_log_density(self, stimulus, x_hist, y_hist, t_hist, attributes=None, out=None,):
        """Return the log density over pixels given the fixation history.

        Multiplies the spatial prior density by an isotropic Gaussian
        centered on the last fixation and renormalizes.
        """
        stimulus_object = pysaliency.datasets.as_stimulus(stimulus)

        # size holds (height, width) of the image, without color channels
        height, width = stimulus_object.size

        # image-driven spatial prior, converted back from log space
        spatial_prior_density = np.exp(self.spatial_model.log_density(stimulus))

        # signed pixel offsets from the most recent fixation; broadcasting a
        # (1, width) row against a (height, 1) column yields the full grid
        offset_x = np.arange(width, dtype=float)[np.newaxis, :] - x_hist[-1]
        offset_y = np.arange(height, dtype=float)[:, np.newaxis] - y_hist[-1]

        # isotropic Gaussian saccade bias; its width scales with the longer
        # image side
        bias_scale = self.saccade_width * max(width, height)
        saccade_bias = np.exp(-0.5 * (offset_x ** 2 + offset_y ** 2) / bias_scale ** 2)

        # combine prior and bias, renormalize to a probability distribution
        combined = spatial_prior_density * saccade_bias
        density = combined / combined.sum()
        return np.log(density)
|
||
# return np.array(response.json()['log_density']) |