diff --git a/examples/docker_submission/README.md b/examples/docker_submission/README.md
index a525eaa..325b2ce 100644
--- a/examples/docker_submission/README.md
+++ b/examples/docker_submission/README.md
@@ -4,24 +4,30 @@
 TODO: Add a description of the submission process here.
 
 ## Launching the submission container
-TODO: Create a docker-compose file
+
+First, we have to build the container:
 ```bash
-cd ./http_submission
-docker build -t sample_pysaliency .
+docker build -t sample_pysaliency docker
 ```
+Then we can start it:
+```bash
+docker run --rm -it -p 4000:4000 sample_pysaliency
+```
+The above command will launch the image as an interactive container in the foreground
+and expose port `4000` to the host machine.
+If you prefer to run it in the background, use
 ```bash
 docker run --name sample_pysaliency -dp 4000:4000 sample_pysaliency
 ```
-The above command will launch a container named `sample_pysaliency` and expose the port `4000` to the host machine. The container will be running in the background.
+which will launch a container named `sample_pysaliency`. The container will keep running in the background.
 
 To test the model server, run the sample_evaluation script (Make sure to have the `pysaliency` package installed):
 ```bash
-python ./http_evaluation/sample_evaluation.py
+python ./sample_evaluation.py
 ```
-
-To delete the container, run the following command:
+To delete the background container, run the following command:
 ```bash
 docker stop sample_pysaliency && docker rm sample_pysaliency
 ```
\ No newline at end of file
diff --git a/examples/docker_submission/Dockerfile b/examples/docker_submission/docker/Dockerfile
similarity index 57%
rename from examples/docker_submission/Dockerfile
rename to examples/docker_submission/docker/Dockerfile
index 26fe9df..a8627d3 100644
--- a/examples/docker_submission/Dockerfile
+++ b/examples/docker_submission/docker/Dockerfile
@@ -8,16 +8,19 @@ WORKDIR /app
 
 ENV HTTP_PORT=4000
 
 RUN apt-get update \
-    && apt-get -y install gcc
+    && apt-get -y install gcc \
+    && apt-get clean \
+    && rm -rf /var/lib/apt/lists/* /var/cache/apt/*
 
 COPY ./requirements.txt ./
-RUN python -m pip install -U pip \
-    && python -m pip install -r requirements.txt
+RUN python -m pip install --no-cache -U pip \
+    && python -m pip install --no-cache -r requirements.txt
 
-COPY . ./
+COPY ./model_server.py ./
+COPY ./sample_submission.py ./
 
 # This is needed for Singularity builds.
 EXPOSE $HTTP_PORT
 
 # The entrypoint for a container,
-CMD ["gunicorn", "-w", "1", "-b", "0.0.0.0:4000", "--pythonpath", ".", "model_server:app"]
\ No newline at end of file
+CMD ["gunicorn", "-w", "1", "-b", "0.0.0.0:4000", "--pythonpath", ".", "--access-logfile", "-", "model_server:app"]
\ No newline at end of file
diff --git a/examples/docker_submission/model_server.py b/examples/docker_submission/docker/model_server.py
similarity index 79%
rename from examples/docker_submission/model_server.py
rename to examples/docker_submission/docker/model_server.py
index 2c38337..063b996 100644
--- a/examples/docker_submission/model_server.py
+++ b/examples/docker_submission/docker/model_server.py
@@ -1,20 +1,22 @@
 from flask import Flask, request, jsonify
+from flask_orjson import OrjsonProvider
 import numpy as np
 import json
 from PIL import Image
 from io import BytesIO
-# import pickle
+import orjson
+
 
 # Import your model here
 from sample_submission import MySimpleScanpathModel
 
 app = Flask("saliency-model-server")
+app.json_provider = OrjsonProvider(app)
 app.logger.setLevel("DEBUG")
 
 # # TODO - replace this with your model
 model = MySimpleScanpathModel()
 
-
 @app.route('/conditional_log_density', methods=['POST'])
 def conditional_log_density():
     data = json.loads(request.form['json_data'])
@@ -28,14 +30,16 @@ def conditional_log_density():
     stimulus = np.array(image)
 
     log_density = model.conditional_log_density(stimulus, x_hist, y_hist, t_hist, attributes)
-    return jsonify({'log_density': log_density.tolist()})
+    log_density_list = log_density.tolist()
+    response = orjson.dumps({'log_density': log_density_list})
+    return response
 
 
 @app.route('/type', methods=['GET'])
 def type():
     type = "ScanpathModel"
     version = "v1.0.0"
-    return jsonify({'type': type, 'version': version})
+    return orjson.dumps({'type': type, 'version': version})
 
 
 def main():
diff --git a/examples/docker_submission/docker/requirements.txt b/examples/docker_submission/docker/requirements.txt
new file mode 100644
index 0000000..ebaa2c5
--- /dev/null
+++ b/examples/docker_submission/docker/requirements.txt
@@ -0,0 +1,10 @@
+cython
+flask
+gunicorn
+numpy
+
+# Add additional dependencies here
+pysaliency
+scipy
+torch
+flask_orjson
diff --git a/examples/docker_submission/docker/sample_submission.py b/examples/docker_submission/docker/sample_submission.py
new file mode 100644
index 0000000..b2d0be8
--- /dev/null
+++ b/examples/docker_submission/docker/sample_submission.py
@@ -0,0 +1,79 @@
+import numpy as np
+import sys
+from typing import Union
+from scipy.ndimage import gaussian_filter
+import pysaliency
+
+
+class LocalContrastModel(pysaliency.Model):
+    def __init__(self, bandwidth=0.05, **kwargs):
+        super().__init__(**kwargs)
+        self.bandwidth = bandwidth
+
+    def _log_density(self, stimulus: Union[pysaliency.datasets.Stimulus, np.ndarray]):
+
+        # _log_density can either take pysaliency Stimulus objects, or, for convenience, simply numpy arrays
+        # `as_stimulus` ensures that we have a Stimulus object
+        stimulus_object = pysaliency.datasets.as_stimulus(stimulus)
+
+        # grayscale image
+        gray_stimulus = np.mean(stimulus_object.stimulus_data, axis=2)
+
+        # size contains the height and width of the image, but not potential color channels
+        height, width = stimulus_object.size
+
+        # define kernel size based on image size
+        kernel_size = np.round(self.bandwidth * max(width, height)).astype(int)
+        sigma = (kernel_size - 1) / 6
+
+        # apply Gaussian blur and calculate squared difference between blurred and original image
+        blurred_stimulus = gaussian_filter(gray_stimulus, sigma)
+
+        prediction = gaussian_filter((gray_stimulus - blurred_stimulus)**2, sigma)
+
+        # normalize to [1, 255]
+        prediction = (254 * (prediction / prediction.max())).astype(int) + 1
+
+        density = prediction / prediction.sum()
+
+        return np.log(density)
+
+class MySimpleScanpathModel(pysaliency.ScanpathModel):
+    def __init__(self, spatial_model_bandwidth: float=0.05, saccade_width: float=0.1):
+        self.spatial_model_bandwidth = spatial_model_bandwidth
+        self.saccade_width = saccade_width
+        self.spatial_model = LocalContrastModel(spatial_model_bandwidth)
+        # self.spatial_model = pysaliency.UniformModel()
+
+
+    def conditional_log_density(self, stimulus, x_hist, y_hist, t_hist, attributes=None, out=None,):
+        stimulus_object = pysaliency.datasets.as_stimulus(stimulus)
+
+        # size contains the height and width of the image, but not potential color channels
+        height, width = stimulus_object.size
+
+        spatial_prior_log_density = self.spatial_model.log_density(stimulus)
+        spatial_prior_density = np.exp(spatial_prior_log_density)
+
+        # compute saccade bias
+        last_x = x_hist[-1]
+        last_y = y_hist[-1]
+
+        xs = np.arange(width, dtype=float)
+        ys = np.arange(height, dtype=float)
+        XS, YS = np.meshgrid(xs, ys)
+
+        XS -= last_x
+        YS -= last_y
+
+        # compute prior
+        max_size = max(width, height)
+        actual_kernel_size = self.saccade_width * max_size
+
+        saccade_bias = np.exp(-0.5 * (XS ** 2 + YS ** 2) / actual_kernel_size ** 2)
+
+        prediction = spatial_prior_density * saccade_bias
+
+        density = prediction / prediction.sum()
+        return np.log(density)
+
diff --git a/examples/docker_submission/docker_deepgaze3/Dockerfile b/examples/docker_submission/docker_deepgaze3/Dockerfile
new file mode 100644
index 0000000..a8627d3
--- /dev/null
+++ b/examples/docker_submission/docker_deepgaze3/Dockerfile
@@ -0,0 +1,26 @@
+# Specify a base image depending on the project.
+FROM bitnami/python:3.8
+# For more complex examples, might need to use a different base image.
+# FROM pytorch/pytorch:1.9.1-cuda11.1-cudnn8-runtime
+
+WORKDIR /app
+
+ENV HTTP_PORT=4000
+
+RUN apt-get update \
+    && apt-get -y install gcc \
+    && apt-get clean \
+    && rm -rf /var/lib/apt/lists/* /var/cache/apt/*
+
+COPY ./requirements.txt ./
+RUN python -m pip install --no-cache -U pip \
+    && python -m pip install --no-cache -r requirements.txt
+
+COPY ./model_server.py ./
+COPY ./sample_submission.py ./
+
+# This is needed for Singularity builds.
+EXPOSE $HTTP_PORT
+
+# The entrypoint for a container,
+CMD ["gunicorn", "-w", "1", "-b", "0.0.0.0:4000", "--pythonpath", ".", "--access-logfile", "-", "model_server:app"]
\ No newline at end of file
diff --git a/examples/docker_submission/docker_deepgaze3/model_server.py b/examples/docker_submission/docker_deepgaze3/model_server.py
new file mode 100644
index 0000000..78bc513
--- /dev/null
+++ b/examples/docker_submission/docker_deepgaze3/model_server.py
@@ -0,0 +1,76 @@
+from flask import Flask, request
+# from flask_orjson import OrjsonProvider
+import numpy as np
+import json
+from PIL import Image
+from io import BytesIO
+import orjson
+from scipy.ndimage import zoom
+from scipy.special import logsumexp
+import torch
+
+# Import your model here
+import deepgaze_pytorch
+
+# Flask server
+app = Flask("saliency-model-server")
+# app.json_provider = OrjsonProvider(app)
+app.logger.setLevel("DEBUG")
+
+# # TODO - replace this with your model
+model = deepgaze_pytorch.DeepGazeIII(pretrained=True)
+
+@app.route('/conditional_log_density', methods=['POST'])
+def conditional_log_density():
+    # get data
+    data = json.loads(request.form['json_data'])
+
+    # extract scanpath history
+    x_hist = np.array(data['x_hist'])
+    y_hist = np.array(data['y_hist'])
+    # t_hist = np.array(data['t_hist'])
+    # attributes = data.get('attributes', {})
+
+    # extract stimulus
+    image_bytes = request.files['stimulus'].read()
+    image = Image.open(BytesIO(image_bytes))
+    stimulus = np.array(image)
+
+    # centerbias for deepgaze3 model
+    centerbias_template = np.zeros((1024, 1024))  # (1024, 1024)
+    centerbias = zoom(centerbias_template,
+                      (stimulus.shape[0]/centerbias_template.shape[0],
+                       stimulus.shape[1]/centerbias_template.shape[1]),
+                      order=0,
+                      mode='nearest'
+                      )
+    centerbias -= logsumexp(centerbias)
+
+    # make tensors for deepgaze3 model
+    image_tensor = torch.tensor([stimulus.transpose(2, 0, 1)])
+    centerbias_tensor = torch.tensor([centerbias])
+    x_hist_tensor = torch.tensor([x_hist[model.included_fixations]])
+    y_hist_tensor = torch.tensor([y_hist[model.included_fixations]])
+
+    # return model response
+    log_density = model(image_tensor, centerbias_tensor, x_hist_tensor, y_hist_tensor)
+    log_density_list = log_density.tolist()
+    response = orjson.dumps({'log_density': log_density_list})
+    return response
+
+
+@app.route('/type', methods=['GET'])
+def type():
+    type = "ScanpathModel"
+    version = "v1.0.0"
+    return orjson.dumps({'type': type, 'version': version})
+
+
+
+
+def main():
+    app.run(host="localhost", port="4000", debug="True", threaded=True)
+
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/examples/docker_submission/docker_deepgaze3/requirements.txt b/examples/docker_submission/docker_deepgaze3/requirements.txt
new file mode 100644
index 0000000..5a0ab4d
--- /dev/null
+++ b/examples/docker_submission/docker_deepgaze3/requirements.txt
@@ -0,0 +1,11 @@
+cython
+flask
+gunicorn
+numpy
+
+# Add additional dependencies here
+pysaliency
+scipy
+torch
+flask_orjson
+git+https://github.com/matthias-k/deepgaze
\ No newline at end of file
diff --git a/examples/docker_submission/docker_deepgaze3/sample_submission.py b/examples/docker_submission/docker_deepgaze3/sample_submission.py
new file mode 100644
index 0000000..b2d0be8
--- /dev/null
+++ b/examples/docker_submission/docker_deepgaze3/sample_submission.py
@@ -0,0 +1,79 @@
+import numpy as np
+import sys
+from typing import Union
+from scipy.ndimage import gaussian_filter
+import pysaliency
+
+
+class LocalContrastModel(pysaliency.Model):
+    def __init__(self, bandwidth=0.05, **kwargs):
+        super().__init__(**kwargs)
+        self.bandwidth = bandwidth
+
+    def _log_density(self, stimulus: Union[pysaliency.datasets.Stimulus, np.ndarray]):
+
+        # _log_density can either take pysaliency Stimulus objects, or, for convenience, simply numpy arrays
+        # `as_stimulus` ensures that we have a Stimulus object
+        stimulus_object = pysaliency.datasets.as_stimulus(stimulus)
+
+        # grayscale image
+        gray_stimulus = np.mean(stimulus_object.stimulus_data, axis=2)
+
+        # size contains the height and width of the image, but not potential color channels
+        height, width = stimulus_object.size
+
+        # define kernel size based on image size
+        kernel_size = np.round(self.bandwidth * max(width, height)).astype(int)
+        sigma = (kernel_size - 1) / 6
+
+        # apply Gaussian blur and calculate squared difference between blurred and original image
+        blurred_stimulus = gaussian_filter(gray_stimulus, sigma)
+
+        prediction = gaussian_filter((gray_stimulus - blurred_stimulus)**2, sigma)
+
+        # normalize to [1, 255]
+        prediction = (254 * (prediction / prediction.max())).astype(int) + 1
+
+        density = prediction / prediction.sum()
+
+        return np.log(density)
+
+class MySimpleScanpathModel(pysaliency.ScanpathModel):
+    def __init__(self, spatial_model_bandwidth: float=0.05, saccade_width: float=0.1):
+        self.spatial_model_bandwidth = spatial_model_bandwidth
+        self.saccade_width = saccade_width
+        self.spatial_model = LocalContrastModel(spatial_model_bandwidth)
+        # self.spatial_model = pysaliency.UniformModel()
+
+
+    def conditional_log_density(self, stimulus, x_hist, y_hist, t_hist, attributes=None, out=None,):
+        stimulus_object = pysaliency.datasets.as_stimulus(stimulus)
+
+        # size contains the height and width of the image, but not potential color channels
+        height, width = stimulus_object.size
+
+        spatial_prior_log_density = self.spatial_model.log_density(stimulus)
+        spatial_prior_density = np.exp(spatial_prior_log_density)
+
+        # compute saccade bias
+        last_x = x_hist[-1]
+        last_y = y_hist[-1]
+
+        xs = np.arange(width, dtype=float)
+        ys = np.arange(height, dtype=float)
+        XS, YS = np.meshgrid(xs, ys)
+
+        XS -= last_x
+        YS -= last_y
+
+        # compute prior
+        max_size = max(width, height)
+        actual_kernel_size = self.saccade_width * max_size
+
+        saccade_bias = np.exp(-0.5 * (XS ** 2 + YS ** 2) / actual_kernel_size ** 2)
+
+        prediction = spatial_prior_density * saccade_bias
+
+        density = prediction / prediction.sum()
+        return np.log(density)
+
diff --git a/examples/docker_submission/requirements.txt b/examples/docker_submission/requirements.txt
deleted file mode 100644
index 289461c..0000000
--- a/examples/docker_submission/requirements.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-cython
-flask
-gunicorn
-numpy
-
-# Add additional dependencies here
\ No newline at end of file
diff --git a/examples/docker_submission/sample_evaluation.py b/examples/docker_submission/sample_evaluation.py
index 67ce6e4..4de23e5 100644
--- a/examples/docker_submission/sample_evaluation.py
+++ b/examples/docker_submission/sample_evaluation.py
@@ -6,19 +6,34 @@
 import pysaliency
+from tqdm import tqdm
+
+import deepgaze_pytorch
+
+
 if __name__ == "__main__":
+
+    # initialize HTTPScanpathModel
     http_model = HTTPScanpathModel("http://localhost:4000")
     http_model.check_type()
 
     # for testing
-    model = MySimpleScanpathModel()
+    test_model = deepgaze_pytorch.DeepGazeIII(pretrained=True)
 
     # get MIT1003 dataset
     stimuli, fixations = pysaliency.get_mit1003(location='pysaliency_datasets')
+    for stimulus in tqdm(stimuli):
+        stimulus.stimulus_data
+
+    # filter for scanpaths with more than 3 previous fixations, as required by the deepgaze3 model
+    eval_fixations = fixations[fixations.scanpath_history_length > 3]
+
+    eval_fixations = eval_fixations[:10]
 
-    eval_fixations = fixations[fixations.scanpath_history_length > 0]
+    # information_gain = http_model.information_gain(stimuli, eval_fixations, average="image", verbose=True)
+    # print("IG:", information_gain)
 
-    for fixation_index in range(10):
+    for fixation_index in tqdm(range(10)):
         # get server response for one stimulus
         server_density = http_model.conditional_log_density(
             stimulus=stimuli.stimuli[eval_fixations.n[fixation_index]],
@@ -26,14 +41,14 @@
             y_hist=eval_fixations.y_hist[fixation_index],
             t_hist=eval_fixations.t_hist[fixation_index]
         )
-        # get model response
-        model_density = model.conditional_log_density(
-            stimulus=stimuli.stimuli[eval_fixations.n[fixation_index]],
-            x_hist=eval_fixations.x_hist[fixation_index],
-            y_hist=eval_fixations.y_hist[fixation_index],
-            t_hist=eval_fixations.t_hist[fixation_index]
-        )
+        # get test model response
+        # test_model_density = test_model(
+        #     stimulus=stimuli.stimuli[eval_fixations.n[fixation_index]],
+        #     x_hist=eval_fixations.x_hist[fixation_index],
+        #     y_hist=eval_fixations.y_hist[fixation_index],
+        #     t_hist=eval_fixations.t_hist[fixation_index]
+        # )
 
         # Testing
-        test = np.testing.assert_allclose(server_density, model_density)
+        # test = np.testing.assert_allclose(server_density, test_model_density)
\ No newline at end of file
diff --git a/examples/docker_submission/sample_submission.py b/examples/docker_submission/sample_submission.py
index e73b8c3..e6d4b5c 100644
--- a/examples/docker_submission/sample_submission.py
+++ b/examples/docker_submission/sample_submission.py
@@ -2,7 +2,7 @@
 import sys
 from typing import Union
 from scipy.ndimage import gaussian_filter
-sys.path.insert(0, '..')
+sys.path.insert(0, '../..')
 
 import pysaliency
diff --git a/pysaliency/http_models.py b/pysaliency/http_models.py
index ec16641..3cd2b41 100644
--- a/pysaliency/http_models.py
+++ b/pysaliency/http_models.py
@@ -4,8 +4,20 @@
 import requests
 import json
 import numpy as np
+import orjson
+
+from .datasets import as_stimulus
 
 
 class HTTPScanpathModel(ScanpathModel):
+    """
+    A scanpath model that uses an HTTP server to make predictions.
+
+    The model is provided with a URL where it expects a server with the following API:
+
+    /conditional_log_density: expects a POST request with a file attachment `stimulus`
+        containing the stimulus and a JSON body containing x_hist, y_hist, t_hist and a dictionary with other attributes
+    /type: returns the model type and version
+    """
     def __init__(self, url):
         self.url = url
         self.check_type()
@@ -20,30 +32,38 @@ def type_url(self):
 
     def conditional_log_density(self, stimulus, x_hist, y_hist, t_hist, attributes=None, out=None):
         # build request
-        pil_image = Image.fromarray(stimulus)
+        stimulus_object = as_stimulus(stimulus)
+
+        # TODO: check for file stimuli, in this case use original file to save encoding time
+        pil_image = Image.fromarray(stimulus_object.stimulus_data)
         image_bytes = BytesIO()
         pil_image.save(image_bytes, format='png')
 
         def _convert_attribute(attribute):
             if isinstance(attribute, np.ndarray):
                 return attribute.tolist()
+            if isinstance(attribute, (np.int64, np.int32)):
+                return int(attribute)
+            if isinstance(attribute, (np.float64, np.float32)):
+                return float(attribute)
             return attribute
 
         json_data = {
-            "x_hist": list(x_hist),
-            "y_hist": list(y_hist),
-            "t_hist": list(t_hist),
+            "x_hist": x_hist.tolist(),
+            "y_hist": y_hist.tolist(),
+            "t_hist": t_hist.tolist(),
             "attributes": {key: _convert_attribute(value) for key, value in (attributes or {}).items()}
         }
 
-        # send request
-        response = requests.post(f"{self.log_density_url}", data={'json_data': json.dumps(json_data)}, files={'stimulus': image_bytes.getvalue()})
+        response = requests.post(f"{self.log_density_url}", data={'json_data': orjson.dumps(json_data)}, files={'stimulus': image_bytes.getvalue()})
 
         # parse response
         if response.status_code != 200:
             raise ValueError(f"Server returned status code {response.status_code}")
 
-        return np.array(response.json()['log_density'])
+        json_data = orjson.loads(response.text)
+        prediction = np.array(json_data['log_density'])
+        return prediction
 
     def check_type(self):
         response = requests.get(f"{self.type_url}").json()
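
For reference, a running submission container can also be queried directly through `HTTPScanpathModel` from `pysaliency/http_models.py` (changed above), without going through `sample_evaluation.py`. The following sketch is not part of the diff; it assumes a container from this PR is listening on `localhost:4000`, that `pysaliency` is installed on the host, and the stimulus and scanpath history below are dummy values chosen only for illustration.

```python
# Minimal usage sketch (assumes a submission container is already running on localhost:4000).
import numpy as np
from pysaliency.http_models import HTTPScanpathModel

# The constructor calls check_type(), i.e. it already contacts the /type endpoint.
model = HTTPScanpathModel("http://localhost:4000")

# Dummy RGB stimulus and a made-up scanpath history. Four previous fixations are
# provided because the DeepGaze III server above indexes the last fixations of the history.
stimulus = np.random.randint(0, 255, size=(768, 1024, 3), dtype=np.uint8)
x_hist = np.array([512.0, 300.0, 600.0, 450.0])
y_hist = np.array([384.0, 250.0, 500.0, 350.0])
t_hist = np.array([0.0, 0.3, 0.6, 0.9])

# POSTs the PNG-encoded stimulus and the history to /conditional_log_density
# and returns the predicted log density as a numpy array.
log_density = model.conditional_log_density(stimulus, x_hist, y_hist, t_hist)
print(log_density.shape)
print(np.exp(log_density).sum())  # should be close to 1 for a proper density
```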