From fc31507fd0be7f94ed7a8c879a7ea8d4555f689c Mon Sep 17 00:00:00 2001 From: pellegreene <36171165+pellegreene@users.noreply.github.com> Date: Tue, 30 Jul 2024 13:57:22 -0400 Subject: [PATCH 01/28] Add cv_18_dagger_408_pretrained (#1104) * Add model files * Adjust timm version --------- Co-authored-by: Ethan Pellegrini --- .../cv_18_dagger_408_pretrained/__init__.py | 8 +++ .../cv_18_dagger_408_pretrained/model.py | 57 +++++++++++++++++++ .../requirements.txt | 3 + .../cv_18_dagger_408_pretrained/test.py | 25 ++++++++ 4 files changed, 93 insertions(+) create mode 100644 brainscore_vision/models/cv_18_dagger_408_pretrained/__init__.py create mode 100644 brainscore_vision/models/cv_18_dagger_408_pretrained/model.py create mode 100644 brainscore_vision/models/cv_18_dagger_408_pretrained/requirements.txt create mode 100644 brainscore_vision/models/cv_18_dagger_408_pretrained/test.py diff --git a/brainscore_vision/models/cv_18_dagger_408_pretrained/__init__.py b/brainscore_vision/models/cv_18_dagger_408_pretrained/__init__.py new file mode 100644 index 000000000..7658e4b6f --- /dev/null +++ b/brainscore_vision/models/cv_18_dagger_408_pretrained/__init__.py @@ -0,0 +1,8 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from .model import get_model, LAYERS + +model_registry['cv_18_dagger_408_pretrained'] = lambda: ModelCommitment( + identifier='cv_18_dagger_408_pretrained', + activations_model=get_model(), + layers=LAYERS) diff --git a/brainscore_vision/models/cv_18_dagger_408_pretrained/model.py b/brainscore_vision/models/cv_18_dagger_408_pretrained/model.py new file mode 100644 index 000000000..ef51007aa --- /dev/null +++ b/brainscore_vision/models/cv_18_dagger_408_pretrained/model.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +import functools +import ssl + +from timm.models import create_model +from brainscore_vision.model_helpers.activations.pytorch import ( + PytorchWrapper, + load_images, + preprocess_images, +) +from brainscore_vision.model_helpers.check_submission import check_models + +ssl._create_default_https_context = ssl._create_unverified_context + +BIBTEX = "" +LAYERS = [ + "blocks.1.blocks.1.0.norm1", + "blocks.1.blocks.1.4.norm2", + "blocks.1.blocks.1.0.mlp.act", + "blocks.2.revert_projs.1.2", +] +# Description of Layers: +# Behavior : 'blocks.2.revert_projs.1.2' +# IT : 'blocks.1.blocks.1.4.norm2' +# V1 : 'blocks.1.blocks.1.0.norm1' +# V2 : 'blocks.1.blocks.1.0.mlp.act' +# V4 : 'blocks.1.blocks.1.0.mlp.act' +INPUT_SIZE = 256 + + +def load_preprocess_custom_model(image_filepaths, image_size, **kwargs): + images = load_images(image_filepaths) + images = preprocess_images(images, image_size=image_size, **kwargs) + return images + + +def get_model(): + # Generate Model + model = create_model("crossvit_18_dagger_408", pretrained=True) + model.eval() + + # Load Model and create necessary methods + # init the model and the preprocessing: + preprocessing = functools.partial( + load_preprocess_custom_model, image_size=224) + # get an activations model from the Pytorch Wrapper + wrapper = PytorchWrapper( + identifier="cv_18_dagger_408_pretrained", model=model, preprocessing=preprocessing + ) + wrapper.image_size = 224 + return wrapper + + +if __name__ == "__main__": + # Use this method to ensure the correctness of the BaseModel implementations. + # It executes a mock run of brain-score benchmarks. 
+ check_models.check_base_models(__name__) diff --git a/brainscore_vision/models/cv_18_dagger_408_pretrained/requirements.txt b/brainscore_vision/models/cv_18_dagger_408_pretrained/requirements.txt new file mode 100644 index 000000000..6d5299635 --- /dev/null +++ b/brainscore_vision/models/cv_18_dagger_408_pretrained/requirements.txt @@ -0,0 +1,3 @@ +opencv-python-headless<4.3 +timm +torch diff --git a/brainscore_vision/models/cv_18_dagger_408_pretrained/test.py b/brainscore_vision/models/cv_18_dagger_408_pretrained/test.py new file mode 100644 index 000000000..ba0c1df1c --- /dev/null +++ b/brainscore_vision/models/cv_18_dagger_408_pretrained/test.py @@ -0,0 +1,25 @@ +import logging +import sys + +import pytest +from pytest import approx + +from brainscore_vision import score + +logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) + + +@pytest.mark.slow +@pytest.mark.travis_slow +@pytest.mark.memory_intense +@pytest.mark.parametrize("model_identifier, benchmark_identifier, expected_score", [ + # Private + pytest.param("cv_18_dagger_408_pretrained", "MajajHong2015.IT-pls", approx(0.5126, abs=0.0005), marks=[pytest.mark.private_access]), + + # Public + pytest.param("cv_18_dagger_408_pretrained", "MajajHong2015public.IT-pls", approx(0.5362, abs=0.0005)), +]) +def test_score(model_identifier, benchmark_identifier, expected_score): + actual_score = score(model_identifier=model_identifier, benchmark_identifier=benchmark_identifier, + conda_active=False) + assert actual_score == expected_score From 63c595e7c93c4738854504b358a5c6cd10a28a14 Mon Sep 17 00:00:00 2001 From: Katherine Fairchild Date: Thu, 1 Aug 2024 20:56:48 -0400 Subject: [PATCH 02/28] add eBarlow_lmda_02_200_full to models (#1121) Co-authored-by: AutoJenkins --- .../eBarlow_lmda_02_200_full/__init__.py | 9 ++ .../models/eBarlow_lmda_02_200_full/model.py | 85 +++++++++++++++++++ .../models/eBarlow_lmda_02_200_full/setup.py | 25 ++++++ .../models/eBarlow_lmda_02_200_full/test.py | 1 + 4 files changed, 120 insertions(+) create mode 100644 brainscore_vision/models/eBarlow_lmda_02_200_full/__init__.py create mode 100644 brainscore_vision/models/eBarlow_lmda_02_200_full/model.py create mode 100644 brainscore_vision/models/eBarlow_lmda_02_200_full/setup.py create mode 100644 brainscore_vision/models/eBarlow_lmda_02_200_full/test.py diff --git a/brainscore_vision/models/eBarlow_lmda_02_200_full/__init__.py b/brainscore_vision/models/eBarlow_lmda_02_200_full/__init__.py new file mode 100644 index 000000000..a479777d4 --- /dev/null +++ b/brainscore_vision/models/eBarlow_lmda_02_200_full/__init__.py @@ -0,0 +1,9 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from .model import get_model, get_layers + +model_registry["eBarlow_lmda_02_200_full"] = lambda: ModelCommitment( + identifier="eBarlow_lmda_02_200_full", + activations_model=get_model("eBarlow_lmda_02_200_full"), + layers=get_layers("eBarlow_lmda_02_200_full"), +) diff --git a/brainscore_vision/models/eBarlow_lmda_02_200_full/model.py b/brainscore_vision/models/eBarlow_lmda_02_200_full/model.py new file mode 100644 index 000000000..5cc8e5d8c --- /dev/null +++ b/brainscore_vision/models/eBarlow_lmda_02_200_full/model.py @@ -0,0 +1,85 @@ +from brainscore_vision.model_helpers.check_submission import check_models +import functools +import os +from urllib.request import urlretrieve +import torchvision.models +from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper +from 
brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images +from pathlib import Path +from brainscore_vision.model_helpers import download_weights +import torch +from collections import OrderedDict + +# This is an example implementation for submitting resnet-50 as a pytorch model + +# Attention: It is important, that the wrapper identifier is unique per model! +# The results will otherwise be the same due to brain-scores internal result caching mechanism. +# Please load your pytorch model for usage in CPU. There won't be GPUs available for scoring your model. +# If the model requires a GPU, contact the brain-score team directly. +from brainscore_vision.model_helpers.check_submission import check_models + + +def get_model_list(): + return ["eBarlow_lmda_02_200_full"] + + +def get_model(name): + assert name == "eBarlow_lmda_02_200_full" + url = "https://users.flatironinstitute.org/~tyerxa/equi_proj/training_checkpoints/fresh/paired/lmda_0.2/Barlow_200ep/latest-rank0" + fh = urlretrieve(url) + state_dict = torch.load(fh[0], map_location=torch.device("cpu"))["state"]["model"] + model = load_composer_classifier(state_dict) + preprocessing = functools.partial(load_preprocess_images, image_size=224) + wrapper = PytorchWrapper(identifier=name, model=model, preprocessing=preprocessing) + wrapper.image_size = 224 + return wrapper + +def load_composer_classifier(sd): + model = torchvision.models.resnet.resnet50() + new_sd = OrderedDict() + for k, v in sd.items(): + if 'lin_cls' in k: + new_sd['fc.' + k.split('.')[-1]] = v + if ".f." not in k: + continue + parts = k.split(".") + idx = parts.index("f") + new_k = ".".join(parts[idx + 1 :]) + new_sd[new_k] = v + model.load_state_dict(new_sd, strict=True) + return model + +def get_layers(name): + assert name == "eBarlow_lmda_02_200_full" + layers = [ + "layer1.0", + "layer1.1", + "layer1.2", + "layer2.0", + "layer2.1", + "layer2.2", + "layer2.3", + "layer3.0", + "layer3.1", + "layer3.2", + "layer3.3", + "layer3.4", + "layer3.5", + "layer4.0", + "layer4.1", + "layer4.2", + "avgpool", + "fc", + ] + outs = ["conv1", "layer1", "layer2", "layer3", "layer4"] + + return layers + outs + + + +def get_bibtex(model_identifier): + return """xx""" + + +if __name__ == "__main__": + check_models.check_base_models(__name__) diff --git a/brainscore_vision/models/eBarlow_lmda_02_200_full/setup.py b/brainscore_vision/models/eBarlow_lmda_02_200_full/setup.py new file mode 100644 index 000000000..421914cfb --- /dev/null +++ b/brainscore_vision/models/eBarlow_lmda_02_200_full/setup.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +from setuptools import setup, find_packages + +requirements = [ "torchvision", + "torch" +] + +setup( + packages=find_packages(exclude=['tests']), + include_package_data=True, + install_requires=requirements, + license="MIT license", + zip_safe=False, + keywords='brain-score template', + classifiers=[ + 'Development Status :: 2 - Pre-Alpha', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: MIT License', + 'Natural Language :: English', + 'Programming Language :: Python :: 3.7', + ], + test_suite='tests', +) diff --git a/brainscore_vision/models/eBarlow_lmda_02_200_full/test.py b/brainscore_vision/models/eBarlow_lmda_02_200_full/test.py new file mode 100644 index 000000000..e594ba9e1 --- /dev/null +++ b/brainscore_vision/models/eBarlow_lmda_02_200_full/test.py @@ -0,0 +1 @@ +# Left empty as part of 2023 models migration From cda897b1f5349133893ff130d2ea226caaff977a Mon Sep 17 00:00:00 2001 
From: Michael Ferguson Date: Tue, 13 Aug 2024 09:46:09 -0400 Subject: [PATCH 03/28] remove old tutorials in favor of new link (#1170) --- docs/source/modules/benchmark_tutorial.rst | 397 +-------------------- docs/source/modules/model_tutorial.rst | 126 +------ 2 files changed, 2 insertions(+), 521 deletions(-) diff --git a/docs/source/modules/benchmark_tutorial.rst b/docs/source/modules/benchmark_tutorial.rst index 673fc3740..258590d79 100644 --- a/docs/source/modules/benchmark_tutorial.rst +++ b/docs/source/modules/benchmark_tutorial.rst @@ -6,399 +6,4 @@ Benchmark Tutorial ================== -Benchmarks are at the core of Brain-Score and test models' match to experimental observations. -New benchmarks keep models in check and require them to generalize to new experiments. - -A benchmark reproduces the experimental paradigm on a model candidate, the experimentally observed data, -and a metric to compare model with experimental observations. - -To submit a new benchmark, there are three steps: -1. packaging stimuli and data, -2. creating the benchmark with experimental paradigm and metric to compare against data, and -3. opening a pull request on the github repository to commit the updates from 1 and 2 -In order to ensure the continued validity of the benchmark, we require unit tests for all components -(stimuli and data as well as the benchmark itself). - -1. Package stimuli and data -=========================== -We require a certain format for stimuli and data so that we can maintain them for long-term use. -In particular, we use BrainIO for data management. BrainIO uses -`StimulusSet `_ (a subclass of -`pandas DataFrame `_) to maintain stimuli, and -`DataAssembly `_ -(a subclass of `xarray DataArray `_) -to maintain experimental measurements. -Aside from unifying data from different sources, the advantage of these formats is that all data are kept together with -metadata such as image parameters, electrode locations, and details on behavioral choices. -For both StimulusSet and DataAssembly, BrainIO provides packaging methods that upload to S3 cloud storage, and add the -entries to `lookup.csv `_ from which they -can later be accessed. - -Data and stimuli can be made public or kept private. It is your choice if you wish to release the data itself or only -the benchmark. If you choose to keep the data private, model submissions can be scored on the data, but the actual data -itself will not be visible. Publicly released data can also be scored against, but will be fully accessible. - -Getting started, please create a new folder :code:`/__init__.py` in the :code:`packaging` directory in -which you keep all your packaging scripts. -If your code depends on additional requirements, it is good practice to additionally keep a :code:`requirements.txt` -or :code:`setup.py` file specifying the dependencies. - -Before executing the packaging methods to actually upload to S3, please check in with us via -`Slack or Github Issue `_ so that we can give you access. -With the credentials, you can then configure the awscli (:code:`pip install awscli`, :code:`aws configure` using region :code:`us-east-1`, -output format :code:`json`) to make the packaging methods upload successfully. - -**StimulusSet**: -The StimulusSet contains the stimuli that were used in the experiment as well as any kind of metadata for the stimuli. -Below is a slim example of creating and uploading a StimulusSet. 
The :code:`package_stimulus_set` method returns the -AWS metadata needed in the :code:`data/__init__.py` file (such as :code:`sha1` and the :code:`version_id`). -In this example, we store the metadata in the :code:`packaged_stimulus_metadata` variable. - -.. code-block:: python - - from pathlib import Path - from brainio.stimuli import StimulusSet - from brainio.packaging import package_stimulus_set - - stimuli = [] # collect meta - stimulus_paths = {} # collect mapping of stimulus_id to filepath - for filepath in Path(stimuli_directory).glob('*.png'): - stimulus_id = filepath.stem - object_name = filepath.stem.split('_')[0] # if the filepath contains meta, this can come from anywhere - # ...and other metadata - stimulus_paths[stimulus_id] = filepath - stimuli.append({ - 'stimulus_id': stimulus_id, - 'object_name': object_name, - # ...and other metadata - # optionally you can set 'stimulus_path_within_store' to define the filename in the packaged stimuli - }) - stimuli = StimulusSet(stimuli) - stimuli.stimulus_paths = stimulus_paths - stimuli.name = '' # give the StimulusSet an identifier name - - assert len(stimuli) == 1600 # make sure the StimulusSet is what you would expect - - packaged_stimulus_metadata = package_stimulus_set(catalog_name=None, proto_stimulus_set=stimuli, - stimulus_set_identifier=stimuli.name, bucket_name="brainio-brainscore") # upload to S3 - - -**DataAssembly**: - -DataAssemblies contain the actual experimental measurements as well as any metadata on them. -Note that these do not necessarily have to be raw data, but can also be previously published characterizations of the -data such as preference distributions. -As such, the person submitting the data to Brain-Score does not have to be involved in the data collection. -If you package someone else's data, we do however recommend checking the specifics with them to avoid mis-interpretation. -So far, we have encountered data in three forms: - -* NeuroidAssembly: neural data recorded from "neuroids" -- neurons or their analogues such as multi-unit activity from - Utah array electrodes. These assemblies typically contain spike rates structured in three dimensions - :code:`presentation x neuroid x time_bin` where - the :code:`presentation` dimension represents stimulus presentations (e.g. images x trials), - the :code:`neuroid` dimension represents e.g. electrodes (with metadata such as neuroid_id and location), and - the :code:`time_bin` dimension contains information about the start (:code:`time_bin_start`) and - end (:code:`time_bin_end`) of a time bin of spike rates. -* BehavioralAssembly: behavioral measurements, typically choices in a task structured in one dimension - :code:`presentation` that represents stimulus presentations (e.g. images x trials, with metadata on the task such - as the sample object and the distractor object in a match-to-sample task) with the actual choices - (e.g. "dog"/"cat", "left"/"right") in the assembly values. -* PropertiesAssembly: any kind of data in a pre-processed form, such as a surround suppression index per :code:`neuroid`. - -Here is an example of a BehavioralAssembly: - -.. 
code-block:: python - - from brainio.assemblies import BehavioralAssembly - from brainio.packaging import package_data_assembly - - assembly = BehavioralAssembly(['dog', 'dog', 'cat', 'dog', ...], - coords={ - 'stimulus_id': ('presentation', ['image1', 'image2', 'image3', 'image4', ...]), - 'sample_object': ('presentation', ['dog', 'cat', 'cat', 'dog', ...]), - 'distractor_object': ('presentation', ['cat', 'dog', 'dog', 'cat', ...]), - # ...more meta - # Note that meta from the StimulusSet will automatically be merged into the - # presentation dimension: - # https://github.com/brain-score/brainio/blob/d0ac841779fb47fa7b8bdad3341b68357c8031d9/brainio/fetch.py#L125-L132 - }, - dims=['presentation']) - assembly.name = '' # give the assembly an identifier name - - # make sure the assembly is what you would expect - assert len(assembly['presentation']) == 179660 - assert len(set(assembly['stimulus_id'].values)) == 1600 - assert len(set(assembly['choice'].values)) == len(set(assembly['sample_object'].values)) \ - == len(set(assembly['distractor_object'].values)) == 2 - - # upload to S3 - packaged_assembly_metadata = package_data_assembly(proto_data_assembly=assembly, assembly_identifier=assembly.name, - stimulus_set_identifier=stimuli.name, # link to the StimulusSet packaged above - assembly_class_name="BehavioralAssembly", bucket_name="brainio-brainscore", - catalog_identifier=None) - -In our experience, it is generally a good idea to include as much metadata as possible (on both StimulusSet and -Assembly). This will increase the utility of the data and make it a more valuable long-term contribution. -Please note that, like in :code:`package_stimulus_set`, The :code:`package_data_assembly` method returns the -AWS metadata needed in the :code:`data/__init__.py` file (such as :code:`sha1` and the :code:`version_id`). -In this example, we store the metadata in the :code:`packaged_assembly_metadata` variable. - -You can also put both of these packaging methods inside of one Python file, called e.g. :code:`data_packaging.py`. This file -would then package and upload both the stimulus_set and assembly. - -**Unit Tests (test.py)**: -We ask that packaged stimuli and assemblies are tested so that their validity can be confirmed for a long time, even as -details in the system might change. For instance, we want to avoid accidental overwrite of a packaged experiment, -and the unit tests guard against that. - -When creating your benchmark, we require you to include a :code:`test.py` file. For what this file should contain, see -below. - -|UnitTestSupport| - -There are already generic tests in place to which you can add your StimulusSet and assembly identifiers: - -#. :meth:`tests.test_stimuli.test_list_stimulus_set` -#. :meth:`tests.test_assemblies.test_list_assembly` -#. :meth:`tests.test_assemblies.test_existence` - -Simply add your identifiers to the list. - -Additionally, you can write your own test method to run some more detailed checks on the validity of StimulusSet and -assembly: - -.. 
code-block:: python - - # in test_stimuli.py - def test_: - stimulus_set = brainio.get_stimulus_set('') - assert len(stimulus_set) == 123 # check number of stimuli - assert len(set(stimulus_set['stimulus_id'])) == 12 # check number of unique stimuli - assert set(stimulus_set['object_name']) == {'dog', 'cat'} - # etc - - - # in test_assemblies.py - def test_: - assembly = brainscore.get_assembly('') - np.testing.assert_array_equal(assembly.dims, ['presentation']) - assert len(set(assembly['stimulus_id'].values)) == 123 # check number of stimuli - assert len(assembly) == 123456 # check number of trials - assert assembly.stimulus_set is not None - assert len(assembly.stimulus_set) == 123 # make sure number of stimuli in stimulus_set lines up with assembly - # etc - - - -**Adding your data to Brain-Score**: -You will also need an :code:`__init__.py` file to go along with your submission. The purpose of this file is to register the -benchmark inside the Brain-Score ecosystem. This involves adding both the stimuli and the data to the -:code:`stimulus_set_registry` and :code:`data_registry` respectively. See below for an example from the data for :code:`Geirhos2021`: - -.. code-block:: python - - # assembly - data_registry['Geirhos2021_colour'] = lambda: load_assembly_from_s3( - identifier='brendel.Geirhos2021_colour', - version_id="RDjCFAFt_J5mMwFBN9Ifo0OyNPKlToqf", - sha1="258862d82467614e45cc1e488a5ac909eb6e122d", - bucket="brainio-brainscore", - cls=BehavioralAssembly, - stimulus_set_loader=lambda: load_stimulus_set('Geirhos2021_colour'), - ) - - # stimulus set - stimulus_set_registry['Geirhos2021_colour'] = lambda: load_stimulus_set_from_s3( - identifier='Geirhos2021_colour', - bucket="brainio-brainscore", - csv_sha1="9c97c155fd6039a95978be89eb604c6894c5fa16", - zip_sha1="d166f1d3dc3d00c4f51a489e6fcf96dbbe778d2c", - csv_version_id="Rz_sX3_48Lg3vtvfT63AFiFslyXaRy.Y", - zip_version_id="OJh8OmoKjG_7guxLW2fF_GA7ehxbJrvG") - - -**Data Packaging Summary**: -Part 1 of creating a benchmark involves packaging the stimuli and data, adding a :code:`test.py` file, and adding these stimuli -and data to the :code:`data_registry`. The summary of what to submit is seen below with an example structure of an example -submission structure: - -.. code-block:: python - - MyBenchmark2024_stimuli_and_data/ - data/ - data_packaging.py - test.py - __init__.py - -2. Create the benchmark -======================= -The :class:`~brainscore.benchmarks.Benchmark` brings together the experimental paradigm with stimuli, -and a :class:`~brainscore.metrics.Metric` to compare model measurements against experimental data. -The paradigm typically involves telling the model candidate to perform a task or start recording in a particular area, -while looking at images from the previously packaged StimulusSet. -Interacting with the model candidate is agnostic of the specific model and is guided by the -:class:`~brainscore.model_interface.BrainModel` -- all models implement this interface, -and through this interface the benchmark can interact with all current and future model candidates. - -Typically, all benchmarks inherit from :class:`~brainscore.benchmarks.BenchmarkBase`, a super-class requesting the -commmonly used attributes. These attributes include - -* the *identifier* which uniquely designates the benchmark -* the *version* number which increases when changes to the benchmark are made -* a *ceiling_func* that, when run, returns a ceiling for this benchmark -* the benchmark's *parent* to group under e.g. 
V1, V2, V4, IT, behavior, or engineering (machine learning benchmarks) -* a *bibtex* that is used to link to the publication from the benchmark and website for further details - (we are working on crediting benchmark submitters more prominently in addition to only the data source.) - -Here is an example of a behavioral benchmark that uses an already defined metric, -:class:`~brainscore.metrics.image_level_behavior.I2n`, to compare image-level behaviors: - -.. code-block:: python - - import brainscore - from brainscore.benchmarks import BenchmarkBase - from brainscore.benchmarks.screen import place_on_screen - from brainscore.metrics.image_level_behavior import I2n - from brainscore.model_interface import BrainModel - from brainscore.utils import LazyLoad - - # the BIBTEX will be used to link to the publication from the benchmark for further details - BIBTEX = """@article {AuthorYear, - author = {Author}, - title = {title}, - year = {2021}, - url = {link}, - journal = {bioRxiv} - }""" - - - class AuthorYearI2n(BenchmarkBase): - def __init__(self): - self._metric = I2n() # use a previously defined metric - # we typically use the LazyLoad wrapper to only load the assembly on demand - self._fitting_stimuli = LazyLoad(lambda: brainscore.get_stimulus_set('')) - self._assembly = LazyLoad(lambda: brainscore.get_assembly('')) - # at what degree visual angle stimuli were presented - self._visual_degrees = 8 - # how many repeated trials each stimulus was shown for - self._number_of_trials = 2 - super(AuthorYearI2n, self).__init__( - identifier='-i2n', - # the version number increases when changes to the benchmark are made; start with 1 - version=1, - # the ceiling function outputs a ceiling estimate of how reliable the data is, or in other words, how - # well we would expect the perfect model to perform on this benchmark - ceiling_func=lambda: self._metric.ceiling(self._assembly), - parent='behavior', - bibtex=BIBTEX, - ) - - # The __call__ method takes as input a candidate BrainModel and outputs a similarity score of how brain-like - # the candidate is under this benchmark. - # A candidate here could be a model such as CORnet or brain-mapped Alexnet, but importantly the benchmark can be - # agnostic to the details of the candidate and instead only engage with the BrainModel interface. - def __call__(self, candidate: BrainModel): - # based on the visual degrees of the candidate - fitting_stimuli = place_on_screen(self._fitting_stimuli, target_visual_degrees=candidate.visual_degrees(), - source_visual_degrees=self._visual_degrees) - candidate.start_task(BrainModel.Task.probabilities, fitting_stimuli) - stimulus_set = place_on_screen(self._assembly.stimulus_set, target_visual_degrees=candidate.visual_degrees(), - source_visual_degrees=self._visual_degrees) - probabilities = candidate.look_at(stimulus_set, number_of_trials=self._number_of_trials) - score = self._metric(probabilities, self._assembly) - score = self._metric.ceil_score(score, self.ceiling) - return score - - -We also need to register the benchmark in the benchmark registry in order to make it accessible by its identifier. -This is done in the :code:`__init__.py` file inside the benchmark directory: - -.. code-block:: python - - # in brainscore_vision/benchmarks/mybenchmark/__init__.py - - from brainscore_vision import benchmark_registry - - benchmark_registry['mybenchmark-i2n'] = AuthorYearI2n # specify the class and not the object, i.e. 
without `()` - - -**Unit Tests** - -Like with the stimuli and data, we want to ensure the continued validity of the benchmark so that it remains valuable -and can be maintained. -All tests are in your plugin folder's ``test.py``, e.g. ``brainscore_vision/benchmarks/mybenchmark/test.py``. - -|UnitTestSupport| - -We ask that all benchmarks test at least two things: - -#. The ceiling value of the benchmark: - -.. code-block:: python - - benchmark = load_benchmark('mybenchmark') - assert benchmark.ceiling == expected - - -#. The score of one or more models: - -The idea for scores of existing models is to run a few models on the benchmark, -and test that running them on the benchmark will reproduce the same score. - -.. code-block:: python - - from brainscore_vision import score - - actual_score = score(model_identifier='your-favorite-model', benchmark_identifier='mybenchmark') - assert actual_score == expected - -**Benchmark Summary**: -To summarize, Part 2 of creating a benchmark involves making the actual benchmark package. This is done by adding the -:code:`benchmark.py` file, the :code:`test.py` file, and registering the benchmark via the :code:`__init__.py` file. - -The summary of what to submit is seen below with an example structure of an example -submission structure: - -.. code-block:: python - - MyBenchmark2024_stimuli_and_data/ - benchmarks/ - benchmark.py - test.py - __init__.py - - - -3. Submit the benchmark and iterate to finalize -================================================================== -Finally, submit your entire model plugin. -You can do this by either opening a pull request on https://github.com/brain-score/vision/compare -or by submitting a zip file containing your plugin (``/benchmarks/mybenchmark``) on the website. - -This will trigger server-side unit tests which ensure that all unit tests pass successfully. -Often, this step can highlight some issues in the code, so it can take some iterations on the code to make sure -everything runs smoothly. -Please open an issue if you run into trouble or get stuck. - -If any stimuli or data should be made public, please let us know so that we can change the corresponding S3 bucket -policy. - -After the PR has been merged, the submission system will automatically run all existing models on the new benchmark. - - -Naming conventions -================== -**Identifiers**: - -* Benchmark: At the top level, benchmark identifiers should combine data and metric identifiers, - separated by a dash: ``{data}-{metric}``. - -* Data: Identifiers for datasets vary depending on the community but should ideally not include dashes - (which are used in the benchmark identifier, see above). - For brain and cognitive science datasets, data identifiers often point to the paper or report - where the data was first introduced (e.g. ``MajajHong2015`` or ``Sanghavi2020``). - When using components of datasets, we recommend separating those with a dot, - and to use an underscore between multiple words (e.g. ``MajajHong2015.IT`` or ``Malania2007.vernier_only``). - For machine learning ("engineering") datasets, data identifiers are often descriptive (e.g. ``ImageNet``). - -* Metric: Identifiers for metrics are typically descriptive (e.g. ``rdm``, ``pls``, ``accuracy``). 
+The Brain-Score benchmark tutorial has moved, and can be found here: https://www.brain-score.org/tutorials/benchmarks \ No newline at end of file diff --git a/docs/source/modules/model_tutorial.rst b/docs/source/modules/model_tutorial.rst index 1d1b16d11..005b3066f 100644 --- a/docs/source/modules/model_tutorial.rst +++ b/docs/source/modules/model_tutorial.rst @@ -15,128 +15,4 @@ Model Tutorial ============== -The Brain-Score platform aims to yield strong computational models of the ventral stream. -We enable researchers to quickly get a sense of how their model scores against -standardized brain and behavior benchmarks on multiple dimensions and facilitate -comparisons to other state-of-the-art models. At the same time, new brain -data can quickly be tested against a wide range of models to determine how -well existing models explain the data. - -In particular, Brain-Score Vision evaluates -the similarity to neural recordings in the primate visual areas as well as behavioral outputs, -with a score (ranging from 0 "not aligned" to 1 "aligned at noise ceiling") on these various -brain and behavioral benchmarks. This guide is a tutorial for researchers and tinkerers -alike that outlines the setup, submission, and common issues for users. - - -Quickstart -========== -In this section, we will provide a quick and easy way -to get your model(s) ready for submission. This is mainly for those who do not have the time to read -or do the whole tutorial, or for those who just want to go ahead and submit -a model quickly; however, we recommend referring back to this tutorial, -especially if you encounter errors. This section also does not -have pictures, which the other more lengthy sections below do. As an example, -we will submit a version of AlexNet from Pytorch’s library; the main steps are outlined below: - -1. Make a new directory in ``brainscore_vision/models``, e.g. ``brainscore_vision/models/mymodel``. - We refer to this as a new *model plugin*. -2. Specify the dependencies in ``brainscore_vision/models/mymodel/setup.py``. -3. In the ``brainscore_vision/models/mymodel/__init__.py``, implement the model such that it follows the :ref:`interface` - and register it to the ``brainscore_vision.model_registry``: - ``model_registry['myalexnet'] = lambda: ModelCommitment(identifier='myalexnet', ...)`` -4. In the ``brainscore_vision/models/mymodel/test.py``, write unit tests for your model and make sure they pass locally. - You might for instance want to test that - ``score(model_identifier='myalexnet', benchmark_identifier='MajajHong2015public.IT-pls')`` returns a reasonable score. -5. Submit to ``brain-score.org``. You can do this by either opening a pull request on the `Github repository`, - or by submitting a zip file with your plugin on the website. - That’s it! Read more below to get a better idea of the process, or to help fix bugs that might come up. - - -Common Errors: Setup -==================== - -Below are some common errors that you might encounter while setting up -this project or doing this tutorial. We will add more soon! - -1. When running ``pip install .``, you get a message - from the terminal like:: - Directory '.' is not installable. Neither 'setup.py' nor 'pyproject.toml' found. - *Cause*: Not running ``pip install .`` in the right directory: - most likely you are in the plugin folder we created, - and not the top-level folder containing ``brainscore_vision`` we should be in. 
- - *Fix*: if you are in the plugin directory ``brainscore_vision/models/mymodel``, simply run:: - cd ../../../ - and then rerun - the :: - pip install . - command. This navigates to the correct top-level folder and - installs the packages where they are supposed to be. - More generally: make sure you are in the top-level folder containing ``brainscore_vision`` - (and not its parent or child folder) before you run the pip command above. This should fix the error. - -2. After implementing a pytorch model and running ``score`` for the first time, you get:: - ssl.SSLCertVerificationError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1076) - *Cause*: Pytorch’s backend. The SSL certificate for downloading a pre-trained model has expired - from their end and Pytorch should renew soon (usually ~4 hrs) - - *Fix*: If you can’t wait, add the following lines of code to your plugin: - (*Note that Pycharm might throw a warning about this line)*:: - import ssl - ssl._create_default_https_context = ssl._create_unverified_context - - - -Common Errors: Submission -========================= - -1. It has been 24 hours since I submitted my model, and I have not gotten a score? What happened? - - *Cause*: There are many issues that could cause this. - - *Fix*: If it happens, please open an issue on ``https://github.com/brain-score/vision/issues/new`` - and we can check the logs and tell you what happened. If it is really urgent, additionally send us an email. - You will, hopefully soon, be able to log in and check the logs yourself, so stay tuned! - - - -Frequently Asked Questions -========================== - -1. **What are all the numbers on the Brain-Score site?** - - As of now on the leaderboard (Brain-Score), there are many scores that your model would obtain. - These are sub-divided into ``neural`` and ``behavioral`` scores which themselves are further hierarchically organized. - Each one of these is a set of benchmarks that tests how "brain-like" - your model is to various cognitive and neural data -- in essence, - it is a measure of how similar the model is to the brain's visual system. - Models are also tested on "Engineering" benchmarks which do not include biological data - but typically test against ground truth, often for a machine learning benchmark. - These are often to the brain and behavioral scores (e.g. more V1-like → more robust to image perturbations). - -2. **What is the idea behind Brain-Score? Where can I learn more?** - - The website is a great place to start, and for those who want to dive deep, - we recommend reading the `perspective paper`_ and the `technical paper`_ - that outline the idea and the inner workings of how Brain-Score operates. - -3. **I was looking at the code and I found an error in the code/docs/etc. How can I contribute?** - - The easiest way would be to fork the repository - (make a copy of the Brain-Score `Github repository` locally and/or in your own Github), - make the necessary edits there, - and submit a pull request (PR) to merge it into our master branch. - We will have to confirm that PR, and thank you for contributing! - -4. **I really like Brain-Score, and I have some ideas that I would love to - talk to someone about. How do I get in touch?** - - Make an issue ``https://github.com/brain-score/vision/issues/new``, or send us an email! - We will also be creating a mailing list soon, so stay tuned. - -5. **Is there any reward for reaching the top overall Brain-Score? 
Or even a top - score on the individual benchmarks?** - - We sometimes run competitions (e.g. ``https://www.brainscoreworkshop.com/``). - A top Brain-Score result is also a great way to show the goodness of your model and market its value to the community. +The Brain-Score model tutorial has moved, and can be found here: https://www.brain-score.org/tutorials/models From a4aabd8cf017bea355eb7417b40d4f20f7ec41c9 Mon Sep 17 00:00:00 2001 From: Kartik Pradeepan Date: Wed, 14 Aug 2024 15:41:44 -0400 Subject: [PATCH 04/28] remove old tutorials in favor of new link (#1170) (#1174) Co-authored-by: Michael Ferguson --- docs/source/modules/benchmark_tutorial.rst | 397 +-------------------- docs/source/modules/model_tutorial.rst | 126 +------ 2 files changed, 2 insertions(+), 521 deletions(-) diff --git a/docs/source/modules/benchmark_tutorial.rst b/docs/source/modules/benchmark_tutorial.rst index 673fc3740..258590d79 100644 --- a/docs/source/modules/benchmark_tutorial.rst +++ b/docs/source/modules/benchmark_tutorial.rst @@ -6,399 +6,4 @@ Benchmark Tutorial ================== -Benchmarks are at the core of Brain-Score and test models' match to experimental observations. -New benchmarks keep models in check and require them to generalize to new experiments. - -A benchmark reproduces the experimental paradigm on a model candidate, the experimentally observed data, -and a metric to compare model with experimental observations. - -To submit a new benchmark, there are three steps: -1. packaging stimuli and data, -2. creating the benchmark with experimental paradigm and metric to compare against data, and -3. opening a pull request on the github repository to commit the updates from 1 and 2 -In order to ensure the continued validity of the benchmark, we require unit tests for all components -(stimuli and data as well as the benchmark itself). - -1. Package stimuli and data -=========================== -We require a certain format for stimuli and data so that we can maintain them for long-term use. -In particular, we use BrainIO for data management. BrainIO uses -`StimulusSet `_ (a subclass of -`pandas DataFrame `_) to maintain stimuli, and -`DataAssembly `_ -(a subclass of `xarray DataArray `_) -to maintain experimental measurements. -Aside from unifying data from different sources, the advantage of these formats is that all data are kept together with -metadata such as image parameters, electrode locations, and details on behavioral choices. -For both StimulusSet and DataAssembly, BrainIO provides packaging methods that upload to S3 cloud storage, and add the -entries to `lookup.csv `_ from which they -can later be accessed. - -Data and stimuli can be made public or kept private. It is your choice if you wish to release the data itself or only -the benchmark. If you choose to keep the data private, model submissions can be scored on the data, but the actual data -itself will not be visible. Publicly released data can also be scored against, but will be fully accessible. - -Getting started, please create a new folder :code:`/__init__.py` in the :code:`packaging` directory in -which you keep all your packaging scripts. -If your code depends on additional requirements, it is good practice to additionally keep a :code:`requirements.txt` -or :code:`setup.py` file specifying the dependencies. - -Before executing the packaging methods to actually upload to S3, please check in with us via -`Slack or Github Issue `_ so that we can give you access. 
-With the credentials, you can then configure the awscli (:code:`pip install awscli`, :code:`aws configure` using region :code:`us-east-1`, -output format :code:`json`) to make the packaging methods upload successfully. - -**StimulusSet**: -The StimulusSet contains the stimuli that were used in the experiment as well as any kind of metadata for the stimuli. -Below is a slim example of creating and uploading a StimulusSet. The :code:`package_stimulus_set` method returns the -AWS metadata needed in the :code:`data/__init__.py` file (such as :code:`sha1` and the :code:`version_id`). -In this example, we store the metadata in the :code:`packaged_stimulus_metadata` variable. - -.. code-block:: python - - from pathlib import Path - from brainio.stimuli import StimulusSet - from brainio.packaging import package_stimulus_set - - stimuli = [] # collect meta - stimulus_paths = {} # collect mapping of stimulus_id to filepath - for filepath in Path(stimuli_directory).glob('*.png'): - stimulus_id = filepath.stem - object_name = filepath.stem.split('_')[0] # if the filepath contains meta, this can come from anywhere - # ...and other metadata - stimulus_paths[stimulus_id] = filepath - stimuli.append({ - 'stimulus_id': stimulus_id, - 'object_name': object_name, - # ...and other metadata - # optionally you can set 'stimulus_path_within_store' to define the filename in the packaged stimuli - }) - stimuli = StimulusSet(stimuli) - stimuli.stimulus_paths = stimulus_paths - stimuli.name = '' # give the StimulusSet an identifier name - - assert len(stimuli) == 1600 # make sure the StimulusSet is what you would expect - - packaged_stimulus_metadata = package_stimulus_set(catalog_name=None, proto_stimulus_set=stimuli, - stimulus_set_identifier=stimuli.name, bucket_name="brainio-brainscore") # upload to S3 - - -**DataAssembly**: - -DataAssemblies contain the actual experimental measurements as well as any metadata on them. -Note that these do not necessarily have to be raw data, but can also be previously published characterizations of the -data such as preference distributions. -As such, the person submitting the data to Brain-Score does not have to be involved in the data collection. -If you package someone else's data, we do however recommend checking the specifics with them to avoid mis-interpretation. -So far, we have encountered data in three forms: - -* NeuroidAssembly: neural data recorded from "neuroids" -- neurons or their analogues such as multi-unit activity from - Utah array electrodes. These assemblies typically contain spike rates structured in three dimensions - :code:`presentation x neuroid x time_bin` where - the :code:`presentation` dimension represents stimulus presentations (e.g. images x trials), - the :code:`neuroid` dimension represents e.g. electrodes (with metadata such as neuroid_id and location), and - the :code:`time_bin` dimension contains information about the start (:code:`time_bin_start`) and - end (:code:`time_bin_end`) of a time bin of spike rates. -* BehavioralAssembly: behavioral measurements, typically choices in a task structured in one dimension - :code:`presentation` that represents stimulus presentations (e.g. images x trials, with metadata on the task such - as the sample object and the distractor object in a match-to-sample task) with the actual choices - (e.g. "dog"/"cat", "left"/"right") in the assembly values. -* PropertiesAssembly: any kind of data in a pre-processed form, such as a surround suppression index per :code:`neuroid`. 
- -Here is an example of a BehavioralAssembly: - -.. code-block:: python - - from brainio.assemblies import BehavioralAssembly - from brainio.packaging import package_data_assembly - - assembly = BehavioralAssembly(['dog', 'dog', 'cat', 'dog', ...], - coords={ - 'stimulus_id': ('presentation', ['image1', 'image2', 'image3', 'image4', ...]), - 'sample_object': ('presentation', ['dog', 'cat', 'cat', 'dog', ...]), - 'distractor_object': ('presentation', ['cat', 'dog', 'dog', 'cat', ...]), - # ...more meta - # Note that meta from the StimulusSet will automatically be merged into the - # presentation dimension: - # https://github.com/brain-score/brainio/blob/d0ac841779fb47fa7b8bdad3341b68357c8031d9/brainio/fetch.py#L125-L132 - }, - dims=['presentation']) - assembly.name = '' # give the assembly an identifier name - - # make sure the assembly is what you would expect - assert len(assembly['presentation']) == 179660 - assert len(set(assembly['stimulus_id'].values)) == 1600 - assert len(set(assembly['choice'].values)) == len(set(assembly['sample_object'].values)) \ - == len(set(assembly['distractor_object'].values)) == 2 - - # upload to S3 - packaged_assembly_metadata = package_data_assembly(proto_data_assembly=assembly, assembly_identifier=assembly.name, - stimulus_set_identifier=stimuli.name, # link to the StimulusSet packaged above - assembly_class_name="BehavioralAssembly", bucket_name="brainio-brainscore", - catalog_identifier=None) - -In our experience, it is generally a good idea to include as much metadata as possible (on both StimulusSet and -Assembly). This will increase the utility of the data and make it a more valuable long-term contribution. -Please note that, like in :code:`package_stimulus_set`, The :code:`package_data_assembly` method returns the -AWS metadata needed in the :code:`data/__init__.py` file (such as :code:`sha1` and the :code:`version_id`). -In this example, we store the metadata in the :code:`packaged_assembly_metadata` variable. - -You can also put both of these packaging methods inside of one Python file, called e.g. :code:`data_packaging.py`. This file -would then package and upload both the stimulus_set and assembly. - -**Unit Tests (test.py)**: -We ask that packaged stimuli and assemblies are tested so that their validity can be confirmed for a long time, even as -details in the system might change. For instance, we want to avoid accidental overwrite of a packaged experiment, -and the unit tests guard against that. - -When creating your benchmark, we require you to include a :code:`test.py` file. For what this file should contain, see -below. - -|UnitTestSupport| - -There are already generic tests in place to which you can add your StimulusSet and assembly identifiers: - -#. :meth:`tests.test_stimuli.test_list_stimulus_set` -#. :meth:`tests.test_assemblies.test_list_assembly` -#. :meth:`tests.test_assemblies.test_existence` - -Simply add your identifiers to the list. - -Additionally, you can write your own test method to run some more detailed checks on the validity of StimulusSet and -assembly: - -.. 
code-block:: python - - # in test_stimuli.py - def test_: - stimulus_set = brainio.get_stimulus_set('') - assert len(stimulus_set) == 123 # check number of stimuli - assert len(set(stimulus_set['stimulus_id'])) == 12 # check number of unique stimuli - assert set(stimulus_set['object_name']) == {'dog', 'cat'} - # etc - - - # in test_assemblies.py - def test_: - assembly = brainscore.get_assembly('') - np.testing.assert_array_equal(assembly.dims, ['presentation']) - assert len(set(assembly['stimulus_id'].values)) == 123 # check number of stimuli - assert len(assembly) == 123456 # check number of trials - assert assembly.stimulus_set is not None - assert len(assembly.stimulus_set) == 123 # make sure number of stimuli in stimulus_set lines up with assembly - # etc - - - -**Adding your data to Brain-Score**: -You will also need an :code:`__init__.py` file to go along with your submission. The purpose of this file is to register the -benchmark inside the Brain-Score ecosystem. This involves adding both the stimuli and the data to the -:code:`stimulus_set_registry` and :code:`data_registry` respectively. See below for an example from the data for :code:`Geirhos2021`: - -.. code-block:: python - - # assembly - data_registry['Geirhos2021_colour'] = lambda: load_assembly_from_s3( - identifier='brendel.Geirhos2021_colour', - version_id="RDjCFAFt_J5mMwFBN9Ifo0OyNPKlToqf", - sha1="258862d82467614e45cc1e488a5ac909eb6e122d", - bucket="brainio-brainscore", - cls=BehavioralAssembly, - stimulus_set_loader=lambda: load_stimulus_set('Geirhos2021_colour'), - ) - - # stimulus set - stimulus_set_registry['Geirhos2021_colour'] = lambda: load_stimulus_set_from_s3( - identifier='Geirhos2021_colour', - bucket="brainio-brainscore", - csv_sha1="9c97c155fd6039a95978be89eb604c6894c5fa16", - zip_sha1="d166f1d3dc3d00c4f51a489e6fcf96dbbe778d2c", - csv_version_id="Rz_sX3_48Lg3vtvfT63AFiFslyXaRy.Y", - zip_version_id="OJh8OmoKjG_7guxLW2fF_GA7ehxbJrvG") - - -**Data Packaging Summary**: -Part 1 of creating a benchmark involves packaging the stimuli and data, adding a :code:`test.py` file, and adding these stimuli -and data to the :code:`data_registry`. The summary of what to submit is seen below with an example structure of an example -submission structure: - -.. code-block:: python - - MyBenchmark2024_stimuli_and_data/ - data/ - data_packaging.py - test.py - __init__.py - -2. Create the benchmark -======================= -The :class:`~brainscore.benchmarks.Benchmark` brings together the experimental paradigm with stimuli, -and a :class:`~brainscore.metrics.Metric` to compare model measurements against experimental data. -The paradigm typically involves telling the model candidate to perform a task or start recording in a particular area, -while looking at images from the previously packaged StimulusSet. -Interacting with the model candidate is agnostic of the specific model and is guided by the -:class:`~brainscore.model_interface.BrainModel` -- all models implement this interface, -and through this interface the benchmark can interact with all current and future model candidates. - -Typically, all benchmarks inherit from :class:`~brainscore.benchmarks.BenchmarkBase`, a super-class requesting the -commmonly used attributes. These attributes include - -* the *identifier* which uniquely designates the benchmark -* the *version* number which increases when changes to the benchmark are made -* a *ceiling_func* that, when run, returns a ceiling for this benchmark -* the benchmark's *parent* to group under e.g. 
V1, V2, V4, IT, behavior, or engineering (machine learning benchmarks) -* a *bibtex* that is used to link to the publication from the benchmark and website for further details - (we are working on crediting benchmark submitters more prominently in addition to only the data source.) - -Here is an example of a behavioral benchmark that uses an already defined metric, -:class:`~brainscore.metrics.image_level_behavior.I2n`, to compare image-level behaviors: - -.. code-block:: python - - import brainscore - from brainscore.benchmarks import BenchmarkBase - from brainscore.benchmarks.screen import place_on_screen - from brainscore.metrics.image_level_behavior import I2n - from brainscore.model_interface import BrainModel - from brainscore.utils import LazyLoad - - # the BIBTEX will be used to link to the publication from the benchmark for further details - BIBTEX = """@article {AuthorYear, - author = {Author}, - title = {title}, - year = {2021}, - url = {link}, - journal = {bioRxiv} - }""" - - - class AuthorYearI2n(BenchmarkBase): - def __init__(self): - self._metric = I2n() # use a previously defined metric - # we typically use the LazyLoad wrapper to only load the assembly on demand - self._fitting_stimuli = LazyLoad(lambda: brainscore.get_stimulus_set('')) - self._assembly = LazyLoad(lambda: brainscore.get_assembly('')) - # at what degree visual angle stimuli were presented - self._visual_degrees = 8 - # how many repeated trials each stimulus was shown for - self._number_of_trials = 2 - super(AuthorYearI2n, self).__init__( - identifier='-i2n', - # the version number increases when changes to the benchmark are made; start with 1 - version=1, - # the ceiling function outputs a ceiling estimate of how reliable the data is, or in other words, how - # well we would expect the perfect model to perform on this benchmark - ceiling_func=lambda: self._metric.ceiling(self._assembly), - parent='behavior', - bibtex=BIBTEX, - ) - - # The __call__ method takes as input a candidate BrainModel and outputs a similarity score of how brain-like - # the candidate is under this benchmark. - # A candidate here could be a model such as CORnet or brain-mapped Alexnet, but importantly the benchmark can be - # agnostic to the details of the candidate and instead only engage with the BrainModel interface. - def __call__(self, candidate: BrainModel): - # based on the visual degrees of the candidate - fitting_stimuli = place_on_screen(self._fitting_stimuli, target_visual_degrees=candidate.visual_degrees(), - source_visual_degrees=self._visual_degrees) - candidate.start_task(BrainModel.Task.probabilities, fitting_stimuli) - stimulus_set = place_on_screen(self._assembly.stimulus_set, target_visual_degrees=candidate.visual_degrees(), - source_visual_degrees=self._visual_degrees) - probabilities = candidate.look_at(stimulus_set, number_of_trials=self._number_of_trials) - score = self._metric(probabilities, self._assembly) - score = self._metric.ceil_score(score, self.ceiling) - return score - - -We also need to register the benchmark in the benchmark registry in order to make it accessible by its identifier. -This is done in the :code:`__init__.py` file inside the benchmark directory: - -.. code-block:: python - - # in brainscore_vision/benchmarks/mybenchmark/__init__.py - - from brainscore_vision import benchmark_registry - - benchmark_registry['mybenchmark-i2n'] = AuthorYearI2n # specify the class and not the object, i.e. 
without `()` - - -**Unit Tests** - -Like with the stimuli and data, we want to ensure the continued validity of the benchmark so that it remains valuable -and can be maintained. -All tests are in your plugin folder's ``test.py``, e.g. ``brainscore_vision/benchmarks/mybenchmark/test.py``. - -|UnitTestSupport| - -We ask that all benchmarks test at least two things: - -#. The ceiling value of the benchmark: - -.. code-block:: python - - benchmark = load_benchmark('mybenchmark') - assert benchmark.ceiling == expected - - -#. The score of one or more models: - -The idea for scores of existing models is to run a few models on the benchmark, -and test that running them on the benchmark will reproduce the same score. - -.. code-block:: python - - from brainscore_vision import score - - actual_score = score(model_identifier='your-favorite-model', benchmark_identifier='mybenchmark') - assert actual_score == expected - -**Benchmark Summary**: -To summarize, Part 2 of creating a benchmark involves making the actual benchmark package. This is done by adding the -:code:`benchmark.py` file, the :code:`test.py` file, and registering the benchmark via the :code:`__init__.py` file. - -The summary of what to submit is seen below with an example structure of an example -submission structure: - -.. code-block:: python - - MyBenchmark2024_stimuli_and_data/ - benchmarks/ - benchmark.py - test.py - __init__.py - - - -3. Submit the benchmark and iterate to finalize -================================================================== -Finally, submit your entire model plugin. -You can do this by either opening a pull request on https://github.com/brain-score/vision/compare -or by submitting a zip file containing your plugin (``/benchmarks/mybenchmark``) on the website. - -This will trigger server-side unit tests which ensure that all unit tests pass successfully. -Often, this step can highlight some issues in the code, so it can take some iterations on the code to make sure -everything runs smoothly. -Please open an issue if you run into trouble or get stuck. - -If any stimuli or data should be made public, please let us know so that we can change the corresponding S3 bucket -policy. - -After the PR has been merged, the submission system will automatically run all existing models on the new benchmark. - - -Naming conventions -================== -**Identifiers**: - -* Benchmark: At the top level, benchmark identifiers should combine data and metric identifiers, - separated by a dash: ``{data}-{metric}``. - -* Data: Identifiers for datasets vary depending on the community but should ideally not include dashes - (which are used in the benchmark identifier, see above). - For brain and cognitive science datasets, data identifiers often point to the paper or report - where the data was first introduced (e.g. ``MajajHong2015`` or ``Sanghavi2020``). - When using components of datasets, we recommend separating those with a dot, - and to use an underscore between multiple words (e.g. ``MajajHong2015.IT`` or ``Malania2007.vernier_only``). - For machine learning ("engineering") datasets, data identifiers are often descriptive (e.g. ``ImageNet``). - -* Metric: Identifiers for metrics are typically descriptive (e.g. ``rdm``, ``pls``, ``accuracy``). 
+The Brain-Score benchmark tutorial has moved, and can be found here: https://www.brain-score.org/tutorials/benchmarks \ No newline at end of file diff --git a/docs/source/modules/model_tutorial.rst b/docs/source/modules/model_tutorial.rst index 1d1b16d11..005b3066f 100644 --- a/docs/source/modules/model_tutorial.rst +++ b/docs/source/modules/model_tutorial.rst @@ -15,128 +15,4 @@ Model Tutorial ============== -The Brain-Score platform aims to yield strong computational models of the ventral stream. -We enable researchers to quickly get a sense of how their model scores against -standardized brain and behavior benchmarks on multiple dimensions and facilitate -comparisons to other state-of-the-art models. At the same time, new brain -data can quickly be tested against a wide range of models to determine how -well existing models explain the data. - -In particular, Brain-Score Vision evaluates -the similarity to neural recordings in the primate visual areas as well as behavioral outputs, -with a score (ranging from 0 "not aligned" to 1 "aligned at noise ceiling") on these various -brain and behavioral benchmarks. This guide is a tutorial for researchers and tinkerers -alike that outlines the setup, submission, and common issues for users. - - -Quickstart -========== -In this section, we will provide a quick and easy way -to get your model(s) ready for submission. This is mainly for those who do not have the time to read -or do the whole tutorial, or for those who just want to go ahead and submit -a model quickly; however, we recommend referring back to this tutorial, -especially if you encounter errors. This section also does not -have pictures, which the other more lengthy sections below do. As an example, -we will submit a version of AlexNet from Pytorch’s library; the main steps are outlined below: - -1. Make a new directory in ``brainscore_vision/models``, e.g. ``brainscore_vision/models/mymodel``. - We refer to this as a new *model plugin*. -2. Specify the dependencies in ``brainscore_vision/models/mymodel/setup.py``. -3. In the ``brainscore_vision/models/mymodel/__init__.py``, implement the model such that it follows the :ref:`interface` - and register it to the ``brainscore_vision.model_registry``: - ``model_registry['myalexnet'] = lambda: ModelCommitment(identifier='myalexnet', ...)`` -4. In the ``brainscore_vision/models/mymodel/test.py``, write unit tests for your model and make sure they pass locally. - You might for instance want to test that - ``score(model_identifier='myalexnet', benchmark_identifier='MajajHong2015public.IT-pls')`` returns a reasonable score. -5. Submit to ``brain-score.org``. You can do this by either opening a pull request on the `Github repository`, - or by submitting a zip file with your plugin on the website. - That’s it! Read more below to get a better idea of the process, or to help fix bugs that might come up. - - -Common Errors: Setup -==================== - -Below are some common errors that you might encounter while setting up -this project or doing this tutorial. We will add more soon! - -1. When running ``pip install .``, you get a message - from the terminal like:: - Directory '.' is not installable. Neither 'setup.py' nor 'pyproject.toml' found. - *Cause*: Not running ``pip install .`` in the right directory: - most likely you are in the plugin folder we created, - and not the top-level folder containing ``brainscore_vision`` we should be in. 
- - *Fix*: if you are in the plugin directory ``brainscore_vision/models/mymodel``, simply run:: - cd ../../../ - and then rerun - the :: - pip install . - command. This navigates to the correct top-level folder and - installs the packages where they are supposed to be. - More generally: make sure you are in the top-level folder containing ``brainscore_vision`` - (and not its parent or child folder) before you run the pip command above. This should fix the error. - -2. After implementing a pytorch model and running ``score`` for the first time, you get:: - ssl.SSLCertVerificationError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1076) - *Cause*: Pytorch’s backend. The SSL certificate for downloading a pre-trained model has expired - from their end and Pytorch should renew soon (usually ~4 hrs) - - *Fix*: If you can’t wait, add the following lines of code to your plugin: - (*Note that Pycharm might throw a warning about this line)*:: - import ssl - ssl._create_default_https_context = ssl._create_unverified_context - - - -Common Errors: Submission -========================= - -1. It has been 24 hours since I submitted my model, and I have not gotten a score? What happened? - - *Cause*: There are many issues that could cause this. - - *Fix*: If it happens, please open an issue on ``https://github.com/brain-score/vision/issues/new`` - and we can check the logs and tell you what happened. If it is really urgent, additionally send us an email. - You will, hopefully soon, be able to log in and check the logs yourself, so stay tuned! - - - -Frequently Asked Questions -========================== - -1. **What are all the numbers on the Brain-Score site?** - - As of now on the leaderboard (Brain-Score), there are many scores that your model would obtain. - These are sub-divided into ``neural`` and ``behavioral`` scores which themselves are further hierarchically organized. - Each one of these is a set of benchmarks that tests how "brain-like" - your model is to various cognitive and neural data -- in essence, - it is a measure of how similar the model is to the brain's visual system. - Models are also tested on "Engineering" benchmarks which do not include biological data - but typically test against ground truth, often for a machine learning benchmark. - These are often to the brain and behavioral scores (e.g. more V1-like → more robust to image perturbations). - -2. **What is the idea behind Brain-Score? Where can I learn more?** - - The website is a great place to start, and for those who want to dive deep, - we recommend reading the `perspective paper`_ and the `technical paper`_ - that outline the idea and the inner workings of how Brain-Score operates. - -3. **I was looking at the code and I found an error in the code/docs/etc. How can I contribute?** - - The easiest way would be to fork the repository - (make a copy of the Brain-Score `Github repository` locally and/or in your own Github), - make the necessary edits there, - and submit a pull request (PR) to merge it into our master branch. - We will have to confirm that PR, and thank you for contributing! - -4. **I really like Brain-Score, and I have some ideas that I would love to - talk to someone about. How do I get in touch?** - - Make an issue ``https://github.com/brain-score/vision/issues/new``, or send us an email! - We will also be creating a mailing list soon, so stay tuned. - -5. **Is there any reward for reaching the top overall Brain-Score? 
Or even a top - score on the individual benchmarks?** - - We sometimes run competitions (e.g. ``https://www.brainscoreworkshop.com/``). - A top Brain-Score result is also a great way to show the goodness of your model and market its value to the community. +The Brain-Score model tutorial has moved, and can be found here: https://www.brain-score.org/tutorials/models From 58bffac300df44c8a8fdec3e876e6f60413bb116 Mon Sep 17 00:00:00 2001 From: jimn2 <105658975+jimn2@users.noreply.github.com> Date: Fri, 23 Aug 2024 11:44:54 -0500 Subject: [PATCH 05/28] Fixing a detail in pyprojecct.toml that surfaced while running the quickstart tutorial. (#1192) --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index a336cc52c..3b28322e9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -49,7 +49,7 @@ test = [ [build-system] requires = [ - "setuptools>=65.*", + "setuptools>=65.0.0", "wheel" ] From ed16fd9635ccc8c40061d569a6453d1b97ad18ce Mon Sep 17 00:00:00 2001 From: Katherine Fairchild Date: Fri, 23 Aug 2024 15:14:51 -0400 Subject: [PATCH 06/28] add eBarlow_lmda_02_1000ep to models (#1200) Co-authored-by: AutoJenkins --- .../models/eBarlow_lmda_02_1000ep/__init__.py | 9 ++ .../models/eBarlow_lmda_02_1000ep/model.py | 84 +++++++++++++++++++ .../models/eBarlow_lmda_02_1000ep/setup.py | 25 ++++++ .../models/eBarlow_lmda_02_1000ep/test.py | 1 + 4 files changed, 119 insertions(+) create mode 100644 brainscore_vision/models/eBarlow_lmda_02_1000ep/__init__.py create mode 100644 brainscore_vision/models/eBarlow_lmda_02_1000ep/model.py create mode 100644 brainscore_vision/models/eBarlow_lmda_02_1000ep/setup.py create mode 100644 brainscore_vision/models/eBarlow_lmda_02_1000ep/test.py diff --git a/brainscore_vision/models/eBarlow_lmda_02_1000ep/__init__.py b/brainscore_vision/models/eBarlow_lmda_02_1000ep/__init__.py new file mode 100644 index 000000000..f29cd461a --- /dev/null +++ b/brainscore_vision/models/eBarlow_lmda_02_1000ep/__init__.py @@ -0,0 +1,9 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from .model import get_model, get_layers + +model_registry["eBarlow_lmda_02_1000ep"] = lambda: ModelCommitment( + identifier="eBarlow_lmda_02_1000ep", + activations_model=get_model("eBarlow_lmda_02_1000ep"), + layers=get_layers("eBarlow_lmda_02_1000ep"), +) diff --git a/brainscore_vision/models/eBarlow_lmda_02_1000ep/model.py b/brainscore_vision/models/eBarlow_lmda_02_1000ep/model.py new file mode 100644 index 000000000..2a4d8797e --- /dev/null +++ b/brainscore_vision/models/eBarlow_lmda_02_1000ep/model.py @@ -0,0 +1,84 @@ +from brainscore_vision.model_helpers.check_submission import check_models +import functools +import os +from urllib.request import urlretrieve +import torchvision.models +from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper +from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images +from pathlib import Path +from brainscore_vision.model_helpers import download_weights +import torch +from collections import OrderedDict + +# This is an example implementation for submitting resnet-50 as a pytorch model + +# Attention: It is important, that the wrapper identifier is unique per model! +# The results will otherwise be the same due to brain-scores internal result caching mechanism. +# Please load your pytorch model for usage in CPU. There won't be GPUs available for scoring your model. 
+# If the model requires a GPU, contact the brain-score team directly. +from brainscore_vision.model_helpers.check_submission import check_models + + +def get_model_list(): + return ["eBarlow_lmda_02_1000ep"] + + +def get_model(name): + assert name == "eBarlow_lmda_02_1000ep" + url = "https://users.flatironinstitute.org/~tyerxa/equi_proj/training_checkpoints/fresh/paired/lmda_0.2/Barlow_1000ep/ep1000-ba625000-rank0" + fh = urlretrieve(url) + state_dict = torch.load(fh[0], map_location=torch.device("cpu"))["state"]["model"] + model = load_composer_classifier(state_dict) + preprocessing = functools.partial(load_preprocess_images, image_size=224) + wrapper = PytorchWrapper(identifier=name, model=model, preprocessing=preprocessing) + wrapper.image_size = 224 + return wrapper + +def load_composer_classifier(sd): + model = torchvision.models.resnet.resnet50() + new_sd = OrderedDict() + for k, v in sd.items(): + if 'lin_cls' in k: + new_sd['fc.' + k.split('.')[-1]] = v + if ".f." not in k: + continue + parts = k.split(".") + idx = parts.index("f") + new_k = ".".join(parts[idx + 1 :]) + new_sd[new_k] = v + model.load_state_dict(new_sd, strict=True) + return model + +def get_layers(name): + assert name == "eBarlow_lmda_02_1000ep" + layers = [ + "layer1.0", + "layer1.1", + "layer1.2", + "layer2.0", + "layer2.1", + "layer2.2", + "layer2.3", + "layer3.0", + "layer3.1", + "layer3.2", + "layer3.3", + "layer3.4", + "layer3.5", + "layer4.0", + "layer4.1", + "layer4.2", + "avgpool", + "fc", + ] + outs = ["conv1", "layer1", "layer2", "layer3", "layer4"] + + return layers + outs + + +def get_bibtex(model_identifier): + return """xx""" + + +if __name__ == "__main__": + check_models.check_base_models(__name__) diff --git a/brainscore_vision/models/eBarlow_lmda_02_1000ep/setup.py b/brainscore_vision/models/eBarlow_lmda_02_1000ep/setup.py new file mode 100644 index 000000000..421914cfb --- /dev/null +++ b/brainscore_vision/models/eBarlow_lmda_02_1000ep/setup.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +from setuptools import setup, find_packages + +requirements = [ "torchvision", + "torch" +] + +setup( + packages=find_packages(exclude=['tests']), + include_package_data=True, + install_requires=requirements, + license="MIT license", + zip_safe=False, + keywords='brain-score template', + classifiers=[ + 'Development Status :: 2 - Pre-Alpha', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: MIT License', + 'Natural Language :: English', + 'Programming Language :: Python :: 3.7', + ], + test_suite='tests', +) diff --git a/brainscore_vision/models/eBarlow_lmda_02_1000ep/test.py b/brainscore_vision/models/eBarlow_lmda_02_1000ep/test.py new file mode 100644 index 000000000..e594ba9e1 --- /dev/null +++ b/brainscore_vision/models/eBarlow_lmda_02_1000ep/test.py @@ -0,0 +1 @@ +# Left empty as part of 2023 models migration From 6196b1ddd3f7972faf5ff1d2a858e5b6045c6d56 Mon Sep 17 00:00:00 2001 From: Michael Ferguson Date: Mon, 26 Aug 2024 15:54:43 -0400 Subject: [PATCH 07/28] add developer doc notes (#1201) * add developer doc notes * update version number --- docs/source/conf.py | 4 +-- .../modules/developer_clarifications.rst | 36 +++++++++++++++++++ 2 files changed, 38 insertions(+), 2 deletions(-) create mode 100644 docs/source/modules/developer_clarifications.rst diff --git a/docs/source/conf.py b/docs/source/conf.py index 295f247a9..d0c56d306 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -17,11 +17,11 @@ # -- Project information 
----------------------------------------------------- project = 'Brain-Score' -copyright = '2022, Brain-Score Team' +copyright = '2024, Brain-Score Team' author = 'Brain-Score Team' # The full version, including alpha/beta/rc tags -release = '1.3' +release = '2.0.0' # -- General configuration --------------------------------------------------- diff --git a/docs/source/modules/developer_clarifications.rst b/docs/source/modules/developer_clarifications.rst new file mode 100644 index 000000000..6e63f2008 --- /dev/null +++ b/docs/source/modules/developer_clarifications.rst @@ -0,0 +1,36 @@ +.. _interface: + +************************ +Developer Clarifications +************************ + +The Following documentation stores commonly-asked developer questions. We hope this will be useful to +anyone interested in contributing to Brain-Score's codebase or scientific workings. + + + +1. **For a given model, are activations different on each benchmark? How?** + + + Activations per model are generated based on benchmark stimuli; not every benchmark has unique stimuli. For most + model-benchmark pairs, activations will be different because stimuli will be different. The exceptions to this + are the benchmarks that use the same stimuli, such as the `MajajHong20215` family of benchmarks. + +2. **Result Caching** + + Result Caching is a Brain-Score `repo `_ that allows model activations (and other functions) to be cached + to disk, in order to speed up the process of rescoring models. It contains a decorator that can be attached to a function + right before it is defined. On the first run of that function, `result_caching` will save to disk the result of tha function + and will load that result from disk in future calls with the same parameters. All files are saved in the user's `~/result_caching` + folder, and they are persistent, as there is no garbage collection built in. You can deactivate + `result_caching` by simply setting the environment flag `RESULTCACHING_DISABLE` to `1`. Please see the link above + for more detailed documentation. + +3. **Model Mapping Procedure** + + In general, there are different methods that are used in the Brain-Score code to instruct the model to "begin recording", + observe stimuli, and to generate scores. Models follow the `ModelCommitment` to conform to the `BrainModel` API. A + `BrainModel` is any model that has a `region_layer_map`. This allows the layers in the model to be mapped to layers in + the ventral visual stream, and is chosen by scoring models on the public version of a benchmark (the private + benchmark data is heldout for the BrainModel to be scored on). See the more technical docs + `here `_ for additional notes. From 4a7458ea28bd6721bcf2526f9d5c41dfdf2413bd Mon Sep 17 00:00:00 2001 From: Kartik Pradeepan Date: Tue, 27 Aug 2024 16:06:12 -0400 Subject: [PATCH 08/28] Github Actions Workflow to keep develop synchronized with changes from master (#1191) * Added branch sync workflow. * Swapped rebase with merge. Added some push error handling in autosync. * Swapped rebase with merge. Added some push error handling in autosync. 
--- .../workflows/sync_develop_with_master.yml | 111 ++++++++++++++++++ 1 file changed, 111 insertions(+) create mode 100644 .github/workflows/sync_develop_with_master.yml diff --git a/.github/workflows/sync_develop_with_master.yml b/.github/workflows/sync_develop_with_master.yml new file mode 100644 index 000000000..df8cc238d --- /dev/null +++ b/.github/workflows/sync_develop_with_master.yml @@ -0,0 +1,111 @@ +name: Sync develop with master +'on': + pull_request: + types: + - closed + branches: + - master +permissions: + contents: write +jobs: + start: + name: "Starting -🤞" + runs-on: ubuntu-latest + steps: + - name: Starting + id: init + run: | + echo "Starting branch synchronization of ${{ github.repository }}" + create_pr_for_nonplugin: + name: Synchronizing non-plugin PR + needs: start # This job now needs the 'start' job to complete first + if: > + github.event.pull_request.merged == true && + !(startsWith(github.event.pull_request.head.ref, 'web_submission_') && + contains(github.event.pull_request.title, 'brain-score.org submission')) + runs-on: ubuntu-latest + steps: + - name: Check out the develop branch + uses: actions/checkout@v3 + with: + fetch-depth: 0 # Fetch full history + ref: develop + - name: Reset the develop branch + run: | + git fetch origin master + git reset --hard origin/master + - name: Create pull request in develop + uses: peter-evans/create-pull-request@v6 + with: + token: '${{ secrets.PAT }}' + commit-message: Sync master into develop + title: Sync master into develop + body: >- + This PR syncs the latest changes from the master branch into the + develop branch. + base: develop + branch: 'developer-sync-pr-${{ github.event.pull_request.number }}' + + auto_sync_for_plugin: + needs: start + if: > + github.event.pull_request.merged == true && + startsWith(github.event.pull_request.head.ref, 'web_submission_') && + contains(github.event.pull_request.title, 'brain-score.org submission') + runs-on: ubuntu-latest + steps: + - name: Checkout the develop branch + uses: actions/checkout@v3 + with: + fetch-depth: 0 + ref: develop + - name: Configure Git user + run: | + git config --global user.name "Branch Synchronizer" + git config --global user.email "action@github.com" + - name: Ensure develop branch is updated + run: | + git fetch origin develop + git checkout develop + git merge origin/develop + # Fetch latest change from master, checkout develop, merge changes from master to develop. + # Includes conflict handling + - name: Merge master into develop + id: merge + run: | + git fetch origin master + git checkout develop + git merge origin/master || { + if git diff --name-only --diff-filter=U | grep -q '.'; then + echo "Merge conflict detected" + echo "::set-output name=merge_conflict::true" + else + echo "Merge failed due to another reason" + exit 1 + fi + } + - name: Push changes to develop (if merge is successful) + if: steps.merge.conclusion == 'success' + run: | #Use force-with-lease to prevent accidental overwrite if branch has been updated. If fails, rebase the update and retry + git push origin develop --force-with-lease || { + echo "Push failed due to updates in develop. Attempting to rebase and retry..." 
+ git fetch origin develop + git rebase origin/develop + git push origin develop --force-with-lease + } + - name: Create pull request for merge conflicts + if: steps.merge.outputs.merge_conflict == 'true' + uses: peter-evans/create-pull-request@v6 + with: + token: '${{ secrets.PAT }}' + commit-message: Merge master into develop with conflict resolution + title: Resolve conflicts between master and develop + body: This PR resolves merge conflicts between master and develop. + base: develop + branch: 'developer-sync-pr-conflict-${{ github.event.pull_request.number }}' + - name: Handle other merge failures + if: failure() && steps.merge.outputs.merge_conflict != 'true' + run: > + echo "Handle non-conflict related failure, such as network issues or missing branches" + + # Possibly incorporate additional handling logic here (e.g.,notifications or retries) \ No newline at end of file From 3c27e4ae955832272402736aca2ed8974dfd110e Mon Sep 17 00:00:00 2001 From: Michael Ferguson Date: Wed, 28 Aug 2024 11:29:31 -0400 Subject: [PATCH 09/28] update theme to match language (#1204) --- docs/requirements.txt | 1 + docs/source/conf.py | 2 +- docs/source/index.rst | 3 +-- docs/source/modules/benchmark_tutorial.rst | 9 --------- docs/source/modules/model_tutorial.rst | 18 ------------------ 5 files changed, 3 insertions(+), 30 deletions(-) delete mode 100644 docs/source/modules/benchmark_tutorial.rst delete mode 100644 docs/source/modules/model_tutorial.rst diff --git a/docs/requirements.txt b/docs/requirements.txt index 0f71f0a9f..23532fd21 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,4 +1,5 @@ -e . Sphinx>=4 +sphinx_rtd_theme recommonmark \ No newline at end of file diff --git a/docs/source/conf.py b/docs/source/conf.py index d0c56d306..1f14b464d 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -58,7 +58,7 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # -html_theme = 'alabaster' +html_theme = 'sphinx_rtd_theme' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, diff --git a/docs/source/index.rst b/docs/source/index.rst index c7d629d85..453876e60 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -12,9 +12,8 @@ By following a unified :ref:`interface`, all models can be treated as an experim :caption: Contents: examples - modules/model_tutorial - modules/benchmark_tutorial modules/model_interface + modules/developer_clarifications.rst modules/benchmarks modules/metrics modules/submission diff --git a/docs/source/modules/benchmark_tutorial.rst b/docs/source/modules/benchmark_tutorial.rst deleted file mode 100644 index 258590d79..000000000 --- a/docs/source/modules/benchmark_tutorial.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. _Benchmark_Tutorial: -.. |UnitTestSupport| replace:: We realize that unit tests can be a hurdle and we can take over this task for you. - Please let us know of any hurdles and we will do our best to support. - -================== -Benchmark Tutorial -================== - -The Brain-Score benchmark tutorial has moved, and can be found here: https://www.brain-score.org/tutorials/benchmarks \ No newline at end of file diff --git a/docs/source/modules/model_tutorial.rst b/docs/source/modules/model_tutorial.rst deleted file mode 100644 index 005b3066f..000000000 --- a/docs/source/modules/model_tutorial.rst +++ /dev/null @@ -1,18 +0,0 @@ -.. _model_tutorial: - -.. 
_technical paper: https://www.biorxiv.org/content/10.1101/407007v1 -.. _perspective paper: https://www.cell.com/neuron/fulltext/S0896-6273(20)30605-X -.. _Pycharm: https://www.jetbrains.com/pycharm/download -.. _introduction: https://www.tutorialspoint.com/pycharm/index.htm -.. _sample-model-submission: https://github.com/brain-score/sample-model-submission -.. _github: https://github.com/brain-score -.. _github repository: https://github.com/brain-score/vision -.. _windows: https://git-scm.com/download/win -.. _mac: https://git-scm.com/download/mac -.. _profile: http://www.brain-score.org/profile/ - -============== -Model Tutorial -============== - -The Brain-Score model tutorial has moved, and can be found here: https://www.brain-score.org/tutorials/models From 97bae3ec2d50966d5c651354b109be524fa66331 Mon Sep 17 00:00:00 2001 From: Kartik Pradeepan Date: Thu, 29 Aug 2024 10:41:46 -0400 Subject: [PATCH 10/28] Sync master into develop (#1205) * remove old tutorials in favor of new link (#1170) * Fixing a detail in pyprojecct.toml that surfaced while running the quickstart tutorial. (#1192) * add eBarlow_lmda_02_1000ep to models (#1200) Co-authored-by: AutoJenkins * add developer doc notes (#1201) * add developer doc notes * update version number * Github Actions Workflow to keep develop synchronized with changes from master (#1191) * Added branch sync workflow. * Swapped rebase with merge. Added some push error handling in autosync. * Swapped rebase with merge. Added some push error handling in autosync. * update theme to match language (#1204) --------- Co-authored-by: Michael Ferguson Co-authored-by: jimn2 <105658975+jimn2@users.noreply.github.com> Co-authored-by: Katherine Fairchild Co-authored-by: AutoJenkins --- .../workflows/sync_develop_with_master.yml | 111 ++++++++++++++++++ .../models/eBarlow_lmda_02_1000ep/__init__.py | 9 ++ .../models/eBarlow_lmda_02_1000ep/model.py | 84 +++++++++++++ .../models/eBarlow_lmda_02_1000ep/setup.py | 25 ++++ .../models/eBarlow_lmda_02_1000ep/test.py | 1 + docs/requirements.txt | 1 + docs/source/conf.py | 6 +- docs/source/index.rst | 3 +- docs/source/modules/benchmark_tutorial.rst | 9 -- .../modules/developer_clarifications.rst | 36 ++++++ docs/source/modules/model_tutorial.rst | 18 --- pyproject.toml | 2 +- 12 files changed, 272 insertions(+), 33 deletions(-) create mode 100644 .github/workflows/sync_develop_with_master.yml create mode 100644 brainscore_vision/models/eBarlow_lmda_02_1000ep/__init__.py create mode 100644 brainscore_vision/models/eBarlow_lmda_02_1000ep/model.py create mode 100644 brainscore_vision/models/eBarlow_lmda_02_1000ep/setup.py create mode 100644 brainscore_vision/models/eBarlow_lmda_02_1000ep/test.py delete mode 100644 docs/source/modules/benchmark_tutorial.rst create mode 100644 docs/source/modules/developer_clarifications.rst delete mode 100644 docs/source/modules/model_tutorial.rst diff --git a/.github/workflows/sync_develop_with_master.yml b/.github/workflows/sync_develop_with_master.yml new file mode 100644 index 000000000..df8cc238d --- /dev/null +++ b/.github/workflows/sync_develop_with_master.yml @@ -0,0 +1,111 @@ +name: Sync develop with master +'on': + pull_request: + types: + - closed + branches: + - master +permissions: + contents: write +jobs: + start: + name: "Starting -🤞" + runs-on: ubuntu-latest + steps: + - name: Starting + id: init + run: | + echo "Starting branch synchronization of ${{ github.repository }}" + create_pr_for_nonplugin: + name: Synchronizing non-plugin PR + needs: start # This job now needs 
the 'start' job to complete first + if: > + github.event.pull_request.merged == true && + !(startsWith(github.event.pull_request.head.ref, 'web_submission_') && + contains(github.event.pull_request.title, 'brain-score.org submission')) + runs-on: ubuntu-latest + steps: + - name: Check out the develop branch + uses: actions/checkout@v3 + with: + fetch-depth: 0 # Fetch full history + ref: develop + - name: Reset the develop branch + run: | + git fetch origin master + git reset --hard origin/master + - name: Create pull request in develop + uses: peter-evans/create-pull-request@v6 + with: + token: '${{ secrets.PAT }}' + commit-message: Sync master into develop + title: Sync master into develop + body: >- + This PR syncs the latest changes from the master branch into the + develop branch. + base: develop + branch: 'developer-sync-pr-${{ github.event.pull_request.number }}' + + auto_sync_for_plugin: + needs: start + if: > + github.event.pull_request.merged == true && + startsWith(github.event.pull_request.head.ref, 'web_submission_') && + contains(github.event.pull_request.title, 'brain-score.org submission') + runs-on: ubuntu-latest + steps: + - name: Checkout the develop branch + uses: actions/checkout@v3 + with: + fetch-depth: 0 + ref: develop + - name: Configure Git user + run: | + git config --global user.name "Branch Synchronizer" + git config --global user.email "action@github.com" + - name: Ensure develop branch is updated + run: | + git fetch origin develop + git checkout develop + git merge origin/develop + # Fetch latest change from master, checkout develop, merge changes from master to develop. + # Includes conflict handling + - name: Merge master into develop + id: merge + run: | + git fetch origin master + git checkout develop + git merge origin/master || { + if git diff --name-only --diff-filter=U | grep -q '.'; then + echo "Merge conflict detected" + echo "::set-output name=merge_conflict::true" + else + echo "Merge failed due to another reason" + exit 1 + fi + } + - name: Push changes to develop (if merge is successful) + if: steps.merge.conclusion == 'success' + run: | #Use force-with-lease to prevent accidental overwrite if branch has been updated. If fails, rebase the update and retry + git push origin develop --force-with-lease || { + echo "Push failed due to updates in develop. Attempting to rebase and retry..." + git fetch origin develop + git rebase origin/develop + git push origin develop --force-with-lease + } + - name: Create pull request for merge conflicts + if: steps.merge.outputs.merge_conflict == 'true' + uses: peter-evans/create-pull-request@v6 + with: + token: '${{ secrets.PAT }}' + commit-message: Merge master into develop with conflict resolution + title: Resolve conflicts between master and develop + body: This PR resolves merge conflicts between master and develop. 
+ base: develop + branch: 'developer-sync-pr-conflict-${{ github.event.pull_request.number }}' + - name: Handle other merge failures + if: failure() && steps.merge.outputs.merge_conflict != 'true' + run: > + echo "Handle non-conflict related failure, such as network issues or missing branches" + + # Possibly incorporate additional handling logic here (e.g.,notifications or retries) \ No newline at end of file diff --git a/brainscore_vision/models/eBarlow_lmda_02_1000ep/__init__.py b/brainscore_vision/models/eBarlow_lmda_02_1000ep/__init__.py new file mode 100644 index 000000000..f29cd461a --- /dev/null +++ b/brainscore_vision/models/eBarlow_lmda_02_1000ep/__init__.py @@ -0,0 +1,9 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from .model import get_model, get_layers + +model_registry["eBarlow_lmda_02_1000ep"] = lambda: ModelCommitment( + identifier="eBarlow_lmda_02_1000ep", + activations_model=get_model("eBarlow_lmda_02_1000ep"), + layers=get_layers("eBarlow_lmda_02_1000ep"), +) diff --git a/brainscore_vision/models/eBarlow_lmda_02_1000ep/model.py b/brainscore_vision/models/eBarlow_lmda_02_1000ep/model.py new file mode 100644 index 000000000..2a4d8797e --- /dev/null +++ b/brainscore_vision/models/eBarlow_lmda_02_1000ep/model.py @@ -0,0 +1,84 @@ +from brainscore_vision.model_helpers.check_submission import check_models +import functools +import os +from urllib.request import urlretrieve +import torchvision.models +from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper +from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images +from pathlib import Path +from brainscore_vision.model_helpers import download_weights +import torch +from collections import OrderedDict + +# This is an example implementation for submitting resnet-50 as a pytorch model + +# Attention: It is important, that the wrapper identifier is unique per model! +# The results will otherwise be the same due to brain-scores internal result caching mechanism. +# Please load your pytorch model for usage in CPU. There won't be GPUs available for scoring your model. +# If the model requires a GPU, contact the brain-score team directly. +from brainscore_vision.model_helpers.check_submission import check_models + + +def get_model_list(): + return ["eBarlow_lmda_02_1000ep"] + + +def get_model(name): + assert name == "eBarlow_lmda_02_1000ep" + url = "https://users.flatironinstitute.org/~tyerxa/equi_proj/training_checkpoints/fresh/paired/lmda_0.2/Barlow_1000ep/ep1000-ba625000-rank0" + fh = urlretrieve(url) + state_dict = torch.load(fh[0], map_location=torch.device("cpu"))["state"]["model"] + model = load_composer_classifier(state_dict) + preprocessing = functools.partial(load_preprocess_images, image_size=224) + wrapper = PytorchWrapper(identifier=name, model=model, preprocessing=preprocessing) + wrapper.image_size = 224 + return wrapper + +def load_composer_classifier(sd): + model = torchvision.models.resnet.resnet50() + new_sd = OrderedDict() + for k, v in sd.items(): + if 'lin_cls' in k: + new_sd['fc.' + k.split('.')[-1]] = v + if ".f." 
not in k: + continue + parts = k.split(".") + idx = parts.index("f") + new_k = ".".join(parts[idx + 1 :]) + new_sd[new_k] = v + model.load_state_dict(new_sd, strict=True) + return model + +def get_layers(name): + assert name == "eBarlow_lmda_02_1000ep" + layers = [ + "layer1.0", + "layer1.1", + "layer1.2", + "layer2.0", + "layer2.1", + "layer2.2", + "layer2.3", + "layer3.0", + "layer3.1", + "layer3.2", + "layer3.3", + "layer3.4", + "layer3.5", + "layer4.0", + "layer4.1", + "layer4.2", + "avgpool", + "fc", + ] + outs = ["conv1", "layer1", "layer2", "layer3", "layer4"] + + return layers + outs + + +def get_bibtex(model_identifier): + return """xx""" + + +if __name__ == "__main__": + check_models.check_base_models(__name__) diff --git a/brainscore_vision/models/eBarlow_lmda_02_1000ep/setup.py b/brainscore_vision/models/eBarlow_lmda_02_1000ep/setup.py new file mode 100644 index 000000000..421914cfb --- /dev/null +++ b/brainscore_vision/models/eBarlow_lmda_02_1000ep/setup.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +from setuptools import setup, find_packages + +requirements = [ "torchvision", + "torch" +] + +setup( + packages=find_packages(exclude=['tests']), + include_package_data=True, + install_requires=requirements, + license="MIT license", + zip_safe=False, + keywords='brain-score template', + classifiers=[ + 'Development Status :: 2 - Pre-Alpha', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: MIT License', + 'Natural Language :: English', + 'Programming Language :: Python :: 3.7', + ], + test_suite='tests', +) diff --git a/brainscore_vision/models/eBarlow_lmda_02_1000ep/test.py b/brainscore_vision/models/eBarlow_lmda_02_1000ep/test.py new file mode 100644 index 000000000..e594ba9e1 --- /dev/null +++ b/brainscore_vision/models/eBarlow_lmda_02_1000ep/test.py @@ -0,0 +1 @@ +# Left empty as part of 2023 models migration diff --git a/docs/requirements.txt b/docs/requirements.txt index 0f71f0a9f..23532fd21 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,4 +1,5 @@ -e . Sphinx>=4 +sphinx_rtd_theme recommonmark \ No newline at end of file diff --git a/docs/source/conf.py b/docs/source/conf.py index 295f247a9..1f14b464d 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -17,11 +17,11 @@ # -- Project information ----------------------------------------------------- project = 'Brain-Score' -copyright = '2022, Brain-Score Team' +copyright = '2024, Brain-Score Team' author = 'Brain-Score Team' # The full version, including alpha/beta/rc tags -release = '1.3' +release = '2.0.0' # -- General configuration --------------------------------------------------- @@ -58,7 +58,7 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # -html_theme = 'alabaster' +html_theme = 'sphinx_rtd_theme' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. 
They are copied after the builtin static files, diff --git a/docs/source/index.rst b/docs/source/index.rst index c7d629d85..453876e60 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -12,9 +12,8 @@ By following a unified :ref:`interface`, all models can be treated as an experim :caption: Contents: examples - modules/model_tutorial - modules/benchmark_tutorial modules/model_interface + modules/developer_clarifications.rst modules/benchmarks modules/metrics modules/submission diff --git a/docs/source/modules/benchmark_tutorial.rst b/docs/source/modules/benchmark_tutorial.rst deleted file mode 100644 index 258590d79..000000000 --- a/docs/source/modules/benchmark_tutorial.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. _Benchmark_Tutorial: -.. |UnitTestSupport| replace:: We realize that unit tests can be a hurdle and we can take over this task for you. - Please let us know of any hurdles and we will do our best to support. - -================== -Benchmark Tutorial -================== - -The Brain-Score benchmark tutorial has moved, and can be found here: https://www.brain-score.org/tutorials/benchmarks \ No newline at end of file diff --git a/docs/source/modules/developer_clarifications.rst b/docs/source/modules/developer_clarifications.rst new file mode 100644 index 000000000..6e63f2008 --- /dev/null +++ b/docs/source/modules/developer_clarifications.rst @@ -0,0 +1,36 @@ +.. _interface: + +************************ +Developer Clarifications +************************ + +The Following documentation stores commonly-asked developer questions. We hope this will be useful to +anyone interested in contributing to Brain-Score's codebase or scientific workings. + + + +1. **For a given model, are activations different on each benchmark? How?** + + + Activations per model are generated based on benchmark stimuli; not every benchmark has unique stimuli. For most + model-benchmark pairs, activations will be different because stimuli will be different. The exceptions to this + are the benchmarks that use the same stimuli, such as the `MajajHong20215` family of benchmarks. + +2. **Result Caching** + + Result Caching is a Brain-Score `repo `_ that allows model activations (and other functions) to be cached + to disk, in order to speed up the process of rescoring models. It contains a decorator that can be attached to a function + right before it is defined. On the first run of that function, `result_caching` will save to disk the result of tha function + and will load that result from disk in future calls with the same parameters. All files are saved in the user's `~/result_caching` + folder, and they are persistent, as there is no garbage collection built in. You can deactivate + `result_caching` by simply setting the environment flag `RESULTCACHING_DISABLE` to `1`. Please see the link above + for more detailed documentation. + +3. **Model Mapping Procedure** + + In general, there are different methods that are used in the Brain-Score code to instruct the model to "begin recording", + observe stimuli, and to generate scores. Models follow the `ModelCommitment` to conform to the `BrainModel` API. A + `BrainModel` is any model that has a `region_layer_map`. This allows the layers in the model to be mapped to layers in + the ventral visual stream, and is chosen by scoring models on the public version of a benchmark (the private + benchmark data is heldout for the BrainModel to be scored on). See the more technical docs + `here `_ for additional notes. 
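For illustration of the ``Result Caching`` note added above: a minimal sketch of the decorator pattern it describes, assuming the ``store`` decorator exported by the ``result_caching`` package; the function name ``compute_activations`` is hypothetical and stands in for any expensive computation.

.. code-block:: python

    import os

    from result_caching import store  # assumed import path for the caching decorator

    @store()
    def compute_activations(model_identifier, benchmark_identifier):
        # stand-in for an expensive computation; the first call stores its result
        # under ~/result_caching, later calls with the same arguments load it from disk
        return f"activations for {model_identifier} on {benchmark_identifier}"

    # caching can be switched off globally via the environment flag mentioned above
    os.environ['RESULTCACHING_DISABLE'] = '1'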
diff --git a/docs/source/modules/model_tutorial.rst b/docs/source/modules/model_tutorial.rst deleted file mode 100644 index 005b3066f..000000000 --- a/docs/source/modules/model_tutorial.rst +++ /dev/null @@ -1,18 +0,0 @@ -.. _model_tutorial: - -.. _technical paper: https://www.biorxiv.org/content/10.1101/407007v1 -.. _perspective paper: https://www.cell.com/neuron/fulltext/S0896-6273(20)30605-X -.. _Pycharm: https://www.jetbrains.com/pycharm/download -.. _introduction: https://www.tutorialspoint.com/pycharm/index.htm -.. _sample-model-submission: https://github.com/brain-score/sample-model-submission -.. _github: https://github.com/brain-score -.. _github repository: https://github.com/brain-score/vision -.. _windows: https://git-scm.com/download/win -.. _mac: https://git-scm.com/download/mac -.. _profile: http://www.brain-score.org/profile/ - -============== -Model Tutorial -============== - -The Brain-Score model tutorial has moved, and can be found here: https://www.brain-score.org/tutorials/models diff --git a/pyproject.toml b/pyproject.toml index a336cc52c..3b28322e9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -49,7 +49,7 @@ test = [ [build-system] requires = [ - "setuptools>=65.*", + "setuptools>=65.0.0", "wheel" ] From 42d65e43fc34e4a4f32ef4415442b3996aa97f96 Mon Sep 17 00:00:00 2001 From: Katherine Fairchild Date: Thu, 29 Aug 2024 15:34:33 -0400 Subject: [PATCH 11/28] add alexnet_7be5be79 to models (#1207) Co-authored-by: AutoJenkins --- .../models/alexnet_7be5be79/__init__.py | 7 +++ .../models/alexnet_7be5be79/model.py | 44 +++++++++++++++++++ .../models/alexnet_7be5be79/setup.py | 25 +++++++++++ .../models/alexnet_7be5be79/test.py | 1 + 4 files changed, 77 insertions(+) create mode 100644 brainscore_vision/models/alexnet_7be5be79/__init__.py create mode 100644 brainscore_vision/models/alexnet_7be5be79/model.py create mode 100644 brainscore_vision/models/alexnet_7be5be79/setup.py create mode 100644 brainscore_vision/models/alexnet_7be5be79/test.py diff --git a/brainscore_vision/models/alexnet_7be5be79/__init__.py b/brainscore_vision/models/alexnet_7be5be79/__init__.py new file mode 100644 index 000000000..f86852625 --- /dev/null +++ b/brainscore_vision/models/alexnet_7be5be79/__init__.py @@ -0,0 +1,7 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from .model import get_model, get_layers + +model_registry['alexnet_7be5be79'] = lambda: ModelCommitment(identifier='alexnet_7be5be79', + activations_model=get_model('alexnet_7be5be79'), + layers=get_layers('alexnet_7be5be79')) diff --git a/brainscore_vision/models/alexnet_7be5be79/model.py b/brainscore_vision/models/alexnet_7be5be79/model.py new file mode 100644 index 000000000..47d8ff207 --- /dev/null +++ b/brainscore_vision/models/alexnet_7be5be79/model.py @@ -0,0 +1,44 @@ +from brainscore_vision.model_helpers.check_submission import check_models +import functools +import os +import torchvision.models +from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper +from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images +from pathlib import Path +from brainscore_vision.model_helpers import download_weights +import torch + +# This is an example implementation for submitting resnet-50 as a pytorch model + +# Attention: It is important, that the wrapper identifier is unique per model! +# The results will otherwise be the same due to brain-scores internal result caching mechanism. 
+# Please load your pytorch model for usage in CPU. There won't be GPUs available for scoring your model. +# If the model requires a GPU, contact the brain-score team directly. +from brainscore_vision.model_helpers.check_submission import check_models + + +def get_model_list(): + return ['alexnet_7be5be79'] + + +def get_model(name): + assert name == 'alexnet_7be5be79' + model = torchvision.models.alexnet(pretrained=True) + preprocessing = functools.partial(load_preprocess_images, image_size=224) + wrapper = PytorchWrapper(identifier='alexnet_7be5be79', model=model, preprocessing=preprocessing) + wrapper.image_size = 224 + return wrapper + + +def get_layers(name): + assert name == 'alexnet_7be5be79' + return ['features.0','features.3', 'features.6', 'features.8', 'features.10', 'classifier.1', + 'classifier.4', 'classifier.6'] + + +def get_bibtex(model_identifier): + return """xx""" + + +if __name__ == '__main__': + check_models.check_base_models(__name__) diff --git a/brainscore_vision/models/alexnet_7be5be79/setup.py b/brainscore_vision/models/alexnet_7be5be79/setup.py new file mode 100644 index 000000000..421914cfb --- /dev/null +++ b/brainscore_vision/models/alexnet_7be5be79/setup.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +from setuptools import setup, find_packages + +requirements = [ "torchvision", + "torch" +] + +setup( + packages=find_packages(exclude=['tests']), + include_package_data=True, + install_requires=requirements, + license="MIT license", + zip_safe=False, + keywords='brain-score template', + classifiers=[ + 'Development Status :: 2 - Pre-Alpha', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: MIT License', + 'Natural Language :: English', + 'Programming Language :: Python :: 3.7', + ], + test_suite='tests', +) diff --git a/brainscore_vision/models/alexnet_7be5be79/test.py b/brainscore_vision/models/alexnet_7be5be79/test.py new file mode 100644 index 000000000..e594ba9e1 --- /dev/null +++ b/brainscore_vision/models/alexnet_7be5be79/test.py @@ -0,0 +1 @@ +# Left empty as part of 2023 models migration From bb197b53d7b89f4dab0b4bdca5afb2673b55952f Mon Sep 17 00:00:00 2001 From: Michael Ferguson Date: Tue, 3 Sep 2024 09:58:14 -0400 Subject: [PATCH 12/28] update reqs (#1210) --- .../models/temporal_model_VideoMAE/requirements.txt | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/brainscore_vision/models/temporal_model_VideoMAE/requirements.txt b/brainscore_vision/models/temporal_model_VideoMAE/requirements.txt index 58b366c47..d8d4d6fa0 100644 --- a/brainscore_vision/models/temporal_model_VideoMAE/requirements.txt +++ b/brainscore_vision/models/temporal_model_VideoMAE/requirements.txt @@ -1,4 +1,6 @@ torch torchvision timm -videomae @ git+https://github.com/YingtianDt/VideoMAE.git \ No newline at end of file +videomae @ git+https://github.com/YingtianDt/VideoMAE.git +iopath +simplejson \ No newline at end of file From 2ce81693e8de2af856e2fee44a16044895c73bda Mon Sep 17 00:00:00 2001 From: Kartik Pradeepan Date: Tue, 3 Sep 2024 10:10:35 -0400 Subject: [PATCH 13/28] update reqs (#1210) (#1211) Co-authored-by: Michael Ferguson --- .../models/temporal_model_VideoMAE/requirements.txt | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/brainscore_vision/models/temporal_model_VideoMAE/requirements.txt b/brainscore_vision/models/temporal_model_VideoMAE/requirements.txt index 58b366c47..d8d4d6fa0 100644 --- a/brainscore_vision/models/temporal_model_VideoMAE/requirements.txt +++ 
b/brainscore_vision/models/temporal_model_VideoMAE/requirements.txt @@ -1,4 +1,6 @@ torch torchvision timm -videomae @ git+https://github.com/YingtianDt/VideoMAE.git \ No newline at end of file +videomae @ git+https://github.com/YingtianDt/VideoMAE.git +iopath +simplejson \ No newline at end of file From 6c4723574421c6dc8b94ef85e77f08c2be328e4a Mon Sep 17 00:00:00 2001 From: Ben Lonnqvist Date: Fri, 6 Sep 2024 08:59:12 +0200 Subject: [PATCH 14/28] update minor stimulus mismatch in malania 2007 (#1208) --- .../benchmarks/malania2007/benchmark.py | 17 +- .../data/malania2007/__init__.py | 154 +++++++++--------- 2 files changed, 91 insertions(+), 80 deletions(-) diff --git a/brainscore_vision/benchmarks/malania2007/benchmark.py b/brainscore_vision/benchmarks/malania2007/benchmark.py index 7ad587b4d..18bc9e7ee 100644 --- a/brainscore_vision/benchmarks/malania2007/benchmark.py +++ b/brainscore_vision/benchmarks/malania2007/benchmark.py @@ -110,7 +110,12 @@ def __init__(self, condition: str): def __call__(self, candidate: BrainModel): model_responses = {} - candidate.start_task(BrainModel.Task.probabilities, fitting_stimuli=self._fitting_stimuli, + fitting_stimulus_set = place_on_screen( + self._fitting_stimuli, + target_visual_degrees=candidate.visual_degrees(), + source_visual_degrees=self._visual_degrees + ) + candidate.start_task(BrainModel.Task.probabilities, fitting_stimuli=fitting_stimulus_set, number_of_trials=2, require_variance=True) for condition in (self.baseline_condition, self.condition): stimulus_set = place_on_screen( @@ -160,9 +165,15 @@ def __init__(self): def __call__(self, candidate: BrainModel): scores = [] + for condition in self.conditions: - candidate.start_task(BrainModel.Task.probabilities, fitting_stimuli=self._fitting_stimuli[condition], - number_of_trials=2, require_variance=True) + fitting_stimulus_set = place_on_screen( + self._fitting_stimuli[condition], + target_visual_degrees=candidate.visual_degrees(), + source_visual_degrees=self._visual_degrees + ) + candidate.start_task(BrainModel.Task.probabilities, fitting_stimuli=fitting_stimulus_set, + number_of_trials=2, require_variance=True) stimulus_set = place_on_screen( self._stimulus_set, target_visual_degrees=candidate.visual_degrees(), diff --git a/brainscore_vision/data/malania2007/__init__.py b/brainscore_vision/data/malania2007/__init__.py index e6ecbb5cd..d6cb25ea1 100644 --- a/brainscore_vision/data/malania2007/__init__.py +++ b/brainscore_vision/data/malania2007/__init__.py @@ -103,152 +103,152 @@ stimulus_set_registry['Malania2007.equal2'] = lambda: load_stimulus_set_from_s3( identifier='Malania2007_equal-2', bucket="brainio-brainscore", - csv_sha1="77e94b9b5122a83ebbaffb4a06fcab68ef652751", - zip_sha1="99826d459f6920dafab72eed69eb2a90492ce796", - csv_version_id="MlRpSz.4.jvVRFAZl8tGEum1P0Q0GtyS", - zip_version_id="vHbAM_FjTbjp5U12BkAelJu4KW6PLYFn" + csv_sha1="36f3c92a1335895b10c4150f5c25a68ab4576d4a", + zip_sha1="80be52e8701ecb8e7fbb81c0bff9c148ddc2b401", + csv_version_id="lkbpUNzhET3.hR.9StpSVvoxh05aWzoi", + zip_version_id="Wd7Fc3QVn1baC53Iy.E5ha4YqWvdybM3" ) stimulus_set_registry['Malania2007.equal2_fit'] = lambda: load_stimulus_set_from_s3( identifier='Malania2007_equal-2_fit', bucket="brainio-brainscore", - csv_sha1="bafdfc855c164d3e5443d67dcf9eb7762443f964", - zip_sha1="e52fec1a79ac8837e331b180c2a8a140840d6666", - csv_version_id="PIXEW.2vHvjIBP0Q2KHIpnxns7t9o8Cf", - zip_version_id="h7pp84CYFGLKlPhveD0L5ogePqisk_I7" + csv_sha1="b7105f44d5d781f5e06159008a3f63c9f774c2d1", + 
zip_sha1="ba5c1bacbb4afe40c5a19eddb07fc9f98312ec69", + csv_version_id="qhsx4_OM0FSCl7SU0hXhhmiJvLnDU6Dm", + zip_version_id="PzGnzHtpGghaHGK6MO4DeSy7w.rDUfRN" ) stimulus_set_registry['Malania2007.equal16'] = lambda: load_stimulus_set_from_s3( identifier='Malania2007_equal-16', bucket="brainio-brainscore", - csv_sha1="5fedcff56c302339c3451ae2edbcb846c39c3189", - zip_sha1="b30dc2dc90e4f3d88775622e558db963765f38e0", - csv_version_id="VmRGiQkhPALDwq74NpE2VpTiKTGn.30T", - zip_version_id="c.DOlVULXZingRJ9gVY_NbZwRrj_xs_i" + csv_sha1="14f9f7098831691811abf9953766951edc952203", + zip_sha1="5127e88eaed1ef64247c7cb4262868533fb4ebae", + csv_version_id="bgBDFK3666NPXwINqGcdouvtWy12yqyY", + zip_version_id="zlkMQOE9wfTQHPohmxol4uAR6y0zqwjI" ) stimulus_set_registry['Malania2007.equal16_fit'] = lambda: load_stimulus_set_from_s3( identifier='Malania2007_equal-16_fit', bucket="brainio-brainscore", - csv_sha1="3de3e5de19a638767a01ba68cb690dc746c29a77", - zip_sha1="1728920c5ea4fb7b3a3cf3c076165aca65c8b751", - csv_version_id="joAq8JBC_7axZDfLNFgoXFhTCLU_KKr_", - zip_version_id="77JRwdldaHDr6TLW1NnB5HucIrkUCVg." + csv_sha1="2ff7f2f97250b9bcce3d2753be6e5b98e083892b", + zip_sha1="db07ef4862fd9cb65c1e726cacc5914821296a5b", + csv_version_id="cVYkl_N7c36UfjbWqAffYrHVDbPhmiwa", + zip_version_id="azu8FTnJVmsou98co5iVE2G8OemMIl4H" ) stimulus_set_registry['Malania2007.long2'] = lambda: load_stimulus_set_from_s3( identifier='Malania2007_long-2', bucket="brainio-brainscore", - csv_sha1="ba65316a63dc688d8dfb410219a28fd02850b991", - zip_sha1="7fd431fbbd4a4dc0cd271624d3297c19a28a70b5", - csv_version_id="_0fqObn6k5KvXurHMsuD4IqtrqbNskyo", - zip_version_id="foL92ndVAAAETzMYHdmMtwIwKxXYhAB." + csv_sha1="153b987c4c6b8a22efb88be26aaa46bd36912c9b", + zip_sha1="07bb413d56ac77fc71823559780cdb16e4df563d", + csv_version_id="nKEYl_hb8tBKOg47O28iLY5.oYyimmAf", + zip_version_id="3BS.xmMHnND1C3bjDsut8qILlzMIJhHQ" ) stimulus_set_registry['Malania2007.long2_fit'] = lambda: load_stimulus_set_from_s3( identifier='Malania2007_long-2_fit', bucket="brainio-brainscore", - csv_sha1="b91dd9261c1d47bdd37f9b60eb8066b7b719709f", - zip_sha1="5be3e1cd57b59081103715b5d318505166e0045e", - csv_version_id="mATh8lcVisdsDnPnU6ACE23iBPfpkLZA", - zip_version_id="6nEviShTyCYQKrmxyjDyNov9Skc77eXT" + csv_sha1="8b6d1557879e6271554c0fcb67bf6b2941dad2c8", + zip_sha1="66205529af748ffd88579caef86b107838c0b0da", + csv_version_id="ut0_Zbq97vwzmkk9MY.0h5phJZqp_McX", + zip_version_id="qcGk5zy7KN.vEmLgtZKRYn0OXyWoER9G" ) stimulus_set_registry['Malania2007.long16'] = lambda: load_stimulus_set_from_s3( identifier='Malania2007_long-16', bucket="brainio-brainscore", - csv_sha1="1f1b03319b81698ba5e7db389dcd4248f94e45ca", - zip_sha1="97c70462a28905b58058c687880188d634d357f0", - csv_version_id="4RtywQ40hfQA4N80g8lxEScAmMXFRg7E", - zip_version_id="lJy2QosABzHtiA6BJaE4OqCn1w1Jhz2k" + csv_sha1="6c5d45b489bc290e41013d211d18570368012c9b", + zip_sha1="10944e5b65e8da9e52087d3cbbdc6575538c2847", + csv_version_id="sswWsVsgFbPU1psGfoIS.0Goi6b.9Dn2", + zip_version_id="5.gegdwQMNpqcP3FnW4DkTZ7s3bT0j75" ) stimulus_set_registry['Malania2007.long16_fit'] = lambda: load_stimulus_set_from_s3( identifier='Malania2007_long-16_fit', bucket="brainio-brainscore", - csv_sha1="d80a02c75b9908301c3c8dc9f7116fecf8e060ec", - zip_sha1="d8819b94d3f502d7a382c8a0db0a34627132e5e2", - csv_version_id="gOxY6tjnT7LO.FDeL1xkRmowl5wYeAia", - zip_version_id="71UAPTnZscIuqdx2dhuW9V0O0DO_TgTM" + csv_sha1="603dc8edb169f39e322f8980972eda1930c300ed", + zip_sha1="a67330e18f1e5d9ad3829d8d8c000487fe3e4d48", + 
csv_version_id=".qV8En95o4QR_jgvr145ww8xvgAnoIs5", + zip_version_id="h_IWMOSq4uJe91XgOEHFSVwa01vH74.H" ) stimulus_set_registry['Malania2007.short2'] = lambda: load_stimulus_set_from_s3( identifier='Malania2007_short-2', bucket="brainio-brainscore", - csv_sha1="bf0252056d2084e855646f624700ab03c19cfc3d", - zip_sha1="eee1270feb7443e7e315d8feb7fb0a6b6908f554", - csv_version_id="zcJqM.ZPwJyiMRWa3RBdvv401yPnLQAp", - zip_version_id="C8WZzAAQ0JGHAAKii4JpvlRhcUOhgSj." + csv_sha1="c8bb84c5468a43348149afe24d5c0ebda233d54e", + zip_sha1="1739226e7e32f60a7bb060fecc14c4a6353ca2ad", + csv_version_id="sGDRldX6CEbDguYsikFArt1P5aMMCueM", + zip_version_id="1RjSss5KIKKfK7UzeGRyQgBu.I47xcIQ" ) stimulus_set_registry['Malania2007.short2_fit'] = lambda: load_stimulus_set_from_s3( identifier='Malania2007_short-2_fit', bucket="brainio-brainscore", - csv_sha1="73127d279a2cd254ae4f07b0053580851e84b00c", - zip_sha1="918736349d714a4f784c29bf7e7d218b103e128d", - csv_version_id="iwGRp3_ktAHfJ6r7ktSK9gsthDjKek70", - zip_version_id="6RpplJ9UVXTlvhmFSXla0Qa20b44m8Ds" + csv_sha1="600c754811aa27d80a155c8ac643a81f2347ce3a", + zip_sha1="a1a121dbbbf761caea0a086c2a74ab511f296ed5", + csv_version_id="X7c1h_64KB18noSoG2uaGo2baYTvblKa", + zip_version_id="hx5Of92KkReH_GXPll4MnFrJ.yI3UQhH" ) stimulus_set_registry['Malania2007.short4'] = lambda: load_stimulus_set_from_s3( identifier='Malania2007_short-4', bucket="brainio-brainscore", - csv_sha1="816326d89d358f6592bd1f789e5c8d429fbca1cd", - zip_sha1="ff57d976ef75ede9148a4097e90d6cf6c8054d34", - csv_version_id="Waikk.bktXIncCUtCIAyB2EqynGk.H.F", - zip_version_id="rl_muxI4UEpwXVaXuhsqroG..COGILvR" + csv_sha1="181c912c03fdb3e4f89a737584a3a0816859f816", + zip_sha1="820019a7f68db60fac11a5c5f3e42037cf205248", + csv_version_id="pjGwais_x1SzlK9kOWzypnEjWOqIJejt", + zip_version_id="uHY9JlyoIKF7QY.7h2YnaMjoGMOLTS0Y" ) stimulus_set_registry['Malania2007.short4_fit'] = lambda: load_stimulus_set_from_s3( identifier='Malania2007_short-4_fit', bucket="brainio-brainscore", - csv_sha1="3512cfd029f4e4299bc41ede519e691d80cfc3d5", - zip_sha1="301386408dd1fb8556881f9a171be2d43dbfec6e", - csv_version_id="UhisdJqiEmkQ_4zsUtAmaxtle2kMZdcD", - zip_version_id="xt_v0xgCB8YUptyPB0yZFHIUcel5MF_x" + csv_sha1="65af7b5d3845a7ea284aefba21734e1d298742c8", + zip_sha1="5234b449c05a43e726543029137fe26930157b09", + csv_version_id=".HhdLMWwnSAJTeBICSys3fjbWpa9V3ee", + zip_version_id="M1eKvnklDoUFZ70K4x9EFiYycyIAybXY" ) stimulus_set_registry['Malania2007.short6'] = lambda: load_stimulus_set_from_s3( identifier='Malania2007_short-6', bucket="brainio-brainscore", - csv_sha1="3d5dd9b48a56ba0c31de94b6221b97df962b6f8a", - zip_sha1="120d90a143d1577d4745c3f69291d0db6c7e512e", - csv_version_id="GwGHPJkMDdg8N_.boyj8qJ3ChsEx4w._", - zip_version_id="gIN1O4yz.THvK0Ifm5M3AI58ZACE1QFh" + csv_sha1="57813230e337a09c4c423da927c1f33e62054547", + zip_sha1="dab58e8e996f91af643a0b61247e7ef87f35338d", + csv_version_id="4GLCXr_ii4r7jHsOoRbTZWgdQMrChMwy", + zip_version_id="WQ.jkGJuKabBAt9br9oAYB4wDstDVPer" ) stimulus_set_registry['Malania2007.short6_fit'] = lambda: load_stimulus_set_from_s3( identifier='Malania2007_short-6_fit', bucket="brainio-brainscore", - csv_sha1="27a5be4fca190837fc5b75ed2cdbbffbf6b41338", - zip_sha1="c88e05c6cadec88a2c9475b0735323a2b049bd75", - csv_version_id="oMlj7wV85s00hJFE84ym0AJHLCfYHVA6", - zip_version_id="oS.KrBTlcYAgr_lWyA_bIjVc2js_VeUe" + csv_sha1="ea7eb26b42fe9e4fc1ac2ed6e9bad439e8077ce1", + zip_sha1="895e69c835b22b07ee66a0f5f53e7a108ac8287c", + csv_version_id="agzJvrPzCyMsHVPuJeHnu.kWLqCOgTyv", + 
zip_version_id="6ArqgIEm9wZoihx6_swTilr.fBtd14Gw" ) stimulus_set_registry['Malania2007.short8'] = lambda: load_stimulus_set_from_s3( identifier='Malania2007_short-8', bucket="brainio-brainscore", - csv_sha1="8fc35f607196b4c0cdcebd8102d17e3a637e5988", - zip_sha1="a9215ed0cb0f0333582dda65f6afd7015c506ba5", - csv_version_id="gzys8s7j7euMEl7JJpqBFLFHMpFjwbA7", - zip_version_id="3fYb4Iruh3lRKUwC1APqFH4CNbE5DEuk" + csv_sha1="3df9a38605a4590eac8a1151779ba68c3cd54dc1", + zip_sha1="7626364e0776b2809ae36d9cb713c6ff9a0d0c05", + csv_version_id="8OV0COxeMrzgsJnm_vC3q9pEB44LSllC", + zip_version_id="YmcL0kN4_sDVMxegHdurephfduxWHFua" ) stimulus_set_registry['Malania2007.short8_fit'] = lambda: load_stimulus_set_from_s3( identifier='Malania2007_short-8_fit', bucket="brainio-brainscore", - csv_sha1="aa4133a9fe19a3c9004a9cb5e6eb5a72564e4883", - zip_sha1="beb9f068794708e41750202b78c438538a40a8fb", - csv_version_id="7N1Z.uiagqBknJUSBQ4mVfHKWgocM5aA", - zip_version_id="kcEOPOkvWymO0wX5j_QKxcNPl9sZsjFd" + csv_sha1="2782c818056b374e86195cbdb0ab1a52ef0d01da", + zip_sha1="ec2fa2a261d965455ffa81acdb0fddef447ad4ff", + csv_version_id="iInrw3cTlTQw0NxQ0bvpbar.jD64IkYh", + zip_version_id="hCWq6yFtO6LlDrAY46B0fhHVWaxJSDGY" ) stimulus_set_registry['Malania2007.short16'] = lambda: load_stimulus_set_from_s3( identifier='Malania2007_short-16', bucket="brainio-brainscore", - csv_sha1="addd260c9959f2f315db03c0a39c6c1b01fef685", - zip_sha1="cba4c2866ec692fb808471df7c2fed446d9fb3fe", - csv_version_id="Peu7WU5vanLoZNOFIAbuPzZNPDRgbCSX", - zip_version_id="wFkJkZMC8Fs_HfPJy32CMKcHJWeQIUDB" + csv_sha1="9f5f4e3597006c50530017ce769c8689d43b06f5", + zip_sha1="b67b1e70e8ba698907c95614bcb16eea6ff2f090", + csv_version_id="syxlZTsmHlr6eL8L4aI9ddWbKhr8tLUk", + zip_version_id="6kSKtajsISK6TTE6Ej3UG0oGblFzU9dk" ) stimulus_set_registry['Malania2007.short16_fit'] = lambda: load_stimulus_set_from_s3( identifier='Malania2007_short-16_fit', bucket="brainio-brainscore", - csv_sha1="9b340fe242117482f6992f48a805297215ba9924", - zip_sha1="4a90d511a3ceb3307a672177a3ad6b76521e65e5", - csv_version_id="sYBPEmXDgbWipuepciLirlorQE3L8BLc", - zip_version_id="pYvOkrLxadkQ67K3__wmciNwaCW.hyyN" + csv_sha1="5bc0314a6c16095a70049fa5e8df5e9b94345f30", + zip_sha1="0ca3930831ca926ea8b8c3600695b639ff05ddb5", + csv_version_id="HZWWB5vyyMoaTCyM7t.4RwfnNtP4e64I", + zip_version_id="aWy0F_It4iUGAgGJCECz6NOJ__JHr2ib" ) stimulus_set_registry['Malania2007.vernier_only'] = lambda: load_stimulus_set_from_s3( identifier='Malania2007_vernier-only', bucket="brainio-brainscore", - csv_sha1="b2cb0f2ed32426b739f90187ae24ad4adf84110d", - zip_sha1="0e177aea523adc320070196fbb777af4cdba2144", - csv_version_id="c8wpZpqoMqdATlqdoq3srPUi_8fYg6a.", - zip_version_id="28lHgxERhw32Ux6IBCxWWTtRwIaRrwo6" -) + csv_sha1="c71f654fccf1265a95dd0585c186232a2519e944", + zip_sha1="eadff359975c3ba250224ce1942544b091415095", + csv_version_id="PQoHljauNff1yWCMNKd5JEzR8Y38_j.0", + zip_version_id="6gE8TX1J89BnsHmdqb7rIGvRMw.sZdo6" +) \ No newline at end of file From 3df462f171409df3ce6388e99ce34e8fa2fe328b Mon Sep 17 00:00:00 2001 From: Katherine Fairchild Date: Sun, 8 Sep 2024 08:12:46 -0400 Subject: [PATCH 15/28] add alexnet_7be5be79_convs to models (#1220) Co-authored-by: AutoJenkins --- .../models/alexnet_7be5be79_convs/__init__.py | 5 +++ .../models/alexnet_7be5be79_convs/model.py | 42 +++++++++++++++++++ .../models/alexnet_7be5be79_convs/setup.py | 25 +++++++++++ .../models/alexnet_7be5be79_convs/test.py | 1 + 4 files changed, 73 insertions(+) create mode 100644 
brainscore_vision/models/alexnet_7be5be79_convs/__init__.py create mode 100644 brainscore_vision/models/alexnet_7be5be79_convs/model.py create mode 100644 brainscore_vision/models/alexnet_7be5be79_convs/setup.py create mode 100644 brainscore_vision/models/alexnet_7be5be79_convs/test.py diff --git a/brainscore_vision/models/alexnet_7be5be79_convs/__init__.py b/brainscore_vision/models/alexnet_7be5be79_convs/__init__.py new file mode 100644 index 000000000..36cd00711 --- /dev/null +++ b/brainscore_vision/models/alexnet_7be5be79_convs/__init__.py @@ -0,0 +1,5 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from .model import get_model, get_layers + +model_registry['alexnet_7be5be79_convs'] = lambda: ModelCommitment(identifier='alexnet_7be5be79_convs', activations_model=get_model('alexnet_7be5be79_convs'), layers=get_layers('alexnet_7be5be79_convs')) diff --git a/brainscore_vision/models/alexnet_7be5be79_convs/model.py b/brainscore_vision/models/alexnet_7be5be79_convs/model.py new file mode 100644 index 000000000..42ad4e2d0 --- /dev/null +++ b/brainscore_vision/models/alexnet_7be5be79_convs/model.py @@ -0,0 +1,42 @@ +from brainscore_vision.model_helpers.check_submission import check_models +import functools +import os +import torchvision.models +from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper +from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images +from pathlib import Path +from brainscore_vision.model_helpers import download_weights +import torch + +# This is an example implementation for submitting alexnet as a pytorch model + +# Attention: It is important that the wrapper identifier is unique per model! +# The results will otherwise be the same due to brain-score's internal result caching mechanism. +# Please load your pytorch model for usage on CPU. There won't be GPUs available for scoring your model. +# If the model requires a GPU, contact the brain-score team directly. 
+from brainscore_vision.model_helpers.check_submission import check_models + + +def get_model_list(): + return ['alexnet_7be5be79_convs'] + + +def get_model(name): + assert name == 'alexnet_7be5be79_convs' + model = torchvision.models.alexnet(pretrained=True) + preprocessing = functools.partial(load_preprocess_images, image_size=224) + wrapper = PytorchWrapper(identifier='alexnet_7be5be79_convs', model=model, preprocessing=preprocessing) + wrapper.image_size = 224 + return wrapper + + +def get_layers(name): + assert name == 'alexnet_7be5be79_convs' + return ['features.3','features.6', 'features.8', 'features.10', 'classifier.1', 'classifier.4'] + +def get_bibtex(model_identifier): + return """xx""" + + +if __name__ == '__main__': + check_models.check_base_models(__name__) diff --git a/brainscore_vision/models/alexnet_7be5be79_convs/setup.py b/brainscore_vision/models/alexnet_7be5be79_convs/setup.py new file mode 100644 index 000000000..421914cfb --- /dev/null +++ b/brainscore_vision/models/alexnet_7be5be79_convs/setup.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +from setuptools import setup, find_packages + +requirements = [ "torchvision", + "torch" +] + +setup( + packages=find_packages(exclude=['tests']), + include_package_data=True, + install_requires=requirements, + license="MIT license", + zip_safe=False, + keywords='brain-score template', + classifiers=[ + 'Development Status :: 2 - Pre-Alpha', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: MIT License', + 'Natural Language :: English', + 'Programming Language :: Python :: 3.7', + ], + test_suite='tests', +) diff --git a/brainscore_vision/models/alexnet_7be5be79_convs/test.py b/brainscore_vision/models/alexnet_7be5be79_convs/test.py new file mode 100644 index 000000000..e594ba9e1 --- /dev/null +++ b/brainscore_vision/models/alexnet_7be5be79_convs/test.py @@ -0,0 +1 @@ +# Left empty as part of 2023 models migration From 4dd1d0f48390ec8c1a0508fdff2accc28f4015dd Mon Sep 17 00:00:00 2001 From: David Coggan <06case_hoses@icloud.com> Date: Tue, 10 Sep 2024 08:22:06 -0500 Subject: [PATCH 16/28] Updated accuracy distance metric to accommodate condition-wise approaches (#1217) Co-authored-by: ddcoggan --- .../metrics/accuracy_distance/metric.py | 43 +++++++++++++++++-- .../metrics/accuracy_distance/test.py | 15 +++++++ 2 files changed, 54 insertions(+), 4 deletions(-) diff --git a/brainscore_vision/metrics/accuracy_distance/metric.py b/brainscore_vision/metrics/accuracy_distance/metric.py index fb31a7280..eb47e3bba 100644 --- a/brainscore_vision/metrics/accuracy_distance/metric.py +++ b/brainscore_vision/metrics/accuracy_distance/metric.py @@ -10,17 +10,52 @@ class AccuracyDistance(Metric): """ - Computes the accuracy distance using the relative distance between the source and target accuracies, adjusted - for the maximum possible difference between the two accuracies. + Computes the accuracy distance using the relative distance between the + source and target accuracies, adjusted for the maximum possible + difference between the two accuracies. By default, the distance is computed + from a single accuracy score on the entire BehavioralAssembly. However, + the distance can also be computed on a condition-wise basis using the + 'variables' argument. The advantage of the condition-wise approach is that + it can separate two models with identical overall accuracy if one exhibits a + more target-like pattern of performance across conditions. 
""" - def __call__(self, source: BehavioralAssembly, target: BehavioralAssembly) -> Score: + def __call__(self, source: BehavioralAssembly, target: + BehavioralAssembly, variables: tuple=()) -> Score: """Target should be the entire BehavioralAssembly, containing truth values.""" subjects = self.extract_subjects(target) subject_scores = [] for subject in subjects: subject_assembly = target.sel(subject=subject) - subject_score = self.compare_single_subject(source, subject_assembly) + + # compute single score across the entire dataset + if len(variables) == 0: + subject_score = self.compare_single_subject(source, subject_assembly) + + # compute scores for each condition, then average + else: + cond_scores = [] + + # get iterator across all combinations of variables + if len(variables) == 1: + conditions = set(subject_assembly[variables[0]].values) + conditions = [[c] for c in conditions] # to mimic itertools.product + else: + conditions = itertools.product( + *[set(subject_assembly[v].values) for v in variables]) + + # loop over conditions and compute scores + for cond in conditions: + indexers = {v: cond[i] for i, v in enumerate(variables)} + subject_cond_assembly = subject_assembly.sel(**indexers) + source_cond_assembly = source.sel(**indexers) + # to accommodate unbalanced designs, skip combinations of + # variables that don't exist in both assemblies + if len(subject_cond_assembly) and len(source_cond_assembly): + cond_scores.append(self.compare_single_subject( + source_cond_assembly, subject_cond_assembly)) + subject_score = Score(np.mean(cond_scores)) + subject_score = subject_score.expand_dims('subject') subject_score['subject'] = 'subject', [subject] subject_scores.append(subject_score) diff --git a/brainscore_vision/metrics/accuracy_distance/test.py b/brainscore_vision/metrics/accuracy_distance/test.py index 2fc15b792..d6414b790 100644 --- a/brainscore_vision/metrics/accuracy_distance/test.py +++ b/brainscore_vision/metrics/accuracy_distance/test.py @@ -12,6 +12,20 @@ def test_score(): assert score == approx(0.74074074) +def test_score_single_variable(): + assembly = _make_data() + metric = load_metric('accuracy_distance') + score = metric(assembly.sel(subject='C'), assembly, ('condition',)) + assert score == approx(0.55555556) + + +def test_score_multi_variable(): + assembly = _make_data() + metric = load_metric('accuracy_distance') + score = metric(assembly.sel(subject='C'), assembly, ('condition','animacy')) + assert score == approx(0.55555556) + + def test_has_error(): assembly = _make_data() metric = load_metric('accuracy_distance') @@ -38,5 +52,6 @@ def _make_data(): coords={'stimulus_id': ('presentation', np.resize(np.arange(9), 9 * 3)), 'truth': ('presentation', np.resize(['dog', 'cat', 'chair'], 9 * 3)), 'condition': ('presentation', np.resize([1, 1, 1, 2, 2, 2, 3, 3, 3], 9 * 3)), + 'animacy': ('presentation', np.resize(['animate', 'animate', 'inanimate'], 9 * 3)), 'subject': ('presentation', ['A'] * 9 + ['B'] * 9 + ['C'] * 9)}, dims=['presentation']) From b17ed0b9e1067139d4f41eefd26ff3f56d498b11 Mon Sep 17 00:00:00 2001 From: YingtianDt <90408985+YingtianDt@users.noreply.github.com> Date: Tue, 10 Sep 2024 18:28:03 +0200 Subject: [PATCH 17/28] Add temporal metrics; add temporal versions of MajajHong2015 (#1109) * feature: support temporal models for neural alignment by changing TemporalIgnore to Temporal Aligned * add example temporal submission * complete new framework * new module: temporal model helpers * change the arch of temporal; add tutorials * improve: better naming *
update: wrapper tutorial on brain model * add feature: inferencer identifier tracked by extractor for result caching * fix: video fps sampling; need more tests! * fix bugs: video sampling based on fps was wrong. * add mmaction2 models; add more features to the inferencers * PR: temporal model helpers * PR fix: not including gitmodules for now * Update brainscore_vision/model_helpers/brain_transformation/temporal.py Co-authored-by: Martin Schrimpf * Update brainscore_vision/model_helpers/brain_transformation/temporal.py Co-authored-by: Martin Schrimpf * Update brainscore_vision/model_helpers/brain_transformation/temporal.py Co-authored-by: Martin Schrimpf * Update brainscore_vision/models/temporal_models/test.py Co-authored-by: Martin Schrimpf * add mae_st; add ding2012 * try new arch * init ding2012 * add tests for temporal model helpers; add block inferencer * Delete tests/test_model_helpers/temporal/test___init__.py delete the old test * add benchmark ding2012 * add mutliple libs for temporal models * change executor output format; add more inference tests; init load_weight in s3 * add openstl * update backend for executor * feat:load_weight_file and corresponding test * change:resize strategy changed from bilinear to pooling * change:resize strategy changed from bilinear to pooling * fix mae_st submission * minor * fix:dtype in assembly time align * minor * update model submissions * fix dependency * refactor: simplify the inferencer methods * fix:block inferencer, neuroid coord while merging * fix:inferencer identifier * fix:weigh download * change tests to have max_workers=1 * revert screen.py * not submit region_layer_map * remove torch dependency * make fake modules in tests * add torch to requirements; avoid torch in tests * minor * minor * np.object changed to object * remove return in tests * fix insertion position bug * Apply suggestions from code review add: more type hints Co-authored-by: Martin Schrimpf * add: more type hints and comments * minor * pr:only commit temporal model helpers * pr: add one model for example * undo whole_brain in Brainodel.RecordingTarget * use logger and fix newlines * fix: video fps with copy was wrong * feat:fractional max_spatial_size * downsample layers in VideoMAE * fix:video sampling wrong duration * add more tests * fix merge * fix merge * module refactor; add more input test * add more temporal models * fix videomaev2 sha * fix:temporal_modelmae_st * change:video conservative loading; rename:image to pil image * fix:video last frame sampling; fix_time_naming * ignore pytest_cache * re-trigger tests * add joblib pool error management; fix video/image path recognizer * update: naming of failed to pickle func in joblibmapper * add temporal metric helpers * add temporal version of mamjajhong2015 * Update benchmark.py type hint * Update benchmark.py * Update brainscore_vision/metric_helpers/temporal.py Co-authored-by: Martin Schrimpf * Update brainscore_vision/metrics/internal_consistency/__init__.py Co-authored-by: Martin Schrimpf * Update benchmark.py --------- Co-authored-by: Yingtian Tang Co-authored-by: Martin Schrimpf Co-authored-by: Martin Schrimpf Co-authored-by: deirdre-k <95875723+deirdre-k@users.noreply.github.com> Co-authored-by: Michael Ferguson --- .../benchmarks/majajhong2015/__init__.py | 5 + .../benchmarks/majajhong2015/benchmark.py | 44 +++++-- brainscore_vision/metric_helpers/temporal.py | 119 ++++++++++++++++++ .../metric_helpers/xarray_utils.py | 59 +++++++++ .../metrics/internal_consistency/__init__.py | 4 + 
.../regression_correlation/__init__.py | 9 ++ .../metrics/regression_correlation/metric.py | 10 ++ tests/test_metric_helpers/test_temporal.py | 80 ++++++++++++ 8 files changed, 320 insertions(+), 10 deletions(-) create mode 100644 brainscore_vision/metric_helpers/temporal.py create mode 100644 tests/test_metric_helpers/test_temporal.py diff --git a/brainscore_vision/benchmarks/majajhong2015/__init__.py b/brainscore_vision/benchmarks/majajhong2015/__init__.py index 24fe8651e..5ae8988fd 100644 --- a/brainscore_vision/benchmarks/majajhong2015/__init__.py +++ b/brainscore_vision/benchmarks/majajhong2015/__init__.py @@ -11,3 +11,8 @@ benchmark_registry['MajajHong2015public.V4-pls'] = MajajHongV4PublicBenchmark benchmark_registry['MajajHong2015public.IT-pls'] = MajajHongITPublicBenchmark + +# temporal +from .benchmark import MajajHongV4TemporalPublicBenchmark, MajajHongITTemporalPublicBenchmark +benchmark_registry['MajajHong2015public.V4-temporal-pls'] = lambda: MajajHongV4TemporalPublicBenchmark(time_interval=10) +benchmark_registry['MajajHong2015public.IT-temporal-pls'] = lambda: MajajHongITTemporalPublicBenchmark(time_interval=10) diff --git a/brainscore_vision/benchmarks/majajhong2015/benchmark.py b/brainscore_vision/benchmarks/majajhong2015/benchmark.py index 766f5c93f..5270ab7af 100644 --- a/brainscore_vision/benchmarks/majajhong2015/benchmark.py +++ b/brainscore_vision/benchmarks/majajhong2015/benchmark.py @@ -1,7 +1,8 @@ from brainscore_core import Metric from brainscore_vision import load_metric, Ceiling, load_ceiling, load_dataset -from brainscore_vision.benchmark_helpers.neural_common import NeuralBenchmark, average_repetition +from brainscore_vision.benchmark_helpers.neural_common import NeuralBenchmark, average_repetition, apply_keep_attrs +from brainscore_vision.model_helpers.brain_transformation.temporal import assembly_time_align VISUAL_DEGREES = 8 NUMBER_OF_TRIALS = 50 @@ -20,13 +21,14 @@ eprint = {https://www.jneurosci.org/content/35/39/13402.full.pdf}, journal = {Journal of Neuroscience}}""" -pls_metric = lambda: load_metric('pls', crossvalidation_kwargs=dict(stratification_coord='object_name')) - +crossvalidation_kwargs = dict(stratification_coord='object_name') +pls_metric = lambda: load_metric('pls', crossvalidation_kwargs=crossvalidation_kwargs) +spantime_pls_metric = lambda: load_metric('spantime_pls', crossvalidation_kwargs=crossvalidation_kwargs) def _DicarloMajajHong2015Region(region: str, access: str, identifier_metric_suffix: str, - similarity_metric: Metric, ceiler: Ceiling): - assembly_repetition = load_assembly(average_repetitions=False, region=region, access=access) - assembly = load_assembly(average_repetitions=True, region=region, access=access) + similarity_metric: Metric, ceiler: Ceiling, time_interval: float = None): + assembly_repetition = load_assembly(average_repetitions=False, region=region, access=access, time_interval=time_interval) + assembly = load_assembly(average_repetitions=True, region=region, access=access, time_interval=time_interval) benchmark_identifier = f'MajajHong2015.{region}' + ('.public' if access == 'public' else '') return NeuralBenchmark(identifier=f'{benchmark_identifier}-{identifier_metric_suffix}', version=3, assembly=assembly, similarity_metric=similarity_metric, @@ -60,13 +62,35 @@ def MajajHongITPublicBenchmark(): ceiler=load_ceiling('internal_consistency')) -def load_assembly(average_repetitions, region, access='private'): - assembly = load_dataset(f'MajajHong2015.{access}') +def 
MajajHongV4TemporalPublicBenchmark(time_interval: float = None): + return _DicarloMajajHong2015Region(region='V4', access='public', identifier_metric_suffix='pls', + similarity_metric=spantime_pls_metric(), time_interval=time_interval, + ceiler=load_ceiling('internal_consistency_temporal')) + + +def MajajHongITTemporalPublicBenchmark(time_interval: float = None): + return _DicarloMajajHong2015Region(region='IT', access='public', identifier_metric_suffix='pls', + similarity_metric=spantime_pls_metric(), time_interval=time_interval, + ceiler=load_ceiling('internal_consistency_temporal')) + + +def load_assembly(average_repetitions: bool, region: str, access: str = 'private', time_interval: float = None): + temporal = time_interval is not None + if not temporal: + assembly = load_dataset(f'MajajHong2015.{access}') + assembly = assembly.squeeze("time_bin") + else: + assembly = load_dataset(f'MajajHong2015.temporal.{access}') + assembly = assembly.__class__(assembly) + target_time_bins = [ + (t, t+time_interval) for t in range(0, assembly.time_bin_end.max().item()-time_interval, time_interval) + ] + assembly = apply_keep_attrs(assembly, lambda assembly: assembly_time_align(assembly, target_time_bins)) + assembly = assembly.sel(region=region) assembly['region'] = 'neuroid', [region] * len(assembly['neuroid']) - assembly = assembly.squeeze("time_bin") assembly.load() - assembly = assembly.transpose('presentation', 'neuroid') + assembly = assembly.transpose('presentation', 'neuroid', ...) if average_repetitions: assembly = average_repetition(assembly) return assembly diff --git a/brainscore_vision/metric_helpers/temporal.py b/brainscore_vision/metric_helpers/temporal.py new file mode 100644 index 000000000..0c110b9f2 --- /dev/null +++ b/brainscore_vision/metric_helpers/temporal.py @@ -0,0 +1,119 @@ +import xarray as xr +import numpy as np + +from brainscore_vision.benchmark_helpers.neural_common import Score +from brainscore_vision.metric_helpers.transformations import standard_error_of_the_mean + +from .xarray_utils import apply_over_dims, recursive_op + + +# take the mean of scores (medians of single neuron scores) over time + + +def average_over_presentation(score: Score) -> Score: + raw = score + score = raw.mean('presentation') + score.attrs['raw'] = raw + return score + + +# PerOps is applied to every slice/chunk of the xarray along the specified dimensions +class PerOps: + def __init__(self, callable, dims, check_coords=[]): + # for coordinate checking, they are supposed to be the same across assemblies + self.dims = dims + self.callable = callable + self.check_coords = check_coords + + def __call__(self, *asms): + for check_coord in self.check_coords: + asms = [asm.sortby(check_coord) for asm in asms] + for asm in asms[1:]: + assert (asm[check_coord].values == asms[0][check_coord].values).all() + ret = apply_over_dims(self.callable, *asms, dims=self.dims) + return ret + + +# SpanOps aggregates specified dimensions to one dimension +class SpanOps: + def __init__(self, callable, source_dims, aggregated_dim, resample=False): + # if resample, randomly choose samples from the aggregated dimension, + # whose size is the same as the assembly.sizes[aggregated_dim] + self.source_dims = source_dims + self.aggregated_dim = aggregated_dim + self.callable = callable + self.resample = resample + + def __call__(self, *asms): + asms = [self._stack(asm) for asm in asms] + return self.callable(*asms) + + def _stack(self, assembly): + assembly_type = type(assembly) + size = assembly.sizes[self.aggregated_dim] 
+ assembly = xr.DataArray(assembly) # xarray cannot deal with stacking MultiIndex (pydata/xarray#1554) + assembly = assembly.reset_index(self.source_dims) + assembly = assembly.rename({dim:dim+"_" for dim in self.source_dims}) # we'll call stacked timebins "presentation" + assembly = assembly.stack({self.aggregated_dim : [dim+"_" for dim in self.source_dims]}) + if self.resample: + indices = np.random.randint(0, assembly.sizes[self.aggregated_dim], size) + assembly = assembly.isel({self.aggregated_dim: indices}) + return assembly_type(assembly) + +class PerTime(PerOps): + def __init__(self, callable, time_dim="time_bin", check_coord="time_bin_start", **kwargs): + self.time_bin = time_dim + super().__init__(callable, dims=[time_dim], check_coords=[check_coord], **kwargs) + +class PerPresentation(PerOps): + def __init__(self, callable, presentation_dim="presentation", check_coord="stimulus_id", **kwargs): + self.presentation_dim = presentation_dim + super().__init__(callable, dims=[presentation_dim], check_coords=[check_coord], **kwargs) + +class PerNeuroid(PerOps): + def __init__(self, callable, neuroid_dim="neuroid", check_coord="neuroid_id", **kwargs): + self.neuroid_dim = neuroid_dim + super().__init__(callable, dims=[neuroid_dim], check_coords=[check_coord], **kwargs) + +class SpanTime(SpanOps): + def __init__(self, callable, time_dim="time_bin", presentation_dim="presentation", resample=False): + self.time_dim = time_dim + self.presentation_dim = presentation_dim + source_dims = [self.time_dim, self.presentation_dim] + aggregated_dim = self.presentation_dim + super().__init__(callable, source_dims, aggregated_dim, resample=resample) + +class SpanTimeRegression: + """ + Fits a regression with weights shared across the time bins. + """ + + def __init__(self, regression): + self._regression = regression + + def fit(self, source, target): + assert (source['time_bin'].values == target['time_bin'].values).all() + SpanTime(self._regression.fit)(source, target) + + def predict(self, source): + return PerTime(self._regression.predict)(source) + +class PerTimeRegression: + """ + Fits a regression with different weights for each time bin. 
+ """ + + def __init__(self, regression): + self._regression = regression + + def fit(self, source, target): + # Lazy fit until predict + assert (source['time_bin'].values == target['time_bin'].values).all() + self._train_source = source + self._train_target = target + + def predict(self, source): + def fit_predict(train_source, train_target, test_source): + self._regression.fit(train_source, train_target) + return self._regression.predict(test_source) + return PerTime(fit_predict)(self._train_source, self._train_target, source) \ No newline at end of file diff --git a/brainscore_vision/metric_helpers/xarray_utils.py b/brainscore_vision/metric_helpers/xarray_utils.py index ce67654ff..8998b6003 100644 --- a/brainscore_vision/metric_helpers/xarray_utils.py +++ b/brainscore_vision/metric_helpers/xarray_utils.py @@ -1,4 +1,5 @@ import numpy as np +import xarray as xr from brainio.assemblies import NeuroidAssembly, array_is_element, walk_coords from brainscore_vision.metric_helpers import Defaults @@ -90,3 +91,61 @@ def __call__(self, prediction, target): for coord, dims, values in walk_coords(target) if dims == neuroid_dims}, dims=neuroid_dims) return result + + +# ops that also applies to attrs (and attrs of attrs), which are xarrays +def recursive_op(*arrs, op=lambda x:x): + # the attrs structure of each arr must be the same + val = op(*arrs) + attrs = arrs[0].attrs + for attr in attrs: + attr_val = arrs[0].attrs[attr] + if isinstance(attr_val, xr.DataArray): + attr_arrs = [arr.attrs[attr] for arr in arrs] + attr_val = recursive_op(*attr_arrs, op=op) + val.attrs[attr] = attr_val + return val + + +# apply a callable to every slice of the xarray along the specified dimensions +def apply_over_dims(callable, *asms, dims, njobs=-1): + asms = [asm.transpose(*dims, ...) 
for asm in asms] + sizes = [asms[0].sizes[dim] for dim in dims] + + def apply_helper(sizes, dims, *asms): + xarr = [] + attrs = {} + size = sizes[0] + rsizes = sizes[1:] + dim = dims[0] + rdims = dims[1:] + + if len(sizes) == 1: + # parallel execution on the last applied dimension + from joblib import Parallel, delayed + results = Parallel(n_jobs=njobs)(delayed(callable)(*[asm.isel({dim:s}) for asm in asms]) for s in range(size)) + else: + results = [] + for s in range(size): + arr = apply_helper(rsizes, rdims, *[asm.isel({dim:s}) for asm in asms]) + results.append(arr) + + for arr in results: + if arr is not None: + for k,v in arr.attrs.items(): + assert isinstance(v, xr.DataArray) + attrs.setdefault(k, []).append(v.expand_dims(dim)) + xarr.append(arr) + + if not xarr: + return + else: + xarr = xr.concat(xarr, dim=dim) + attrs = {k: xr.concat(vs, dim=dim) for k,vs in attrs.items()} + xarr.coords[dim] = asms[0].coords[dim] + for k,v in attrs.items(): + attrs[k].coords[dim] = asms[0].coords[dim] + xarr.attrs[k] = attrs[k] + return xarr + + return apply_helper(sizes, dims, *asms) \ No newline at end of file diff --git a/brainscore_vision/metrics/internal_consistency/__init__.py b/brainscore_vision/metrics/internal_consistency/__init__.py index bd71776be..ae6a41ea6 100644 --- a/brainscore_vision/metrics/internal_consistency/__init__.py +++ b/brainscore_vision/metrics/internal_consistency/__init__.py @@ -1,4 +1,8 @@ from brainscore_vision import metric_registry from .ceiling import InternalConsistency +from brainscore_vision.metric_helpers.temporal import PerTime + + metric_registry['internal_consistency'] = InternalConsistency +metric_registry['internal_consistency_temporal'] = lambda *args, **kwargs: PerTime(InternalConsistency(*args, **kwargs)) \ No newline at end of file diff --git a/brainscore_vision/metrics/regression_correlation/__init__.py b/brainscore_vision/metrics/regression_correlation/__init__.py index 2f8019b3f..691e82685 100644 --- a/brainscore_vision/metrics/regression_correlation/__init__.py +++ b/brainscore_vision/metrics/regression_correlation/__init__.py @@ -11,6 +11,15 @@ metric_registry['linear_predictivity'] = lambda *args, **kwargs: CrossRegressedCorrelation( regression=linear_regression(), correlation=pearsonr_correlation(), *args, **kwargs) +# temporal metrics +from .metric import SpanTimeCrossRegressedCorrelation + +metric_registry['spantime_pls'] = lambda *args, **kwargs: SpanTimeCrossRegressedCorrelation( + regression=pls_regression(), correlation=pearsonr_correlation(), *args, **kwargs) +metric_registry['spantime_ridge'] = lambda *args, **kwargs: SpanTimeCrossRegressedCorrelation( + regression=ridge_regression(), correlation=pearsonr_correlation(), *args, **kwargs) + + BIBTEX = """@article{schrimpf2018brain, title={Brain-score: Which artificial neural network for object recognition is most brain-like?}, author={Schrimpf, Martin and Kubilius, Jonas and Hong, Ha and Majaj, Najib J and Rajalingham, Rishi and Issa, Elias B and Kar, Kohitij and Bashivan, Pouya and Prescott-Roy, Jonathan and Geiger, Franziska and others}, diff --git a/brainscore_vision/metrics/regression_correlation/metric.py b/brainscore_vision/metrics/regression_correlation/metric.py index 365f63868..a09ba03e0 100644 --- a/brainscore_vision/metrics/regression_correlation/metric.py +++ b/brainscore_vision/metrics/regression_correlation/metric.py @@ -8,6 +8,7 @@ from brainscore_core.metrics import Metric, Score from brainscore_vision.metric_helpers.transformations import CrossValidation from 
brainscore_vision.metric_helpers.xarray_utils import XarrayRegression, XarrayCorrelation +from brainscore_vision.metric_helpers.temporal import SpanTimeRegression, PerTime class CrossRegressedCorrelation(Metric): @@ -65,6 +66,15 @@ def predict(self, X): return Ypred +# make the crc to consider time as a sample dimension +def SpanTimeCrossRegressedCorrelation(regression, correlation, *args, **kwargs): + return CrossRegressedCorrelation( + regression=SpanTimeRegression(regression), + correlation=PerTime(correlation), + *args, **kwargs + ) + + def pls_regression(regression_kwargs=None, xarray_kwargs=None): regression_defaults = dict(n_components=25, scale=False) regression_kwargs = {**regression_defaults, **(regression_kwargs or {})} diff --git a/tests/test_metric_helpers/test_temporal.py b/tests/test_metric_helpers/test_temporal.py new file mode 100644 index 000000000..64dffe8de --- /dev/null +++ b/tests/test_metric_helpers/test_temporal.py @@ -0,0 +1,80 @@ +import numpy as np +import scipy.stats +from pytest import approx +from sklearn.linear_model import LinearRegression + +from brainio.assemblies import NeuroidAssembly +from brainscore_vision.metric_helpers.xarray_utils import XarrayRegression, XarrayCorrelation +from brainscore_vision.metric_helpers.temporal import PerTime, SpanTime, PerTimeRegression, SpanTimeRegression + + +class TestMetricHelpers: + def test_pertime_ops(self): + jumbled_source = NeuroidAssembly(np.random.rand(500, 10, 20), + coords={'stimulus_id': ('presentation', list(reversed(range(500)))), + 'image_meta': ('presentation', [0] * 500), + 'neuroid_id': ('neuroid', list(reversed(range(10)))), + 'neuroid_meta': ('neuroid', [0] * 10), + 'time_bin_start': ('time_bin', np.arange(0, 400, 20)), + 'time_bin_end': ('time_bin', np.arange(20, 420, 20))}, + dims=['presentation', 'neuroid', 'time_bin']) + mean_neuroid = lambda arr: arr.mean('neuroid') + pertime_mean_neuroid = PerTime(mean_neuroid) + output = pertime_mean_neuroid(jumbled_source) + output = output.transpose('presentation', 'time_bin') + target = jumbled_source.transpose('presentation', 'time_bin', 'neuroid').mean('neuroid') + assert (output == approx(target)).all().item() + + def test_spantime_ops(self): + jumbled_source = NeuroidAssembly(np.random.rand(500, 10, 20), + coords={'stimulus_id': ('presentation', list(reversed(range(500)))), + 'image_meta': ('presentation', [0] * 500), + 'neuroid_id': ('neuroid', list(reversed(range(10)))), + 'neuroid_meta': ('neuroid', [0] * 10), + 'time_bin_start': ('time_bin', np.arange(0, 400, 20)), + 'time_bin_end': ('time_bin', np.arange(20, 420, 20))}, + dims=['presentation', 'neuroid', 'time_bin']) + mean_presentation = lambda arr: arr.mean("presentation") + spantime_mean_presentation = SpanTime(mean_presentation) + output = spantime_mean_presentation(jumbled_source) + output = output.transpose('neuroid') + target = jumbled_source.transpose('presentation', 'time_bin', 'neuroid').mean('presentation').mean('time_bin') + assert (output == approx(target)).all().item() + + def test_pertime_regression(self): + jumbled_source = NeuroidAssembly(np.random.rand(500, 10, 20), + coords={'stimulus_id': ('presentation', list(reversed(range(500)))), + 'image_meta': ('presentation', [0] * 500), + 'neuroid_id': ('neuroid', list(reversed(range(10)))), + 'neuroid_meta': ('neuroid', [0] * 10), + 'time_bin_start': ('time_bin', np.arange(0, 400, 20)), + 'time_bin_end': ('time_bin', np.arange(20, 420, 20))}, + dims=['presentation', 'neuroid', 'time_bin']) + target = 
jumbled_source.sortby(['stimulus_id', 'neuroid_id']) + pertime_regression = PerTimeRegression(XarrayRegression(LinearRegression())) + pertime_regression.fit(jumbled_source, target) + prediction = pertime_regression.predict(jumbled_source) + prediction = prediction.transpose(*target.dims) + # do not test for alignment of metadata - it is only important that the data is well-aligned with the metadata. + np.testing.assert_array_almost_equal(prediction.sortby(['stimulus_id', 'neuroid_id', 'time_bin']).values, + target.sortby(['stimulus_id', 'neuroid_id', 'time_bin']).values) + + + def test_spantime_regression(self): + jumbled_source = NeuroidAssembly(np.random.rand(500, 10, 20), + coords={'stimulus_id': ('presentation', list(reversed(range(500)))), + 'image_meta': ('presentation', [0] * 500), + 'neuroid_id': ('neuroid', list(reversed(range(10)))), + 'neuroid_meta': ('neuroid', [0] * 10), + 'time_bin_start': ('time_bin', np.arange(0, 400, 20)), + 'time_bin_end': ('time_bin', np.arange(20, 420, 20))}, + dims=['presentation', 'neuroid', 'time_bin']) + target = jumbled_source.sortby(['stimulus_id', 'neuroid_id']) + spantime_regression = SpanTimeRegression(XarrayRegression(LinearRegression())) + spantime_regression.fit(jumbled_source, target) + prediction = spantime_regression.predict(jumbled_source) + prediction = prediction.transpose(*target.dims) + # do not test for alignment of metadata - it is only important that the data is well-aligned with the metadata. + np.testing.assert_array_almost_equal(prediction.sortby(['stimulus_id', 'neuroid_id', 'time_bin']).values, + target.sortby(['stimulus_id', 'neuroid_id', 'time_bin']).values) + From 4e0ca0ce55b99aec71329bf024801d4e3a84baef Mon Sep 17 00:00:00 2001 From: Sam Winebrake <85908068+samwinebrake@users.noreply.github.com> Date: Tue, 10 Sep 2024 20:05:10 -0400 Subject: [PATCH 18/28] Python 3.11 Migration (v2.1.0) (#1199) * changes necessary for 3.11 * change references of 3.7 to 3.11, remove tf and keras * fix geirhos data tests * Normalize deprecated * np.float is deprecated * update w/ master (#1066) * add r101_eBarlow_lmda_01_1 to models (#1037) Co-authored-by: AutoJenkins * update hmax requirements.txt (#1053) * update hmax requirements.txt to add torchvision (#1054) * fix mobilenet `pooler` layer prefix (#1055) * add r34_eMMCR_Mom_lmda_02_1 to models (#1061) Co-authored-by: AutoJenkins * add r101_eBarlow_lmda_02_1_copy to models (#1059) Co-authored-by: AutoJenkins * add r34_eMMCR_Mom_lmda_01_1 to models (#1060) Co-authored-by: AutoJenkins * add r34_eMMCR_Mom_Vanilla_1 to models (#1063) Co-authored-by: AutoJenkins * ignore time dimension in Bracci2019 (#1064) --------- Co-authored-by: Katherine Fairchild Co-authored-by: AutoJenkins Co-authored-by: Michael Ferguson Co-authored-by: Martin Schrimpf * update (#1093) * add r101_eBarlow_lmda_01_1 to models (#1037) Co-authored-by: AutoJenkins * update hmax requirements.txt (#1053) * update hmax requirements.txt to add torchvision (#1054) * fix mobilenet `pooler` layer prefix (#1055) * add r34_eMMCR_Mom_lmda_02_1 to models (#1061) Co-authored-by: AutoJenkins * add r101_eBarlow_lmda_02_1_copy to models (#1059) Co-authored-by: AutoJenkins * add r34_eMMCR_Mom_lmda_01_1 to models (#1060) Co-authored-by: AutoJenkins * add r34_eMMCR_Mom_Vanilla_1 to models (#1063) Co-authored-by: AutoJenkins * ignore time dimension in Bracci2019 (#1064) * Update behavior.py (#742) Co-authored-by: Sam Winebrake <85908068+samwinebrake@users.noreply.github.com> Co-authored-by: deirdre-k 
<95875723+deirdre-k@users.noreply.github.com> * add eBarlow_lmda_02_1_full to models (#1067) Co-authored-by: AutoJenkins * oddoneout: build full stimulus id index for non-numeric ids (#1068) * Add mobilevit_small - take 2 (#1051) * Add mobilevit_small - take 2 * Update brainscore_vision/models/mobilevit_small/model.py Co-authored-by: Martin Schrimpf --------- Co-authored-by: Martin Schrimpf Co-authored-by: Michael Ferguson * add yudixie_resnet18_240719_1 to models (#1070) Co-authored-by: AutoJenkins * add yudixie_resnet18_240719_0 to models (#1069) Co-authored-by: AutoJenkins * add yudixie_resnet18_240719_2 to models (#1071) Co-authored-by: AutoJenkins * add yudixie_resnet18_240719_10 to models (#1079) Co-authored-by: AutoJenkins --------- Co-authored-by: Katherine Fairchild Co-authored-by: AutoJenkins Co-authored-by: Michael Ferguson Co-authored-by: Martin Schrimpf Co-authored-by: Linus Sommer <95619282+linus-md@users.noreply.github.com> Co-authored-by: deirdre-k <95875723+deirdre-k@users.noreply.github.com> Co-authored-by: Kartik Pradeepan * remove keraswrapper * remove brendel * Trying a fix for travis * add necessary imports * Updating ubuntu version as xenial doesn't have 3.11 * Adding fix for pyproject.toml * Pin numpy below 2.0 * pointing to test branch for travis * update s3 bucket to brainscore-unittests * update s3 bucket to brainscore-unittests (rajalingham2020) * update s3 bucket to brainscore-unittests (sanghavi2020) * update folder name of models to folder_name="models-to-integrate-for-2.0" * update folder name of MAE model to folder_name="models-to-integrate-for-2.0" * update folder name of MAEv2 to folder_name="models-to-integrate-for-2.0" * update folder name of mae_st to folder_name="models-to-integrate-for-2.0" * PredNet commented out in init * remove prednet dead code * remove prednet code * scialom data: update image_id to stimulus_id * add number_of_trials keyword to rajalingham2018 * pointing to branch * Removing dash from python module name (#1118) * Removing dash from module name (#1119) * add require_variance keyword to precomputedfeatures call * add s3 download if not present to test helper * add missing s3 import * Update test_helper.py * pull item from ceiling Need to check with martin if this will break anything * update coggan benchmark to work w/ new pandas/python * add identifier function to pre computed features * forgot to add string line * update rajalingham2018 deprecated test * update w/ master (#1194) * add r101_eBarlow_lmda_01_1 to models (#1037) Co-authored-by: AutoJenkins * update hmax requirements.txt (#1053) * update hmax requirements.txt to add torchvision (#1054) * fix mobilenet `pooler` layer prefix (#1055) * add r34_eMMCR_Mom_lmda_02_1 to models (#1061) Co-authored-by: AutoJenkins * add r101_eBarlow_lmda_02_1_copy to models (#1059) Co-authored-by: AutoJenkins * add r34_eMMCR_Mom_lmda_01_1 to models (#1060) Co-authored-by: AutoJenkins * add r34_eMMCR_Mom_Vanilla_1 to models (#1063) Co-authored-by: AutoJenkins * ignore time dimension in Bracci2019 (#1064) * Update behavior.py (#742) Co-authored-by: Sam Winebrake <85908068+samwinebrake@users.noreply.github.com> Co-authored-by: deirdre-k <95875723+deirdre-k@users.noreply.github.com> * add eBarlow_lmda_02_1_full to models (#1067) Co-authored-by: AutoJenkins * oddoneout: build full stimulus id index for non-numeric ids (#1068) * Add mobilevit_small - take 2 (#1051) * Add mobilevit_small - take 2 * Update brainscore_vision/models/mobilevit_small/model.py Co-authored-by: Martin Schrimpf --------- 
Co-authored-by: Martin Schrimpf Co-authored-by: Michael Ferguson * add yudixie_resnet18_240719_1 to models (#1070) Co-authored-by: AutoJenkins * add yudixie_resnet18_240719_0 to models (#1069) Co-authored-by: AutoJenkins * add yudixie_resnet18_240719_2 to models (#1071) Co-authored-by: AutoJenkins * add yudixie_resnet18_240719_10 to models (#1079) Co-authored-by: AutoJenkins * Add cv_18_dagger_408_pretrained (#1104) * Add model files * Adjust timm version --------- Co-authored-by: Ethan Pellegrini * add eBarlow_lmda_02_200_full to models (#1121) Co-authored-by: AutoJenkins * remove old tutorials in favor of new link (#1170) --------- Co-authored-by: Katherine Fairchild Co-authored-by: AutoJenkins Co-authored-by: Michael Ferguson Co-authored-by: Martin Schrimpf Co-authored-by: Linus Sommer <95619282+linus-md@users.noreply.github.com> Co-authored-by: deirdre-k <95875723+deirdre-k@users.noreply.github.com> Co-authored-by: Kartik Pradeepan Co-authored-by: pellegreene <36171165+pellegreene@users.noreply.github.com> Co-authored-by: Ethan Pellegrini * environment lock for 2.1.0 * Rename environment.yml to environment_lock.yml * add test dependencies and move certain dependencies to pip * remove branch from core dependency * new env lock with all dependencies of env (including test) * add informative comment to env lock * Update .travis.yml * point to 3.11 branches * update pointers to 3.11 branches * add back check for forks in travis * remove f string from full_name variable * remove if/else that enabled tf * remove folder_name for models that were moved to correct directory * version less specific (2.1.0 to 2.1) * remove protobuf * remove python specification in setup.pys * Update mismatched s3 folder name * Update environment_lock.yml w/ main branches * update branch pointers to main --------- Co-authored-by: Katherine Fairchild Co-authored-by: AutoJenkins Co-authored-by: Michael Ferguson Co-authored-by: Martin Schrimpf Co-authored-by: Linus Sommer <95619282+linus-md@users.noreply.github.com> Co-authored-by: deirdre-k <95875723+deirdre-k@users.noreply.github.com> Co-authored-by: Kartik Pradeepan Co-authored-by: Deirdre Kelliher Co-authored-by: pellegreene <36171165+pellegreene@users.noreply.github.com> Co-authored-by: Ethan Pellegrini --- .github/workflows/score_new_plugins.yml | 8 +- .readthedocs.yml | 2 +- .travis.yml | 13 +- README.md | 2 +- .../benchmark_helpers/__init__.py | 8 + .../benchmark_helpers/test_helper.py | 3 + .../coggan2024_behavior/benchmark.py | 12 +- .../domain_transfer_analysis.py | 7 +- brainscore_vision/benchmarks/kar2019/test.py | 2 +- .../benchmarks/rajalingham2018/test.py | 47 +---- .../benchmarks/rajalingham2020/test.py | 2 +- .../benchmarks/sanghavi2020/test.py | 2 +- brainscore_vision/data/geirhos2021/test.py | 4 +- brainscore_vision/data/scialom2024/test.py | 2 +- .../metrics/internal_consistency/test.py | 2 +- brainscore_vision/metrics/ost/metric.py | 2 +- .../model_helpers/activations/__init__.py | 2 - .../model_helpers/activations/core.py | 13 +- .../model_helpers/activations/keras.py | 92 --------- .../activations/temporal/inputs/base.py | 1 - .../model_helpers/activations/tensorflow.py | 71 ------- .../models/bp_resnet50_julios/setup.py | 1 - .../models/cornet_s_ynshah/setup.py | 1 - .../models/dbp_resnet50_julios/setup.py | 1 - .../models/eBarlow_Vanilla/setup.py | 1 - .../models/eBarlow_Vanilla_1/setup.py | 1 - .../models/eBarlow_Vanilla_2/setup.py | 1 - .../models/eBarlow_augself_linear_1/setup.py | 1 - .../models/eBarlow_augself_mlp_1/setup.py | 1 - 
.../models/eBarlow_lmda_0001_1/setup.py | 1 - .../models/eBarlow_lmda_001_1/setup.py | 1 - .../models/eBarlow_lmda_001_2/setup.py | 1 - .../models/eBarlow_lmda_001_3/setup.py | 1 - .../models/eBarlow_lmda_01/setup.py | 1 - .../models/eBarlow_lmda_01_1/setup.py | 1 - .../models/eBarlow_lmda_01_2/setup.py | 1 - .../models/eBarlow_lmda_02_1/setup.py | 1 - .../models/eBarlow_lmda_03_1/setup.py | 1 - .../models/eBarlow_lmda_04_1/setup.py | 1 - .../models/eBarlow_lmda_05_1/setup.py | 1 - .../models/eMMCR_Mom_Vanilla_1/setup.py | 1 - .../models/eMMCR_Mom_Vanilla_2/setup.py | 1 - .../models/eMMCR_Mom_lmda_0001_1/setup.py | 1 - .../models/eMMCR_Mom_lmda_001_1/setup.py | 1 - .../models/eMMCR_Mom_lmda_01_1/setup.py | 1 - .../models/eMMCR_Mom_lmda_01_2/setup.py | 1 - .../models/eMMCR_Mom_lmda_02_1/setup.py | 1 - .../models/eMMCR_Mom_lmda_03_1/setup.py | 1 - .../models/eMMCR_Mom_lmda_04_1/setup.py | 1 - .../models/eMMCR_Mom_lmda_05_1/setup.py | 1 - .../models/eMMCR_Vanilla/setup.py | 1 - .../models/eMMCR_VanillaV2/setup.py | 1 - .../models/eMMCR_Vanilla_1/setup.py | 1 - .../models/eMMCR_Vanilla_2/setup.py | 1 - .../models/eMMCR_lmda_01/setup.py | 1 - .../models/eMMCR_lmda_01V2/setup.py | 1 - .../models/eMMCR_lmda_01_1/setup.py | 1 - .../models/eMMCR_lmda_01_2/setup.py | 1 - .../models/eMMCR_lmda_01_3/setup.py | 1 - .../models/eSimCLR_Vanilla_1/setup.py | 1 - .../models/eSimCLR_Vanilla_2/setup.py | 1 - .../models/eSimCLR_lmda_0001_1/setup.py | 1 - .../models/eSimCLR_lmda_001_1/setup.py | 1 - .../models/eSimCLR_lmda_01_1/setup.py | 1 - .../models/eSimCLR_lmda_01_2/setup.py | 1 - .../models/eSimCLR_lmda_02_1/setup.py | 1 - .../models/eSimCLR_lmda_02_1_1/setup.py | 1 - .../models/eSimCLR_lmda_03_1/setup.py | 1 - .../models/eSimCLR_lmda_04_1/setup.py | 1 - .../models/eSimCLR_lmda_04_1_1/setup.py | 1 - .../models/eSimCLR_lmda_05_1/setup.py | 1 - .../model.py | 2 +- brainscore_vision/models/r50_tvpt/setup.py | 1 - .../models/resnet50_eMMCR_Vanilla/setup.py | 1 - .../models/resnet50_eMMCR_VanillaV2/setup.py | 1 - .../models/resnet50_eMMCR_eqp10_lm1/setup.py | 1 - .../models/resnet50_julios/setup.py | 1 - .../__init__.py | 0 .../model.py | 6 +- .../requirements.txt | 0 .../test.py | 0 .../models/temporal_model_GDT/model.py | 2 +- .../models/temporal_model_VideoMAEv2/model.py | 2 +- .../models/temporal_model_openstl/__init__.py | 1 - .../models/temporal_model_openstl/model.py | 19 +- .../models/temporal_model_openstl/test.py | 3 +- .../__init__.py | 0 .../model.py | 0 .../setup.py | 1 - .../test.py | 0 .../setup.py | 1 - .../setup.py | 1 - environment_lock.yml | 182 ++++++++++++++++++ pyproject.toml | 17 +- .../activations/test___init__.py | 69 +------ 95 files changed, 255 insertions(+), 403 deletions(-) delete mode 100644 brainscore_vision/model_helpers/activations/keras.py delete mode 100644 brainscore_vision/model_helpers/activations/tensorflow.py rename brainscore_vision/models/{temporal_model_AVID-CMA => temporal_model_AVID_CMA}/__init__.py (100%) rename brainscore_vision/models/{temporal_model_AVID-CMA => temporal_model_AVID_CMA}/model.py (94%) rename brainscore_vision/models/{temporal_model_AVID-CMA => temporal_model_AVID_CMA}/requirements.txt (100%) rename brainscore_vision/models/{temporal_model_AVID-CMA => temporal_model_AVID_CMA}/test.py (100%) rename brainscore_vision/models/{tv_efficientnet-b1 => tv_efficientnet_b1}/__init__.py (100%) rename brainscore_vision/models/{tv_efficientnet-b1 => tv_efficientnet_b1}/model.py (100%) rename brainscore_vision/models/{tv_efficientnet-b1 => 
tv_efficientnet_b1}/setup.py (92%) rename brainscore_vision/models/{tv_efficientnet-b1 => tv_efficientnet_b1}/test.py (100%) create mode 100644 environment_lock.yml diff --git a/.github/workflows/score_new_plugins.yml b/.github/workflows/score_new_plugins.yml index 8e4c8aec9..1f4c6a176 100644 --- a/.github/workflows/score_new_plugins.yml +++ b/.github/workflows/score_new_plugins.yml @@ -32,10 +32,10 @@ jobs: with: fetch-depth: 0 - - name: Set up Python 3.7 + - name: Set up Python 3.11 uses: actions/setup-python@v4 with: - python-version: 3.7 + python-version: 3.11 - name: Configure AWS Credentials uses: aws-actions/configure-aws-credentials@v1 @@ -176,10 +176,10 @@ jobs: - name: Check out repository code uses: actions/checkout@v4 - - name: Set up Python 3.7 + - name: Set up Python 3.11 uses: actions/setup-python@v4 with: - python-version: 3.7 + python-version: 3.11 - name: Build project run: | diff --git a/.readthedocs.yml b/.readthedocs.yml index 229a16285..ecc53316a 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -3,7 +3,7 @@ version: 2 build: os: "ubuntu-20.04" tools: - python: "3.7" + python: "3.11" python: install: diff --git a/.travis.yml b/.travis.yml index 69e9e9b03..75196cb31 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,5 +1,6 @@ version: ~> 1.0 language: python +dist: jammy env: global: - PYTEST_SETTINGS="not requires_gpu and not memory_intense and not slow and not travis_slow" @@ -9,7 +10,7 @@ env: - WEB_SUBMISSION="False" before_install: - pip install --upgrade pip -- pip install setuptools==60.5.0 +- pip install setuptools - pip install pytest # download large files - pip install awscli @@ -31,18 +32,18 @@ import: jobs: include: - - name: 3.7 public - python: '3.7.13' - - name: 3.7 private + - name: 3.11 public + python: '3.11' + - name: 3.11 private if: fork = false - python: '3.7.13' + python: '3.11' env: - PRIVATE_ACCESS=1 - secure: f1rWEwrslh7qa2g/QlKs001sGC3uaOxZNQSfNOPj+TMCqEo2c6OzImC4hyz+WqCyc6N/lFT4yYo2RhvaqStHMRmu/+9aZmuH05Bb0KQpfzNFA+yGa/U5WR3/4u6KRvDAeNEi9drT2LuacTyGbldmQsquujK0jrPpFWpe7zUUKv0zb0lJf0zcjeSrZlDXLlgD6DCqow7OqHRvW04dPZVy1OArRwtPV6DJ6Rqo1MqFQGHJ806VPlXhSoydb7a58dhGajqPjomdmZjhd3wS6Lv6uetTE/VVb4EP4e7n0qfZIx/TpnWG0SR44pcP7OCNARWYANsAivzxnQ0shyXnIzOo8ZcPYiPpt/5D53i5idTBxXyuDaHGQvgwuY5XLZzznEedBgZa4OvjxAXlLEQjdVDfSsZeYaV9gyFkeTlLnK1zvWi0US38eF2Qtm3Sx3D/5TtBKK2n38tyK5gg/XvJNycaXvIl7iVcnI2ifpqD1mUWI6C9j9Tk19/XEpWkwaFi91+0LZF1GhjBu8o3G5Np4RIOKXi3TIHkpbMM5mf11T6Bm9LvEMq1h8bgRQigEbeJF8CbUOSVFv+AaXsggGjQhuwdyvy2JZo+tO1nfhi+kW3XrDGPsz1R7Wfqduyn7UUh5OiFymeZwKseYKnwU47KyCqDwrq5Mnx1MlSidnVmPriadR4= - secure: WE7FPwy07VzJTKAd2xwZdBhtmh8jk7ojwk4B2rIcBQu0vwUXc1MgO8tBLD7s08lBedBjqZiLZEW31uPMEyWNysouDt16a5gm2d149LR7flI3MOifBtxINfJuC3eOEG65bPgN/bYEsIpLKnu3469d5nxZkK7xsjbWTxHGoUpLvVPsmHY2ZM5/jftybs7fI0do4NMG2XffKfZbiFb447Ao3xeQeEfW6IkJllzgGnlG9FJATFidrbwDNdmzAnvPEnDoKAf7ZvhPV0x9yR5V6P4Ck5hxl8mlPdBa1cRMO8s/1ag1c7YJ3AF9ZlwcwqTiGsT8DHTVRxSz4nFHJTMlrm9j84u7WzLZJBhPgF0UeLN3AQgiAZ3c2TFDvjQWeHVuSPkV5GrKlfhSvR82s9yPEdHQxxwYymBbAr6rJR4NtXTyZX0vg8NRKHssZKLSafs/D/pt9xXspqu8HAHc+mS0lCips79XptSr5BEsioil3D2io3tbzrGugpTeJ7oEA787vKn2Cm4XmhyQ0UBhvwsPZ351l27wZYuNV07o9Ik83hN/w4o2v899QQ/zbX42Iy8ZUCWOPX7MV7+TA7SMxru3qx7HL5hDM8kTetxbLB6Ckr+JOdX8L2Fb5L3TVDpsvfv0ebXgwaQR/ez8/7bcXmBqcERApHDz73HaMXUap+iDR4FLdXE= - AWS_DEFAULT_REGION=us-east-1 - stage: "Automerge check" - python: '3.7.13' + python: '3.11' install: - pip install --no-cache-dir torch torchvision --default-timeout=1000 --retries=5 - pip install --no-cache-dir -e ".[test]" diff --git a/README.md b/README.md index 
eae4d140f..e0552605e 100644 --- a/README.md +++ b/README.md @@ -22,7 +22,7 @@ To contribute, please [send in a pull request](https://github.com/brain-score/vi ## Local installation -You will need Python = 3.7 and pip >= 18.1. +You will need Python = 3.11 and pip >= 18.1. `pip install git+https://github.com/brain-score/vision` diff --git a/brainscore_vision/benchmark_helpers/__init__.py b/brainscore_vision/benchmark_helpers/__init__.py index eb36e50ca..7eb506115 100644 --- a/brainscore_vision/benchmark_helpers/__init__.py +++ b/brainscore_vision/benchmark_helpers/__init__.py @@ -1,6 +1,7 @@ from typing import Union import numpy as np +import hashlib from brainio.assemblies import NeuroidAssembly, DataAssembly from brainscore_core import Score @@ -18,6 +19,13 @@ def __init__(self, features: Union[DataAssembly, dict], visual_degrees): self.features = features self._visual_degrees = visual_degrees + @property + def identifier(self) -> str: + # serialize the features to a string and create hash + features_data = str(self.features) + features_hash = hashlib.md5(features_data.encode('utf-8')).hexdigest() + return f"precomputed-{features_hash}" + def visual_degrees(self) -> int: return self._visual_degrees diff --git a/brainscore_vision/benchmark_helpers/test_helper.py b/brainscore_vision/benchmark_helpers/test_helper.py index 6e3ad4a03..57d6461f6 100644 --- a/brainscore_vision/benchmark_helpers/test_helper.py +++ b/brainscore_vision/benchmark_helpers/test_helper.py @@ -7,6 +7,7 @@ from brainio.assemblies import NeuroidAssembly, PropertyAssembly from brainscore_vision import load_benchmark from brainscore_vision.model_interface import BrainModel +from brainscore_vision.data_helpers import s3 from . import PrecomputedFeatures @@ -68,6 +69,8 @@ def run_test_properties(self, benchmark: str, files: dict, expected: float): for current_stimulus in stimulus_identifiers: stimulus_set = load_stimulus_set(current_stimulus) path = Path(__file__).parent / files[current_stimulus] + s3.download_file_if_not_exists(local_path=path, + bucket='brainscore-unittests', remote_filepath=f'tests/test_benchmarks/{files[current_stimulus]}') features = PropertyAssembly.from_files(path, stimulus_set_identifier=stimulus_set.identifier, stimulus_set=stimulus_set) diff --git a/brainscore_vision/benchmarks/coggan2024_behavior/benchmark.py b/brainscore_vision/benchmarks/coggan2024_behavior/benchmark.py index 14db4121f..1a2fbbfae 100644 --- a/brainscore_vision/benchmarks/coggan2024_behavior/benchmark.py +++ b/brainscore_vision/benchmarks/coggan2024_behavior/benchmark.py @@ -75,9 +75,12 @@ def __call__(self, candidate: BrainModel) -> Score: data.model_prediction == data.object_class, dtype=int) # get correlation between model and human performance across conditions - performance = (data[data.visibility < 1] + performance = ( + data[data.visibility < 1] .groupby(['subject', 'occluder_type', 'occluder_color']) - .mean(['human_accuracy', 'model_accuracy'])).reset_index() + .mean(numeric_only=True) + .reset_index() + ) scores = performance.groupby('subject').apply( lambda df: np.corrcoef(df.human_accuracy, df.model_accuracy)[0, 1]) score = Score(np.mean(scores)) @@ -100,8 +103,9 @@ def get_noise_ceiling(performance: pd.DataFrame) -> Score: nc = [] for subject in performance.subject.unique(): performance_ind = performance[performance.subject == subject] - performance_grp = (performance[performance.subject != subject] - .groupby(['occluder_type', 'occluder_color']).mean()) + performance_grp = performance[performance.subject != subject] + 
numeric_cols = performance_grp.select_dtypes(include=np.number).columns + performance_grp = performance_grp.groupby(['occluder_type', 'occluder_color'])[numeric_cols].mean() merged_df = performance_ind.merge( performance_grp, on=['occluder_type', 'occluder_color']) nc.append(np.corrcoef(merged_df.human_accuracy_x, merged_df.human_accuracy_y)[0, 1]) diff --git a/brainscore_vision/benchmarks/igustibagus2024/domain_transfer_analysis.py b/brainscore_vision/benchmarks/igustibagus2024/domain_transfer_analysis.py index 9a8c07713..da3d662f2 100644 --- a/brainscore_vision/benchmarks/igustibagus2024/domain_transfer_analysis.py +++ b/brainscore_vision/benchmarks/igustibagus2024/domain_transfer_analysis.py @@ -5,6 +5,8 @@ import pandas as pd from sklearn.linear_model import RidgeClassifierCV from sklearn.model_selection import train_test_split +from sklearn.pipeline import Pipeline +from sklearn.preprocessing import StandardScaler from tqdm import tqdm # import brain-score specific libraries @@ -89,7 +91,10 @@ def __call__(self, candidate: BrainModel) -> Score: def OOD_AnalysisBenchmark(): return _OOD_AnalysisBenchmark( - classifier=RidgeClassifierCV(alphas=[0.0001, 0.001, 0.01, 0.1, 1, 10], fit_intercept=True, normalize=True) + classifier=Pipeline([ + ('scaler', StandardScaler()), + ('classifier', RidgeClassifierCV(alphas=[0.0001, 0.001, 0.01, 0.1, 1, 10], fit_intercept=True)) + ]) ) diff --git a/brainscore_vision/benchmarks/kar2019/test.py b/brainscore_vision/benchmarks/kar2019/test.py index b0fece327..34c15b9a9 100644 --- a/brainscore_vision/benchmarks/kar2019/test.py +++ b/brainscore_vision/benchmarks/kar2019/test.py @@ -24,7 +24,7 @@ def test_Kar2019ost_cornet_s(): filename = 'cornet_s-kar2019.nc' filepath = Path(__file__).parent / filename s3.download_file_if_not_exists(local_path=filepath, - bucket='brainio-brainscore', remote_filepath=f'tests/test_benchmarks/{filename}') + bucket='brainscore-unittests', remote_filepath=f'tests/test_benchmarks/{filename}') precomputed_features = NeuroidAssembly.from_files( filepath, stimulus_set_identifier=benchmark._assembly.stimulus_set.identifier, diff --git a/brainscore_vision/benchmarks/rajalingham2018/test.py b/brainscore_vision/benchmarks/rajalingham2018/test.py index 7a7e96388..2ff9d38a0 100644 --- a/brainscore_vision/benchmarks/rajalingham2018/test.py +++ b/brainscore_vision/benchmarks/rajalingham2018/test.py @@ -7,7 +7,7 @@ from pytest import approx from brainio.assemblies import BehavioralAssembly -from brainscore_vision import benchmark_registry, load_benchmark, load_metric +from brainscore_vision import benchmark_registry, load_benchmark, load_metric, load_model from brainscore_vision.benchmark_helpers import PrecomputedFeatures from brainscore_vision.benchmark_helpers.test_helper import VisualDegreesTests, NumberOfTrialsTests from brainscore_vision.benchmarks.rajalingham2018 import DicarloRajalingham2018I2n @@ -115,44 +115,11 @@ class TestMetricScore: @pytest.mark.parametrize(['model', 'expected_score'], [ ('alexnet', .253), - ('resnet34', .37787), - ('resnet18', .3638), + ('resnet50_tutorial', 0.348), + ('pixels', 0.0139) ]) def test_model(self, model, expected_score): - class UnceiledBenchmark(_DicarloRajalingham2018): - def __init__(self): - metric = load_metric('i2n') - super(UnceiledBenchmark, self).__init__(metric=metric, metric_identifier='i2n') - - def __call__(self, candidate: BrainModel): - candidate.start_task(BrainModel.Task.probabilities, self._fitting_stimuli) - probabilities = candidate.look_at(self._assembly.stimulus_set) - score = 
self._metric(probabilities, self._assembly) - return score - - benchmark = UnceiledBenchmark() - # features - feature_responses = xr.load_dataarray(Path(__file__).parent / 'test_resources' / - f'identifier={model},stimuli_identifier=objectome-240.nc') - feature_responses['stimulus_id'] = 'stimulus_path', [os.path.splitext(os.path.basename(path))[0] - for path in feature_responses['stimulus_path'].values] - feature_responses = feature_responses.stack(presentation=['stimulus_path']) - assert len(np.unique(feature_responses['layer'])) == 1 # only penultimate layer - - class PrecomputedFeatures: - def __init__(self, precomputed_features): - self.features = precomputed_features - - def __call__(self, stimuli, layers): - np.testing.assert_array_equal(layers, ['behavioral-layer']) - self_stimulus_ids = self.features['stimulus_id'].values.tolist() - indices = [self_stimulus_ids.index(stimulus_id) for stimulus_id in stimuli['stimulus_id'].values] - features = self.features[{'presentation': indices}] - return features - - # evaluate candidate - transformation = ProbabilitiesMapping(identifier=f'TestI2N.{model}', - activations_model=PrecomputedFeatures(feature_responses), - layer='behavioral-layer') - score = benchmark(transformation) - assert score == approx(expected_score, abs=0.005), f"expected {expected_score}, but got {score}" + benchmark = load_benchmark('Rajalingham2018-i2n') + model = load_model(model) + score = benchmark(model) + assert score.raw == approx(expected_score, abs=0.005), f"expected {expected_score}, but got {score.raw}" diff --git a/brainscore_vision/benchmarks/rajalingham2020/test.py b/brainscore_vision/benchmarks/rajalingham2020/test.py index 6af813946..40b6226d5 100644 --- a/brainscore_vision/benchmarks/rajalingham2020/test.py +++ b/brainscore_vision/benchmarks/rajalingham2020/test.py @@ -35,5 +35,5 @@ def test_Rajalingham2020(benchmark, expected): filename = 'alexnet-rajalingham2020-features.12.nc' filepath = Path(__file__).parent / filename s3.download_file_if_not_exists(local_path=filepath, - bucket='brainio-brainscore', remote_filepath=f'tests/test_benchmarks/{filename}') + bucket='brainscore-unittests', remote_filepath=f'tests/test_benchmarks/{filename}') precomputed_test.run_test(benchmark=benchmark, precomputed_features_filepath=filepath, expected=expected) diff --git a/brainscore_vision/benchmarks/sanghavi2020/test.py b/brainscore_vision/benchmarks/sanghavi2020/test.py index b65f08f63..ac6fe79b3 100644 --- a/brainscore_vision/benchmarks/sanghavi2020/test.py +++ b/brainscore_vision/benchmarks/sanghavi2020/test.py @@ -66,7 +66,7 @@ def test_self_regression(benchmark, visual_degrees, expected): def test_model_features(benchmark, filename, expected): filepath = Path(__file__).parent / filename s3.download_file_if_not_exists(local_path=filepath, - bucket='brainio-brainscore', remote_filepath=f'tests/test_benchmarks/{filename}') + bucket='brainscore-unittests', remote_filepath=f'tests/test_benchmarks/{filename}') precomputed_test.run_test(benchmark=benchmark, precomputed_features_filepath=filepath, expected=expected) diff --git a/brainscore_vision/data/geirhos2021/test.py b/brainscore_vision/data/geirhos2021/test.py index bdc2052af..41762008d 100644 --- a/brainscore_vision/data/geirhos2021/test.py +++ b/brainscore_vision/data/geirhos2021/test.py @@ -62,7 +62,7 @@ def test_stimulus_set_assembly_alignment(self, identifier, field): full_name = f"Geirhos2021_{identifier}" assembly = load_dataset(full_name) assert assembly.stimulus_set is not None - assert 
assembly.stimulus_set.identifier == f"{full_name}" + assert assembly.stimulus_set.identifier == full_name assert set(assembly.stimulus_set[field]) == set(assembly[field].values) # test the number of subjects: @@ -236,7 +236,7 @@ def test_stimulus_set_exist(self, identifier): full_name = f"Geirhos2021_{identifier}" stimulus_set = load_stimulus_set(full_name) assert stimulus_set is not None - assert stimulus_set.identifier == full_name + assert stimulus_set.identifier == f"{full_name}" # test the number of images @pytest.mark.parametrize('identifier, num_images', [ diff --git a/brainscore_vision/data/scialom2024/test.py b/brainscore_vision/data/scialom2024/test.py index dbc38b3b3..657376d1a 100644 --- a/brainscore_vision/data/scialom2024/test.py +++ b/brainscore_vision/data/scialom2024/test.py @@ -258,7 +258,7 @@ def test_stimulus_set_exists(self, identifier): ]) def test_number_of_images(self, identifier, num_images): stimulus_set = load_stimulus_set(identifier) - assert len(np.unique(stimulus_set['image_id'].values)) == num_images + assert len(np.unique(stimulus_set['stimulus_id'].values)) == num_images # test assembly coords present in ALL 17 sets: @pytest.mark.parametrize('identifier', [ diff --git a/brainscore_vision/metrics/internal_consistency/test.py b/brainscore_vision/metrics/internal_consistency/test.py index 6ccd597c3..3c00657fb 100644 --- a/brainscore_vision/metrics/internal_consistency/test.py +++ b/brainscore_vision/metrics/internal_consistency/test.py @@ -19,7 +19,7 @@ def test_dummy_data(self): dims=['presentation', 'neuroid']) ceiler = load_ceiling('internal_consistency') ceiling = ceiler(data) - assert ceiling == 1 + assert ceiling.item() == approx(1, abs=1e-8) class TestSplitHalfConsistency: diff --git a/brainscore_vision/metrics/ost/metric.py b/brainscore_vision/metrics/ost/metric.py index 7093781e7..92f7eb9ed 100644 --- a/brainscore_vision/metrics/ost/metric.py +++ b/brainscore_vision/metrics/ost/metric.py @@ -63,7 +63,7 @@ def compute_osts(self, train_source, test_source, test_osts): break # stop early if threshold is already hit for every image # interpolate - predicted_osts = np.empty(len(test_osts), dtype=np.float) + predicted_osts = np.empty(len(test_osts), dtype=np.float64) predicted_osts[:] = np.nan for i, (last_ost, hit_ost) in enumerate(zip(last_osts, hit_osts)): if hit_ost is None: diff --git a/brainscore_vision/model_helpers/activations/__init__.py b/brainscore_vision/model_helpers/activations/__init__.py index 10f514697..40a84e464 100644 --- a/brainscore_vision/model_helpers/activations/__init__.py +++ b/brainscore_vision/model_helpers/activations/__init__.py @@ -1,3 +1 @@ -from brainscore_vision.model_helpers.activations.keras import KerasWrapper, preprocess as preprocess_keras from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper, preprocess_images as preprocess_pytorch -from brainscore_vision.model_helpers.activations.tensorflow import TensorflowWrapper, TensorflowSlimWrapper diff --git a/brainscore_vision/model_helpers/activations/core.py b/brainscore_vision/model_helpers/activations/core.py index a9f537250..58f8baefc 100644 --- a/brainscore_vision/model_helpers/activations/core.py +++ b/brainscore_vision/model_helpers/activations/core.py @@ -348,7 +348,7 @@ def translate_images(self, images: List[Union[str, np.ndarray]], image_paths: Li """ Translate images according to selected microsaccades, if microsaccades are required. - :param images: A list of images (in the case of tensorflow models), or a list of arrays (non-tf models). 
+ :param images: A list of arrays. :param image_paths: A list of image paths. Both `image_paths` and `images` are needed since while both tf and non-tf models preprocess images before this point, non-tf models' preprocessed images are fixed as arrays when fed into here. As such, simply returning `image_paths` for @@ -519,14 +519,9 @@ def translate(image: np.array, shift: Tuple[float, float], image_shape: Tuple[in return translated_image @staticmethod - def get_image_with_shape(image: Union[str, np.ndarray]) -> Tuple[np.array, Tuple[int, int], bool]: - if isinstance(image, str): # tf models return strings after preprocessing - image = cv2.imread(image) - rows, cols, _ = image.shape # cv2 uses height, width, channels - image_is_channels_first = False - else: - _, rows, cols, = image.shape # pytorch and keras use channels, height, width - image_is_channels_first = True + def get_image_with_shape(image: np.ndarray) -> Tuple[np.array, Tuple[int, int], bool]: + _, rows, cols, = image.shape # pytorch uses channels, height, width + image_is_channels_first = True return image, (rows, cols), image_is_channels_first @staticmethod diff --git a/brainscore_vision/model_helpers/activations/keras.py b/brainscore_vision/model_helpers/activations/keras.py deleted file mode 100644 index 8d1acf4d7..000000000 --- a/brainscore_vision/model_helpers/activations/keras.py +++ /dev/null @@ -1,92 +0,0 @@ -from collections import OrderedDict - -import numpy as np - -from brainscore_vision.model_helpers.activations.core import ActivationsExtractorHelper - - -class KerasWrapper: - def __init__(self, model, preprocessing, identifier=None, *args, **kwargs): - """ - :param model: a keras model with a function `preprocess_input` - that will later be called on the loaded numpy image - """ - self._model = model - identifier = identifier or model.name - self._extractor = ActivationsExtractorHelper( - identifier=identifier, get_activations=self.get_activations, preprocessing=preprocessing, - *args, **kwargs) - self._extractor.insert_attrs(self) - - @property - def identifier(self): - return self._extractor.identifier - - @identifier.setter - def identifier(self, value): - self._extractor.identifier = value - - def __call__(self, *args, **kwargs): # cannot assign __call__ as attribute due to Python convention - return self._extractor(*args, **kwargs) - - def get_activations(self, images, layer_names): - from keras import backend as K - input_tensor = self._model.input - layers = [layer for layer in self._model.layers if layer.name in layer_names] - layers = sorted(layers, key=lambda layer: layer_names.index(layer.name)) - if 'logits' in layer_names: - layers.insert(layer_names.index('logits'), self._model.layers[-1]) - assert len(layers) == len(layer_names) - layer_out_tensors = [layer.output for layer in layers] - functor = K.function([input_tensor] + [K.learning_phase()], layer_out_tensors) # evaluate all tensors at once - layer_outputs = functor([images, 0.]) # 0 to signal testing phase - return OrderedDict([(layer_name, layer_output) for layer_name, layer_output in zip(layer_names, layer_outputs)]) - - def __repr__(self): - return repr(self._model) - - def graph(self): - import networkx as nx - g = nx.DiGraph() - for layer in self._model.layers: - g.add_node(layer.name, object=layer, type=type(layer)) - for outbound_node in layer._outbound_nodes: - g.add_edge(layer.name, outbound_node.outbound_layer.name) - return g - - -def load_images(image_filepaths, image_size): - images = [load_image(image_filepath) for image_filepath 
in image_filepaths] - images = [scale_image(image, image_size) for image in images] - return np.array(images) - - -def load_image(image_filepath): - try: # keras API before tensorflow 2.9.1 - from keras.preprocessing.image import load_img - from keras.preprocessing.image import img_to_array - except ImportError: - from tensorflow.keras.utils import load_img - from tensorflow.keras.utils import img_to_array - img = load_img(image_filepath) - x = img_to_array(img) - return x - - -def scale_image(img, image_size): - from PIL import Image - try: # keras API before tensorflow 2.9.1 - from keras.preprocessing.image import img_to_array - except ImportError: - from tensorflow.keras.utils import img_to_array - img = Image.fromarray(img.astype(np.uint8)) - img = img.resize((image_size, image_size)) - img = img_to_array(img) - return img - - -def preprocess(image_filepaths, image_size, *args, **kwargs): - # only a wrapper to avoid top-level keras imports - from keras.applications.imagenet_utils import preprocess_input - images = load_images(image_filepaths, image_size=image_size) - return preprocess_input(images, *args, **kwargs) diff --git a/brainscore_vision/model_helpers/activations/temporal/inputs/base.py b/brainscore_vision/model_helpers/activations/temporal/inputs/base.py index d656a86b7..c94ccd3d7 100644 --- a/brainscore_vision/model_helpers/activations/temporal/inputs/base.py +++ b/brainscore_vision/model_helpers/activations/temporal/inputs/base.py @@ -15,4 +15,3 @@ def is_video_path(path: Union[str, Path]) -> bool: def is_image_path(path: Union[str, Path]) -> bool: extension = path.split('.')[-1].lower() return extension in ['jpg', 'jpeg', 'png', 'bmp', 'tiff'] - \ No newline at end of file diff --git a/brainscore_vision/model_helpers/activations/tensorflow.py b/brainscore_vision/model_helpers/activations/tensorflow.py deleted file mode 100644 index d5e4864d5..000000000 --- a/brainscore_vision/model_helpers/activations/tensorflow.py +++ /dev/null @@ -1,71 +0,0 @@ -from collections import OrderedDict - -from brainscore_vision.model_helpers.activations.core import ActivationsExtractorHelper - - -class TensorflowWrapper: - def __init__(self, identifier, inputs, endpoints: dict, session, *args, **kwargs): - import tensorflow as tf - self._inputs = inputs - self._endpoints = endpoints - self._session = session or tf.compat.v1.Session() - self._extractor = ActivationsExtractorHelper(identifier=identifier, get_activations=self.get_activations, - preprocessing=None, *args, **kwargs) - self._extractor.insert_attrs(self) - - @property - def identifier(self): - return self._extractor.identifier - - @identifier.setter - def identifier(self, value): - self._extractor.identifier = value - - def __call__(self, *args, **kwargs): # cannot assign __call__ as attribute due to Python convention - return self._extractor(*args, **kwargs) - - def get_activations(self, images, layer_names): - layer_tensors = OrderedDict((layer, self._endpoints[ - layer if (layer != 'logits' or layer in self._endpoints) else next(reversed(self._endpoints))]) - for layer in layer_names) - layer_outputs = self._session.run(layer_tensors, feed_dict={self._inputs: images}) - return layer_outputs - - def graph(self): - import networkx as nx - g = nx.DiGraph() - for name, layer in self._endpoints.items(): - g.add_node(name, object=layer, type=type(layer)) - g.add_node("logits", object=self.logits, type=type(self.logits)) - return g - - -class TensorflowSlimWrapper(TensorflowWrapper): - def __init__(self, *args, labels_offset=1, **kwargs): 
- super(TensorflowSlimWrapper, self).__init__(*args, **kwargs) - self._labels_offset = labels_offset - - def get_activations(self, images, layer_names): - layer_outputs = super(TensorflowSlimWrapper, self).get_activations(images, layer_names) - if 'logits' in layer_outputs: - layer_outputs['logits'] = layer_outputs['logits'][:, self._labels_offset:] - return layer_outputs - - -def load_image(image_filepath): - import tensorflow as tf - image = tf.io.read_file(image_filepath) - image = tf.image.decode_png(image, channels=3) - return image - - -def resize_image(image, image_size): - import tensorflow as tf - image = tf.image.resize(image, (image_size, image_size)) - return image - - -def load_resize_image(image_path, image_size): - image = load_image(image_path) - image = resize_image(image, image_size) - return image diff --git a/brainscore_vision/models/bp_resnet50_julios/setup.py b/brainscore_vision/models/bp_resnet50_julios/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/bp_resnet50_julios/setup.py +++ b/brainscore_vision/models/bp_resnet50_julios/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/cornet_s_ynshah/setup.py b/brainscore_vision/models/cornet_s_ynshah/setup.py index 68362b48b..aa18ce8a3 100644 --- a/brainscore_vision/models/cornet_s_ynshah/setup.py +++ b/brainscore_vision/models/cornet_s_ynshah/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/dbp_resnet50_julios/setup.py b/brainscore_vision/models/dbp_resnet50_julios/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/dbp_resnet50_julios/setup.py +++ b/brainscore_vision/models/dbp_resnet50_julios/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_Vanilla/setup.py b/brainscore_vision/models/eBarlow_Vanilla/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_Vanilla/setup.py +++ b/brainscore_vision/models/eBarlow_Vanilla/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_Vanilla_1/setup.py b/brainscore_vision/models/eBarlow_Vanilla_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_Vanilla_1/setup.py +++ b/brainscore_vision/models/eBarlow_Vanilla_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_Vanilla_2/setup.py b/brainscore_vision/models/eBarlow_Vanilla_2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_Vanilla_2/setup.py +++ b/brainscore_vision/models/eBarlow_Vanilla_2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming 
Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_augself_linear_1/setup.py b/brainscore_vision/models/eBarlow_augself_linear_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_augself_linear_1/setup.py +++ b/brainscore_vision/models/eBarlow_augself_linear_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_augself_mlp_1/setup.py b/brainscore_vision/models/eBarlow_augself_mlp_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_augself_mlp_1/setup.py +++ b/brainscore_vision/models/eBarlow_augself_mlp_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_lmda_0001_1/setup.py b/brainscore_vision/models/eBarlow_lmda_0001_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_lmda_0001_1/setup.py +++ b/brainscore_vision/models/eBarlow_lmda_0001_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_lmda_001_1/setup.py b/brainscore_vision/models/eBarlow_lmda_001_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_lmda_001_1/setup.py +++ b/brainscore_vision/models/eBarlow_lmda_001_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_lmda_001_2/setup.py b/brainscore_vision/models/eBarlow_lmda_001_2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_lmda_001_2/setup.py +++ b/brainscore_vision/models/eBarlow_lmda_001_2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_lmda_001_3/setup.py b/brainscore_vision/models/eBarlow_lmda_001_3/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_lmda_001_3/setup.py +++ b/brainscore_vision/models/eBarlow_lmda_001_3/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_lmda_01/setup.py b/brainscore_vision/models/eBarlow_lmda_01/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_lmda_01/setup.py +++ b/brainscore_vision/models/eBarlow_lmda_01/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_lmda_01_1/setup.py b/brainscore_vision/models/eBarlow_lmda_01_1/setup.py index 421914cfb..c286567f5 100644 --- 
a/brainscore_vision/models/eBarlow_lmda_01_1/setup.py +++ b/brainscore_vision/models/eBarlow_lmda_01_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_lmda_01_2/setup.py b/brainscore_vision/models/eBarlow_lmda_01_2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_lmda_01_2/setup.py +++ b/brainscore_vision/models/eBarlow_lmda_01_2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_lmda_02_1/setup.py b/brainscore_vision/models/eBarlow_lmda_02_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_lmda_02_1/setup.py +++ b/brainscore_vision/models/eBarlow_lmda_02_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_lmda_03_1/setup.py b/brainscore_vision/models/eBarlow_lmda_03_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_lmda_03_1/setup.py +++ b/brainscore_vision/models/eBarlow_lmda_03_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_lmda_04_1/setup.py b/brainscore_vision/models/eBarlow_lmda_04_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_lmda_04_1/setup.py +++ b/brainscore_vision/models/eBarlow_lmda_04_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_lmda_05_1/setup.py b/brainscore_vision/models/eBarlow_lmda_05_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_lmda_05_1/setup.py +++ b/brainscore_vision/models/eBarlow_lmda_05_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Mom_Vanilla_1/setup.py b/brainscore_vision/models/eMMCR_Mom_Vanilla_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Mom_Vanilla_1/setup.py +++ b/brainscore_vision/models/eMMCR_Mom_Vanilla_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Mom_Vanilla_2/setup.py b/brainscore_vision/models/eMMCR_Mom_Vanilla_2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Mom_Vanilla_2/setup.py +++ b/brainscore_vision/models/eMMCR_Mom_Vanilla_2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], 
test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Mom_lmda_0001_1/setup.py b/brainscore_vision/models/eMMCR_Mom_lmda_0001_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Mom_lmda_0001_1/setup.py +++ b/brainscore_vision/models/eMMCR_Mom_lmda_0001_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Mom_lmda_001_1/setup.py b/brainscore_vision/models/eMMCR_Mom_lmda_001_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Mom_lmda_001_1/setup.py +++ b/brainscore_vision/models/eMMCR_Mom_lmda_001_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Mom_lmda_01_1/setup.py b/brainscore_vision/models/eMMCR_Mom_lmda_01_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Mom_lmda_01_1/setup.py +++ b/brainscore_vision/models/eMMCR_Mom_lmda_01_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Mom_lmda_01_2/setup.py b/brainscore_vision/models/eMMCR_Mom_lmda_01_2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Mom_lmda_01_2/setup.py +++ b/brainscore_vision/models/eMMCR_Mom_lmda_01_2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Mom_lmda_02_1/setup.py b/brainscore_vision/models/eMMCR_Mom_lmda_02_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Mom_lmda_02_1/setup.py +++ b/brainscore_vision/models/eMMCR_Mom_lmda_02_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Mom_lmda_03_1/setup.py b/brainscore_vision/models/eMMCR_Mom_lmda_03_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Mom_lmda_03_1/setup.py +++ b/brainscore_vision/models/eMMCR_Mom_lmda_03_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Mom_lmda_04_1/setup.py b/brainscore_vision/models/eMMCR_Mom_lmda_04_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Mom_lmda_04_1/setup.py +++ b/brainscore_vision/models/eMMCR_Mom_lmda_04_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Mom_lmda_05_1/setup.py b/brainscore_vision/models/eMMCR_Mom_lmda_05_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Mom_lmda_05_1/setup.py +++ 
b/brainscore_vision/models/eMMCR_Mom_lmda_05_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Vanilla/setup.py b/brainscore_vision/models/eMMCR_Vanilla/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Vanilla/setup.py +++ b/brainscore_vision/models/eMMCR_Vanilla/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_VanillaV2/setup.py b/brainscore_vision/models/eMMCR_VanillaV2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_VanillaV2/setup.py +++ b/brainscore_vision/models/eMMCR_VanillaV2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Vanilla_1/setup.py b/brainscore_vision/models/eMMCR_Vanilla_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Vanilla_1/setup.py +++ b/brainscore_vision/models/eMMCR_Vanilla_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Vanilla_2/setup.py b/brainscore_vision/models/eMMCR_Vanilla_2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Vanilla_2/setup.py +++ b/brainscore_vision/models/eMMCR_Vanilla_2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_lmda_01/setup.py b/brainscore_vision/models/eMMCR_lmda_01/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_lmda_01/setup.py +++ b/brainscore_vision/models/eMMCR_lmda_01/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_lmda_01V2/setup.py b/brainscore_vision/models/eMMCR_lmda_01V2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_lmda_01V2/setup.py +++ b/brainscore_vision/models/eMMCR_lmda_01V2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_lmda_01_1/setup.py b/brainscore_vision/models/eMMCR_lmda_01_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_lmda_01_1/setup.py +++ b/brainscore_vision/models/eMMCR_lmda_01_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_lmda_01_2/setup.py b/brainscore_vision/models/eMMCR_lmda_01_2/setup.py index 
421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_lmda_01_2/setup.py +++ b/brainscore_vision/models/eMMCR_lmda_01_2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_lmda_01_3/setup.py b/brainscore_vision/models/eMMCR_lmda_01_3/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_lmda_01_3/setup.py +++ b/brainscore_vision/models/eMMCR_lmda_01_3/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_Vanilla_1/setup.py b/brainscore_vision/models/eSimCLR_Vanilla_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_Vanilla_1/setup.py +++ b/brainscore_vision/models/eSimCLR_Vanilla_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_Vanilla_2/setup.py b/brainscore_vision/models/eSimCLR_Vanilla_2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_Vanilla_2/setup.py +++ b/brainscore_vision/models/eSimCLR_Vanilla_2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_lmda_0001_1/setup.py b/brainscore_vision/models/eSimCLR_lmda_0001_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_lmda_0001_1/setup.py +++ b/brainscore_vision/models/eSimCLR_lmda_0001_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_lmda_001_1/setup.py b/brainscore_vision/models/eSimCLR_lmda_001_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_lmda_001_1/setup.py +++ b/brainscore_vision/models/eSimCLR_lmda_001_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_lmda_01_1/setup.py b/brainscore_vision/models/eSimCLR_lmda_01_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_lmda_01_1/setup.py +++ b/brainscore_vision/models/eSimCLR_lmda_01_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_lmda_01_2/setup.py b/brainscore_vision/models/eSimCLR_lmda_01_2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_lmda_01_2/setup.py +++ b/brainscore_vision/models/eSimCLR_lmda_01_2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', 
], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_lmda_02_1/setup.py b/brainscore_vision/models/eSimCLR_lmda_02_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_lmda_02_1/setup.py +++ b/brainscore_vision/models/eSimCLR_lmda_02_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_lmda_02_1_1/setup.py b/brainscore_vision/models/eSimCLR_lmda_02_1_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_lmda_02_1_1/setup.py +++ b/brainscore_vision/models/eSimCLR_lmda_02_1_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_lmda_03_1/setup.py b/brainscore_vision/models/eSimCLR_lmda_03_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_lmda_03_1/setup.py +++ b/brainscore_vision/models/eSimCLR_lmda_03_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_lmda_04_1/setup.py b/brainscore_vision/models/eSimCLR_lmda_04_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_lmda_04_1/setup.py +++ b/brainscore_vision/models/eSimCLR_lmda_04_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_lmda_04_1_1/setup.py b/brainscore_vision/models/eSimCLR_lmda_04_1_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_lmda_04_1_1/setup.py +++ b/brainscore_vision/models/eSimCLR_lmda_04_1_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_lmda_05_1/setup.py b/brainscore_vision/models/eSimCLR_lmda_05_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_lmda_05_1/setup.py +++ b/brainscore_vision/models/eSimCLR_lmda_05_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py b/brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py index 41c6ca79e..d3eaf9c94 100644 --- a/brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py +++ b/brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py @@ -1,7 +1,7 @@ import functools import torch -from brainscore_vision.model_helpers.activations import PytorchWrapper, KerasWrapper +from brainscore_vision.model_helpers.activations import PytorchWrapper from brainscore_vision.model_helpers.activations.pytorch import 
load_preprocess_images from brainscore_vision.model_helpers.s3 import load_weight_file from PIL import Image diff --git a/brainscore_vision/models/r50_tvpt/setup.py b/brainscore_vision/models/r50_tvpt/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/r50_tvpt/setup.py +++ b/brainscore_vision/models/r50_tvpt/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/resnet50_eMMCR_Vanilla/setup.py b/brainscore_vision/models/resnet50_eMMCR_Vanilla/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/resnet50_eMMCR_Vanilla/setup.py +++ b/brainscore_vision/models/resnet50_eMMCR_Vanilla/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/resnet50_eMMCR_VanillaV2/setup.py b/brainscore_vision/models/resnet50_eMMCR_VanillaV2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/resnet50_eMMCR_VanillaV2/setup.py +++ b/brainscore_vision/models/resnet50_eMMCR_VanillaV2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/setup.py b/brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/setup.py +++ b/brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/resnet50_julios/setup.py b/brainscore_vision/models/resnet50_julios/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/resnet50_julios/setup.py +++ b/brainscore_vision/models/resnet50_julios/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/temporal_model_AVID-CMA/__init__.py b/brainscore_vision/models/temporal_model_AVID_CMA/__init__.py similarity index 100% rename from brainscore_vision/models/temporal_model_AVID-CMA/__init__.py rename to brainscore_vision/models/temporal_model_AVID_CMA/__init__.py diff --git a/brainscore_vision/models/temporal_model_AVID-CMA/model.py b/brainscore_vision/models/temporal_model_AVID_CMA/model.py similarity index 94% rename from brainscore_vision/models/temporal_model_AVID-CMA/model.py rename to brainscore_vision/models/temporal_model_AVID_CMA/model.py index 60d91f690..a67eb3b43 100644 --- a/brainscore_vision/models/temporal_model_AVID-CMA/model.py +++ b/brainscore_vision/models/temporal_model_AVID_CMA/model.py @@ -29,7 +29,7 @@ def get_model(identifier): cfg_path = os.path.join(HOME, "configs/main/avid-cma/audioset/InstX-N1024-PosW-N64-Top32.yaml") weight_path = load_weight_file( bucket="brainscore-vision", - relative_path="temporal_model_AVID-CMA/AVID-CMA_Audioset_InstX-N1024-PosW-N64-Top32_checkpoint.pth.tar", + 
relative_path="temporal_model_AVID_CMA/AVID-CMA_Audioset_InstX-N1024-PosW-N64-Top32_checkpoint.pth.tar", version_id="jSaZgbUohM0ZeoEUUKZiLBo6iz_v8VvQ", sha1="9db5eba9aab6bdbb74025be57ab532df808fe3f6" ) @@ -38,7 +38,7 @@ def get_model(identifier): cfg_path = os.path.join(HOME, "configs/main/avid/kinetics/Cross-N1024.yaml") weight_path = load_weight_file( bucket="brainscore-vision", - relative_path="temporal_model_AVID-CMA/AVID_Kinetics_Cross-N1024_checkpoint.pth.tar", + relative_path="temporal_model_AVID_CMA/AVID_Kinetics_Cross-N1024_checkpoint.pth.tar", version_id="XyKt0UOUFsuuyrl6ZREivK8FadRPx34u", sha1="d3a04f856d29421ba8de37808593a3fad4d4794f" ) @@ -47,7 +47,7 @@ def get_model(identifier): cfg_path = os.path.join(HOME, "configs/main/avid/audioset/Cross-N1024.yaml") weight_path = load_weight_file( bucket="brainscore-vision", - relative_path="temporal_model_AVID-CMA/AVID_Audioset_Cross-N1024_checkpoint.pth.tar", + relative_path="temporal_model_AVID_CMA/AVID_Audioset_Cross-N1024_checkpoint.pth.tar", version_id="0Sxuhn8LsYXQC4FnPfJ7rw7uU6kDlKgc", sha1="b48d8428a1a2526ccca070f810333df18bfce5fd" ) diff --git a/brainscore_vision/models/temporal_model_AVID-CMA/requirements.txt b/brainscore_vision/models/temporal_model_AVID_CMA/requirements.txt similarity index 100% rename from brainscore_vision/models/temporal_model_AVID-CMA/requirements.txt rename to brainscore_vision/models/temporal_model_AVID_CMA/requirements.txt diff --git a/brainscore_vision/models/temporal_model_AVID-CMA/test.py b/brainscore_vision/models/temporal_model_AVID_CMA/test.py similarity index 100% rename from brainscore_vision/models/temporal_model_AVID-CMA/test.py rename to brainscore_vision/models/temporal_model_AVID_CMA/test.py diff --git a/brainscore_vision/models/temporal_model_GDT/model.py b/brainscore_vision/models/temporal_model_GDT/model.py index 624a5b29b..9a0c057c7 100644 --- a/brainscore_vision/models/temporal_model_GDT/model.py +++ b/brainscore_vision/models/temporal_model_GDT/model.py @@ -69,4 +69,4 @@ def get_model(identifier): # "base.fc": "C", # no fc } - return PytorchWrapper(identifier, model, transform_video, fps=30, layer_activation_format=layer_activation_format) \ No newline at end of file + return PytorchWrapper(identifier, model, transform_video, fps=30, layer_activation_format=layer_activation_format) diff --git a/brainscore_vision/models/temporal_model_VideoMAEv2/model.py b/brainscore_vision/models/temporal_model_VideoMAEv2/model.py index 7e785513e..355b8e8b2 100644 --- a/brainscore_vision/models/temporal_model_VideoMAEv2/model.py +++ b/brainscore_vision/models/temporal_model_VideoMAEv2/model.py @@ -54,7 +54,7 @@ def get_model(identifier): bucket="brainscore-vision", relative_path="temporal_model_VideoMAEv2/vit_g_hybrid_pt_1200e.pth", version_id="TxtkfbeMV105dzpzTwi0Kn5glnvQvIrq", - sha1="9048f2bc0b0c7ba4d0e5228f3a7c0bef4dbaca69", + sha1="9048f2bc0b0c7ba4d0e5228f3a7c0bef4dbaca69" ) num_blocks = 40 feature_map_size = 16 diff --git a/brainscore_vision/models/temporal_model_openstl/__init__.py b/brainscore_vision/models/temporal_model_openstl/__init__.py index 2b49cc845..9ea9b66b1 100644 --- a/brainscore_vision/models/temporal_model_openstl/__init__.py +++ b/brainscore_vision/models/temporal_model_openstl/__init__.py @@ -13,7 +13,6 @@ def commit_model(identifier): model_registry["ConvLSTM"] = lambda: commit_model("ConvLSTM") model_registry["PredRNN"] = lambda: commit_model("PredRNN") -# model_registry["PredNet"] = lambda: commit_model("PredNet") model_registry["SimVP"] = lambda: commit_model("SimVP") 
model_registry["TAU"] = lambda: commit_model("TAU") model_registry["MIM"] = lambda: commit_model("MIM") diff --git a/brainscore_vision/models/temporal_model_openstl/model.py b/brainscore_vision/models/temporal_model_openstl/model.py index aed3e0464..de5c93803 100644 --- a/brainscore_vision/models/temporal_model_openstl/model.py +++ b/brainscore_vision/models/temporal_model_openstl/model.py @@ -105,23 +105,6 @@ def process_output(layer, layer_name, inputs, output): kwargs = {} weight_name = "kitticaltech_predrnn_one_ep100.pth" - elif identifier == "PredNet": - layer_activation_format = { - **{f"layer{i}": "TCHW" for i in range(4)}, - "layer5": "TCHW" - } - - def process_output(layer, layer_name, inputs, output): - if layer_name.startswith("cell_list"): - h, c = output - return c - else: - return output - - wrapper_cls = LSTMWrapper - kwargs = {} - weight_name = "kitticaltech_prednet_one_ep100.pth" - elif identifier == "ConvLSTM": layer_activation_format = { **{f"cell_list.{i}": "TCHW" for i in range(4)}, @@ -220,4 +203,4 @@ def transform_video_simvp(video): return wrapper_cls(identifier, model, transform_video, fps=KITTI_FPS, layer_activation_format=layer_activation_format, - process_output=process_output, **kwargs) \ No newline at end of file + process_output=process_output, **kwargs) diff --git a/brainscore_vision/models/temporal_model_openstl/test.py b/brainscore_vision/models/temporal_model_openstl/test.py index 4d52b76ce..c4090a314 100644 --- a/brainscore_vision/models/temporal_model_openstl/test.py +++ b/brainscore_vision/models/temporal_model_openstl/test.py @@ -6,7 +6,6 @@ model_list = [ "ConvLSTM", "PredRNN", - "PredNet", "SimVP", "TAU", "MIM" @@ -17,4 +16,4 @@ @pytest.mark.parametrize("model_identifier", model_list) def test_load(model_identifier): model = load_model(model_identifier) - assert model is not None \ No newline at end of file + assert model is not None diff --git a/brainscore_vision/models/tv_efficientnet-b1/__init__.py b/brainscore_vision/models/tv_efficientnet_b1/__init__.py similarity index 100% rename from brainscore_vision/models/tv_efficientnet-b1/__init__.py rename to brainscore_vision/models/tv_efficientnet_b1/__init__.py diff --git a/brainscore_vision/models/tv_efficientnet-b1/model.py b/brainscore_vision/models/tv_efficientnet_b1/model.py similarity index 100% rename from brainscore_vision/models/tv_efficientnet-b1/model.py rename to brainscore_vision/models/tv_efficientnet_b1/model.py diff --git a/brainscore_vision/models/tv_efficientnet-b1/setup.py b/brainscore_vision/models/tv_efficientnet_b1/setup.py similarity index 92% rename from brainscore_vision/models/tv_efficientnet-b1/setup.py rename to brainscore_vision/models/tv_efficientnet_b1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/tv_efficientnet-b1/setup.py +++ b/brainscore_vision/models/tv_efficientnet_b1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/tv_efficientnet-b1/test.py b/brainscore_vision/models/tv_efficientnet_b1/test.py similarity index 100% rename from brainscore_vision/models/tv_efficientnet-b1/test.py rename to brainscore_vision/models/tv_efficientnet_b1/test.py diff --git a/brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/setup.py b/brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/setup.py index 421914cfb..c286567f5 100644 --- 
a/brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/setup.py +++ b/brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/setup.py b/brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/setup.py +++ b/brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/environment_lock.yml b/environment_lock.yml new file mode 100644 index 000000000..9847267d3 --- /dev/null +++ b/environment_lock.yml @@ -0,0 +1,182 @@ +# This environment_lock file is associated with the move to brainscore_vision 2.1.0. This lock includes all testing dependencies and dependencies +# from adjacent repositories. + +name: brainscore_env +channels: + - defaults + - conda-forge +dependencies: + - bzip2=1.0.8 + - ca-certificates=2024.7.2 + - libffi=3.4.4 + - ncurses=6.4 + - openssl=3.0.14 + - pip=24.2 + - python=3.11.9 + - readline=8.2 + - setuptools=72.1.0 + - sqlite=3.45.3 + - tk=8.6.14 + - wheel=0.43.0 + - xz=5.4.6 + - zlib=1.2.13 + - pip: + - anyio==4.4.0 + - appnope==0.1.4 + - argon2-cffi==23.1.0 + - argon2-cffi-bindings==21.2.0 + - arrow==1.3.0 + - asttokens==2.4.1 + - async-lru==2.0.4 + - attrs==24.2.0 + - babel==2.16.0 + - beautifulsoup4==4.12.3 + - bleach==6.1.0 + - boto3==1.35.3 + - botocore==1.35.3 + - brainio @ git+https://github.com/brain-score/brainio.git@main + - brainscore_core @ git+https://github.com/brain-score/core@main + - brainscore-vision @ git+https://github.com/brain-score/vision.git@main + - certifi==2024.7.4 + - cffi==1.17.0 + - cftime==1.6.4 + - charset-normalizer==3.3.2 + - click==8.1.7 + - cloudpickle==3.0.0 + - comm==0.2.2 + - contourpy==1.2.1 + - cycler==0.12.1 + - dask==2024.8.1 + - debugpy==1.8.5 + - decorator==5.1.1 + - defusedxml==0.7.1 + - entrypoints==0.4 + - eva-decord==0.6.1 + - executing==2.0.1 + - fastjsonschema==2.20.0 + - filelock==3.15.4 + - fire==0.6.0 + - fonttools==4.53.1 + - fqdn==1.5.1 + - fsspec==2024.6.1 + - gitdb==4.0.11 + - gitpython==3.1.43 + - h11==0.14.0 + - h5py==3.11.0 + - httpcore==1.0.5 + - httpx==0.27.0 + - idna==3.7 + - importlib-metadata==4.13.0 + - iniconfig==2.0.0 + - ipykernel==6.29.5 + - ipython==8.26.0 + - ipywidgets==8.1.5 + - isoduration==20.11.0 + - jedi==0.19.1 + - jinja2==3.1.4 + - jmespath==1.0.1 + - joblib==1.4.2 + - json5==0.9.25 + - jsonpointer==3.0.0 + - jsonschema==4.23.0 + - jsonschema-specifications==2023.12.1 + - jupyter==1.0.0 + - jupyter-client==8.6.2 + - jupyter-console==6.6.3 + - jupyter-core==5.7.2 + - jupyter-events==0.10.0 + - jupyter-lsp==2.2.5 + - jupyter-server==2.14.2 + - jupyter-server-terminals==0.5.3 + - jupyterlab==4.2.4 + - jupyterlab-pygments==0.3.0 + - jupyterlab-server==2.27.3 + - jupyterlab-widgets==3.0.13 + - kiwisolver==1.4.5 + - latexcodec==3.0.0 + - locket==1.0.0 + - markupsafe==2.1.5 + - matplotlib==3.9.2 + - matplotlib-inline==0.1.7 + - mistune==3.0.2 + - mpmath==1.3.0 + - nbclient==0.10.0 + - nbconvert==7.16.4 + - nbformat==5.10.4 + - nest-asyncio==1.6.0 + - 
netcdf4==1.7.1.post1 + - networkx==3.3 + - notebook==7.2.1 + - notebook-shim==0.2.4 + - numpy==1.26.4 + - opencv-python==4.10.0.84 + - overrides==7.7.0 + - packaging==24.1 + - pandas==2.2.2 + - pandocfilters==1.5.1 + - parso==0.8.4 + - partd==1.4.2 + - peewee==3.17.6 + - pexpect==4.9.0 + - pillow==10.4.0 + - platformdirs==4.2.2 + - pluggy==1.5.0 + - prometheus-client==0.20.0 + - prompt-toolkit==3.0.47 + - psutil==6.0.0 + - psycopg2-binary==2.9.9 + - ptyprocess==0.7.0 + - pure-eval==0.2.3 + - pybtex==0.24.0 + - pycparser==2.22 + - pygments==2.18.0 + - pyparsing==3.1.2 + - pytest==8.3.2 + - pytest-check==2.3.1 + - pytest-mock==3.14.0 + - pytest-timeout==2.3.1 + - python-dateutil==2.9.0.post0 + - python-json-logger==2.0.7 + - pytz==2024.1 + - pyyaml==6.0.2 + - pyzmq==26.2.0 + - qtconsole==5.5.2 + - qtpy==2.4.1 + - referencing==0.35.1 + - requests==2.32.3 + - result_caching @ git+https://github.com/brain-score/result_caching@master + - rfc3339-validator==0.1.4 + - rfc3986-validator==0.1.1 + - rpds-py==0.20.0 + - s3transfer==0.10.2 + - scikit-learn==1.5.1 + - scipy==1.14.1 + - send2trash==1.8.3 + - six==1.16.0 + - smmap==5.0.1 + - sniffio==1.3.1 + - soupsieve==2.6 + - stack-data==0.6.3 + - sympy==1.13.2 + - termcolor==2.4.0 + - terminado==0.18.1 + - threadpoolctl==3.5.0 + - tinycss2==1.3.0 + - toolz==0.12.1 + - torch==2.4.0 + - torchvision==0.19.0 + - tornado==6.4.1 + - tqdm==4.66.5 + - traitlets==5.14.3 + - types-python-dateutil==2.9.0.20240821 + - typing-extensions==4.12.2 + - tzdata==2024.1 + - uri-template==1.3.0 + - urllib3==2.2.2 + - wcwidth==0.2.13 + - webcolors==24.8.0 + - webencodings==0.5.1 + - websocket-client==1.8.0 + - widgetsnbextension==4.0.13 + - xarray==2022.3.0 + - zipp==3.20.0 diff --git a/pyproject.toml b/pyproject.toml index 3b28322e9..15b4de6d7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,17 +4,17 @@ [project] name = "brainscore_vision" -version = "2.0" +version = "2.1" description = "The Brain-Score library enables model comparisons to behavioral and neural experiments" authors = [] license = { 'file' = 'LICENSE' } readme = "README.md" -requires-python = ">=3.7" +requires-python = ">=3.11" dependencies = [ - "numpy>=1.17", - "brainscore_core @ git+https://github.com/brain-score/core", - "result_caching @ git+https://github.com/brain-score/result_caching", + "numpy<2", + "brainscore_core @ git+https://github.com/brain-score/core@main", + "result_caching @ git+https://github.com/brain-score/result_caching@master", "importlib-metadata<5", # workaround to https://github.com/brain-score/brainio/issues/28 "scikit-learn", # for metric_helpers/transformations.py cross-validation "scipy", # for benchmark_helpers/properties_common.py @@ -28,8 +28,8 @@ dependencies = [ "peewee", "psycopg2-binary", "networkx", - "decord", - "psutil" + "eva-decord", + "psutil", ] [project.optional-dependencies] @@ -40,9 +40,6 @@ test = [ "pytest-timeout", "torch", "torchvision", - "tensorflow==1.15", - "keras==2.3.1", - "protobuf<=3.20", # https://protobuf.dev/news/2022-05-06/#python-updates "matplotlib", # for examples "pytest-mock", ] diff --git a/tests/test_model_helpers/activations/test___init__.py b/tests/test_model_helpers/activations/test___init__.py index 99b36cb98..9bd012348 100644 --- a/tests/test_model_helpers/activations/test___init__.py +++ b/tests/test_model_helpers/activations/test___init__.py @@ -6,7 +6,7 @@ from pathlib import Path from brainio.stimuli import StimulusSet -from brainscore_vision.model_helpers.activations import KerasWrapper, PytorchWrapper, 
TensorflowSlimWrapper +from brainscore_vision.model_helpers.activations import PytorchWrapper from brainscore_vision.model_helpers.activations.core import flatten from brainscore_vision.model_helpers.activations.pca import LayerPCA @@ -93,74 +93,10 @@ def forward(self, x): return PytorchWrapper(model=MyTransformer(), preprocessing=preprocessing) -def keras_vgg19(): - import keras - from keras.applications.vgg19 import VGG19, preprocess_input - from brainscore_vision.model_helpers.activations.keras import load_images - keras.backend.clear_session() - preprocessing = lambda image_filepaths: preprocess_input(load_images(image_filepaths, image_size=224)) - return KerasWrapper(model=VGG19(), preprocessing=preprocessing) - - -def tfslim_custom(): - from brainscore_vision.model_helpers.activations.tensorflow import load_resize_image - import tensorflow as tf - slim = tf.contrib.slim - tf.compat.v1.reset_default_graph() - - image_size = 224 - placeholder = tf.compat.v1.placeholder(dtype=tf.string, shape=[64]) - preprocess = lambda image_path: load_resize_image(image_path, image_size) - preprocess = tf.map_fn(preprocess, placeholder, dtype=tf.float32) - - with tf.compat.v1.variable_scope('my_model', values=[preprocess]) as sc: - end_points_collection = sc.original_name_scope + '_end_points' - # Collect outputs for conv2d, fully_connected and max_pool2d. - with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d], - outputs_collections=[end_points_collection]): - net = slim.conv2d(preprocess, 64, [11, 11], 4, padding='VALID', scope='conv1') - net = slim.max_pool2d(net, [5, 5], 5, scope='pool1') - net = slim.max_pool2d(net, [3, 3], 2, scope='pool2') - net = slim.flatten(net, scope='flatten') - net = slim.fully_connected(net, 1000, scope='logits') - endpoints = slim.utils.convert_collection_to_dict(end_points_collection) - - session = tf.compat.v1.Session() - session.run(tf.compat.v1.initialize_all_variables()) - return TensorflowSlimWrapper(identifier='tf-custom', labels_offset=0, - endpoints=endpoints, inputs=placeholder, session=session) - - -def tfslim_vgg16(): - import tensorflow as tf - from nets import nets_factory - from preprocessing import vgg_preprocessing - from brainscore_vision.model_helpers.activations.tensorflow import load_resize_image - tf.compat.v1.reset_default_graph() - - image_size = 224 - placeholder = tf.compat.v1.placeholder(dtype=tf.string, shape=[64]) - preprocess_image = lambda image: vgg_preprocessing.preprocess_image( - image, image_size, image_size, resize_side_min=image_size) - preprocess = lambda image_path: preprocess_image(load_resize_image(image_path, image_size)) - preprocess = tf.map_fn(preprocess, placeholder, dtype=tf.float32) - - model_ctr = nets_factory.get_network_fn('vgg_16', num_classes=1001, is_training=False) - logits, endpoints = model_ctr(preprocess) - - session = tf.compat.v1.Session() - session.run(tf.compat.v1.initialize_all_variables()) - return TensorflowSlimWrapper(identifier='tf-vgg16', labels_offset=1, - endpoints=endpoints, inputs=placeholder, session=session) - - models_layers = [ pytest.param(pytorch_custom, ['linear', 'relu2']), pytest.param(pytorch_alexnet, ['features.12', 'classifier.5'], marks=pytest.mark.memory_intense), pytest.param(pytorch_transformer_substitute, ['relu1']), - pytest.param(keras_vgg19, ['block3_pool'], marks=pytest.mark.memory_intense), - pytest.param(tfslim_custom, ['my_model/pool2'], marks=pytest.mark.memory_intense), - pytest.param(tfslim_vgg16, ['vgg_16/pool5'], marks=pytest.mark.memory_intense), ] # 
exact microsaccades for pytorch_alexnet, grayscale.png, for 1 and 10 number_of_trials @@ -366,8 +302,6 @@ def test_exact_microsaccades(number_of_trials): @pytest.mark.memory_intense @pytest.mark.parametrize(["model_ctr", "internal_layers"], [ (pytorch_alexnet, ['features.12', 'classifier.5']), - (keras_vgg19, ['block3_pool']), - (tfslim_vgg16, ['vgg_16/pool5']), ]) def test_mixed_layer_logits(model_ctr, internal_layers): stimuli_paths = [os.path.join(os.path.dirname(__file__), 'rgb.jpg')] @@ -384,7 +318,6 @@ def test_mixed_layer_logits(model_ctr, internal_layers): @pytest.mark.parametrize(["model_ctr", "expected_identifier"], [ (pytorch_custom, 'MyModel'), (pytorch_alexnet, 'AlexNet'), - (keras_vgg19, 'vgg19'), ]) def test_infer_identifier(model_ctr, expected_identifier): model = model_ctr() From d8eddfc1062634616cdad4d2f1c69a43f5ab0cc4 Mon Sep 17 00:00:00 2001 From: Kartik Pradeepan Date: Tue, 10 Sep 2024 21:33:23 -0400 Subject: [PATCH 19/28] Sync master into develop (#1222) * Updated accuracy distance metric to accomodate condition-wise approaches (#1217) Co-authored-by: ddcoggan * Add temporal metrics; add temporal versions of MajajHong2015 (#1109) * feature: support temporal models for neural alignment by chaning TemporalIgnore to Temporal Aligned * add example temporal submission * complete new framework * new module: temporal model helpers * change the arch of temporal; add tutorials * improve: better naming * update: wrapper tutorial on brain model * add feature: inferencer identifier tracked by extractor for result caching * fix: video fps sampling; need more tests! * fix bugs: video sampling based on fps was wrong. * add mmaction2 models; add more features to the inferencers * PR: temporal model helpers * PR fix: not including gitmodules for now * Update brainscore_vision/model_helpers/brain_transformation/temporal.py Co-authored-by: Martin Schrimpf * Update brainscore_vision/model_helpers/brain_transformation/temporal.py Co-authored-by: Martin Schrimpf * Update brainscore_vision/model_helpers/brain_transformation/temporal.py Co-authored-by: Martin Schrimpf * Update brainscore_vision/models/temporal_models/test.py Co-authored-by: Martin Schrimpf * add mae_st; add ding2012 * try new arch * init ding2012 * add tests for temporal model helpers; add block inferencer * Delete tests/test_model_helpers/temporal/test___init__.py delete the old test * add benchmark ding2012 * add mutliple libs for temporal models * change executor output format; add more inference tests; init load_weight in s3 * add openstl * update backend for executor * feat:load_weight_file and corresponding test * change:resize strategy changed from bilinear to pooling * change:resize strategy changed from bilinear to pooling * fix mae_st submission * minor * fix:dtype in assembly time align * minor * update model submissions * fix dependency * refactor: simplify the inferencer methods * fix:block inferencer, neuroid coord while merging * fix:inferencer identifier * fix:weigh download * change tests to have max_workers=1 * revert screen.py * not submit region_layer_map * remove torch dependency * make fake modules in tests * add torch to requirements; avoid torch in tests * minor * minor * np.object changed to object * remove return in tests * fix insertion position bug * Apply suggestions from code review add: more type hints Co-authored-by: Martin Schrimpf * add: more type hints and comments * minor * pr:only commit temporal model helpers * pr: add one model for example * undo whole_brain in Brainodel.RecordingTarget 
* use logger and fix newlines * fix: video fps with copy was wrong * feat:fractional max_spatial_size * downsample layers in VideoMAE * fix:video sampling wrong duration * add more tests * fix merge * fix merge * module refactor; add more input test * add more temporal models * fix videomaev2 sha * fix:temporal_modelmae_st * change:video conservative loading; rename:image to pil image * fix:video last frame sampling; fix_time_naming * ignore pytest_cache * re-trigger tests * add joblib pool error management; fix video/image path recognizer * update: naming of failed to pickle func in joblibmapper * add temporal metric helpers * add temporal version of mamjajhong2015 * Update benchmark.py type hint * Update benchmark.py * Update brainscore_vision/metric_helpers/temporal.py Co-authored-by: Martin Schrimpf * Update brainscore_vision/metrics/internal_consistency/__init__.py Co-authored-by: Martin Schrimpf * Update benchmark.py --------- Co-authored-by: Yingtian Tang Co-authored-by: Martin Schrimpf Co-authored-by: Martin Schrimpf Co-authored-by: deirdre-k <95875723+deirdre-k@users.noreply.github.com> Co-authored-by: Michael Ferguson * Python 3.11 Migration (v2.1.0) (#1199) * changes necessary for 3.11 * change references of 3.7 to 3.11, remove tf and keras * fix geirhos data tests * Normalize deprecated * np.float is deprecated * update w/ master (#1066) * add r101_eBarlow_lmda_01_1 to models (#1037) Co-authored-by: AutoJenkins * update hmax requirements.txt (#1053) * update hmax requirements.txt to add torchvision (#1054) * fix mobilenet `pooler` layer prefix (#1055) * add r34_eMMCR_Mom_lmda_02_1 to models (#1061) Co-authored-by: AutoJenkins * add r101_eBarlow_lmda_02_1_copy to models (#1059) Co-authored-by: AutoJenkins * add r34_eMMCR_Mom_lmda_01_1 to models (#1060) Co-authored-by: AutoJenkins * add r34_eMMCR_Mom_Vanilla_1 to models (#1063) Co-authored-by: AutoJenkins * ignore time dimension in Bracci2019 (#1064) --------- Co-authored-by: Katherine Fairchild Co-authored-by: AutoJenkins Co-authored-by: Michael Ferguson Co-authored-by: Martin Schrimpf * update (#1093) * add r101_eBarlow_lmda_01_1 to models (#1037) Co-authored-by: AutoJenkins * update hmax requirements.txt (#1053) * update hmax requirements.txt to add torchvision (#1054) * fix mobilenet `pooler` layer prefix (#1055) * add r34_eMMCR_Mom_lmda_02_1 to models (#1061) Co-authored-by: AutoJenkins * add r101_eBarlow_lmda_02_1_copy to models (#1059) Co-authored-by: AutoJenkins * add r34_eMMCR_Mom_lmda_01_1 to models (#1060) Co-authored-by: AutoJenkins * add r34_eMMCR_Mom_Vanilla_1 to models (#1063) Co-authored-by: AutoJenkins * ignore time dimension in Bracci2019 (#1064) * Update behavior.py (#742) Co-authored-by: Sam Winebrake <85908068+samwinebrake@users.noreply.github.com> Co-authored-by: deirdre-k <95875723+deirdre-k@users.noreply.github.com> * add eBarlow_lmda_02_1_full to models (#1067) Co-authored-by: AutoJenkins * oddoneout: build full stimulus id index for non-numeric ids (#1068) * Add mobilevit_small - take 2 (#1051) * Add mobilevit_small - take 2 * Update brainscore_vision/models/mobilevit_small/model.py Co-authored-by: Martin Schrimpf --------- Co-authored-by: Martin Schrimpf Co-authored-by: Michael Ferguson * add yudixie_resnet18_240719_1 to models (#1070) Co-authored-by: AutoJenkins * add yudixie_resnet18_240719_0 to models (#1069) Co-authored-by: AutoJenkins * add yudixie_resnet18_240719_2 to models (#1071) Co-authored-by: AutoJenkins * add yudixie_resnet18_240719_10 to models (#1079) Co-authored-by: AutoJenkins --------- 
Co-authored-by: Katherine Fairchild Co-authored-by: AutoJenkins Co-authored-by: Michael Ferguson Co-authored-by: Martin Schrimpf Co-authored-by: Linus Sommer <95619282+linus-md@users.noreply.github.com> Co-authored-by: deirdre-k <95875723+deirdre-k@users.noreply.github.com> Co-authored-by: Kartik Pradeepan * remove keraswrapper * remove brendel * Trying a fix for travis * add necessary imports * Updating ubuntu version as xenial doesn't have 3.11 * Adding fix for pyproject.toml * Pin numpy below 2.0 * pointing to test branch for travis * update s3 bucket to brainscore-unittests * update s3 bucket to brainscore-unittests (rajalingham2020) * update s3 bucket to brainscore-unittests (sanghavi2020) * update folder name of models to folder_name="models-to-integrate-for-2.0" * update folder name of MAE model to folder_name="models-to-integrate-for-2.0" * update folder name of MAEv2 to folder_name="models-to-integrate-for-2.0" * update folder name of mae_st to folder_name="models-to-integrate-for-2.0" * PredNet commented out in init * remove prednet dead code * remove prednet code * scialom data: update image_id to stimulus_id * add number_of_trials keyword to rajalingham2018 * pointing to branch * Removing dash from python module name (#1118) * Removing dash from module name (#1119) * add require_variance keyword to precomputedfeatures call * add s3 download if not present to test helper * add missing s3 import * Update test_helper.py * pull item from ceiling Need to check with martin if this will break anything * update coggan benchmark to work w/ new pandas/python * add identifier function to pre computed features * forgot to add string line * update rajalingham2018 deprecated test * update w/ master (#1194) * add r101_eBarlow_lmda_01_1 to models (#1037) Co-authored-by: AutoJenkins * update hmax requirements.txt (#1053) * update hmax requirements.txt to add torchvision (#1054) * fix mobilenet `pooler` layer prefix (#1055) * add r34_eMMCR_Mom_lmda_02_1 to models (#1061) Co-authored-by: AutoJenkins * add r101_eBarlow_lmda_02_1_copy to models (#1059) Co-authored-by: AutoJenkins * add r34_eMMCR_Mom_lmda_01_1 to models (#1060) Co-authored-by: AutoJenkins * add r34_eMMCR_Mom_Vanilla_1 to models (#1063) Co-authored-by: AutoJenkins * ignore time dimension in Bracci2019 (#1064) * Update behavior.py (#742) Co-authored-by: Sam Winebrake <85908068+samwinebrake@users.noreply.github.com> Co-authored-by: deirdre-k <95875723+deirdre-k@users.noreply.github.com> * add eBarlow_lmda_02_1_full to models (#1067) Co-authored-by: AutoJenkins * oddoneout: build full stimulus id index for non-numeric ids (#1068) * Add mobilevit_small - take 2 (#1051) * Add mobilevit_small - take 2 * Update brainscore_vision/models/mobilevit_small/model.py Co-authored-by: Martin Schrimpf --------- Co-authored-by: Martin Schrimpf Co-authored-by: Michael Ferguson * add yudixie_resnet18_240719_1 to models (#1070) Co-authored-by: AutoJenkins * add yudixie_resnet18_240719_0 to models (#1069) Co-authored-by: AutoJenkins * add yudixie_resnet18_240719_2 to models (#1071) Co-authored-by: AutoJenkins * add yudixie_resnet18_240719_10 to models (#1079) Co-authored-by: AutoJenkins * Add cv_18_dagger_408_pretrained (#1104) * Add model files * Adjust timm version --------- Co-authored-by: Ethan Pellegrini * add eBarlow_lmda_02_200_full to models (#1121) Co-authored-by: AutoJenkins * remove old tutorials in favor of new link (#1170) --------- Co-authored-by: Katherine Fairchild Co-authored-by: AutoJenkins Co-authored-by: Michael Ferguson 
Co-authored-by: Martin Schrimpf Co-authored-by: Linus Sommer <95619282+linus-md@users.noreply.github.com> Co-authored-by: deirdre-k <95875723+deirdre-k@users.noreply.github.com> Co-authored-by: Kartik Pradeepan Co-authored-by: pellegreene <36171165+pellegreene@users.noreply.github.com> Co-authored-by: Ethan Pellegrini * environment lock for 2.1.0 * Rename environment.yml to environment_lock.yml * add test dependencies and move certain dependencies to pip * remove branch from core dependency * new env lock with all dependencies of env (including test) * add informative comment to env lock * Update .travis.yml * point to 3.11 branches * update pointers to 3.11 branches * add back check for forks in travis * remove f string from full_name variable * remove if/else that enabled tf * remove folder_name for models that were moved to correct directory * version less specific (2.1.0 to 2.1) * remove protobuf * remove python specification in setup.pys * Update mismatched s3 folder name * Update environment_lock.yml w/ main branches * update branch pointers to main --------- Co-authored-by: Katherine Fairchild Co-authored-by: AutoJenkins Co-authored-by: Michael Ferguson Co-authored-by: Martin Schrimpf Co-authored-by: Linus Sommer <95619282+linus-md@users.noreply.github.com> Co-authored-by: deirdre-k <95875723+deirdre-k@users.noreply.github.com> Co-authored-by: Kartik Pradeepan Co-authored-by: Deirdre Kelliher Co-authored-by: pellegreene <36171165+pellegreene@users.noreply.github.com> Co-authored-by: Ethan Pellegrini --------- Co-authored-by: David Coggan <06case_hoses@icloud.com> Co-authored-by: ddcoggan Co-authored-by: YingtianDt <90408985+YingtianDt@users.noreply.github.com> Co-authored-by: Yingtian Tang Co-authored-by: Martin Schrimpf Co-authored-by: Martin Schrimpf Co-authored-by: deirdre-k <95875723+deirdre-k@users.noreply.github.com> Co-authored-by: Michael Ferguson Co-authored-by: Sam Winebrake <85908068+samwinebrake@users.noreply.github.com> Co-authored-by: Katherine Fairchild Co-authored-by: AutoJenkins Co-authored-by: Linus Sommer <95619282+linus-md@users.noreply.github.com> Co-authored-by: Deirdre Kelliher Co-authored-by: pellegreene <36171165+pellegreene@users.noreply.github.com> Co-authored-by: Ethan Pellegrini --- .github/workflows/score_new_plugins.yml | 8 +- .readthedocs.yml | 2 +- .travis.yml | 13 +- README.md | 2 +- .../benchmark_helpers/__init__.py | 8 + .../benchmark_helpers/test_helper.py | 3 + .../coggan2024_behavior/benchmark.py | 12 +- .../domain_transfer_analysis.py | 7 +- brainscore_vision/benchmarks/kar2019/test.py | 2 +- .../benchmarks/majajhong2015/__init__.py | 5 + .../benchmarks/majajhong2015/benchmark.py | 44 ++++- .../benchmarks/rajalingham2018/test.py | 47 +---- .../benchmarks/rajalingham2020/test.py | 2 +- .../benchmarks/sanghavi2020/test.py | 2 +- brainscore_vision/data/geirhos2021/test.py | 4 +- brainscore_vision/data/scialom2024/test.py | 2 +- brainscore_vision/metric_helpers/temporal.py | 119 ++++++++++++ .../metric_helpers/xarray_utils.py | 59 ++++++ .../metrics/accuracy_distance/metric.py | 43 ++++- .../metrics/accuracy_distance/test.py | 15 ++ .../metrics/internal_consistency/__init__.py | 4 + .../metrics/internal_consistency/test.py | 2 +- brainscore_vision/metrics/ost/metric.py | 2 +- .../regression_correlation/__init__.py | 9 + .../metrics/regression_correlation/metric.py | 10 + .../model_helpers/activations/__init__.py | 2 - .../model_helpers/activations/core.py | 13 +- .../model_helpers/activations/keras.py | 92 --------- 
.../activations/temporal/inputs/base.py | 1 - .../model_helpers/activations/tensorflow.py | 71 ------- .../models/bp_resnet50_julios/setup.py | 1 - .../models/cornet_s_ynshah/setup.py | 1 - .../models/dbp_resnet50_julios/setup.py | 1 - .../models/eBarlow_Vanilla/setup.py | 1 - .../models/eBarlow_Vanilla_1/setup.py | 1 - .../models/eBarlow_Vanilla_2/setup.py | 1 - .../models/eBarlow_augself_linear_1/setup.py | 1 - .../models/eBarlow_augself_mlp_1/setup.py | 1 - .../models/eBarlow_lmda_0001_1/setup.py | 1 - .../models/eBarlow_lmda_001_1/setup.py | 1 - .../models/eBarlow_lmda_001_2/setup.py | 1 - .../models/eBarlow_lmda_001_3/setup.py | 1 - .../models/eBarlow_lmda_01/setup.py | 1 - .../models/eBarlow_lmda_01_1/setup.py | 1 - .../models/eBarlow_lmda_01_2/setup.py | 1 - .../models/eBarlow_lmda_02_1/setup.py | 1 - .../models/eBarlow_lmda_03_1/setup.py | 1 - .../models/eBarlow_lmda_04_1/setup.py | 1 - .../models/eBarlow_lmda_05_1/setup.py | 1 - .../models/eMMCR_Mom_Vanilla_1/setup.py | 1 - .../models/eMMCR_Mom_Vanilla_2/setup.py | 1 - .../models/eMMCR_Mom_lmda_0001_1/setup.py | 1 - .../models/eMMCR_Mom_lmda_001_1/setup.py | 1 - .../models/eMMCR_Mom_lmda_01_1/setup.py | 1 - .../models/eMMCR_Mom_lmda_01_2/setup.py | 1 - .../models/eMMCR_Mom_lmda_02_1/setup.py | 1 - .../models/eMMCR_Mom_lmda_03_1/setup.py | 1 - .../models/eMMCR_Mom_lmda_04_1/setup.py | 1 - .../models/eMMCR_Mom_lmda_05_1/setup.py | 1 - .../models/eMMCR_Vanilla/setup.py | 1 - .../models/eMMCR_VanillaV2/setup.py | 1 - .../models/eMMCR_Vanilla_1/setup.py | 1 - .../models/eMMCR_Vanilla_2/setup.py | 1 - .../models/eMMCR_lmda_01/setup.py | 1 - .../models/eMMCR_lmda_01V2/setup.py | 1 - .../models/eMMCR_lmda_01_1/setup.py | 1 - .../models/eMMCR_lmda_01_2/setup.py | 1 - .../models/eMMCR_lmda_01_3/setup.py | 1 - .../models/eSimCLR_Vanilla_1/setup.py | 1 - .../models/eSimCLR_Vanilla_2/setup.py | 1 - .../models/eSimCLR_lmda_0001_1/setup.py | 1 - .../models/eSimCLR_lmda_001_1/setup.py | 1 - .../models/eSimCLR_lmda_01_1/setup.py | 1 - .../models/eSimCLR_lmda_01_2/setup.py | 1 - .../models/eSimCLR_lmda_02_1/setup.py | 1 - .../models/eSimCLR_lmda_02_1_1/setup.py | 1 - .../models/eSimCLR_lmda_03_1/setup.py | 1 - .../models/eSimCLR_lmda_04_1/setup.py | 1 - .../models/eSimCLR_lmda_04_1_1/setup.py | 1 - .../models/eSimCLR_lmda_05_1/setup.py | 1 - .../model.py | 2 +- brainscore_vision/models/r50_tvpt/setup.py | 1 - .../models/resnet50_eMMCR_Vanilla/setup.py | 1 - .../models/resnet50_eMMCR_VanillaV2/setup.py | 1 - .../models/resnet50_eMMCR_eqp10_lm1/setup.py | 1 - .../models/resnet50_julios/setup.py | 1 - .../__init__.py | 0 .../model.py | 6 +- .../requirements.txt | 0 .../test.py | 0 .../models/temporal_model_GDT/model.py | 2 +- .../models/temporal_model_VideoMAEv2/model.py | 2 +- .../models/temporal_model_openstl/__init__.py | 1 - .../models/temporal_model_openstl/model.py | 19 +- .../models/temporal_model_openstl/test.py | 3 +- .../__init__.py | 0 .../model.py | 0 .../setup.py | 1 - .../test.py | 0 .../setup.py | 1 - .../setup.py | 1 - environment_lock.yml | 182 ++++++++++++++++++ pyproject.toml | 17 +- tests/test_metric_helpers/test_temporal.py | 80 ++++++++ .../activations/test___init__.py | 69 +------ 105 files changed, 629 insertions(+), 417 deletions(-) create mode 100644 brainscore_vision/metric_helpers/temporal.py delete mode 100644 brainscore_vision/model_helpers/activations/keras.py delete mode 100644 brainscore_vision/model_helpers/activations/tensorflow.py rename brainscore_vision/models/{temporal_model_AVID-CMA => 
temporal_model_AVID_CMA}/__init__.py (100%) rename brainscore_vision/models/{temporal_model_AVID-CMA => temporal_model_AVID_CMA}/model.py (94%) rename brainscore_vision/models/{temporal_model_AVID-CMA => temporal_model_AVID_CMA}/requirements.txt (100%) rename brainscore_vision/models/{temporal_model_AVID-CMA => temporal_model_AVID_CMA}/test.py (100%) rename brainscore_vision/models/{tv_efficientnet-b1 => tv_efficientnet_b1}/__init__.py (100%) rename brainscore_vision/models/{tv_efficientnet-b1 => tv_efficientnet_b1}/model.py (100%) rename brainscore_vision/models/{tv_efficientnet-b1 => tv_efficientnet_b1}/setup.py (92%) rename brainscore_vision/models/{tv_efficientnet-b1 => tv_efficientnet_b1}/test.py (100%) create mode 100644 environment_lock.yml create mode 100644 tests/test_metric_helpers/test_temporal.py diff --git a/.github/workflows/score_new_plugins.yml b/.github/workflows/score_new_plugins.yml index 8e4c8aec9..1f4c6a176 100644 --- a/.github/workflows/score_new_plugins.yml +++ b/.github/workflows/score_new_plugins.yml @@ -32,10 +32,10 @@ jobs: with: fetch-depth: 0 - - name: Set up Python 3.7 + - name: Set up Python 3.11 uses: actions/setup-python@v4 with: - python-version: 3.7 + python-version: 3.11 - name: Configure AWS Credentials uses: aws-actions/configure-aws-credentials@v1 @@ -176,10 +176,10 @@ jobs: - name: Check out repository code uses: actions/checkout@v4 - - name: Set up Python 3.7 + - name: Set up Python 3.11 uses: actions/setup-python@v4 with: - python-version: 3.7 + python-version: 3.11 - name: Build project run: | diff --git a/.readthedocs.yml b/.readthedocs.yml index 229a16285..ecc53316a 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -3,7 +3,7 @@ version: 2 build: os: "ubuntu-20.04" tools: - python: "3.7" + python: "3.11" python: install: diff --git a/.travis.yml b/.travis.yml index 69e9e9b03..75196cb31 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,5 +1,6 @@ version: ~> 1.0 language: python +dist: jammy env: global: - PYTEST_SETTINGS="not requires_gpu and not memory_intense and not slow and not travis_slow" @@ -9,7 +10,7 @@ env: - WEB_SUBMISSION="False" before_install: - pip install --upgrade pip -- pip install setuptools==60.5.0 +- pip install setuptools - pip install pytest # download large files - pip install awscli @@ -31,18 +32,18 @@ import: jobs: include: - - name: 3.7 public - python: '3.7.13' - - name: 3.7 private + - name: 3.11 public + python: '3.11' + - name: 3.11 private if: fork = false - python: '3.7.13' + python: '3.11' env: - PRIVATE_ACCESS=1 - secure: f1rWEwrslh7qa2g/QlKs001sGC3uaOxZNQSfNOPj+TMCqEo2c6OzImC4hyz+WqCyc6N/lFT4yYo2RhvaqStHMRmu/+9aZmuH05Bb0KQpfzNFA+yGa/U5WR3/4u6KRvDAeNEi9drT2LuacTyGbldmQsquujK0jrPpFWpe7zUUKv0zb0lJf0zcjeSrZlDXLlgD6DCqow7OqHRvW04dPZVy1OArRwtPV6DJ6Rqo1MqFQGHJ806VPlXhSoydb7a58dhGajqPjomdmZjhd3wS6Lv6uetTE/VVb4EP4e7n0qfZIx/TpnWG0SR44pcP7OCNARWYANsAivzxnQ0shyXnIzOo8ZcPYiPpt/5D53i5idTBxXyuDaHGQvgwuY5XLZzznEedBgZa4OvjxAXlLEQjdVDfSsZeYaV9gyFkeTlLnK1zvWi0US38eF2Qtm3Sx3D/5TtBKK2n38tyK5gg/XvJNycaXvIl7iVcnI2ifpqD1mUWI6C9j9Tk19/XEpWkwaFi91+0LZF1GhjBu8o3G5Np4RIOKXi3TIHkpbMM5mf11T6Bm9LvEMq1h8bgRQigEbeJF8CbUOSVFv+AaXsggGjQhuwdyvy2JZo+tO1nfhi+kW3XrDGPsz1R7Wfqduyn7UUh5OiFymeZwKseYKnwU47KyCqDwrq5Mnx1MlSidnVmPriadR4= - secure: 
WE7FPwy07VzJTKAd2xwZdBhtmh8jk7ojwk4B2rIcBQu0vwUXc1MgO8tBLD7s08lBedBjqZiLZEW31uPMEyWNysouDt16a5gm2d149LR7flI3MOifBtxINfJuC3eOEG65bPgN/bYEsIpLKnu3469d5nxZkK7xsjbWTxHGoUpLvVPsmHY2ZM5/jftybs7fI0do4NMG2XffKfZbiFb447Ao3xeQeEfW6IkJllzgGnlG9FJATFidrbwDNdmzAnvPEnDoKAf7ZvhPV0x9yR5V6P4Ck5hxl8mlPdBa1cRMO8s/1ag1c7YJ3AF9ZlwcwqTiGsT8DHTVRxSz4nFHJTMlrm9j84u7WzLZJBhPgF0UeLN3AQgiAZ3c2TFDvjQWeHVuSPkV5GrKlfhSvR82s9yPEdHQxxwYymBbAr6rJR4NtXTyZX0vg8NRKHssZKLSafs/D/pt9xXspqu8HAHc+mS0lCips79XptSr5BEsioil3D2io3tbzrGugpTeJ7oEA787vKn2Cm4XmhyQ0UBhvwsPZ351l27wZYuNV07o9Ik83hN/w4o2v899QQ/zbX42Iy8ZUCWOPX7MV7+TA7SMxru3qx7HL5hDM8kTetxbLB6Ckr+JOdX8L2Fb5L3TVDpsvfv0ebXgwaQR/ez8/7bcXmBqcERApHDz73HaMXUap+iDR4FLdXE= - AWS_DEFAULT_REGION=us-east-1 - stage: "Automerge check" - python: '3.7.13' + python: '3.11' install: - pip install --no-cache-dir torch torchvision --default-timeout=1000 --retries=5 - pip install --no-cache-dir -e ".[test]" diff --git a/README.md b/README.md index eae4d140f..e0552605e 100644 --- a/README.md +++ b/README.md @@ -22,7 +22,7 @@ To contribute, please [send in a pull request](https://github.com/brain-score/vi ## Local installation -You will need Python = 3.7 and pip >= 18.1. +You will need Python = 3.11 and pip >= 18.1. `pip install git+https://github.com/brain-score/vision` diff --git a/brainscore_vision/benchmark_helpers/__init__.py b/brainscore_vision/benchmark_helpers/__init__.py index eb36e50ca..7eb506115 100644 --- a/brainscore_vision/benchmark_helpers/__init__.py +++ b/brainscore_vision/benchmark_helpers/__init__.py @@ -1,6 +1,7 @@ from typing import Union import numpy as np +import hashlib from brainio.assemblies import NeuroidAssembly, DataAssembly from brainscore_core import Score @@ -18,6 +19,13 @@ def __init__(self, features: Union[DataAssembly, dict], visual_degrees): self.features = features self._visual_degrees = visual_degrees + @property + def identifier(self) -> str: + # serialize the features to a string and create hash + features_data = str(self.features) + features_hash = hashlib.md5(features_data.encode('utf-8')).hexdigest() + return f"precomputed-{features_hash}" + def visual_degrees(self) -> int: return self._visual_degrees diff --git a/brainscore_vision/benchmark_helpers/test_helper.py b/brainscore_vision/benchmark_helpers/test_helper.py index 6e3ad4a03..57d6461f6 100644 --- a/brainscore_vision/benchmark_helpers/test_helper.py +++ b/brainscore_vision/benchmark_helpers/test_helper.py @@ -7,6 +7,7 @@ from brainio.assemblies import NeuroidAssembly, PropertyAssembly from brainscore_vision import load_benchmark from brainscore_vision.model_interface import BrainModel +from brainscore_vision.data_helpers import s3 from . 
import PrecomputedFeatures @@ -68,6 +69,8 @@ def run_test_properties(self, benchmark: str, files: dict, expected: float): for current_stimulus in stimulus_identifiers: stimulus_set = load_stimulus_set(current_stimulus) path = Path(__file__).parent / files[current_stimulus] + s3.download_file_if_not_exists(local_path=path, + bucket='brainscore-unittests', remote_filepath=f'tests/test_benchmarks/{files[current_stimulus]}') features = PropertyAssembly.from_files(path, stimulus_set_identifier=stimulus_set.identifier, stimulus_set=stimulus_set) diff --git a/brainscore_vision/benchmarks/coggan2024_behavior/benchmark.py b/brainscore_vision/benchmarks/coggan2024_behavior/benchmark.py index 14db4121f..1a2fbbfae 100644 --- a/brainscore_vision/benchmarks/coggan2024_behavior/benchmark.py +++ b/brainscore_vision/benchmarks/coggan2024_behavior/benchmark.py @@ -75,9 +75,12 @@ def __call__(self, candidate: BrainModel) -> Score: data.model_prediction == data.object_class, dtype=int) # get correlation between model and human performance across conditions - performance = (data[data.visibility < 1] + performance = ( + data[data.visibility < 1] .groupby(['subject', 'occluder_type', 'occluder_color']) - .mean(['human_accuracy', 'model_accuracy'])).reset_index() + .mean(numeric_only=True) + .reset_index() + ) scores = performance.groupby('subject').apply( lambda df: np.corrcoef(df.human_accuracy, df.model_accuracy)[0, 1]) score = Score(np.mean(scores)) @@ -100,8 +103,9 @@ def get_noise_ceiling(performance: pd.DataFrame) -> Score: nc = [] for subject in performance.subject.unique(): performance_ind = performance[performance.subject == subject] - performance_grp = (performance[performance.subject != subject] - .groupby(['occluder_type', 'occluder_color']).mean()) + performance_grp = performance[performance.subject != subject] + numeric_cols = performance_grp.select_dtypes(include=np.number).columns + performance_grp = performance_grp.groupby(['occluder_type', 'occluder_color'])[numeric_cols].mean() merged_df = performance_ind.merge( performance_grp, on=['occluder_type', 'occluder_color']) nc.append(np.corrcoef(merged_df.human_accuracy_x, merged_df.human_accuracy_y)[0, 1]) diff --git a/brainscore_vision/benchmarks/igustibagus2024/domain_transfer_analysis.py b/brainscore_vision/benchmarks/igustibagus2024/domain_transfer_analysis.py index 9a8c07713..da3d662f2 100644 --- a/brainscore_vision/benchmarks/igustibagus2024/domain_transfer_analysis.py +++ b/brainscore_vision/benchmarks/igustibagus2024/domain_transfer_analysis.py @@ -5,6 +5,8 @@ import pandas as pd from sklearn.linear_model import RidgeClassifierCV from sklearn.model_selection import train_test_split +from sklearn.pipeline import Pipeline +from sklearn.preprocessing import StandardScaler from tqdm import tqdm # import brain-score specific libraries @@ -89,7 +91,10 @@ def __call__(self, candidate: BrainModel) -> Score: def OOD_AnalysisBenchmark(): return _OOD_AnalysisBenchmark( - classifier=RidgeClassifierCV(alphas=[0.0001, 0.001, 0.01, 0.1, 1, 10], fit_intercept=True, normalize=True) + classifier=Pipeline([ + ('scaler', StandardScaler()), + ('classifier', RidgeClassifierCV(alphas=[0.0001, 0.001, 0.01, 0.1, 1, 10], fit_intercept=True)) + ]) ) diff --git a/brainscore_vision/benchmarks/kar2019/test.py b/brainscore_vision/benchmarks/kar2019/test.py index b0fece327..34c15b9a9 100644 --- a/brainscore_vision/benchmarks/kar2019/test.py +++ b/brainscore_vision/benchmarks/kar2019/test.py @@ -24,7 +24,7 @@ def test_Kar2019ost_cornet_s(): filename = 'cornet_s-kar2019.nc' 
filepath = Path(__file__).parent / filename s3.download_file_if_not_exists(local_path=filepath, - bucket='brainio-brainscore', remote_filepath=f'tests/test_benchmarks/{filename}') + bucket='brainscore-unittests', remote_filepath=f'tests/test_benchmarks/{filename}') precomputed_features = NeuroidAssembly.from_files( filepath, stimulus_set_identifier=benchmark._assembly.stimulus_set.identifier, diff --git a/brainscore_vision/benchmarks/majajhong2015/__init__.py b/brainscore_vision/benchmarks/majajhong2015/__init__.py index 24fe8651e..5ae8988fd 100644 --- a/brainscore_vision/benchmarks/majajhong2015/__init__.py +++ b/brainscore_vision/benchmarks/majajhong2015/__init__.py @@ -11,3 +11,8 @@ benchmark_registry['MajajHong2015public.V4-pls'] = MajajHongV4PublicBenchmark benchmark_registry['MajajHong2015public.IT-pls'] = MajajHongITPublicBenchmark + +# temporal +from .benchmark import MajajHongV4TemporalPublicBenchmark, MajajHongITTemporalPublicBenchmark +benchmark_registry['MajajHong2015public.V4-temporal-pls'] = lambda: MajajHongV4TemporalPublicBenchmark(time_interval=10) +benchmark_registry['MajajHong2015public.IT-temporal-pls'] = lambda: MajajHongITTemporalPublicBenchmark(time_interval=10) diff --git a/brainscore_vision/benchmarks/majajhong2015/benchmark.py b/brainscore_vision/benchmarks/majajhong2015/benchmark.py index 766f5c93f..5270ab7af 100644 --- a/brainscore_vision/benchmarks/majajhong2015/benchmark.py +++ b/brainscore_vision/benchmarks/majajhong2015/benchmark.py @@ -1,7 +1,8 @@ from brainscore_core import Metric from brainscore_vision import load_metric, Ceiling, load_ceiling, load_dataset -from brainscore_vision.benchmark_helpers.neural_common import NeuralBenchmark, average_repetition +from brainscore_vision.benchmark_helpers.neural_common import NeuralBenchmark, average_repetition, apply_keep_attrs +from brainscore_vision.model_helpers.brain_transformation.temporal import assembly_time_align VISUAL_DEGREES = 8 NUMBER_OF_TRIALS = 50 @@ -20,13 +21,14 @@ eprint = {https://www.jneurosci.org/content/35/39/13402.full.pdf}, journal = {Journal of Neuroscience}}""" -pls_metric = lambda: load_metric('pls', crossvalidation_kwargs=dict(stratification_coord='object_name')) - +crossvalidation_kwargs = dict(stratification_coord='object_name') +pls_metric = lambda: load_metric('pls', crossvalidation_kwargs=crossvalidation_kwargs) +spantime_pls_metric = lambda: load_metric('spantime_pls', crossvalidation_kwargs=crossvalidation_kwargs) def _DicarloMajajHong2015Region(region: str, access: str, identifier_metric_suffix: str, - similarity_metric: Metric, ceiler: Ceiling): - assembly_repetition = load_assembly(average_repetitions=False, region=region, access=access) - assembly = load_assembly(average_repetitions=True, region=region, access=access) + similarity_metric: Metric, ceiler: Ceiling, time_interval: float = None): + assembly_repetition = load_assembly(average_repetitions=False, region=region, access=access, time_interval=time_interval) + assembly = load_assembly(average_repetitions=True, region=region, access=access, time_interval=time_interval) benchmark_identifier = f'MajajHong2015.{region}' + ('.public' if access == 'public' else '') return NeuralBenchmark(identifier=f'{benchmark_identifier}-{identifier_metric_suffix}', version=3, assembly=assembly, similarity_metric=similarity_metric, @@ -60,13 +62,35 @@ def MajajHongITPublicBenchmark(): ceiler=load_ceiling('internal_consistency')) -def load_assembly(average_repetitions, region, access='private'): - assembly = 
load_dataset(f'MajajHong2015.{access}') +def MajajHongV4TemporalPublicBenchmark(time_interval: float = None): + return _DicarloMajajHong2015Region(region='V4', access='public', identifier_metric_suffix='pls', + similarity_metric=spantime_pls_metric(), time_interval=time_interval, + ceiler=load_ceiling('internal_consistency_temporal')) + + +def MajajHongITTemporalPublicBenchmark(time_interval: float = None): + return _DicarloMajajHong2015Region(region='IT', access='public', identifier_metric_suffix='pls', + similarity_metric=spantime_pls_metric(), time_interval=time_interval, + ceiler=load_ceiling('internal_consistency_temporal')) + + +def load_assembly(average_repetitions: bool, region: str, access: str = 'private', time_interval: float = None): + temporal = time_interval is not None + if not temporal: + assembly = load_dataset(f'MajajHong2015.{access}') + assembly = assembly.squeeze("time_bin") + else: + assembly = load_dataset(f'MajajHong2015.temporal.{access}') + assembly = assembly.__class__(assembly) + target_time_bins = [ + (t, t+time_interval) for t in range(0, assembly.time_bin_end.max().item()-time_interval, time_interval) + ] + assembly = apply_keep_attrs(assembly, lambda assembly: assembly_time_align(assembly, target_time_bins)) + assembly = assembly.sel(region=region) assembly['region'] = 'neuroid', [region] * len(assembly['neuroid']) - assembly = assembly.squeeze("time_bin") assembly.load() - assembly = assembly.transpose('presentation', 'neuroid') + assembly = assembly.transpose('presentation', 'neuroid', ...) if average_repetitions: assembly = average_repetition(assembly) return assembly diff --git a/brainscore_vision/benchmarks/rajalingham2018/test.py b/brainscore_vision/benchmarks/rajalingham2018/test.py index 7a7e96388..2ff9d38a0 100644 --- a/brainscore_vision/benchmarks/rajalingham2018/test.py +++ b/brainscore_vision/benchmarks/rajalingham2018/test.py @@ -7,7 +7,7 @@ from pytest import approx from brainio.assemblies import BehavioralAssembly -from brainscore_vision import benchmark_registry, load_benchmark, load_metric +from brainscore_vision import benchmark_registry, load_benchmark, load_metric, load_model from brainscore_vision.benchmark_helpers import PrecomputedFeatures from brainscore_vision.benchmark_helpers.test_helper import VisualDegreesTests, NumberOfTrialsTests from brainscore_vision.benchmarks.rajalingham2018 import DicarloRajalingham2018I2n @@ -115,44 +115,11 @@ class TestMetricScore: @pytest.mark.parametrize(['model', 'expected_score'], [ ('alexnet', .253), - ('resnet34', .37787), - ('resnet18', .3638), + ('resnet50_tutorial', 0.348), + ('pixels', 0.0139) ]) def test_model(self, model, expected_score): - class UnceiledBenchmark(_DicarloRajalingham2018): - def __init__(self): - metric = load_metric('i2n') - super(UnceiledBenchmark, self).__init__(metric=metric, metric_identifier='i2n') - - def __call__(self, candidate: BrainModel): - candidate.start_task(BrainModel.Task.probabilities, self._fitting_stimuli) - probabilities = candidate.look_at(self._assembly.stimulus_set) - score = self._metric(probabilities, self._assembly) - return score - - benchmark = UnceiledBenchmark() - # features - feature_responses = xr.load_dataarray(Path(__file__).parent / 'test_resources' / - f'identifier={model},stimuli_identifier=objectome-240.nc') - feature_responses['stimulus_id'] = 'stimulus_path', [os.path.splitext(os.path.basename(path))[0] - for path in feature_responses['stimulus_path'].values] - feature_responses = feature_responses.stack(presentation=['stimulus_path']) 
- assert len(np.unique(feature_responses['layer'])) == 1 # only penultimate layer - - class PrecomputedFeatures: - def __init__(self, precomputed_features): - self.features = precomputed_features - - def __call__(self, stimuli, layers): - np.testing.assert_array_equal(layers, ['behavioral-layer']) - self_stimulus_ids = self.features['stimulus_id'].values.tolist() - indices = [self_stimulus_ids.index(stimulus_id) for stimulus_id in stimuli['stimulus_id'].values] - features = self.features[{'presentation': indices}] - return features - - # evaluate candidate - transformation = ProbabilitiesMapping(identifier=f'TestI2N.{model}', - activations_model=PrecomputedFeatures(feature_responses), - layer='behavioral-layer') - score = benchmark(transformation) - assert score == approx(expected_score, abs=0.005), f"expected {expected_score}, but got {score}" + benchmark = load_benchmark('Rajalingham2018-i2n') + model = load_model(model) + score = benchmark(model) + assert score.raw == approx(expected_score, abs=0.005), f"expected {expected_score}, but got {score.raw}" diff --git a/brainscore_vision/benchmarks/rajalingham2020/test.py b/brainscore_vision/benchmarks/rajalingham2020/test.py index 6af813946..40b6226d5 100644 --- a/brainscore_vision/benchmarks/rajalingham2020/test.py +++ b/brainscore_vision/benchmarks/rajalingham2020/test.py @@ -35,5 +35,5 @@ def test_Rajalingham2020(benchmark, expected): filename = 'alexnet-rajalingham2020-features.12.nc' filepath = Path(__file__).parent / filename s3.download_file_if_not_exists(local_path=filepath, - bucket='brainio-brainscore', remote_filepath=f'tests/test_benchmarks/{filename}') + bucket='brainscore-unittests', remote_filepath=f'tests/test_benchmarks/{filename}') precomputed_test.run_test(benchmark=benchmark, precomputed_features_filepath=filepath, expected=expected) diff --git a/brainscore_vision/benchmarks/sanghavi2020/test.py b/brainscore_vision/benchmarks/sanghavi2020/test.py index b65f08f63..ac6fe79b3 100644 --- a/brainscore_vision/benchmarks/sanghavi2020/test.py +++ b/brainscore_vision/benchmarks/sanghavi2020/test.py @@ -66,7 +66,7 @@ def test_self_regression(benchmark, visual_degrees, expected): def test_model_features(benchmark, filename, expected): filepath = Path(__file__).parent / filename s3.download_file_if_not_exists(local_path=filepath, - bucket='brainio-brainscore', remote_filepath=f'tests/test_benchmarks/{filename}') + bucket='brainscore-unittests', remote_filepath=f'tests/test_benchmarks/{filename}') precomputed_test.run_test(benchmark=benchmark, precomputed_features_filepath=filepath, expected=expected) diff --git a/brainscore_vision/data/geirhos2021/test.py b/brainscore_vision/data/geirhos2021/test.py index bdc2052af..41762008d 100644 --- a/brainscore_vision/data/geirhos2021/test.py +++ b/brainscore_vision/data/geirhos2021/test.py @@ -62,7 +62,7 @@ def test_stimulus_set_assembly_alignment(self, identifier, field): full_name = f"Geirhos2021_{identifier}" assembly = load_dataset(full_name) assert assembly.stimulus_set is not None - assert assembly.stimulus_set.identifier == f"{full_name}" + assert assembly.stimulus_set.identifier == full_name assert set(assembly.stimulus_set[field]) == set(assembly[field].values) # test the number of subjects: @@ -236,7 +236,7 @@ def test_stimulus_set_exist(self, identifier): full_name = f"Geirhos2021_{identifier}" stimulus_set = load_stimulus_set(full_name) assert stimulus_set is not None - assert stimulus_set.identifier == full_name + assert stimulus_set.identifier == f"{full_name}" # test the number 
of images @pytest.mark.parametrize('identifier, num_images', [ diff --git a/brainscore_vision/data/scialom2024/test.py b/brainscore_vision/data/scialom2024/test.py index dbc38b3b3..657376d1a 100644 --- a/brainscore_vision/data/scialom2024/test.py +++ b/brainscore_vision/data/scialom2024/test.py @@ -258,7 +258,7 @@ def test_stimulus_set_exists(self, identifier): ]) def test_number_of_images(self, identifier, num_images): stimulus_set = load_stimulus_set(identifier) - assert len(np.unique(stimulus_set['image_id'].values)) == num_images + assert len(np.unique(stimulus_set['stimulus_id'].values)) == num_images # test assembly coords present in ALL 17 sets: @pytest.mark.parametrize('identifier', [ diff --git a/brainscore_vision/metric_helpers/temporal.py b/brainscore_vision/metric_helpers/temporal.py new file mode 100644 index 000000000..0c110b9f2 --- /dev/null +++ b/brainscore_vision/metric_helpers/temporal.py @@ -0,0 +1,119 @@ +import xarray as xr +import numpy as np + +from brainscore_vision.benchmark_helpers.neural_common import Score +from brainscore_vision.metric_helpers.transformations import standard_error_of_the_mean + +from .xarray_utils import apply_over_dims, recursive_op + + +# take the mean of scores (medians of single neuron scores) over time + + +def average_over_presentation(score: Score) -> Score: + raw = score + score = raw.mean('presentation') + score.attrs['raw'] = raw + return score + + +# PerOps is applied to every slice/chunk of the xarray along the specified dimensions +class PerOps: + def __init__(self, callable, dims, check_coords=[]): + # for coordinate checking, they are supposed to be the same across assemblies + self.dims = dims + self.callable = callable + self.check_coords = check_coords + + def __call__(self, *asms): + for check_coord in self.check_coords: + asms = [asm.sortby(check_coord) for asm in asms] + for asm in asms[1:]: + assert (asm[check_coord].values == asms[0][check_coord].values).all() + ret = apply_over_dims(self.callable, *asms, dims=self.dims) + return ret + + +# SpanOps aggregates specified dimensions to one dimension +class SpanOps: + def __init__(self, callable, source_dims, aggregated_dim, resample=False): + # if resample, randomly choose samples from the aggregated dimension, + # whose size is the same as the assembly.sizes[aggregated_dim] + self.source_dims = source_dims + self.aggregated_dim = aggregated_dim + self.callable = callable + self.resample = resample + + def __call__(self, *asms): + asms = [self._stack(asm) for asm in asms] + return self.callable(*asms) + + def _stack(self, assembly): + assembly_type = type(assembly) + size = assembly.sizes[self.aggregated_dim] + assembly = xr.DataArray(assembly) # xarray cannot deal with stacking MultiIndex (pydata/xarray#1554) + assembly = assembly.reset_index(self.source_dims) + assembly = assembly.rename({dim:dim+"_" for dim in self.source_dims}) # we'll call stacked timebins "presentation" + assembly = assembly.stack({self.aggregated_dim : [dim+"_" for dim in self.source_dims]}) + if self.resample: + indices = np.random.randint(0, assembly.sizes[self.aggregated_dim], size) + assembly = assembly.isel({self.aggregated_dim: indices}) + return assembly_type(assembly) + +class PerTime(PerOps): + def __init__(self, callable, time_dim="time_bin", check_coord="time_bin_start", **kwargs): + self.time_bin = time_dim + super().__init__(callable, dims=[time_dim], check_coords=[check_coord], **kwargs) + +class PerPresentation(PerOps): + def __init__(self, callable, presentation_dim="presentation", 
check_coord="stimulus_id", **kwargs): + self.presentation_dim = presentation_dim + super().__init__(callable, dims=[presentation_dim], check_coords=[check_coord], **kwargs) + +class PerNeuroid(PerOps): + def __init__(self, callable, neuroid_dim="neuroid", check_coord="neuroid_id", **kwargs): + self.neuroid_dim = neuroid_dim + super().__init__(callable, dims=[neuroid_dim], check_coords=[check_coord], **kwargs) + +class SpanTime(SpanOps): + def __init__(self, callable, time_dim="time_bin", presentation_dim="presentation", resample=False): + self.time_dim = time_dim + self.presentation_dim = presentation_dim + source_dims = [self.time_dim, self.presentation_dim] + aggregated_dim = self.presentation_dim + super().__init__(callable, source_dims, aggregated_dim, resample=resample) + +class SpanTimeRegression: + """ + Fits a regression with weights shared across the time bins. + """ + + def __init__(self, regression): + self._regression = regression + + def fit(self, source, target): + assert (source['time_bin'].values == target['time_bin'].values).all() + SpanTime(self._regression.fit)(source, target) + + def predict(self, source): + return PerTime(self._regression.predict)(source) + +class PerTimeRegression: + """ + Fits a regression with different weights for each time bins. + """ + + def __init__(self, regression): + self._regression = regression + + def fit(self, source, target): + # Lazy fit until predict + assert (source['time_bin'].values == target['time_bin'].values).all() + self._train_source = source + self._train_target = target + + def predict(self, source): + def fit_predict(train_source, train_target, test_source): + self._regression.fit(train_source, train_target) + return self._regression.predict(test_source) + return PerTime(fit_predict)(self._train_source, self._train_target, source) \ No newline at end of file diff --git a/brainscore_vision/metric_helpers/xarray_utils.py b/brainscore_vision/metric_helpers/xarray_utils.py index ce67654ff..8998b6003 100644 --- a/brainscore_vision/metric_helpers/xarray_utils.py +++ b/brainscore_vision/metric_helpers/xarray_utils.py @@ -1,4 +1,5 @@ import numpy as np +import xarray as xr from brainio.assemblies import NeuroidAssembly, array_is_element, walk_coords from brainscore_vision.metric_helpers import Defaults @@ -90,3 +91,61 @@ def __call__(self, prediction, target): for coord, dims, values in walk_coords(target) if dims == neuroid_dims}, dims=neuroid_dims) return result + + +# ops that also applies to attrs (and attrs of attrs), which are xarrays +def recursive_op(*arrs, op=lambda x:x): + # the attrs structure of each arr must be the same + val = op(*arrs) + attrs = arrs[0].attrs + for attr in attrs: + attr_val = arrs[0].attrs[attr] + if isinstance(attr_val, xr.DataArray): + attr_arrs = [arr.attrs[attr] for arr in arrs] + attr_val = recursive_op(*attr_arrs, op=op) + val.attrs[attr] = attr_val + return val + + +# apply a callable to every slice of the xarray along the specified dimensions +def apply_over_dims(callable, *asms, dims, njobs=-1): + asms = [asm.transpose(*dims, ...) 
for asm in asms] + sizes = [asms[0].sizes[dim] for dim in dims] + + def apply_helper(sizes, dims, *asms): + xarr = [] + attrs = {} + size = sizes[0] + rsizes = sizes[1:] + dim = dims[0] + rdims = dims[1:] + + if len(sizes) == 1: + # parallel execution on the last applied dimension + from joblib import Parallel, delayed + results = Parallel(n_jobs=njobs)(delayed(callable)(*[asm.isel({dim:s}) for asm in asms]) for s in range(size)) + else: + results = [] + for s in range(size): + arr = apply_helper(rsizes, rdims, *[asm.isel({dim:s}) for asm in asms]) + results.append(arr) + + for arr in results: + if arr is not None: + for k,v in arr.attrs.items(): + assert isinstance(v, xr.DataArray) + attrs.setdefault(k, []).append(v.expand_dims(dim)) + xarr.append(arr) + + if not xarr: + return + else: + xarr = xr.concat(xarr, dim=dim) + attrs = {k: xr.concat(vs, dim=dim) for k,vs in attrs.items()} + xarr.coords[dim] = asms[0].coords[dim] + for k,v in attrs.items(): + attrs[k].coords[dim] = asms[0].coords[dim] + xarr.attrs[k] = attrs[k] + return xarr + + return apply_helper(sizes, dims, *asms) \ No newline at end of file diff --git a/brainscore_vision/metrics/accuracy_distance/metric.py b/brainscore_vision/metrics/accuracy_distance/metric.py index fb31a7280..eb47e3bba 100644 --- a/brainscore_vision/metrics/accuracy_distance/metric.py +++ b/brainscore_vision/metrics/accuracy_distance/metric.py @@ -10,17 +10,52 @@ class AccuracyDistance(Metric): """ - Computes the accuracy distance using the relative distance between the source and target accuracies, adjusted - for the maximum possible difference between the two accuracies. + Computes the accuracy distance using the relative distance between the + source and target accuracies, adjusted for the maximum possible + difference between the two accuracies. By default, the distance is computed + from a single accuracy score on the entire BehavioralAssembly. However, + the distance can also be computed on a condition-wise basis using the + 'variables' argument. The advantage of the condition-wise approach is that + it can separate two models with identical overall accuracy if one exhibits a + more target-like pattern of performance across conditions. 
""" - def __call__(self, source: BehavioralAssembly, target: BehavioralAssembly) -> Score: + def __call__(self, source: BehavioralAssembly, target: + BehavioralAssembly, variables: tuple=()) -> Score: """Target should be the entire BehavioralAssembly, containing truth values.""" subjects = self.extract_subjects(target) subject_scores = [] for subject in subjects: subject_assembly = target.sel(subject=subject) - subject_score = self.compare_single_subject(source, subject_assembly) + + # compute single score across the entire dataset + if len(variables) == 0: + subject_score = self.compare_single_subject(source, subject_assembly) + + # compute scores for each condition, then average + else: + cond_scores = [] + + # get iterator across all combinations of variables + if len(variables) == 1: + conditions = set(subject_assembly[variables[0]].values) + conditions = [[c] for c in conditions] # to mimic itertools.product + else: + conditions = itertools.product( + *[set(subject_assembly[v].values) for v in variables]) + + # loop over conditions and compute scores + for cond in conditions: + indexers = {v: cond[i] for i, v in enumerate(variables)} + subject_cond_assembly = subject_assembly.sel(**indexers) + source_cond_assembly = source.sel(**indexers) + # to accomodate unbalanced designs, skip combinations of + # variables that don't exist in both assemblies + if len(subject_cond_assembly) and len(source_cond_assembly): + cond_scores.append(self.compare_single_subject( + source_cond_assembly, subject_cond_assembly)) + subject_score = Score(np.mean(cond_scores)) + subject_score = subject_score.expand_dims('subject') subject_score['subject'] = 'subject', [subject] subject_scores.append(subject_score) diff --git a/brainscore_vision/metrics/accuracy_distance/test.py b/brainscore_vision/metrics/accuracy_distance/test.py index 2fc15b792..d6414b790 100644 --- a/brainscore_vision/metrics/accuracy_distance/test.py +++ b/brainscore_vision/metrics/accuracy_distance/test.py @@ -12,6 +12,20 @@ def test_score(): assert score == approx(0.74074074) +def test_score_single_variable(): + assembly = _make_data() + metric = load_metric('accuracy_distance') + score = metric(assembly.sel(subject='C'), assembly, ('condition',)) + assert score == approx(0.55555556) + + +def test_score_multi_variable(): + assembly = _make_data() + metric = load_metric('accuracy_distance') + score = metric(assembly.sel(subject='C'), assembly, ('condition','animacy')) + assert score == approx(0.55555556) + + def test_has_error(): assembly = _make_data() metric = load_metric('accuracy_distance') @@ -38,5 +52,6 @@ def _make_data(): coords={'stimulus_id': ('presentation', np.resize(np.arange(9), 9 * 3)), 'truth': ('presentation', np.resize(['dog', 'cat', 'chair'], 9 * 3)), 'condition': ('presentation', np.resize([1, 1, 1, 2, 2, 2, 3, 3, 3], 9 * 3)), + 'animacy': ('presentation', np.resize(['animate', 'animate', 'inanimate'], 9 * 3)), 'subject': ('presentation', ['A'] * 9 + ['B'] * 9 + ['C'] * 9)}, dims=['presentation']) diff --git a/brainscore_vision/metrics/internal_consistency/__init__.py b/brainscore_vision/metrics/internal_consistency/__init__.py index bd71776be..ae6a41ea6 100644 --- a/brainscore_vision/metrics/internal_consistency/__init__.py +++ b/brainscore_vision/metrics/internal_consistency/__init__.py @@ -1,4 +1,8 @@ from brainscore_vision import metric_registry from .ceiling import InternalConsistency +from brainscore_vision.metric_helpers.temporal import PerTime + + metric_registry['internal_consistency'] = InternalConsistency 
+metric_registry['internal_consistency_temporal'] = lambda *args, **kwargs: PerTime(InternalConsistency(*args, **kwargs))
\ No newline at end of file
diff --git a/brainscore_vision/metrics/internal_consistency/test.py b/brainscore_vision/metrics/internal_consistency/test.py
index 6ccd597c3..3c00657fb 100644
--- a/brainscore_vision/metrics/internal_consistency/test.py
+++ b/brainscore_vision/metrics/internal_consistency/test.py
@@ -19,7 +19,7 @@ def test_dummy_data(self):
                                 dims=['presentation', 'neuroid'])
         ceiler = load_ceiling('internal_consistency')
         ceiling = ceiler(data)
-        assert ceiling == 1
+        assert ceiling.item() == approx(1, abs=1e-8)
 
 
 class TestSplitHalfConsistency:
diff --git a/brainscore_vision/metrics/ost/metric.py b/brainscore_vision/metrics/ost/metric.py
index 7093781e7..92f7eb9ed 100644
--- a/brainscore_vision/metrics/ost/metric.py
+++ b/brainscore_vision/metrics/ost/metric.py
@@ -63,7 +63,7 @@ def compute_osts(self, train_source, test_source, test_osts):
                 break  # stop early if threshold is already hit for every image
 
         # interpolate
-        predicted_osts = np.empty(len(test_osts), dtype=np.float)
+        predicted_osts = np.empty(len(test_osts), dtype=np.float64)
         predicted_osts[:] = np.nan
         for i, (last_ost, hit_ost) in enumerate(zip(last_osts, hit_osts)):
             if hit_ost is None:
diff --git a/brainscore_vision/metrics/regression_correlation/__init__.py b/brainscore_vision/metrics/regression_correlation/__init__.py
index 2f8019b3f..691e82685 100644
--- a/brainscore_vision/metrics/regression_correlation/__init__.py
+++ b/brainscore_vision/metrics/regression_correlation/__init__.py
@@ -11,6 +11,15 @@
 metric_registry['linear_predictivity'] = lambda *args, **kwargs: CrossRegressedCorrelation(
     regression=linear_regression(), correlation=pearsonr_correlation(), *args, **kwargs)
 
+# temporal metrics
+from .metric import SpanTimeCrossRegressedCorrelation
+
+metric_registry['spantime_pls'] = lambda *args, **kwargs: SpanTimeCrossRegressedCorrelation(
+    regression=pls_regression(), correlation=pearsonr_correlation(), *args, **kwargs)
+metric_registry['spantime_ridge'] = lambda *args, **kwargs: SpanTimeCrossRegressedCorrelation(
+    regression=ridge_regression(), correlation=pearsonr_correlation(), *args, **kwargs)
+
+
 BIBTEX = """@article{schrimpf2018brain,
   title={Brain-score: Which artificial neural network for object recognition is most brain-like?},
   author={Schrimpf, Martin and Kubilius, Jonas and Hong, Ha and Majaj, Najib J and Rajalingham, Rishi and Issa, Elias B and Kar, Kohitij and Bashivan, Pouya and Prescott-Roy, Jonathan and Geiger, Franziska and others},
diff --git a/brainscore_vision/metrics/regression_correlation/metric.py b/brainscore_vision/metrics/regression_correlation/metric.py
index 365f63868..a09ba03e0 100644
--- a/brainscore_vision/metrics/regression_correlation/metric.py
+++ b/brainscore_vision/metrics/regression_correlation/metric.py
@@ -8,6 +8,7 @@
 from brainscore_core.metrics import Metric, Score
 from brainscore_vision.metric_helpers.transformations import CrossValidation
 from brainscore_vision.metric_helpers.xarray_utils import XarrayRegression, XarrayCorrelation
+from brainscore_vision.metric_helpers.temporal import SpanTimeRegression, PerTime
 
 
 class CrossRegressedCorrelation(Metric):
@@ -65,6 +66,15 @@ def predict(self, X):
         return Ypred
 
 
+# make the crc to consider time as a sample dimension
+def SpanTimeCrossRegressedCorrelation(regression, correlation, *args, **kwargs):
+    return CrossRegressedCorrelation(
+        regression=SpanTimeRegression(regression),
+
correlation=PerTime(correlation), + *args, **kwargs + ) + + def pls_regression(regression_kwargs=None, xarray_kwargs=None): regression_defaults = dict(n_components=25, scale=False) regression_kwargs = {**regression_defaults, **(regression_kwargs or {})} diff --git a/brainscore_vision/model_helpers/activations/__init__.py b/brainscore_vision/model_helpers/activations/__init__.py index 10f514697..40a84e464 100644 --- a/brainscore_vision/model_helpers/activations/__init__.py +++ b/brainscore_vision/model_helpers/activations/__init__.py @@ -1,3 +1 @@ -from brainscore_vision.model_helpers.activations.keras import KerasWrapper, preprocess as preprocess_keras from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper, preprocess_images as preprocess_pytorch -from brainscore_vision.model_helpers.activations.tensorflow import TensorflowWrapper, TensorflowSlimWrapper diff --git a/brainscore_vision/model_helpers/activations/core.py b/brainscore_vision/model_helpers/activations/core.py index a9f537250..58f8baefc 100644 --- a/brainscore_vision/model_helpers/activations/core.py +++ b/brainscore_vision/model_helpers/activations/core.py @@ -348,7 +348,7 @@ def translate_images(self, images: List[Union[str, np.ndarray]], image_paths: Li """ Translate images according to selected microsaccades, if microsaccades are required. - :param images: A list of images (in the case of tensorflow models), or a list of arrays (non-tf models). + :param images: A list of arrays. :param image_paths: A list of image paths. Both `image_paths` and `images` are needed since while both tf and non-tf models preprocess images before this point, non-tf models' preprocessed images are fixed as arrays when fed into here. As such, simply returning `image_paths` for @@ -519,14 +519,9 @@ def translate(image: np.array, shift: Tuple[float, float], image_shape: Tuple[in return translated_image @staticmethod - def get_image_with_shape(image: Union[str, np.ndarray]) -> Tuple[np.array, Tuple[int, int], bool]: - if isinstance(image, str): # tf models return strings after preprocessing - image = cv2.imread(image) - rows, cols, _ = image.shape # cv2 uses height, width, channels - image_is_channels_first = False - else: - _, rows, cols, = image.shape # pytorch and keras use channels, height, width - image_is_channels_first = True + def get_image_with_shape(image: np.ndarray) -> Tuple[np.array, Tuple[int, int], bool]: + _, rows, cols, = image.shape # pytorch uses channels, height, width + image_is_channels_first = True return image, (rows, cols), image_is_channels_first @staticmethod diff --git a/brainscore_vision/model_helpers/activations/keras.py b/brainscore_vision/model_helpers/activations/keras.py deleted file mode 100644 index 8d1acf4d7..000000000 --- a/brainscore_vision/model_helpers/activations/keras.py +++ /dev/null @@ -1,92 +0,0 @@ -from collections import OrderedDict - -import numpy as np - -from brainscore_vision.model_helpers.activations.core import ActivationsExtractorHelper - - -class KerasWrapper: - def __init__(self, model, preprocessing, identifier=None, *args, **kwargs): - """ - :param model: a keras model with a function `preprocess_input` - that will later be called on the loaded numpy image - """ - self._model = model - identifier = identifier or model.name - self._extractor = ActivationsExtractorHelper( - identifier=identifier, get_activations=self.get_activations, preprocessing=preprocessing, - *args, **kwargs) - self._extractor.insert_attrs(self) - - @property - def identifier(self): - return 
self._extractor.identifier - - @identifier.setter - def identifier(self, value): - self._extractor.identifier = value - - def __call__(self, *args, **kwargs): # cannot assign __call__ as attribute due to Python convention - return self._extractor(*args, **kwargs) - - def get_activations(self, images, layer_names): - from keras import backend as K - input_tensor = self._model.input - layers = [layer for layer in self._model.layers if layer.name in layer_names] - layers = sorted(layers, key=lambda layer: layer_names.index(layer.name)) - if 'logits' in layer_names: - layers.insert(layer_names.index('logits'), self._model.layers[-1]) - assert len(layers) == len(layer_names) - layer_out_tensors = [layer.output for layer in layers] - functor = K.function([input_tensor] + [K.learning_phase()], layer_out_tensors) # evaluate all tensors at once - layer_outputs = functor([images, 0.]) # 0 to signal testing phase - return OrderedDict([(layer_name, layer_output) for layer_name, layer_output in zip(layer_names, layer_outputs)]) - - def __repr__(self): - return repr(self._model) - - def graph(self): - import networkx as nx - g = nx.DiGraph() - for layer in self._model.layers: - g.add_node(layer.name, object=layer, type=type(layer)) - for outbound_node in layer._outbound_nodes: - g.add_edge(layer.name, outbound_node.outbound_layer.name) - return g - - -def load_images(image_filepaths, image_size): - images = [load_image(image_filepath) for image_filepath in image_filepaths] - images = [scale_image(image, image_size) for image in images] - return np.array(images) - - -def load_image(image_filepath): - try: # keras API before tensorflow 2.9.1 - from keras.preprocessing.image import load_img - from keras.preprocessing.image import img_to_array - except ImportError: - from tensorflow.keras.utils import load_img - from tensorflow.keras.utils import img_to_array - img = load_img(image_filepath) - x = img_to_array(img) - return x - - -def scale_image(img, image_size): - from PIL import Image - try: # keras API before tensorflow 2.9.1 - from keras.preprocessing.image import img_to_array - except ImportError: - from tensorflow.keras.utils import img_to_array - img = Image.fromarray(img.astype(np.uint8)) - img = img.resize((image_size, image_size)) - img = img_to_array(img) - return img - - -def preprocess(image_filepaths, image_size, *args, **kwargs): - # only a wrapper to avoid top-level keras imports - from keras.applications.imagenet_utils import preprocess_input - images = load_images(image_filepaths, image_size=image_size) - return preprocess_input(images, *args, **kwargs) diff --git a/brainscore_vision/model_helpers/activations/temporal/inputs/base.py b/brainscore_vision/model_helpers/activations/temporal/inputs/base.py index d656a86b7..c94ccd3d7 100644 --- a/brainscore_vision/model_helpers/activations/temporal/inputs/base.py +++ b/brainscore_vision/model_helpers/activations/temporal/inputs/base.py @@ -15,4 +15,3 @@ def is_video_path(path: Union[str, Path]) -> bool: def is_image_path(path: Union[str, Path]) -> bool: extension = path.split('.')[-1].lower() return extension in ['jpg', 'jpeg', 'png', 'bmp', 'tiff'] - \ No newline at end of file diff --git a/brainscore_vision/model_helpers/activations/tensorflow.py b/brainscore_vision/model_helpers/activations/tensorflow.py deleted file mode 100644 index d5e4864d5..000000000 --- a/brainscore_vision/model_helpers/activations/tensorflow.py +++ /dev/null @@ -1,71 +0,0 @@ -from collections import OrderedDict - -from brainscore_vision.model_helpers.activations.core 
import ActivationsExtractorHelper - - -class TensorflowWrapper: - def __init__(self, identifier, inputs, endpoints: dict, session, *args, **kwargs): - import tensorflow as tf - self._inputs = inputs - self._endpoints = endpoints - self._session = session or tf.compat.v1.Session() - self._extractor = ActivationsExtractorHelper(identifier=identifier, get_activations=self.get_activations, - preprocessing=None, *args, **kwargs) - self._extractor.insert_attrs(self) - - @property - def identifier(self): - return self._extractor.identifier - - @identifier.setter - def identifier(self, value): - self._extractor.identifier = value - - def __call__(self, *args, **kwargs): # cannot assign __call__ as attribute due to Python convention - return self._extractor(*args, **kwargs) - - def get_activations(self, images, layer_names): - layer_tensors = OrderedDict((layer, self._endpoints[ - layer if (layer != 'logits' or layer in self._endpoints) else next(reversed(self._endpoints))]) - for layer in layer_names) - layer_outputs = self._session.run(layer_tensors, feed_dict={self._inputs: images}) - return layer_outputs - - def graph(self): - import networkx as nx - g = nx.DiGraph() - for name, layer in self._endpoints.items(): - g.add_node(name, object=layer, type=type(layer)) - g.add_node("logits", object=self.logits, type=type(self.logits)) - return g - - -class TensorflowSlimWrapper(TensorflowWrapper): - def __init__(self, *args, labels_offset=1, **kwargs): - super(TensorflowSlimWrapper, self).__init__(*args, **kwargs) - self._labels_offset = labels_offset - - def get_activations(self, images, layer_names): - layer_outputs = super(TensorflowSlimWrapper, self).get_activations(images, layer_names) - if 'logits' in layer_outputs: - layer_outputs['logits'] = layer_outputs['logits'][:, self._labels_offset:] - return layer_outputs - - -def load_image(image_filepath): - import tensorflow as tf - image = tf.io.read_file(image_filepath) - image = tf.image.decode_png(image, channels=3) - return image - - -def resize_image(image, image_size): - import tensorflow as tf - image = tf.image.resize(image, (image_size, image_size)) - return image - - -def load_resize_image(image_path, image_size): - image = load_image(image_path) - image = resize_image(image, image_size) - return image diff --git a/brainscore_vision/models/bp_resnet50_julios/setup.py b/brainscore_vision/models/bp_resnet50_julios/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/bp_resnet50_julios/setup.py +++ b/brainscore_vision/models/bp_resnet50_julios/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/cornet_s_ynshah/setup.py b/brainscore_vision/models/cornet_s_ynshah/setup.py index 68362b48b..aa18ce8a3 100644 --- a/brainscore_vision/models/cornet_s_ynshah/setup.py +++ b/brainscore_vision/models/cornet_s_ynshah/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/dbp_resnet50_julios/setup.py b/brainscore_vision/models/dbp_resnet50_julios/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/dbp_resnet50_julios/setup.py +++ b/brainscore_vision/models/dbp_resnet50_julios/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License 
:: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_Vanilla/setup.py b/brainscore_vision/models/eBarlow_Vanilla/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_Vanilla/setup.py +++ b/brainscore_vision/models/eBarlow_Vanilla/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_Vanilla_1/setup.py b/brainscore_vision/models/eBarlow_Vanilla_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_Vanilla_1/setup.py +++ b/brainscore_vision/models/eBarlow_Vanilla_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_Vanilla_2/setup.py b/brainscore_vision/models/eBarlow_Vanilla_2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_Vanilla_2/setup.py +++ b/brainscore_vision/models/eBarlow_Vanilla_2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_augself_linear_1/setup.py b/brainscore_vision/models/eBarlow_augself_linear_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_augself_linear_1/setup.py +++ b/brainscore_vision/models/eBarlow_augself_linear_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_augself_mlp_1/setup.py b/brainscore_vision/models/eBarlow_augself_mlp_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_augself_mlp_1/setup.py +++ b/brainscore_vision/models/eBarlow_augself_mlp_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_lmda_0001_1/setup.py b/brainscore_vision/models/eBarlow_lmda_0001_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_lmda_0001_1/setup.py +++ b/brainscore_vision/models/eBarlow_lmda_0001_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_lmda_001_1/setup.py b/brainscore_vision/models/eBarlow_lmda_001_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_lmda_001_1/setup.py +++ b/brainscore_vision/models/eBarlow_lmda_001_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_lmda_001_2/setup.py b/brainscore_vision/models/eBarlow_lmda_001_2/setup.py index 
421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_lmda_001_2/setup.py +++ b/brainscore_vision/models/eBarlow_lmda_001_2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_lmda_001_3/setup.py b/brainscore_vision/models/eBarlow_lmda_001_3/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_lmda_001_3/setup.py +++ b/brainscore_vision/models/eBarlow_lmda_001_3/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_lmda_01/setup.py b/brainscore_vision/models/eBarlow_lmda_01/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_lmda_01/setup.py +++ b/brainscore_vision/models/eBarlow_lmda_01/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_lmda_01_1/setup.py b/brainscore_vision/models/eBarlow_lmda_01_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_lmda_01_1/setup.py +++ b/brainscore_vision/models/eBarlow_lmda_01_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_lmda_01_2/setup.py b/brainscore_vision/models/eBarlow_lmda_01_2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_lmda_01_2/setup.py +++ b/brainscore_vision/models/eBarlow_lmda_01_2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_lmda_02_1/setup.py b/brainscore_vision/models/eBarlow_lmda_02_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_lmda_02_1/setup.py +++ b/brainscore_vision/models/eBarlow_lmda_02_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_lmda_03_1/setup.py b/brainscore_vision/models/eBarlow_lmda_03_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_lmda_03_1/setup.py +++ b/brainscore_vision/models/eBarlow_lmda_03_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_lmda_04_1/setup.py b/brainscore_vision/models/eBarlow_lmda_04_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_lmda_04_1/setup.py +++ b/brainscore_vision/models/eBarlow_lmda_04_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], 
test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_lmda_05_1/setup.py b/brainscore_vision/models/eBarlow_lmda_05_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_lmda_05_1/setup.py +++ b/brainscore_vision/models/eBarlow_lmda_05_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Mom_Vanilla_1/setup.py b/brainscore_vision/models/eMMCR_Mom_Vanilla_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Mom_Vanilla_1/setup.py +++ b/brainscore_vision/models/eMMCR_Mom_Vanilla_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Mom_Vanilla_2/setup.py b/brainscore_vision/models/eMMCR_Mom_Vanilla_2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Mom_Vanilla_2/setup.py +++ b/brainscore_vision/models/eMMCR_Mom_Vanilla_2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Mom_lmda_0001_1/setup.py b/brainscore_vision/models/eMMCR_Mom_lmda_0001_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Mom_lmda_0001_1/setup.py +++ b/brainscore_vision/models/eMMCR_Mom_lmda_0001_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Mom_lmda_001_1/setup.py b/brainscore_vision/models/eMMCR_Mom_lmda_001_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Mom_lmda_001_1/setup.py +++ b/brainscore_vision/models/eMMCR_Mom_lmda_001_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Mom_lmda_01_1/setup.py b/brainscore_vision/models/eMMCR_Mom_lmda_01_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Mom_lmda_01_1/setup.py +++ b/brainscore_vision/models/eMMCR_Mom_lmda_01_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Mom_lmda_01_2/setup.py b/brainscore_vision/models/eMMCR_Mom_lmda_01_2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Mom_lmda_01_2/setup.py +++ b/brainscore_vision/models/eMMCR_Mom_lmda_01_2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Mom_lmda_02_1/setup.py b/brainscore_vision/models/eMMCR_Mom_lmda_02_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Mom_lmda_02_1/setup.py +++ 
b/brainscore_vision/models/eMMCR_Mom_lmda_02_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Mom_lmda_03_1/setup.py b/brainscore_vision/models/eMMCR_Mom_lmda_03_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Mom_lmda_03_1/setup.py +++ b/brainscore_vision/models/eMMCR_Mom_lmda_03_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Mom_lmda_04_1/setup.py b/brainscore_vision/models/eMMCR_Mom_lmda_04_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Mom_lmda_04_1/setup.py +++ b/brainscore_vision/models/eMMCR_Mom_lmda_04_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Mom_lmda_05_1/setup.py b/brainscore_vision/models/eMMCR_Mom_lmda_05_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Mom_lmda_05_1/setup.py +++ b/brainscore_vision/models/eMMCR_Mom_lmda_05_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Vanilla/setup.py b/brainscore_vision/models/eMMCR_Vanilla/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Vanilla/setup.py +++ b/brainscore_vision/models/eMMCR_Vanilla/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_VanillaV2/setup.py b/brainscore_vision/models/eMMCR_VanillaV2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_VanillaV2/setup.py +++ b/brainscore_vision/models/eMMCR_VanillaV2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Vanilla_1/setup.py b/brainscore_vision/models/eMMCR_Vanilla_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Vanilla_1/setup.py +++ b/brainscore_vision/models/eMMCR_Vanilla_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Vanilla_2/setup.py b/brainscore_vision/models/eMMCR_Vanilla_2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Vanilla_2/setup.py +++ b/brainscore_vision/models/eMMCR_Vanilla_2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_lmda_01/setup.py 
b/brainscore_vision/models/eMMCR_lmda_01/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_lmda_01/setup.py +++ b/brainscore_vision/models/eMMCR_lmda_01/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_lmda_01V2/setup.py b/brainscore_vision/models/eMMCR_lmda_01V2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_lmda_01V2/setup.py +++ b/brainscore_vision/models/eMMCR_lmda_01V2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_lmda_01_1/setup.py b/brainscore_vision/models/eMMCR_lmda_01_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_lmda_01_1/setup.py +++ b/brainscore_vision/models/eMMCR_lmda_01_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_lmda_01_2/setup.py b/brainscore_vision/models/eMMCR_lmda_01_2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_lmda_01_2/setup.py +++ b/brainscore_vision/models/eMMCR_lmda_01_2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_lmda_01_3/setup.py b/brainscore_vision/models/eMMCR_lmda_01_3/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_lmda_01_3/setup.py +++ b/brainscore_vision/models/eMMCR_lmda_01_3/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_Vanilla_1/setup.py b/brainscore_vision/models/eSimCLR_Vanilla_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_Vanilla_1/setup.py +++ b/brainscore_vision/models/eSimCLR_Vanilla_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_Vanilla_2/setup.py b/brainscore_vision/models/eSimCLR_Vanilla_2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_Vanilla_2/setup.py +++ b/brainscore_vision/models/eSimCLR_Vanilla_2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_lmda_0001_1/setup.py b/brainscore_vision/models/eSimCLR_lmda_0001_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_lmda_0001_1/setup.py +++ b/brainscore_vision/models/eSimCLR_lmda_0001_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming 
Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_lmda_001_1/setup.py b/brainscore_vision/models/eSimCLR_lmda_001_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_lmda_001_1/setup.py +++ b/brainscore_vision/models/eSimCLR_lmda_001_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_lmda_01_1/setup.py b/brainscore_vision/models/eSimCLR_lmda_01_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_lmda_01_1/setup.py +++ b/brainscore_vision/models/eSimCLR_lmda_01_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_lmda_01_2/setup.py b/brainscore_vision/models/eSimCLR_lmda_01_2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_lmda_01_2/setup.py +++ b/brainscore_vision/models/eSimCLR_lmda_01_2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_lmda_02_1/setup.py b/brainscore_vision/models/eSimCLR_lmda_02_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_lmda_02_1/setup.py +++ b/brainscore_vision/models/eSimCLR_lmda_02_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_lmda_02_1_1/setup.py b/brainscore_vision/models/eSimCLR_lmda_02_1_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_lmda_02_1_1/setup.py +++ b/brainscore_vision/models/eSimCLR_lmda_02_1_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_lmda_03_1/setup.py b/brainscore_vision/models/eSimCLR_lmda_03_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_lmda_03_1/setup.py +++ b/brainscore_vision/models/eSimCLR_lmda_03_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_lmda_04_1/setup.py b/brainscore_vision/models/eSimCLR_lmda_04_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_lmda_04_1/setup.py +++ b/brainscore_vision/models/eSimCLR_lmda_04_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_lmda_04_1_1/setup.py b/brainscore_vision/models/eSimCLR_lmda_04_1_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_lmda_04_1_1/setup.py +++ 
b/brainscore_vision/models/eSimCLR_lmda_04_1_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_lmda_05_1/setup.py b/brainscore_vision/models/eSimCLR_lmda_05_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_lmda_05_1/setup.py +++ b/brainscore_vision/models/eSimCLR_lmda_05_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py b/brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py index 41c6ca79e..d3eaf9c94 100644 --- a/brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py +++ b/brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py @@ -1,7 +1,7 @@ import functools import torch -from brainscore_vision.model_helpers.activations import PytorchWrapper, KerasWrapper +from brainscore_vision.model_helpers.activations import PytorchWrapper from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images from brainscore_vision.model_helpers.s3 import load_weight_file from PIL import Image diff --git a/brainscore_vision/models/r50_tvpt/setup.py b/brainscore_vision/models/r50_tvpt/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/r50_tvpt/setup.py +++ b/brainscore_vision/models/r50_tvpt/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/resnet50_eMMCR_Vanilla/setup.py b/brainscore_vision/models/resnet50_eMMCR_Vanilla/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/resnet50_eMMCR_Vanilla/setup.py +++ b/brainscore_vision/models/resnet50_eMMCR_Vanilla/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/resnet50_eMMCR_VanillaV2/setup.py b/brainscore_vision/models/resnet50_eMMCR_VanillaV2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/resnet50_eMMCR_VanillaV2/setup.py +++ b/brainscore_vision/models/resnet50_eMMCR_VanillaV2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/setup.py b/brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/setup.py +++ b/brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/resnet50_julios/setup.py 
b/brainscore_vision/models/resnet50_julios/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/resnet50_julios/setup.py +++ b/brainscore_vision/models/resnet50_julios/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/temporal_model_AVID-CMA/__init__.py b/brainscore_vision/models/temporal_model_AVID_CMA/__init__.py similarity index 100% rename from brainscore_vision/models/temporal_model_AVID-CMA/__init__.py rename to brainscore_vision/models/temporal_model_AVID_CMA/__init__.py diff --git a/brainscore_vision/models/temporal_model_AVID-CMA/model.py b/brainscore_vision/models/temporal_model_AVID_CMA/model.py similarity index 94% rename from brainscore_vision/models/temporal_model_AVID-CMA/model.py rename to brainscore_vision/models/temporal_model_AVID_CMA/model.py index 60d91f690..a67eb3b43 100644 --- a/brainscore_vision/models/temporal_model_AVID-CMA/model.py +++ b/brainscore_vision/models/temporal_model_AVID_CMA/model.py @@ -29,7 +29,7 @@ def get_model(identifier): cfg_path = os.path.join(HOME, "configs/main/avid-cma/audioset/InstX-N1024-PosW-N64-Top32.yaml") weight_path = load_weight_file( bucket="brainscore-vision", - relative_path="temporal_model_AVID-CMA/AVID-CMA_Audioset_InstX-N1024-PosW-N64-Top32_checkpoint.pth.tar", + relative_path="temporal_model_AVID_CMA/AVID-CMA_Audioset_InstX-N1024-PosW-N64-Top32_checkpoint.pth.tar", version_id="jSaZgbUohM0ZeoEUUKZiLBo6iz_v8VvQ", sha1="9db5eba9aab6bdbb74025be57ab532df808fe3f6" ) @@ -38,7 +38,7 @@ def get_model(identifier): cfg_path = os.path.join(HOME, "configs/main/avid/kinetics/Cross-N1024.yaml") weight_path = load_weight_file( bucket="brainscore-vision", - relative_path="temporal_model_AVID-CMA/AVID_Kinetics_Cross-N1024_checkpoint.pth.tar", + relative_path="temporal_model_AVID_CMA/AVID_Kinetics_Cross-N1024_checkpoint.pth.tar", version_id="XyKt0UOUFsuuyrl6ZREivK8FadRPx34u", sha1="d3a04f856d29421ba8de37808593a3fad4d4794f" ) @@ -47,7 +47,7 @@ def get_model(identifier): cfg_path = os.path.join(HOME, "configs/main/avid/audioset/Cross-N1024.yaml") weight_path = load_weight_file( bucket="brainscore-vision", - relative_path="temporal_model_AVID-CMA/AVID_Audioset_Cross-N1024_checkpoint.pth.tar", + relative_path="temporal_model_AVID_CMA/AVID_Audioset_Cross-N1024_checkpoint.pth.tar", version_id="0Sxuhn8LsYXQC4FnPfJ7rw7uU6kDlKgc", sha1="b48d8428a1a2526ccca070f810333df18bfce5fd" ) diff --git a/brainscore_vision/models/temporal_model_AVID-CMA/requirements.txt b/brainscore_vision/models/temporal_model_AVID_CMA/requirements.txt similarity index 100% rename from brainscore_vision/models/temporal_model_AVID-CMA/requirements.txt rename to brainscore_vision/models/temporal_model_AVID_CMA/requirements.txt diff --git a/brainscore_vision/models/temporal_model_AVID-CMA/test.py b/brainscore_vision/models/temporal_model_AVID_CMA/test.py similarity index 100% rename from brainscore_vision/models/temporal_model_AVID-CMA/test.py rename to brainscore_vision/models/temporal_model_AVID_CMA/test.py diff --git a/brainscore_vision/models/temporal_model_GDT/model.py b/brainscore_vision/models/temporal_model_GDT/model.py index 624a5b29b..9a0c057c7 100644 --- a/brainscore_vision/models/temporal_model_GDT/model.py +++ b/brainscore_vision/models/temporal_model_GDT/model.py @@ -69,4 +69,4 @@ def get_model(identifier): # "base.fc": "C", # no fc } - return 
PytorchWrapper(identifier, model, transform_video, fps=30, layer_activation_format=layer_activation_format) \ No newline at end of file + return PytorchWrapper(identifier, model, transform_video, fps=30, layer_activation_format=layer_activation_format) diff --git a/brainscore_vision/models/temporal_model_VideoMAEv2/model.py b/brainscore_vision/models/temporal_model_VideoMAEv2/model.py index 7e785513e..355b8e8b2 100644 --- a/brainscore_vision/models/temporal_model_VideoMAEv2/model.py +++ b/brainscore_vision/models/temporal_model_VideoMAEv2/model.py @@ -54,7 +54,7 @@ def get_model(identifier): bucket="brainscore-vision", relative_path="temporal_model_VideoMAEv2/vit_g_hybrid_pt_1200e.pth", version_id="TxtkfbeMV105dzpzTwi0Kn5glnvQvIrq", - sha1="9048f2bc0b0c7ba4d0e5228f3a7c0bef4dbaca69", + sha1="9048f2bc0b0c7ba4d0e5228f3a7c0bef4dbaca69" ) num_blocks = 40 feature_map_size = 16 diff --git a/brainscore_vision/models/temporal_model_openstl/__init__.py b/brainscore_vision/models/temporal_model_openstl/__init__.py index 2b49cc845..9ea9b66b1 100644 --- a/brainscore_vision/models/temporal_model_openstl/__init__.py +++ b/brainscore_vision/models/temporal_model_openstl/__init__.py @@ -13,7 +13,6 @@ def commit_model(identifier): model_registry["ConvLSTM"] = lambda: commit_model("ConvLSTM") model_registry["PredRNN"] = lambda: commit_model("PredRNN") -# model_registry["PredNet"] = lambda: commit_model("PredNet") model_registry["SimVP"] = lambda: commit_model("SimVP") model_registry["TAU"] = lambda: commit_model("TAU") model_registry["MIM"] = lambda: commit_model("MIM") diff --git a/brainscore_vision/models/temporal_model_openstl/model.py b/brainscore_vision/models/temporal_model_openstl/model.py index aed3e0464..de5c93803 100644 --- a/brainscore_vision/models/temporal_model_openstl/model.py +++ b/brainscore_vision/models/temporal_model_openstl/model.py @@ -105,23 +105,6 @@ def process_output(layer, layer_name, inputs, output): kwargs = {} weight_name = "kitticaltech_predrnn_one_ep100.pth" - elif identifier == "PredNet": - layer_activation_format = { - **{f"layer{i}": "TCHW" for i in range(4)}, - "layer5": "TCHW" - } - - def process_output(layer, layer_name, inputs, output): - if layer_name.startswith("cell_list"): - h, c = output - return c - else: - return output - - wrapper_cls = LSTMWrapper - kwargs = {} - weight_name = "kitticaltech_prednet_one_ep100.pth" - elif identifier == "ConvLSTM": layer_activation_format = { **{f"cell_list.{i}": "TCHW" for i in range(4)}, @@ -220,4 +203,4 @@ def transform_video_simvp(video): return wrapper_cls(identifier, model, transform_video, fps=KITTI_FPS, layer_activation_format=layer_activation_format, - process_output=process_output, **kwargs) \ No newline at end of file + process_output=process_output, **kwargs) diff --git a/brainscore_vision/models/temporal_model_openstl/test.py b/brainscore_vision/models/temporal_model_openstl/test.py index 4d52b76ce..c4090a314 100644 --- a/brainscore_vision/models/temporal_model_openstl/test.py +++ b/brainscore_vision/models/temporal_model_openstl/test.py @@ -6,7 +6,6 @@ model_list = [ "ConvLSTM", "PredRNN", - "PredNet", "SimVP", "TAU", "MIM" @@ -17,4 +16,4 @@ @pytest.mark.parametrize("model_identifier", model_list) def test_load(model_identifier): model = load_model(model_identifier) - assert model is not None \ No newline at end of file + assert model is not None diff --git a/brainscore_vision/models/tv_efficientnet-b1/__init__.py b/brainscore_vision/models/tv_efficientnet_b1/__init__.py similarity index 100% rename from 
brainscore_vision/models/tv_efficientnet-b1/__init__.py rename to brainscore_vision/models/tv_efficientnet_b1/__init__.py diff --git a/brainscore_vision/models/tv_efficientnet-b1/model.py b/brainscore_vision/models/tv_efficientnet_b1/model.py similarity index 100% rename from brainscore_vision/models/tv_efficientnet-b1/model.py rename to brainscore_vision/models/tv_efficientnet_b1/model.py diff --git a/brainscore_vision/models/tv_efficientnet-b1/setup.py b/brainscore_vision/models/tv_efficientnet_b1/setup.py similarity index 92% rename from brainscore_vision/models/tv_efficientnet-b1/setup.py rename to brainscore_vision/models/tv_efficientnet_b1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/tv_efficientnet-b1/setup.py +++ b/brainscore_vision/models/tv_efficientnet_b1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/tv_efficientnet-b1/test.py b/brainscore_vision/models/tv_efficientnet_b1/test.py similarity index 100% rename from brainscore_vision/models/tv_efficientnet-b1/test.py rename to brainscore_vision/models/tv_efficientnet_b1/test.py diff --git a/brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/setup.py b/brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/setup.py +++ b/brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/setup.py b/brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/setup.py +++ b/brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/environment_lock.yml b/environment_lock.yml new file mode 100644 index 000000000..9847267d3 --- /dev/null +++ b/environment_lock.yml @@ -0,0 +1,182 @@ +# This environment_lock file is associated with the move to brainscore_vision 2.1.0. This lock includes all testing dependencies and dependencies +# from adjacent repositories. 
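+# Illustrative usage (an assumption, not part of the lock itself): with a standard
+# conda/miniconda install, this environment can be materialized and activated via
+#   conda env create -f environment_lock.yml
+#   conda activate brainscore_env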
+ +name: brainscore_env +channels: + - defaults + - conda-forge +dependencies: + - bzip2=1.0.8 + - ca-certificates=2024.7.2 + - libffi=3.4.4 + - ncurses=6.4 + - openssl=3.0.14 + - pip=24.2 + - python=3.11.9 + - readline=8.2 + - setuptools=72.1.0 + - sqlite=3.45.3 + - tk=8.6.14 + - wheel=0.43.0 + - xz=5.4.6 + - zlib=1.2.13 + - pip: + - anyio==4.4.0 + - appnope==0.1.4 + - argon2-cffi==23.1.0 + - argon2-cffi-bindings==21.2.0 + - arrow==1.3.0 + - asttokens==2.4.1 + - async-lru==2.0.4 + - attrs==24.2.0 + - babel==2.16.0 + - beautifulsoup4==4.12.3 + - bleach==6.1.0 + - boto3==1.35.3 + - botocore==1.35.3 + - brainio @ git+https://github.com/brain-score/brainio.git@main + - brainscore_core @ git+https://github.com/brain-score/core@main + - brainscore-vision @ git+https://github.com/brain-score/vision.git@main + - certifi==2024.7.4 + - cffi==1.17.0 + - cftime==1.6.4 + - charset-normalizer==3.3.2 + - click==8.1.7 + - cloudpickle==3.0.0 + - comm==0.2.2 + - contourpy==1.2.1 + - cycler==0.12.1 + - dask==2024.8.1 + - debugpy==1.8.5 + - decorator==5.1.1 + - defusedxml==0.7.1 + - entrypoints==0.4 + - eva-decord==0.6.1 + - executing==2.0.1 + - fastjsonschema==2.20.0 + - filelock==3.15.4 + - fire==0.6.0 + - fonttools==4.53.1 + - fqdn==1.5.1 + - fsspec==2024.6.1 + - gitdb==4.0.11 + - gitpython==3.1.43 + - h11==0.14.0 + - h5py==3.11.0 + - httpcore==1.0.5 + - httpx==0.27.0 + - idna==3.7 + - importlib-metadata==4.13.0 + - iniconfig==2.0.0 + - ipykernel==6.29.5 + - ipython==8.26.0 + - ipywidgets==8.1.5 + - isoduration==20.11.0 + - jedi==0.19.1 + - jinja2==3.1.4 + - jmespath==1.0.1 + - joblib==1.4.2 + - json5==0.9.25 + - jsonpointer==3.0.0 + - jsonschema==4.23.0 + - jsonschema-specifications==2023.12.1 + - jupyter==1.0.0 + - jupyter-client==8.6.2 + - jupyter-console==6.6.3 + - jupyter-core==5.7.2 + - jupyter-events==0.10.0 + - jupyter-lsp==2.2.5 + - jupyter-server==2.14.2 + - jupyter-server-terminals==0.5.3 + - jupyterlab==4.2.4 + - jupyterlab-pygments==0.3.0 + - jupyterlab-server==2.27.3 + - jupyterlab-widgets==3.0.13 + - kiwisolver==1.4.5 + - latexcodec==3.0.0 + - locket==1.0.0 + - markupsafe==2.1.5 + - matplotlib==3.9.2 + - matplotlib-inline==0.1.7 + - mistune==3.0.2 + - mpmath==1.3.0 + - nbclient==0.10.0 + - nbconvert==7.16.4 + - nbformat==5.10.4 + - nest-asyncio==1.6.0 + - netcdf4==1.7.1.post1 + - networkx==3.3 + - notebook==7.2.1 + - notebook-shim==0.2.4 + - numpy==1.26.4 + - opencv-python==4.10.0.84 + - overrides==7.7.0 + - packaging==24.1 + - pandas==2.2.2 + - pandocfilters==1.5.1 + - parso==0.8.4 + - partd==1.4.2 + - peewee==3.17.6 + - pexpect==4.9.0 + - pillow==10.4.0 + - platformdirs==4.2.2 + - pluggy==1.5.0 + - prometheus-client==0.20.0 + - prompt-toolkit==3.0.47 + - psutil==6.0.0 + - psycopg2-binary==2.9.9 + - ptyprocess==0.7.0 + - pure-eval==0.2.3 + - pybtex==0.24.0 + - pycparser==2.22 + - pygments==2.18.0 + - pyparsing==3.1.2 + - pytest==8.3.2 + - pytest-check==2.3.1 + - pytest-mock==3.14.0 + - pytest-timeout==2.3.1 + - python-dateutil==2.9.0.post0 + - python-json-logger==2.0.7 + - pytz==2024.1 + - pyyaml==6.0.2 + - pyzmq==26.2.0 + - qtconsole==5.5.2 + - qtpy==2.4.1 + - referencing==0.35.1 + - requests==2.32.3 + - result_caching @ git+https://github.com/brain-score/result_caching@master + - rfc3339-validator==0.1.4 + - rfc3986-validator==0.1.1 + - rpds-py==0.20.0 + - s3transfer==0.10.2 + - scikit-learn==1.5.1 + - scipy==1.14.1 + - send2trash==1.8.3 + - six==1.16.0 + - smmap==5.0.1 + - sniffio==1.3.1 + - soupsieve==2.6 + - stack-data==0.6.3 + - sympy==1.13.2 + - termcolor==2.4.0 + - 
terminado==0.18.1 + - threadpoolctl==3.5.0 + - tinycss2==1.3.0 + - toolz==0.12.1 + - torch==2.4.0 + - torchvision==0.19.0 + - tornado==6.4.1 + - tqdm==4.66.5 + - traitlets==5.14.3 + - types-python-dateutil==2.9.0.20240821 + - typing-extensions==4.12.2 + - tzdata==2024.1 + - uri-template==1.3.0 + - urllib3==2.2.2 + - wcwidth==0.2.13 + - webcolors==24.8.0 + - webencodings==0.5.1 + - websocket-client==1.8.0 + - widgetsnbextension==4.0.13 + - xarray==2022.3.0 + - zipp==3.20.0 diff --git a/pyproject.toml b/pyproject.toml index 3b28322e9..15b4de6d7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,17 +4,17 @@ [project] name = "brainscore_vision" -version = "2.0" +version = "2.1" description = "The Brain-Score library enables model comparisons to behavioral and neural experiments" authors = [] license = { 'file' = 'LICENSE' } readme = "README.md" -requires-python = ">=3.7" +requires-python = ">=3.11" dependencies = [ - "numpy>=1.17", - "brainscore_core @ git+https://github.com/brain-score/core", - "result_caching @ git+https://github.com/brain-score/result_caching", + "numpy<2", + "brainscore_core @ git+https://github.com/brain-score/core@main", + "result_caching @ git+https://github.com/brain-score/result_caching@master", "importlib-metadata<5", # workaround to https://github.com/brain-score/brainio/issues/28 "scikit-learn", # for metric_helpers/transformations.py cross-validation "scipy", # for benchmark_helpers/properties_common.py @@ -28,8 +28,8 @@ dependencies = [ "peewee", "psycopg2-binary", "networkx", - "decord", - "psutil" + "eva-decord", + "psutil", ] [project.optional-dependencies] @@ -40,9 +40,6 @@ test = [ "pytest-timeout", "torch", "torchvision", - "tensorflow==1.15", - "keras==2.3.1", - "protobuf<=3.20", # https://protobuf.dev/news/2022-05-06/#python-updates "matplotlib", # for examples "pytest-mock", ] diff --git a/tests/test_metric_helpers/test_temporal.py b/tests/test_metric_helpers/test_temporal.py new file mode 100644 index 000000000..64dffe8de --- /dev/null +++ b/tests/test_metric_helpers/test_temporal.py @@ -0,0 +1,80 @@ +import numpy as np +import scipy.stats +from pytest import approx +from sklearn.linear_model import LinearRegression + +from brainio.assemblies import NeuroidAssembly +from brainscore_vision.metric_helpers.xarray_utils import XarrayRegression, XarrayCorrelation +from brainscore_vision.metric_helpers.temporal import PerTime, SpanTime, PerTimeRegression, SpanTimeRegression + + +class TestMetricHelpers: + def test_pertime_ops(self): + jumbled_source = NeuroidAssembly(np.random.rand(500, 10, 20), + coords={'stimulus_id': ('presentation', list(reversed(range(500)))), + 'image_meta': ('presentation', [0] * 500), + 'neuroid_id': ('neuroid', list(reversed(range(10)))), + 'neuroid_meta': ('neuroid', [0] * 10), + 'time_bin_start': ('time_bin', np.arange(0, 400, 20)), + 'time_bin_end': ('time_bin', np.arange(20, 420, 20))}, + dims=['presentation', 'neuroid', 'time_bin']) + mean_neuroid = lambda arr: arr.mean('neuroid') + pertime_mean_neuroid = PerTime(mean_neuroid) + output = pertime_mean_neuroid(jumbled_source) + output = output.transpose('presentation', 'time_bin') + target = jumbled_source.transpose('presentation', 'time_bin', 'neuroid').mean('neuroid') + assert (output == approx(target)).all().item() + + def test_spantime_ops(self): + jumbled_source = NeuroidAssembly(np.random.rand(500, 10, 20), + coords={'stimulus_id': ('presentation', list(reversed(range(500)))), + 'image_meta': ('presentation', [0] * 500), + 'neuroid_id': ('neuroid', 
list(reversed(range(10)))), + 'neuroid_meta': ('neuroid', [0] * 10), + 'time_bin_start': ('time_bin', np.arange(0, 400, 20)), + 'time_bin_end': ('time_bin', np.arange(20, 420, 20))}, + dims=['presentation', 'neuroid', 'time_bin']) + mean_presentation = lambda arr: arr.mean("presentation") + spantime_mean_presentation = SpanTime(mean_presentation) + output = spantime_mean_presentation(jumbled_source) + output = output.transpose('neuroid') + target = jumbled_source.transpose('presentation', 'time_bin', 'neuroid').mean('presentation').mean('time_bin') + assert (output == approx(target)).all().item() + + def test_pertime_regression(self): + jumbled_source = NeuroidAssembly(np.random.rand(500, 10, 20), + coords={'stimulus_id': ('presentation', list(reversed(range(500)))), + 'image_meta': ('presentation', [0] * 500), + 'neuroid_id': ('neuroid', list(reversed(range(10)))), + 'neuroid_meta': ('neuroid', [0] * 10), + 'time_bin_start': ('time_bin', np.arange(0, 400, 20)), + 'time_bin_end': ('time_bin', np.arange(20, 420, 20))}, + dims=['presentation', 'neuroid', 'time_bin']) + target = jumbled_source.sortby(['stimulus_id', 'neuroid_id']) + pertime_regression = PerTimeRegression(XarrayRegression(LinearRegression())) + pertime_regression.fit(jumbled_source, target) + prediction = pertime_regression.predict(jumbled_source) + prediction = prediction.transpose(*target.dims) + # do not test for alignment of metadata - it is only important that the data is well-aligned with the metadata. + np.testing.assert_array_almost_equal(prediction.sortby(['stimulus_id', 'neuroid_id', 'time_bin']).values, + target.sortby(['stimulus_id', 'neuroid_id', 'time_bin']).values) + + + def test_spantime_regression(self): + jumbled_source = NeuroidAssembly(np.random.rand(500, 10, 20), + coords={'stimulus_id': ('presentation', list(reversed(range(500)))), + 'image_meta': ('presentation', [0] * 500), + 'neuroid_id': ('neuroid', list(reversed(range(10)))), + 'neuroid_meta': ('neuroid', [0] * 10), + 'time_bin_start': ('time_bin', np.arange(0, 400, 20)), + 'time_bin_end': ('time_bin', np.arange(20, 420, 20))}, + dims=['presentation', 'neuroid', 'time_bin']) + target = jumbled_source.sortby(['stimulus_id', 'neuroid_id']) + spantime_regression = SpanTimeRegression(XarrayRegression(LinearRegression())) + spantime_regression.fit(jumbled_source, target) + prediction = spantime_regression.predict(jumbled_source) + prediction = prediction.transpose(*target.dims) + # do not test for alignment of metadata - it is only important that the data is well-aligned with the metadata. 
+ np.testing.assert_array_almost_equal(prediction.sortby(['stimulus_id', 'neuroid_id', 'time_bin']).values, + target.sortby(['stimulus_id', 'neuroid_id', 'time_bin']).values) + diff --git a/tests/test_model_helpers/activations/test___init__.py b/tests/test_model_helpers/activations/test___init__.py index 99b36cb98..9bd012348 100644 --- a/tests/test_model_helpers/activations/test___init__.py +++ b/tests/test_model_helpers/activations/test___init__.py @@ -6,7 +6,7 @@ from pathlib import Path from brainio.stimuli import StimulusSet -from brainscore_vision.model_helpers.activations import KerasWrapper, PytorchWrapper, TensorflowSlimWrapper +from brainscore_vision.model_helpers.activations import PytorchWrapper from brainscore_vision.model_helpers.activations.core import flatten from brainscore_vision.model_helpers.activations.pca import LayerPCA @@ -93,74 +93,10 @@ def forward(self, x): return PytorchWrapper(model=MyTransformer(), preprocessing=preprocessing) -def keras_vgg19(): - import keras - from keras.applications.vgg19 import VGG19, preprocess_input - from brainscore_vision.model_helpers.activations.keras import load_images - keras.backend.clear_session() - preprocessing = lambda image_filepaths: preprocess_input(load_images(image_filepaths, image_size=224)) - return KerasWrapper(model=VGG19(), preprocessing=preprocessing) - - -def tfslim_custom(): - from brainscore_vision.model_helpers.activations.tensorflow import load_resize_image - import tensorflow as tf - slim = tf.contrib.slim - tf.compat.v1.reset_default_graph() - - image_size = 224 - placeholder = tf.compat.v1.placeholder(dtype=tf.string, shape=[64]) - preprocess = lambda image_path: load_resize_image(image_path, image_size) - preprocess = tf.map_fn(preprocess, placeholder, dtype=tf.float32) - - with tf.compat.v1.variable_scope('my_model', values=[preprocess]) as sc: - end_points_collection = sc.original_name_scope + '_end_points' - # Collect outputs for conv2d, fully_connected and max_pool2d. 
- with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d], - outputs_collections=[end_points_collection]): - net = slim.conv2d(preprocess, 64, [11, 11], 4, padding='VALID', scope='conv1') - net = slim.max_pool2d(net, [5, 5], 5, scope='pool1') - net = slim.max_pool2d(net, [3, 3], 2, scope='pool2') - net = slim.flatten(net, scope='flatten') - net = slim.fully_connected(net, 1000, scope='logits') - endpoints = slim.utils.convert_collection_to_dict(end_points_collection) - - session = tf.compat.v1.Session() - session.run(tf.compat.v1.initialize_all_variables()) - return TensorflowSlimWrapper(identifier='tf-custom', labels_offset=0, - endpoints=endpoints, inputs=placeholder, session=session) - - -def tfslim_vgg16(): - import tensorflow as tf - from nets import nets_factory - from preprocessing import vgg_preprocessing - from brainscore_vision.model_helpers.activations.tensorflow import load_resize_image - tf.compat.v1.reset_default_graph() - - image_size = 224 - placeholder = tf.compat.v1.placeholder(dtype=tf.string, shape=[64]) - preprocess_image = lambda image: vgg_preprocessing.preprocess_image( - image, image_size, image_size, resize_side_min=image_size) - preprocess = lambda image_path: preprocess_image(load_resize_image(image_path, image_size)) - preprocess = tf.map_fn(preprocess, placeholder, dtype=tf.float32) - - model_ctr = nets_factory.get_network_fn('vgg_16', num_classes=1001, is_training=False) - logits, endpoints = model_ctr(preprocess) - - session = tf.compat.v1.Session() - session.run(tf.compat.v1.initialize_all_variables()) - return TensorflowSlimWrapper(identifier='tf-vgg16', labels_offset=1, - endpoints=endpoints, inputs=placeholder, session=session) - - models_layers = [ pytest.param(pytorch_custom, ['linear', 'relu2']), pytest.param(pytorch_alexnet, ['features.12', 'classifier.5'], marks=pytest.mark.memory_intense), pytest.param(pytorch_transformer_substitute, ['relu1']), - pytest.param(keras_vgg19, ['block3_pool'], marks=pytest.mark.memory_intense), - pytest.param(tfslim_custom, ['my_model/pool2'], marks=pytest.mark.memory_intense), - pytest.param(tfslim_vgg16, ['vgg_16/pool5'], marks=pytest.mark.memory_intense), ] # exact microsaccades for pytorch_alexnet, grayscale.png, for 1 and 10 number_of_trials @@ -366,8 +302,6 @@ def test_exact_microsaccades(number_of_trials): @pytest.mark.memory_intense @pytest.mark.parametrize(["model_ctr", "internal_layers"], [ (pytorch_alexnet, ['features.12', 'classifier.5']), - (keras_vgg19, ['block3_pool']), - (tfslim_vgg16, ['vgg_16/pool5']), ]) def test_mixed_layer_logits(model_ctr, internal_layers): stimuli_paths = [os.path.join(os.path.dirname(__file__), 'rgb.jpg')] @@ -384,7 +318,6 @@ def test_mixed_layer_logits(model_ctr, internal_layers): @pytest.mark.parametrize(["model_ctr", "expected_identifier"], [ (pytorch_custom, 'MyModel'), (pytorch_alexnet, 'AlexNet'), - (keras_vgg19, 'vgg19'), ]) def test_infer_identifier(model_ctr, expected_identifier): model = model_ctr() From 07c835cf87fd1e312fb07407b66611c157962064 Mon Sep 17 00:00:00 2001 From: Katherine Fairchild Date: Wed, 11 Sep 2024 11:32:59 -0400 Subject: [PATCH 20/28] add alexnet_7be5be79 to models (#1223) Co-authored-by: AutoJenkins --- brainscore_vision/models/alexnet_7be5be79/setup.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/brainscore_vision/models/alexnet_7be5be79/setup.py b/brainscore_vision/models/alexnet_7be5be79/setup.py index 421914cfb..41c4ef930 100644 --- a/brainscore_vision/models/alexnet_7be5be79/setup.py +++ 
b/brainscore_vision/models/alexnet_7be5be79/setup.py @@ -3,9 +3,10 @@ from setuptools import setup, find_packages -requirements = [ "torchvision", - "torch" -] +requirements = ["torchvision", + "torch", + "fire" + ] setup( packages=find_packages(exclude=['tests']), From 9585b4a187bc5a8af5259c9c1cec1c27aca928e1 Mon Sep 17 00:00:00 2001 From: Sam Winebrake <85908068+samwinebrake@users.noreply.github.com> Date: Thu, 12 Sep 2024 14:27:32 -0400 Subject: [PATCH 21/28] point to pypi instead of git (#1236) --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 15b4de6d7..d565b19af 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -13,8 +13,8 @@ requires-python = ">=3.11" dependencies = [ "numpy<2", - "brainscore_core @ git+https://github.com/brain-score/core@main", - "result_caching @ git+https://github.com/brain-score/result_caching@master", + "brainscore_core", + "result_caching", "importlib-metadata<5", # workaround to https://github.com/brain-score/brainio/issues/28 "scikit-learn", # for metric_helpers/transformations.py cross-validation "scipy", # for benchmark_helpers/properties_common.py From a765a8f82598b5f0786ba967dd0372c9bcec24e2 Mon Sep 17 00:00:00 2001 From: Sam Winebrake <85908068+samwinebrake@users.noreply.github.com> Date: Thu, 12 Sep 2024 14:38:13 -0400 Subject: [PATCH 22/28] update dependency names w/ hyphens (#1238) * point to pypi instead of git * Update pyproject.toml with hyphens --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index d565b19af..83ab968f7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -13,8 +13,8 @@ requires-python = ">=3.11" dependencies = [ "numpy<2", - "brainscore_core", - "result_caching", + "brainscore-core", + "result-caching", "importlib-metadata<5", # workaround to https://github.com/brain-score/brainio/issues/28 "scikit-learn", # for metric_helpers/transformations.py cross-validation "scipy", # for benchmark_helpers/properties_common.py From 6014e0f8ea2a283fc7ee346942c89f453b679b1d Mon Sep 17 00:00:00 2001 From: Kartik Pradeepan Date: Fri, 13 Sep 2024 05:24:31 -0400 Subject: [PATCH 23/28] Sync master into develop (#1239) * point to pypi instead of git (#1236) * update dependency names w/ hyphens (#1238) * point to pypi instead of git * Update pyproject.toml with hyphens --------- Co-authored-by: Sam Winebrake <85908068+samwinebrake@users.noreply.github.com> --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 15b4de6d7..83ab968f7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -13,8 +13,8 @@ requires-python = ">=3.11" dependencies = [ "numpy<2", - "brainscore_core @ git+https://github.com/brain-score/core@main", - "result_caching @ git+https://github.com/brain-score/result_caching@master", + "brainscore-core", + "result-caching", "importlib-metadata<5", # workaround to https://github.com/brain-score/brainio/issues/28 "scikit-learn", # for metric_helpers/transformations.py cross-validation "scipy", # for benchmark_helpers/properties_common.py From 0a5161f5a192c2f6cdc2220a21d05dfcf18d8cbd Mon Sep 17 00:00:00 2001 From: Abdulkadir Gokce Date: Tue, 17 Sep 2024 09:07:00 +0200 Subject: [PATCH 24/28] Custom trained scaling models (#1206) * Custom trained models * Fix missing library * Fix a missing library * Not every model needs timm * Trigger build tests * Use model_id instead of model_name to ensure correct weights are loaded * 
* timm is missing from deit models
* timm is missing from deit models
* Fix requirements.txt newline
* Package models into a single plugin
* Update requirements.txt
* Update requirements.txt
* Update requirements.txt
* Change the order of models
* Update model.py
* Disable resultcaching for vits
* Tests reordering
* Revert tests back
* Update model.py
* Update model.py
---
 .../models/scaling_models/__init__.py        | 265 ++++++
 .../models/scaling_models/model.py           | 144 +++
 .../models/scaling_models/model_configs.json | 869 ++++++++++++++++++
 .../models/scaling_models/requirements.txt   |   4 +
 .../models/scaling_models/test.py            |   0
 5 files changed, 1282 insertions(+)
 create mode 100644 brainscore_vision/models/scaling_models/__init__.py
 create mode 100644 brainscore_vision/models/scaling_models/model.py
 create mode 100644 brainscore_vision/models/scaling_models/model_configs.json
 create mode 100644 brainscore_vision/models/scaling_models/requirements.txt
 create mode 100644 brainscore_vision/models/scaling_models/test.py

diff --git a/brainscore_vision/models/scaling_models/__init__.py b/brainscore_vision/models/scaling_models/__init__.py
new file mode 100644
index 000000000..e020a07e5
--- /dev/null
+++ b/brainscore_vision/models/scaling_models/__init__.py
@@ -0,0 +1,265 @@
+from brainscore_vision import model_registry
+from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+from .model import get_model, MODEL_CONFIGS
+
+model_registry["resnet18_imagenet_full"] = lambda: ModelCommitment(
+    identifier="resnet18_imagenet_full",
+    activations_model=get_model("resnet18_imagenet_full"),
+    layers=MODEL_CONFIGS["resnet18_imagenet_full"]["model_commitment"]["layers"],
+    behavioral_readout_layer=MODEL_CONFIGS["resnet18_imagenet_full"]["model_commitment"]["behavioral_readout_layer"],
+    region_layer_map=MODEL_CONFIGS["resnet18_imagenet_full"]["model_commitment"]["region_layer_map"]
+)
+
+
+model_registry["resnet34_imagenet_full"] = lambda: ModelCommitment(
+    identifier="resnet34_imagenet_full",
+    activations_model=get_model("resnet34_imagenet_full"),
+    layers=MODEL_CONFIGS["resnet34_imagenet_full"]["model_commitment"]["layers"],
+    behavioral_readout_layer=MODEL_CONFIGS["resnet34_imagenet_full"]["model_commitment"]["behavioral_readout_layer"],
+    region_layer_map=MODEL_CONFIGS["resnet34_imagenet_full"]["model_commitment"]["region_layer_map"]
+)
+
+
+model_registry["resnet50_imagenet_full"] = lambda: ModelCommitment(
+    identifier="resnet50_imagenet_full",
+    activations_model=get_model("resnet50_imagenet_full"),
+    layers=MODEL_CONFIGS["resnet50_imagenet_full"]["model_commitment"]["layers"],
+    behavioral_readout_layer=MODEL_CONFIGS["resnet50_imagenet_full"]["model_commitment"]["behavioral_readout_layer"],
+    region_layer_map=MODEL_CONFIGS["resnet50_imagenet_full"]["model_commitment"]["region_layer_map"]
+)
+
+
+model_registry["resnet101_imagenet_full"] = lambda: ModelCommitment(
+    identifier="resnet101_imagenet_full",
+    activations_model=get_model("resnet101_imagenet_full"),
+    layers=MODEL_CONFIGS["resnet101_imagenet_full"]["model_commitment"]["layers"],
+    behavioral_readout_layer=MODEL_CONFIGS["resnet101_imagenet_full"]["model_commitment"]["behavioral_readout_layer"],
+    region_layer_map=MODEL_CONFIGS["resnet101_imagenet_full"]["model_commitment"]["region_layer_map"]
+)
+
+
+model_registry["resnet152_imagenet_full"] = lambda: ModelCommitment(
+    identifier="resnet152_imagenet_full",
+    activations_model=get_model("resnet152_imagenet_full"),
+
layers=MODEL_CONFIGS["resnet152_imagenet_full"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["resnet152_imagenet_full"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["resnet152_imagenet_full"]["model_commitment"]["region_layer_map"] +) + + +model_registry["resnet18_ecoset_full"] = lambda: ModelCommitment( + identifier="resnet18_ecoset_full", + activations_model=get_model("resnet18_ecoset_full"), + layers=MODEL_CONFIGS["resnet18_ecoset_full"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["resnet18_ecoset_full"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["resnet18_ecoset_full"]["model_commitment"]["region_layer_map"] +) + + +model_registry["resnet34_ecoset_full"] = lambda: ModelCommitment( + identifier="resnet34_ecoset_full", + activations_model=get_model("resnet34_ecoset_full"), + layers=MODEL_CONFIGS["resnet34_ecoset_full"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["resnet34_ecoset_full"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["resnet34_ecoset_full"]["model_commitment"]["region_layer_map"] +) + + +model_registry["resnet50_ecoset_full"] = lambda: ModelCommitment( + identifier="resnet50_ecoset_full", + activations_model=get_model("resnet50_ecoset_full"), + layers=MODEL_CONFIGS["resnet50_ecoset_full"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["resnet50_ecoset_full"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["resnet50_ecoset_full"]["model_commitment"]["region_layer_map"] +) + + +model_registry["resnet101_ecoset_full"] = lambda: ModelCommitment( + identifier="resnet101_ecoset_full", + activations_model=get_model("resnet101_ecoset_full"), + layers=MODEL_CONFIGS["resnet101_ecoset_full"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["resnet101_ecoset_full"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["resnet101_ecoset_full"]["model_commitment"]["region_layer_map"] +) + + +model_registry["resnet152_ecoset_full"] = lambda: ModelCommitment( + identifier="resnet152_ecoset_full", + activations_model=get_model("resnet152_ecoset_full"), + layers=MODEL_CONFIGS["resnet152_ecoset_full"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["resnet152_ecoset_full"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["resnet152_ecoset_full"]["model_commitment"]["region_layer_map"] +) + + +model_registry["resnet50_imagenet_1_seed-0"] = lambda: ModelCommitment( + identifier="resnet50_imagenet_1_seed-0", + activations_model=get_model("resnet50_imagenet_1_seed-0"), + layers=MODEL_CONFIGS["resnet50_imagenet_1_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["resnet50_imagenet_1_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["resnet50_imagenet_1_seed-0"]["model_commitment"]["region_layer_map"] +) + + +model_registry["resnet50_imagenet_10_seed-0"] = lambda: ModelCommitment( + identifier="resnet50_imagenet_10_seed-0", + activations_model=get_model("resnet50_imagenet_10_seed-0"), + layers=MODEL_CONFIGS["resnet50_imagenet_10_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["resnet50_imagenet_10_seed-0"]["model_commitment"]["behavioral_readout_layer"], + 
region_layer_map=MODEL_CONFIGS["resnet50_imagenet_10_seed-0"]["model_commitment"]["region_layer_map"] +) + + +model_registry["resnet50_imagenet_100_seed-0"] = lambda: ModelCommitment( + identifier="resnet50_imagenet_100_seed-0", + activations_model=get_model("resnet50_imagenet_100_seed-0"), + layers=MODEL_CONFIGS["resnet50_imagenet_100_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["resnet50_imagenet_100_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["resnet50_imagenet_100_seed-0"]["model_commitment"]["region_layer_map"] +) + + +model_registry["efficientnet_b0_imagenet_full"] = lambda: ModelCommitment( + identifier="efficientnet_b0_imagenet_full", + activations_model=get_model("efficientnet_b0_imagenet_full"), + layers=MODEL_CONFIGS["efficientnet_b0_imagenet_full"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["efficientnet_b0_imagenet_full"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["efficientnet_b0_imagenet_full"]["model_commitment"]["region_layer_map"] +) + + +model_registry["efficientnet_b1_imagenet_full"] = lambda: ModelCommitment( + identifier="efficientnet_b1_imagenet_full", + activations_model=get_model("efficientnet_b1_imagenet_full"), + layers=MODEL_CONFIGS["efficientnet_b1_imagenet_full"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["efficientnet_b1_imagenet_full"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["efficientnet_b1_imagenet_full"]["model_commitment"]["region_layer_map"] +) + + +model_registry["efficientnet_b2_imagenet_full"] = lambda: ModelCommitment( + identifier="efficientnet_b2_imagenet_full", + activations_model=get_model("efficientnet_b2_imagenet_full"), + layers=MODEL_CONFIGS["efficientnet_b2_imagenet_full"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["efficientnet_b2_imagenet_full"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["efficientnet_b2_imagenet_full"]["model_commitment"]["region_layer_map"] +) + + +model_registry["deit_small_imagenet_full_seed-0"] = lambda: ModelCommitment( + identifier="deit_small_imagenet_full_seed-0", + activations_model=get_model("deit_small_imagenet_full_seed-0"), + layers=MODEL_CONFIGS["deit_small_imagenet_full_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["deit_small_imagenet_full_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["deit_small_imagenet_full_seed-0"]["model_commitment"]["region_layer_map"] +) + + +model_registry["deit_base_imagenet_full_seed-0"] = lambda: ModelCommitment( + identifier="deit_base_imagenet_full_seed-0", + activations_model=get_model("deit_base_imagenet_full_seed-0"), + layers=MODEL_CONFIGS["deit_base_imagenet_full_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["deit_base_imagenet_full_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["deit_base_imagenet_full_seed-0"]["model_commitment"]["region_layer_map"] +) + + +model_registry["deit_large_imagenet_full_seed-0"] = lambda: ModelCommitment( + identifier="deit_large_imagenet_full_seed-0", + activations_model=get_model("deit_large_imagenet_full_seed-0"), + layers=MODEL_CONFIGS["deit_large_imagenet_full_seed-0"]["model_commitment"]["layers"], + 
behavioral_readout_layer=MODEL_CONFIGS["deit_large_imagenet_full_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["deit_large_imagenet_full_seed-0"]["model_commitment"]["region_layer_map"] +) + + +model_registry["deit_small_imagenet_1_seed-0"] = lambda: ModelCommitment( + identifier="deit_small_imagenet_1_seed-0", + activations_model=get_model("deit_small_imagenet_1_seed-0"), + layers=MODEL_CONFIGS["deit_small_imagenet_1_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["deit_small_imagenet_1_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["deit_small_imagenet_1_seed-0"]["model_commitment"]["region_layer_map"] +) + + +model_registry["deit_small_imagenet_10_seed-0"] = lambda: ModelCommitment( + identifier="deit_small_imagenet_10_seed-0", + activations_model=get_model("deit_small_imagenet_10_seed-0"), + layers=MODEL_CONFIGS["deit_small_imagenet_10_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["deit_small_imagenet_10_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["deit_small_imagenet_10_seed-0"]["model_commitment"]["region_layer_map"] +) + + +model_registry["deit_small_imagenet_100_seed-0"] = lambda: ModelCommitment( + identifier="deit_small_imagenet_100_seed-0", + activations_model=get_model("deit_small_imagenet_100_seed-0"), + layers=MODEL_CONFIGS["deit_small_imagenet_100_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["deit_small_imagenet_100_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["deit_small_imagenet_100_seed-0"]["model_commitment"]["region_layer_map"] +) + + +model_registry["convnext_tiny_imagenet_full_seed-0"] = lambda: ModelCommitment( + identifier="convnext_tiny_imagenet_full_seed-0", + activations_model=get_model("convnext_tiny_imagenet_full_seed-0"), + layers=MODEL_CONFIGS["convnext_tiny_imagenet_full_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["convnext_tiny_imagenet_full_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["convnext_tiny_imagenet_full_seed-0"]["model_commitment"]["region_layer_map"] +) + + +model_registry["convnext_small_imagenet_full_seed-0"] = lambda: ModelCommitment( + identifier="convnext_small_imagenet_full_seed-0", + activations_model=get_model("convnext_small_imagenet_full_seed-0"), + layers=MODEL_CONFIGS["convnext_small_imagenet_full_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["convnext_small_imagenet_full_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["convnext_small_imagenet_full_seed-0"]["model_commitment"]["region_layer_map"] +) + + +model_registry["convnext_base_imagenet_full_seed-0"] = lambda: ModelCommitment( + identifier="convnext_base_imagenet_full_seed-0", + activations_model=get_model("convnext_base_imagenet_full_seed-0"), + layers=MODEL_CONFIGS["convnext_base_imagenet_full_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["convnext_base_imagenet_full_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["convnext_base_imagenet_full_seed-0"]["model_commitment"]["region_layer_map"] +) + + +model_registry["convnext_large_imagenet_full_seed-0"] = lambda: ModelCommitment( + identifier="convnext_large_imagenet_full_seed-0", + 
activations_model=get_model("convnext_large_imagenet_full_seed-0"), + layers=MODEL_CONFIGS["convnext_large_imagenet_full_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["convnext_large_imagenet_full_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["convnext_large_imagenet_full_seed-0"]["model_commitment"]["region_layer_map"] +) + + +model_registry["convnext_small_imagenet_1_seed-0"] = lambda: ModelCommitment( + identifier="convnext_small_imagenet_1_seed-0", + activations_model=get_model("convnext_small_imagenet_1_seed-0"), + layers=MODEL_CONFIGS["convnext_small_imagenet_1_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["convnext_small_imagenet_1_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["convnext_small_imagenet_1_seed-0"]["model_commitment"]["region_layer_map"] +) + + +model_registry["convnext_small_imagenet_10_seed-0"] = lambda: ModelCommitment( + identifier="convnext_small_imagenet_10_seed-0", + activations_model=get_model("convnext_small_imagenet_10_seed-0"), + layers=MODEL_CONFIGS["convnext_small_imagenet_10_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["convnext_small_imagenet_10_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["convnext_small_imagenet_10_seed-0"]["model_commitment"]["region_layer_map"] +) + + +model_registry["convnext_small_imagenet_100_seed-0"] = lambda: ModelCommitment( + identifier="convnext_small_imagenet_100_seed-0", + activations_model=get_model("convnext_small_imagenet_100_seed-0"), + layers=MODEL_CONFIGS["convnext_small_imagenet_100_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["convnext_small_imagenet_100_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["convnext_small_imagenet_100_seed-0"]["model_commitment"]["region_layer_map"] +) + + diff --git a/brainscore_vision/models/scaling_models/model.py b/brainscore_vision/models/scaling_models/model.py new file mode 100644 index 000000000..ab63520a4 --- /dev/null +++ b/brainscore_vision/models/scaling_models/model.py @@ -0,0 +1,144 @@ +import os +import functools +import json +from pathlib import Path + +import torchvision.models +import torch + +from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper +from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images + +import timm +import numpy as np +import torchvision.transforms as T +from PIL import Image + +import albumentations as A +from albumentations.pytorch import ToTensorV2 + +BIBTEX = """""" + + +with open(Path(__file__).parent / "model_configs.json", "r") as f: + MODEL_CONFIGS = json.load(f) + + +def load_image(image_filepath): + return Image.open(image_filepath).convert("RGB") + + +def get_interpolation_mode(interpolation: str) -> int: + """Returns the interpolation mode for albumentations""" + if "linear" or "bilinear" in interpolation: + return 1 + elif "cubic" or "bicubic" in interpolation: + return 2 + else: + raise NotImplementedError(f"Interpolation mode {interpolation} not implemented") + + +def custom_image_preprocess( + images, + resize_size: int, + crop_size: int, + interpolation: str, + transforms=None, +): + if transforms is None: + interpolation = get_interpolation_mode(interpolation) + transforms = A.Compose( + [ + A.Resize(resize_size, resize_size, p=1.0, interpolation=interpolation), + 
A.CenterCrop(crop_size, crop_size, p=1.0), + A.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), + ToTensorV2(), + ] + ) + if isinstance(transforms, T.Compose): + images = [transforms(image) for image in images] + images = [np.array(image) for image in images] + images = np.stack(images) + elif isinstance(transforms, A.Compose): + images = [transforms(image=np.array(image))["image"] for image in images] + images = np.stack(images) + else: + raise NotImplementedError( + f"Transform of type {type(transforms)} is not implemented" + ) + + return images + + +def load_preprocess_images_custom( + image_filepaths, preprocess_images=custom_image_preprocess, **kwargs +): + images = [load_image(image_filepath) for image_filepath in image_filepaths] + images = preprocess_images(images, **kwargs) + return images + + +def get_model(model_id:str): + + # Unpack model config + config = MODEL_CONFIGS[model_id] + model_name = config["model_name"] + model_id = config["model_id"] + resize_size = config["resize_size"] + crop_size = config["crop_size"] + interpolation = config["interpolation"] + num_classes = config["num_classes"] + ckpt_url = config["checkpoint_url"] + use_timm = config["use_timm"] + timm_model_name = config["timm_model_name"] + epoch = config["epoch"] + load_model_ema = config["load_model_ema"] + output_head = config["output_head"] + is_vit = config["is_vit"] + + # Temporary fix for vit models + # See https://github.com/brain-score/vision/pull/1232 + if is_vit: + os.environ['RESULTCACHING_DISABLE'] = 'brainscore_vision.model_helpers.activations.core.ActivationsExtractorHelper._from_paths_stored' + + + # Initialize model + if use_timm: + model = timm.create_model(timm_model_name, pretrained=False, num_classes=num_classes) + else: + model = eval(f"torchvision.models.{model_name}(weights=None)") + if num_classes != 1000: + exec(f'''{output_head} = torch.nn.Linear( + in_features={output_head}.in_features, + out_features=num_classes, + bias={output_head}.bias is not None, + )''' + ) + + # Load model weights + state_dict = torch.hub.load_state_dict_from_url( + ckpt_url, + check_hash=True, + file_name=f"{model_id}_ep{epoch}.pt", + map_location="cpu", + ) + if load_model_ema: + state_dict = state_dict["state"]["model_ema_state_dict"] + else: + state_dict = state_dict["state"]["model"] + state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()} + model.load_state_dict(state_dict, strict=True) + print(f"Model loaded from {ckpt_url}") + + # Wrap model + preprocessing = functools.partial( + load_preprocess_images_custom, + resize_size=resize_size, + crop_size=crop_size, + interpolation=interpolation, + transforms=None + ) + wrapper = PytorchWrapper( + identifier=model_id, model=model, preprocessing=preprocessing + ) + return wrapper diff --git a/brainscore_vision/models/scaling_models/model_configs.json b/brainscore_vision/models/scaling_models/model_configs.json new file mode 100644 index 000000000..cc52e5370 --- /dev/null +++ b/brainscore_vision/models/scaling_models/model_configs.json @@ -0,0 +1,869 @@ +{ + "resnet18_imagenet_full": { + "model_name": "resnet18", + "model_id": "resnet18_imagenet_full", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.fc", + "model_commitment": { + "layers": [ + "layer1.0.bn1", + "layer2.0.bn2", + "layer3.0.conv2", + "layer4.0.bn1" + ], + 
"behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "layer1.0.bn1", + "V2": "layer3.0.conv2", + "V4": "layer2.0.bn2", + "IT": "layer4.0.bn1" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/resnet18_imagenet_full/ep100.pt" + }, + "resnet34_imagenet_full": { + "model_name": "resnet34", + "model_id": "resnet34_imagenet_full", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.fc", + "model_commitment": { + "layers": [ + "layer1.0.bn1", + "layer2.0.bn2", + "layer3.1.conv1", + "layer4.0.bn1" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "layer1.0.bn1", + "V2": "layer3.1.conv1", + "V4": "layer2.0.bn2", + "IT": "layer4.0.bn1" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/resnet34_imagenet_full/ep100.pt" + }, + "resnet50_imagenet_full": { + "model_name": "resnet50", + "model_id": "resnet50_imagenet_full", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.fc", + "model_commitment": { + "layers": [ + "layer1.0.conv1", + "layer3.0.conv1", + "layer3.5.bn3", + "layer4.0.relu" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "layer1.0.conv1", + "V2": "layer3.5.bn3", + "V4": "layer3.0.conv1", + "IT": "layer4.0.relu" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/resnet50_imagenet_full/ep100.pt" + }, + "resnet101_imagenet_full": { + "model_name": "resnet101", + "model_id": "resnet101_imagenet_full", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.fc", + "model_commitment": { + "layers": [ + "layer1.0.bn1", + "layer3.0.bn3", + "layer4.0.bn1", + "layer4.0.relu" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "layer1.0.bn1", + "V2": "layer4.0.bn1", + "V4": "layer3.0.bn3", + "IT": "layer4.0.relu" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/resnet101_imagenet_full/ep100.pt" + }, + "resnet152_imagenet_full": { + "model_name": "resnet152", + "model_id": "resnet152_imagenet_full", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.fc", + "model_commitment": { + "layers": [ + "layer1.0.bn1", + "layer3.0.bn3", + "layer3.3.bn3", + "layer3.34.bn3" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "layer1.0.bn1", + "V2": "layer3.3.bn3", + "V4": "layer3.0.bn3", + "IT": "layer3.34.bn3" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/resnet152_imagenet_full/ep100.pt" + }, + "resnet18_ecoset_full": { + "model_name": "resnet18", + "model_id": "resnet18_ecoset_full", + "num_classes": 565, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bilinear", + "load_model_ema": false, + 
"use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.fc", + "model_commitment": { + "layers": [ + "layer1.0.conv1", + "layer2.0.bn2", + "layer3.0.conv1", + "layer4.0.bn1" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "layer1.0.conv1", + "V2": "layer3.0.conv1", + "V4": "layer2.0.bn2", + "IT": "layer4.0.bn1" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/resnet18_ecoset_full/ep100.pt" + }, + "resnet34_ecoset_full": { + "model_name": "resnet34", + "model_id": "resnet34_ecoset_full", + "num_classes": 565, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.fc", + "model_commitment": { + "layers": [ + "layer1.0.bn1", + "layer3.0.conv1", + "layer3.1.conv1", + "layer4.0.conv1" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "layer1.0.bn1", + "V2": "layer3.1.conv1", + "V4": "layer3.0.conv1", + "IT": "layer4.0.conv1" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/resnet34_ecoset_full/ep100.pt" + }, + "resnet50_ecoset_full": { + "model_name": "resnet50", + "model_id": "resnet50_ecoset_full", + "num_classes": 565, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.fc", + "model_commitment": { + "layers": [ + "layer1.0.bn1", + "layer3.0.conv1", + "layer4.0.conv2", + "layer4.0.relu" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "layer1.0.bn1", + "V2": "layer4.0.conv2", + "V4": "layer3.0.conv1", + "IT": "layer4.0.relu" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/resnet50_ecoset_full/ep100.pt" + }, + "resnet101_ecoset_full": { + "model_name": "resnet101", + "model_id": "resnet101_ecoset_full", + "num_classes": 565, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.fc", + "model_commitment": { + "layers": [ + "layer1.0.bn1", + "layer3.0.bn3", + "layer3.4.relu", + "layer4.0.relu" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "layer1.0.bn1", + "V2": "layer3.4.relu", + "V4": "layer3.0.bn3", + "IT": "layer4.0.relu" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/resnet101_ecoset_full/ep100.pt" + }, + "resnet152_ecoset_full": { + "model_name": "resnet152", + "model_id": "resnet152_ecoset_full", + "num_classes": 565, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.fc", + "model_commitment": { + "layers": [ + "layer1.0.bn1", + "layer3.0.bn3", + "layer3.3.bn3", + "layer4.0.relu" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "layer1.0.bn1", + "V2": "layer3.3.bn3", + "V4": "layer3.0.bn3", + "IT": "layer4.0.relu" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/resnet152_ecoset_full/ep100.pt" + }, + 
"resnet50_imagenet_1_seed-0": { + "model_name": "resnet50", + "model_id": "resnet50_imagenet_1_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.fc", + "model_commitment": { + "layers": [ + "layer1.0.conv1", + "layer3.0.conv1", + "layer3.5.bn3", + "layer4.0.relu" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "layer1.0.conv1", + "V2": "layer3.5.bn3", + "V4": "layer3.0.conv1", + "IT": "layer4.0.relu" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/resnet50_imagenet_1_seed-0/ep100.pt" + }, + "resnet50_imagenet_10_seed-0": { + "model_name": "resnet50", + "model_id": "resnet50_imagenet_10_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.fc", + "model_commitment": { + "layers": [ + "layer1.0.conv1", + "layer3.0.conv1", + "layer3.5.bn3", + "layer4.0.relu" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "layer1.0.conv1", + "V2": "layer3.5.bn3", + "V4": "layer3.0.conv1", + "IT": "layer4.0.relu" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/resnet50_imagenet_10_seed-0/ep100.pt" + }, + "resnet50_imagenet_100_seed-0": { + "model_name": "resnet50", + "model_id": "resnet50_imagenet_100_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.fc", + "model_commitment": { + "layers": [ + "layer1.0.conv1", + "layer3.0.conv1", + "layer3.5.bn3", + "layer4.0.relu" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "layer1.0.conv1", + "V2": "layer3.5.bn3", + "V4": "layer3.0.conv1", + "IT": "layer4.0.relu" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/resnet50_imagenet_100_seed-0/ep100.pt" + }, + "efficientnet_b0_imagenet_full": { + "model_name": "efficientnet_b0", + "model_id": "efficientnet_b0_imagenet_full", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.classifier[1]", + "model_commitment": { + "layers": [ + "features.4.0.block.0.1", + "features.4.0.block.1.0", + "features.4.1.block.3.1", + "features.6.0.block.3.0" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "features.4.0.block.1.0", + "V2": "features.4.1.block.3.1", + "V4": "features.4.0.block.0.1", + "IT": "features.6.0.block.3.0" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/efficientnet_b0_imagenet_full/ep100.pt" + }, + "efficientnet_b1_imagenet_full": { + "model_name": "efficientnet_b1", + "model_id": "efficientnet_b1_imagenet_full", + "num_classes": 1000, + "resize_size": 255, + "crop_size": 240, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": 
"model.classifier[1]", + "model_commitment": { + "layers": [ + "features.4.0.block.0.1", + "features.4.0.block.1.0", + "features.6.0.block.3.0" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "features.4.0.block.0.1", + "V2": "features.4.0.block.1.0", + "V4": "features.4.0.block.0.1", + "IT": "features.6.0.block.3.0" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/efficientnet_b1_imagenet_full/ep100.pt" + }, + "efficientnet_b2_imagenet_full": { + "model_name": "efficientnet_b2", + "model_id": "efficientnet_b2_imagenet_full", + "num_classes": 1000, + "resize_size": 288, + "crop_size": 288, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.classifier[1]", + "model_commitment": { + "layers": [ + "features.4.0.block.3.0", + "features.5.0.block.1.0", + "features.6.0.block.3.0" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "features.4.0.block.3.0", + "V2": "features.5.0.block.1.0", + "V4": "features.4.0.block.3.0", + "IT": "features.6.0.block.3.0" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/efficientnet_b2_imagenet_full/ep100.pt" + }, + "deit_small_imagenet_full_seed-0": { + "model_name": "deit_small", + "model_id": "deit_small_imagenet_full_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bicubic", + "load_model_ema": true, + "use_timm": true, + "timm_model_name": "deit3_small_patch16_224", + "is_vit": true, + "epoch": 300, + "output_head": null, + "model_commitment": { + "layers": [ + "blocks.2.norm1", + "blocks.5.norm1", + "blocks.6.norm2", + "blocks.9.norm2" + ], + "behavioral_readout_layer": "fc_norm", + "region_layer_map": { + "V1": "blocks.2.norm1", + "V2": "blocks.6.norm2", + "V4": "blocks.5.norm1", + "IT": "blocks.9.norm2" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/deit_small_imagenet_full_seed-0/ep300.pt" + }, + "deit_base_imagenet_full_seed-0": { + "model_name": "deit_base", + "model_id": "deit_base_imagenet_full_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bicubic", + "load_model_ema": true, + "use_timm": true, + "timm_model_name": "deit3_base_patch16_224", + "is_vit": true, + "epoch": 300, + "output_head": null, + "model_commitment": { + "layers": [ + "blocks.3.mlp.act", + "blocks.3.mlp.fc1", + "blocks.8.norm2", + "blocks.9.norm2" + ], + "behavioral_readout_layer": "fc_norm", + "region_layer_map": { + "V1": "blocks.3.mlp.fc1", + "V2": "blocks.8.norm2", + "V4": "blocks.3.mlp.act", + "IT": "blocks.9.norm2" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/deit_base_imagenet_full_seed-0/ep300.pt" + }, + "deit_large_imagenet_full_seed-0": { + "model_name": "deit_large", + "model_id": "deit_large_imagenet_full_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bicubic", + "load_model_ema": true, + "use_timm": true, + "timm_model_name": "deit3_large_patch16_224", + "is_vit": true, + "epoch": 300, + "output_head": null, + "model_commitment": { + "layers": [ + "blocks.18.norm2", + "blocks.20.norm2", + "blocks.4.norm1", + "blocks.9.norm1" + ], + "behavioral_readout_layer": "fc_norm", + "region_layer_map": { + "V1": "blocks.4.norm1", + "V2": 
"blocks.18.norm2", + "V4": "blocks.9.norm1", + "IT": "blocks.20.norm2" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/deit_large_imagenet_full_seed-0/ep300.pt" + }, + "deit_small_imagenet_1_seed-0": { + "model_name": "deit_small", + "model_id": "deit_small_imagenet_1_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bicubic", + "load_model_ema": true, + "use_timm": true, + "timm_model_name": "deit3_small_patch16_224", + "is_vit": true, + "epoch": 300, + "output_head": null, + "model_commitment": { + "layers": [ + "blocks.2.norm1", + "blocks.5.norm1", + "blocks.6.norm2", + "blocks.9.norm2" + ], + "behavioral_readout_layer": "fc_norm", + "region_layer_map": { + "V1": "blocks.2.norm1", + "V2": "blocks.6.norm2", + "V4": "blocks.5.norm1", + "IT": "blocks.9.norm2" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/deit_small_imagenet_1_seed-0/ep300.pt" + }, + "deit_small_imagenet_10_seed-0": { + "model_name": "deit_small", + "model_id": "deit_small_imagenet_10_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bicubic", + "load_model_ema": true, + "use_timm": true, + "timm_model_name": "deit3_small_patch16_224", + "is_vit": true, + "epoch": 300, + "output_head": null, + "model_commitment": { + "layers": [ + "blocks.2.norm1", + "blocks.5.norm1", + "blocks.6.norm2", + "blocks.9.norm2" + ], + "behavioral_readout_layer": "fc_norm", + "region_layer_map": { + "V1": "blocks.2.norm1", + "V2": "blocks.6.norm2", + "V4": "blocks.5.norm1", + "IT": "blocks.9.norm2" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/deit_small_imagenet_10_seed-0/ep300.pt" + }, + "deit_small_imagenet_100_seed-0": { + "model_name": "deit_small", + "model_id": "deit_small_imagenet_100_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bicubic", + "load_model_ema": true, + "use_timm": true, + "timm_model_name": "deit3_small_patch16_224", + "is_vit": true, + "epoch": 300, + "output_head": null, + "model_commitment": { + "layers": [ + "blocks.2.norm1", + "blocks.5.norm1", + "blocks.6.norm2", + "blocks.9.norm2" + ], + "behavioral_readout_layer": "fc_norm", + "region_layer_map": { + "V1": "blocks.2.norm1", + "V2": "blocks.6.norm2", + "V4": "blocks.5.norm1", + "IT": "blocks.9.norm2" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/deit_small_imagenet_100_seed-0/ep300.pt" + }, + "convnext_tiny_imagenet_full_seed-0": { + "model_name": "convnext_tiny", + "model_id": "convnext_tiny_imagenet_full_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bicubic", + "load_model_ema": true, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 300, + "output_head": "model.classifier[2]", + "model_commitment": { + "layers": [ + "features.4.0", + "features.5.4.block.0", + "features.6.0" + ], + "behavioral_readout_layer": "classifier.1", + "region_layer_map": { + "V1": "features.6.0", + "V2": "features.5.4.block.0", + "V4": "features.4.0", + "IT": "features.5.4.block.0" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/convnext_tiny_imagenet_full_seed-0/ep300.pt" + }, + "convnext_small_imagenet_full_seed-0": { + "model_name": "convnext_small", + "model_id": 
"convnext_small_imagenet_full_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bicubic", + "load_model_ema": true, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 300, + "output_head": "model.classifier[2]", + "model_commitment": { + "layers": [ + "features.4.0", + "features.5.17.block.0", + "features.5.2.block.0", + "features.5.9.block.0" + ], + "behavioral_readout_layer": "classifier.1", + "region_layer_map": { + "V1": "features.5.2.block.0", + "V2": "features.5.17.block.0", + "V4": "features.4.0", + "IT": "features.5.9.block.0" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/convnext_small_imagenet_full_seed-0/ep300.pt" + }, + "convnext_base_imagenet_full_seed-0": { + "model_name": "convnext_base", + "model_id": "convnext_base_imagenet_full_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bicubic", + "load_model_ema": true, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 300, + "output_head": "model.classifier[2]", + "model_commitment": { + "layers": [ + "features.4.0", + "features.5.11.block.0", + "features.5.12.block.0", + "features.5.7.block.0" + ], + "behavioral_readout_layer": "classifier.1", + "region_layer_map": { + "V1": "features.5.7.block.0", + "V2": "features.5.12.block.0", + "V4": "features.4.0", + "IT": "features.5.11.block.0" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/convnext_base_imagenet_full_seed-0/ep300.pt" + }, + "convnext_large_imagenet_full_seed-0": { + "model_name": "convnext_large", + "model_id": "convnext_large_imagenet_full_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bicubic", + "load_model_ema": true, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 300, + "output_head": "model.classifier[2]", + "model_commitment": { + "layers": [ + "features.4.1", + "features.5.11.block.0", + "features.5.7.block.0", + "features.5.7.block.5" + ], + "behavioral_readout_layer": "classifier.1", + "region_layer_map": { + "V1": "features.5.7.block.5", + "V2": "features.5.7.block.0", + "V4": "features.4.1", + "IT": "features.5.11.block.0" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/convnext_large_imagenet_full_seed-0/ep300.pt" + }, + "convnext_small_imagenet_1_seed-0": { + "model_name": "convnext_small", + "model_id": "convnext_small_imagenet_1_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bicubic", + "load_model_ema": true, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 300, + "output_head": "model.classifier[2]", + "model_commitment": { + "layers": [ + "features.4.0", + "features.5.17.block.0", + "features.5.2.block.0", + "features.5.9.block.0" + ], + "behavioral_readout_layer": "classifier.1", + "region_layer_map": { + "V1": "features.5.2.block.0", + "V2": "features.5.17.block.0", + "V4": "features.4.0", + "IT": "features.5.9.block.0" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/convnext_small_imagenet_1_seed-0/ep300.pt" + }, + "convnext_small_imagenet_10_seed-0": { + "model_name": "convnext_small", + "model_id": "convnext_small_imagenet_10_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": 
"bicubic", + "load_model_ema": true, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 300, + "output_head": "model.classifier[2]", + "model_commitment": { + "layers": [ + "features.4.0", + "features.5.17.block.0", + "features.5.2.block.0", + "features.5.9.block.0" + ], + "behavioral_readout_layer": "classifier.1", + "region_layer_map": { + "V1": "features.5.2.block.0", + "V2": "features.5.17.block.0", + "V4": "features.4.0", + "IT": "features.5.9.block.0" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/convnext_small_imagenet_10_seed-0/ep300.pt" + }, + "convnext_small_imagenet_100_seed-0": { + "model_name": "convnext_small", + "model_id": "convnext_small_imagenet_100_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bicubic", + "load_model_ema": true, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 300, + "output_head": "model.classifier[2]", + "model_commitment": { + "layers": [ + "features.4.0", + "features.5.17.block.0", + "features.5.2.block.0", + "features.5.9.block.0" + ], + "behavioral_readout_layer": "classifier.1", + "region_layer_map": { + "V1": "features.5.2.block.0", + "V2": "features.5.17.block.0", + "V4": "features.4.0", + "IT": "features.5.9.block.0" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/convnext_small_imagenet_100_seed-0/ep300.pt" + } +} \ No newline at end of file diff --git a/brainscore_vision/models/scaling_models/requirements.txt b/brainscore_vision/models/scaling_models/requirements.txt new file mode 100644 index 000000000..af6389b09 --- /dev/null +++ b/brainscore_vision/models/scaling_models/requirements.txt @@ -0,0 +1,4 @@ +torch +torchvision +albumentations +timm diff --git a/brainscore_vision/models/scaling_models/test.py b/brainscore_vision/models/scaling_models/test.py new file mode 100644 index 000000000..e69de29bb From d02f31c413e33c9ecbbdc0ef72018645b9010003 Mon Sep 17 00:00:00 2001 From: Kartik Pradeepan Date: Thu, 19 Sep 2024 15:01:06 -0400 Subject: [PATCH 25/28] Update to sync workflow (#1248) * Update to sync workflow Adds dynamic PR title and commit history included in PR description. Adds `No changes detected` job. * Update no_changes condition and text Refined condition to not require PR_merge because that is when this condition SHOULD be triggered to indicate the correct status of the workflow. 
--- .../workflows/sync_develop_with_master.yml | 38 ++++++++++++++++--- 1 file changed, 33 insertions(+), 5 deletions(-) diff --git a/.github/workflows/sync_develop_with_master.yml b/.github/workflows/sync_develop_with_master.yml index df8cc238d..bd1bf5b0c 100644 --- a/.github/workflows/sync_develop_with_master.yml +++ b/.github/workflows/sync_develop_with_master.yml @@ -16,6 +16,7 @@ jobs: id: init run: | echo "Starting branch synchronization of ${{ github.repository }}" + create_pr_for_nonplugin: name: Synchronizing non-plugin PR needs: start # This job now needs the 'start' job to complete first @@ -34,15 +35,21 @@ jobs: run: | git fetch origin master git reset --hard origin/master + - name: Get commit summary + id: commit_summary + run: | + git log -1 --pretty=format:"%s" + echo "::set-output name=summary::$(git log -1 --pretty=format:"%s")" - name: Create pull request in develop uses: peter-evans/create-pull-request@v6 with: token: '${{ secrets.PAT }}' commit-message: Sync master into develop - title: Sync master into develop + title: Sync master into develop. Triggered by PR #${{ github.event.pull_request.number }} body: >- This PR syncs the latest changes from the master branch into the develop branch. + Commit Summary: ${{ steps.commit_summary.outputs.summary }} base: develop branch: 'developer-sync-pr-${{ github.event.pull_request.number }}' @@ -85,7 +92,7 @@ jobs: fi } - name: Push changes to develop (if merge is successful) - if: steps.merge.conclusion == 'success' + if: steps.merge.outcome == 'success' run: | #Use force-with-lease to prevent accidental overwrite if branch has been updated. If fails, rebase the update and retry git push origin develop --force-with-lease || { echo "Push failed due to updates in develop. Attempting to rebase and retry..." @@ -93,14 +100,21 @@ jobs: git rebase origin/develop git push origin develop --force-with-lease } + - name: Get commit summary + id: commit_summary + run: | + git log -1 --pretty=format:"%s" + echo "::set-output name=summary::$(git log -1 --pretty=format:"%s")" - name: Create pull request for merge conflicts if: steps.merge.outputs.merge_conflict == 'true' uses: peter-evans/create-pull-request@v6 with: token: '${{ secrets.PAT }}' commit-message: Merge master into develop with conflict resolution - title: Resolve conflicts between master and develop - body: This PR resolves merge conflicts between master and develop. + title: Resolve conflicts between master and develop. Triggered by PR #${{ github.event.pull_request.number }} + body: | + This PR resolves merge conflicts between master and develop. + Commit Summary: ${{ steps.commit_summary.outputs.summary }} base: develop branch: 'developer-sync-pr-conflict-${{ github.event.pull_request.number }}' - name: Handle other merge failures @@ -108,4 +122,18 @@ jobs: run: > echo "Handle non-conflict related failure, such as network issues or missing branches" - # Possibly incorporate additional handling logic here (e.g.,notifications or retries) \ No newline at end of file + # Possibly incorporate additional handling logic here (e.g.,notifications or retries) + + + no_changes: + name: "No Changes Made. No synchronization needed." 
+ needs: start + if: > + ( + needs.create_pr_for_nonplugin.result != 'success' && + needs.auto_sync_for_plugin.result != 'success' + ) + runs-on: ubuntu-latest + steps: + - name: Echo no changes + run: echo "No changes were made to master branch 👍" From bc726af55db06f5bdfc6f7faabbf2f85c221d49e Mon Sep 17 00:00:00 2001 From: Kartik Pradeepan Date: Thu, 19 Sep 2024 15:04:59 -0400 Subject: [PATCH 26/28] Sync master into develop. Triggered by PR (#1257) * point to pypi instead of git (#1236) * update dependency names w/ hyphens (#1238) * point to pypi instead of git * Update pyproject.toml with hyphens * Custom trained scaling models (#1206) * Custom trained models * Fix missing library * Fix a missing library * Not every model needs timm * Trigger build tests * Use model_id instead of model_name to ensure correct weights are loaded * timm is missing from deit models * timm is missing from deit models * Fix requirements.txt newline * Package models into a single plugin * Update requirements.txt * Update requirements.txt * Update requirements.txt * Change the order of models * Update model.py * Disable resultcaching for vits * Tests reordering * Revert tests back * Update model.py * Update model.py * Update to sync workflow (#1248) * Update to sync workflow Adds dynamic PR title and commit history included in PR description. Adds `No changes detected` job. * Update no_changes condition and text Refined condition to not require PR_merge because that is when this condition SHOULD be triggered to indicate the correct status of the workflow. --------- Co-authored-by: Sam Winebrake <85908068+samwinebrake@users.noreply.github.com> Co-authored-by: Abdulkadir Gokce --- .../workflows/sync_develop_with_master.yml | 38 +- .../models/scaling_models/__init__.py | 265 ++++++ .../models/scaling_models/model.py | 144 +++ .../models/scaling_models/model_configs.json | 869 ++++++++++++++++++ .../models/scaling_models/requirements.txt | 4 + .../models/scaling_models/test.py | 0 6 files changed, 1315 insertions(+), 5 deletions(-) create mode 100644 brainscore_vision/models/scaling_models/__init__.py create mode 100644 brainscore_vision/models/scaling_models/model.py create mode 100644 brainscore_vision/models/scaling_models/model_configs.json create mode 100644 brainscore_vision/models/scaling_models/requirements.txt create mode 100644 brainscore_vision/models/scaling_models/test.py diff --git a/.github/workflows/sync_develop_with_master.yml b/.github/workflows/sync_develop_with_master.yml index df8cc238d..bd1bf5b0c 100644 --- a/.github/workflows/sync_develop_with_master.yml +++ b/.github/workflows/sync_develop_with_master.yml @@ -16,6 +16,7 @@ jobs: id: init run: | echo "Starting branch synchronization of ${{ github.repository }}" + create_pr_for_nonplugin: name: Synchronizing non-plugin PR needs: start # This job now needs the 'start' job to complete first @@ -34,15 +35,21 @@ jobs: run: | git fetch origin master git reset --hard origin/master + - name: Get commit summary + id: commit_summary + run: | + git log -1 --pretty=format:"%s" + echo "::set-output name=summary::$(git log -1 --pretty=format:"%s")" - name: Create pull request in develop uses: peter-evans/create-pull-request@v6 with: token: '${{ secrets.PAT }}' commit-message: Sync master into develop - title: Sync master into develop + title: Sync master into develop. Triggered by PR #${{ github.event.pull_request.number }} body: >- This PR syncs the latest changes from the master branch into the develop branch. 
+ Commit Summary: ${{ steps.commit_summary.outputs.summary }} base: develop branch: 'developer-sync-pr-${{ github.event.pull_request.number }}' @@ -85,7 +92,7 @@ jobs: fi } - name: Push changes to develop (if merge is successful) - if: steps.merge.conclusion == 'success' + if: steps.merge.outcome == 'success' run: | #Use force-with-lease to prevent accidental overwrite if branch has been updated. If fails, rebase the update and retry git push origin develop --force-with-lease || { echo "Push failed due to updates in develop. Attempting to rebase and retry..." @@ -93,14 +100,21 @@ jobs: git rebase origin/develop git push origin develop --force-with-lease } + - name: Get commit summary + id: commit_summary + run: | + git log -1 --pretty=format:"%s" + echo "::set-output name=summary::$(git log -1 --pretty=format:"%s")" - name: Create pull request for merge conflicts if: steps.merge.outputs.merge_conflict == 'true' uses: peter-evans/create-pull-request@v6 with: token: '${{ secrets.PAT }}' commit-message: Merge master into develop with conflict resolution - title: Resolve conflicts between master and develop - body: This PR resolves merge conflicts between master and develop. + title: Resolve conflicts between master and develop. Triggered by PR #${{ github.event.pull_request.number }} + body: | + This PR resolves merge conflicts between master and develop. + Commit Summary: ${{ steps.commit_summary.outputs.summary }} base: develop branch: 'developer-sync-pr-conflict-${{ github.event.pull_request.number }}' - name: Handle other merge failures @@ -108,4 +122,18 @@ jobs: run: > echo "Handle non-conflict related failure, such as network issues or missing branches" - # Possibly incorporate additional handling logic here (e.g.,notifications or retries) \ No newline at end of file + # Possibly incorporate additional handling logic here (e.g.,notifications or retries) + + + no_changes: + name: "No Changes Made. No synchronization needed." 
+ needs: start + if: > + ( + needs.create_pr_for_nonplugin.result != 'success' && + needs.auto_sync_for_plugin.result != 'success' + ) + runs-on: ubuntu-latest + steps: + - name: Echo no changes + run: echo "No changes were made to master branch 👍" diff --git a/brainscore_vision/models/scaling_models/__init__.py b/brainscore_vision/models/scaling_models/__init__.py new file mode 100644 index 000000000..e020a07e5 --- /dev/null +++ b/brainscore_vision/models/scaling_models/__init__.py @@ -0,0 +1,265 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from .model import get_model, MODEL_CONFIGS + +model_registry["resnet18_imagenet_full"] = lambda: ModelCommitment( + identifier="resnet18_imagenet_full", + activations_model=get_model("resnet18_imagenet_full"), + layers=MODEL_CONFIGS["resnet18_imagenet_full"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["resnet18_imagenet_full"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["resnet18_imagenet_full"]["model_commitment"]["region_layer_map"] +) + + +model_registry["resnet34_imagenet_full"] = lambda: ModelCommitment( + identifier="resnet34_imagenet_full", + activations_model=get_model("resnet34_imagenet_full"), + layers=MODEL_CONFIGS["resnet34_imagenet_full"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["resnet34_imagenet_full"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["resnet34_imagenet_full"]["model_commitment"]["region_layer_map"] +) + + +model_registry["resnet50_imagenet_full"] = lambda: ModelCommitment( + identifier="resnet50_imagenet_full", + activations_model=get_model("resnet50_imagenet_full"), + layers=MODEL_CONFIGS["resnet50_imagenet_full"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["resnet50_imagenet_full"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["resnet50_imagenet_full"]["model_commitment"]["region_layer_map"] +) + + +model_registry["resnet101_imagenet_full"] = lambda: ModelCommitment( + identifier="resnet101_imagenet_full", + activations_model=get_model("resnet101_imagenet_full"), + layers=MODEL_CONFIGS["resnet101_imagenet_full"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["resnet101_imagenet_full"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["resnet101_imagenet_full"]["model_commitment"]["region_layer_map"] +) + + +model_registry["resnet152_imagenet_full"] = lambda: ModelCommitment( + identifier="resnet152_imagenet_full", + activations_model=get_model("resnet152_imagenet_full"), + layers=MODEL_CONFIGS["resnet152_imagenet_full"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["resnet152_imagenet_full"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["resnet152_imagenet_full"]["model_commitment"]["region_layer_map"] +) + + +model_registry["resnet18_ecoset_full"] = lambda: ModelCommitment( + identifier="resnet18_ecoset_full", + activations_model=get_model("resnet18_ecoset_full"), + layers=MODEL_CONFIGS["resnet18_ecoset_full"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["resnet18_ecoset_full"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["resnet18_ecoset_full"]["model_commitment"]["region_layer_map"] +) + + +model_registry["resnet34_ecoset_full"] = lambda: ModelCommitment( + 
identifier="resnet34_ecoset_full", + activations_model=get_model("resnet34_ecoset_full"), + layers=MODEL_CONFIGS["resnet34_ecoset_full"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["resnet34_ecoset_full"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["resnet34_ecoset_full"]["model_commitment"]["region_layer_map"] +) + + +model_registry["resnet50_ecoset_full"] = lambda: ModelCommitment( + identifier="resnet50_ecoset_full", + activations_model=get_model("resnet50_ecoset_full"), + layers=MODEL_CONFIGS["resnet50_ecoset_full"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["resnet50_ecoset_full"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["resnet50_ecoset_full"]["model_commitment"]["region_layer_map"] +) + + +model_registry["resnet101_ecoset_full"] = lambda: ModelCommitment( + identifier="resnet101_ecoset_full", + activations_model=get_model("resnet101_ecoset_full"), + layers=MODEL_CONFIGS["resnet101_ecoset_full"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["resnet101_ecoset_full"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["resnet101_ecoset_full"]["model_commitment"]["region_layer_map"] +) + + +model_registry["resnet152_ecoset_full"] = lambda: ModelCommitment( + identifier="resnet152_ecoset_full", + activations_model=get_model("resnet152_ecoset_full"), + layers=MODEL_CONFIGS["resnet152_ecoset_full"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["resnet152_ecoset_full"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["resnet152_ecoset_full"]["model_commitment"]["region_layer_map"] +) + + +model_registry["resnet50_imagenet_1_seed-0"] = lambda: ModelCommitment( + identifier="resnet50_imagenet_1_seed-0", + activations_model=get_model("resnet50_imagenet_1_seed-0"), + layers=MODEL_CONFIGS["resnet50_imagenet_1_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["resnet50_imagenet_1_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["resnet50_imagenet_1_seed-0"]["model_commitment"]["region_layer_map"] +) + + +model_registry["resnet50_imagenet_10_seed-0"] = lambda: ModelCommitment( + identifier="resnet50_imagenet_10_seed-0", + activations_model=get_model("resnet50_imagenet_10_seed-0"), + layers=MODEL_CONFIGS["resnet50_imagenet_10_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["resnet50_imagenet_10_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["resnet50_imagenet_10_seed-0"]["model_commitment"]["region_layer_map"] +) + + +model_registry["resnet50_imagenet_100_seed-0"] = lambda: ModelCommitment( + identifier="resnet50_imagenet_100_seed-0", + activations_model=get_model("resnet50_imagenet_100_seed-0"), + layers=MODEL_CONFIGS["resnet50_imagenet_100_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["resnet50_imagenet_100_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["resnet50_imagenet_100_seed-0"]["model_commitment"]["region_layer_map"] +) + + +model_registry["efficientnet_b0_imagenet_full"] = lambda: ModelCommitment( + identifier="efficientnet_b0_imagenet_full", + activations_model=get_model("efficientnet_b0_imagenet_full"), + layers=MODEL_CONFIGS["efficientnet_b0_imagenet_full"]["model_commitment"]["layers"], + 
behavioral_readout_layer=MODEL_CONFIGS["efficientnet_b0_imagenet_full"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["efficientnet_b0_imagenet_full"]["model_commitment"]["region_layer_map"] +) + + +model_registry["efficientnet_b1_imagenet_full"] = lambda: ModelCommitment( + identifier="efficientnet_b1_imagenet_full", + activations_model=get_model("efficientnet_b1_imagenet_full"), + layers=MODEL_CONFIGS["efficientnet_b1_imagenet_full"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["efficientnet_b1_imagenet_full"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["efficientnet_b1_imagenet_full"]["model_commitment"]["region_layer_map"] +) + + +model_registry["efficientnet_b2_imagenet_full"] = lambda: ModelCommitment( + identifier="efficientnet_b2_imagenet_full", + activations_model=get_model("efficientnet_b2_imagenet_full"), + layers=MODEL_CONFIGS["efficientnet_b2_imagenet_full"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["efficientnet_b2_imagenet_full"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["efficientnet_b2_imagenet_full"]["model_commitment"]["region_layer_map"] +) + + +model_registry["deit_small_imagenet_full_seed-0"] = lambda: ModelCommitment( + identifier="deit_small_imagenet_full_seed-0", + activations_model=get_model("deit_small_imagenet_full_seed-0"), + layers=MODEL_CONFIGS["deit_small_imagenet_full_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["deit_small_imagenet_full_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["deit_small_imagenet_full_seed-0"]["model_commitment"]["region_layer_map"] +) + + +model_registry["deit_base_imagenet_full_seed-0"] = lambda: ModelCommitment( + identifier="deit_base_imagenet_full_seed-0", + activations_model=get_model("deit_base_imagenet_full_seed-0"), + layers=MODEL_CONFIGS["deit_base_imagenet_full_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["deit_base_imagenet_full_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["deit_base_imagenet_full_seed-0"]["model_commitment"]["region_layer_map"] +) + + +model_registry["deit_large_imagenet_full_seed-0"] = lambda: ModelCommitment( + identifier="deit_large_imagenet_full_seed-0", + activations_model=get_model("deit_large_imagenet_full_seed-0"), + layers=MODEL_CONFIGS["deit_large_imagenet_full_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["deit_large_imagenet_full_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["deit_large_imagenet_full_seed-0"]["model_commitment"]["region_layer_map"] +) + + +model_registry["deit_small_imagenet_1_seed-0"] = lambda: ModelCommitment( + identifier="deit_small_imagenet_1_seed-0", + activations_model=get_model("deit_small_imagenet_1_seed-0"), + layers=MODEL_CONFIGS["deit_small_imagenet_1_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["deit_small_imagenet_1_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["deit_small_imagenet_1_seed-0"]["model_commitment"]["region_layer_map"] +) + + +model_registry["deit_small_imagenet_10_seed-0"] = lambda: ModelCommitment( + identifier="deit_small_imagenet_10_seed-0", + activations_model=get_model("deit_small_imagenet_10_seed-0"), + 
layers=MODEL_CONFIGS["deit_small_imagenet_10_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["deit_small_imagenet_10_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["deit_small_imagenet_10_seed-0"]["model_commitment"]["region_layer_map"] +) + + +model_registry["deit_small_imagenet_100_seed-0"] = lambda: ModelCommitment( + identifier="deit_small_imagenet_100_seed-0", + activations_model=get_model("deit_small_imagenet_100_seed-0"), + layers=MODEL_CONFIGS["deit_small_imagenet_100_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["deit_small_imagenet_100_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["deit_small_imagenet_100_seed-0"]["model_commitment"]["region_layer_map"] +) + + +model_registry["convnext_tiny_imagenet_full_seed-0"] = lambda: ModelCommitment( + identifier="convnext_tiny_imagenet_full_seed-0", + activations_model=get_model("convnext_tiny_imagenet_full_seed-0"), + layers=MODEL_CONFIGS["convnext_tiny_imagenet_full_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["convnext_tiny_imagenet_full_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["convnext_tiny_imagenet_full_seed-0"]["model_commitment"]["region_layer_map"] +) + + +model_registry["convnext_small_imagenet_full_seed-0"] = lambda: ModelCommitment( + identifier="convnext_small_imagenet_full_seed-0", + activations_model=get_model("convnext_small_imagenet_full_seed-0"), + layers=MODEL_CONFIGS["convnext_small_imagenet_full_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["convnext_small_imagenet_full_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["convnext_small_imagenet_full_seed-0"]["model_commitment"]["region_layer_map"] +) + + +model_registry["convnext_base_imagenet_full_seed-0"] = lambda: ModelCommitment( + identifier="convnext_base_imagenet_full_seed-0", + activations_model=get_model("convnext_base_imagenet_full_seed-0"), + layers=MODEL_CONFIGS["convnext_base_imagenet_full_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["convnext_base_imagenet_full_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["convnext_base_imagenet_full_seed-0"]["model_commitment"]["region_layer_map"] +) + + +model_registry["convnext_large_imagenet_full_seed-0"] = lambda: ModelCommitment( + identifier="convnext_large_imagenet_full_seed-0", + activations_model=get_model("convnext_large_imagenet_full_seed-0"), + layers=MODEL_CONFIGS["convnext_large_imagenet_full_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["convnext_large_imagenet_full_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["convnext_large_imagenet_full_seed-0"]["model_commitment"]["region_layer_map"] +) + + +model_registry["convnext_small_imagenet_1_seed-0"] = lambda: ModelCommitment( + identifier="convnext_small_imagenet_1_seed-0", + activations_model=get_model("convnext_small_imagenet_1_seed-0"), + layers=MODEL_CONFIGS["convnext_small_imagenet_1_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["convnext_small_imagenet_1_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["convnext_small_imagenet_1_seed-0"]["model_commitment"]["region_layer_map"] +) + + 
+model_registry["convnext_small_imagenet_10_seed-0"] = lambda: ModelCommitment( + identifier="convnext_small_imagenet_10_seed-0", + activations_model=get_model("convnext_small_imagenet_10_seed-0"), + layers=MODEL_CONFIGS["convnext_small_imagenet_10_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["convnext_small_imagenet_10_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["convnext_small_imagenet_10_seed-0"]["model_commitment"]["region_layer_map"] +) + + +model_registry["convnext_small_imagenet_100_seed-0"] = lambda: ModelCommitment( + identifier="convnext_small_imagenet_100_seed-0", + activations_model=get_model("convnext_small_imagenet_100_seed-0"), + layers=MODEL_CONFIGS["convnext_small_imagenet_100_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["convnext_small_imagenet_100_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["convnext_small_imagenet_100_seed-0"]["model_commitment"]["region_layer_map"] +) + + diff --git a/brainscore_vision/models/scaling_models/model.py b/brainscore_vision/models/scaling_models/model.py new file mode 100644 index 000000000..ab63520a4 --- /dev/null +++ b/brainscore_vision/models/scaling_models/model.py @@ -0,0 +1,144 @@ +import os +import functools +import json +from pathlib import Path + +import torchvision.models +import torch + +from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper +from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images + +import timm +import numpy as np +import torchvision.transforms as T +from PIL import Image + +import albumentations as A +from albumentations.pytorch import ToTensorV2 + +BIBTEX = """""" + + +with open(Path(__file__).parent / "model_configs.json", "r") as f: + MODEL_CONFIGS = json.load(f) + + +def load_image(image_filepath): + return Image.open(image_filepath).convert("RGB") + + +def get_interpolation_mode(interpolation: str) -> int: + """Returns the interpolation mode for albumentations""" + if "linear" or "bilinear" in interpolation: + return 1 + elif "cubic" or "bicubic" in interpolation: + return 2 + else: + raise NotImplementedError(f"Interpolation mode {interpolation} not implemented") + + +def custom_image_preprocess( + images, + resize_size: int, + crop_size: int, + interpolation: str, + transforms=None, +): + if transforms is None: + interpolation = get_interpolation_mode(interpolation) + transforms = A.Compose( + [ + A.Resize(resize_size, resize_size, p=1.0, interpolation=interpolation), + A.CenterCrop(crop_size, crop_size, p=1.0), + A.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), + ToTensorV2(), + ] + ) + if isinstance(transforms, T.Compose): + images = [transforms(image) for image in images] + images = [np.array(image) for image in images] + images = np.stack(images) + elif isinstance(transforms, A.Compose): + images = [transforms(image=np.array(image))["image"] for image in images] + images = np.stack(images) + else: + raise NotImplementedError( + f"Transform of type {type(transforms)} is not implemented" + ) + + return images + + +def load_preprocess_images_custom( + image_filepaths, preprocess_images=custom_image_preprocess, **kwargs +): + images = [load_image(image_filepath) for image_filepath in image_filepaths] + images = preprocess_images(images, **kwargs) + return images + + +def get_model(model_id:str): + + # Unpack model config + config = MODEL_CONFIGS[model_id] + model_name 
= config["model_name"] + model_id = config["model_id"] + resize_size = config["resize_size"] + crop_size = config["crop_size"] + interpolation = config["interpolation"] + num_classes = config["num_classes"] + ckpt_url = config["checkpoint_url"] + use_timm = config["use_timm"] + timm_model_name = config["timm_model_name"] + epoch = config["epoch"] + load_model_ema = config["load_model_ema"] + output_head = config["output_head"] + is_vit = config["is_vit"] + + # Temporary fix for vit models + # See https://github.com/brain-score/vision/pull/1232 + if is_vit: + os.environ['RESULTCACHING_DISABLE'] = 'brainscore_vision.model_helpers.activations.core.ActivationsExtractorHelper._from_paths_stored' + + + # Initialize model + if use_timm: + model = timm.create_model(timm_model_name, pretrained=False, num_classes=num_classes) + else: + model = eval(f"torchvision.models.{model_name}(weights=None)") + if num_classes != 1000: + exec(f'''{output_head} = torch.nn.Linear( + in_features={output_head}.in_features, + out_features=num_classes, + bias={output_head}.bias is not None, + )''' + ) + + # Load model weights + state_dict = torch.hub.load_state_dict_from_url( + ckpt_url, + check_hash=True, + file_name=f"{model_id}_ep{epoch}.pt", + map_location="cpu", + ) + if load_model_ema: + state_dict = state_dict["state"]["model_ema_state_dict"] + else: + state_dict = state_dict["state"]["model"] + state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()} + model.load_state_dict(state_dict, strict=True) + print(f"Model loaded from {ckpt_url}") + + # Wrap model + preprocessing = functools.partial( + load_preprocess_images_custom, + resize_size=resize_size, + crop_size=crop_size, + interpolation=interpolation, + transforms=None + ) + wrapper = PytorchWrapper( + identifier=model_id, model=model, preprocessing=preprocessing + ) + return wrapper diff --git a/brainscore_vision/models/scaling_models/model_configs.json b/brainscore_vision/models/scaling_models/model_configs.json new file mode 100644 index 000000000..cc52e5370 --- /dev/null +++ b/brainscore_vision/models/scaling_models/model_configs.json @@ -0,0 +1,869 @@ +{ + "resnet18_imagenet_full": { + "model_name": "resnet18", + "model_id": "resnet18_imagenet_full", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.fc", + "model_commitment": { + "layers": [ + "layer1.0.bn1", + "layer2.0.bn2", + "layer3.0.conv2", + "layer4.0.bn1" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "layer1.0.bn1", + "V2": "layer3.0.conv2", + "V4": "layer2.0.bn2", + "IT": "layer4.0.bn1" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/resnet18_imagenet_full/ep100.pt" + }, + "resnet34_imagenet_full": { + "model_name": "resnet34", + "model_id": "resnet34_imagenet_full", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.fc", + "model_commitment": { + "layers": [ + "layer1.0.bn1", + "layer2.0.bn2", + "layer3.1.conv1", + "layer4.0.bn1" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "layer1.0.bn1", + "V2": "layer3.1.conv1", + "V4": "layer2.0.bn2", + "IT": "layer4.0.bn1" + } + }, + "checkpoint_url": 
"https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/resnet34_imagenet_full/ep100.pt" + }, + "resnet50_imagenet_full": { + "model_name": "resnet50", + "model_id": "resnet50_imagenet_full", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.fc", + "model_commitment": { + "layers": [ + "layer1.0.conv1", + "layer3.0.conv1", + "layer3.5.bn3", + "layer4.0.relu" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "layer1.0.conv1", + "V2": "layer3.5.bn3", + "V4": "layer3.0.conv1", + "IT": "layer4.0.relu" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/resnet50_imagenet_full/ep100.pt" + }, + "resnet101_imagenet_full": { + "model_name": "resnet101", + "model_id": "resnet101_imagenet_full", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.fc", + "model_commitment": { + "layers": [ + "layer1.0.bn1", + "layer3.0.bn3", + "layer4.0.bn1", + "layer4.0.relu" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "layer1.0.bn1", + "V2": "layer4.0.bn1", + "V4": "layer3.0.bn3", + "IT": "layer4.0.relu" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/resnet101_imagenet_full/ep100.pt" + }, + "resnet152_imagenet_full": { + "model_name": "resnet152", + "model_id": "resnet152_imagenet_full", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.fc", + "model_commitment": { + "layers": [ + "layer1.0.bn1", + "layer3.0.bn3", + "layer3.3.bn3", + "layer3.34.bn3" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "layer1.0.bn1", + "V2": "layer3.3.bn3", + "V4": "layer3.0.bn3", + "IT": "layer3.34.bn3" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/resnet152_imagenet_full/ep100.pt" + }, + "resnet18_ecoset_full": { + "model_name": "resnet18", + "model_id": "resnet18_ecoset_full", + "num_classes": 565, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.fc", + "model_commitment": { + "layers": [ + "layer1.0.conv1", + "layer2.0.bn2", + "layer3.0.conv1", + "layer4.0.bn1" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "layer1.0.conv1", + "V2": "layer3.0.conv1", + "V4": "layer2.0.bn2", + "IT": "layer4.0.bn1" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/resnet18_ecoset_full/ep100.pt" + }, + "resnet34_ecoset_full": { + "model_name": "resnet34", + "model_id": "resnet34_ecoset_full", + "num_classes": 565, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.fc", + "model_commitment": { + "layers": [ + "layer1.0.bn1", + "layer3.0.conv1", + 
"layer3.1.conv1", + "layer4.0.conv1" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "layer1.0.bn1", + "V2": "layer3.1.conv1", + "V4": "layer3.0.conv1", + "IT": "layer4.0.conv1" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/resnet34_ecoset_full/ep100.pt" + }, + "resnet50_ecoset_full": { + "model_name": "resnet50", + "model_id": "resnet50_ecoset_full", + "num_classes": 565, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.fc", + "model_commitment": { + "layers": [ + "layer1.0.bn1", + "layer3.0.conv1", + "layer4.0.conv2", + "layer4.0.relu" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "layer1.0.bn1", + "V2": "layer4.0.conv2", + "V4": "layer3.0.conv1", + "IT": "layer4.0.relu" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/resnet50_ecoset_full/ep100.pt" + }, + "resnet101_ecoset_full": { + "model_name": "resnet101", + "model_id": "resnet101_ecoset_full", + "num_classes": 565, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.fc", + "model_commitment": { + "layers": [ + "layer1.0.bn1", + "layer3.0.bn3", + "layer3.4.relu", + "layer4.0.relu" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "layer1.0.bn1", + "V2": "layer3.4.relu", + "V4": "layer3.0.bn3", + "IT": "layer4.0.relu" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/resnet101_ecoset_full/ep100.pt" + }, + "resnet152_ecoset_full": { + "model_name": "resnet152", + "model_id": "resnet152_ecoset_full", + "num_classes": 565, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.fc", + "model_commitment": { + "layers": [ + "layer1.0.bn1", + "layer3.0.bn3", + "layer3.3.bn3", + "layer4.0.relu" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "layer1.0.bn1", + "V2": "layer3.3.bn3", + "V4": "layer3.0.bn3", + "IT": "layer4.0.relu" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/resnet152_ecoset_full/ep100.pt" + }, + "resnet50_imagenet_1_seed-0": { + "model_name": "resnet50", + "model_id": "resnet50_imagenet_1_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.fc", + "model_commitment": { + "layers": [ + "layer1.0.conv1", + "layer3.0.conv1", + "layer3.5.bn3", + "layer4.0.relu" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "layer1.0.conv1", + "V2": "layer3.5.bn3", + "V4": "layer3.0.conv1", + "IT": "layer4.0.relu" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/resnet50_imagenet_1_seed-0/ep100.pt" + }, + "resnet50_imagenet_10_seed-0": { + "model_name": "resnet50", + "model_id": "resnet50_imagenet_10_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + 
"interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.fc", + "model_commitment": { + "layers": [ + "layer1.0.conv1", + "layer3.0.conv1", + "layer3.5.bn3", + "layer4.0.relu" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "layer1.0.conv1", + "V2": "layer3.5.bn3", + "V4": "layer3.0.conv1", + "IT": "layer4.0.relu" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/resnet50_imagenet_10_seed-0/ep100.pt" + }, + "resnet50_imagenet_100_seed-0": { + "model_name": "resnet50", + "model_id": "resnet50_imagenet_100_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.fc", + "model_commitment": { + "layers": [ + "layer1.0.conv1", + "layer3.0.conv1", + "layer3.5.bn3", + "layer4.0.relu" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "layer1.0.conv1", + "V2": "layer3.5.bn3", + "V4": "layer3.0.conv1", + "IT": "layer4.0.relu" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/resnet50_imagenet_100_seed-0/ep100.pt" + }, + "efficientnet_b0_imagenet_full": { + "model_name": "efficientnet_b0", + "model_id": "efficientnet_b0_imagenet_full", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.classifier[1]", + "model_commitment": { + "layers": [ + "features.4.0.block.0.1", + "features.4.0.block.1.0", + "features.4.1.block.3.1", + "features.6.0.block.3.0" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "features.4.0.block.1.0", + "V2": "features.4.1.block.3.1", + "V4": "features.4.0.block.0.1", + "IT": "features.6.0.block.3.0" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/efficientnet_b0_imagenet_full/ep100.pt" + }, + "efficientnet_b1_imagenet_full": { + "model_name": "efficientnet_b1", + "model_id": "efficientnet_b1_imagenet_full", + "num_classes": 1000, + "resize_size": 255, + "crop_size": 240, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.classifier[1]", + "model_commitment": { + "layers": [ + "features.4.0.block.0.1", + "features.4.0.block.1.0", + "features.6.0.block.3.0" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "features.4.0.block.0.1", + "V2": "features.4.0.block.1.0", + "V4": "features.4.0.block.0.1", + "IT": "features.6.0.block.3.0" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/efficientnet_b1_imagenet_full/ep100.pt" + }, + "efficientnet_b2_imagenet_full": { + "model_name": "efficientnet_b2", + "model_id": "efficientnet_b2_imagenet_full", + "num_classes": 1000, + "resize_size": 288, + "crop_size": 288, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.classifier[1]", + "model_commitment": { + "layers": [ + "features.4.0.block.3.0", + "features.5.0.block.1.0", + 
"features.6.0.block.3.0" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "features.4.0.block.3.0", + "V2": "features.5.0.block.1.0", + "V4": "features.4.0.block.3.0", + "IT": "features.6.0.block.3.0" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/efficientnet_b2_imagenet_full/ep100.pt" + }, + "deit_small_imagenet_full_seed-0": { + "model_name": "deit_small", + "model_id": "deit_small_imagenet_full_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bicubic", + "load_model_ema": true, + "use_timm": true, + "timm_model_name": "deit3_small_patch16_224", + "is_vit": true, + "epoch": 300, + "output_head": null, + "model_commitment": { + "layers": [ + "blocks.2.norm1", + "blocks.5.norm1", + "blocks.6.norm2", + "blocks.9.norm2" + ], + "behavioral_readout_layer": "fc_norm", + "region_layer_map": { + "V1": "blocks.2.norm1", + "V2": "blocks.6.norm2", + "V4": "blocks.5.norm1", + "IT": "blocks.9.norm2" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/deit_small_imagenet_full_seed-0/ep300.pt" + }, + "deit_base_imagenet_full_seed-0": { + "model_name": "deit_base", + "model_id": "deit_base_imagenet_full_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bicubic", + "load_model_ema": true, + "use_timm": true, + "timm_model_name": "deit3_base_patch16_224", + "is_vit": true, + "epoch": 300, + "output_head": null, + "model_commitment": { + "layers": [ + "blocks.3.mlp.act", + "blocks.3.mlp.fc1", + "blocks.8.norm2", + "blocks.9.norm2" + ], + "behavioral_readout_layer": "fc_norm", + "region_layer_map": { + "V1": "blocks.3.mlp.fc1", + "V2": "blocks.8.norm2", + "V4": "blocks.3.mlp.act", + "IT": "blocks.9.norm2" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/deit_base_imagenet_full_seed-0/ep300.pt" + }, + "deit_large_imagenet_full_seed-0": { + "model_name": "deit_large", + "model_id": "deit_large_imagenet_full_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bicubic", + "load_model_ema": true, + "use_timm": true, + "timm_model_name": "deit3_large_patch16_224", + "is_vit": true, + "epoch": 300, + "output_head": null, + "model_commitment": { + "layers": [ + "blocks.18.norm2", + "blocks.20.norm2", + "blocks.4.norm1", + "blocks.9.norm1" + ], + "behavioral_readout_layer": "fc_norm", + "region_layer_map": { + "V1": "blocks.4.norm1", + "V2": "blocks.18.norm2", + "V4": "blocks.9.norm1", + "IT": "blocks.20.norm2" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/deit_large_imagenet_full_seed-0/ep300.pt" + }, + "deit_small_imagenet_1_seed-0": { + "model_name": "deit_small", + "model_id": "deit_small_imagenet_1_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bicubic", + "load_model_ema": true, + "use_timm": true, + "timm_model_name": "deit3_small_patch16_224", + "is_vit": true, + "epoch": 300, + "output_head": null, + "model_commitment": { + "layers": [ + "blocks.2.norm1", + "blocks.5.norm1", + "blocks.6.norm2", + "blocks.9.norm2" + ], + "behavioral_readout_layer": "fc_norm", + "region_layer_map": { + "V1": "blocks.2.norm1", + "V2": "blocks.6.norm2", + "V4": "blocks.5.norm1", + "IT": "blocks.9.norm2" + } + }, + "checkpoint_url": 
"https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/deit_small_imagenet_1_seed-0/ep300.pt" + }, + "deit_small_imagenet_10_seed-0": { + "model_name": "deit_small", + "model_id": "deit_small_imagenet_10_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bicubic", + "load_model_ema": true, + "use_timm": true, + "timm_model_name": "deit3_small_patch16_224", + "is_vit": true, + "epoch": 300, + "output_head": null, + "model_commitment": { + "layers": [ + "blocks.2.norm1", + "blocks.5.norm1", + "blocks.6.norm2", + "blocks.9.norm2" + ], + "behavioral_readout_layer": "fc_norm", + "region_layer_map": { + "V1": "blocks.2.norm1", + "V2": "blocks.6.norm2", + "V4": "blocks.5.norm1", + "IT": "blocks.9.norm2" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/deit_small_imagenet_10_seed-0/ep300.pt" + }, + "deit_small_imagenet_100_seed-0": { + "model_name": "deit_small", + "model_id": "deit_small_imagenet_100_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bicubic", + "load_model_ema": true, + "use_timm": true, + "timm_model_name": "deit3_small_patch16_224", + "is_vit": true, + "epoch": 300, + "output_head": null, + "model_commitment": { + "layers": [ + "blocks.2.norm1", + "blocks.5.norm1", + "blocks.6.norm2", + "blocks.9.norm2" + ], + "behavioral_readout_layer": "fc_norm", + "region_layer_map": { + "V1": "blocks.2.norm1", + "V2": "blocks.6.norm2", + "V4": "blocks.5.norm1", + "IT": "blocks.9.norm2" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/deit_small_imagenet_100_seed-0/ep300.pt" + }, + "convnext_tiny_imagenet_full_seed-0": { + "model_name": "convnext_tiny", + "model_id": "convnext_tiny_imagenet_full_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bicubic", + "load_model_ema": true, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 300, + "output_head": "model.classifier[2]", + "model_commitment": { + "layers": [ + "features.4.0", + "features.5.4.block.0", + "features.6.0" + ], + "behavioral_readout_layer": "classifier.1", + "region_layer_map": { + "V1": "features.6.0", + "V2": "features.5.4.block.0", + "V4": "features.4.0", + "IT": "features.5.4.block.0" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/convnext_tiny_imagenet_full_seed-0/ep300.pt" + }, + "convnext_small_imagenet_full_seed-0": { + "model_name": "convnext_small", + "model_id": "convnext_small_imagenet_full_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bicubic", + "load_model_ema": true, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 300, + "output_head": "model.classifier[2]", + "model_commitment": { + "layers": [ + "features.4.0", + "features.5.17.block.0", + "features.5.2.block.0", + "features.5.9.block.0" + ], + "behavioral_readout_layer": "classifier.1", + "region_layer_map": { + "V1": "features.5.2.block.0", + "V2": "features.5.17.block.0", + "V4": "features.4.0", + "IT": "features.5.9.block.0" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/convnext_small_imagenet_full_seed-0/ep300.pt" + }, + "convnext_base_imagenet_full_seed-0": { + "model_name": "convnext_base", + "model_id": "convnext_base_imagenet_full_seed-0", + "num_classes": 1000, + 
"resize_size": 256, + "crop_size": 224, + "interpolation": "bicubic", + "load_model_ema": true, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 300, + "output_head": "model.classifier[2]", + "model_commitment": { + "layers": [ + "features.4.0", + "features.5.11.block.0", + "features.5.12.block.0", + "features.5.7.block.0" + ], + "behavioral_readout_layer": "classifier.1", + "region_layer_map": { + "V1": "features.5.7.block.0", + "V2": "features.5.12.block.0", + "V4": "features.4.0", + "IT": "features.5.11.block.0" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/convnext_base_imagenet_full_seed-0/ep300.pt" + }, + "convnext_large_imagenet_full_seed-0": { + "model_name": "convnext_large", + "model_id": "convnext_large_imagenet_full_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bicubic", + "load_model_ema": true, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 300, + "output_head": "model.classifier[2]", + "model_commitment": { + "layers": [ + "features.4.1", + "features.5.11.block.0", + "features.5.7.block.0", + "features.5.7.block.5" + ], + "behavioral_readout_layer": "classifier.1", + "region_layer_map": { + "V1": "features.5.7.block.5", + "V2": "features.5.7.block.0", + "V4": "features.4.1", + "IT": "features.5.11.block.0" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/convnext_large_imagenet_full_seed-0/ep300.pt" + }, + "convnext_small_imagenet_1_seed-0": { + "model_name": "convnext_small", + "model_id": "convnext_small_imagenet_1_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bicubic", + "load_model_ema": true, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 300, + "output_head": "model.classifier[2]", + "model_commitment": { + "layers": [ + "features.4.0", + "features.5.17.block.0", + "features.5.2.block.0", + "features.5.9.block.0" + ], + "behavioral_readout_layer": "classifier.1", + "region_layer_map": { + "V1": "features.5.2.block.0", + "V2": "features.5.17.block.0", + "V4": "features.4.0", + "IT": "features.5.9.block.0" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/convnext_small_imagenet_1_seed-0/ep300.pt" + }, + "convnext_small_imagenet_10_seed-0": { + "model_name": "convnext_small", + "model_id": "convnext_small_imagenet_10_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bicubic", + "load_model_ema": true, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 300, + "output_head": "model.classifier[2]", + "model_commitment": { + "layers": [ + "features.4.0", + "features.5.17.block.0", + "features.5.2.block.0", + "features.5.9.block.0" + ], + "behavioral_readout_layer": "classifier.1", + "region_layer_map": { + "V1": "features.5.2.block.0", + "V2": "features.5.17.block.0", + "V4": "features.4.0", + "IT": "features.5.9.block.0" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/convnext_small_imagenet_10_seed-0/ep300.pt" + }, + "convnext_small_imagenet_100_seed-0": { + "model_name": "convnext_small", + "model_id": "convnext_small_imagenet_100_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bicubic", + "load_model_ema": true, + "use_timm": false, + 
"timm_model_name": null, + "is_vit": false, + "epoch": 300, + "output_head": "model.classifier[2]", + "model_commitment": { + "layers": [ + "features.4.0", + "features.5.17.block.0", + "features.5.2.block.0", + "features.5.9.block.0" + ], + "behavioral_readout_layer": "classifier.1", + "region_layer_map": { + "V1": "features.5.2.block.0", + "V2": "features.5.17.block.0", + "V4": "features.4.0", + "IT": "features.5.9.block.0" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/convnext_small_imagenet_100_seed-0/ep300.pt" + } +} \ No newline at end of file diff --git a/brainscore_vision/models/scaling_models/requirements.txt b/brainscore_vision/models/scaling_models/requirements.txt new file mode 100644 index 000000000..af6389b09 --- /dev/null +++ b/brainscore_vision/models/scaling_models/requirements.txt @@ -0,0 +1,4 @@ +torch +torchvision +albumentations +timm diff --git a/brainscore_vision/models/scaling_models/test.py b/brainscore_vision/models/scaling_models/test.py new file mode 100644 index 000000000..e69de29bb From a89c9a96b0ebbe4d916fd80206050e4612782756 Mon Sep 17 00:00:00 2001 From: Abdulkadir Gokce Date: Wed, 25 Sep 2024 11:36:54 +0200 Subject: [PATCH 27/28] enable unverified SSL for scaling models (#1263) --- brainscore_vision/models/scaling_models/model.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/brainscore_vision/models/scaling_models/model.py b/brainscore_vision/models/scaling_models/model.py index ab63520a4..c7a470e6e 100644 --- a/brainscore_vision/models/scaling_models/model.py +++ b/brainscore_vision/models/scaling_models/model.py @@ -2,6 +2,7 @@ import functools import json from pathlib import Path +import ssl import torchvision.models import torch @@ -17,6 +18,9 @@ import albumentations as A from albumentations.pytorch import ToTensorV2 +# Disable SSL verification +ssl._create_default_https_context = ssl._create_unverified_context + BIBTEX = """""" From 4c31b4fd868f97298780bc394404f9c84932bfdb Mon Sep 17 00:00:00 2001 From: Kartik Pradeepan Date: Wed, 25 Sep 2024 14:13:11 -0400 Subject: [PATCH 28/28] Force sync (#1264) * point to pypi instead of git (#1236) * update dependency names w/ hyphens (#1238) * point to pypi instead of git * Update pyproject.toml with hyphens * Custom trained scaling models (#1206) * Custom trained models * Fix missing library * Fix a missing library * Not every model needs timm * Trigger build tests * Use model_id instead of model_name to ensure correct weights are loaded * timm is missing from deit models * timm is missing from deit models * Fix requirements.txt newline * Package models into a single plugin * Update requirements.txt * Update requirements.txt * Update requirements.txt * Change the order of models * Update model.py * Disable resultcaching for vits * Tests reordering * Revert tests back * Update model.py * Update model.py * Update to sync workflow (#1248) * Update to sync workflow Adds dynamic PR title and commit history included in PR description. Adds `No changes detected` job. * Update no_changes condition and text Refined condition to not require PR_merge because that is when this condition SHOULD be triggered to indicate the correct status of the workflow. 
* enable unverified SSL for scaling models (#1263) --------- Co-authored-by: Sam Winebrake <85908068+samwinebrake@users.noreply.github.com> Co-authored-by: Abdulkadir Gokce --- brainscore_vision/models/scaling_models/model.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/brainscore_vision/models/scaling_models/model.py b/brainscore_vision/models/scaling_models/model.py index ab63520a4..c7a470e6e 100644 --- a/brainscore_vision/models/scaling_models/model.py +++ b/brainscore_vision/models/scaling_models/model.py @@ -2,6 +2,7 @@ import functools import json from pathlib import Path +import ssl import torchvision.models import torch @@ -17,6 +18,9 @@ import albumentations as A from albumentations.pytorch import ToTensorV2 +# Disable SSL verification +ssl._create_default_https_context = ssl._create_unverified_context + BIBTEX = """"""
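The two patches above carry the same change (the second re-applies it via the force-sync): they swap the process-wide default HTTPS context for an unverified one so that the checkpoint downloads in get_model succeed even when certificate verification fails. As a hedged sketch only — this is not part of either patch, and scoped_unverified_ssl is a hypothetical helper name — the same effect could instead be confined to the download itself, so unrelated HTTPS traffic keeps certificate verification:

import contextlib
import ssl

@contextlib.contextmanager
def scoped_unverified_ssl():
    # Hypothetical sketch: temporarily install an unverified HTTPS context,
    # then restore whichever context factory was active before.
    previous = ssl._create_default_https_context
    ssl._create_default_https_context = ssl._create_unverified_context
    try:
        yield
    finally:
        ssl._create_default_https_context = previous

Under that assumption, the torch.hub.load_state_dict_from_url call in get_model would simply be wrapped in "with scoped_unverified_ssl():", leaving the module-level default untouched.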