diff --git a/.github/workflows/score_new_plugins.yml b/.github/workflows/score_new_plugins.yml index 8e4c8aec9..1f4c6a176 100644 --- a/.github/workflows/score_new_plugins.yml +++ b/.github/workflows/score_new_plugins.yml @@ -32,10 +32,10 @@ jobs: with: fetch-depth: 0 - - name: Set up Python 3.7 + - name: Set up Python 3.11 uses: actions/setup-python@v4 with: - python-version: 3.7 + python-version: 3.11 - name: Configure AWS Credentials uses: aws-actions/configure-aws-credentials@v1 @@ -176,10 +176,10 @@ jobs: - name: Check out repository code uses: actions/checkout@v4 - - name: Set up Python 3.7 + - name: Set up Python 3.11 uses: actions/setup-python@v4 with: - python-version: 3.7 + python-version: 3.11 - name: Build project run: | diff --git a/.github/workflows/sync_develop_with_master.yml b/.github/workflows/sync_develop_with_master.yml index df8cc238d..bd1bf5b0c 100644 --- a/.github/workflows/sync_develop_with_master.yml +++ b/.github/workflows/sync_develop_with_master.yml @@ -16,6 +16,7 @@ jobs: id: init run: | echo "Starting branch synchronization of ${{ github.repository }}" + create_pr_for_nonplugin: name: Synchronizing non-plugin PR needs: start # This job now needs the 'start' job to complete first @@ -34,15 +35,21 @@ jobs: run: | git fetch origin master git reset --hard origin/master + - name: Get commit summary + id: commit_summary + run: | + git log -1 --pretty=format:"%s" + echo "::set-output name=summary::$(git log -1 --pretty=format:"%s")" - name: Create pull request in develop uses: peter-evans/create-pull-request@v6 with: token: '${{ secrets.PAT }}' commit-message: Sync master into develop - title: Sync master into develop + title: Sync master into develop. Triggered by PR #${{ github.event.pull_request.number }} body: >- This PR syncs the latest changes from the master branch into the develop branch. + Commit Summary: ${{ steps.commit_summary.outputs.summary }} base: develop branch: 'developer-sync-pr-${{ github.event.pull_request.number }}' @@ -85,7 +92,7 @@ jobs: fi } - name: Push changes to develop (if merge is successful) - if: steps.merge.conclusion == 'success' + if: steps.merge.outcome == 'success' run: | #Use force-with-lease to prevent accidental overwrite if branch has been updated. If fails, rebase the update and retry git push origin develop --force-with-lease || { echo "Push failed due to updates in develop. Attempting to rebase and retry..." @@ -93,14 +100,21 @@ jobs: git rebase origin/develop git push origin develop --force-with-lease } + - name: Get commit summary + id: commit_summary + run: | + git log -1 --pretty=format:"%s" + echo "::set-output name=summary::$(git log -1 --pretty=format:"%s")" - name: Create pull request for merge conflicts if: steps.merge.outputs.merge_conflict == 'true' uses: peter-evans/create-pull-request@v6 with: token: '${{ secrets.PAT }}' commit-message: Merge master into develop with conflict resolution - title: Resolve conflicts between master and develop - body: This PR resolves merge conflicts between master and develop. + title: Resolve conflicts between master and develop. Triggered by PR #${{ github.event.pull_request.number }} + body: | + This PR resolves merge conflicts between master and develop. 
+ Commit Summary: ${{ steps.commit_summary.outputs.summary }} base: develop branch: 'developer-sync-pr-conflict-${{ github.event.pull_request.number }}' - name: Handle other merge failures @@ -108,4 +122,18 @@ jobs: run: > echo "Handle non-conflict related failure, such as network issues or missing branches" - # Possibly incorporate additional handling logic here (e.g.,notifications or retries) \ No newline at end of file + # Possibly incorporate additional handling logic here (e.g.,notifications or retries) + + + no_changes: + name: "No Changes Made. No synchronization needed." + needs: start + if: > + ( + needs.create_pr_for_nonplugin.result != 'success' && + needs.auto_sync_for_plugin.result != 'success' + ) + runs-on: ubuntu-latest + steps: + - name: Echo no changes + run: echo "No changes were made to master branch 👍" diff --git a/.readthedocs.yml b/.readthedocs.yml index 229a16285..ecc53316a 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -3,7 +3,7 @@ version: 2 build: os: "ubuntu-20.04" tools: - python: "3.7" + python: "3.11" python: install: diff --git a/.travis.yml b/.travis.yml index 69e9e9b03..75196cb31 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,5 +1,6 @@ version: ~> 1.0 language: python +dist: jammy env: global: - PYTEST_SETTINGS="not requires_gpu and not memory_intense and not slow and not travis_slow" @@ -9,7 +10,7 @@ env: - WEB_SUBMISSION="False" before_install: - pip install --upgrade pip -- pip install setuptools==60.5.0 +- pip install setuptools - pip install pytest # download large files - pip install awscli @@ -31,18 +32,18 @@ import: jobs: include: - - name: 3.7 public - python: '3.7.13' - - name: 3.7 private + - name: 3.11 public + python: '3.11' + - name: 3.11 private if: fork = false - python: '3.7.13' + python: '3.11' env: - PRIVATE_ACCESS=1 - secure: f1rWEwrslh7qa2g/QlKs001sGC3uaOxZNQSfNOPj+TMCqEo2c6OzImC4hyz+WqCyc6N/lFT4yYo2RhvaqStHMRmu/+9aZmuH05Bb0KQpfzNFA+yGa/U5WR3/4u6KRvDAeNEi9drT2LuacTyGbldmQsquujK0jrPpFWpe7zUUKv0zb0lJf0zcjeSrZlDXLlgD6DCqow7OqHRvW04dPZVy1OArRwtPV6DJ6Rqo1MqFQGHJ806VPlXhSoydb7a58dhGajqPjomdmZjhd3wS6Lv6uetTE/VVb4EP4e7n0qfZIx/TpnWG0SR44pcP7OCNARWYANsAivzxnQ0shyXnIzOo8ZcPYiPpt/5D53i5idTBxXyuDaHGQvgwuY5XLZzznEedBgZa4OvjxAXlLEQjdVDfSsZeYaV9gyFkeTlLnK1zvWi0US38eF2Qtm3Sx3D/5TtBKK2n38tyK5gg/XvJNycaXvIl7iVcnI2ifpqD1mUWI6C9j9Tk19/XEpWkwaFi91+0LZF1GhjBu8o3G5Np4RIOKXi3TIHkpbMM5mf11T6Bm9LvEMq1h8bgRQigEbeJF8CbUOSVFv+AaXsggGjQhuwdyvy2JZo+tO1nfhi+kW3XrDGPsz1R7Wfqduyn7UUh5OiFymeZwKseYKnwU47KyCqDwrq5Mnx1MlSidnVmPriadR4= - secure: WE7FPwy07VzJTKAd2xwZdBhtmh8jk7ojwk4B2rIcBQu0vwUXc1MgO8tBLD7s08lBedBjqZiLZEW31uPMEyWNysouDt16a5gm2d149LR7flI3MOifBtxINfJuC3eOEG65bPgN/bYEsIpLKnu3469d5nxZkK7xsjbWTxHGoUpLvVPsmHY2ZM5/jftybs7fI0do4NMG2XffKfZbiFb447Ao3xeQeEfW6IkJllzgGnlG9FJATFidrbwDNdmzAnvPEnDoKAf7ZvhPV0x9yR5V6P4Ck5hxl8mlPdBa1cRMO8s/1ag1c7YJ3AF9ZlwcwqTiGsT8DHTVRxSz4nFHJTMlrm9j84u7WzLZJBhPgF0UeLN3AQgiAZ3c2TFDvjQWeHVuSPkV5GrKlfhSvR82s9yPEdHQxxwYymBbAr6rJR4NtXTyZX0vg8NRKHssZKLSafs/D/pt9xXspqu8HAHc+mS0lCips79XptSr5BEsioil3D2io3tbzrGugpTeJ7oEA787vKn2Cm4XmhyQ0UBhvwsPZ351l27wZYuNV07o9Ik83hN/w4o2v899QQ/zbX42Iy8ZUCWOPX7MV7+TA7SMxru3qx7HL5hDM8kTetxbLB6Ckr+JOdX8L2Fb5L3TVDpsvfv0ebXgwaQR/ez8/7bcXmBqcERApHDz73HaMXUap+iDR4FLdXE= - AWS_DEFAULT_REGION=us-east-1 - stage: "Automerge check" - python: '3.7.13' + python: '3.11' install: - pip install --no-cache-dir torch torchvision --default-timeout=1000 --retries=5 - pip install --no-cache-dir -e ".[test]" diff --git a/README.md b/README.md index eae4d140f..e0a3010c0 100644 --- a/README.md +++ b/README.md @@ -22,9 +22,9 @@ To 
contribute, please [send in a pull request](https://github.com/brain-score/vi
 ## Local installation
-You will need Python = 3.7 and pip >= 18.1.
+You will need Python = 3.11 and pip >= 18.1.
-`pip install git+https://github.com/brain-score/vision`
+`pip install git+https://github.com/brain-score/vision.git`
 Test if the installation is successful by scoring a model on a public benchmark:
diff --git a/brainscore_vision/benchmark_helpers/__init__.py b/brainscore_vision/benchmark_helpers/__init__.py
index eb36e50ca..7eb506115 100644
--- a/brainscore_vision/benchmark_helpers/__init__.py
+++ b/brainscore_vision/benchmark_helpers/__init__.py
@@ -1,6 +1,7 @@
 from typing import Union
 import numpy as np
+import hashlib
 from brainio.assemblies import NeuroidAssembly, DataAssembly
 from brainscore_core import Score
@@ -18,6 +19,13 @@ def __init__(self, features: Union[DataAssembly, dict], visual_degrees):
         self.features = features
         self._visual_degrees = visual_degrees
+    @property
+    def identifier(self) -> str:
+        # serialize the features to a string and create hash
+        features_data = str(self.features)
+        features_hash = hashlib.md5(features_data.encode('utf-8')).hexdigest()
+        return f"precomputed-{features_hash}"
+
     def visual_degrees(self) -> int:
         return self._visual_degrees
diff --git a/brainscore_vision/benchmark_helpers/test_helper.py b/brainscore_vision/benchmark_helpers/test_helper.py
index 6e3ad4a03..57d6461f6 100644
--- a/brainscore_vision/benchmark_helpers/test_helper.py
+++ b/brainscore_vision/benchmark_helpers/test_helper.py
@@ -7,6 +7,7 @@
 from brainio.assemblies import NeuroidAssembly, PropertyAssembly
 from brainscore_vision import load_benchmark
 from brainscore_vision.model_interface import BrainModel
+from brainscore_vision.data_helpers import s3
 from .
import PrecomputedFeatures @@ -68,6 +69,8 @@ def run_test_properties(self, benchmark: str, files: dict, expected: float): for current_stimulus in stimulus_identifiers: stimulus_set = load_stimulus_set(current_stimulus) path = Path(__file__).parent / files[current_stimulus] + s3.download_file_if_not_exists(local_path=path, + bucket='brainscore-unittests', remote_filepath=f'tests/test_benchmarks/{files[current_stimulus]}') features = PropertyAssembly.from_files(path, stimulus_set_identifier=stimulus_set.identifier, stimulus_set=stimulus_set) diff --git a/brainscore_vision/benchmarks/coggan2024_behavior/benchmark.py b/brainscore_vision/benchmarks/coggan2024_behavior/benchmark.py index c00a52335..6e1d019d1 100644 --- a/brainscore_vision/benchmarks/coggan2024_behavior/benchmark.py +++ b/brainscore_vision/benchmarks/coggan2024_behavior/benchmark.py @@ -106,9 +106,12 @@ def __call__(self, candidate: BrainModel) -> Score: data.model_prediction == data.object_class, dtype=int) # get correlation between model and human performance across conditions - performance = (data[data.visibility < 1] + performance = ( + data[data.visibility < 1] .groupby(['subject', 'occluder_type', 'occluder_color']) - .mean(['human_accuracy', 'model_accuracy'])).reset_index() + .mean(numeric_only=True) + .reset_index() + ) scores = performance.groupby('subject').apply( lambda df: np.corrcoef(df.human_accuracy, df.model_accuracy)[0, 1]) score = Score(np.mean(scores)) @@ -131,8 +134,9 @@ def get_noise_ceiling(performance: pd.DataFrame) -> Score: nc = [] for subject in performance.subject.unique(): performance_ind = performance[performance.subject == subject] - performance_grp = (performance[performance.subject != subject] - .groupby(['occluder_type', 'occluder_color']).mean()) + performance_grp = performance[performance.subject != subject] + numeric_cols = performance_grp.select_dtypes(include=np.number).columns + performance_grp = performance_grp.groupby(['occluder_type', 'occluder_color'])[numeric_cols].mean() merged_df = performance_ind.merge( performance_grp, on=['occluder_type', 'occluder_color']) nc.append(np.corrcoef(merged_df.human_accuracy_x, merged_df.human_accuracy_y)[0, 1]) diff --git a/brainscore_vision/benchmarks/igustibagus2024/domain_transfer_analysis.py b/brainscore_vision/benchmarks/igustibagus2024/domain_transfer_analysis.py index 9a8c07713..da3d662f2 100644 --- a/brainscore_vision/benchmarks/igustibagus2024/domain_transfer_analysis.py +++ b/brainscore_vision/benchmarks/igustibagus2024/domain_transfer_analysis.py @@ -5,6 +5,8 @@ import pandas as pd from sklearn.linear_model import RidgeClassifierCV from sklearn.model_selection import train_test_split +from sklearn.pipeline import Pipeline +from sklearn.preprocessing import StandardScaler from tqdm import tqdm # import brain-score specific libraries @@ -89,7 +91,10 @@ def __call__(self, candidate: BrainModel) -> Score: def OOD_AnalysisBenchmark(): return _OOD_AnalysisBenchmark( - classifier=RidgeClassifierCV(alphas=[0.0001, 0.001, 0.01, 0.1, 1, 10], fit_intercept=True, normalize=True) + classifier=Pipeline([ + ('scaler', StandardScaler()), + ('classifier', RidgeClassifierCV(alphas=[0.0001, 0.001, 0.01, 0.1, 1, 10], fit_intercept=True)) + ]) ) diff --git a/brainscore_vision/benchmarks/kar2019/test.py b/brainscore_vision/benchmarks/kar2019/test.py index b0fece327..34c15b9a9 100644 --- a/brainscore_vision/benchmarks/kar2019/test.py +++ b/brainscore_vision/benchmarks/kar2019/test.py @@ -24,7 +24,7 @@ def test_Kar2019ost_cornet_s(): filename = 
'cornet_s-kar2019.nc' filepath = Path(__file__).parent / filename s3.download_file_if_not_exists(local_path=filepath, - bucket='brainio-brainscore', remote_filepath=f'tests/test_benchmarks/{filename}') + bucket='brainscore-unittests', remote_filepath=f'tests/test_benchmarks/{filename}') precomputed_features = NeuroidAssembly.from_files( filepath, stimulus_set_identifier=benchmark._assembly.stimulus_set.identifier, diff --git a/brainscore_vision/benchmarks/lonnqvist2024/__init__.py b/brainscore_vision/benchmarks/lonnqvist2024/__init__.py new file mode 100644 index 000000000..a5d63c7ae --- /dev/null +++ b/brainscore_vision/benchmarks/lonnqvist2024/__init__.py @@ -0,0 +1,8 @@ +from brainscore_vision import benchmark_registry +from . import benchmark + +benchmark_registry['Lonnqvist2024_InlabInstructionsBehavioralAccuracyDistance'] = lambda: benchmark._Lonnqvist2024BehavioralAccuracyDistanceInlabInstructions() +benchmark_registry['Lonnqvist2024_InlabNoInstructionsBehavioralAccuracyDistance'] = lambda: benchmark._Lonnqvist2024BehavioralAccuracyDistanceInlabNoInstructions() +benchmark_registry['Lonnqvist2024_OnlineNoInstructionsBehavioralAccuracyDistance'] = lambda: benchmark._Lonnqvist2024BehavioralAccuracyDistanceOnlineNoInstructions() + +benchmark_registry['Lonnqvist2024_EngineeringAccuracy'] = lambda: benchmark._Lonnqvist2024EngineeringAccuracy() \ No newline at end of file diff --git a/brainscore_vision/benchmarks/lonnqvist2024/benchmark.py b/brainscore_vision/benchmarks/lonnqvist2024/benchmark.py new file mode 100644 index 000000000..73169adc5 --- /dev/null +++ b/brainscore_vision/benchmarks/lonnqvist2024/benchmark.py @@ -0,0 +1,122 @@ +from pathlib import Path + +import numpy as np + +from brainio.assemblies import BehavioralAssembly +from brainscore_vision import load_metric, load_stimulus_set, load_dataset +from brainscore_vision.benchmark_helpers.screen import place_on_screen +from brainscore_vision.benchmarks import BenchmarkBase +from brainscore_vision.metrics import Score +from brainscore_vision.model_interface import BrainModel + +BIBTEX = "" # to appear in a future article + + +class _Lonnqvist2024Base(BenchmarkBase): + def __init__(self, identifier, dataset, ceiling_func, metric): + self._metric = metric + self._stimulus_set = load_stimulus_set('Lonnqvist2024_test') + self._fitting_stimuli = load_stimulus_set('Lonnqvist2024_train') + self._visual_degrees = 17.70753 + self.assembly = load_dataset(f'Lonnqvist2024_{dataset}') + + super(_Lonnqvist2024Base, self).__init__( + identifier=identifier, version=1, + ceiling_func=ceiling_func, + parent='Lonnqvist2024', + bibtex=BIBTEX) + + def __call__(self, candidate: BrainModel, return_raw_responses: bool = False): + fitting_stimulus_set = place_on_screen( + self._fitting_stimuli, + target_visual_degrees=candidate.visual_degrees(), + source_visual_degrees=self._visual_degrees + ) + candidate.start_task(BrainModel.Task.probabilities, fitting_stimuli=fitting_stimulus_set, number_of_trials=1) + stimulus_set = place_on_screen( + self._stimulus_set, + target_visual_degrees=candidate.visual_degrees(), + source_visual_degrees=self._visual_degrees + ) + model_response = candidate.look_at(stimulus_set, number_of_trials=1) + model_response = convert_proba_to_choices(model_response) + raw_score = self._metric(model_response, self.assembly) + # Adjust score to ceiling + ceiling = self.ceiling + score = raw_score / ceiling + score.attrs['raw'] = raw_score + score.attrs['ceiling'] = ceiling + if return_raw_responses: + return score, model_response + 
return score + + +class _Lonnqvist2024BehavioralAccuracyDistanceInlabInstructions(_Lonnqvist2024Base): + def __init__(self): + metric = load_metric('accuracy_distance') + ceiling_func = lambda: metric.ceiling(self.assembly) + super(_Lonnqvist2024BehavioralAccuracyDistanceInlabInstructions, self).__init__( + identifier='Lonnqvist2024-inlab-instructions_behavioral_accuracy_distance', dataset='inlab-instructions', + ceiling_func=ceiling_func, + metric=metric) + + +class _Lonnqvist2024BehavioralAccuracyDistanceInlabNoInstructions(_Lonnqvist2024Base): + def __init__(self): + metric = load_metric('accuracy_distance') + ceiling_func = lambda: metric.ceiling(self.assembly) + super(_Lonnqvist2024BehavioralAccuracyDistanceInlabNoInstructions, self).__init__( + identifier='Lonnqvist2024-inlab-no-instructions_behavioral_accuracy_distance', dataset='inlab-no-instructions', + ceiling_func=ceiling_func, + metric=metric) + + +class _Lonnqvist2024BehavioralAccuracyDistanceOnlineNoInstructions(_Lonnqvist2024Base): + def __init__(self): + metric = load_metric('accuracy_distance') + ceiling_func = lambda: metric.ceiling(self.assembly) + super(_Lonnqvist2024BehavioralAccuracyDistanceOnlineNoInstructions, self).__init__( + identifier='Lonnqvist2024-online-no-instructions_behavioral_accuracy_distance', dataset='online-no-instructions', + ceiling_func=ceiling_func, + metric=metric) + + +class _Lonnqvist2024EngineeringAccuracy(_Lonnqvist2024Base): + def __init__(self): + metric = load_metric('accuracy') + ceiling_func = lambda: Score(1) + super(_Lonnqvist2024EngineeringAccuracy, self).__init__( + identifier='Lonnqvist2024-engineering_accuracy', dataset='inlab-instructions', + ceiling_func=ceiling_func, + metric=metric) + + def __call__(self, candidate: BrainModel, return_raw_responses: bool = False): + fitting_stimulus_set = place_on_screen( + self._fitting_stimuli, + target_visual_degrees=candidate.visual_degrees(), + source_visual_degrees=self._visual_degrees + ) + candidate.start_task(BrainModel.Task.probabilities, fitting_stimuli=fitting_stimulus_set, number_of_trials=1) + stimulus_set = place_on_screen( + self._stimulus_set, + target_visual_degrees=candidate.visual_degrees(), + source_visual_degrees=self._visual_degrees + ) + model_response = candidate.look_at(stimulus_set, number_of_trials=1) + model_response = convert_proba_to_choices(model_response) + raw_score = self._metric(model_response, stimulus_set['truth']) + # Adjust score to ceiling + ceiling = self.ceiling + score = raw_score / ceiling + score.attrs['raw'] = raw_score + score.attrs['ceiling'] = ceiling + if return_raw_responses: + return score, model_response + return score + + +def convert_proba_to_choices(source: BehavioralAssembly) -> np.array: + """Converts the probability values returned by models doing probability tasks to behavioral choices.""" + decisions = np.argmax(source.values, axis=1) + choices = [source['choice'].values[decision] for decision in decisions] + return BehavioralAssembly(choices, coords={'presentation': source['presentation']}) diff --git a/brainscore_vision/benchmarks/lonnqvist2024/test.py b/brainscore_vision/benchmarks/lonnqvist2024/test.py new file mode 100644 index 000000000..f4bdaf134 --- /dev/null +++ b/brainscore_vision/benchmarks/lonnqvist2024/test.py @@ -0,0 +1,61 @@ +import pytest +from pytest import approx + +from brainscore_vision import benchmark_registry, load_benchmark, load_model + + +@pytest.mark.parametrize('benchmark', [ + 'Lonnqvist2024_InlabInstructionsBehavioralAccuracyDistance', + 
    'Lonnqvist2024_InlabNoInstructionsBehavioralAccuracyDistance',
+    'Lonnqvist2024_OnlineNoInstructionsBehavioralAccuracyDistance',
+    'Lonnqvist2024_EngineeringAccuracy',
+])
+def test_benchmark_registry(benchmark):
+    assert benchmark in benchmark_registry
+
+
+class TestBehavioral:
+    @pytest.mark.private_access
+    @pytest.mark.parametrize('dataset, expected_ceiling', [
+        ('InlabInstructionsBehavioralAccuracyDistance', approx(0.95646366, abs=0.001)),
+        ('InlabNoInstructionsBehavioralAccuracyDistance', approx(0.84258475, abs=0.001)),
+        ('OnlineNoInstructionsBehavioralAccuracyDistance', approx(0.79752907, abs=0.001)),
+    ])
+    def test_dataset_ceiling(self, dataset, expected_ceiling):
+        benchmark = f"Lonnqvist2024_{dataset}"
+        benchmark = load_benchmark(benchmark)
+        ceiling = benchmark.ceiling
+        assert ceiling == expected_ceiling
+
+    @pytest.mark.private_access
+    @pytest.mark.parametrize('dataset, expected_raw_score', [
+        ('InlabInstructionsBehavioralAccuracyDistance', approx(0.58568247, abs=0.001)),
+        ('InlabNoInstructionsBehavioralAccuracyDistance', approx(0.62883828, abs=0.001)),
+        ('OnlineNoInstructionsBehavioralAccuracyDistance', approx(0.78192183, abs=0.001)),
+    ])
+    def test_model(self, dataset, expected_raw_score):
+        if 'all' in dataset:
+            benchmark = f"Lonnqvist2024_{dataset}"
+        else:
+            benchmark = f"Lonnqvist2024_{dataset}"
+        benchmark = load_benchmark(benchmark)
+        model = load_model('alexnet')
+        score = benchmark(model)
+        raw_score = score.raw
+        # division by ceiling <= 1 should result in higher score
+        assert score >= raw_score
+        assert raw_score == expected_raw_score
+
+
+class TestEngineering:
+    @pytest.mark.parametrize('dataset, expected_accuracy', [
+        ('EngineeringAccuracy', approx(0.45, abs=0.001)),
+    ])
+    def test_accuracy(self, dataset, expected_accuracy):
+        benchmark = load_benchmark(f"Lonnqvist2024_{dataset}")
+        model = load_model('alexnet')
+        score = benchmark(model)
+        raw_score = score.raw
+        # division by ceiling <= 1 should result in higher score
+        assert score >= raw_score
+        assert raw_score == expected_accuracy
diff --git a/brainscore_vision/benchmarks/majajhong2015/__init__.py b/brainscore_vision/benchmarks/majajhong2015/__init__.py
index 24fe8651e..5ae8988fd 100644
--- a/brainscore_vision/benchmarks/majajhong2015/__init__.py
+++ b/brainscore_vision/benchmarks/majajhong2015/__init__.py
@@ -11,3 +11,8 @@
 benchmark_registry['MajajHong2015public.V4-pls'] = MajajHongV4PublicBenchmark
 benchmark_registry['MajajHong2015public.IT-pls'] = MajajHongITPublicBenchmark
+
+# temporal
+from .benchmark import MajajHongV4TemporalPublicBenchmark, MajajHongITTemporalPublicBenchmark
+benchmark_registry['MajajHong2015public.V4-temporal-pls'] = lambda: MajajHongV4TemporalPublicBenchmark(time_interval=10)
+benchmark_registry['MajajHong2015public.IT-temporal-pls'] = lambda: MajajHongITTemporalPublicBenchmark(time_interval=10)
diff --git a/brainscore_vision/benchmarks/majajhong2015/benchmark.py b/brainscore_vision/benchmarks/majajhong2015/benchmark.py
index 766f5c93f..5270ab7af 100644
--- a/brainscore_vision/benchmarks/majajhong2015/benchmark.py
+++ b/brainscore_vision/benchmarks/majajhong2015/benchmark.py
@@ -1,7 +1,8 @@
 from brainscore_core import Metric
 from brainscore_vision import load_metric, Ceiling, load_ceiling, load_dataset
-from brainscore_vision.benchmark_helpers.neural_common import NeuralBenchmark, average_repetition
+from brainscore_vision.benchmark_helpers.neural_common import NeuralBenchmark, average_repetition, apply_keep_attrs
+from
brainscore_vision.model_helpers.brain_transformation.temporal import assembly_time_align VISUAL_DEGREES = 8 NUMBER_OF_TRIALS = 50 @@ -20,13 +21,14 @@ eprint = {https://www.jneurosci.org/content/35/39/13402.full.pdf}, journal = {Journal of Neuroscience}}""" -pls_metric = lambda: load_metric('pls', crossvalidation_kwargs=dict(stratification_coord='object_name')) - +crossvalidation_kwargs = dict(stratification_coord='object_name') +pls_metric = lambda: load_metric('pls', crossvalidation_kwargs=crossvalidation_kwargs) +spantime_pls_metric = lambda: load_metric('spantime_pls', crossvalidation_kwargs=crossvalidation_kwargs) def _DicarloMajajHong2015Region(region: str, access: str, identifier_metric_suffix: str, - similarity_metric: Metric, ceiler: Ceiling): - assembly_repetition = load_assembly(average_repetitions=False, region=region, access=access) - assembly = load_assembly(average_repetitions=True, region=region, access=access) + similarity_metric: Metric, ceiler: Ceiling, time_interval: float = None): + assembly_repetition = load_assembly(average_repetitions=False, region=region, access=access, time_interval=time_interval) + assembly = load_assembly(average_repetitions=True, region=region, access=access, time_interval=time_interval) benchmark_identifier = f'MajajHong2015.{region}' + ('.public' if access == 'public' else '') return NeuralBenchmark(identifier=f'{benchmark_identifier}-{identifier_metric_suffix}', version=3, assembly=assembly, similarity_metric=similarity_metric, @@ -60,13 +62,35 @@ def MajajHongITPublicBenchmark(): ceiler=load_ceiling('internal_consistency')) -def load_assembly(average_repetitions, region, access='private'): - assembly = load_dataset(f'MajajHong2015.{access}') +def MajajHongV4TemporalPublicBenchmark(time_interval: float = None): + return _DicarloMajajHong2015Region(region='V4', access='public', identifier_metric_suffix='pls', + similarity_metric=spantime_pls_metric(), time_interval=time_interval, + ceiler=load_ceiling('internal_consistency_temporal')) + + +def MajajHongITTemporalPublicBenchmark(time_interval: float = None): + return _DicarloMajajHong2015Region(region='IT', access='public', identifier_metric_suffix='pls', + similarity_metric=spantime_pls_metric(), time_interval=time_interval, + ceiler=load_ceiling('internal_consistency_temporal')) + + +def load_assembly(average_repetitions: bool, region: str, access: str = 'private', time_interval: float = None): + temporal = time_interval is not None + if not temporal: + assembly = load_dataset(f'MajajHong2015.{access}') + assembly = assembly.squeeze("time_bin") + else: + assembly = load_dataset(f'MajajHong2015.temporal.{access}') + assembly = assembly.__class__(assembly) + target_time_bins = [ + (t, t+time_interval) for t in range(0, assembly.time_bin_end.max().item()-time_interval, time_interval) + ] + assembly = apply_keep_attrs(assembly, lambda assembly: assembly_time_align(assembly, target_time_bins)) + assembly = assembly.sel(region=region) assembly['region'] = 'neuroid', [region] * len(assembly['neuroid']) - assembly = assembly.squeeze("time_bin") assembly.load() - assembly = assembly.transpose('presentation', 'neuroid') + assembly = assembly.transpose('presentation', 'neuroid', ...) 
if average_repetitions: assembly = average_repetition(assembly) return assembly diff --git a/brainscore_vision/benchmarks/malania2007/benchmark.py b/brainscore_vision/benchmarks/malania2007/benchmark.py index 7ad587b4d..18bc9e7ee 100644 --- a/brainscore_vision/benchmarks/malania2007/benchmark.py +++ b/brainscore_vision/benchmarks/malania2007/benchmark.py @@ -110,7 +110,12 @@ def __init__(self, condition: str): def __call__(self, candidate: BrainModel): model_responses = {} - candidate.start_task(BrainModel.Task.probabilities, fitting_stimuli=self._fitting_stimuli, + fitting_stimulus_set = place_on_screen( + self._fitting_stimuli, + target_visual_degrees=candidate.visual_degrees(), + source_visual_degrees=self._visual_degrees + ) + candidate.start_task(BrainModel.Task.probabilities, fitting_stimuli=fitting_stimulus_set, number_of_trials=2, require_variance=True) for condition in (self.baseline_condition, self.condition): stimulus_set = place_on_screen( @@ -160,9 +165,15 @@ def __init__(self): def __call__(self, candidate: BrainModel): scores = [] + for condition in self.conditions: - candidate.start_task(BrainModel.Task.probabilities, fitting_stimuli=self._fitting_stimuli[condition], - number_of_trials=2, require_variance=True) + fitting_stimulus_set = place_on_screen( + self._fitting_stimuli[condition], + target_visual_degrees=candidate.visual_degrees(), + source_visual_degrees=self._visual_degrees + ) + candidate.start_task(BrainModel.Task.probabilities, fitting_stimuli=fitting_stimulus_set, + number_of_trials=2, require_variance=True) stimulus_set = place_on_screen( self._stimulus_set, target_visual_degrees=candidate.visual_degrees(), diff --git a/brainscore_vision/benchmarks/rajalingham2018/test.py b/brainscore_vision/benchmarks/rajalingham2018/test.py index 7a7e96388..2ff9d38a0 100644 --- a/brainscore_vision/benchmarks/rajalingham2018/test.py +++ b/brainscore_vision/benchmarks/rajalingham2018/test.py @@ -7,7 +7,7 @@ from pytest import approx from brainio.assemblies import BehavioralAssembly -from brainscore_vision import benchmark_registry, load_benchmark, load_metric +from brainscore_vision import benchmark_registry, load_benchmark, load_metric, load_model from brainscore_vision.benchmark_helpers import PrecomputedFeatures from brainscore_vision.benchmark_helpers.test_helper import VisualDegreesTests, NumberOfTrialsTests from brainscore_vision.benchmarks.rajalingham2018 import DicarloRajalingham2018I2n @@ -115,44 +115,11 @@ class TestMetricScore: @pytest.mark.parametrize(['model', 'expected_score'], [ ('alexnet', .253), - ('resnet34', .37787), - ('resnet18', .3638), + ('resnet50_tutorial', 0.348), + ('pixels', 0.0139) ]) def test_model(self, model, expected_score): - class UnceiledBenchmark(_DicarloRajalingham2018): - def __init__(self): - metric = load_metric('i2n') - super(UnceiledBenchmark, self).__init__(metric=metric, metric_identifier='i2n') - - def __call__(self, candidate: BrainModel): - candidate.start_task(BrainModel.Task.probabilities, self._fitting_stimuli) - probabilities = candidate.look_at(self._assembly.stimulus_set) - score = self._metric(probabilities, self._assembly) - return score - - benchmark = UnceiledBenchmark() - # features - feature_responses = xr.load_dataarray(Path(__file__).parent / 'test_resources' / - f'identifier={model},stimuli_identifier=objectome-240.nc') - feature_responses['stimulus_id'] = 'stimulus_path', [os.path.splitext(os.path.basename(path))[0] - for path in feature_responses['stimulus_path'].values] - feature_responses = 
feature_responses.stack(presentation=['stimulus_path']) - assert len(np.unique(feature_responses['layer'])) == 1 # only penultimate layer - - class PrecomputedFeatures: - def __init__(self, precomputed_features): - self.features = precomputed_features - - def __call__(self, stimuli, layers): - np.testing.assert_array_equal(layers, ['behavioral-layer']) - self_stimulus_ids = self.features['stimulus_id'].values.tolist() - indices = [self_stimulus_ids.index(stimulus_id) for stimulus_id in stimuli['stimulus_id'].values] - features = self.features[{'presentation': indices}] - return features - - # evaluate candidate - transformation = ProbabilitiesMapping(identifier=f'TestI2N.{model}', - activations_model=PrecomputedFeatures(feature_responses), - layer='behavioral-layer') - score = benchmark(transformation) - assert score == approx(expected_score, abs=0.005), f"expected {expected_score}, but got {score}" + benchmark = load_benchmark('Rajalingham2018-i2n') + model = load_model(model) + score = benchmark(model) + assert score.raw == approx(expected_score, abs=0.005), f"expected {expected_score}, but got {score.raw}" diff --git a/brainscore_vision/benchmarks/rajalingham2020/test.py b/brainscore_vision/benchmarks/rajalingham2020/test.py index 6af813946..40b6226d5 100644 --- a/brainscore_vision/benchmarks/rajalingham2020/test.py +++ b/brainscore_vision/benchmarks/rajalingham2020/test.py @@ -35,5 +35,5 @@ def test_Rajalingham2020(benchmark, expected): filename = 'alexnet-rajalingham2020-features.12.nc' filepath = Path(__file__).parent / filename s3.download_file_if_not_exists(local_path=filepath, - bucket='brainio-brainscore', remote_filepath=f'tests/test_benchmarks/{filename}') + bucket='brainscore-unittests', remote_filepath=f'tests/test_benchmarks/{filename}') precomputed_test.run_test(benchmark=benchmark, precomputed_features_filepath=filepath, expected=expected) diff --git a/brainscore_vision/benchmarks/sanghavi2020/test.py b/brainscore_vision/benchmarks/sanghavi2020/test.py index b65f08f63..ac6fe79b3 100644 --- a/brainscore_vision/benchmarks/sanghavi2020/test.py +++ b/brainscore_vision/benchmarks/sanghavi2020/test.py @@ -66,7 +66,7 @@ def test_self_regression(benchmark, visual_degrees, expected): def test_model_features(benchmark, filename, expected): filepath = Path(__file__).parent / filename s3.download_file_if_not_exists(local_path=filepath, - bucket='brainio-brainscore', remote_filepath=f'tests/test_benchmarks/{filename}') + bucket='brainscore-unittests', remote_filepath=f'tests/test_benchmarks/{filename}') precomputed_test.run_test(benchmark=benchmark, precomputed_features_filepath=filepath, expected=expected) diff --git a/brainscore_vision/data/geirhos2021/test.py b/brainscore_vision/data/geirhos2021/test.py index bdc2052af..41762008d 100644 --- a/brainscore_vision/data/geirhos2021/test.py +++ b/brainscore_vision/data/geirhos2021/test.py @@ -62,7 +62,7 @@ def test_stimulus_set_assembly_alignment(self, identifier, field): full_name = f"Geirhos2021_{identifier}" assembly = load_dataset(full_name) assert assembly.stimulus_set is not None - assert assembly.stimulus_set.identifier == f"{full_name}" + assert assembly.stimulus_set.identifier == full_name assert set(assembly.stimulus_set[field]) == set(assembly[field].values) # test the number of subjects: @@ -236,7 +236,7 @@ def test_stimulus_set_exist(self, identifier): full_name = f"Geirhos2021_{identifier}" stimulus_set = load_stimulus_set(full_name) assert stimulus_set is not None - assert stimulus_set.identifier == full_name + assert 
stimulus_set.identifier == f"{full_name}" # test the number of images @pytest.mark.parametrize('identifier, num_images', [ diff --git a/brainscore_vision/data/lonnqvist2024/__init__.py b/brainscore_vision/data/lonnqvist2024/__init__.py new file mode 100644 index 000000000..e8e17099b --- /dev/null +++ b/brainscore_vision/data/lonnqvist2024/__init__.py @@ -0,0 +1,47 @@ +from brainio.assemblies import BehavioralAssembly + +from brainscore_vision import data_registry, stimulus_set_registry, load_stimulus_set +from brainscore_vision.data_helpers.s3 import load_assembly_from_s3, load_stimulus_set_from_s3 + + +data_registry['Lonnqvist2024_inlab-instructions'] = lambda: load_assembly_from_s3( + identifier='Lonnqvist2024_inlab-instructions', + version_id='nTcvZkZedprOMYKkZ8kONUxy4M.__F_C', + sha1='64ec603ebc852d193e7437980eaabe8fc482d88b', + bucket="brainio-brainscore", + cls=BehavioralAssembly, + stimulus_set_loader=lambda: load_stimulus_set('Lonnqvist2024_test')) + +data_registry['Lonnqvist2024_inlab-no-instructions'] = lambda: load_assembly_from_s3( + identifier='Lonnqvist2024_inlab-no-instructions', + version_id='XtRi6xl6cJJ_71mAzfqqm3fSKujI8O4C', + sha1='ff248ca2058d4e36eee44dbc6f8ea6a79c70b715', + bucket="brainio-brainscore", + cls=BehavioralAssembly, + stimulus_set_loader=lambda: load_stimulus_set('Lonnqvist2024_test')) + +data_registry['Lonnqvist2024_online-no-instructions'] = lambda: load_assembly_from_s3( + identifier='Lonnqvist2024_online-no-instructions', + version_id='VRMgNb4mYSf_S6S81LGK2LFVVrfnCI26', + sha1='04240330eaf371d160ab418fd5560a72ed42cecb', + bucket="brainio-brainscore", + cls=BehavioralAssembly, + stimulus_set_loader=lambda: load_stimulus_set('Lonnqvist2024_test')) + +stimulus_set_registry['Lonnqvist2024_train'] = lambda: load_stimulus_set_from_s3( + identifier='Lonnqvist2024_train', + bucket="brainio-brainscore", + csv_sha1='2d6a95a8239aa647ddc6aedd449eabcebdf882cf', + zip_sha1='8adbf4de94524892042d3e43629a4be2beeedcaf', + csv_version_id='92o14bqbYd5Xut2zoTADmT4S_FEPmuW6', + zip_version_id='oZM7Fe446bq15A1fpoatEsvWHZrstdXJ', + filename_prefix='stimulus_') + +stimulus_set_registry['Lonnqvist2024_test'] = lambda: load_stimulus_set_from_s3( + identifier='Lonnqvist2024_test', + bucket="brainio-brainscore", + csv_sha1='8bc98dfc9f334e5c21b68f6787b3255da0d8644a', + zip_sha1='cf94b5341d956d250e7f7798044cf71bbd100721', + csv_version_id='VCzpiY0ZMySfDTPIT8zauLZuV0QJmgwZ', + zip_version_id='ZjEM4Es91H1aGyGo73VcVoL1id6FdWiO', + filename_prefix='stimulus_') \ No newline at end of file diff --git a/brainscore_vision/data/lonnqvist2024/data_packaging/lonnqvist_data_assembly.py b/brainscore_vision/data/lonnqvist2024/data_packaging/lonnqvist_data_assembly.py new file mode 100644 index 000000000..6e2c0b5ec --- /dev/null +++ b/brainscore_vision/data/lonnqvist2024/data_packaging/lonnqvist_data_assembly.py @@ -0,0 +1,53 @@ +from pathlib import Path +import numpy as np +import xarray as xr + +from brainio.assemblies import BehavioralAssembly +from brainio.packaging import package_data_assembly +import pandas as pd + + +DATASETS = ['inlab-instructions', 'inlab-no-instructions', 'online-no-instructions'] + + +def collect_lonnqvist_data_assembly(root_directory, dataset): + """ + Experiment Information: + """ + data = pd.read_csv(Path(rf'{root_directory}/{dataset}.csv')) + + assembly = BehavioralAssembly(data['subject_answer'], + coords={ + 'subject': ('presentation', data['subject_id']), + 'visual_degrees': ('presentation', data['visual_degrees']), + 'image_duration': ('presentation', 
data['image_duration']), + 'is_correct': ('presentation', data['is_correct']), + 'subject_answer': ('presentation', data['subject_answer']), + 'curve_length': ('presentation', data['curve_length']), + 'n_cross': ('presentation', data['n_cross']), + 'image_path': ('presentation', data['image_path']), + 'stimulus_id': ('presentation', data['stimulus_id']), + 'truth': ('presentation', data['truth']), + 'image_label': ('presentation', data['truth']) + }, + dims=['presentation'] + ) + + # give the assembly an identifier name + assembly.name = f'Lonnqvist2024_{dataset}' + + return assembly + + +if __name__ == '__main__': + root_directory = Path(r'./local') + for dataset in DATASETS: + assembly = collect_lonnqvist_data_assembly(root_directory, dataset) + # upload to S3 + prints = package_data_assembly(catalog_identifier=None, + proto_data_assembly=assembly, + assembly_identifier=assembly.name, + stimulus_set_identifier=assembly.name, + assembly_class_name="BehavioralAssembly", + bucket_name="brainio-brainscore") + print(prints) diff --git a/brainscore_vision/data/lonnqvist2024/data_packaging/lonnqvist_stimulus_set.py b/brainscore_vision/data/lonnqvist2024/data_packaging/lonnqvist_stimulus_set.py new file mode 100644 index 000000000..b9a9fd1fe --- /dev/null +++ b/brainscore_vision/data/lonnqvist2024/data_packaging/lonnqvist_stimulus_set.py @@ -0,0 +1,61 @@ +from pathlib import Path +import csv + +from brainio.stimuli import StimulusSet +from brainio.packaging import package_stimulus_set + +''' +dataset Meta Info + +- curve length: values from 20-200 in steps of 10 +- n_cross: number of times the lines intercept, range from 1-7 +- condition: same or diff +''' + + +def collect_lonnqvist_stimulus_set(dataset, stimuli_directory, metadata_filepath): + stimuli = [] + stimulus_paths = {} + + with open(metadata_filepath, 'r') as metadata: + reader = csv.DictReader(metadata) + for row in reader: + stimulus_meta = { + 'curve_length': int(row['curve_length']), + 'n_cross': int(row['n_cross']), + 'image_path': str(row['path']), + 'stimulus_id': str(row['idx']), + 'truth': str(row['correct_response_key']), + 'image_label': str(row['correct_response_key']) + } + + stimuli.append(stimulus_meta) + stimulus_paths[str(row['idx'])] = Path(f'{row["path"]}') + + stimuli = StimulusSet(stimuli) + stimuli.stimulus_paths = stimulus_paths + + stimuli.name = f'Lonnqvist2024_{dataset}' + stimuli.identifier = f'Lonnqvist2024_{dataset}' + return stimuli + + +if __name__ == '__main__': + datasets = ['train', 'test'] + stimulus_directories = {'train': Path(r'stimuli/images_examples'), + 'test': Path(r'stimuli/images')} + metadata_filepaths = {'train': Path('stimuli/metadata_examples.csv'), + 'test': Path('stimuli/metadata.csv')} + for dataset in datasets: + stimulus_set = collect_lonnqvist_stimulus_set(dataset, + stimulus_directories[dataset], + metadata_filepaths[dataset]) + if dataset == 'train': + assert len(stimulus_set) == 185 + else: + assert len(stimulus_set) == 380 + prints = package_stimulus_set(catalog_name=None, + proto_stimulus_set=stimulus_set, + stimulus_set_identifier=stimulus_set.name, + bucket_name="brainio-brainscore") + print(prints) diff --git a/brainscore_vision/data/lonnqvist2024/test.py b/brainscore_vision/data/lonnqvist2024/test.py new file mode 100644 index 000000000..c9f82fba5 --- /dev/null +++ b/brainscore_vision/data/lonnqvist2024/test.py @@ -0,0 +1,127 @@ +import numpy as np +import pytest + +from brainscore_vision import load_stimulus_set, load_dataset + + +@pytest.mark.private_access 
+@pytest.mark.parametrize('assembly_identifier', [ + 'Lonnqvist2024_inlab-instructions', + 'Lonnqvist2024_inlab-no-instructions', + 'Lonnqvist2024_online-no-instructions' +]) +def test_existence(assembly_identifier): + assert load_dataset(assembly_identifier) is not None + + +@pytest.mark.private_access +class TestAssemblies: + @pytest.mark.parametrize('assembly', [ + 'Lonnqvist2024_inlab-instructions', + 'Lonnqvist2024_inlab-no-instructions', + 'Lonnqvist2024_online-no-instructions' + ]) + @pytest.mark.parametrize('identifier', [ + 'Lonnqvist2024_test' + ]) + @pytest.mark.parametrize('field', [ + 'stimulus_id', + 'truth' + ]) + def test_stimulus_set_assembly_alignment(self, assembly, identifier, field): + assembly = load_dataset(assembly) + assert assembly.stimulus_set is not None + assert assembly.stimulus_set.identifier == identifier + assert set(assembly.stimulus_set[field]) == set(assembly[field].values) + + # test the number of subjects + @pytest.mark.parametrize('identifier, num_subjects', [ + ('Lonnqvist2024_inlab-instructions', 10), + ('Lonnqvist2024_inlab-no-instructions', 10), + ('Lonnqvist2024_online-no-instructions', 92), + ]) + def test_num_subjects(self, identifier, num_subjects): + assembly = load_dataset(identifier) + assert len(np.unique(assembly['subject'].values)) == num_subjects + + # test number of unique images + @pytest.mark.parametrize('identifier, num_unique_images', [ + ('Lonnqvist2024_inlab-instructions', 380), + ('Lonnqvist2024_inlab-no-instructions', 380), + ('Lonnqvist2024_online-no-instructions', 380), + ]) + def test_num_unique_images(self, identifier, num_unique_images): + assembly = load_dataset(identifier) + assert len(np.unique(assembly['stimulus_id'].values)) == num_unique_images + + # tests assembly dim for ALL datasets + @pytest.mark.parametrize('identifier, length', [ + ('Lonnqvist2024_inlab-instructions', 3800), + ('Lonnqvist2024_inlab-no-instructions', 3800), + ('Lonnqvist2024_online-no-instructions', 34960), + ]) + def test_length(self, identifier, length): + assembly = load_dataset(identifier) + assert len(assembly['presentation']) == length + + # test assembly coords present in ALL 17 sets: + @pytest.mark.parametrize('identifier', [ + 'Lonnqvist2024_inlab-instructions', + 'Lonnqvist2024_inlab-no-instructions', + 'Lonnqvist2024_online-no-instructions' + ]) + @pytest.mark.parametrize('field', [ + 'subject', + 'visual_degrees', + 'image_duration', + 'is_correct', + 'subject_answer', + 'curve_length', + 'n_cross', + 'image_path', + 'stimulus_id', + 'truth', + 'image_label' + ]) + def test_fields_present(self, identifier, field): + assembly = load_dataset(identifier) + assert hasattr(assembly, field) + + +@pytest.mark.private_access +@pytest.mark.slow +class TestStimulusSets: + # test stimulus_set data: + @pytest.mark.parametrize('identifier', [ + 'Lonnqvist2024_train', + 'Lonnqvist2024_test', + ]) + def test_stimulus_set_exists(self, identifier): + stimulus_set = load_stimulus_set(identifier) + assert stimulus_set is not None + assert stimulus_set.identifier == identifier + + @pytest.mark.parametrize('identifier, num_images', [ + ('Lonnqvist2024_train', 185), + ('Lonnqvist2024_test', 380), + ]) + def test_number_of_images(self, identifier, num_images): + stimulus_set = load_stimulus_set(identifier) + assert len(np.unique(stimulus_set['stimulus_id'].values)) == num_images + + # test assembly coords present in ALL 17 sets: + @pytest.mark.parametrize('identifier', [ + 'Lonnqvist2024_train', + 'Lonnqvist2024_test', + ]) + 
@pytest.mark.parametrize('field', [ + 'curve_length', + 'n_cross', + 'image_path', + 'stimulus_id', + 'truth', + 'image_label' + ]) + def test_fields_present(self, identifier, field): + stimulus_set = load_stimulus_set(identifier) + assert hasattr(stimulus_set, field) diff --git a/brainscore_vision/data/malania2007/__init__.py b/brainscore_vision/data/malania2007/__init__.py index e6ecbb5cd..d6cb25ea1 100644 --- a/brainscore_vision/data/malania2007/__init__.py +++ b/brainscore_vision/data/malania2007/__init__.py @@ -103,152 +103,152 @@ stimulus_set_registry['Malania2007.equal2'] = lambda: load_stimulus_set_from_s3( identifier='Malania2007_equal-2', bucket="brainio-brainscore", - csv_sha1="77e94b9b5122a83ebbaffb4a06fcab68ef652751", - zip_sha1="99826d459f6920dafab72eed69eb2a90492ce796", - csv_version_id="MlRpSz.4.jvVRFAZl8tGEum1P0Q0GtyS", - zip_version_id="vHbAM_FjTbjp5U12BkAelJu4KW6PLYFn" + csv_sha1="36f3c92a1335895b10c4150f5c25a68ab4576d4a", + zip_sha1="80be52e8701ecb8e7fbb81c0bff9c148ddc2b401", + csv_version_id="lkbpUNzhET3.hR.9StpSVvoxh05aWzoi", + zip_version_id="Wd7Fc3QVn1baC53Iy.E5ha4YqWvdybM3" ) stimulus_set_registry['Malania2007.equal2_fit'] = lambda: load_stimulus_set_from_s3( identifier='Malania2007_equal-2_fit', bucket="brainio-brainscore", - csv_sha1="bafdfc855c164d3e5443d67dcf9eb7762443f964", - zip_sha1="e52fec1a79ac8837e331b180c2a8a140840d6666", - csv_version_id="PIXEW.2vHvjIBP0Q2KHIpnxns7t9o8Cf", - zip_version_id="h7pp84CYFGLKlPhveD0L5ogePqisk_I7" + csv_sha1="b7105f44d5d781f5e06159008a3f63c9f774c2d1", + zip_sha1="ba5c1bacbb4afe40c5a19eddb07fc9f98312ec69", + csv_version_id="qhsx4_OM0FSCl7SU0hXhhmiJvLnDU6Dm", + zip_version_id="PzGnzHtpGghaHGK6MO4DeSy7w.rDUfRN" ) stimulus_set_registry['Malania2007.equal16'] = lambda: load_stimulus_set_from_s3( identifier='Malania2007_equal-16', bucket="brainio-brainscore", - csv_sha1="5fedcff56c302339c3451ae2edbcb846c39c3189", - zip_sha1="b30dc2dc90e4f3d88775622e558db963765f38e0", - csv_version_id="VmRGiQkhPALDwq74NpE2VpTiKTGn.30T", - zip_version_id="c.DOlVULXZingRJ9gVY_NbZwRrj_xs_i" + csv_sha1="14f9f7098831691811abf9953766951edc952203", + zip_sha1="5127e88eaed1ef64247c7cb4262868533fb4ebae", + csv_version_id="bgBDFK3666NPXwINqGcdouvtWy12yqyY", + zip_version_id="zlkMQOE9wfTQHPohmxol4uAR6y0zqwjI" ) stimulus_set_registry['Malania2007.equal16_fit'] = lambda: load_stimulus_set_from_s3( identifier='Malania2007_equal-16_fit', bucket="brainio-brainscore", - csv_sha1="3de3e5de19a638767a01ba68cb690dc746c29a77", - zip_sha1="1728920c5ea4fb7b3a3cf3c076165aca65c8b751", - csv_version_id="joAq8JBC_7axZDfLNFgoXFhTCLU_KKr_", - zip_version_id="77JRwdldaHDr6TLW1NnB5HucIrkUCVg." + csv_sha1="2ff7f2f97250b9bcce3d2753be6e5b98e083892b", + zip_sha1="db07ef4862fd9cb65c1e726cacc5914821296a5b", + csv_version_id="cVYkl_N7c36UfjbWqAffYrHVDbPhmiwa", + zip_version_id="azu8FTnJVmsou98co5iVE2G8OemMIl4H" ) stimulus_set_registry['Malania2007.long2'] = lambda: load_stimulus_set_from_s3( identifier='Malania2007_long-2', bucket="brainio-brainscore", - csv_sha1="ba65316a63dc688d8dfb410219a28fd02850b991", - zip_sha1="7fd431fbbd4a4dc0cd271624d3297c19a28a70b5", - csv_version_id="_0fqObn6k5KvXurHMsuD4IqtrqbNskyo", - zip_version_id="foL92ndVAAAETzMYHdmMtwIwKxXYhAB." 
+ csv_sha1="153b987c4c6b8a22efb88be26aaa46bd36912c9b", + zip_sha1="07bb413d56ac77fc71823559780cdb16e4df563d", + csv_version_id="nKEYl_hb8tBKOg47O28iLY5.oYyimmAf", + zip_version_id="3BS.xmMHnND1C3bjDsut8qILlzMIJhHQ" ) stimulus_set_registry['Malania2007.long2_fit'] = lambda: load_stimulus_set_from_s3( identifier='Malania2007_long-2_fit', bucket="brainio-brainscore", - csv_sha1="b91dd9261c1d47bdd37f9b60eb8066b7b719709f", - zip_sha1="5be3e1cd57b59081103715b5d318505166e0045e", - csv_version_id="mATh8lcVisdsDnPnU6ACE23iBPfpkLZA", - zip_version_id="6nEviShTyCYQKrmxyjDyNov9Skc77eXT" + csv_sha1="8b6d1557879e6271554c0fcb67bf6b2941dad2c8", + zip_sha1="66205529af748ffd88579caef86b107838c0b0da", + csv_version_id="ut0_Zbq97vwzmkk9MY.0h5phJZqp_McX", + zip_version_id="qcGk5zy7KN.vEmLgtZKRYn0OXyWoER9G" ) stimulus_set_registry['Malania2007.long16'] = lambda: load_stimulus_set_from_s3( identifier='Malania2007_long-16', bucket="brainio-brainscore", - csv_sha1="1f1b03319b81698ba5e7db389dcd4248f94e45ca", - zip_sha1="97c70462a28905b58058c687880188d634d357f0", - csv_version_id="4RtywQ40hfQA4N80g8lxEScAmMXFRg7E", - zip_version_id="lJy2QosABzHtiA6BJaE4OqCn1w1Jhz2k" + csv_sha1="6c5d45b489bc290e41013d211d18570368012c9b", + zip_sha1="10944e5b65e8da9e52087d3cbbdc6575538c2847", + csv_version_id="sswWsVsgFbPU1psGfoIS.0Goi6b.9Dn2", + zip_version_id="5.gegdwQMNpqcP3FnW4DkTZ7s3bT0j75" ) stimulus_set_registry['Malania2007.long16_fit'] = lambda: load_stimulus_set_from_s3( identifier='Malania2007_long-16_fit', bucket="brainio-brainscore", - csv_sha1="d80a02c75b9908301c3c8dc9f7116fecf8e060ec", - zip_sha1="d8819b94d3f502d7a382c8a0db0a34627132e5e2", - csv_version_id="gOxY6tjnT7LO.FDeL1xkRmowl5wYeAia", - zip_version_id="71UAPTnZscIuqdx2dhuW9V0O0DO_TgTM" + csv_sha1="603dc8edb169f39e322f8980972eda1930c300ed", + zip_sha1="a67330e18f1e5d9ad3829d8d8c000487fe3e4d48", + csv_version_id=".qV8En95o4QR_jgvr145ww8xvgAnoIs5", + zip_version_id="h_IWMOSq4uJe91XgOEHFSVwa01vH74.H" ) stimulus_set_registry['Malania2007.short2'] = lambda: load_stimulus_set_from_s3( identifier='Malania2007_short-2', bucket="brainio-brainscore", - csv_sha1="bf0252056d2084e855646f624700ab03c19cfc3d", - zip_sha1="eee1270feb7443e7e315d8feb7fb0a6b6908f554", - csv_version_id="zcJqM.ZPwJyiMRWa3RBdvv401yPnLQAp", - zip_version_id="C8WZzAAQ0JGHAAKii4JpvlRhcUOhgSj." 
+ csv_sha1="c8bb84c5468a43348149afe24d5c0ebda233d54e", + zip_sha1="1739226e7e32f60a7bb060fecc14c4a6353ca2ad", + csv_version_id="sGDRldX6CEbDguYsikFArt1P5aMMCueM", + zip_version_id="1RjSss5KIKKfK7UzeGRyQgBu.I47xcIQ" ) stimulus_set_registry['Malania2007.short2_fit'] = lambda: load_stimulus_set_from_s3( identifier='Malania2007_short-2_fit', bucket="brainio-brainscore", - csv_sha1="73127d279a2cd254ae4f07b0053580851e84b00c", - zip_sha1="918736349d714a4f784c29bf7e7d218b103e128d", - csv_version_id="iwGRp3_ktAHfJ6r7ktSK9gsthDjKek70", - zip_version_id="6RpplJ9UVXTlvhmFSXla0Qa20b44m8Ds" + csv_sha1="600c754811aa27d80a155c8ac643a81f2347ce3a", + zip_sha1="a1a121dbbbf761caea0a086c2a74ab511f296ed5", + csv_version_id="X7c1h_64KB18noSoG2uaGo2baYTvblKa", + zip_version_id="hx5Of92KkReH_GXPll4MnFrJ.yI3UQhH" ) stimulus_set_registry['Malania2007.short4'] = lambda: load_stimulus_set_from_s3( identifier='Malania2007_short-4', bucket="brainio-brainscore", - csv_sha1="816326d89d358f6592bd1f789e5c8d429fbca1cd", - zip_sha1="ff57d976ef75ede9148a4097e90d6cf6c8054d34", - csv_version_id="Waikk.bktXIncCUtCIAyB2EqynGk.H.F", - zip_version_id="rl_muxI4UEpwXVaXuhsqroG..COGILvR" + csv_sha1="181c912c03fdb3e4f89a737584a3a0816859f816", + zip_sha1="820019a7f68db60fac11a5c5f3e42037cf205248", + csv_version_id="pjGwais_x1SzlK9kOWzypnEjWOqIJejt", + zip_version_id="uHY9JlyoIKF7QY.7h2YnaMjoGMOLTS0Y" ) stimulus_set_registry['Malania2007.short4_fit'] = lambda: load_stimulus_set_from_s3( identifier='Malania2007_short-4_fit', bucket="brainio-brainscore", - csv_sha1="3512cfd029f4e4299bc41ede519e691d80cfc3d5", - zip_sha1="301386408dd1fb8556881f9a171be2d43dbfec6e", - csv_version_id="UhisdJqiEmkQ_4zsUtAmaxtle2kMZdcD", - zip_version_id="xt_v0xgCB8YUptyPB0yZFHIUcel5MF_x" + csv_sha1="65af7b5d3845a7ea284aefba21734e1d298742c8", + zip_sha1="5234b449c05a43e726543029137fe26930157b09", + csv_version_id=".HhdLMWwnSAJTeBICSys3fjbWpa9V3ee", + zip_version_id="M1eKvnklDoUFZ70K4x9EFiYycyIAybXY" ) stimulus_set_registry['Malania2007.short6'] = lambda: load_stimulus_set_from_s3( identifier='Malania2007_short-6', bucket="brainio-brainscore", - csv_sha1="3d5dd9b48a56ba0c31de94b6221b97df962b6f8a", - zip_sha1="120d90a143d1577d4745c3f69291d0db6c7e512e", - csv_version_id="GwGHPJkMDdg8N_.boyj8qJ3ChsEx4w._", - zip_version_id="gIN1O4yz.THvK0Ifm5M3AI58ZACE1QFh" + csv_sha1="57813230e337a09c4c423da927c1f33e62054547", + zip_sha1="dab58e8e996f91af643a0b61247e7ef87f35338d", + csv_version_id="4GLCXr_ii4r7jHsOoRbTZWgdQMrChMwy", + zip_version_id="WQ.jkGJuKabBAt9br9oAYB4wDstDVPer" ) stimulus_set_registry['Malania2007.short6_fit'] = lambda: load_stimulus_set_from_s3( identifier='Malania2007_short-6_fit', bucket="brainio-brainscore", - csv_sha1="27a5be4fca190837fc5b75ed2cdbbffbf6b41338", - zip_sha1="c88e05c6cadec88a2c9475b0735323a2b049bd75", - csv_version_id="oMlj7wV85s00hJFE84ym0AJHLCfYHVA6", - zip_version_id="oS.KrBTlcYAgr_lWyA_bIjVc2js_VeUe" + csv_sha1="ea7eb26b42fe9e4fc1ac2ed6e9bad439e8077ce1", + zip_sha1="895e69c835b22b07ee66a0f5f53e7a108ac8287c", + csv_version_id="agzJvrPzCyMsHVPuJeHnu.kWLqCOgTyv", + zip_version_id="6ArqgIEm9wZoihx6_swTilr.fBtd14Gw" ) stimulus_set_registry['Malania2007.short8'] = lambda: load_stimulus_set_from_s3( identifier='Malania2007_short-8', bucket="brainio-brainscore", - csv_sha1="8fc35f607196b4c0cdcebd8102d17e3a637e5988", - zip_sha1="a9215ed0cb0f0333582dda65f6afd7015c506ba5", - csv_version_id="gzys8s7j7euMEl7JJpqBFLFHMpFjwbA7", - zip_version_id="3fYb4Iruh3lRKUwC1APqFH4CNbE5DEuk" + csv_sha1="3df9a38605a4590eac8a1151779ba68c3cd54dc1", + 
zip_sha1="7626364e0776b2809ae36d9cb713c6ff9a0d0c05", + csv_version_id="8OV0COxeMrzgsJnm_vC3q9pEB44LSllC", + zip_version_id="YmcL0kN4_sDVMxegHdurephfduxWHFua" ) stimulus_set_registry['Malania2007.short8_fit'] = lambda: load_stimulus_set_from_s3( identifier='Malania2007_short-8_fit', bucket="brainio-brainscore", - csv_sha1="aa4133a9fe19a3c9004a9cb5e6eb5a72564e4883", - zip_sha1="beb9f068794708e41750202b78c438538a40a8fb", - csv_version_id="7N1Z.uiagqBknJUSBQ4mVfHKWgocM5aA", - zip_version_id="kcEOPOkvWymO0wX5j_QKxcNPl9sZsjFd" + csv_sha1="2782c818056b374e86195cbdb0ab1a52ef0d01da", + zip_sha1="ec2fa2a261d965455ffa81acdb0fddef447ad4ff", + csv_version_id="iInrw3cTlTQw0NxQ0bvpbar.jD64IkYh", + zip_version_id="hCWq6yFtO6LlDrAY46B0fhHVWaxJSDGY" ) stimulus_set_registry['Malania2007.short16'] = lambda: load_stimulus_set_from_s3( identifier='Malania2007_short-16', bucket="brainio-brainscore", - csv_sha1="addd260c9959f2f315db03c0a39c6c1b01fef685", - zip_sha1="cba4c2866ec692fb808471df7c2fed446d9fb3fe", - csv_version_id="Peu7WU5vanLoZNOFIAbuPzZNPDRgbCSX", - zip_version_id="wFkJkZMC8Fs_HfPJy32CMKcHJWeQIUDB" + csv_sha1="9f5f4e3597006c50530017ce769c8689d43b06f5", + zip_sha1="b67b1e70e8ba698907c95614bcb16eea6ff2f090", + csv_version_id="syxlZTsmHlr6eL8L4aI9ddWbKhr8tLUk", + zip_version_id="6kSKtajsISK6TTE6Ej3UG0oGblFzU9dk" ) stimulus_set_registry['Malania2007.short16_fit'] = lambda: load_stimulus_set_from_s3( identifier='Malania2007_short-16_fit', bucket="brainio-brainscore", - csv_sha1="9b340fe242117482f6992f48a805297215ba9924", - zip_sha1="4a90d511a3ceb3307a672177a3ad6b76521e65e5", - csv_version_id="sYBPEmXDgbWipuepciLirlorQE3L8BLc", - zip_version_id="pYvOkrLxadkQ67K3__wmciNwaCW.hyyN" + csv_sha1="5bc0314a6c16095a70049fa5e8df5e9b94345f30", + zip_sha1="0ca3930831ca926ea8b8c3600695b639ff05ddb5", + csv_version_id="HZWWB5vyyMoaTCyM7t.4RwfnNtP4e64I", + zip_version_id="aWy0F_It4iUGAgGJCECz6NOJ__JHr2ib" ) stimulus_set_registry['Malania2007.vernier_only'] = lambda: load_stimulus_set_from_s3( identifier='Malania2007_vernier-only', bucket="brainio-brainscore", - csv_sha1="b2cb0f2ed32426b739f90187ae24ad4adf84110d", - zip_sha1="0e177aea523adc320070196fbb777af4cdba2144", - csv_version_id="c8wpZpqoMqdATlqdoq3srPUi_8fYg6a.", - zip_version_id="28lHgxERhw32Ux6IBCxWWTtRwIaRrwo6" -) + csv_sha1="c71f654fccf1265a95dd0585c186232a2519e944", + zip_sha1="eadff359975c3ba250224ce1942544b091415095", + csv_version_id="PQoHljauNff1yWCMNKd5JEzR8Y38_j.0", + zip_version_id="6gE8TX1J89BnsHmdqb7rIGvRMw.sZdo6" +) \ No newline at end of file diff --git a/brainscore_vision/data/scialom2024/test.py b/brainscore_vision/data/scialom2024/test.py index dbc38b3b3..657376d1a 100644 --- a/brainscore_vision/data/scialom2024/test.py +++ b/brainscore_vision/data/scialom2024/test.py @@ -258,7 +258,7 @@ def test_stimulus_set_exists(self, identifier): ]) def test_number_of_images(self, identifier, num_images): stimulus_set = load_stimulus_set(identifier) - assert len(np.unique(stimulus_set['image_id'].values)) == num_images + assert len(np.unique(stimulus_set['stimulus_id'].values)) == num_images # test assembly coords present in ALL 17 sets: @pytest.mark.parametrize('identifier', [ diff --git a/brainscore_vision/metric_helpers/temporal.py b/brainscore_vision/metric_helpers/temporal.py new file mode 100644 index 000000000..0c110b9f2 --- /dev/null +++ b/brainscore_vision/metric_helpers/temporal.py @@ -0,0 +1,119 @@ +import xarray as xr +import numpy as np + +from brainscore_vision.benchmark_helpers.neural_common import Score +from 
brainscore_vision.metric_helpers.transformations import standard_error_of_the_mean + +from .xarray_utils import apply_over_dims, recursive_op + + +# take the mean of scores (medians of single neuron scores) over time + + +def average_over_presentation(score: Score) -> Score: + raw = score + score = raw.mean('presentation') + score.attrs['raw'] = raw + return score + + +# PerOps is applied to every slice/chunk of the xarray along the specified dimensions +class PerOps: + def __init__(self, callable, dims, check_coords=[]): + # for coordinate checking, they are supposed to be the same across assemblies + self.dims = dims + self.callable = callable + self.check_coords = check_coords + + def __call__(self, *asms): + for check_coord in self.check_coords: + asms = [asm.sortby(check_coord) for asm in asms] + for asm in asms[1:]: + assert (asm[check_coord].values == asms[0][check_coord].values).all() + ret = apply_over_dims(self.callable, *asms, dims=self.dims) + return ret + + +# SpanOps aggregates specified dimensions to one dimension +class SpanOps: + def __init__(self, callable, source_dims, aggregated_dim, resample=False): + # if resample, randomly choose samples from the aggregated dimension, + # whose size is the same as the assembly.sizes[aggregated_dim] + self.source_dims = source_dims + self.aggregated_dim = aggregated_dim + self.callable = callable + self.resample = resample + + def __call__(self, *asms): + asms = [self._stack(asm) for asm in asms] + return self.callable(*asms) + + def _stack(self, assembly): + assembly_type = type(assembly) + size = assembly.sizes[self.aggregated_dim] + assembly = xr.DataArray(assembly) # xarray cannot deal with stacking MultiIndex (pydata/xarray#1554) + assembly = assembly.reset_index(self.source_dims) + assembly = assembly.rename({dim:dim+"_" for dim in self.source_dims}) # we'll call stacked timebins "presentation" + assembly = assembly.stack({self.aggregated_dim : [dim+"_" for dim in self.source_dims]}) + if self.resample: + indices = np.random.randint(0, assembly.sizes[self.aggregated_dim], size) + assembly = assembly.isel({self.aggregated_dim: indices}) + return assembly_type(assembly) + +class PerTime(PerOps): + def __init__(self, callable, time_dim="time_bin", check_coord="time_bin_start", **kwargs): + self.time_bin = time_dim + super().__init__(callable, dims=[time_dim], check_coords=[check_coord], **kwargs) + +class PerPresentation(PerOps): + def __init__(self, callable, presentation_dim="presentation", check_coord="stimulus_id", **kwargs): + self.presentation_dim = presentation_dim + super().__init__(callable, dims=[presentation_dim], check_coords=[check_coord], **kwargs) + +class PerNeuroid(PerOps): + def __init__(self, callable, neuroid_dim="neuroid", check_coord="neuroid_id", **kwargs): + self.neuroid_dim = neuroid_dim + super().__init__(callable, dims=[neuroid_dim], check_coords=[check_coord], **kwargs) + +class SpanTime(SpanOps): + def __init__(self, callable, time_dim="time_bin", presentation_dim="presentation", resample=False): + self.time_dim = time_dim + self.presentation_dim = presentation_dim + source_dims = [self.time_dim, self.presentation_dim] + aggregated_dim = self.presentation_dim + super().__init__(callable, source_dims, aggregated_dim, resample=resample) + +class SpanTimeRegression: + """ + Fits a regression with weights shared across the time bins. 
+ """ + + def __init__(self, regression): + self._regression = regression + + def fit(self, source, target): + assert (source['time_bin'].values == target['time_bin'].values).all() + SpanTime(self._regression.fit)(source, target) + + def predict(self, source): + return PerTime(self._regression.predict)(source) + +class PerTimeRegression: + """ + Fits a regression with different weights for each time bins. + """ + + def __init__(self, regression): + self._regression = regression + + def fit(self, source, target): + # Lazy fit until predict + assert (source['time_bin'].values == target['time_bin'].values).all() + self._train_source = source + self._train_target = target + + def predict(self, source): + def fit_predict(train_source, train_target, test_source): + self._regression.fit(train_source, train_target) + return self._regression.predict(test_source) + return PerTime(fit_predict)(self._train_source, self._train_target, source) \ No newline at end of file diff --git a/brainscore_vision/metric_helpers/xarray_utils.py b/brainscore_vision/metric_helpers/xarray_utils.py index ce67654ff..8998b6003 100644 --- a/brainscore_vision/metric_helpers/xarray_utils.py +++ b/brainscore_vision/metric_helpers/xarray_utils.py @@ -1,4 +1,5 @@ import numpy as np +import xarray as xr from brainio.assemblies import NeuroidAssembly, array_is_element, walk_coords from brainscore_vision.metric_helpers import Defaults @@ -90,3 +91,61 @@ def __call__(self, prediction, target): for coord, dims, values in walk_coords(target) if dims == neuroid_dims}, dims=neuroid_dims) return result + + +# ops that also applies to attrs (and attrs of attrs), which are xarrays +def recursive_op(*arrs, op=lambda x:x): + # the attrs structure of each arr must be the same + val = op(*arrs) + attrs = arrs[0].attrs + for attr in attrs: + attr_val = arrs[0].attrs[attr] + if isinstance(attr_val, xr.DataArray): + attr_arrs = [arr.attrs[attr] for arr in arrs] + attr_val = recursive_op(*attr_arrs, op=op) + val.attrs[attr] = attr_val + return val + + +# apply a callable to every slice of the xarray along the specified dimensions +def apply_over_dims(callable, *asms, dims, njobs=-1): + asms = [asm.transpose(*dims, ...) 
for asm in asms] + sizes = [asms[0].sizes[dim] for dim in dims] + + def apply_helper(sizes, dims, *asms): + xarr = [] + attrs = {} + size = sizes[0] + rsizes = sizes[1:] + dim = dims[0] + rdims = dims[1:] + + if len(sizes) == 1: + # parallel execution on the last applied dimension + from joblib import Parallel, delayed + results = Parallel(n_jobs=njobs)(delayed(callable)(*[asm.isel({dim:s}) for asm in asms]) for s in range(size)) + else: + results = [] + for s in range(size): + arr = apply_helper(rsizes, rdims, *[asm.isel({dim:s}) for asm in asms]) + results.append(arr) + + for arr in results: + if arr is not None: + for k,v in arr.attrs.items(): + assert isinstance(v, xr.DataArray) + attrs.setdefault(k, []).append(v.expand_dims(dim)) + xarr.append(arr) + + if not xarr: + return + else: + xarr = xr.concat(xarr, dim=dim) + attrs = {k: xr.concat(vs, dim=dim) for k,vs in attrs.items()} + xarr.coords[dim] = asms[0].coords[dim] + for k,v in attrs.items(): + attrs[k].coords[dim] = asms[0].coords[dim] + xarr.attrs[k] = attrs[k] + return xarr + + return apply_helper(sizes, dims, *asms) \ No newline at end of file diff --git a/brainscore_vision/metrics/accuracy_distance/metric.py b/brainscore_vision/metrics/accuracy_distance/metric.py index fb31a7280..eb47e3bba 100644 --- a/brainscore_vision/metrics/accuracy_distance/metric.py +++ b/brainscore_vision/metrics/accuracy_distance/metric.py @@ -10,17 +10,52 @@ class AccuracyDistance(Metric): """ - Computes the accuracy distance using the relative distance between the source and target accuracies, adjusted - for the maximum possible difference between the two accuracies. + Computes the accuracy distance using the relative distance between the + source and target accuracies, adjusted for the maximum possible + difference between the two accuracies. By default, the distance is computed + from a single accuracy score on the entire BehavioralAssembly. However, + the distance can also be computed on a condition-wise basis using the + 'variables' argument. The advantage of the condition-wise approach is that + it can separate two models with identical overall accuracy if one exhibits a + more target-like pattern of performance across conditions. 
""" - def __call__(self, source: BehavioralAssembly, target: BehavioralAssembly) -> Score: + def __call__(self, source: BehavioralAssembly, target: + BehavioralAssembly, variables: tuple=()) -> Score: """Target should be the entire BehavioralAssembly, containing truth values.""" subjects = self.extract_subjects(target) subject_scores = [] for subject in subjects: subject_assembly = target.sel(subject=subject) - subject_score = self.compare_single_subject(source, subject_assembly) + + # compute single score across the entire dataset + if len(variables) == 0: + subject_score = self.compare_single_subject(source, subject_assembly) + + # compute scores for each condition, then average + else: + cond_scores = [] + + # get iterator across all combinations of variables + if len(variables) == 1: + conditions = set(subject_assembly[variables[0]].values) + conditions = [[c] for c in conditions] # to mimic itertools.product + else: + conditions = itertools.product( + *[set(subject_assembly[v].values) for v in variables]) + + # loop over conditions and compute scores + for cond in conditions: + indexers = {v: cond[i] for i, v in enumerate(variables)} + subject_cond_assembly = subject_assembly.sel(**indexers) + source_cond_assembly = source.sel(**indexers) + # to accomodate unbalanced designs, skip combinations of + # variables that don't exist in both assemblies + if len(subject_cond_assembly) and len(source_cond_assembly): + cond_scores.append(self.compare_single_subject( + source_cond_assembly, subject_cond_assembly)) + subject_score = Score(np.mean(cond_scores)) + subject_score = subject_score.expand_dims('subject') subject_score['subject'] = 'subject', [subject] subject_scores.append(subject_score) diff --git a/brainscore_vision/metrics/accuracy_distance/test.py b/brainscore_vision/metrics/accuracy_distance/test.py index 2fc15b792..d6414b790 100644 --- a/brainscore_vision/metrics/accuracy_distance/test.py +++ b/brainscore_vision/metrics/accuracy_distance/test.py @@ -12,6 +12,20 @@ def test_score(): assert score == approx(0.74074074) +def test_score_single_variable(): + assembly = _make_data() + metric = load_metric('accuracy_distance') + score = metric(assembly.sel(subject='C'), assembly, ('condition',)) + assert score == approx(0.55555556) + + +def test_score_multi_variable(): + assembly = _make_data() + metric = load_metric('accuracy_distance') + score = metric(assembly.sel(subject='C'), assembly, ('condition','animacy')) + assert score == approx(0.55555556) + + def test_has_error(): assembly = _make_data() metric = load_metric('accuracy_distance') @@ -38,5 +52,6 @@ def _make_data(): coords={'stimulus_id': ('presentation', np.resize(np.arange(9), 9 * 3)), 'truth': ('presentation', np.resize(['dog', 'cat', 'chair'], 9 * 3)), 'condition': ('presentation', np.resize([1, 1, 1, 2, 2, 2, 3, 3, 3], 9 * 3)), + 'animacy': ('presentation', np.resize(['animate', 'animate', 'inanimate'], 9 * 3)), 'subject': ('presentation', ['A'] * 9 + ['B'] * 9 + ['C'] * 9)}, dims=['presentation']) diff --git a/brainscore_vision/metrics/internal_consistency/__init__.py b/brainscore_vision/metrics/internal_consistency/__init__.py index bd71776be..ae6a41ea6 100644 --- a/brainscore_vision/metrics/internal_consistency/__init__.py +++ b/brainscore_vision/metrics/internal_consistency/__init__.py @@ -1,4 +1,8 @@ from brainscore_vision import metric_registry from .ceiling import InternalConsistency +from brainscore_vision.metric_helpers.temporal import PerTime + + metric_registry['internal_consistency'] = InternalConsistency 
+metric_registry['internal_consistency_temporal'] = lambda *args, **kwargs: PerTime(InternalConsistency(*args, **kwargs)) \ No newline at end of file diff --git a/brainscore_vision/metrics/internal_consistency/test.py b/brainscore_vision/metrics/internal_consistency/test.py index 6ccd597c3..3c00657fb 100644 --- a/brainscore_vision/metrics/internal_consistency/test.py +++ b/brainscore_vision/metrics/internal_consistency/test.py @@ -19,7 +19,7 @@ def test_dummy_data(self): dims=['presentation', 'neuroid']) ceiler = load_ceiling('internal_consistency') ceiling = ceiler(data) - assert ceiling == 1 + assert ceiling.item() == approx(1, abs=1e-8) class TestSplitHalfConsistency: diff --git a/brainscore_vision/metrics/ost/metric.py b/brainscore_vision/metrics/ost/metric.py index 7093781e7..92f7eb9ed 100644 --- a/brainscore_vision/metrics/ost/metric.py +++ b/brainscore_vision/metrics/ost/metric.py @@ -63,7 +63,7 @@ def compute_osts(self, train_source, test_source, test_osts): break # stop early if threshold is already hit for every image # interpolate - predicted_osts = np.empty(len(test_osts), dtype=np.float) + predicted_osts = np.empty(len(test_osts), dtype=np.float64) predicted_osts[:] = np.nan for i, (last_ost, hit_ost) in enumerate(zip(last_osts, hit_osts)): if hit_ost is None: diff --git a/brainscore_vision/metrics/regression_correlation/__init__.py b/brainscore_vision/metrics/regression_correlation/__init__.py index 2f8019b3f..691e82685 100644 --- a/brainscore_vision/metrics/regression_correlation/__init__.py +++ b/brainscore_vision/metrics/regression_correlation/__init__.py @@ -11,6 +11,15 @@ metric_registry['linear_predictivity'] = lambda *args, **kwargs: CrossRegressedCorrelation( regression=linear_regression(), correlation=pearsonr_correlation(), *args, **kwargs) +# temporal metrics +from .metric import SpanTimeCrossRegressedCorrelation + +metric_registry['spantime_pls'] = lambda *args, **kwargs: SpanTimeCrossRegressedCorrelation( + regression=pls_regression(), correlation=pearsonr_correlation(), *args, **kwargs) +metric_registry['spantime_ridge'] = lambda *args, **kwargs: SpanTimeCrossRegressedCorrelation( + regression=ridge_regression(), correlation=pearsonr_correlation(), *args, **kwargs) + + BIBTEX = """@article{schrimpf2018brain, title={Brain-score: Which artificial neural network for object recognition is most brain-like?}, author={Schrimpf, Martin and Kubilius, Jonas and Hong, Ha and Majaj, Najib J and Rajalingham, Rishi and Issa, Elias B and Kar, Kohitij and Bashivan, Pouya and Prescott-Roy, Jonathan and Geiger, Franziska and others}, diff --git a/brainscore_vision/metrics/regression_correlation/metric.py b/brainscore_vision/metrics/regression_correlation/metric.py index 365f63868..a09ba03e0 100644 --- a/brainscore_vision/metrics/regression_correlation/metric.py +++ b/brainscore_vision/metrics/regression_correlation/metric.py @@ -8,6 +8,7 @@ from brainscore_core.metrics import Metric, Score from brainscore_vision.metric_helpers.transformations import CrossValidation from brainscore_vision.metric_helpers.xarray_utils import XarrayRegression, XarrayCorrelation +from brainscore_vision.metric_helpers.temporal import SpanTimeRegression, PerTime class CrossRegressedCorrelation(Metric): @@ -65,6 +66,15 @@ def predict(self, X): return Ypred +# make the crc to consider time as a sample dimension +def SpanTimeCrossRegressedCorrelation(regression, correlation, *args, **kwargs): + return CrossRegressedCorrelation( + regression=SpanTimeRegression(regression), + 
correlation=PerTime(correlation), + *args, **kwargs + ) + + def pls_regression(regression_kwargs=None, xarray_kwargs=None): regression_defaults = dict(n_components=25, scale=False) regression_kwargs = {**regression_defaults, **(regression_kwargs or {})} diff --git a/brainscore_vision/model_helpers/activations/__init__.py b/brainscore_vision/model_helpers/activations/__init__.py index 10f514697..40a84e464 100644 --- a/brainscore_vision/model_helpers/activations/__init__.py +++ b/brainscore_vision/model_helpers/activations/__init__.py @@ -1,3 +1 @@ -from brainscore_vision.model_helpers.activations.keras import KerasWrapper, preprocess as preprocess_keras from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper, preprocess_images as preprocess_pytorch -from brainscore_vision.model_helpers.activations.tensorflow import TensorflowWrapper, TensorflowSlimWrapper diff --git a/brainscore_vision/model_helpers/activations/core.py b/brainscore_vision/model_helpers/activations/core.py index a9f537250..58f8baefc 100644 --- a/brainscore_vision/model_helpers/activations/core.py +++ b/brainscore_vision/model_helpers/activations/core.py @@ -348,7 +348,7 @@ def translate_images(self, images: List[Union[str, np.ndarray]], image_paths: Li """ Translate images according to selected microsaccades, if microsaccades are required. - :param images: A list of images (in the case of tensorflow models), or a list of arrays (non-tf models). + :param images: A list of arrays. :param image_paths: A list of image paths. Both `image_paths` and `images` are needed since while both tf and non-tf models preprocess images before this point, non-tf models' preprocessed images are fixed as arrays when fed into here. As such, simply returning `image_paths` for @@ -519,14 +519,9 @@ def translate(image: np.array, shift: Tuple[float, float], image_shape: Tuple[in return translated_image @staticmethod - def get_image_with_shape(image: Union[str, np.ndarray]) -> Tuple[np.array, Tuple[int, int], bool]: - if isinstance(image, str): # tf models return strings after preprocessing - image = cv2.imread(image) - rows, cols, _ = image.shape # cv2 uses height, width, channels - image_is_channels_first = False - else: - _, rows, cols, = image.shape # pytorch and keras use channels, height, width - image_is_channels_first = True + def get_image_with_shape(image: np.ndarray) -> Tuple[np.array, Tuple[int, int], bool]: + _, rows, cols, = image.shape # pytorch uses channels, height, width + image_is_channels_first = True return image, (rows, cols), image_is_channels_first @staticmethod diff --git a/brainscore_vision/model_helpers/activations/keras.py b/brainscore_vision/model_helpers/activations/keras.py deleted file mode 100644 index 8d1acf4d7..000000000 --- a/brainscore_vision/model_helpers/activations/keras.py +++ /dev/null @@ -1,92 +0,0 @@ -from collections import OrderedDict - -import numpy as np - -from brainscore_vision.model_helpers.activations.core import ActivationsExtractorHelper - - -class KerasWrapper: - def __init__(self, model, preprocessing, identifier=None, *args, **kwargs): - """ - :param model: a keras model with a function `preprocess_input` - that will later be called on the loaded numpy image - """ - self._model = model - identifier = identifier or model.name - self._extractor = ActivationsExtractorHelper( - identifier=identifier, get_activations=self.get_activations, preprocessing=preprocessing, - *args, **kwargs) - self._extractor.insert_attrs(self) - - @property - def identifier(self): - return 
self._extractor.identifier - - @identifier.setter - def identifier(self, value): - self._extractor.identifier = value - - def __call__(self, *args, **kwargs): # cannot assign __call__ as attribute due to Python convention - return self._extractor(*args, **kwargs) - - def get_activations(self, images, layer_names): - from keras import backend as K - input_tensor = self._model.input - layers = [layer for layer in self._model.layers if layer.name in layer_names] - layers = sorted(layers, key=lambda layer: layer_names.index(layer.name)) - if 'logits' in layer_names: - layers.insert(layer_names.index('logits'), self._model.layers[-1]) - assert len(layers) == len(layer_names) - layer_out_tensors = [layer.output for layer in layers] - functor = K.function([input_tensor] + [K.learning_phase()], layer_out_tensors) # evaluate all tensors at once - layer_outputs = functor([images, 0.]) # 0 to signal testing phase - return OrderedDict([(layer_name, layer_output) for layer_name, layer_output in zip(layer_names, layer_outputs)]) - - def __repr__(self): - return repr(self._model) - - def graph(self): - import networkx as nx - g = nx.DiGraph() - for layer in self._model.layers: - g.add_node(layer.name, object=layer, type=type(layer)) - for outbound_node in layer._outbound_nodes: - g.add_edge(layer.name, outbound_node.outbound_layer.name) - return g - - -def load_images(image_filepaths, image_size): - images = [load_image(image_filepath) for image_filepath in image_filepaths] - images = [scale_image(image, image_size) for image in images] - return np.array(images) - - -def load_image(image_filepath): - try: # keras API before tensorflow 2.9.1 - from keras.preprocessing.image import load_img - from keras.preprocessing.image import img_to_array - except ImportError: - from tensorflow.keras.utils import load_img - from tensorflow.keras.utils import img_to_array - img = load_img(image_filepath) - x = img_to_array(img) - return x - - -def scale_image(img, image_size): - from PIL import Image - try: # keras API before tensorflow 2.9.1 - from keras.preprocessing.image import img_to_array - except ImportError: - from tensorflow.keras.utils import img_to_array - img = Image.fromarray(img.astype(np.uint8)) - img = img.resize((image_size, image_size)) - img = img_to_array(img) - return img - - -def preprocess(image_filepaths, image_size, *args, **kwargs): - # only a wrapper to avoid top-level keras imports - from keras.applications.imagenet_utils import preprocess_input - images = load_images(image_filepaths, image_size=image_size) - return preprocess_input(images, *args, **kwargs) diff --git a/brainscore_vision/model_helpers/activations/temporal/inputs/base.py b/brainscore_vision/model_helpers/activations/temporal/inputs/base.py index d656a86b7..c94ccd3d7 100644 --- a/brainscore_vision/model_helpers/activations/temporal/inputs/base.py +++ b/brainscore_vision/model_helpers/activations/temporal/inputs/base.py @@ -15,4 +15,3 @@ def is_video_path(path: Union[str, Path]) -> bool: def is_image_path(path: Union[str, Path]) -> bool: extension = path.split('.')[-1].lower() return extension in ['jpg', 'jpeg', 'png', 'bmp', 'tiff'] - \ No newline at end of file diff --git a/brainscore_vision/model_helpers/activations/tensorflow.py b/brainscore_vision/model_helpers/activations/tensorflow.py deleted file mode 100644 index d5e4864d5..000000000 --- a/brainscore_vision/model_helpers/activations/tensorflow.py +++ /dev/null @@ -1,71 +0,0 @@ -from collections import OrderedDict - -from brainscore_vision.model_helpers.activations.core 
import ActivationsExtractorHelper - - -class TensorflowWrapper: - def __init__(self, identifier, inputs, endpoints: dict, session, *args, **kwargs): - import tensorflow as tf - self._inputs = inputs - self._endpoints = endpoints - self._session = session or tf.compat.v1.Session() - self._extractor = ActivationsExtractorHelper(identifier=identifier, get_activations=self.get_activations, - preprocessing=None, *args, **kwargs) - self._extractor.insert_attrs(self) - - @property - def identifier(self): - return self._extractor.identifier - - @identifier.setter - def identifier(self, value): - self._extractor.identifier = value - - def __call__(self, *args, **kwargs): # cannot assign __call__ as attribute due to Python convention - return self._extractor(*args, **kwargs) - - def get_activations(self, images, layer_names): - layer_tensors = OrderedDict((layer, self._endpoints[ - layer if (layer != 'logits' or layer in self._endpoints) else next(reversed(self._endpoints))]) - for layer in layer_names) - layer_outputs = self._session.run(layer_tensors, feed_dict={self._inputs: images}) - return layer_outputs - - def graph(self): - import networkx as nx - g = nx.DiGraph() - for name, layer in self._endpoints.items(): - g.add_node(name, object=layer, type=type(layer)) - g.add_node("logits", object=self.logits, type=type(self.logits)) - return g - - -class TensorflowSlimWrapper(TensorflowWrapper): - def __init__(self, *args, labels_offset=1, **kwargs): - super(TensorflowSlimWrapper, self).__init__(*args, **kwargs) - self._labels_offset = labels_offset - - def get_activations(self, images, layer_names): - layer_outputs = super(TensorflowSlimWrapper, self).get_activations(images, layer_names) - if 'logits' in layer_outputs: - layer_outputs['logits'] = layer_outputs['logits'][:, self._labels_offset:] - return layer_outputs - - -def load_image(image_filepath): - import tensorflow as tf - image = tf.io.read_file(image_filepath) - image = tf.image.decode_png(image, channels=3) - return image - - -def resize_image(image, image_size): - import tensorflow as tf - image = tf.image.resize(image, (image_size, image_size)) - return image - - -def load_resize_image(image_path, image_size): - image = load_image(image_path) - image = resize_image(image, image_size) - return image diff --git a/brainscore_vision/models/alexnet_7be5be79/setup.py b/brainscore_vision/models/alexnet_7be5be79/setup.py index 421914cfb..41c4ef930 100644 --- a/brainscore_vision/models/alexnet_7be5be79/setup.py +++ b/brainscore_vision/models/alexnet_7be5be79/setup.py @@ -3,9 +3,10 @@ from setuptools import setup, find_packages -requirements = [ "torchvision", - "torch" -] +requirements = ["torchvision", + "torch", + "fire" + ] setup( packages=find_packages(exclude=['tests']), diff --git a/brainscore_vision/models/alexnet_7be5be79_convs/__init__.py b/brainscore_vision/models/alexnet_7be5be79_convs/__init__.py new file mode 100644 index 000000000..36cd00711 --- /dev/null +++ b/brainscore_vision/models/alexnet_7be5be79_convs/__init__.py @@ -0,0 +1,5 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from .model import get_model, get_layers + +model_registry['alexnet_7be5be79_convs'] = lambda: ModelCommitment(identifier='alexnet_7be5be79_convs', activations_model=get_model('alexnet_7be5be79_convs'), layers=get_layers('alexnet_7be5be79_convs')) diff --git a/brainscore_vision/models/alexnet_7be5be79_convs/model.py 
b/brainscore_vision/models/alexnet_7be5be79_convs/model.py new file mode 100644 index 000000000..42ad4e2d0 --- /dev/null +++ b/brainscore_vision/models/alexnet_7be5be79_convs/model.py @@ -0,0 +1,42 @@ +from brainscore_vision.model_helpers.check_submission import check_models +import functools +import os +import torchvision.models +from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper +from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images +from pathlib import Path +from brainscore_vision.model_helpers import download_weights +import torch + +# This is an example implementation for submitting alexnet as a pytorch model + +# Attention: It is important that the wrapper identifier is unique per model! +# The results will otherwise be the same due to brain-score's internal result caching mechanism. +# Please load your pytorch model for usage on CPU. There won't be GPUs available for scoring your model. +# If the model requires a GPU, contact the brain-score team directly. +from brainscore_vision.model_helpers.check_submission import check_models + + +def get_model_list(): + return ['alexnet_7be5be79_convs'] + + +def get_model(name): + assert name == 'alexnet_7be5be79_convs' + model = torchvision.models.alexnet(pretrained=True) + preprocessing = functools.partial(load_preprocess_images, image_size=224) + wrapper = PytorchWrapper(identifier='alexnet_7be5be79_convs', model=model, preprocessing=preprocessing) + wrapper.image_size = 224 + return wrapper + + +def get_layers(name): + assert name == 'alexnet_7be5be79_convs' + return ['features.3','features.6', 'features.8', 'features.10', 'classifier.1', 'classifier.4'] + +def get_bibtex(model_identifier): + return """xx""" + + +if __name__ == '__main__': + check_models.check_base_models(__name__) diff --git a/brainscore_vision/models/tv_efficientnet-b1/setup.py b/brainscore_vision/models/alexnet_7be5be79_convs/setup.py similarity index 100% rename from brainscore_vision/models/tv_efficientnet-b1/setup.py rename to brainscore_vision/models/alexnet_7be5be79_convs/setup.py diff --git a/brainscore_vision/models/tv_efficientnet-b1/test.py b/brainscore_vision/models/alexnet_7be5be79_convs/test.py similarity index 100% rename from brainscore_vision/models/tv_efficientnet-b1/test.py rename to brainscore_vision/models/alexnet_7be5be79_convs/test.py diff --git a/brainscore_vision/models/bp_resnet50_julios/setup.py b/brainscore_vision/models/bp_resnet50_julios/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/bp_resnet50_julios/setup.py +++ b/brainscore_vision/models/bp_resnet50_julios/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/cornet_s_ynshah/setup.py b/brainscore_vision/models/cornet_s_ynshah/setup.py index 68362b48b..aa18ce8a3 100644 --- a/brainscore_vision/models/cornet_s_ynshah/setup.py +++ b/brainscore_vision/models/cornet_s_ynshah/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/dbp_resnet50_julios/setup.py b/brainscore_vision/models/dbp_resnet50_julios/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/dbp_resnet50_julios/setup.py +++
b/brainscore_vision/models/dbp_resnet50_julios/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_Vanilla/setup.py b/brainscore_vision/models/eBarlow_Vanilla/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_Vanilla/setup.py +++ b/brainscore_vision/models/eBarlow_Vanilla/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_Vanilla_1/setup.py b/brainscore_vision/models/eBarlow_Vanilla_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_Vanilla_1/setup.py +++ b/brainscore_vision/models/eBarlow_Vanilla_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_Vanilla_2/setup.py b/brainscore_vision/models/eBarlow_Vanilla_2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_Vanilla_2/setup.py +++ b/brainscore_vision/models/eBarlow_Vanilla_2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_augself_linear_1/setup.py b/brainscore_vision/models/eBarlow_augself_linear_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_augself_linear_1/setup.py +++ b/brainscore_vision/models/eBarlow_augself_linear_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_augself_mlp_1/setup.py b/brainscore_vision/models/eBarlow_augself_mlp_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_augself_mlp_1/setup.py +++ b/brainscore_vision/models/eBarlow_augself_mlp_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_lmda_0001_1/setup.py b/brainscore_vision/models/eBarlow_lmda_0001_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_lmda_0001_1/setup.py +++ b/brainscore_vision/models/eBarlow_lmda_0001_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_lmda_001_1/setup.py b/brainscore_vision/models/eBarlow_lmda_001_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_lmda_001_1/setup.py +++ b/brainscore_vision/models/eBarlow_lmda_001_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git 
a/brainscore_vision/models/eBarlow_lmda_001_2/setup.py b/brainscore_vision/models/eBarlow_lmda_001_2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_lmda_001_2/setup.py +++ b/brainscore_vision/models/eBarlow_lmda_001_2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_lmda_001_3/setup.py b/brainscore_vision/models/eBarlow_lmda_001_3/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_lmda_001_3/setup.py +++ b/brainscore_vision/models/eBarlow_lmda_001_3/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_lmda_01/setup.py b/brainscore_vision/models/eBarlow_lmda_01/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_lmda_01/setup.py +++ b/brainscore_vision/models/eBarlow_lmda_01/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_lmda_01_1/setup.py b/brainscore_vision/models/eBarlow_lmda_01_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_lmda_01_1/setup.py +++ b/brainscore_vision/models/eBarlow_lmda_01_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_lmda_01_2/setup.py b/brainscore_vision/models/eBarlow_lmda_01_2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_lmda_01_2/setup.py +++ b/brainscore_vision/models/eBarlow_lmda_01_2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_lmda_02_1/setup.py b/brainscore_vision/models/eBarlow_lmda_02_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_lmda_02_1/setup.py +++ b/brainscore_vision/models/eBarlow_lmda_02_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_lmda_03_1/setup.py b/brainscore_vision/models/eBarlow_lmda_03_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_lmda_03_1/setup.py +++ b/brainscore_vision/models/eBarlow_lmda_03_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_lmda_04_1/setup.py b/brainscore_vision/models/eBarlow_lmda_04_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_lmda_04_1/setup.py +++ b/brainscore_vision/models/eBarlow_lmda_04_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 
'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_lmda_05_1/setup.py b/brainscore_vision/models/eBarlow_lmda_05_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_lmda_05_1/setup.py +++ b/brainscore_vision/models/eBarlow_lmda_05_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Mom_Vanilla_1/setup.py b/brainscore_vision/models/eMMCR_Mom_Vanilla_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Mom_Vanilla_1/setup.py +++ b/brainscore_vision/models/eMMCR_Mom_Vanilla_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Mom_Vanilla_2/setup.py b/brainscore_vision/models/eMMCR_Mom_Vanilla_2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Mom_Vanilla_2/setup.py +++ b/brainscore_vision/models/eMMCR_Mom_Vanilla_2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Mom_lmda_0001_1/setup.py b/brainscore_vision/models/eMMCR_Mom_lmda_0001_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Mom_lmda_0001_1/setup.py +++ b/brainscore_vision/models/eMMCR_Mom_lmda_0001_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Mom_lmda_001_1/setup.py b/brainscore_vision/models/eMMCR_Mom_lmda_001_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Mom_lmda_001_1/setup.py +++ b/brainscore_vision/models/eMMCR_Mom_lmda_001_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Mom_lmda_01_1/setup.py b/brainscore_vision/models/eMMCR_Mom_lmda_01_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Mom_lmda_01_1/setup.py +++ b/brainscore_vision/models/eMMCR_Mom_lmda_01_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Mom_lmda_01_2/setup.py b/brainscore_vision/models/eMMCR_Mom_lmda_01_2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Mom_lmda_01_2/setup.py +++ b/brainscore_vision/models/eMMCR_Mom_lmda_01_2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Mom_lmda_02_1/setup.py 
b/brainscore_vision/models/eMMCR_Mom_lmda_02_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Mom_lmda_02_1/setup.py +++ b/brainscore_vision/models/eMMCR_Mom_lmda_02_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Mom_lmda_03_1/setup.py b/brainscore_vision/models/eMMCR_Mom_lmda_03_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Mom_lmda_03_1/setup.py +++ b/brainscore_vision/models/eMMCR_Mom_lmda_03_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Mom_lmda_04_1/setup.py b/brainscore_vision/models/eMMCR_Mom_lmda_04_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Mom_lmda_04_1/setup.py +++ b/brainscore_vision/models/eMMCR_Mom_lmda_04_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Mom_lmda_05_1/setup.py b/brainscore_vision/models/eMMCR_Mom_lmda_05_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Mom_lmda_05_1/setup.py +++ b/brainscore_vision/models/eMMCR_Mom_lmda_05_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Vanilla/setup.py b/brainscore_vision/models/eMMCR_Vanilla/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Vanilla/setup.py +++ b/brainscore_vision/models/eMMCR_Vanilla/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_VanillaV2/setup.py b/brainscore_vision/models/eMMCR_VanillaV2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_VanillaV2/setup.py +++ b/brainscore_vision/models/eMMCR_VanillaV2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Vanilla_1/setup.py b/brainscore_vision/models/eMMCR_Vanilla_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Vanilla_1/setup.py +++ b/brainscore_vision/models/eMMCR_Vanilla_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Vanilla_2/setup.py b/brainscore_vision/models/eMMCR_Vanilla_2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Vanilla_2/setup.py +++ b/brainscore_vision/models/eMMCR_Vanilla_2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: 
English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_lmda_01/setup.py b/brainscore_vision/models/eMMCR_lmda_01/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_lmda_01/setup.py +++ b/brainscore_vision/models/eMMCR_lmda_01/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_lmda_01V2/setup.py b/brainscore_vision/models/eMMCR_lmda_01V2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_lmda_01V2/setup.py +++ b/brainscore_vision/models/eMMCR_lmda_01V2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_lmda_01_1/setup.py b/brainscore_vision/models/eMMCR_lmda_01_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_lmda_01_1/setup.py +++ b/brainscore_vision/models/eMMCR_lmda_01_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_lmda_01_2/setup.py b/brainscore_vision/models/eMMCR_lmda_01_2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_lmda_01_2/setup.py +++ b/brainscore_vision/models/eMMCR_lmda_01_2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_lmda_01_3/setup.py b/brainscore_vision/models/eMMCR_lmda_01_3/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_lmda_01_3/setup.py +++ b/brainscore_vision/models/eMMCR_lmda_01_3/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_Vanilla_1/setup.py b/brainscore_vision/models/eSimCLR_Vanilla_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_Vanilla_1/setup.py +++ b/brainscore_vision/models/eSimCLR_Vanilla_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_Vanilla_2/setup.py b/brainscore_vision/models/eSimCLR_Vanilla_2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_Vanilla_2/setup.py +++ b/brainscore_vision/models/eSimCLR_Vanilla_2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_lmda_0001_1/setup.py b/brainscore_vision/models/eSimCLR_lmda_0001_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_lmda_0001_1/setup.py +++ b/brainscore_vision/models/eSimCLR_lmda_0001_1/setup.py @@ 
-19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_lmda_001_1/setup.py b/brainscore_vision/models/eSimCLR_lmda_001_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_lmda_001_1/setup.py +++ b/brainscore_vision/models/eSimCLR_lmda_001_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_lmda_01_1/setup.py b/brainscore_vision/models/eSimCLR_lmda_01_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_lmda_01_1/setup.py +++ b/brainscore_vision/models/eSimCLR_lmda_01_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_lmda_01_2/setup.py b/brainscore_vision/models/eSimCLR_lmda_01_2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_lmda_01_2/setup.py +++ b/brainscore_vision/models/eSimCLR_lmda_01_2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_lmda_02_1/setup.py b/brainscore_vision/models/eSimCLR_lmda_02_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_lmda_02_1/setup.py +++ b/brainscore_vision/models/eSimCLR_lmda_02_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_lmda_02_1_1/setup.py b/brainscore_vision/models/eSimCLR_lmda_02_1_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_lmda_02_1_1/setup.py +++ b/brainscore_vision/models/eSimCLR_lmda_02_1_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_lmda_03_1/setup.py b/brainscore_vision/models/eSimCLR_lmda_03_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_lmda_03_1/setup.py +++ b/brainscore_vision/models/eSimCLR_lmda_03_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_lmda_04_1/setup.py b/brainscore_vision/models/eSimCLR_lmda_04_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_lmda_04_1/setup.py +++ b/brainscore_vision/models/eSimCLR_lmda_04_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_lmda_04_1_1/setup.py 
b/brainscore_vision/models/eSimCLR_lmda_04_1_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_lmda_04_1_1/setup.py +++ b/brainscore_vision/models/eSimCLR_lmda_04_1_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_lmda_05_1/setup.py b/brainscore_vision/models/eSimCLR_lmda_05_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_lmda_05_1/setup.py +++ b/brainscore_vision/models/eSimCLR_lmda_05_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py b/brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py index 41c6ca79e..d3eaf9c94 100644 --- a/brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py +++ b/brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py @@ -1,7 +1,7 @@ import functools import torch -from brainscore_vision.model_helpers.activations import PytorchWrapper, KerasWrapper +from brainscore_vision.model_helpers.activations import PytorchWrapper from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images from brainscore_vision.model_helpers.s3 import load_weight_file from PIL import Image diff --git a/brainscore_vision/models/fixres_resnext101_32x48d_wsl/__init__.py b/brainscore_vision/models/fixres_resnext101_32x48d_wsl/__init__.py new file mode 100644 index 000000000..01cb6f846 --- /dev/null +++ b/brainscore_vision/models/fixres_resnext101_32x48d_wsl/__init__.py @@ -0,0 +1,7 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from .model import get_model, get_layers + +model_registry['fixres_resnext101_32x48d_wsl'] = lambda: ModelCommitment(identifier='fixres_resnext101_32x48d_wsl', + activations_model=get_model(), + layers=get_layers('fixres_resnext101_32x48d_wsl')) \ No newline at end of file diff --git a/brainscore_vision/models/fixres_resnext101_32x48d_wsl/model.py b/brainscore_vision/models/fixres_resnext101_32x48d_wsl/model.py new file mode 100644 index 000000000..b9d82f402 --- /dev/null +++ b/brainscore_vision/models/fixres_resnext101_32x48d_wsl/model.py @@ -0,0 +1,57 @@ +from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper +from fixres.hubconf import load_state_dict_from_url +from fixres.transforms_v2 import get_transforms +from brainscore_vision.model_helpers.activations.pytorch import load_images +import numpy as np +from importlib import import_module +import ssl + + +ssl._create_default_https_context = ssl._create_unverified_context + + +def get_model(): + module = import_module('fixres.imnet_evaluate.resnext_wsl') + model_ctr = getattr(module, 'resnext101_32x48d_wsl') + model = model_ctr(pretrained=False) # the pretrained flag here corresponds to standard resnext weights + pretrained_dict = load_state_dict_from_url('https://dl.fbaipublicfiles.com/FixRes_data/FixRes_Pretrained_Models/ResNeXt_101_32x48d.pth', + map_location=lambda storage, loc: storage)['model'] + model_dict = 
model.state_dict() + for k in model_dict.keys(): + assert ('module.' + k) in pretrained_dict.keys() + model_dict[k] = pretrained_dict.get(('module.' + k)) + model.load_state_dict(model_dict) + + # preprocessing + # 320 for ResNeXt: + # https://github.com/mschrimpf/FixRes/tree/4ddcf11b29c118dfb8a48686f75f572450f67e5d#example-evaluation-procedure + input_size = 320 + # https://github.com/mschrimpf/FixRes/blob/0dc15ab509b9cb9d7002ca47826dab4d66033668/fixres/imnet_evaluate/train.py#L159-L160 + transformation = get_transforms(input_size=input_size, test_size=input_size, + kind='full', need=('val',), + # this is different from standard ImageNet evaluation to show the whole image + crop=False, + # no backbone parameter for ResNeXt following + # https://github.com/mschrimpf/FixRes/blob/0dc15ab509b9cb9d7002ca47826dab4d66033668/fixres/imnet_evaluate/train.py#L154-L156 + backbone=None) + transform = transformation['val'] + + def load_preprocess_images(image_filepaths): + images = load_images(image_filepaths) + images = [transform(image) for image in images] + images = [image.unsqueeze(0) for image in images] + images = np.concatenate(images) + return images + + wrapper = PytorchWrapper(identifier='resnext101_32x48d_wsl', model=model, preprocessing=load_preprocess_images, + batch_size=4) # doesn't fit into 12 GB GPU memory otherwise + wrapper.image_size = input_size + return wrapper + + +def get_layers(name): + return (['conv1'] + + # note that while relu is used multiple times, by default the last one will overwrite all previous ones + [f"layer{block + 1}.{unit}.relu" + for block, block_units in enumerate([3, 4, 23, 3]) for unit in range(block_units)] + + ['avgpool']) diff --git a/brainscore_vision/models/fixres_resnext101_32x48d_wsl/requirements.txt b/brainscore_vision/models/fixres_resnext101_32x48d_wsl/requirements.txt new file mode 100644 index 000000000..fabd05c82 --- /dev/null +++ b/brainscore_vision/models/fixres_resnext101_32x48d_wsl/requirements.txt @@ -0,0 +1,5 @@ +torchvision +torch +numpy +importlib +Fixing-the-train-test-resolution-discrepancy-scripts@ git+https://github.com/mschrimpf/FixRes.git diff --git a/brainscore_vision/models/fixres_resnext101_32x48d_wsl/test.py b/brainscore_vision/models/fixres_resnext101_32x48d_wsl/test.py new file mode 100644 index 000000000..ba14b95bf --- /dev/null +++ b/brainscore_vision/models/fixres_resnext101_32x48d_wsl/test.py @@ -0,0 +1,7 @@ +import pytest +import brainscore_vision + +@pytest.mark.travis_slow +def test_has_identifier(): + model = brainscore_vision.load_model('fixres_resnext101_32x48d_wsl') + assert model.identifier == 'fixres_resnext101_32x48d_wsl' diff --git a/brainscore_vision/models/inception_v4_pytorch/__init__.py b/brainscore_vision/models/inception_v4_pytorch/__init__.py new file mode 100644 index 000000000..9b4ccbdf5 --- /dev/null +++ b/brainscore_vision/models/inception_v4_pytorch/__init__.py @@ -0,0 +1,7 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from .model import get_model, get_layers + +model_registry['inception_v4_pytorch'] = lambda: ModelCommitment(identifier='inception_v4_pytorch', + activations_model=get_model('inception_v4_pytorch'), + layers=get_layers('inception_v4_pytorch')) \ No newline at end of file diff --git a/brainscore_vision/models/inception_v4_pytorch/model.py b/brainscore_vision/models/inception_v4_pytorch/model.py new file mode 100644 index 000000000..a73e41abc --- /dev/null +++ 
b/brainscore_vision/models/inception_v4_pytorch/model.py @@ -0,0 +1,64 @@ +from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper +from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images +import ssl +import functools +import timm +from brainscore_vision.model_helpers.check_submission import check_models + +ssl._create_default_https_context = ssl._create_unverified_context + +''' +This is a Pytorch implementation of inception_v4. + +Previously on Brain-Score, this model existed as a Tensorflow model, and was converted via: + https://huggingface.co/docs/timm/en/models/inception-v4 + +Disclaimer: This (pytorch) implementation's Brain-Score scores might not align identically with Tensorflow +implementation. + +''' + + +MODEL = timm.create_model('inception_v4', pretrained=True) + +def get_model(name): + assert name == 'inception_v4_pytorch' + preprocessing = functools.partial(load_preprocess_images, image_size=299) + wrapper = PytorchWrapper(identifier='inception_v4_pytorch', model=MODEL, + preprocessing=preprocessing, + batch_size=4) # doesn't fit into 12 GB GPU memory otherwise + wrapper.image_size = 299 + return wrapper + + +def get_layers(name): + assert name == 'inception_v4_pytorch' + layers = [] + layers += ['Conv2d_1a_3x3'] + layers += ['Mixed_3a'] + layers += ['Mixed_4a'] + layers += [f'Mixed_5{i}' for i in ['a', 'b', 'c', 'd', 'e']] + layers += [f'Mixed_6{i}' for i in ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']] + layers += [f'Mixed_7{i}' for i in ['a', 'b', 'c', 'd']] + layers += ['global_pool'] + + return layers + + +def get_bibtex(model_identifier): + """ + A method returning the bibtex reference of the requested model as a string. + """ + return """@misc{szegedy2016inceptionv4, + title={Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning}, + author={Christian Szegedy and Sergey Ioffe and Vincent Vanhoucke and Alex Alemi}, + year={2016}, + eprint={1602.07261}, + archivePrefix={arXiv}, + primaryClass={cs.CV} + } + """ + + +if __name__ == '__main__': + check_models.check_base_models(__name__) diff --git a/brainscore_vision/models/inception_v4_pytorch/requirements.txt b/brainscore_vision/models/inception_v4_pytorch/requirements.txt new file mode 100644 index 000000000..860845916 --- /dev/null +++ b/brainscore_vision/models/inception_v4_pytorch/requirements.txt @@ -0,0 +1,3 @@ +torch +torchvision +timm diff --git a/brainscore_vision/models/inception_v4_pytorch/test.py b/brainscore_vision/models/inception_v4_pytorch/test.py new file mode 100644 index 000000000..8e1c39049 --- /dev/null +++ b/brainscore_vision/models/inception_v4_pytorch/test.py @@ -0,0 +1,8 @@ +import pytest +import brainscore_vision + + +@pytest.mark.travis_slow +def test_has_identifier(): + model = brainscore_vision.load_model('inception_v4_pytorch') + assert model.identifier == 'inception_v4_pytorch' \ No newline at end of file diff --git a/brainscore_vision/models/r50_tvpt/setup.py b/brainscore_vision/models/r50_tvpt/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/r50_tvpt/setup.py +++ b/brainscore_vision/models/r50_tvpt/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/resnet50_eMMCR_Vanilla/setup.py b/brainscore_vision/models/resnet50_eMMCR_Vanilla/setup.py index 421914cfb..c286567f5 100644 --- 
a/brainscore_vision/models/resnet50_eMMCR_Vanilla/setup.py +++ b/brainscore_vision/models/resnet50_eMMCR_Vanilla/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/resnet50_eMMCR_VanillaV2/setup.py b/brainscore_vision/models/resnet50_eMMCR_VanillaV2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/resnet50_eMMCR_VanillaV2/setup.py +++ b/brainscore_vision/models/resnet50_eMMCR_VanillaV2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/setup.py b/brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/setup.py +++ b/brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/resnet50_julios/setup.py b/brainscore_vision/models/resnet50_julios/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/resnet50_julios/setup.py +++ b/brainscore_vision/models/resnet50_julios/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/scaling_models/__init__.py b/brainscore_vision/models/scaling_models/__init__.py new file mode 100644 index 000000000..e020a07e5 --- /dev/null +++ b/brainscore_vision/models/scaling_models/__init__.py @@ -0,0 +1,265 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from .model import get_model, MODEL_CONFIGS + +model_registry["resnet18_imagenet_full"] = lambda: ModelCommitment( + identifier="resnet18_imagenet_full", + activations_model=get_model("resnet18_imagenet_full"), + layers=MODEL_CONFIGS["resnet18_imagenet_full"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["resnet18_imagenet_full"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["resnet18_imagenet_full"]["model_commitment"]["region_layer_map"] +) + + +model_registry["resnet34_imagenet_full"] = lambda: ModelCommitment( + identifier="resnet34_imagenet_full", + activations_model=get_model("resnet34_imagenet_full"), + layers=MODEL_CONFIGS["resnet34_imagenet_full"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["resnet34_imagenet_full"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["resnet34_imagenet_full"]["model_commitment"]["region_layer_map"] +) + + +model_registry["resnet50_imagenet_full"] = lambda: ModelCommitment( + identifier="resnet50_imagenet_full", + activations_model=get_model("resnet50_imagenet_full"), + layers=MODEL_CONFIGS["resnet50_imagenet_full"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["resnet50_imagenet_full"]["model_commitment"]["behavioral_readout_layer"], + 
region_layer_map=MODEL_CONFIGS["resnet50_imagenet_full"]["model_commitment"]["region_layer_map"] +) + + +model_registry["resnet101_imagenet_full"] = lambda: ModelCommitment( + identifier="resnet101_imagenet_full", + activations_model=get_model("resnet101_imagenet_full"), + layers=MODEL_CONFIGS["resnet101_imagenet_full"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["resnet101_imagenet_full"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["resnet101_imagenet_full"]["model_commitment"]["region_layer_map"] +) + + +model_registry["resnet152_imagenet_full"] = lambda: ModelCommitment( + identifier="resnet152_imagenet_full", + activations_model=get_model("resnet152_imagenet_full"), + layers=MODEL_CONFIGS["resnet152_imagenet_full"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["resnet152_imagenet_full"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["resnet152_imagenet_full"]["model_commitment"]["region_layer_map"] +) + + +model_registry["resnet18_ecoset_full"] = lambda: ModelCommitment( + identifier="resnet18_ecoset_full", + activations_model=get_model("resnet18_ecoset_full"), + layers=MODEL_CONFIGS["resnet18_ecoset_full"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["resnet18_ecoset_full"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["resnet18_ecoset_full"]["model_commitment"]["region_layer_map"] +) + + +model_registry["resnet34_ecoset_full"] = lambda: ModelCommitment( + identifier="resnet34_ecoset_full", + activations_model=get_model("resnet34_ecoset_full"), + layers=MODEL_CONFIGS["resnet34_ecoset_full"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["resnet34_ecoset_full"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["resnet34_ecoset_full"]["model_commitment"]["region_layer_map"] +) + + +model_registry["resnet50_ecoset_full"] = lambda: ModelCommitment( + identifier="resnet50_ecoset_full", + activations_model=get_model("resnet50_ecoset_full"), + layers=MODEL_CONFIGS["resnet50_ecoset_full"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["resnet50_ecoset_full"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["resnet50_ecoset_full"]["model_commitment"]["region_layer_map"] +) + + +model_registry["resnet101_ecoset_full"] = lambda: ModelCommitment( + identifier="resnet101_ecoset_full", + activations_model=get_model("resnet101_ecoset_full"), + layers=MODEL_CONFIGS["resnet101_ecoset_full"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["resnet101_ecoset_full"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["resnet101_ecoset_full"]["model_commitment"]["region_layer_map"] +) + + +model_registry["resnet152_ecoset_full"] = lambda: ModelCommitment( + identifier="resnet152_ecoset_full", + activations_model=get_model("resnet152_ecoset_full"), + layers=MODEL_CONFIGS["resnet152_ecoset_full"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["resnet152_ecoset_full"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["resnet152_ecoset_full"]["model_commitment"]["region_layer_map"] +) + + +model_registry["resnet50_imagenet_1_seed-0"] = lambda: ModelCommitment( + identifier="resnet50_imagenet_1_seed-0", + activations_model=get_model("resnet50_imagenet_1_seed-0"), + 
layers=MODEL_CONFIGS["resnet50_imagenet_1_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["resnet50_imagenet_1_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["resnet50_imagenet_1_seed-0"]["model_commitment"]["region_layer_map"] +) + + +model_registry["resnet50_imagenet_10_seed-0"] = lambda: ModelCommitment( + identifier="resnet50_imagenet_10_seed-0", + activations_model=get_model("resnet50_imagenet_10_seed-0"), + layers=MODEL_CONFIGS["resnet50_imagenet_10_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["resnet50_imagenet_10_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["resnet50_imagenet_10_seed-0"]["model_commitment"]["region_layer_map"] +) + + +model_registry["resnet50_imagenet_100_seed-0"] = lambda: ModelCommitment( + identifier="resnet50_imagenet_100_seed-0", + activations_model=get_model("resnet50_imagenet_100_seed-0"), + layers=MODEL_CONFIGS["resnet50_imagenet_100_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["resnet50_imagenet_100_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["resnet50_imagenet_100_seed-0"]["model_commitment"]["region_layer_map"] +) + + +model_registry["efficientnet_b0_imagenet_full"] = lambda: ModelCommitment( + identifier="efficientnet_b0_imagenet_full", + activations_model=get_model("efficientnet_b0_imagenet_full"), + layers=MODEL_CONFIGS["efficientnet_b0_imagenet_full"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["efficientnet_b0_imagenet_full"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["efficientnet_b0_imagenet_full"]["model_commitment"]["region_layer_map"] +) + + +model_registry["efficientnet_b1_imagenet_full"] = lambda: ModelCommitment( + identifier="efficientnet_b1_imagenet_full", + activations_model=get_model("efficientnet_b1_imagenet_full"), + layers=MODEL_CONFIGS["efficientnet_b1_imagenet_full"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["efficientnet_b1_imagenet_full"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["efficientnet_b1_imagenet_full"]["model_commitment"]["region_layer_map"] +) + + +model_registry["efficientnet_b2_imagenet_full"] = lambda: ModelCommitment( + identifier="efficientnet_b2_imagenet_full", + activations_model=get_model("efficientnet_b2_imagenet_full"), + layers=MODEL_CONFIGS["efficientnet_b2_imagenet_full"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["efficientnet_b2_imagenet_full"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["efficientnet_b2_imagenet_full"]["model_commitment"]["region_layer_map"] +) + + +model_registry["deit_small_imagenet_full_seed-0"] = lambda: ModelCommitment( + identifier="deit_small_imagenet_full_seed-0", + activations_model=get_model("deit_small_imagenet_full_seed-0"), + layers=MODEL_CONFIGS["deit_small_imagenet_full_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["deit_small_imagenet_full_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["deit_small_imagenet_full_seed-0"]["model_commitment"]["region_layer_map"] +) + + +model_registry["deit_base_imagenet_full_seed-0"] = lambda: ModelCommitment( + identifier="deit_base_imagenet_full_seed-0", + activations_model=get_model("deit_base_imagenet_full_seed-0"), + 
layers=MODEL_CONFIGS["deit_base_imagenet_full_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["deit_base_imagenet_full_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["deit_base_imagenet_full_seed-0"]["model_commitment"]["region_layer_map"] +) + + +model_registry["deit_large_imagenet_full_seed-0"] = lambda: ModelCommitment( + identifier="deit_large_imagenet_full_seed-0", + activations_model=get_model("deit_large_imagenet_full_seed-0"), + layers=MODEL_CONFIGS["deit_large_imagenet_full_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["deit_large_imagenet_full_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["deit_large_imagenet_full_seed-0"]["model_commitment"]["region_layer_map"] +) + + +model_registry["deit_small_imagenet_1_seed-0"] = lambda: ModelCommitment( + identifier="deit_small_imagenet_1_seed-0", + activations_model=get_model("deit_small_imagenet_1_seed-0"), + layers=MODEL_CONFIGS["deit_small_imagenet_1_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["deit_small_imagenet_1_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["deit_small_imagenet_1_seed-0"]["model_commitment"]["region_layer_map"] +) + + +model_registry["deit_small_imagenet_10_seed-0"] = lambda: ModelCommitment( + identifier="deit_small_imagenet_10_seed-0", + activations_model=get_model("deit_small_imagenet_10_seed-0"), + layers=MODEL_CONFIGS["deit_small_imagenet_10_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["deit_small_imagenet_10_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["deit_small_imagenet_10_seed-0"]["model_commitment"]["region_layer_map"] +) + + +model_registry["deit_small_imagenet_100_seed-0"] = lambda: ModelCommitment( + identifier="deit_small_imagenet_100_seed-0", + activations_model=get_model("deit_small_imagenet_100_seed-0"), + layers=MODEL_CONFIGS["deit_small_imagenet_100_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["deit_small_imagenet_100_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["deit_small_imagenet_100_seed-0"]["model_commitment"]["region_layer_map"] +) + + +model_registry["convnext_tiny_imagenet_full_seed-0"] = lambda: ModelCommitment( + identifier="convnext_tiny_imagenet_full_seed-0", + activations_model=get_model("convnext_tiny_imagenet_full_seed-0"), + layers=MODEL_CONFIGS["convnext_tiny_imagenet_full_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["convnext_tiny_imagenet_full_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["convnext_tiny_imagenet_full_seed-0"]["model_commitment"]["region_layer_map"] +) + + +model_registry["convnext_small_imagenet_full_seed-0"] = lambda: ModelCommitment( + identifier="convnext_small_imagenet_full_seed-0", + activations_model=get_model("convnext_small_imagenet_full_seed-0"), + layers=MODEL_CONFIGS["convnext_small_imagenet_full_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["convnext_small_imagenet_full_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["convnext_small_imagenet_full_seed-0"]["model_commitment"]["region_layer_map"] +) + + +model_registry["convnext_base_imagenet_full_seed-0"] = lambda: ModelCommitment( + 
identifier="convnext_base_imagenet_full_seed-0", + activations_model=get_model("convnext_base_imagenet_full_seed-0"), + layers=MODEL_CONFIGS["convnext_base_imagenet_full_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["convnext_base_imagenet_full_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["convnext_base_imagenet_full_seed-0"]["model_commitment"]["region_layer_map"] +) + + +model_registry["convnext_large_imagenet_full_seed-0"] = lambda: ModelCommitment( + identifier="convnext_large_imagenet_full_seed-0", + activations_model=get_model("convnext_large_imagenet_full_seed-0"), + layers=MODEL_CONFIGS["convnext_large_imagenet_full_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["convnext_large_imagenet_full_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["convnext_large_imagenet_full_seed-0"]["model_commitment"]["region_layer_map"] +) + + +model_registry["convnext_small_imagenet_1_seed-0"] = lambda: ModelCommitment( + identifier="convnext_small_imagenet_1_seed-0", + activations_model=get_model("convnext_small_imagenet_1_seed-0"), + layers=MODEL_CONFIGS["convnext_small_imagenet_1_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["convnext_small_imagenet_1_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["convnext_small_imagenet_1_seed-0"]["model_commitment"]["region_layer_map"] +) + + +model_registry["convnext_small_imagenet_10_seed-0"] = lambda: ModelCommitment( + identifier="convnext_small_imagenet_10_seed-0", + activations_model=get_model("convnext_small_imagenet_10_seed-0"), + layers=MODEL_CONFIGS["convnext_small_imagenet_10_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["convnext_small_imagenet_10_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["convnext_small_imagenet_10_seed-0"]["model_commitment"]["region_layer_map"] +) + + +model_registry["convnext_small_imagenet_100_seed-0"] = lambda: ModelCommitment( + identifier="convnext_small_imagenet_100_seed-0", + activations_model=get_model("convnext_small_imagenet_100_seed-0"), + layers=MODEL_CONFIGS["convnext_small_imagenet_100_seed-0"]["model_commitment"]["layers"], + behavioral_readout_layer=MODEL_CONFIGS["convnext_small_imagenet_100_seed-0"]["model_commitment"]["behavioral_readout_layer"], + region_layer_map=MODEL_CONFIGS["convnext_small_imagenet_100_seed-0"]["model_commitment"]["region_layer_map"] +) + + diff --git a/brainscore_vision/models/scaling_models/model.py b/brainscore_vision/models/scaling_models/model.py new file mode 100644 index 000000000..c7a470e6e --- /dev/null +++ b/brainscore_vision/models/scaling_models/model.py @@ -0,0 +1,148 @@ +import os +import functools +import json +from pathlib import Path +import ssl + +import torchvision.models +import torch + +from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper +from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images + +import timm +import numpy as np +import torchvision.transforms as T +from PIL import Image + +import albumentations as A +from albumentations.pytorch import ToTensorV2 + +# Disable SSL verification +ssl._create_default_https_context = ssl._create_unverified_context + +BIBTEX = """""" + + +with open(Path(__file__).parent / "model_configs.json", "r") as f: + MODEL_CONFIGS = json.load(f) + + +def 
load_image(image_filepath):
+ return Image.open(image_filepath).convert("RGB")
+
+
+def get_interpolation_mode(interpolation: str) -> int:
+ """Returns the interpolation mode for albumentations"""
+ if "linear" in interpolation or "bilinear" in interpolation:
+ return 1
+ elif "cubic" in interpolation or "bicubic" in interpolation:
+ return 2
+ else:
+ raise NotImplementedError(f"Interpolation mode {interpolation} not implemented")
+
+
+def custom_image_preprocess(
+ images,
+ resize_size: int,
+ crop_size: int,
+ interpolation: str,
+ transforms=None,
+):
+ if transforms is None:
+ interpolation = get_interpolation_mode(interpolation)
+ transforms = A.Compose(
+ [
+ A.Resize(resize_size, resize_size, p=1.0, interpolation=interpolation),
+ A.CenterCrop(crop_size, crop_size, p=1.0),
+ A.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
+ ToTensorV2(),
+ ]
+ )
+ if isinstance(transforms, T.Compose):
+ images = [transforms(image) for image in images]
+ images = [np.array(image) for image in images]
+ images = np.stack(images)
+ elif isinstance(transforms, A.Compose):
+ images = [transforms(image=np.array(image))["image"] for image in images]
+ images = np.stack(images)
+ else:
+ raise NotImplementedError(
+ f"Transform of type {type(transforms)} is not implemented"
+ )
+
+ return images
+
+
+def load_preprocess_images_custom(
+ image_filepaths, preprocess_images=custom_image_preprocess, **kwargs
+):
+ images = [load_image(image_filepath) for image_filepath in image_filepaths]
+ images = preprocess_images(images, **kwargs)
+ return images
+
+
+def get_model(model_id: str):
+
+ # Unpack model config
+ config = MODEL_CONFIGS[model_id]
+ model_name = config["model_name"]
+ model_id = config["model_id"]
+ resize_size = config["resize_size"]
+ crop_size = config["crop_size"]
+ interpolation = config["interpolation"]
+ num_classes = config["num_classes"]
+ ckpt_url = config["checkpoint_url"]
+ use_timm = config["use_timm"]
+ timm_model_name = config["timm_model_name"]
+ epoch = config["epoch"]
+ load_model_ema = config["load_model_ema"]
+ output_head = config["output_head"]
+ is_vit = config["is_vit"]
+
+ # Temporary fix for vit models
+ # See https://github.com/brain-score/vision/pull/1232
+ if is_vit:
+ os.environ['RESULTCACHING_DISABLE'] = 'brainscore_vision.model_helpers.activations.core.ActivationsExtractorHelper._from_paths_stored'
+
+
+ # Initialize model
+ if use_timm:
+ model = timm.create_model(timm_model_name, pretrained=False, num_classes=num_classes)
+ else:
+ model = eval(f"torchvision.models.{model_name}(weights=None)")
+ if num_classes != 1000:
+ exec(f'''{output_head} = torch.nn.Linear(
+ in_features={output_head}.in_features,
+ out_features=num_classes,
+ bias={output_head}.bias is not None,
+ )'''
+ )
+
+ # Load model weights
+ state_dict = torch.hub.load_state_dict_from_url(
+ ckpt_url,
+ check_hash=True,
+ file_name=f"{model_id}_ep{epoch}.pt",
+ map_location="cpu",
+ )
+ if load_model_ema:
+ state_dict = state_dict["state"]["model_ema_state_dict"]
+ else:
+ state_dict = state_dict["state"]["model"]
+ state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()}
+ model.load_state_dict(state_dict, strict=True)
+ print(f"Model loaded from {ckpt_url}")
+
+ # Wrap model
+ preprocessing = functools.partial(
+ load_preprocess_images_custom,
+ resize_size=resize_size,
+ crop_size=crop_size,
+ interpolation=interpolation,
+ transforms=None
+ )
+ wrapper = PytorchWrapper(
+ identifier=model_id, model=model, preprocessing=preprocessing
+ )
+ return wrapper
diff --git 
a/brainscore_vision/models/scaling_models/model_configs.json b/brainscore_vision/models/scaling_models/model_configs.json new file mode 100644 index 000000000..cc52e5370 --- /dev/null +++ b/brainscore_vision/models/scaling_models/model_configs.json @@ -0,0 +1,869 @@ +{ + "resnet18_imagenet_full": { + "model_name": "resnet18", + "model_id": "resnet18_imagenet_full", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.fc", + "model_commitment": { + "layers": [ + "layer1.0.bn1", + "layer2.0.bn2", + "layer3.0.conv2", + "layer4.0.bn1" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "layer1.0.bn1", + "V2": "layer3.0.conv2", + "V4": "layer2.0.bn2", + "IT": "layer4.0.bn1" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/resnet18_imagenet_full/ep100.pt" + }, + "resnet34_imagenet_full": { + "model_name": "resnet34", + "model_id": "resnet34_imagenet_full", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.fc", + "model_commitment": { + "layers": [ + "layer1.0.bn1", + "layer2.0.bn2", + "layer3.1.conv1", + "layer4.0.bn1" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "layer1.0.bn1", + "V2": "layer3.1.conv1", + "V4": "layer2.0.bn2", + "IT": "layer4.0.bn1" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/resnet34_imagenet_full/ep100.pt" + }, + "resnet50_imagenet_full": { + "model_name": "resnet50", + "model_id": "resnet50_imagenet_full", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.fc", + "model_commitment": { + "layers": [ + "layer1.0.conv1", + "layer3.0.conv1", + "layer3.5.bn3", + "layer4.0.relu" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "layer1.0.conv1", + "V2": "layer3.5.bn3", + "V4": "layer3.0.conv1", + "IT": "layer4.0.relu" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/resnet50_imagenet_full/ep100.pt" + }, + "resnet101_imagenet_full": { + "model_name": "resnet101", + "model_id": "resnet101_imagenet_full", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.fc", + "model_commitment": { + "layers": [ + "layer1.0.bn1", + "layer3.0.bn3", + "layer4.0.bn1", + "layer4.0.relu" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "layer1.0.bn1", + "V2": "layer4.0.bn1", + "V4": "layer3.0.bn3", + "IT": "layer4.0.relu" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/resnet101_imagenet_full/ep100.pt" + }, + "resnet152_imagenet_full": { + "model_name": "resnet152", + "model_id": "resnet152_imagenet_full", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + 
"timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.fc", + "model_commitment": { + "layers": [ + "layer1.0.bn1", + "layer3.0.bn3", + "layer3.3.bn3", + "layer3.34.bn3" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "layer1.0.bn1", + "V2": "layer3.3.bn3", + "V4": "layer3.0.bn3", + "IT": "layer3.34.bn3" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/resnet152_imagenet_full/ep100.pt" + }, + "resnet18_ecoset_full": { + "model_name": "resnet18", + "model_id": "resnet18_ecoset_full", + "num_classes": 565, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.fc", + "model_commitment": { + "layers": [ + "layer1.0.conv1", + "layer2.0.bn2", + "layer3.0.conv1", + "layer4.0.bn1" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "layer1.0.conv1", + "V2": "layer3.0.conv1", + "V4": "layer2.0.bn2", + "IT": "layer4.0.bn1" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/resnet18_ecoset_full/ep100.pt" + }, + "resnet34_ecoset_full": { + "model_name": "resnet34", + "model_id": "resnet34_ecoset_full", + "num_classes": 565, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.fc", + "model_commitment": { + "layers": [ + "layer1.0.bn1", + "layer3.0.conv1", + "layer3.1.conv1", + "layer4.0.conv1" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "layer1.0.bn1", + "V2": "layer3.1.conv1", + "V4": "layer3.0.conv1", + "IT": "layer4.0.conv1" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/resnet34_ecoset_full/ep100.pt" + }, + "resnet50_ecoset_full": { + "model_name": "resnet50", + "model_id": "resnet50_ecoset_full", + "num_classes": 565, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.fc", + "model_commitment": { + "layers": [ + "layer1.0.bn1", + "layer3.0.conv1", + "layer4.0.conv2", + "layer4.0.relu" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "layer1.0.bn1", + "V2": "layer4.0.conv2", + "V4": "layer3.0.conv1", + "IT": "layer4.0.relu" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/resnet50_ecoset_full/ep100.pt" + }, + "resnet101_ecoset_full": { + "model_name": "resnet101", + "model_id": "resnet101_ecoset_full", + "num_classes": 565, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.fc", + "model_commitment": { + "layers": [ + "layer1.0.bn1", + "layer3.0.bn3", + "layer3.4.relu", + "layer4.0.relu" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "layer1.0.bn1", + "V2": "layer3.4.relu", + "V4": "layer3.0.bn3", + "IT": "layer4.0.relu" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/resnet101_ecoset_full/ep100.pt" + }, + "resnet152_ecoset_full": { + 
"model_name": "resnet152", + "model_id": "resnet152_ecoset_full", + "num_classes": 565, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.fc", + "model_commitment": { + "layers": [ + "layer1.0.bn1", + "layer3.0.bn3", + "layer3.3.bn3", + "layer4.0.relu" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "layer1.0.bn1", + "V2": "layer3.3.bn3", + "V4": "layer3.0.bn3", + "IT": "layer4.0.relu" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/resnet152_ecoset_full/ep100.pt" + }, + "resnet50_imagenet_1_seed-0": { + "model_name": "resnet50", + "model_id": "resnet50_imagenet_1_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.fc", + "model_commitment": { + "layers": [ + "layer1.0.conv1", + "layer3.0.conv1", + "layer3.5.bn3", + "layer4.0.relu" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "layer1.0.conv1", + "V2": "layer3.5.bn3", + "V4": "layer3.0.conv1", + "IT": "layer4.0.relu" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/resnet50_imagenet_1_seed-0/ep100.pt" + }, + "resnet50_imagenet_10_seed-0": { + "model_name": "resnet50", + "model_id": "resnet50_imagenet_10_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.fc", + "model_commitment": { + "layers": [ + "layer1.0.conv1", + "layer3.0.conv1", + "layer3.5.bn3", + "layer4.0.relu" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "layer1.0.conv1", + "V2": "layer3.5.bn3", + "V4": "layer3.0.conv1", + "IT": "layer4.0.relu" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/resnet50_imagenet_10_seed-0/ep100.pt" + }, + "resnet50_imagenet_100_seed-0": { + "model_name": "resnet50", + "model_id": "resnet50_imagenet_100_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.fc", + "model_commitment": { + "layers": [ + "layer1.0.conv1", + "layer3.0.conv1", + "layer3.5.bn3", + "layer4.0.relu" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "layer1.0.conv1", + "V2": "layer3.5.bn3", + "V4": "layer3.0.conv1", + "IT": "layer4.0.relu" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/resnet50_imagenet_100_seed-0/ep100.pt" + }, + "efficientnet_b0_imagenet_full": { + "model_name": "efficientnet_b0", + "model_id": "efficientnet_b0_imagenet_full", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.classifier[1]", + "model_commitment": { + "layers": [ + "features.4.0.block.0.1", + "features.4.0.block.1.0", + "features.4.1.block.3.1", + 
"features.6.0.block.3.0" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "features.4.0.block.1.0", + "V2": "features.4.1.block.3.1", + "V4": "features.4.0.block.0.1", + "IT": "features.6.0.block.3.0" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/efficientnet_b0_imagenet_full/ep100.pt" + }, + "efficientnet_b1_imagenet_full": { + "model_name": "efficientnet_b1", + "model_id": "efficientnet_b1_imagenet_full", + "num_classes": 1000, + "resize_size": 255, + "crop_size": 240, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.classifier[1]", + "model_commitment": { + "layers": [ + "features.4.0.block.0.1", + "features.4.0.block.1.0", + "features.6.0.block.3.0" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "features.4.0.block.0.1", + "V2": "features.4.0.block.1.0", + "V4": "features.4.0.block.0.1", + "IT": "features.6.0.block.3.0" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/efficientnet_b1_imagenet_full/ep100.pt" + }, + "efficientnet_b2_imagenet_full": { + "model_name": "efficientnet_b2", + "model_id": "efficientnet_b2_imagenet_full", + "num_classes": 1000, + "resize_size": 288, + "crop_size": 288, + "interpolation": "bilinear", + "load_model_ema": false, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 100, + "output_head": "model.classifier[1]", + "model_commitment": { + "layers": [ + "features.4.0.block.3.0", + "features.5.0.block.1.0", + "features.6.0.block.3.0" + ], + "behavioral_readout_layer": "avgpool", + "region_layer_map": { + "V1": "features.4.0.block.3.0", + "V2": "features.5.0.block.1.0", + "V4": "features.4.0.block.3.0", + "IT": "features.6.0.block.3.0" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/efficientnet_b2_imagenet_full/ep100.pt" + }, + "deit_small_imagenet_full_seed-0": { + "model_name": "deit_small", + "model_id": "deit_small_imagenet_full_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bicubic", + "load_model_ema": true, + "use_timm": true, + "timm_model_name": "deit3_small_patch16_224", + "is_vit": true, + "epoch": 300, + "output_head": null, + "model_commitment": { + "layers": [ + "blocks.2.norm1", + "blocks.5.norm1", + "blocks.6.norm2", + "blocks.9.norm2" + ], + "behavioral_readout_layer": "fc_norm", + "region_layer_map": { + "V1": "blocks.2.norm1", + "V2": "blocks.6.norm2", + "V4": "blocks.5.norm1", + "IT": "blocks.9.norm2" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/deit_small_imagenet_full_seed-0/ep300.pt" + }, + "deit_base_imagenet_full_seed-0": { + "model_name": "deit_base", + "model_id": "deit_base_imagenet_full_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bicubic", + "load_model_ema": true, + "use_timm": true, + "timm_model_name": "deit3_base_patch16_224", + "is_vit": true, + "epoch": 300, + "output_head": null, + "model_commitment": { + "layers": [ + "blocks.3.mlp.act", + "blocks.3.mlp.fc1", + "blocks.8.norm2", + "blocks.9.norm2" + ], + "behavioral_readout_layer": "fc_norm", + "region_layer_map": { + "V1": "blocks.3.mlp.fc1", + "V2": "blocks.8.norm2", + "V4": "blocks.3.mlp.act", + "IT": "blocks.9.norm2" + } + }, + 
"checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/deit_base_imagenet_full_seed-0/ep300.pt" + }, + "deit_large_imagenet_full_seed-0": { + "model_name": "deit_large", + "model_id": "deit_large_imagenet_full_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bicubic", + "load_model_ema": true, + "use_timm": true, + "timm_model_name": "deit3_large_patch16_224", + "is_vit": true, + "epoch": 300, + "output_head": null, + "model_commitment": { + "layers": [ + "blocks.18.norm2", + "blocks.20.norm2", + "blocks.4.norm1", + "blocks.9.norm1" + ], + "behavioral_readout_layer": "fc_norm", + "region_layer_map": { + "V1": "blocks.4.norm1", + "V2": "blocks.18.norm2", + "V4": "blocks.9.norm1", + "IT": "blocks.20.norm2" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/deit_large_imagenet_full_seed-0/ep300.pt" + }, + "deit_small_imagenet_1_seed-0": { + "model_name": "deit_small", + "model_id": "deit_small_imagenet_1_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bicubic", + "load_model_ema": true, + "use_timm": true, + "timm_model_name": "deit3_small_patch16_224", + "is_vit": true, + "epoch": 300, + "output_head": null, + "model_commitment": { + "layers": [ + "blocks.2.norm1", + "blocks.5.norm1", + "blocks.6.norm2", + "blocks.9.norm2" + ], + "behavioral_readout_layer": "fc_norm", + "region_layer_map": { + "V1": "blocks.2.norm1", + "V2": "blocks.6.norm2", + "V4": "blocks.5.norm1", + "IT": "blocks.9.norm2" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/deit_small_imagenet_1_seed-0/ep300.pt" + }, + "deit_small_imagenet_10_seed-0": { + "model_name": "deit_small", + "model_id": "deit_small_imagenet_10_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bicubic", + "load_model_ema": true, + "use_timm": true, + "timm_model_name": "deit3_small_patch16_224", + "is_vit": true, + "epoch": 300, + "output_head": null, + "model_commitment": { + "layers": [ + "blocks.2.norm1", + "blocks.5.norm1", + "blocks.6.norm2", + "blocks.9.norm2" + ], + "behavioral_readout_layer": "fc_norm", + "region_layer_map": { + "V1": "blocks.2.norm1", + "V2": "blocks.6.norm2", + "V4": "blocks.5.norm1", + "IT": "blocks.9.norm2" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/deit_small_imagenet_10_seed-0/ep300.pt" + }, + "deit_small_imagenet_100_seed-0": { + "model_name": "deit_small", + "model_id": "deit_small_imagenet_100_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bicubic", + "load_model_ema": true, + "use_timm": true, + "timm_model_name": "deit3_small_patch16_224", + "is_vit": true, + "epoch": 300, + "output_head": null, + "model_commitment": { + "layers": [ + "blocks.2.norm1", + "blocks.5.norm1", + "blocks.6.norm2", + "blocks.9.norm2" + ], + "behavioral_readout_layer": "fc_norm", + "region_layer_map": { + "V1": "blocks.2.norm1", + "V2": "blocks.6.norm2", + "V4": "blocks.5.norm1", + "IT": "blocks.9.norm2" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/deit_small_imagenet_100_seed-0/ep300.pt" + }, + "convnext_tiny_imagenet_full_seed-0": { + "model_name": "convnext_tiny", + "model_id": "convnext_tiny_imagenet_full_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + 
"interpolation": "bicubic", + "load_model_ema": true, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 300, + "output_head": "model.classifier[2]", + "model_commitment": { + "layers": [ + "features.4.0", + "features.5.4.block.0", + "features.6.0" + ], + "behavioral_readout_layer": "classifier.1", + "region_layer_map": { + "V1": "features.6.0", + "V2": "features.5.4.block.0", + "V4": "features.4.0", + "IT": "features.5.4.block.0" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/convnext_tiny_imagenet_full_seed-0/ep300.pt" + }, + "convnext_small_imagenet_full_seed-0": { + "model_name": "convnext_small", + "model_id": "convnext_small_imagenet_full_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bicubic", + "load_model_ema": true, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 300, + "output_head": "model.classifier[2]", + "model_commitment": { + "layers": [ + "features.4.0", + "features.5.17.block.0", + "features.5.2.block.0", + "features.5.9.block.0" + ], + "behavioral_readout_layer": "classifier.1", + "region_layer_map": { + "V1": "features.5.2.block.0", + "V2": "features.5.17.block.0", + "V4": "features.4.0", + "IT": "features.5.9.block.0" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/convnext_small_imagenet_full_seed-0/ep300.pt" + }, + "convnext_base_imagenet_full_seed-0": { + "model_name": "convnext_base", + "model_id": "convnext_base_imagenet_full_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bicubic", + "load_model_ema": true, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 300, + "output_head": "model.classifier[2]", + "model_commitment": { + "layers": [ + "features.4.0", + "features.5.11.block.0", + "features.5.12.block.0", + "features.5.7.block.0" + ], + "behavioral_readout_layer": "classifier.1", + "region_layer_map": { + "V1": "features.5.7.block.0", + "V2": "features.5.12.block.0", + "V4": "features.4.0", + "IT": "features.5.11.block.0" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/convnext_base_imagenet_full_seed-0/ep300.pt" + }, + "convnext_large_imagenet_full_seed-0": { + "model_name": "convnext_large", + "model_id": "convnext_large_imagenet_full_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bicubic", + "load_model_ema": true, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 300, + "output_head": "model.classifier[2]", + "model_commitment": { + "layers": [ + "features.4.1", + "features.5.11.block.0", + "features.5.7.block.0", + "features.5.7.block.5" + ], + "behavioral_readout_layer": "classifier.1", + "region_layer_map": { + "V1": "features.5.7.block.5", + "V2": "features.5.7.block.0", + "V4": "features.4.1", + "IT": "features.5.11.block.0" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/convnext_large_imagenet_full_seed-0/ep300.pt" + }, + "convnext_small_imagenet_1_seed-0": { + "model_name": "convnext_small", + "model_id": "convnext_small_imagenet_1_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bicubic", + "load_model_ema": true, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 300, + "output_head": 
"model.classifier[2]", + "model_commitment": { + "layers": [ + "features.4.0", + "features.5.17.block.0", + "features.5.2.block.0", + "features.5.9.block.0" + ], + "behavioral_readout_layer": "classifier.1", + "region_layer_map": { + "V1": "features.5.2.block.0", + "V2": "features.5.17.block.0", + "V4": "features.4.0", + "IT": "features.5.9.block.0" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/convnext_small_imagenet_1_seed-0/ep300.pt" + }, + "convnext_small_imagenet_10_seed-0": { + "model_name": "convnext_small", + "model_id": "convnext_small_imagenet_10_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bicubic", + "load_model_ema": true, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 300, + "output_head": "model.classifier[2]", + "model_commitment": { + "layers": [ + "features.4.0", + "features.5.17.block.0", + "features.5.2.block.0", + "features.5.9.block.0" + ], + "behavioral_readout_layer": "classifier.1", + "region_layer_map": { + "V1": "features.5.2.block.0", + "V2": "features.5.17.block.0", + "V4": "features.4.0", + "IT": "features.5.9.block.0" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/convnext_small_imagenet_10_seed-0/ep300.pt" + }, + "convnext_small_imagenet_100_seed-0": { + "model_name": "convnext_small", + "model_id": "convnext_small_imagenet_100_seed-0", + "num_classes": 1000, + "resize_size": 256, + "crop_size": 224, + "interpolation": "bicubic", + "load_model_ema": true, + "use_timm": false, + "timm_model_name": null, + "is_vit": false, + "epoch": 300, + "output_head": "model.classifier[2]", + "model_commitment": { + "layers": [ + "features.4.0", + "features.5.17.block.0", + "features.5.2.block.0", + "features.5.9.block.0" + ], + "behavioral_readout_layer": "classifier.1", + "region_layer_map": { + "V1": "features.5.2.block.0", + "V2": "features.5.17.block.0", + "V4": "features.4.0", + "IT": "features.5.9.block.0" + } + }, + "checkpoint_url": "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/convnext_small_imagenet_100_seed-0/ep300.pt" + } +} \ No newline at end of file diff --git a/brainscore_vision/models/scaling_models/requirements.txt b/brainscore_vision/models/scaling_models/requirements.txt new file mode 100644 index 000000000..af6389b09 --- /dev/null +++ b/brainscore_vision/models/scaling_models/requirements.txt @@ -0,0 +1,4 @@ +torch +torchvision +albumentations +timm diff --git a/brainscore_vision/models/scaling_models/test.py b/brainscore_vision/models/scaling_models/test.py new file mode 100644 index 000000000..e69de29bb diff --git a/brainscore_vision/models/temporal_model_AVID-CMA/__init__.py b/brainscore_vision/models/temporal_model_AVID_CMA/__init__.py similarity index 100% rename from brainscore_vision/models/temporal_model_AVID-CMA/__init__.py rename to brainscore_vision/models/temporal_model_AVID_CMA/__init__.py diff --git a/brainscore_vision/models/temporal_model_AVID-CMA/model.py b/brainscore_vision/models/temporal_model_AVID_CMA/model.py similarity index 94% rename from brainscore_vision/models/temporal_model_AVID-CMA/model.py rename to brainscore_vision/models/temporal_model_AVID_CMA/model.py index 60d91f690..a67eb3b43 100644 --- a/brainscore_vision/models/temporal_model_AVID-CMA/model.py +++ b/brainscore_vision/models/temporal_model_AVID_CMA/model.py @@ -29,7 +29,7 @@ def get_model(identifier): cfg_path = os.path.join(HOME, 
"configs/main/avid-cma/audioset/InstX-N1024-PosW-N64-Top32.yaml") weight_path = load_weight_file( bucket="brainscore-vision", - relative_path="temporal_model_AVID-CMA/AVID-CMA_Audioset_InstX-N1024-PosW-N64-Top32_checkpoint.pth.tar", + relative_path="temporal_model_AVID_CMA/AVID-CMA_Audioset_InstX-N1024-PosW-N64-Top32_checkpoint.pth.tar", version_id="jSaZgbUohM0ZeoEUUKZiLBo6iz_v8VvQ", sha1="9db5eba9aab6bdbb74025be57ab532df808fe3f6" ) @@ -38,7 +38,7 @@ def get_model(identifier): cfg_path = os.path.join(HOME, "configs/main/avid/kinetics/Cross-N1024.yaml") weight_path = load_weight_file( bucket="brainscore-vision", - relative_path="temporal_model_AVID-CMA/AVID_Kinetics_Cross-N1024_checkpoint.pth.tar", + relative_path="temporal_model_AVID_CMA/AVID_Kinetics_Cross-N1024_checkpoint.pth.tar", version_id="XyKt0UOUFsuuyrl6ZREivK8FadRPx34u", sha1="d3a04f856d29421ba8de37808593a3fad4d4794f" ) @@ -47,7 +47,7 @@ def get_model(identifier): cfg_path = os.path.join(HOME, "configs/main/avid/audioset/Cross-N1024.yaml") weight_path = load_weight_file( bucket="brainscore-vision", - relative_path="temporal_model_AVID-CMA/AVID_Audioset_Cross-N1024_checkpoint.pth.tar", + relative_path="temporal_model_AVID_CMA/AVID_Audioset_Cross-N1024_checkpoint.pth.tar", version_id="0Sxuhn8LsYXQC4FnPfJ7rw7uU6kDlKgc", sha1="b48d8428a1a2526ccca070f810333df18bfce5fd" ) diff --git a/brainscore_vision/models/temporal_model_AVID-CMA/requirements.txt b/brainscore_vision/models/temporal_model_AVID_CMA/requirements.txt similarity index 100% rename from brainscore_vision/models/temporal_model_AVID-CMA/requirements.txt rename to brainscore_vision/models/temporal_model_AVID_CMA/requirements.txt diff --git a/brainscore_vision/models/temporal_model_AVID-CMA/test.py b/brainscore_vision/models/temporal_model_AVID_CMA/test.py similarity index 100% rename from brainscore_vision/models/temporal_model_AVID-CMA/test.py rename to brainscore_vision/models/temporal_model_AVID_CMA/test.py diff --git a/brainscore_vision/models/temporal_model_GDT/model.py b/brainscore_vision/models/temporal_model_GDT/model.py index 624a5b29b..9a0c057c7 100644 --- a/brainscore_vision/models/temporal_model_GDT/model.py +++ b/brainscore_vision/models/temporal_model_GDT/model.py @@ -69,4 +69,4 @@ def get_model(identifier): # "base.fc": "C", # no fc } - return PytorchWrapper(identifier, model, transform_video, fps=30, layer_activation_format=layer_activation_format) \ No newline at end of file + return PytorchWrapper(identifier, model, transform_video, fps=30, layer_activation_format=layer_activation_format) diff --git a/brainscore_vision/models/temporal_model_VideoMAEv2/model.py b/brainscore_vision/models/temporal_model_VideoMAEv2/model.py index 7e785513e..355b8e8b2 100644 --- a/brainscore_vision/models/temporal_model_VideoMAEv2/model.py +++ b/brainscore_vision/models/temporal_model_VideoMAEv2/model.py @@ -54,7 +54,7 @@ def get_model(identifier): bucket="brainscore-vision", relative_path="temporal_model_VideoMAEv2/vit_g_hybrid_pt_1200e.pth", version_id="TxtkfbeMV105dzpzTwi0Kn5glnvQvIrq", - sha1="9048f2bc0b0c7ba4d0e5228f3a7c0bef4dbaca69", + sha1="9048f2bc0b0c7ba4d0e5228f3a7c0bef4dbaca69" ) num_blocks = 40 feature_map_size = 16 diff --git a/brainscore_vision/models/temporal_model_openstl/__init__.py b/brainscore_vision/models/temporal_model_openstl/__init__.py index 2b49cc845..9ea9b66b1 100644 --- a/brainscore_vision/models/temporal_model_openstl/__init__.py +++ b/brainscore_vision/models/temporal_model_openstl/__init__.py @@ -13,7 +13,6 @@ def commit_model(identifier): 
model_registry["ConvLSTM"] = lambda: commit_model("ConvLSTM") model_registry["PredRNN"] = lambda: commit_model("PredRNN") -# model_registry["PredNet"] = lambda: commit_model("PredNet") model_registry["SimVP"] = lambda: commit_model("SimVP") model_registry["TAU"] = lambda: commit_model("TAU") model_registry["MIM"] = lambda: commit_model("MIM") diff --git a/brainscore_vision/models/temporal_model_openstl/model.py b/brainscore_vision/models/temporal_model_openstl/model.py index aed3e0464..de5c93803 100644 --- a/brainscore_vision/models/temporal_model_openstl/model.py +++ b/brainscore_vision/models/temporal_model_openstl/model.py @@ -105,23 +105,6 @@ def process_output(layer, layer_name, inputs, output): kwargs = {} weight_name = "kitticaltech_predrnn_one_ep100.pth" - elif identifier == "PredNet": - layer_activation_format = { - **{f"layer{i}": "TCHW" for i in range(4)}, - "layer5": "TCHW" - } - - def process_output(layer, layer_name, inputs, output): - if layer_name.startswith("cell_list"): - h, c = output - return c - else: - return output - - wrapper_cls = LSTMWrapper - kwargs = {} - weight_name = "kitticaltech_prednet_one_ep100.pth" - elif identifier == "ConvLSTM": layer_activation_format = { **{f"cell_list.{i}": "TCHW" for i in range(4)}, @@ -220,4 +203,4 @@ def transform_video_simvp(video): return wrapper_cls(identifier, model, transform_video, fps=KITTI_FPS, layer_activation_format=layer_activation_format, - process_output=process_output, **kwargs) \ No newline at end of file + process_output=process_output, **kwargs) diff --git a/brainscore_vision/models/temporal_model_openstl/test.py b/brainscore_vision/models/temporal_model_openstl/test.py index 4d52b76ce..c4090a314 100644 --- a/brainscore_vision/models/temporal_model_openstl/test.py +++ b/brainscore_vision/models/temporal_model_openstl/test.py @@ -6,7 +6,6 @@ model_list = [ "ConvLSTM", "PredRNN", - "PredNet", "SimVP", "TAU", "MIM" @@ -17,4 +16,4 @@ @pytest.mark.parametrize("model_identifier", model_list) def test_load(model_identifier): model = load_model(model_identifier) - assert model is not None \ No newline at end of file + assert model is not None diff --git a/brainscore_vision/models/tv_efficientnet-b1/__init__.py b/brainscore_vision/models/tv_efficientnet_b1/__init__.py similarity index 100% rename from brainscore_vision/models/tv_efficientnet-b1/__init__.py rename to brainscore_vision/models/tv_efficientnet_b1/__init__.py diff --git a/brainscore_vision/models/tv_efficientnet-b1/model.py b/brainscore_vision/models/tv_efficientnet_b1/model.py similarity index 100% rename from brainscore_vision/models/tv_efficientnet-b1/model.py rename to brainscore_vision/models/tv_efficientnet_b1/model.py diff --git a/brainscore_vision/models/tv_efficientnet_b1/setup.py b/brainscore_vision/models/tv_efficientnet_b1/setup.py new file mode 100644 index 000000000..c286567f5 --- /dev/null +++ b/brainscore_vision/models/tv_efficientnet_b1/setup.py @@ -0,0 +1,24 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +from setuptools import setup, find_packages + +requirements = [ "torchvision", + "torch" +] + +setup( + packages=find_packages(exclude=['tests']), + include_package_data=True, + install_requires=requirements, + license="MIT license", + zip_safe=False, + keywords='brain-score template', + classifiers=[ + 'Development Status :: 2 - Pre-Alpha', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: MIT License', + 'Natural Language :: English', + ], + test_suite='tests', +) diff --git 
a/brainscore_vision/models/tv_efficientnet_b1/test.py b/brainscore_vision/models/tv_efficientnet_b1/test.py new file mode 100644 index 000000000..e594ba9e1 --- /dev/null +++ b/brainscore_vision/models/tv_efficientnet_b1/test.py @@ -0,0 +1 @@ +# Left empty as part of 2023 models migration diff --git a/brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/setup.py b/brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/setup.py +++ b/brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/setup.py b/brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/setup.py +++ b/brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/environment_lock.yml b/environment_lock.yml new file mode 100644 index 000000000..9847267d3 --- /dev/null +++ b/environment_lock.yml @@ -0,0 +1,182 @@ +# This environment_lock file is associated with the move to brainscore_vision 2.1.0. This lock includes all testing dependencies and dependencies +# from adjacent repositories. + +name: brainscore_env +channels: + - defaults + - conda-forge +dependencies: + - bzip2=1.0.8 + - ca-certificates=2024.7.2 + - libffi=3.4.4 + - ncurses=6.4 + - openssl=3.0.14 + - pip=24.2 + - python=3.11.9 + - readline=8.2 + - setuptools=72.1.0 + - sqlite=3.45.3 + - tk=8.6.14 + - wheel=0.43.0 + - xz=5.4.6 + - zlib=1.2.13 + - pip: + - anyio==4.4.0 + - appnope==0.1.4 + - argon2-cffi==23.1.0 + - argon2-cffi-bindings==21.2.0 + - arrow==1.3.0 + - asttokens==2.4.1 + - async-lru==2.0.4 + - attrs==24.2.0 + - babel==2.16.0 + - beautifulsoup4==4.12.3 + - bleach==6.1.0 + - boto3==1.35.3 + - botocore==1.35.3 + - brainio @ git+https://github.com/brain-score/brainio.git@main + - brainscore_core @ git+https://github.com/brain-score/core@main + - brainscore-vision @ git+https://github.com/brain-score/vision.git@main + - certifi==2024.7.4 + - cffi==1.17.0 + - cftime==1.6.4 + - charset-normalizer==3.3.2 + - click==8.1.7 + - cloudpickle==3.0.0 + - comm==0.2.2 + - contourpy==1.2.1 + - cycler==0.12.1 + - dask==2024.8.1 + - debugpy==1.8.5 + - decorator==5.1.1 + - defusedxml==0.7.1 + - entrypoints==0.4 + - eva-decord==0.6.1 + - executing==2.0.1 + - fastjsonschema==2.20.0 + - filelock==3.15.4 + - fire==0.6.0 + - fonttools==4.53.1 + - fqdn==1.5.1 + - fsspec==2024.6.1 + - gitdb==4.0.11 + - gitpython==3.1.43 + - h11==0.14.0 + - h5py==3.11.0 + - httpcore==1.0.5 + - httpx==0.27.0 + - idna==3.7 + - importlib-metadata==4.13.0 + - iniconfig==2.0.0 + - ipykernel==6.29.5 + - ipython==8.26.0 + - ipywidgets==8.1.5 + - isoduration==20.11.0 + - jedi==0.19.1 + - jinja2==3.1.4 + - jmespath==1.0.1 + - joblib==1.4.2 + - json5==0.9.25 + - jsonpointer==3.0.0 + - jsonschema==4.23.0 + - jsonschema-specifications==2023.12.1 + - jupyter==1.0.0 + - jupyter-client==8.6.2 + - jupyter-console==6.6.3 + - jupyter-core==5.7.2 + - 
jupyter-events==0.10.0 + - jupyter-lsp==2.2.5 + - jupyter-server==2.14.2 + - jupyter-server-terminals==0.5.3 + - jupyterlab==4.2.4 + - jupyterlab-pygments==0.3.0 + - jupyterlab-server==2.27.3 + - jupyterlab-widgets==3.0.13 + - kiwisolver==1.4.5 + - latexcodec==3.0.0 + - locket==1.0.0 + - markupsafe==2.1.5 + - matplotlib==3.9.2 + - matplotlib-inline==0.1.7 + - mistune==3.0.2 + - mpmath==1.3.0 + - nbclient==0.10.0 + - nbconvert==7.16.4 + - nbformat==5.10.4 + - nest-asyncio==1.6.0 + - netcdf4==1.7.1.post1 + - networkx==3.3 + - notebook==7.2.1 + - notebook-shim==0.2.4 + - numpy==1.26.4 + - opencv-python==4.10.0.84 + - overrides==7.7.0 + - packaging==24.1 + - pandas==2.2.2 + - pandocfilters==1.5.1 + - parso==0.8.4 + - partd==1.4.2 + - peewee==3.17.6 + - pexpect==4.9.0 + - pillow==10.4.0 + - platformdirs==4.2.2 + - pluggy==1.5.0 + - prometheus-client==0.20.0 + - prompt-toolkit==3.0.47 + - psutil==6.0.0 + - psycopg2-binary==2.9.9 + - ptyprocess==0.7.0 + - pure-eval==0.2.3 + - pybtex==0.24.0 + - pycparser==2.22 + - pygments==2.18.0 + - pyparsing==3.1.2 + - pytest==8.3.2 + - pytest-check==2.3.1 + - pytest-mock==3.14.0 + - pytest-timeout==2.3.1 + - python-dateutil==2.9.0.post0 + - python-json-logger==2.0.7 + - pytz==2024.1 + - pyyaml==6.0.2 + - pyzmq==26.2.0 + - qtconsole==5.5.2 + - qtpy==2.4.1 + - referencing==0.35.1 + - requests==2.32.3 + - result_caching @ git+https://github.com/brain-score/result_caching@master + - rfc3339-validator==0.1.4 + - rfc3986-validator==0.1.1 + - rpds-py==0.20.0 + - s3transfer==0.10.2 + - scikit-learn==1.5.1 + - scipy==1.14.1 + - send2trash==1.8.3 + - six==1.16.0 + - smmap==5.0.1 + - sniffio==1.3.1 + - soupsieve==2.6 + - stack-data==0.6.3 + - sympy==1.13.2 + - termcolor==2.4.0 + - terminado==0.18.1 + - threadpoolctl==3.5.0 + - tinycss2==1.3.0 + - toolz==0.12.1 + - torch==2.4.0 + - torchvision==0.19.0 + - tornado==6.4.1 + - tqdm==4.66.5 + - traitlets==5.14.3 + - types-python-dateutil==2.9.0.20240821 + - typing-extensions==4.12.2 + - tzdata==2024.1 + - uri-template==1.3.0 + - urllib3==2.2.2 + - wcwidth==0.2.13 + - webcolors==24.8.0 + - webencodings==0.5.1 + - websocket-client==1.8.0 + - widgetsnbextension==4.0.13 + - xarray==2022.3.0 + - zipp==3.20.0 diff --git a/pyproject.toml b/pyproject.toml index 3b28322e9..83ab968f7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,17 +4,17 @@ [project] name = "brainscore_vision" -version = "2.0" +version = "2.1" description = "The Brain-Score library enables model comparisons to behavioral and neural experiments" authors = [] license = { 'file' = 'LICENSE' } readme = "README.md" -requires-python = ">=3.7" +requires-python = ">=3.11" dependencies = [ - "numpy>=1.17", - "brainscore_core @ git+https://github.com/brain-score/core", - "result_caching @ git+https://github.com/brain-score/result_caching", + "numpy<2", + "brainscore-core", + "result-caching", "importlib-metadata<5", # workaround to https://github.com/brain-score/brainio/issues/28 "scikit-learn", # for metric_helpers/transformations.py cross-validation "scipy", # for benchmark_helpers/properties_common.py @@ -28,8 +28,8 @@ dependencies = [ "peewee", "psycopg2-binary", "networkx", - "decord", - "psutil" + "eva-decord", + "psutil", ] [project.optional-dependencies] @@ -40,9 +40,6 @@ test = [ "pytest-timeout", "torch", "torchvision", - "tensorflow==1.15", - "keras==2.3.1", - "protobuf<=3.20", # https://protobuf.dev/news/2022-05-06/#python-updates "matplotlib", # for examples "pytest-mock", ] diff --git a/tests/test_metric_helpers/test_temporal.py 
diff --git a/tests/test_metric_helpers/test_temporal.py b/tests/test_metric_helpers/test_temporal.py
new file mode 100644
index 000000000..64dffe8de
--- /dev/null
+++ b/tests/test_metric_helpers/test_temporal.py
@@ -0,0 +1,80 @@
+import numpy as np
+import scipy.stats
+from pytest import approx
+from sklearn.linear_model import LinearRegression
+
+from brainio.assemblies import NeuroidAssembly
+from brainscore_vision.metric_helpers.xarray_utils import XarrayRegression, XarrayCorrelation
+from brainscore_vision.metric_helpers.temporal import PerTime, SpanTime, PerTimeRegression, SpanTimeRegression
+
+
+class TestMetricHelpers:
+    def test_pertime_ops(self):
+        jumbled_source = NeuroidAssembly(np.random.rand(500, 10, 20),
+                                         coords={'stimulus_id': ('presentation', list(reversed(range(500)))),
+                                                 'image_meta': ('presentation', [0] * 500),
+                                                 'neuroid_id': ('neuroid', list(reversed(range(10)))),
+                                                 'neuroid_meta': ('neuroid', [0] * 10),
+                                                 'time_bin_start': ('time_bin', np.arange(0, 400, 20)),
+                                                 'time_bin_end': ('time_bin', np.arange(20, 420, 20))},
+                                         dims=['presentation', 'neuroid', 'time_bin'])
+        mean_neuroid = lambda arr: arr.mean('neuroid')
+        pertime_mean_neuroid = PerTime(mean_neuroid)
+        output = pertime_mean_neuroid(jumbled_source)
+        output = output.transpose('presentation', 'time_bin')
+        target = jumbled_source.transpose('presentation', 'time_bin', 'neuroid').mean('neuroid')
+        assert (output == approx(target)).all().item()
+
+    def test_spantime_ops(self):
+        jumbled_source = NeuroidAssembly(np.random.rand(500, 10, 20),
+                                         coords={'stimulus_id': ('presentation', list(reversed(range(500)))),
+                                                 'image_meta': ('presentation', [0] * 500),
+                                                 'neuroid_id': ('neuroid', list(reversed(range(10)))),
+                                                 'neuroid_meta': ('neuroid', [0] * 10),
+                                                 'time_bin_start': ('time_bin', np.arange(0, 400, 20)),
+                                                 'time_bin_end': ('time_bin', np.arange(20, 420, 20))},
+                                         dims=['presentation', 'neuroid', 'time_bin'])
+        mean_presentation = lambda arr: arr.mean("presentation")
+        spantime_mean_presentation = SpanTime(mean_presentation)
+        output = spantime_mean_presentation(jumbled_source)
+        output = output.transpose('neuroid')
+        target = jumbled_source.transpose('presentation', 'time_bin', 'neuroid').mean('presentation').mean('time_bin')
+        assert (output == approx(target)).all().item()
+
+    def test_pertime_regression(self):
+        jumbled_source = NeuroidAssembly(np.random.rand(500, 10, 20),
+                                         coords={'stimulus_id': ('presentation', list(reversed(range(500)))),
+                                                 'image_meta': ('presentation', [0] * 500),
+                                                 'neuroid_id': ('neuroid', list(reversed(range(10)))),
+                                                 'neuroid_meta': ('neuroid', [0] * 10),
+                                                 'time_bin_start': ('time_bin', np.arange(0, 400, 20)),
+                                                 'time_bin_end': ('time_bin', np.arange(20, 420, 20))},
+                                         dims=['presentation', 'neuroid', 'time_bin'])
+        target = jumbled_source.sortby(['stimulus_id', 'neuroid_id'])
+        pertime_regression = PerTimeRegression(XarrayRegression(LinearRegression()))
+        pertime_regression.fit(jumbled_source, target)
+        prediction = pertime_regression.predict(jumbled_source)
+        prediction = prediction.transpose(*target.dims)
+        # do not test for alignment of metadata - it is only important that the data is well-aligned with the metadata.
+        np.testing.assert_array_almost_equal(prediction.sortby(['stimulus_id', 'neuroid_id', 'time_bin']).values,
+                                             target.sortby(['stimulus_id', 'neuroid_id', 'time_bin']).values)
+
+
+    def test_spantime_regression(self):
+        jumbled_source = NeuroidAssembly(np.random.rand(500, 10, 20),
+                                         coords={'stimulus_id': ('presentation', list(reversed(range(500)))),
+                                                 'image_meta': ('presentation', [0] * 500),
+                                                 'neuroid_id': ('neuroid', list(reversed(range(10)))),
+                                                 'neuroid_meta': ('neuroid', [0] * 10),
+                                                 'time_bin_start': ('time_bin', np.arange(0, 400, 20)),
+                                                 'time_bin_end': ('time_bin', np.arange(20, 420, 20))},
+                                         dims=['presentation', 'neuroid', 'time_bin'])
+        target = jumbled_source.sortby(['stimulus_id', 'neuroid_id'])
+        spantime_regression = SpanTimeRegression(XarrayRegression(LinearRegression()))
+        spantime_regression.fit(jumbled_source, target)
+        prediction = spantime_regression.predict(jumbled_source)
+        prediction = prediction.transpose(*target.dims)
+        # do not test for alignment of metadata - it is only important that the data is well-aligned with the metadata.
+        np.testing.assert_array_almost_equal(prediction.sortby(['stimulus_id', 'neuroid_id', 'time_bin']).values,
+                                             target.sortby(['stimulus_id', 'neuroid_id', 'time_bin']).values)
+
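For readers unfamiliar with these helpers, the new tests above pin down the intended semantics: PerTime applies a wrapped operation separately to every time_bin slice, and, judging from the names and the SpanTime behavior tested above, the SpanTime variants appear to pool all time bins together (for example, one regression fit across presentations and time bins rather than one fit per bin). The following minimal sketch mirrors only the calls exercised in the tests; the assembly layout is copied from the fixtures above and nothing here goes beyond what the tests show.

import numpy as np
from sklearn.linear_model import LinearRegression

from brainio.assemblies import NeuroidAssembly
from brainscore_vision.metric_helpers.xarray_utils import XarrayRegression
from brainscore_vision.metric_helpers.temporal import PerTime, SpanTimeRegression

# a small (presentation x neuroid x time_bin) assembly, same layout as the test fixtures above
assembly = NeuroidAssembly(np.random.rand(50, 10, 20),
                           coords={'stimulus_id': ('presentation', list(range(50))),
                                   'image_meta': ('presentation', [0] * 50),
                                   'neuroid_id': ('neuroid', list(range(10))),
                                   'neuroid_meta': ('neuroid', [0] * 10),
                                   'time_bin_start': ('time_bin', np.arange(0, 400, 20)),
                                   'time_bin_end': ('time_bin', np.arange(20, 420, 20))},
                           dims=['presentation', 'neuroid', 'time_bin'])

# PerTime: the wrapped operation runs once per time_bin, so the time_bin dimension survives
per_time_mean = PerTime(lambda arr: arr.mean('neuroid'))(assembly)  # dims: presentation x time_bin

# SpanTimeRegression: a single regression is fit across all time bins at once
regression = SpanTimeRegression(XarrayRegression(LinearRegression()))
regression.fit(assembly, assembly)
prediction = regression.predict(assembly)  # presentation x neuroid x time_bin, possibly reordered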
diff --git a/tests/test_model_helpers/activations/test___init__.py b/tests/test_model_helpers/activations/test___init__.py
index 99b36cb98..9bd012348 100644
--- a/tests/test_model_helpers/activations/test___init__.py
+++ b/tests/test_model_helpers/activations/test___init__.py
@@ -6,7 +6,7 @@
 from pathlib import Path
 
 from brainio.stimuli import StimulusSet
-from brainscore_vision.model_helpers.activations import KerasWrapper, PytorchWrapper, TensorflowSlimWrapper
+from brainscore_vision.model_helpers.activations import PytorchWrapper
 from brainscore_vision.model_helpers.activations.core import flatten
 from brainscore_vision.model_helpers.activations.pca import LayerPCA
@@ -93,74 +93,10 @@ def forward(self, x):
     return PytorchWrapper(model=MyTransformer(), preprocessing=preprocessing)
 
 
-def keras_vgg19():
-    import keras
-    from keras.applications.vgg19 import VGG19, preprocess_input
-    from brainscore_vision.model_helpers.activations.keras import load_images
-    keras.backend.clear_session()
-    preprocessing = lambda image_filepaths: preprocess_input(load_images(image_filepaths, image_size=224))
-    return KerasWrapper(model=VGG19(), preprocessing=preprocessing)
-
-
-def tfslim_custom():
-    from brainscore_vision.model_helpers.activations.tensorflow import load_resize_image
-    import tensorflow as tf
-    slim = tf.contrib.slim
-    tf.compat.v1.reset_default_graph()
-
-    image_size = 224
-    placeholder = tf.compat.v1.placeholder(dtype=tf.string, shape=[64])
-    preprocess = lambda image_path: load_resize_image(image_path, image_size)
-    preprocess = tf.map_fn(preprocess, placeholder, dtype=tf.float32)
-
-    with tf.compat.v1.variable_scope('my_model', values=[preprocess]) as sc:
-        end_points_collection = sc.original_name_scope + '_end_points'
-        # Collect outputs for conv2d, fully_connected and max_pool2d.
-        with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
-                            outputs_collections=[end_points_collection]):
-            net = slim.conv2d(preprocess, 64, [11, 11], 4, padding='VALID', scope='conv1')
-            net = slim.max_pool2d(net, [5, 5], 5, scope='pool1')
-            net = slim.max_pool2d(net, [3, 3], 2, scope='pool2')
-            net = slim.flatten(net, scope='flatten')
-            net = slim.fully_connected(net, 1000, scope='logits')
-            endpoints = slim.utils.convert_collection_to_dict(end_points_collection)
-
-    session = tf.compat.v1.Session()
-    session.run(tf.compat.v1.initialize_all_variables())
-    return TensorflowSlimWrapper(identifier='tf-custom', labels_offset=0,
-                                 endpoints=endpoints, inputs=placeholder, session=session)
-
-
-def tfslim_vgg16():
-    import tensorflow as tf
-    from nets import nets_factory
-    from preprocessing import vgg_preprocessing
-    from brainscore_vision.model_helpers.activations.tensorflow import load_resize_image
-    tf.compat.v1.reset_default_graph()
-
-    image_size = 224
-    placeholder = tf.compat.v1.placeholder(dtype=tf.string, shape=[64])
-    preprocess_image = lambda image: vgg_preprocessing.preprocess_image(
-        image, image_size, image_size, resize_side_min=image_size)
-    preprocess = lambda image_path: preprocess_image(load_resize_image(image_path, image_size))
-    preprocess = tf.map_fn(preprocess, placeholder, dtype=tf.float32)
-
-    model_ctr = nets_factory.get_network_fn('vgg_16', num_classes=1001, is_training=False)
-    logits, endpoints = model_ctr(preprocess)
-
-    session = tf.compat.v1.Session()
-    session.run(tf.compat.v1.initialize_all_variables())
-    return TensorflowSlimWrapper(identifier='tf-vgg16', labels_offset=1,
-                                 endpoints=endpoints, inputs=placeholder, session=session)
-
-
 models_layers = [
     pytest.param(pytorch_custom, ['linear', 'relu2']),
     pytest.param(pytorch_alexnet, ['features.12', 'classifier.5'], marks=pytest.mark.memory_intense),
     pytest.param(pytorch_transformer_substitute, ['relu1']),
-    pytest.param(keras_vgg19, ['block3_pool'], marks=pytest.mark.memory_intense),
-    pytest.param(tfslim_custom, ['my_model/pool2'], marks=pytest.mark.memory_intense),
-    pytest.param(tfslim_vgg16, ['vgg_16/pool5'], marks=pytest.mark.memory_intense),
 ]
 
 # exact microsaccades for pytorch_alexnet, grayscale.png, for 1 and 10 number_of_trials
@@ -366,8 +302,6 @@ def test_exact_microsaccades(number_of_trials):
 @pytest.mark.memory_intense
 @pytest.mark.parametrize(["model_ctr", "internal_layers"], [
     (pytorch_alexnet, ['features.12', 'classifier.5']),
-    (keras_vgg19, ['block3_pool']),
-    (tfslim_vgg16, ['vgg_16/pool5']),
 ])
 def test_mixed_layer_logits(model_ctr, internal_layers):
     stimuli_paths = [os.path.join(os.path.dirname(__file__), 'rgb.jpg')]
@@ -384,7 +318,6 @@ def test_mixed_layer_logits(model_ctr, internal_layers):
 @pytest.mark.parametrize(["model_ctr", "expected_identifier"], [
     (pytorch_custom, 'MyModel'),
     (pytorch_alexnet, 'AlexNet'),
-    (keras_vgg19, 'vgg19'),
 ])
 def test_infer_identifier(model_ctr, expected_identifier):
     model = model_ctr()
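With the Keras and TF-slim fixtures removed, PytorchWrapper is the only activations wrapper this test module still exercises. The following is a minimal sketch of the surviving pattern, for orientation only: the load_preprocess_images import path and the exact call signature are assumptions based on common brainscore_vision usage, not taken from this diff.

import functools

import torchvision.models

from brainscore_vision.model_helpers.activations import PytorchWrapper
from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images  # assumed helper location

# a torch module plus a filepath-based preprocessing function, as in the remaining pytorch fixtures
preprocessing = functools.partial(load_preprocess_images, image_size=224)
wrapper = PytorchWrapper(model=torchvision.models.alexnet(weights=None), preprocessing=preprocessing)

# layer names such as 'features.12' and 'classifier.5' are the ones parametrized in the tests above
activations = wrapper(['rgb.jpg'], layers=['features.12', 'classifier.5'])  # assumed call signature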