From 7349f4a73f69c040b8c544850dcbd98fdf16b54c Mon Sep 17 00:00:00 2001
From: Kartik Pradeepan
Date: Tue, 15 Oct 2024 11:51:49 -0400
Subject: [PATCH] Synchronize develop with master (#1349)

* Revert "add sam_test_resnet_3 to models (#1309)" (#1310)

This reverts commit 85ed01e1c6cc5760438e8067dc4a176b3c8af914.

* add sam_test_resnet_4 to models (#1313)

Co-authored-by: Jenkins

* add mvimgnet_rf to models (#1339)

Co-authored-by: Jenkins

* add mvimgnet_ss_00 to models (#1340)

Co-authored-by: Jenkins

* add mvimgnet_ss_03 to models (#1342)

Co-authored-by: Jenkins

* add mvimgnet_ss_05 to models (#1343)

Co-authored-by: Jenkins

* add mvimgnet_ss_03 to models (#1345)

Co-authored-by: Jenkins

* add mvimgnet_ms_05 to models (#1346)

Co-authored-by: Jenkins

* add mvimgnet_ss_02 to models (#1347)

Co-authored-by: Jenkins

* add mvimgnet_ss_04 to models (#1348)

Co-authored-by: Jenkins

---------

Co-authored-by: Sam Winebrake <85908068+samwinebrake@users.noreply.github.com>
Co-authored-by: Katherine Fairchild
Co-authored-by: Jenkins
---
 .../models/mvimgnet_ms_05/__init__.py         |  9 +++
 .../models/mvimgnet_ms_05/model.py            | 64 +++++++++++++++++++
 .../models/mvimgnet_ms_05/setup.py            | 25 ++++++++
 .../models/mvimgnet_ms_05/test.py             |  1 +
 .../models/mvimgnet_rf/__init__.py            |  9 +++
 brainscore_vision/models/mvimgnet_rf/model.py | 64 +++++++++++++++++++
 brainscore_vision/models/mvimgnet_rf/setup.py | 25 ++++++++
 brainscore_vision/models/mvimgnet_rf/test.py  |  1 +
 .../models/mvimgnet_ss_00/__init__.py         |  9 +++
 .../models/mvimgnet_ss_00/model.py            | 64 +++++++++++++++++++
 .../models/mvimgnet_ss_00/setup.py            | 25 ++++++++
 .../models/mvimgnet_ss_00/test.py             |  1 +
 .../models/mvimgnet_ss_02/__init__.py         |  9 +++
 .../models/mvimgnet_ss_02/model.py            | 64 +++++++++++++++++++
 .../models/mvimgnet_ss_02/setup.py            | 25 ++++++++
 .../models/mvimgnet_ss_02/test.py             |  1 +
 .../models/mvimgnet_ss_03/__init__.py         |  9 +++
 .../models/mvimgnet_ss_03/model.py            | 64 +++++++++++++++++++
 .../models/mvimgnet_ss_03/setup.py            | 25 ++++++++
 .../models/mvimgnet_ss_03/test.py             |  1 +
 .../models/mvimgnet_ss_04/__init__.py         |  9 +++
 .../models/mvimgnet_ss_04/model.py            | 64 +++++++++++++++++++
 .../models/mvimgnet_ss_04/setup.py            | 25 ++++++++
 .../models/mvimgnet_ss_04/test.py             |  1 +
 .../models/mvimgnet_ss_05/__init__.py         |  9 +++
 .../models/mvimgnet_ss_05/model.py            | 64 +++++++++++++++++++
 .../models/mvimgnet_ss_05/setup.py            | 25 ++++++++
 .../models/mvimgnet_ss_05/test.py             |  1 +
 .../models/sam_test_resnet_4/__init__.py      |  5 ++
 .../models/sam_test_resnet_4/model.py         | 26 ++++++++
 .../models/sam_test_resnet_4/requirements.txt |  2 +
 .../models/sam_test_resnet_4/test.py          |  8 +++
 32 files changed, 734 insertions(+)
 create mode 100644 brainscore_vision/models/mvimgnet_ms_05/__init__.py
 create mode 100644 brainscore_vision/models/mvimgnet_ms_05/model.py
 create mode 100644 brainscore_vision/models/mvimgnet_ms_05/setup.py
 create mode 100644 brainscore_vision/models/mvimgnet_ms_05/test.py
 create mode 100644 brainscore_vision/models/mvimgnet_rf/__init__.py
 create mode 100644 brainscore_vision/models/mvimgnet_rf/model.py
 create mode 100644 brainscore_vision/models/mvimgnet_rf/setup.py
 create mode 100644 brainscore_vision/models/mvimgnet_rf/test.py
 create mode 100644 brainscore_vision/models/mvimgnet_ss_00/__init__.py
 create mode 100644 brainscore_vision/models/mvimgnet_ss_00/model.py
 create mode 100644 brainscore_vision/models/mvimgnet_ss_00/setup.py
 create mode 100644 brainscore_vision/models/mvimgnet_ss_00/test.py
 create mode 100644 brainscore_vision/models/mvimgnet_ss_02/__init__.py
 create mode 100644 brainscore_vision/models/mvimgnet_ss_02/model.py
 create mode 100644 brainscore_vision/models/mvimgnet_ss_02/setup.py
 create mode 100644 brainscore_vision/models/mvimgnet_ss_02/test.py
 create mode 100644 brainscore_vision/models/mvimgnet_ss_03/__init__.py
 create mode 100644 brainscore_vision/models/mvimgnet_ss_03/model.py
 create mode 100644 brainscore_vision/models/mvimgnet_ss_03/setup.py
 create mode 100644 brainscore_vision/models/mvimgnet_ss_03/test.py
 create mode 100644 brainscore_vision/models/mvimgnet_ss_04/__init__.py
 create mode 100644 brainscore_vision/models/mvimgnet_ss_04/model.py
 create mode 100644 brainscore_vision/models/mvimgnet_ss_04/setup.py
 create mode 100644 brainscore_vision/models/mvimgnet_ss_04/test.py
 create mode 100644 brainscore_vision/models/mvimgnet_ss_05/__init__.py
 create mode 100644 brainscore_vision/models/mvimgnet_ss_05/model.py
 create mode 100644 brainscore_vision/models/mvimgnet_ss_05/setup.py
 create mode 100644 brainscore_vision/models/mvimgnet_ss_05/test.py
 create mode 100644 brainscore_vision/models/sam_test_resnet_4/__init__.py
 create mode 100644 brainscore_vision/models/sam_test_resnet_4/model.py
 create mode 100644 brainscore_vision/models/sam_test_resnet_4/requirements.txt
 create mode 100644 brainscore_vision/models/sam_test_resnet_4/test.py

diff --git a/brainscore_vision/models/mvimgnet_ms_05/__init__.py b/brainscore_vision/models/mvimgnet_ms_05/__init__.py
new file mode 100644
index 000000000..78e39a3cb
--- /dev/null
+++ b/brainscore_vision/models/mvimgnet_ms_05/__init__.py
@@ -0,0 +1,9 @@
+from brainscore_vision import model_registry
+from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+from .model import get_model, get_layers
+
+model_registry["mvimgnet_ms_05"] = lambda: ModelCommitment(
+    identifier="mvimgnet_ms_05",
+    activations_model=get_model("mvimgnet_ms_05"),
+    layers=get_layers("mvimgnet_ms_05"),
+)
diff --git a/brainscore_vision/models/mvimgnet_ms_05/model.py b/brainscore_vision/models/mvimgnet_ms_05/model.py
new file mode 100644
index 000000000..36543ec1b
--- /dev/null
+++ b/brainscore_vision/models/mvimgnet_ms_05/model.py
@@ -0,0 +1,64 @@
+from brainscore_vision.model_helpers.check_submission import check_models
+import functools
+import os
+from urllib.request import urlretrieve
+import torchvision.models
+from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper
+from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
+from pathlib import Path
+from brainscore_vision.model_helpers import download_weights
+import torch
+from collections import OrderedDict
+
+# This is an example implementation for submitting resnet-50 as a pytorch model
+
+# Attention: It is important, that the wrapper identifier is unique per model!
+# The results will otherwise be the same due to brain-scores internal result caching mechanism.
+# Please load your pytorch model for usage in CPU. There won't be GPUs available for scoring your model.
+# If the model requires a GPU, contact the brain-score team directly.
+from brainscore_vision.model_helpers.check_submission import check_models + + +def get_model_list(): + return ["mvimgnet_ms_05"] + + +def get_model(name): + assert name == "mvimgnet_ms_05" + url = "https://users.flatironinstitute.org/~tyerxa/slow_steady/training_checkpoints/slow_steady/multiscale/512_10/lmda_0.5/latest-rank0.pt" + fh = urlretrieve(url) + state_dict = torch.load(fh[0], map_location=torch.device("cpu"))["state"]["model"] + model = load_composer_classifier(state_dict) + preprocessing = functools.partial(load_preprocess_images, image_size=224) + wrapper = PytorchWrapper(identifier=name, model=model, preprocessing=preprocessing) + wrapper.image_size = 224 + return wrapper + +def load_composer_classifier(sd): + model = torchvision.models.resnet.resnet50() + new_sd = OrderedDict() + for k, v in sd.items(): + if 'lin_cls' in k: + new_sd['fc.' + k.split('.')[-1]] = v + if ".f." not in k: + continue + parts = k.split(".") + idx = parts.index("f") + new_k = ".".join(parts[idx + 1 :]) + new_sd[new_k] = v + model.load_state_dict(new_sd, strict=True) + return model + +def get_layers(name): + assert name == "mvimgnet_ms_05" + + outs = ["layer1", "layer2", "layer3", "layer4"] + return outs + + +def get_bibtex(model_identifier): + return """xx""" + + +if __name__ == "__main__": + check_models.check_base_models(__name__) diff --git a/brainscore_vision/models/mvimgnet_ms_05/setup.py b/brainscore_vision/models/mvimgnet_ms_05/setup.py new file mode 100644 index 000000000..421914cfb --- /dev/null +++ b/brainscore_vision/models/mvimgnet_ms_05/setup.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +from setuptools import setup, find_packages + +requirements = [ "torchvision", + "torch" +] + +setup( + packages=find_packages(exclude=['tests']), + include_package_data=True, + install_requires=requirements, + license="MIT license", + zip_safe=False, + keywords='brain-score template', + classifiers=[ + 'Development Status :: 2 - Pre-Alpha', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: MIT License', + 'Natural Language :: English', + 'Programming Language :: Python :: 3.7', + ], + test_suite='tests', +) diff --git a/brainscore_vision/models/mvimgnet_ms_05/test.py b/brainscore_vision/models/mvimgnet_ms_05/test.py new file mode 100644 index 000000000..e594ba9e1 --- /dev/null +++ b/brainscore_vision/models/mvimgnet_ms_05/test.py @@ -0,0 +1 @@ +# Left empty as part of 2023 models migration diff --git a/brainscore_vision/models/mvimgnet_rf/__init__.py b/brainscore_vision/models/mvimgnet_rf/__init__.py new file mode 100644 index 000000000..46fc2c38d --- /dev/null +++ b/brainscore_vision/models/mvimgnet_rf/__init__.py @@ -0,0 +1,9 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from .model import get_model, get_layers + +model_registry["mvimgnet_rf"] = lambda: ModelCommitment( + identifier="mvimgnet_rf", + activations_model=get_model("mvimgnet_rf"), + layers=get_layers("mvimgnet_rf"), +) diff --git a/brainscore_vision/models/mvimgnet_rf/model.py b/brainscore_vision/models/mvimgnet_rf/model.py new file mode 100644 index 000000000..7c6dbee42 --- /dev/null +++ b/brainscore_vision/models/mvimgnet_rf/model.py @@ -0,0 +1,64 @@ +from brainscore_vision.model_helpers.check_submission import check_models +import functools +import os +from urllib.request import urlretrieve +import torchvision.models +from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper +from 
brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images +from pathlib import Path +from brainscore_vision.model_helpers import download_weights +import torch +from collections import OrderedDict + +# This is an example implementation for submitting resnet-50 as a pytorch model + +# Attention: It is important, that the wrapper identifier is unique per model! +# The results will otherwise be the same due to brain-scores internal result caching mechanism. +# Please load your pytorch model for usage in CPU. There won't be GPUs available for scoring your model. +# If the model requires a GPU, contact the brain-score team directly. +from brainscore_vision.model_helpers.check_submission import check_models + + +def get_model_list(): + return ["mvimgnet_rf"] + + +def get_model(name): + assert name == "mvimgnet_rf" + url = "https://users.flatironinstitute.org/~tyerxa/slow_steady/training_checkpoints/slow_steady/r2/LARS/rf/latest-rank0.pt" + fh = urlretrieve(url) + state_dict = torch.load(fh[0], map_location=torch.device("cpu"))["state"]["model"] + model = load_composer_classifier(state_dict) + preprocessing = functools.partial(load_preprocess_images, image_size=224) + wrapper = PytorchWrapper(identifier=name, model=model, preprocessing=preprocessing) + wrapper.image_size = 224 + return wrapper + +def load_composer_classifier(sd): + model = torchvision.models.resnet.resnet50() + new_sd = OrderedDict() + for k, v in sd.items(): + if 'lin_cls' in k: + new_sd['fc.' + k.split('.')[-1]] = v + if ".f." not in k: + continue + parts = k.split(".") + idx = parts.index("f") + new_k = ".".join(parts[idx + 1 :]) + new_sd[new_k] = v + model.load_state_dict(new_sd, strict=True) + return model + +def get_layers(name): + assert name == "mvimgnet_rf" + + outs = ["layer1", "layer2", "layer3", "layer4"] + return outs + + +def get_bibtex(model_identifier): + return """xx""" + + +if __name__ == "__main__": + check_models.check_base_models(__name__) diff --git a/brainscore_vision/models/mvimgnet_rf/setup.py b/brainscore_vision/models/mvimgnet_rf/setup.py new file mode 100644 index 000000000..421914cfb --- /dev/null +++ b/brainscore_vision/models/mvimgnet_rf/setup.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +from setuptools import setup, find_packages + +requirements = [ "torchvision", + "torch" +] + +setup( + packages=find_packages(exclude=['tests']), + include_package_data=True, + install_requires=requirements, + license="MIT license", + zip_safe=False, + keywords='brain-score template', + classifiers=[ + 'Development Status :: 2 - Pre-Alpha', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: MIT License', + 'Natural Language :: English', + 'Programming Language :: Python :: 3.7', + ], + test_suite='tests', +) diff --git a/brainscore_vision/models/mvimgnet_rf/test.py b/brainscore_vision/models/mvimgnet_rf/test.py new file mode 100644 index 000000000..e594ba9e1 --- /dev/null +++ b/brainscore_vision/models/mvimgnet_rf/test.py @@ -0,0 +1 @@ +# Left empty as part of 2023 models migration diff --git a/brainscore_vision/models/mvimgnet_ss_00/__init__.py b/brainscore_vision/models/mvimgnet_ss_00/__init__.py new file mode 100644 index 000000000..5eedd9498 --- /dev/null +++ b/brainscore_vision/models/mvimgnet_ss_00/__init__.py @@ -0,0 +1,9 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from .model import get_model, get_layers + +model_registry["mvimgnet_ss_00"] = lambda: 
ModelCommitment( + identifier="mvimgnet_ss_00", + activations_model=get_model("mvimgnet_ss_00"), + layers=get_layers("mvimgnet_ss_00"), +) diff --git a/brainscore_vision/models/mvimgnet_ss_00/model.py b/brainscore_vision/models/mvimgnet_ss_00/model.py new file mode 100644 index 000000000..c02978b40 --- /dev/null +++ b/brainscore_vision/models/mvimgnet_ss_00/model.py @@ -0,0 +1,64 @@ +from brainscore_vision.model_helpers.check_submission import check_models +import functools +import os +from urllib.request import urlretrieve +import torchvision.models +from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper +from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images +from pathlib import Path +from brainscore_vision.model_helpers import download_weights +import torch +from collections import OrderedDict + +# This is an example implementation for submitting resnet-50 as a pytorch model + +# Attention: It is important, that the wrapper identifier is unique per model! +# The results will otherwise be the same due to brain-scores internal result caching mechanism. +# Please load your pytorch model for usage in CPU. There won't be GPUs available for scoring your model. +# If the model requires a GPU, contact the brain-score team directly. +from brainscore_vision.model_helpers.check_submission import check_models + + +def get_model_list(): + return ["mvimgnet_ss_00"] + + +def get_model(name): + assert name == "mvimgnet_ss_00" + url = "https://users.flatironinstitute.org/~tyerxa/slow_steady/training_checkpoints/slow_steady/r2/LARS/lmda_0.0/latest-rank0.pt" + fh = urlretrieve(url) + state_dict = torch.load(fh[0], map_location=torch.device("cpu"))["state"]["model"] + model = load_composer_classifier(state_dict) + preprocessing = functools.partial(load_preprocess_images, image_size=224) + wrapper = PytorchWrapper(identifier=name, model=model, preprocessing=preprocessing) + wrapper.image_size = 224 + return wrapper + +def load_composer_classifier(sd): + model = torchvision.models.resnet.resnet50() + new_sd = OrderedDict() + for k, v in sd.items(): + if 'lin_cls' in k: + new_sd['fc.' + k.split('.')[-1]] = v + if ".f." 
not in k: + continue + parts = k.split(".") + idx = parts.index("f") + new_k = ".".join(parts[idx + 1 :]) + new_sd[new_k] = v + model.load_state_dict(new_sd, strict=True) + return model + +def get_layers(name): + assert name == "mvimgnet_ss_00" + + outs = ["layer1", "layer2", "layer3", "layer4"] + return outs + + +def get_bibtex(model_identifier): + return """xx""" + + +if __name__ == "__main__": + check_models.check_base_models(__name__) diff --git a/brainscore_vision/models/mvimgnet_ss_00/setup.py b/brainscore_vision/models/mvimgnet_ss_00/setup.py new file mode 100644 index 000000000..421914cfb --- /dev/null +++ b/brainscore_vision/models/mvimgnet_ss_00/setup.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +from setuptools import setup, find_packages + +requirements = [ "torchvision", + "torch" +] + +setup( + packages=find_packages(exclude=['tests']), + include_package_data=True, + install_requires=requirements, + license="MIT license", + zip_safe=False, + keywords='brain-score template', + classifiers=[ + 'Development Status :: 2 - Pre-Alpha', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: MIT License', + 'Natural Language :: English', + 'Programming Language :: Python :: 3.7', + ], + test_suite='tests', +) diff --git a/brainscore_vision/models/mvimgnet_ss_00/test.py b/brainscore_vision/models/mvimgnet_ss_00/test.py new file mode 100644 index 000000000..e594ba9e1 --- /dev/null +++ b/brainscore_vision/models/mvimgnet_ss_00/test.py @@ -0,0 +1 @@ +# Left empty as part of 2023 models migration diff --git a/brainscore_vision/models/mvimgnet_ss_02/__init__.py b/brainscore_vision/models/mvimgnet_ss_02/__init__.py new file mode 100644 index 000000000..158a45727 --- /dev/null +++ b/brainscore_vision/models/mvimgnet_ss_02/__init__.py @@ -0,0 +1,9 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from .model import get_model, get_layers + +model_registry["mvimgnet_ss_02"] = lambda: ModelCommitment( + identifier="mvimgnet_ss_02", + activations_model=get_model("mvimgnet_ss_02"), + layers=get_layers("mvimgnet_ss_02"), +) diff --git a/brainscore_vision/models/mvimgnet_ss_02/model.py b/brainscore_vision/models/mvimgnet_ss_02/model.py new file mode 100644 index 000000000..ab4f51b12 --- /dev/null +++ b/brainscore_vision/models/mvimgnet_ss_02/model.py @@ -0,0 +1,64 @@ +from brainscore_vision.model_helpers.check_submission import check_models +import functools +import os +from urllib.request import urlretrieve +import torchvision.models +from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper +from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images +from pathlib import Path +from brainscore_vision.model_helpers import download_weights +import torch +from collections import OrderedDict + +# This is an example implementation for submitting resnet-50 as a pytorch model + +# Attention: It is important, that the wrapper identifier is unique per model! +# The results will otherwise be the same due to brain-scores internal result caching mechanism. +# Please load your pytorch model for usage in CPU. There won't be GPUs available for scoring your model. +# If the model requires a GPU, contact the brain-score team directly. 
+from brainscore_vision.model_helpers.check_submission import check_models + + +def get_model_list(): + return ["mvimgnet_ss_02"] + + +def get_model(name): + assert name == "mvimgnet_ss_02" + url = "https://users.flatironinstitute.org/~tyerxa/slow_steady/training_checkpoints/slow_steady/r2/LARS/lmda_0.2/latest-rank0.pt" + fh = urlretrieve(url) + state_dict = torch.load(fh[0], map_location=torch.device("cpu"))["state"]["model"] + model = load_composer_classifier(state_dict) + preprocessing = functools.partial(load_preprocess_images, image_size=224) + wrapper = PytorchWrapper(identifier=name, model=model, preprocessing=preprocessing) + wrapper.image_size = 224 + return wrapper + +def load_composer_classifier(sd): + model = torchvision.models.resnet.resnet50() + new_sd = OrderedDict() + for k, v in sd.items(): + if 'lin_cls' in k: + new_sd['fc.' + k.split('.')[-1]] = v + if ".f." not in k: + continue + parts = k.split(".") + idx = parts.index("f") + new_k = ".".join(parts[idx + 1 :]) + new_sd[new_k] = v + model.load_state_dict(new_sd, strict=True) + return model + +def get_layers(name): + assert name == "mvimgnet_ss_02" + + outs = ["layer1", "layer2", "layer3", "layer4"] + return outs + + +def get_bibtex(model_identifier): + return """xx""" + + +if __name__ == "__main__": + check_models.check_base_models(__name__) diff --git a/brainscore_vision/models/mvimgnet_ss_02/setup.py b/brainscore_vision/models/mvimgnet_ss_02/setup.py new file mode 100644 index 000000000..421914cfb --- /dev/null +++ b/brainscore_vision/models/mvimgnet_ss_02/setup.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +from setuptools import setup, find_packages + +requirements = [ "torchvision", + "torch" +] + +setup( + packages=find_packages(exclude=['tests']), + include_package_data=True, + install_requires=requirements, + license="MIT license", + zip_safe=False, + keywords='brain-score template', + classifiers=[ + 'Development Status :: 2 - Pre-Alpha', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: MIT License', + 'Natural Language :: English', + 'Programming Language :: Python :: 3.7', + ], + test_suite='tests', +) diff --git a/brainscore_vision/models/mvimgnet_ss_02/test.py b/brainscore_vision/models/mvimgnet_ss_02/test.py new file mode 100644 index 000000000..e594ba9e1 --- /dev/null +++ b/brainscore_vision/models/mvimgnet_ss_02/test.py @@ -0,0 +1 @@ +# Left empty as part of 2023 models migration diff --git a/brainscore_vision/models/mvimgnet_ss_03/__init__.py b/brainscore_vision/models/mvimgnet_ss_03/__init__.py new file mode 100644 index 000000000..ec9185b95 --- /dev/null +++ b/brainscore_vision/models/mvimgnet_ss_03/__init__.py @@ -0,0 +1,9 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from .model import get_model, get_layers + +model_registry["mvimgnet_ss_03"] = lambda: ModelCommitment( + identifier="mvimgnet_ss_03", + activations_model=get_model("mvimgnet_ss_03"), + layers=get_layers("mvimgnet_ss_03"), +) diff --git a/brainscore_vision/models/mvimgnet_ss_03/model.py b/brainscore_vision/models/mvimgnet_ss_03/model.py new file mode 100644 index 000000000..b8d89c586 --- /dev/null +++ b/brainscore_vision/models/mvimgnet_ss_03/model.py @@ -0,0 +1,64 @@ +from brainscore_vision.model_helpers.check_submission import check_models +import functools +import os +from urllib.request import urlretrieve +import torchvision.models +from brainscore_vision.model_helpers.activations.pytorch import 
PytorchWrapper +from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images +from pathlib import Path +from brainscore_vision.model_helpers import download_weights +import torch +from collections import OrderedDict + +# This is an example implementation for submitting resnet-50 as a pytorch model + +# Attention: It is important, that the wrapper identifier is unique per model! +# The results will otherwise be the same due to brain-scores internal result caching mechanism. +# Please load your pytorch model for usage in CPU. There won't be GPUs available for scoring your model. +# If the model requires a GPU, contact the brain-score team directly. +from brainscore_vision.model_helpers.check_submission import check_models + + +def get_model_list(): + return ["mvimgnet_ss_03"] + + +def get_model(name): + assert name == "mvimgnet_ss_03" + url = "https://users.flatironinstitute.org/~tyerxa/slow_steady/training_checkpoints/slow_steady/r2/LARS/lmda_0.3/latest-rank0.pt" + fh = urlretrieve(url) + state_dict = torch.load(fh[0], map_location=torch.device("cpu"))["state"]["model"] + model = load_composer_classifier(state_dict) + preprocessing = functools.partial(load_preprocess_images, image_size=224) + wrapper = PytorchWrapper(identifier=name, model=model, preprocessing=preprocessing) + wrapper.image_size = 224 + return wrapper + +def load_composer_classifier(sd): + model = torchvision.models.resnet.resnet50() + new_sd = OrderedDict() + for k, v in sd.items(): + if 'lin_cls' in k: + new_sd['fc.' + k.split('.')[-1]] = v + if ".f." not in k: + continue + parts = k.split(".") + idx = parts.index("f") + new_k = ".".join(parts[idx + 1 :]) + new_sd[new_k] = v + model.load_state_dict(new_sd, strict=True) + return model + +def get_layers(name): + assert name == "mvimgnet_ss_03" + + outs = ["layer1", "layer2", "layer3", "layer4"] + return outs + + +def get_bibtex(model_identifier): + return """xx""" + + +if __name__ == "__main__": + check_models.check_base_models(__name__) diff --git a/brainscore_vision/models/mvimgnet_ss_03/setup.py b/brainscore_vision/models/mvimgnet_ss_03/setup.py new file mode 100644 index 000000000..421914cfb --- /dev/null +++ b/brainscore_vision/models/mvimgnet_ss_03/setup.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +from setuptools import setup, find_packages + +requirements = [ "torchvision", + "torch" +] + +setup( + packages=find_packages(exclude=['tests']), + include_package_data=True, + install_requires=requirements, + license="MIT license", + zip_safe=False, + keywords='brain-score template', + classifiers=[ + 'Development Status :: 2 - Pre-Alpha', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: MIT License', + 'Natural Language :: English', + 'Programming Language :: Python :: 3.7', + ], + test_suite='tests', +) diff --git a/brainscore_vision/models/mvimgnet_ss_03/test.py b/brainscore_vision/models/mvimgnet_ss_03/test.py new file mode 100644 index 000000000..e594ba9e1 --- /dev/null +++ b/brainscore_vision/models/mvimgnet_ss_03/test.py @@ -0,0 +1 @@ +# Left empty as part of 2023 models migration diff --git a/brainscore_vision/models/mvimgnet_ss_04/__init__.py b/brainscore_vision/models/mvimgnet_ss_04/__init__.py new file mode 100644 index 000000000..287f95a9a --- /dev/null +++ b/brainscore_vision/models/mvimgnet_ss_04/__init__.py @@ -0,0 +1,9 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from .model import get_model, 
get_layers + +model_registry["mvimgnet_ss_04"] = lambda: ModelCommitment( + identifier="mvimgnet_ss_04", + activations_model=get_model("mvimgnet_ss_04"), + layers=get_layers("mvimgnet_ss_04"), +) diff --git a/brainscore_vision/models/mvimgnet_ss_04/model.py b/brainscore_vision/models/mvimgnet_ss_04/model.py new file mode 100644 index 000000000..6886e8c80 --- /dev/null +++ b/brainscore_vision/models/mvimgnet_ss_04/model.py @@ -0,0 +1,64 @@ +from brainscore_vision.model_helpers.check_submission import check_models +import functools +import os +from urllib.request import urlretrieve +import torchvision.models +from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper +from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images +from pathlib import Path +from brainscore_vision.model_helpers import download_weights +import torch +from collections import OrderedDict + +# This is an example implementation for submitting resnet-50 as a pytorch model + +# Attention: It is important, that the wrapper identifier is unique per model! +# The results will otherwise be the same due to brain-scores internal result caching mechanism. +# Please load your pytorch model for usage in CPU. There won't be GPUs available for scoring your model. +# If the model requires a GPU, contact the brain-score team directly. +from brainscore_vision.model_helpers.check_submission import check_models + + +def get_model_list(): + return ["mvimgnet_ss_04"] + + +def get_model(name): + assert name == "mvimgnet_ss_04" + url = "https://users.flatironinstitute.org/~tyerxa/slow_steady/training_checkpoints/slow_steady/r2/LARS/lmda_0.4/latest-rank0.pt" + fh = urlretrieve(url) + state_dict = torch.load(fh[0], map_location=torch.device("cpu"))["state"]["model"] + model = load_composer_classifier(state_dict) + preprocessing = functools.partial(load_preprocess_images, image_size=224) + wrapper = PytorchWrapper(identifier=name, model=model, preprocessing=preprocessing) + wrapper.image_size = 224 + return wrapper + +def load_composer_classifier(sd): + model = torchvision.models.resnet.resnet50() + new_sd = OrderedDict() + for k, v in sd.items(): + if 'lin_cls' in k: + new_sd['fc.' + k.split('.')[-1]] = v + if ".f." 
not in k: + continue + parts = k.split(".") + idx = parts.index("f") + new_k = ".".join(parts[idx + 1 :]) + new_sd[new_k] = v + model.load_state_dict(new_sd, strict=True) + return model + +def get_layers(name): + assert name == "mvimgnet_ss_04" + + outs = ["layer1", "layer2", "layer3", "layer4"] + return outs + + +def get_bibtex(model_identifier): + return """xx""" + + +if __name__ == "__main__": + check_models.check_base_models(__name__) diff --git a/brainscore_vision/models/mvimgnet_ss_04/setup.py b/brainscore_vision/models/mvimgnet_ss_04/setup.py new file mode 100644 index 000000000..421914cfb --- /dev/null +++ b/brainscore_vision/models/mvimgnet_ss_04/setup.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +from setuptools import setup, find_packages + +requirements = [ "torchvision", + "torch" +] + +setup( + packages=find_packages(exclude=['tests']), + include_package_data=True, + install_requires=requirements, + license="MIT license", + zip_safe=False, + keywords='brain-score template', + classifiers=[ + 'Development Status :: 2 - Pre-Alpha', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: MIT License', + 'Natural Language :: English', + 'Programming Language :: Python :: 3.7', + ], + test_suite='tests', +) diff --git a/brainscore_vision/models/mvimgnet_ss_04/test.py b/brainscore_vision/models/mvimgnet_ss_04/test.py new file mode 100644 index 000000000..e594ba9e1 --- /dev/null +++ b/brainscore_vision/models/mvimgnet_ss_04/test.py @@ -0,0 +1 @@ +# Left empty as part of 2023 models migration diff --git a/brainscore_vision/models/mvimgnet_ss_05/__init__.py b/brainscore_vision/models/mvimgnet_ss_05/__init__.py new file mode 100644 index 000000000..759ac83b9 --- /dev/null +++ b/brainscore_vision/models/mvimgnet_ss_05/__init__.py @@ -0,0 +1,9 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from .model import get_model, get_layers + +model_registry["mvimgnet_ss_05"] = lambda: ModelCommitment( + identifier="mvimgnet_ss_05", + activations_model=get_model("mvimgnet_ss_05"), + layers=get_layers("mvimgnet_ss_05"), +) diff --git a/brainscore_vision/models/mvimgnet_ss_05/model.py b/brainscore_vision/models/mvimgnet_ss_05/model.py new file mode 100644 index 000000000..ebd9af24c --- /dev/null +++ b/brainscore_vision/models/mvimgnet_ss_05/model.py @@ -0,0 +1,64 @@ +from brainscore_vision.model_helpers.check_submission import check_models +import functools +import os +from urllib.request import urlretrieve +import torchvision.models +from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper +from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images +from pathlib import Path +from brainscore_vision.model_helpers import download_weights +import torch +from collections import OrderedDict + +# This is an example implementation for submitting resnet-50 as a pytorch model + +# Attention: It is important, that the wrapper identifier is unique per model! +# The results will otherwise be the same due to brain-scores internal result caching mechanism. +# Please load your pytorch model for usage in CPU. There won't be GPUs available for scoring your model. +# If the model requires a GPU, contact the brain-score team directly. 
+from brainscore_vision.model_helpers.check_submission import check_models
+
+
+def get_model_list():
+    return ["mvimgnet_ss_05"]
+
+
+def get_model(name):
+    assert name == "mvimgnet_ss_05"
+    url = "https://users.flatironinstitute.org/~tyerxa/slow_steady/training_checkpoints/slow_steady/r2/LARS/lmda_0.5/latest-rank0.pt"
+    fh = urlretrieve(url)
+    state_dict = torch.load(fh[0], map_location=torch.device("cpu"))["state"]["model"]
+    model = load_composer_classifier(state_dict)
+    preprocessing = functools.partial(load_preprocess_images, image_size=224)
+    wrapper = PytorchWrapper(identifier=name, model=model, preprocessing=preprocessing)
+    wrapper.image_size = 224
+    return wrapper
+
+def load_composer_classifier(sd):
+    model = torchvision.models.resnet.resnet50()
+    new_sd = OrderedDict()
+    for k, v in sd.items():
+        if 'lin_cls' in k:
+            new_sd['fc.' + k.split('.')[-1]] = v
+        if ".f." not in k:
+            continue
+        parts = k.split(".")
+        idx = parts.index("f")
+        new_k = ".".join(parts[idx + 1 :])
+        new_sd[new_k] = v
+    model.load_state_dict(new_sd, strict=True)
+    return model
+
+def get_layers(name):
+    assert name == "mvimgnet_ss_05"
+
+    outs = ["layer1", "layer2", "layer3", "layer4"]
+    return outs
+
+
+def get_bibtex(model_identifier):
+    return """xx"""
+
+
+if __name__ == "__main__":
+    check_models.check_base_models(__name__)
diff --git a/brainscore_vision/models/mvimgnet_ss_05/setup.py b/brainscore_vision/models/mvimgnet_ss_05/setup.py
new file mode 100644
index 000000000..421914cfb
--- /dev/null
+++ b/brainscore_vision/models/mvimgnet_ss_05/setup.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from setuptools import setup, find_packages
+
+requirements = [ "torchvision",
+                 "torch"
+]
+
+setup(
+    packages=find_packages(exclude=['tests']),
+    include_package_data=True,
+    install_requires=requirements,
+    license="MIT license",
+    zip_safe=False,
+    keywords='brain-score template',
+    classifiers=[
+        'Development Status :: 2 - Pre-Alpha',
+        'Intended Audience :: Developers',
+        'License :: OSI Approved :: MIT License',
+        'Natural Language :: English',
+        'Programming Language :: Python :: 3.7',
+    ],
+    test_suite='tests',
+)
diff --git a/brainscore_vision/models/mvimgnet_ss_05/test.py b/brainscore_vision/models/mvimgnet_ss_05/test.py
new file mode 100644
index 000000000..e594ba9e1
--- /dev/null
+++ b/brainscore_vision/models/mvimgnet_ss_05/test.py
@@ -0,0 +1 @@
+# Left empty as part of 2023 models migration
diff --git a/brainscore_vision/models/sam_test_resnet_4/__init__.py b/brainscore_vision/models/sam_test_resnet_4/__init__.py
new file mode 100644
index 000000000..0f78a2705
--- /dev/null
+++ b/brainscore_vision/models/sam_test_resnet_4/__init__.py
@@ -0,0 +1,5 @@
+from brainscore_vision import model_registry
+from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+from .model import get_model, get_layers
+
+model_registry['sam_test_resnet_4'] = lambda: ModelCommitment(identifier='sam_test_resnet_4', activations_model=get_model('sam_test_resnet_4'), layers=get_layers('sam_test_resnet_4'))
diff --git a/brainscore_vision/models/sam_test_resnet_4/model.py b/brainscore_vision/models/sam_test_resnet_4/model.py
new file mode 100644
index 000000000..9dadf077f
--- /dev/null
+++ b/brainscore_vision/models/sam_test_resnet_4/model.py
@@ -0,0 +1,26 @@
+from brainscore_vision.model_helpers.check_submission import check_models
+import functools
+import torchvision.models
+from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper
+from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
+
+def get_model(name):
+    assert name == 'sam_test_resnet_4'
+    model = torchvision.models.resnet50(pretrained=True)
+    preprocessing = functools.partial(load_preprocess_images, image_size=224)
+    wrapper = PytorchWrapper(identifier='sam_test_resnet_4', model=model, preprocessing=preprocessing)
+    wrapper.image_size = 224
+    return wrapper
+
+
+def get_layers(name):
+    assert name == 'sam_test_resnet_4'
+    return ['conv1','layer1', 'layer2', 'layer3', 'layer4', 'fc']
+
+
+def get_bibtex(model_identifier):
+    return """"""
+
+
+if __name__ == '__main__':
+    check_models.check_base_models(__name__)
diff --git a/brainscore_vision/models/sam_test_resnet_4/requirements.txt b/brainscore_vision/models/sam_test_resnet_4/requirements.txt
new file mode 100644
index 000000000..9a7d57cd9
--- /dev/null
+++ b/brainscore_vision/models/sam_test_resnet_4/requirements.txt
@@ -0,0 +1,2 @@
+torchvision
+torch
diff --git a/brainscore_vision/models/sam_test_resnet_4/test.py b/brainscore_vision/models/sam_test_resnet_4/test.py
new file mode 100644
index 000000000..a94da6cf4
--- /dev/null
+++ b/brainscore_vision/models/sam_test_resnet_4/test.py
@@ -0,0 +1,8 @@
+import pytest
+import brainscore_vision
+
+
+@pytest.mark.travis_slow
+def test_has_identifier():
+    model = brainscore_vision.load_model('sam_test_resnet_4')
+    assert model.identifier == 'sam_test_resnet_4'
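
The load_composer_classifier helper repeated in the mvimgnet model.py files above remaps keys from what appears to be a MosaicML Composer checkpoint: entries containing 'lin_cls' are copied onto torchvision's fc head, and for backbone entries only the part after the '.f.' prefix is kept. The standalone sketch below illustrates that remapping; remap_composer_keys and the fake_checkpoint keys are illustrative placeholders, not taken from the actual checkpoints referenced in the patch.

# Minimal sketch of the key remapping performed by load_composer_classifier above.
from collections import OrderedDict

def remap_composer_keys(sd):
    new_sd = OrderedDict()
    for k, v in sd.items():
        if 'lin_cls' in k:
            # linear classifier head -> torchvision's `fc` layer
            new_sd['fc.' + k.split('.')[-1]] = v
        if ".f." not in k:
            continue
        # keep only the backbone key after the ".f." feature-extractor prefix
        parts = k.split(".")
        idx = parts.index("f")
        new_sd[".".join(parts[idx + 1:])] = v
    return new_sd

fake_checkpoint = {
    "module.f.conv1.weight": "conv1-tensor",
    "module.f.layer1.0.conv1.weight": "layer1-tensor",
    "module.lin_cls.weight": "fc-weight-tensor",
}
print(remap_composer_keys(fake_checkpoint))
# -> keys: 'conv1.weight', 'layer1.0.conv1.weight', 'fc.weight'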
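
Once these plugin directories are merged, the registered identifiers resolve through the brainscore_vision registry, mirroring sam_test_resnet_4/test.py above. A minimal local smoke test along those lines, assuming the plugin requirements are installed and the checkpoint URLs in the mvimgnet model.py files are reachable, might look like this:

# Hypothetical local smoke test, not part of the patch.
import brainscore_vision

for identifier in ["mvimgnet_ss_00", "mvimgnet_rf", "sam_test_resnet_4"]:
    model = brainscore_vision.load_model(identifier)
    assert model.identifier == identifier
    print(identifier, "loaded OK")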