From 398335cfc0a4acdd011e1cdf801e326c36f1b9ed Mon Sep 17 00:00:00 2001
From: Katherine Fairchild
Date: Mon, 22 Apr 2024 17:47:17 -0400
Subject: [PATCH] add eMMCR_Mom_lmda_04_1 to models (#783)

Co-authored-by: AutoJenkins
---
NOTE(review): model.py imports check_models twice (once at the top, once
again below the comment banner); `os`, `Path`, and `download_weights` are
imported but never used in the code added by this patch.
NOTE(review): in get_layers, the first assignment to `outs` (the
seven-layer list) is dead code — it is immediately overwritten by
["layer4"]. Presumably only "layer4" is meant to be scored; confirm with
the submitter before cleaning up.
NOTE(review): get_model fetches the checkpoint with urlretrieve on every
call, with no caching; the imported download_weights helper looks like the
intended mechanism — TODO confirm with the model_helpers API.
NOTE(review): none of the above is fixed in this patch body on purpose —
editing the hunks would invalidate the recorded line counts
(@@ -0,0 +1,65 @@), the diffstat (100 insertions), and the blob hashes
(e.g. index 000000000..7329c4799). Fix in a follow-up commit instead.
---
 .../models/eMMCR_Mom_lmda_04_1/__init__.py |  9 +++
 .../models/eMMCR_Mom_lmda_04_1/model.py    | 65 +++++++++++++++++++
 .../models/eMMCR_Mom_lmda_04_1/setup.py    | 25 +++++++
 .../models/eMMCR_Mom_lmda_04_1/test.py     |  1 +
 4 files changed, 100 insertions(+)
 create mode 100644 brainscore_vision/models/eMMCR_Mom_lmda_04_1/__init__.py
 create mode 100644 brainscore_vision/models/eMMCR_Mom_lmda_04_1/model.py
 create mode 100644 brainscore_vision/models/eMMCR_Mom_lmda_04_1/setup.py
 create mode 100644 brainscore_vision/models/eMMCR_Mom_lmda_04_1/test.py

diff --git a/brainscore_vision/models/eMMCR_Mom_lmda_04_1/__init__.py b/brainscore_vision/models/eMMCR_Mom_lmda_04_1/__init__.py
new file mode 100644
index 000000000..76d3f105a
--- /dev/null
+++ b/brainscore_vision/models/eMMCR_Mom_lmda_04_1/__init__.py
@@ -0,0 +1,9 @@
+from brainscore_vision import model_registry
+from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+from .model import get_model, get_layers
+
+model_registry["eMMCR_Mom_lmda_04_1"] = lambda: ModelCommitment(
+    identifier="eMMCR_Mom_lmda_04_1",
+    activations_model=get_model("eMMCR_Mom_lmda_04_1"),
+    layers=get_layers("eMMCR_Mom_lmda_04_1"),
+)
diff --git a/brainscore_vision/models/eMMCR_Mom_lmda_04_1/model.py b/brainscore_vision/models/eMMCR_Mom_lmda_04_1/model.py
new file mode 100644
index 000000000..7329c4799
--- /dev/null
+++ b/brainscore_vision/models/eMMCR_Mom_lmda_04_1/model.py
@@ -0,0 +1,65 @@
+from brainscore_vision.model_helpers.check_submission import check_models
+import functools
+import os
+from urllib.request import urlretrieve
+import torchvision.models
+from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper
+from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
+from pathlib import Path
+from brainscore_vision.model_helpers import download_weights
+import torch
+from collections import OrderedDict
+
+# This is an example implementation for submitting resnet-50 as a pytorch model
+
+# Attention: It is important, that the wrapper identifier is unique per model!
+# The results will otherwise be the same due to brain-scores internal result caching mechanism.
+# Please load your pytorch model for usage in CPU. There won't be GPUs available for scoring your model.
+# If the model requires a GPU, contact the brain-score team directly.
+from brainscore_vision.model_helpers.check_submission import check_models
+
+
+def get_model_list():
+    return ["eMMCR_Mom_lmda_04_1"]
+
+
+def get_model(name):
+    assert name == "eMMCR_Mom_lmda_04_1"
+    url = "https://users.flatironinstitute.org/~tyerxa/equi_proj/training_checkpoints/fresh/paired/lmda_0.4/MMCR_Momentum_1/latest-rank0"
+    fh = urlretrieve(url)
+    state_dict = torch.load(fh[0], map_location=torch.device("cpu"))["state"]["model"]
+    model = load_composer_classifier(state_dict)
+    preprocessing = functools.partial(load_preprocess_images, image_size=224)
+    wrapper = PytorchWrapper(identifier=name, model=model, preprocessing=preprocessing)
+    wrapper.image_size = 224
+    return wrapper
+
+def load_composer_classifier(sd):
+    model = torchvision.models.resnet.resnet50()
+    new_sd = OrderedDict()
+    for k, v in sd.items():
+        if 'lin_cls' in k:
+            new_sd['fc.' + k.split('.')[-1]] = v
+        if ".f." not in k:
+            continue
+        parts = k.split(".")
+        idx = parts.index("f")
+        new_k = ".".join(parts[idx + 1 :])
+        new_sd[new_k] = v
+    model.load_state_dict(new_sd, strict=True)
+    return model
+
+def get_layers(name):
+    assert name == "eMMCR_Mom_lmda_04_1"
+
+    outs = ["conv1", "layer1", "layer2", "layer3", "layer4", "avgpool", "fc"]
+    outs = ["layer4"]
+    return outs
+
+
+def get_bibtex(model_identifier):
+    return """xx"""
+
+
+if __name__ == "__main__":
+    check_models.check_base_models(__name__)
diff --git a/brainscore_vision/models/eMMCR_Mom_lmda_04_1/setup.py b/brainscore_vision/models/eMMCR_Mom_lmda_04_1/setup.py
new file mode 100644
index 000000000..421914cfb
--- /dev/null
+++ b/brainscore_vision/models/eMMCR_Mom_lmda_04_1/setup.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from setuptools import setup, find_packages
+
+requirements = [ "torchvision",
+                 "torch"
+]
+
+setup(
+    packages=find_packages(exclude=['tests']),
+    include_package_data=True,
+    install_requires=requirements,
+    license="MIT license",
+    zip_safe=False,
+    keywords='brain-score template',
+    classifiers=[
+        'Development Status :: 2 - Pre-Alpha',
+        'Intended Audience :: Developers',
+        'License :: OSI Approved :: MIT License',
+        'Natural Language :: English',
+        'Programming Language :: Python :: 3.7',
+    ],
+    test_suite='tests',
+)
diff --git a/brainscore_vision/models/eMMCR_Mom_lmda_04_1/test.py b/brainscore_vision/models/eMMCR_Mom_lmda_04_1/test.py
new file mode 100644
index 000000000..e594ba9e1
--- /dev/null
+++ b/brainscore_vision/models/eMMCR_Mom_lmda_04_1/test.py
@@ -0,0 +1 @@
+# Left empty as part of 2023 models migration