Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Custom trained models #1206

Merged
merged 29 commits into from
Sep 17, 2024
Merged
Show file tree
Hide file tree
Changes from 10 commits
Commits
Show all changes
29 commits
Select commit Hold shift + click to select a range
a84d7d3
Custom trained models
akgokce Aug 29, 2024
04187f0
Fix missing library
akgokce Aug 29, 2024
eee6ad2
Fix a missing library
akgokce Aug 29, 2024
47cae96
Not every model needs timm
akgokce Aug 29, 2024
fe7aadd
Trigger build tests
akgokce Aug 29, 2024
52d50e9
Use model_id instead of model_name to ensure correct weights are loaded
akgokce Aug 29, 2024
4a2d151
timm is missing from deit models
akgokce Aug 29, 2024
1777ac5
timm is missing from deit models
akgokce Aug 29, 2024
3e5d152
Fix requirements.txt newline
akgokce Aug 29, 2024
089c224
Merge branch 'master' into master
akgokce Aug 29, 2024
62f81f7
Package models into a single plugin
akgokce Sep 9, 2024
5474f00
Merge branch 'master' of https://github.com/akgokce/vision-custom-models
akgokce Sep 9, 2024
9cc1cd1
Merge branch 'brain-score:master' into master
akgokce Sep 9, 2024
980d740
Update requirements.txt
akgokce Sep 10, 2024
75bcdab
Merge branch 'master' of https://github.com/akgokce/vision-custom-models
akgokce Sep 10, 2024
0158d9d
Update requirements.txt
akgokce Sep 10, 2024
466f297
Merge branch 'master' into master
akgokce Sep 10, 2024
9701fb5
Update requirements.txt
akgokce Sep 10, 2024
f28069d
Change the order of models
akgokce Sep 10, 2024
dd6346d
Merge branch 'master' into master
akgokce Sep 11, 2024
d3b71d5
Update model.py
akgokce Sep 11, 2024
a5b62af
Merge branch 'master' of https://github.com/akgokce/vision-custom-models
akgokce Sep 11, 2024
7910891
Disable resultcaching for vits
akgokce Sep 12, 2024
041ba09
Merge branch 'master' into master
akgokce Sep 12, 2024
519d83d
Tests reordering
akgokce Sep 12, 2024
e044547
Revert tests back
akgokce Sep 15, 2024
16ff06f
Merge branch 'master' into master
akgokce Sep 15, 2024
9051860
Update model.py
akgokce Sep 15, 2024
414ad2b
Update model.py
akgokce Sep 15, 2024
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
from brainscore_vision import model_registry
from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
from .model import get_model, MODEL_COMMITMENT

MODEL_NAME = "convnext_base"
MODEL_ID = "convnext_base_imagenet_full_seed-0"


def _build_commitment():
    """Construct the ModelCommitment lazily so weights download only on demand."""
    return ModelCommitment(
        identifier=MODEL_ID,
        activations_model=get_model(),
        layers=MODEL_COMMITMENT["layers"],
        behavioral_readout_layer=MODEL_COMMITMENT["behavioral_readout_layer"],
        region_layer_map=MODEL_COMMITMENT["region_layer_map"],
    )


# Registry key is the same string as MODEL_ID.
model_registry[MODEL_ID] = _build_commitment
116 changes: 116 additions & 0 deletions brainscore_vision/models/convnext_base_imagenet_full_seed-0/model.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,116 @@
import functools

import torchvision.models
import torch

from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper
from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images

import numpy as np
import torchvision.transforms as T
from PIL import Image

import albumentations as A
from albumentations.pytorch import ToTensorV2

# Citation entry; intentionally empty until a publication is available.
BIBTEX = """"""

MODEL_NAME = "convnext_base"
MODEL_ID = "convnext_base_imagenet_full_seed-0"
# Brain-Score commitment: which model layers are committed to which cortical
# regions, which layer drives the behavioral readout, and the full layer list
# exposed for scoring.
MODEL_COMMITMENT = {
    "region_layer_map": {
        "V1": "features.5.7.block.0",
        "V2": "features.5.12.block.0",
        "V4": "features.4.0",
        "IT": "features.5.11.block.0",
    },
    # NOTE(review): "fclassifier.1" looks like a typo for "classifier.1"
    # (torchvision ConvNeXt names its head module "classifier") — confirm
    # against the model's named modules before relying on this readout layer.
    "behavioral_readout_layer": "fclassifier.1",
    "layers": ["features.4.0", "features.5.11.block.0", "features.5.12.block.0", "features.5.7.block.0"]
}

# Preprocessing and checkpoint configuration.
RESIZE_SIZE = 256         # resize target (both sides) before center crop
CROP_SIZE = 224           # final input resolution fed to the network
INTERPOLATION = "bicubic" # name mapped to an albumentations flag by get_interpolation_mode
NUM_CLASSES = 1000        # ImageNet-1k head; get_model() replaces the head if this differs
EPOCH = 300               # checkpoint epoch; also baked into the cached file name
CKPT_URL = "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/convnext_base_imagenet_full_seed-0/ep300.pt"


def load_image(image_filepath):
    """Open the file at *image_filepath* and return it as a 3-channel RGB PIL image."""
    image = Image.open(image_filepath)
    return image.convert("RGB")


def get_interpolation_mode(interpolation: str) -> int:
    """Map an interpolation name to the albumentations/OpenCV interpolation flag.

    Args:
        interpolation: mode name such as "linear", "bilinear", "cubic", "bicubic".

    Returns:
        1 (cv2.INTER_LINEAR) for linear modes, 2 (cv2.INTER_CUBIC) for cubic modes.

    Raises:
        NotImplementedError: for any unrecognized interpolation name.
    """
    # Bug fix: the original `if "linear" or "bilinear" in interpolation:` always
    # evaluated truthy ("linear" is a non-empty string literal), so every mode —
    # including the module default "bicubic" — silently fell through to bilinear.
    # Check "cubic" first: "bicubic" contains "cubic", and "bilinear" contains
    # "linear", so substring tests in this order cover all four names.
    if "cubic" in interpolation:
        return 2
    if "linear" in interpolation:
        return 1
    raise NotImplementedError(f"Interpolation mode {interpolation} not implemented")


def custom_image_preprocess(
    images,
    transforms=None,
    resize_size: int = RESIZE_SIZE,
    crop_size: int = CROP_SIZE,
    interpolation: str = INTERPOLATION,
):
    """Run a list of PIL images through a preprocessing pipeline and stack them.

    When *transforms* is None, a default albumentations pipeline is built:
    resize -> center crop -> ImageNet normalization -> tensor. Both torchvision
    (T.Compose) and albumentations (A.Compose) pipelines are accepted.

    Returns:
        A numpy batch of the processed images (stacked along axis 0).
    """
    if transforms is None:
        transforms = A.Compose(
            [
                A.Resize(
                    resize_size,
                    resize_size,
                    p=1.0,
                    interpolation=get_interpolation_mode(interpolation),
                ),
                A.CenterCrop(crop_size, crop_size, p=1.0),
                A.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
                ToTensorV2(),
            ]
        )

    if isinstance(transforms, T.Compose):
        processed = [np.array(transforms(image)) for image in images]
    elif isinstance(transforms, A.Compose):
        # albumentations takes/returns dicts keyed by "image" and expects arrays.
        processed = [transforms(image=np.array(image))["image"] for image in images]
    else:
        raise NotImplementedError(
            f"Transform of type {type(transforms)} is not implemented"
        )

    return np.stack(processed)


def load_preprocess_images_custom(
    image_filepaths, preprocess_images=custom_image_preprocess, **kwargs
):
    """Load each path via load_image, then delegate to *preprocess_images*."""
    loaded = [load_image(path) for path in image_filepaths]
    return preprocess_images(loaded, **kwargs)


def get_model():
    """Build convnext_base, load the EMA checkpoint from S3, and wrap it for Brain-Score.

    Returns:
        A PytorchWrapper around the loaded model with the custom preprocessing.
    """
    model = torchvision.models.convnext_base()
    # Replace the classification head only when targeting a non-ImageNet-1k label space.
    if NUM_CLASSES != 1000:
        head = model.classifier[2]
        model.classifier[2] = torch.nn.Linear(
            in_features=head.in_features,
            out_features=NUM_CLASSES,
            bias=head.bias is not None,
        )

    checkpoint = torch.hub.load_state_dict_from_url(
        CKPT_URL,
        check_hash=True,
        file_name=f"{MODEL_ID}_ep{EPOCH}.pt",
        map_location="cpu",
    )
    # The checkpoint stores EMA weights under a DataParallel-style "module." prefix.
    ema_state = checkpoint["state"]["model_ema_state_dict"]
    ema_state = {key.replace("module.", ""): value for key, value in ema_state.items()}
    model.load_state_dict(ema_state, strict=True)
    print(f"Model loaded from {CKPT_URL}")

    preprocessing = functools.partial(load_preprocess_images_custom, transforms=None)
    return PytorchWrapper(
        identifier=MODEL_ID, model=model, preprocessing=preprocessing
    )
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
torch
torchvision
albumentations
Empty file.
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
from brainscore_vision import model_registry
from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
from .model import get_model, MODEL_COMMITMENT

MODEL_NAME = "convnext_large"
MODEL_ID = "convnext_large_imagenet_full_seed-0"


def _build_commitment():
    """Construct the ModelCommitment lazily so weights download only on demand."""
    return ModelCommitment(
        identifier=MODEL_ID,
        activations_model=get_model(),
        layers=MODEL_COMMITMENT["layers"],
        behavioral_readout_layer=MODEL_COMMITMENT["behavioral_readout_layer"],
        region_layer_map=MODEL_COMMITMENT["region_layer_map"],
    )


# Registry key is the same string as MODEL_ID.
model_registry[MODEL_ID] = _build_commitment
116 changes: 116 additions & 0 deletions brainscore_vision/models/convnext_large_imagenet_full_seed-0/model.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,116 @@
import functools

import torchvision.models
import torch

from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper
from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images

import numpy as np
import torchvision.transforms as T
from PIL import Image

import albumentations as A
from albumentations.pytorch import ToTensorV2

# Citation entry; intentionally empty until a publication is available.
BIBTEX = """"""

MODEL_NAME = "convnext_large"
MODEL_ID = "convnext_large_imagenet_full_seed-0"
# Brain-Score commitment: which model layers are committed to which cortical
# regions, which layer drives the behavioral readout, and the full layer list
# exposed for scoring.
MODEL_COMMITMENT = {
    "region_layer_map": {
        "V1": "features.5.7.block.5",
        "V2": "features.5.7.block.0",
        "V4": "features.4.1",
        "IT": "features.5.11.block.0",
    },
    # NOTE(review): "fclassifier.1" looks like a typo for "classifier.1"
    # (torchvision ConvNeXt names its head module "classifier") — confirm
    # against the model's named modules before relying on this readout layer.
    "behavioral_readout_layer": "fclassifier.1",
    "layers": ["features.4.1", "features.5.11.block.0", "features.5.7.block.0", "features.5.7.block.5"]
}

# Preprocessing and checkpoint configuration.
RESIZE_SIZE = 256         # resize target (both sides) before center crop
CROP_SIZE = 224           # final input resolution fed to the network
INTERPOLATION = "bicubic" # name mapped to an albumentations flag by get_interpolation_mode
NUM_CLASSES = 1000        # ImageNet-1k head; get_model() replaces the head if this differs
EPOCH = 300               # checkpoint epoch; also baked into the cached file name
CKPT_URL = "https://epfl-neuroailab-scalinglaws.s3.eu-north-1.amazonaws.com/checkpoints/convnext_large_imagenet_full_seed-0/ep300.pt"


def load_image(image_filepath):
    """Open the file at *image_filepath* and return it as a 3-channel RGB PIL image."""
    image = Image.open(image_filepath)
    return image.convert("RGB")


def get_interpolation_mode(interpolation: str) -> int:
    """Map an interpolation name to the albumentations/OpenCV interpolation flag.

    Args:
        interpolation: mode name such as "linear", "bilinear", "cubic", "bicubic".

    Returns:
        1 (cv2.INTER_LINEAR) for linear modes, 2 (cv2.INTER_CUBIC) for cubic modes.

    Raises:
        NotImplementedError: for any unrecognized interpolation name.
    """
    # Bug fix: the original `if "linear" or "bilinear" in interpolation:` always
    # evaluated truthy ("linear" is a non-empty string literal), so every mode —
    # including the module default "bicubic" — silently fell through to bilinear.
    # Check "cubic" first: "bicubic" contains "cubic", and "bilinear" contains
    # "linear", so substring tests in this order cover all four names.
    if "cubic" in interpolation:
        return 2
    if "linear" in interpolation:
        return 1
    raise NotImplementedError(f"Interpolation mode {interpolation} not implemented")


def custom_image_preprocess(
    images,
    transforms=None,
    resize_size: int = RESIZE_SIZE,
    crop_size: int = CROP_SIZE,
    interpolation: str = INTERPOLATION,
):
    """Run a list of PIL images through a preprocessing pipeline and stack them.

    When *transforms* is None, a default albumentations pipeline is built:
    resize -> center crop -> ImageNet normalization -> tensor. Both torchvision
    (T.Compose) and albumentations (A.Compose) pipelines are accepted.

    Returns:
        A numpy batch of the processed images (stacked along axis 0).
    """
    if transforms is None:
        transforms = A.Compose(
            [
                A.Resize(
                    resize_size,
                    resize_size,
                    p=1.0,
                    interpolation=get_interpolation_mode(interpolation),
                ),
                A.CenterCrop(crop_size, crop_size, p=1.0),
                A.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
                ToTensorV2(),
            ]
        )

    if isinstance(transforms, T.Compose):
        processed = [np.array(transforms(image)) for image in images]
    elif isinstance(transforms, A.Compose):
        # albumentations takes/returns dicts keyed by "image" and expects arrays.
        processed = [transforms(image=np.array(image))["image"] for image in images]
    else:
        raise NotImplementedError(
            f"Transform of type {type(transforms)} is not implemented"
        )

    return np.stack(processed)


def load_preprocess_images_custom(
    image_filepaths, preprocess_images=custom_image_preprocess, **kwargs
):
    """Load each path via load_image, then delegate to *preprocess_images*."""
    loaded = [load_image(path) for path in image_filepaths]
    return preprocess_images(loaded, **kwargs)


def get_model():
    """Build convnext_large, load the EMA checkpoint from S3, and wrap it for Brain-Score.

    Returns:
        A PytorchWrapper around the loaded model with the custom preprocessing.
    """
    model = torchvision.models.convnext_large()
    # Replace the classification head only when targeting a non-ImageNet-1k label space.
    if NUM_CLASSES != 1000:
        head = model.classifier[2]
        model.classifier[2] = torch.nn.Linear(
            in_features=head.in_features,
            out_features=NUM_CLASSES,
            bias=head.bias is not None,
        )

    checkpoint = torch.hub.load_state_dict_from_url(
        CKPT_URL,
        check_hash=True,
        file_name=f"{MODEL_ID}_ep{EPOCH}.pt",
        map_location="cpu",
    )
    # The checkpoint stores EMA weights under a DataParallel-style "module." prefix.
    ema_state = checkpoint["state"]["model_ema_state_dict"]
    ema_state = {key.replace("module.", ""): value for key, value in ema_state.items()}
    model.load_state_dict(ema_state, strict=True)
    print(f"Model loaded from {CKPT_URL}")

    preprocessing = functools.partial(load_preprocess_images_custom, transforms=None)
    return PytorchWrapper(
        identifier=MODEL_ID, model=model, preprocessing=preprocessing
    )
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
torch
torchvision
albumentations
Empty file.
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
from brainscore_vision import model_registry
from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
from .model import get_model, MODEL_COMMITMENT

MODEL_NAME = "convnext_small"
MODEL_ID = "convnext_small_imagenet_100_seed-0"


def _build_commitment():
    """Construct the ModelCommitment lazily so weights download only on demand."""
    return ModelCommitment(
        identifier=MODEL_ID,
        activations_model=get_model(),
        layers=MODEL_COMMITMENT["layers"],
        behavioral_readout_layer=MODEL_COMMITMENT["behavioral_readout_layer"],
        region_layer_map=MODEL_COMMITMENT["region_layer_map"],
    )


# Registry key is the same string as MODEL_ID.
model_registry[MODEL_ID] = _build_commitment
Loading