From 45060b3129e969e9a20c44b43cbbf0b7092c78e0 Mon Sep 17 00:00:00 2001
From: Mike Ferguson
Date: Mon, 18 Nov 2024 14:06:05 -0500
Subject: [PATCH] changed model weight s3 paths to new brain-score AWS bucket

---
 brainscore_vision/models/cornet_s/model.py  |   4 +-
 .../custom_model_cv_18_dagger_408/model.py  |   4 +-
 .../model.py                                |   4 +-
 .../model.py                                | 284 +++++++++---------
 .../model.py                                |   4 +-
 brainscore_vision/models/hmax/model.py      |   4 +-
 .../models/resnet_50_robust/model.py        |   4 +-
 .../voneresnet_50_non_stochastic/model.py   |   4 +-
 8 files changed, 156 insertions(+), 156 deletions(-)

diff --git a/brainscore_vision/models/cornet_s/model.py b/brainscore_vision/models/cornet_s/model.py
index 9862e34ac..fd42ea251 100644
--- a/brainscore_vision/models/cornet_s/model.py
+++ b/brainscore_vision/models/cornet_s/model.py
@@ -33,9 +33,9 @@ def __init__(self, model):
     model_ctr = getattr(mod, 'CORnet_S')
     model = model_ctr()
     model = Wrapper(model)  # model was wrapped with DataParallel, so weights require `module.` prefix
-    weights_path = load_weight_file(bucket="brainscore-vision", folder_name="models",
+    weights_path = load_weight_file(bucket="brainscore-storage", folder_name="brainscore-vision/models",
                                     relative_path="cornet_s/cornet_s_epoch43.pth.tar",
-                                    version_id="4EAQnCqTy.2MCKiXTJ4l02iG8l3e.yfQ",
+                                    version_id="null",
                                     sha1="a4bfd8eda33b45fd945da1b972ab0b7cad38d60f")
     checkpoint = torch.load(weights_path, map_location=lambda storage, loc: storage)  # map onto cpu
     model.load_state_dict(checkpoint['state_dict'])
diff --git a/brainscore_vision/models/custom_model_cv_18_dagger_408/model.py b/brainscore_vision/models/custom_model_cv_18_dagger_408/model.py
index 1fc114081..ca277d30c 100644
--- a/brainscore_vision/models/custom_model_cv_18_dagger_408/model.py
+++ b/brainscore_vision/models/custom_model_cv_18_dagger_408/model.py
@@ -47,9 +47,9 @@ def load_preprocess_custom_model(image_filepaths, image_size, **kwargs):
 def get_model(name):
     assert name == 'custom_model_cv_18_dagger_408'
     model = create_model('crossvit_18_dagger_408', pretrained=False)
-    weights_path = load_weight_file(bucket="brainscore-vision", folder_name="models",
+    weights_path = load_weight_file(bucket="brainscore-storage", folder_name="brainscore-vision/models",
                                     relative_path="custom_model_cv_18_dagger_408/crossvit_18_dagger_408_adv_finetuned_epoch5.pt",
-                                    version_id="pQVPFC_iiWpRRr7P54qxQfRzjNSn2uYB",
+                                    version_id="null",
                                     sha1="c769518485e352d5a2e6f3e588d6208cbad71b69")
     checkpoint = torch.load(weights_path, map_location=torch.device('cpu'))
     model.load_state_dict(checkpoint['state_dict'], strict=True)
diff --git a/brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/model.py b/brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/model.py
index 310fa7b04..6c16183b9 100644
--- a/brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/model.py
+++ b/brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/model.py
@@ -65,9 +65,9 @@ def get_model(name):
     assert name == 'effnetb1_cutmix_augmix_sam_e1_5avg_424x377'
     model_tf_efficientnet_b1_ns = EffNetBX()
-    weights_path = load_weight_file(bucket="brainscore-vision", folder_name="models",
+    weights_path = load_weight_file(bucket="brainscore-storage", folder_name="brainscore-vision/models",
                                     relative_path="effnetb1_cutmix_augmix_sam_e1_5avg_424x377/weights1_5_avg.pth",
-                                    version_id="EqB6P7BittVdkgRd3oMncq_j9AAdiYvz",
+                                    version_id="null",
                                     sha1="871bd10e6ce164bfe8f3ce10bb77a69d326d7b65")
     model_tf_efficientnet_b1_ns.load_state_dict(torch.load(weights_path,
                                                            map_location=torch.device('cpu'))["model"])
     model = model_tf_efficientnet_b1_ns.efnet_model
diff --git a/brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py b/brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py
index d3eaf9c94..1b5731908 100644
--- a/brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py
+++ b/brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py
@@ -1,142 +1,142 @@
-import functools
-
-import torch
-from brainscore_vision.model_helpers.activations import PytorchWrapper
-from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
-from brainscore_vision.model_helpers.s3 import load_weight_file
-from PIL import Image
-import numpy as np
-import timm
-from timm.data import resolve_data_config
-from timm.data.transforms_factory import create_transform
-import torch.nn as nn
-from albumentations import (
-    Compose, Normalize, Resize,CenterCrop
-    )
-from albumentations.pytorch import ToTensorV2
-# This is an example implementation for submitting alexnet as a pytorch model
-# If you use pytorch, don't forget to add it to the setup.py
-
-# Attention: It is important, that the wrapper identifier is unique per model!
-# The results will otherwise be the same due to brain-scores internal result caching mechanism.
-# Please load your pytorch model for usage in CPU. There won't be GPUs available for scoring your model.
-# If the model requires a GPU, contact the brain-score team directly.
-from brainscore_vision.model_helpers.check_submission import check_models
-
-import os
-
-image_resize = 324
-image_crop = 288
-norm_mean = [0.485, 0.456, 0.406]
-norm_std = [0.229, 0.224, 0.225]
-freeze_layers = ['blocks.0.0', 'blocks.0.1', 'blocks.1.0',
-                 'blocks.1.1', 'blocks.1.2', 'blocks.2.0',
-                 'blocks.2.1', 'blocks.2.2', 'blocks.3.0', 'blocks.3.1', 'blocks.3.2']
-
-def custom_image_preprocess(images, **kwargs):
-
-    transforms_val = Compose([
-        Resize(image_resize, image_resize),
-        CenterCrop(image_crop, image_crop),
-        Normalize(mean=norm_mean,std=norm_std,),
-        ToTensorV2()])
-
-    images = [np.array(pillow_image) for pillow_image in images]
-    images = [transforms_val(image=image)["image"] for image in images]
-    images = np.stack(images)
-
-    return images
-
-def load_preprocess_images_custom(image_filepaths, preprocess_images=custom_image_preprocess, **kwargs):
-    images = [load_image(image_filepath) for image_filepath in image_filepaths]
-    images = preprocess_images(images, **kwargs)
-    return images
-
-def load_image(image_filepath):
-    with Image.open(image_filepath) as pil_image:
-        if 'L' not in pil_image.mode.upper() and 'A' not in pil_image.mode.upper()\
-                and 'P' not in pil_image.mode.upper():  # not binary and not alpha and not palletized
-            # work around to https://github.com/python-pillow/Pillow/issues/1144,
-            # see https://stackoverflow.com/a/30376272/2225200
-            return pil_image.copy()
-        else:  # make sure potential binary images are in RGB
-            rgb_image = Image.new("RGB", pil_image.size)
-            rgb_image.paste(pil_image)
-            return rgb_image
-
-class EffNetBX(nn.Module):
-    def __init__(self,):
-        super().__init__ ()
-        self.efnet_model = timm.create_model('tf_efficientnet_b1_ns', pretrained=True)
-
-    def forward(self, x):
-        x = self.efnet_model(x)
-        return x
-
-def get_model(name):
-    assert name == 'effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288'
-    model_tf_efficientnet_b1_ns= EffNetBX()
-
-    weights_path = load_weight_file(bucket="brainscore-vision", folder_name="models",
-                                    relative_path="effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/tf_efficientnet_b1_ns_robust_cutmixpatchresize_SAM_e6e8e9e10.pth",
-                                    version_id="prSgvyJFh_c7OKQODIEqU_c_hg_YXh5M",
-                                    sha1="9d60e49043b2d5354447c46cd011764cc6cf094e")
-    model_tf_efficientnet_b1_ns.load_state_dict(torch.load(weights_path, map_location=torch.device('cpu'))["model"])
-    model = model_tf_efficientnet_b1_ns.efnet_model
-    filter_elems = set(["se", "act", "bn", "conv"])
-    layer_list = [layer for layer, _ in model.named_modules() if not any(i in layer for i in filter_elems)]
-    print(layer_list)
-    print(len(layer_list))
-
-    for n, m in model.named_modules():
-        if isinstance(m, nn.BatchNorm2d) and any(x in n for x in ["conv_stem" ] + freeze_layers) or n =="bn1":
-            print(f"Freeze {n, m}")
-            m.eval()
-
-
-    preprocessing = functools.partial(load_preprocess_images_custom,
-                                      preprocess_images=custom_image_preprocess,
-                                      )
-
-
-    wrapper = PytorchWrapper(identifier='my-model', model=model, preprocessing=preprocessing, batch_size=8)
-
-    wrapper.image_size = image_crop
-    return wrapper
-
-
-def get_layers(name):
-    assert name == 'effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288'
-    return ['blocks', 'blocks.0', 'blocks.0.0', 'blocks.0.1',
-            'blocks.1', 'blocks.1.0', 'blocks.1.1', 'blocks.1.2',
-            'blocks.2', 'blocks.2.0', 'blocks.2.1', 'blocks.2.2',
-            'blocks.3', 'blocks.3.0', 'blocks.3.1', 'blocks.3.2', 'blocks.3.3',
-            'blocks.4', 'blocks.4.0',
-            'blocks.4.0.conv_pw', 'blocks.4.0.conv_dw', 'blocks.4.0.conv_pwl', 'blocks.4.1', 'blocks.4.1.conv_pw', 'blocks.4.1.conv_dw', 'blocks.4.1.conv_pwl', 'blocks.4.2',
-            'blocks.4.2.conv_pw', 'blocks.4.2.conv_dw', 'blocks.4.2.conv_pwl', 'blocks.4.3', 'blocks.4.3.conv_pw', 'blocks.4.3.conv_dw', 'blocks.4.3.conv_pwl', 'blocks.5',
-            'blocks.5.0', 'blocks.5.0.conv_pw', 'blocks.5.0.conv_dw', 'blocks.5.0.conv_pwl', 'blocks.5.1', 'blocks.5.1.conv_pw', 'blocks.5.1.conv_dw', 'blocks.5.1.conv_pwl',
-            'blocks.5.2', 'blocks.5.2.conv_pw', 'blocks.5.2.conv_dw', 'blocks.5.2.conv_pwl', 'blocks.5.3', 'blocks.5.3.conv_pw', 'blocks.5.3.conv_dw', 'blocks.5.3.conv_pwl',
-            'blocks.5.4', 'blocks.5.4.conv_pw', 'blocks.5.4.conv_dw', 'blocks.5.4.conv_pwl', 'blocks.6', 'blocks.6.0', 'blocks.6.0.conv_pw', 'blocks.6.0.conv_dw',
-            'blocks.6.0.conv_pwl', 'blocks.6.1', 'blocks.6.1.conv_pw', 'blocks.6.1.conv_dw', 'blocks.6.1.conv_pwl',
-            'global_pool', 'global_pool.flatten', 'global_pool.pool']
-
-def get_bibtex(model_identifier):
-    return """@InProceedings{pmlr-v97-tan19a,
-    title = {{E}fficient{N}et: Rethinking Model Scaling for Convolutional Neural Networks},
-    author = {Tan, Mingxing and Le, Quoc},
-    booktitle = {Proceedings of the 36th International Conference on Machine Learning},
-    pages = {6105--6114},
-    year = {2019},
-    editor = {Chaudhuri, Kamalika and Salakhutdinov, Ruslan},
-    volume = {97},
-    series = {Proceedings of Machine Learning Research},
-    month = {09--15 Jun},
-    publisher = {PMLR},
-    pdf = {http://proceedings.mlr.press/v97/tan19a/tan19a.pdf},
-    url = {https://proceedings.mlr.press/v97/tan19a.html},
-    abstract = {Convolutional Neural Networks (ConvNets) are commonly developed at a fixed resource budget, and then scaled up for better accuracy if more resources are given. In this paper, we systematically study model scaling and identify that carefully balancing network depth, width, and resolution can lead to better performance. Based on this observation, we propose a new scaling method that uniformly scales all dimensions of depth/width/resolution using a simple yet highly effective compound coefficient. We demonstrate the effectiveness of this method on MobileNets and ResNet. To go even further, we use neural architecture search to design a new baseline network and scale it up to obtain a family of models, called EfficientNets, which achieve much better accuracy and efficiency than previous ConvNets. In particular, our EfficientNet-B7 achieves stateof-the-art 84.4% top-1 / 97.1% top-5 accuracy on ImageNet, while being 8.4x smaller and 6.1x faster on inference than the best existing ConvNet (Huang et al., 2018). Our EfficientNets also transfer well and achieve state-of-the-art accuracy on CIFAR-100 (91.7%), Flower (98.8%), and 3 other transfer learning datasets, with an order of magnitude fewer parameters.}
-    }"""
-
-
-if __name__ == '__main__':
-    check_models.check_base_models(__name__)
+import functools
+
+import torch
+from brainscore_vision.model_helpers.activations import PytorchWrapper
+from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
+from brainscore_vision.model_helpers.s3 import load_weight_file
+from PIL import Image
+import numpy as np
+import timm
+from timm.data import resolve_data_config
+from timm.data.transforms_factory import create_transform
+import torch.nn as nn
+from albumentations import (
+    Compose, Normalize, Resize,CenterCrop
+    )
+from albumentations.pytorch import ToTensorV2
+# This is an example implementation for submitting alexnet as a pytorch model
+# If you use pytorch, don't forget to add it to the setup.py
+
+# Attention: It is important, that the wrapper identifier is unique per model!
+# The results will otherwise be the same due to brain-scores internal result caching mechanism.
+# Please load your pytorch model for usage in CPU. There won't be GPUs available for scoring your model.
+# If the model requires a GPU, contact the brain-score team directly.
+from brainscore_vision.model_helpers.check_submission import check_models
+
+import os
+
+image_resize = 324
+image_crop = 288
+norm_mean = [0.485, 0.456, 0.406]
+norm_std = [0.229, 0.224, 0.225]
+freeze_layers = ['blocks.0.0', 'blocks.0.1', 'blocks.1.0',
+                 'blocks.1.1', 'blocks.1.2', 'blocks.2.0',
+                 'blocks.2.1', 'blocks.2.2', 'blocks.3.0', 'blocks.3.1', 'blocks.3.2']
+
+def custom_image_preprocess(images, **kwargs):
+
+    transforms_val = Compose([
+        Resize(image_resize, image_resize),
+        CenterCrop(image_crop, image_crop),
+        Normalize(mean=norm_mean,std=norm_std,),
+        ToTensorV2()])
+
+    images = [np.array(pillow_image) for pillow_image in images]
+    images = [transforms_val(image=image)["image"] for image in images]
+    images = np.stack(images)
+
+    return images
+
+def load_preprocess_images_custom(image_filepaths, preprocess_images=custom_image_preprocess, **kwargs):
+    images = [load_image(image_filepath) for image_filepath in image_filepaths]
+    images = preprocess_images(images, **kwargs)
+    return images
+
+def load_image(image_filepath):
+    with Image.open(image_filepath) as pil_image:
+        if 'L' not in pil_image.mode.upper() and 'A' not in pil_image.mode.upper()\
+                and 'P' not in pil_image.mode.upper():  # not binary and not alpha and not palletized
+            # work around to https://github.com/python-pillow/Pillow/issues/1144,
+            # see https://stackoverflow.com/a/30376272/2225200
+            return pil_image.copy()
+        else:  # make sure potential binary images are in RGB
+            rgb_image = Image.new("RGB", pil_image.size)
+            rgb_image.paste(pil_image)
+            return rgb_image
+
+class EffNetBX(nn.Module):
+    def __init__(self,):
+        super().__init__ ()
+        self.efnet_model = timm.create_model('tf_efficientnet_b1_ns', pretrained=True)
+
+    def forward(self, x):
+        x = self.efnet_model(x)
+        return x
+
+def get_model(name):
+    assert name == 'effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288'
+    model_tf_efficientnet_b1_ns= EffNetBX()
+
+    weights_path = load_weight_file(bucket="brainscore-storage", folder_name="brainscore-vision/models",
+                                    relative_path="effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/tf_efficientnet_b1_ns_robust_cutmixpatchresize_SAM_e6e8e9e10.pth",
+                                    version_id="null",
+                                    sha1="9d60e49043b2d5354447c46cd011764cc6cf094e")
+    model_tf_efficientnet_b1_ns.load_state_dict(torch.load(weights_path, map_location=torch.device('cpu'))["model"])
+    model = model_tf_efficientnet_b1_ns.efnet_model
+    filter_elems = set(["se", "act", "bn", "conv"])
+    layer_list = [layer for layer, _ in model.named_modules() if not any(i in layer for i in filter_elems)]
+    print(layer_list)
+    print(len(layer_list))
+
+    for n, m in model.named_modules():
+        if isinstance(m, nn.BatchNorm2d) and any(x in n for x in ["conv_stem" ] + freeze_layers) or n =="bn1":
+            print(f"Freeze {n, m}")
+            m.eval()
+
+
+    preprocessing = functools.partial(load_preprocess_images_custom,
+                                      preprocess_images=custom_image_preprocess,
+                                      )
+
+
+    wrapper = PytorchWrapper(identifier='my-model', model=model, preprocessing=preprocessing, batch_size=8)
+
+    wrapper.image_size = image_crop
+    return wrapper
+
+
+def get_layers(name):
+    assert name == 'effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288'
+    return ['blocks', 'blocks.0', 'blocks.0.0', 'blocks.0.1',
+            'blocks.1', 'blocks.1.0', 'blocks.1.1', 'blocks.1.2',
+            'blocks.2', 'blocks.2.0', 'blocks.2.1', 'blocks.2.2',
+            'blocks.3', 'blocks.3.0', 'blocks.3.1', 'blocks.3.2', 'blocks.3.3',
+            'blocks.4', 'blocks.4.0',
+            'blocks.4.0.conv_pw', 'blocks.4.0.conv_dw', 'blocks.4.0.conv_pwl', 'blocks.4.1', 'blocks.4.1.conv_pw', 'blocks.4.1.conv_dw', 'blocks.4.1.conv_pwl', 'blocks.4.2',
+            'blocks.4.2.conv_pw', 'blocks.4.2.conv_dw', 'blocks.4.2.conv_pwl', 'blocks.4.3', 'blocks.4.3.conv_pw', 'blocks.4.3.conv_dw', 'blocks.4.3.conv_pwl', 'blocks.5',
+            'blocks.5.0', 'blocks.5.0.conv_pw', 'blocks.5.0.conv_dw', 'blocks.5.0.conv_pwl', 'blocks.5.1', 'blocks.5.1.conv_pw', 'blocks.5.1.conv_dw', 'blocks.5.1.conv_pwl',
+            'blocks.5.2', 'blocks.5.2.conv_pw', 'blocks.5.2.conv_dw', 'blocks.5.2.conv_pwl', 'blocks.5.3', 'blocks.5.3.conv_pw', 'blocks.5.3.conv_dw', 'blocks.5.3.conv_pwl',
+            'blocks.5.4', 'blocks.5.4.conv_pw', 'blocks.5.4.conv_dw', 'blocks.5.4.conv_pwl', 'blocks.6', 'blocks.6.0', 'blocks.6.0.conv_pw', 'blocks.6.0.conv_dw',
+            'blocks.6.0.conv_pwl', 'blocks.6.1', 'blocks.6.1.conv_pw', 'blocks.6.1.conv_dw', 'blocks.6.1.conv_pwl',
+            'global_pool', 'global_pool.flatten', 'global_pool.pool']
+
+def get_bibtex(model_identifier):
+    return """@InProceedings{pmlr-v97-tan19a,
+    title = {{E}fficient{N}et: Rethinking Model Scaling for Convolutional Neural Networks},
+    author = {Tan, Mingxing and Le, Quoc},
+    booktitle = {Proceedings of the 36th International Conference on Machine Learning},
+    pages = {6105--6114},
+    year = {2019},
+    editor = {Chaudhuri, Kamalika and Salakhutdinov, Ruslan},
+    volume = {97},
+    series = {Proceedings of Machine Learning Research},
+    month = {09--15 Jun},
+    publisher = {PMLR},
+    pdf = {http://proceedings.mlr.press/v97/tan19a/tan19a.pdf},
+    url = {https://proceedings.mlr.press/v97/tan19a.html},
+    abstract = {Convolutional Neural Networks (ConvNets) are commonly developed at a fixed resource budget, and then scaled up for better accuracy if more resources are given. In this paper, we systematically study model scaling and identify that carefully balancing network depth, width, and resolution can lead to better performance. Based on this observation, we propose a new scaling method that uniformly scales all dimensions of depth/width/resolution using a simple yet highly effective compound coefficient. We demonstrate the effectiveness of this method on MobileNets and ResNet. To go even further, we use neural architecture search to design a new baseline network and scale it up to obtain a family of models, called EfficientNets, which achieve much better accuracy and efficiency than previous ConvNets. In particular, our EfficientNet-B7 achieves stateof-the-art 84.4% top-1 / 97.1% top-5 accuracy on ImageNet, while being 8.4x smaller and 6.1x faster on inference than the best existing ConvNet (Huang et al., 2018). Our EfficientNets also transfer well and achieve state-of-the-art accuracy on CIFAR-100 (91.7%), Flower (98.8%), and 3 other transfer learning datasets, with an order of magnitude fewer parameters.}
+    }"""
+
+
+if __name__ == '__main__':
+    check_models.check_base_models(__name__)
diff --git a/brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/model.py b/brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/model.py
index e7603dc14..aff46e87e 100644
--- a/brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/model.py
+++ b/brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/model.py
@@ -70,9 +70,9 @@ def forward(self, x):
 def get_model(name):
     assert name == 'effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288'
     model_tf_efficientnet_b1_ns = EffNetBX()
-    weights_path = load_weight_file(bucket="brainscore-vision", folder_name="models",
+    weights_path = load_weight_file(bucket="brainscore-storage", folder_name="brainscore-vision/models",
                                     relative_path="effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/tf_efficientnet_b1_ns_robust_cutmixpatchresize_augmix_e4toe7.pth",
-                                    version_id="iB0UqbguDpYHD0HRbMt1F1er3c414yWr",
+                                    version_id="null",
                                     sha1="37f3ac1b14e80cfaa99fa5f412c1e132480ed5b6")
 
     model_tf_efficientnet_b1_ns.load_state_dict(torch.load(weights_path,map_location=torch.device('cpu'))["model"])
diff --git a/brainscore_vision/models/hmax/model.py b/brainscore_vision/models/hmax/model.py
index 028c914e2..ed157500a 100644
--- a/brainscore_vision/models/hmax/model.py
+++ b/brainscore_vision/models/hmax/model.py
@@ -18,9 +18,9 @@ def get_model(name):
 
 
 def get_hmax(identifier, image_size):
-    weights_path = load_weight_file(bucket="brainscore-vision", folder_name="models",
+    weights_path = load_weight_file(bucket="brainscore-storage", folder_name="brainscore-vision/models",
                                     relative_path="hmax/universal_patch_set.mat",
-                                    version_id="fIX.lsvnc8qqjDr_sG_Dl9RyqWuG0OGC",
+                                    version_id="null",
                                     sha1="acc7316fcb0d1797486bb62753b71e158216a92a")
     global model
     model = HMAX(str(weights_path))
diff --git a/brainscore_vision/models/resnet_50_robust/model.py b/brainscore_vision/models/resnet_50_robust/model.py
index 83b17d049..576c3da01 100644
--- a/brainscore_vision/models/resnet_50_robust/model.py
+++ b/brainscore_vision/models/resnet_50_robust/model.py
@@ -16,9 +16,9 @@ def get_model(name):
     model_ctr = getattr(module, 'resnet50')
     model = model_ctr()
     preprocessing = functools.partial(load_preprocess_images, image_size=224)
-    weights_path = load_weight_file(bucket="brainscore-vision", folder_name="models",
+    weights_path = load_weight_file(bucket="brainscore-storage", folder_name="brainscore-vision/models",
                                     relative_path="resnet-50-robust/ImageNet.pt",
-                                    version_id=".shHB0L_L9L3Mtco0Kf4EBP3Xj9nLKnC",
+                                    version_id="null",
                                     sha1="cc6e4441abc8ad6d2f4da5db84836e544bfb53fd")
 
     checkpoint = torch.load(weights_path, map_location=torch.device('cpu'))
diff --git a/brainscore_vision/models/voneresnet_50_non_stochastic/model.py b/brainscore_vision/models/voneresnet_50_non_stochastic/model.py
index 8052fd887..efddf96e3 100644
--- a/brainscore_vision/models/voneresnet_50_non_stochastic/model.py
+++ b/brainscore_vision/models/voneresnet_50_non_stochastic/model.py
@@ -24,9 +24,9 @@ def get_model_from_s3():
     model_arch = 'resnet50'
     pretrained = True
     if pretrained and model_arch:
-        weights_path = load_weight_file(bucket="brainscore-vision", folder_name="models",
+        weights_path = load_weight_file(bucket="brainscore-storage", folder_name="brainscore-vision/models",
                                         relative_path="voneresnet-50-non_stochastic/voneresnet50_ns_e70.pth.tar",
-                                        version_id="vDk2cwi2xjwGqhGyyjp8lEGSfcaFzB61",
+                                        version_id="null",
                                         sha1="c270528818d6d7fc67a6aec86919d47311ad6221")
         ckpt_data = torch.load(weights_path, map_location=torch.device('cpu'))
         stride = ckpt_data['flags']['stride']
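
Reviewer note: every hunk applies the same three-part substitution. The bucket changes from
"brainscore-vision" to "brainscore-storage", the old bucket name moves into folder_name as a key
prefix ("models" becomes "brainscore-vision/models"), and each pinned S3 version_id is replaced by
the literal string "null"; relative_path and sha1 are untouched, so the SHA-1 integrity check on the
downloaded file is unchanged. Below is a minimal sketch of the resulting call pattern, reusing the
values from the cornet_s hunk and assuming load_weight_file keeps the keyword signature shown in the
hunks; the "null" version_id is assumed to mean "no pinned S3 object version" in the new bucket.

    # Sketch of the migrated call; values copied from the cornet_s hunk above.
    # For any other model, substitute that model's own relative_path and sha1.
    import torch
    from brainscore_vision.model_helpers.s3 import load_weight_file

    weights_path = load_weight_file(
        bucket="brainscore-storage",             # was: bucket="brainscore-vision"
        folder_name="brainscore-vision/models",  # was: folder_name="models"
        relative_path="cornet_s/cornet_s_epoch43.pth.tar",  # unchanged per model
        version_id="null",                       # was: a pinned S3 object version id
        sha1="a4bfd8eda33b45fd945da1b972ab0b7cad38d60f")     # integrity check, unchanged
    # Models are scored on CPU, so checkpoints are always mapped onto CPU.
    checkpoint = torch.load(weights_path, map_location=torch.device('cpu'))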