Skip to content

Commit

Permalink
Remove deprecated hook code plus some submission fixes (brain-score#195)
Browse files Browse the repository at this point in the history
* Private public datasets division

* Clean modules and get benchmarks running again

* Delete print statements

* Another print

* Move unittest

* Move code from candidate models and write new code, to be able to run model scoring with a plug repo in mechanism

* Add fire to setup.py

* Little changes to make it work

* Make it an indirect dependency to model_tools

* Remove helper and revert import optimization

* Fix some errors in submission script

* Fix little error

* Change submission process a bit

* Include csv files

* Add database insertion and jenkins job id

* Database problems

* Adjust database write

* Change setup and install

* Change pip command

* Add env to pip install call

* Set pip3

* Change configuration file layout

* Change submission to work with smaller config set

* remove pip3

* Adjust call into installed module code

* Add user id aka email

* Adjust benchmark set and rework result file

* Change zip file location

* Add name field to database

* Delete zip_filepath

* Wrong model call

* Fix little issues

* Disable Ko's benchmark

* Fix imagenet benchmark run and add newlines

* Changes for new benchmark PR

* Fix little issues

* Fix model parameter in brain model scoring

* Clean a bit

* Clean score_model module

* PR review comments

* Change database insert argument list

* Change hook mechanism

* Path problem

* Remove print

* Add log msg

* Hasattr has to be a string

* Change assertion. This score object is terrible!

* Fix hook problem
  • Loading branch information
franzigeiger authored Feb 21, 2020
1 parent c9e8c1e commit 44f28e8
Show file tree
Hide file tree
Showing 3 changed files with 13 additions and 18 deletions.
3 changes: 3 additions & 0 deletions brainscore/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,10 +24,13 @@ def get_assembly(name):

@store(identifier_ignore=['model', 'benchmark'])
def score_model(model_identifier, benchmark_identifier, model, benchmark=None):
    """Run a benchmark on `model` and attach both identifiers to the resulting score.

    :param model_identifier: unique name of the model; not referenced in the body,
        but the `@store` result-caching decorator uses it to key cached results.
    :param benchmark_identifier: key used to look up the benchmark in
        `benchmark_pool` when `benchmark` is not supplied.
    :param model: the model object to be scored; must not be None.
    :param benchmark: optional pre-built benchmark instance; if None, it is
        retrieved from `benchmark_pool` by `benchmark_identifier`.
    :return: the score object produced by `benchmark(model)`, with
        `model_identifier` and `benchmark_identifier` recorded in its attrs.
    """
    # model_identifier variable is not unused, the result caching component uses it to identify the cached results
    assert model is not None
    if benchmark is None:
        _logger.debug("retrieving benchmark")
        benchmark = benchmark_pool[benchmark_identifier]
    _logger.debug("scoring model")
    score = benchmark(model)
    score.attrs['model_identifier'] = model_identifier
    score.attrs['benchmark_identifier'] = benchmark_identifier
    return score
7 changes: 4 additions & 3 deletions submission/evaluation.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,12 +72,12 @@ def run_evaluation(config_file, work_dir, db_connection_config, jenkins_id, mode
logger.info(f"Scoring {model} on benchmark {benchmark}")
score = score_model(model, benchmark, ml_brain_pool[model])
logger.info(f'Running benchmark {benchmark} on model {model} produced this score: {score}')
if not hasattr(score, ceiling):
if not hasattr(score, 'ceiling'):
raw = score.sel(aggregation='center').item(0)
ceiled = None
error = None
else:
assert len(score.raw.sel(aggregation='center')) == 1
assert score.raw.sel(aggregation='center') is not None
raw = score.raw.sel(aggregation='center').item(0)
ceiled = score.sel(aggregation='center').item(0)
error = score.sel(aggregation='error').item(0)
Expand Down Expand Up @@ -150,7 +150,8 @@ def clone_repo(config, work_dir):
def install_project(repo, package):
    """Pip-install the cloned submission repository and import its package.

    :param repo: filesystem path (possibly a `Path` object) of the cloned
        submission repository to install.
    :param package: name of the top-level package to import after installation.
    :return: the imported module object for `package`.
    :raises AssertionError: if the pip install subprocess exits non-zero.
    """
    try:
        # Install into the interpreter currently running this code; a non-zero
        # pip exit code aborts the submission via the assertion.
        assert 0 == subprocess.call([sys.executable, "-m", "pip", "install", repo], env=os.environ)
        # `repo` may be a Path object; sys.path entries must be plain strings.
        # (The stale duplicate `sys.path.insert(1, repo)` from the diff view is dropped.)
        sys.path.insert(1, str(repo))
        logger.info(f'System paths {sys.path}')
        return import_module(package)
    except ImportError:
        # Fall back to the builtin __import__ if importlib cannot resolve it.
        return __import__(package)
21 changes: 6 additions & 15 deletions submission/ml_pool.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,8 @@

import itertools

from model_tools.brain_transformation import ModelCommitment

from brainscore.utils import LazyLoad
from submission.utils import UniqueKeyDict

Expand Down Expand Up @@ -32,8 +34,7 @@ def __init__(self, base_model_pool, model_layers):
continue
layers = model_layers[basemodel_identifier]

for identifier, activations_model in Hooks().iterate_hooks(basemodel_identifier, activations_model):
self[identifier] = {'model': activations_model, 'layers': layers}
self[basemodel_identifier] = {'model': activations_model, 'layers': layers}


regions = ['V1', 'V2', 'V4', 'IT']
Expand All @@ -49,16 +50,6 @@ def __init__(self, base_model_pool, model_layers):
continue
layers = model_layers[basemodel_identifier]

for identifier, activations_model in Hooks().iterate_hooks(basemodel_identifier, activations_model):
if identifier in self: # already pre-defined
continue
from model_tools.brain_transformation import ModelCommitment
# enforce early parameter binding: https://stackoverflow.com/a/3431699/2225200
def load(identifier=identifier, activations_model=activations_model, layers=layers):
brain_model = ModelCommitment(identifier=identifier, activations_model=activations_model,
layers=layers)
for region in regions:
brain_model.commit_region(region)
return brain_model

self[identifier] = LazyLoad(load)
self[basemodel_identifier] = LazyLoad(
lambda identifier=basemodel_identifier, activations_model=activations_model, layers=layers:
ModelCommitment(identifier=identifier, activations_model=activations_model, layers=layers))

0 comments on commit 44f28e8

Please sign in to comment.