diff --git a/.coveragerc b/.coveragerc
index d5800712..a9a72c98 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -1,4 +1,5 @@
 [run]
 omit =
     */tests/*
-    micropsi_server/bottle.py
\ No newline at end of file
+    micropsi_server/bottle.py
+    */conftest.py
diff --git a/.gitignore b/.gitignore
index e3723b2d..da83dc01 100644
--- a/.gitignore
+++ b/.gitignore
@@ -25,3 +25,4 @@
 /*.sublime-workspace
 micropsi.log
 /test-data/
+pip-selfcheck.json
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 95fb49f9..61248979 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,4 +1,17 @@
+0.8-alpha6 (2016-04-22)
+==========
+
+ * Operations for selections of nodes/nodespaces
+ * Test infrastructure for nodenets and native modules
+ * Gradient descent native modules built-in
+ * Nodenet/native module data structure changed
+ * Faster sensors for theano
+ * Configurable worlds
+ * New timeseries world
+ * Netapi console (only when serving on localhost)
+
+
 0.7-alpha5 (2016-02-04)
 ==========
diff --git a/Makefile b/Makefile
index 1239b23a..ec457e7e 100644
--- a/Makefile
+++ b/Makefile
@@ -17,9 +17,17 @@ clean:
	rm -rf include lib .Python bin

 tests:
-	bin/py.test
+	-bin/py.test
+	-bin/py.test --agents

 test-coverage:
	bin/py.test --cov micropsi_core --cov micropsi_server --cov-report html

-.PHONY: run
\ No newline at end of file
+test-toolkit:
+	bin/py.test
+
+test-agents:
+	bin/py.test --agents
+
+
+.PHONY: run
diff --git a/README.md b/README.md
index 72cff51e..f43b21c4 100644
--- a/README.md
+++ b/README.md
@@ -7,7 +7,7 @@ For more information on the cognitive architecture visit [micropsi.com](http://w
 Prerequisites
 -----
-* Python3
+* Python3 (tested with 3.4.3 and 3.5.1)
 * On Windows, we recommend downloading and installing [WinPython 3.4.3.7](http://winpython.github.io/)
diff --git a/README_additional_configuration.md b/README_additional_configuration.md
index 400df4ec..2f4d9a87 100644
--- a/README_additional_configuration.md
+++ b/README_additional_configuration.md
@@ -37,7 +37,7 @@ Windows support for advanced features is experimental.
 * From the mingw Folder, add the `bin` Folder to your PATH environment variable
 * Install pycrypto for python3.4. Get one of the [pycrypto windows binaries](https://github.com/axper/python3-pycrypto-windows-installer), open the WinPython Control Panel, click "Add Packages", select the downloaded pycrypto installer, and click "Install packages"
 * now you can install our modified spock via
-`pip install -e -e git+https://github.com/micropsi-industries/spock.git#egg=spock-dev`
+`pip install -e git+https://github.com/micropsi-industries/spock.git#egg=spock-dev`
 * this should lead to a working MicroPsi with Theano and minecraft support.
 * install the optional packages with `pip install cherrypy pytest mock webtest`
 * run `python start_micropsi_server.py`
diff --git a/configuration.py b/configuration.py
index 25958ed6..6ff5d240 100644
--- a/configuration.py
+++ b/configuration.py
@@ -24,7 +24,7 @@
     warnings.warn('Can not read config from inifile %s' % filename)
     raise RuntimeError('Can not read config from inifile %s' % filename)

-config['micropsi2']['version'] = "0.7-alpha5"
+config['micropsi2']['version'] = "0.8-alpha6"
 config['micropsi2']['apptitle'] = "MicroPsi"

 homedir = config['micropsi2']['data_directory'].startswith('~')
@@ -43,6 +43,6 @@
         config['logging'][level] = 'WARNING'

 config.add_section('paths')
-config['paths']['resource_path'] = os.path.join(os.path.dirname(__file__), data_path)
+config['paths']['data_directory'] = os.path.join(os.path.dirname(__file__), data_path)
 config['paths']['usermanager_path'] = os.path.join(os.path.dirname(__file__), 'resources', 'user-db.json')
 config['paths']['server_settings_path'] = os.path.join(os.path.dirname(__file__), 'resources', 'server-config.json')
diff --git a/conftest.py b/conftest.py
index ec3e6998..f321b8a9 100644
--- a/conftest.py
+++ b/conftest.py
@@ -4,59 +4,63 @@
 import pytest
 import logging

+try:
+    import theano
+    engine_defaults = "dict_engine,theano_engine"
+except ImportError:
+    engine_defaults = "dict_engine"

-testpath = os.path.join('.', 'test-data')
+testpath = os.path.abspath(os.path.join('.', 'test-data'))
 try:
     shutil.rmtree(testpath)
 except OSError:
     pass

-# override config
-from configuration import config
-config['paths']['resource_path'] = testpath
-config['paths']['server_settings_path'] = os.path.join(config['paths']['resource_path'], 'server_config.json')
-config['paths']['usermanager_path'] = os.path.join(config['paths']['resource_path'], 'user-db.json')
-config['micropsi2']['single_agent_mode'] = ''
-if 'theano' in config:
-    config['theano']['initial_number_of_nodes'] = '50'
-
 from micropsi_core import runtime as micropsi

+from micropsi_core.runtime import cfg
+original_ini_data_directory = cfg['paths']['data_directory']

-# create testuser
-from micropsi_server.micropsi_app import usermanager
+cfg['paths']['data_directory'] = testpath
+cfg['paths']['server_settings_path'] = os.path.join(testpath, 'server_cfg.json')
+cfg['paths']['usermanager_path'] = os.path.join(testpath, 'user-db.json')
+cfg['micropsi2']['single_agent_mode'] = ''
+if 'theano' in cfg:
+    cfg['theano']['initial_number_of_nodes'] = '50'

-usermanager.create_user('Pytest User', 'test', 'Administrator', uid='Pytest User')
-user_token = usermanager.start_session('Pytest User', 'test', True)
-
-# reset logging levels
-logging.getLogger('system').setLevel(logging.WARNING)
-logging.getLogger('world').setLevel(logging.WARNING)

 world_uid = 'WorldOfPain'
 nn_uid = 'Testnet'

-nodetype_file = os.path.join(config['paths']['resource_path'], 'nodetypes.json')
-nodefunc_file = os.path.join(config['paths']['resource_path'], 'nodefunctions.py')
-recipes_file = os.path.join(config['paths']['resource_path'], 'recipes.py')

+def pytest_addoption(parser):
+    """register argparse-style options and ini-style config values."""
+    parser.addoption("--engine", action="store", default=engine_defaults,
+                     help="The engine that should be used for this testrun.")
+    parser.addoption("--agents", action="store_true",
+                     help="Only test agent code from the data_directory")

-try:
-    import theano
-    engine_defaults = "dict_engine,theano_engine"
-except:
-    engine_defaults = "dict_engine"

+def pytest_cmdline_main(config):
+    """ called for performing the main command line action. The default
+    implementation will invoke the configure hooks and runtest_mainloop. """
+    if config.getoption('agents'):
+        config.args = [original_ini_data_directory]
+        micropsi.initialize(persistency_path=testpath, resource_path=original_ini_data_directory)
+    else:
+        micropsi.initialize(persistency_path=testpath)

+    from micropsi_server.micropsi_app import usermanager

-def set_logging_levels():
-    logging.getLogger('system').setLevel(logging.WARNING)
-    logging.getLogger('world').setLevel(logging.WARNING)
-    micropsi.cfg['logging']['level_agent'] = 'WARNING'
+    usermanager.create_user('Pytest User', 'test', 'Administrator', uid='Pytest User')
+    usermanager.start_session('Pytest User', 'test', True)

+    set_logging_levels()

-def pytest_addoption(parser):
-    parser.addoption("--engine", action="store", default=engine_defaults,
-                     help="The engine that should be used for this testrun.")

+def pytest_configure(config):
+    # register an additional marker
+    config.addinivalue_line("markers",
+                            "engine(name): mark test to run only on the specified engine")


 def pytest_generate_tests(metafunc):
@@ -70,99 +74,102 @@ def pytest_generate_tests(metafunc):
     metafunc.parametrize("engine", engines, scope="session")


-def pytest_configure(config):
-    # register an additional marker
-    config.addinivalue_line("markers",
-                            "engine(name): mark test to run only on the specified engine")
-
-
 def pytest_runtest_setup(item):
     engine_marker = item.get_marker("engine")
     if engine_marker is not None:
         engine_marker = engine_marker.args[0]
         if engine_marker != item.callspec.params['engine']:
             pytest.skip("test requires engine %s" % engine_marker)
+    for item in os.listdir(testpath):
+        if item != 'worlds' and item != 'nodenets':
+            path = os.path.join(testpath, item)
+            if os.path.isdir(path):
+                shutil.rmtree(path)
+            else:
+                os.remove(path)
+    os.mkdir(os.path.join(testpath, 'Test'))
+    open(os.path.join(testpath, 'Test', '__init__.py'), 'w').close()
+    micropsi.reload_native_modules()
     micropsi.logger.clear_logs()
+    micropsi.set_runner_properties(1, 1)
+    set_logging_levels()


-def pytest_runtest_teardown(item, nextitem):
-    if nextitem is None:
-        print("DELETING ALL STUFF")
-        shutil.rmtree(config['paths']['resource_path'])
-    else:
-        uids = list(micropsi.nodenets.keys())
-        for uid in uids:
-            micropsi.delete_nodenet(uid)
-        if os.path.isfile(nodetype_file):
-            os.remove(nodetype_file)
-        if os.path.isfile(nodefunc_file):
-            os.remove(nodefunc_file)
-        if os.path.isfile(recipes_file):
-            os.remove(recipes_file)
-        micropsi.reload_native_modules()
-    set_logging_levels()
+def pytest_internalerror(excrepr, excinfo):
+    """ called for internal errors. """
+    micropsi.kill_runners()
+    shutil.rmtree(testpath)


-@pytest.fixture(scope="session")
-def resourcepath():
-    return config['paths']['resource_path']
+def pytest_keyboard_interrupt(excinfo):
+    """ called for keyboard interrupt. """
+    micropsi.kill_runners()
+    shutil.rmtree(testpath)
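The hooks above mean that every test requesting the session-scoped `engine` fixture runs once per available engine, while `pytest_runtest_setup` skips parametrizations whose `engine` marker names a different engine. A hypothetical test using this machinery (the test body is illustrative; `runtime` and `test_nodenet` are the fixtures defined below):

```python
import pytest

@pytest.mark.engine("theano_engine")
def test_runs_on_theano_only(runtime, test_nodenet, engine):
    # "engine" is parametrized by pytest_generate_tests; the marker makes
    # pytest_runtest_setup skip the dict_engine run of this test
    nodenet = runtime.get_nodenet(test_nodenet)
    assert nodenet.engine == "theano_engine"
```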
""" + micropsi.kill_runners() + shutil.rmtree(testpath) -@pytest.fixture() -def nodetype_def(): - return nodetype_file +def set_logging_levels(): + """ sets the logging levels of the default loggers back to WARNING """ + logging.getLogger('system').setLevel(logging.WARNING) + logging.getLogger('world').setLevel(logging.WARNING) + micropsi.cfg['logging']['level_agent'] = 'WARNING' -@pytest.fixture -def nodefunc_def(): - return nodefunc_file +@pytest.fixture(scope="session") +def resourcepath(): + """ Fixture: the resource path """ + return micropsi.RESOURCE_PATH -@pytest.fixture -def recipes_def(): - return recipes_file +@pytest.fixture(scope="session") +def runtime(): + """ Fixture: The micropsi runtime """ + return micropsi -@pytest.fixture(scope="function") +@pytest.yield_fixture(scope="function") def test_world(request): + """ + Fixture: A test world of type Island + """ global world_uid - worlds = micropsi.get_available_worlds("Pytest User") - if world_uid not in worlds: - success, world_uid = micropsi.new_world("World of Pain", "Island", "Pytest User", uid=world_uid) - - def fin(): - try: - micropsi.delete_world(world_uid) - except: - pass # world was deleted in test - request.addfinalizer(fin) - return world_uid + success, world_uid = micropsi.new_world("World of Pain", "Island", "Pytest User", uid=world_uid) + yield world_uid + try: + micropsi.delete_world(world_uid) + except: + pass @pytest.fixture(scope="function") +def default_world(request): + """ + Fixture: A test world of type Island + """ + for uid in micropsi.worlds: + if micropsi.worlds[uid].data['world_type'] == 'World': + return uid + + +@pytest.yield_fixture(scope="function") def test_nodenet(request, test_world, engine): + """ + Fixture: A completely empty nodenet without a worldadapter + """ global nn_uid - nodenets = micropsi.get_available_nodenets("Pytest User") or {} - if nn_uid not in nodenets: - success, nn_uid = micropsi.new_nodenet("Testnet", engine=engine, owner="Pytest User", uid='Testnet') - micropsi.save_nodenet(nn_uid) - if nn_uid not in micropsi.nodenets: - micropsi.load_nodenet(nn_uid) - return nn_uid + success, nn_uid = micropsi.new_nodenet("Testnet", engine=engine, owner="Pytest User", uid='Testnet') + micropsi.save_nodenet(nn_uid) + yield nn_uid + try: + micropsi.delete_nodenet(nn_uid) + except: + pass @pytest.fixture(scope="function") def node(request, test_nodenet): + """ + Fixture: A Pipe node with a genloop + """ res, uid = micropsi.add_node(test_nodenet, 'Pipe', [10, 10, 10], name='N1') micropsi.add_link(test_nodenet, uid, 'gen', uid, 'gen') return uid - - -def pytest_internalerror(excrepr, excinfo): - """ called for internal errors. """ - shutil.rmtree(config['paths']['resource_path']) - - -def pytest_keyboard_interrupt(excinfo): - """ called for keyboard interrupt. """ - shutil.rmtree(config['paths']['resource_path']) diff --git a/micropsi_core/_runtime_api_monitors.py b/micropsi_core/_runtime_api_monitors.py index a016534a..2cfc7afa 100644 --- a/micropsi_core/_runtime_api_monitors.py +++ b/micropsi_core/_runtime_api_monitors.py @@ -12,59 +12,59 @@ def add_gate_monitor(nodenet_uid, node_uid, gate, sheaf=None, name=None, color=None): """Adds a continuous monitor to the activation of a gate. The monitor will collect the activation - value in every simulation step. + value in every calculation step. 
Returns the uid of the new monitor.""" - nodenet = micropsi_core.runtime.nodenets[nodenet_uid] + nodenet = micropsi_core.runtime.get_nodenet(nodenet_uid) return nodenet.add_gate_monitor(node_uid, gate, sheaf=sheaf, name=name, color=color) def add_slot_monitor(nodenet_uid, node_uid, slot, sheaf=None, name=None, color=None): """Adds a continuous monitor to the activation of a slot. The monitor will collect the activation - value in every simulation step. + value in every calculation step. Returns the uid of the new monitor.""" - nodenet = micropsi_core.runtime.nodenets[nodenet_uid] + nodenet = micropsi_core.runtime.get_nodenet(nodenet_uid) return nodenet.add_slot_monitor(node_uid, slot, sheaf=sheaf, name=name, color=color) def add_link_monitor(nodenet_uid, source_node_uid, gate_type, target_node_uid, slot_type, property, name, color=None): """Adds a continuous monitor to a link. You can choose to monitor either weight (default) or certainty - The monitor will collect respective value in every simulation step. + The monitor will collect respective value in every calculation step. Returns the uid of the new monitor.""" - nodenet = micropsi_core.runtime.nodenets[nodenet_uid] + nodenet = micropsi_core.runtime.get_nodenet(nodenet_uid) return nodenet.add_link_monitor(source_node_uid, gate_type, target_node_uid, slot_type, property, name, color=color) def add_modulator_monitor(nodenet_uid, modulator, name, color=None): """Adds a continuous monitor to a global modulator. - The monitor will collect respective value in every simulation step. + The monitor will collect respective value in every calculation step. Returns the uid of the new monitor.""" - nodenet = micropsi_core.runtime.nodenets[nodenet_uid] + nodenet = micropsi_core.runtime.get_nodenet(nodenet_uid) return nodenet.add_modulator_monitor(modulator, name, color=color) def add_custom_monitor(nodenet_uid, function, name, color=None): """Adds a continuous monitor, that evaluates the given python-code and collects the - return-value for every simulation step. + return-value for every calculation step. 
Returns the uid of the new monitor.""" - nodenet = micropsi_core.runtime.nodenets[nodenet_uid] + nodenet = micropsi_core.runtime.get_nodenet(nodenet_uid) return nodenet.add_custom_monitor(function, name, color=color) def remove_monitor(nodenet_uid, monitor_uid): """Deletes an activation monitor.""" - micropsi_core.runtime.nodenets[nodenet_uid].remove_monitor(monitor_uid) + micropsi_core.runtime.get_nodenet(nodenet_uid).remove_monitor(monitor_uid) return True def clear_monitor(nodenet_uid, monitor_uid): """Leaves the monitor intact, but deletes the current list of stored values.""" - micropsi_core.runtime.nodenets[nodenet_uid].get_monitor(monitor_uid).clear() + micropsi_core.runtime.get_nodenet(nodenet_uid).get_monitor(monitor_uid).clear() return True def export_monitor_data(nodenet_uid, monitor_uid=None, monitor_from=0, monitor_count=-1): """Returns a string with all currently stored monitor data for the given nodenet.""" - nodenet = micropsi_core.runtime.nodenets[nodenet_uid] + nodenet = micropsi_core.runtime.get_nodenet(nodenet_uid) if monitor_from == 0 and monitor_count > 0: monitor_count = min(nodenet.current_step + 1, monitor_count) monitor_from = max(0, nodenet.current_step + 1 - monitor_count) @@ -97,10 +97,11 @@ def export_monitor_data(nodenet_uid, monitor_uid=None, monitor_from=0, monitor_c def get_monitor_data(nodenet_uid, step=0, monitor_from=0, monitor_count=-1): """Returns monitor and nodenet data for drawing monitor plots for the current step, - if the current step is newer than the supplied simulation step.""" + if the current step is newer than the supplied calculation step.""" + nodenet = micropsi_core.runtime.get_nodenet(nodenet_uid) data = { - 'nodenet_running': micropsi_core.runtime.nodenets[nodenet_uid].is_active, - 'current_step': micropsi_core.runtime.nodenets[nodenet_uid].current_step + 'nodenet_running': nodenet.is_active, + 'current_step': nodenet.current_step } if step > data['current_step']: return data diff --git a/micropsi_core/_runtime_api_world.py b/micropsi_core/_runtime_api_world.py index edf01e77..c8a088ed 100644 --- a/micropsi_core/_runtime_api_world.py +++ b/micropsi_core/_runtime_api_world.py @@ -49,16 +49,18 @@ def get_world_properties(world_uid): return data -def get_worldadapters(world_uid): - """Returns the world adapters available in the given world""" - +def get_worldadapters(world_uid, nodenet_uid=None): + """ Returns the world adapters available in the given world. 
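All monitor entry points now resolve the nodenet through `get_nodenet` instead of indexing `runtime.nodenets` directly. A minimal sketch of the API as it appears above (the uids are placeholders for an existing nodenet and node):

```python
from micropsi_core import runtime

def record_gen_activation(nodenet_uid, node_uid):
    # attach a monitor to the node's gen gate; it collects one value
    # per calculation step from here on
    monitor_uid = runtime.add_gate_monitor(nodenet_uid, node_uid, 'gen', name='gen-activation')
    # later: fetch everything the monitor has collected so far
    return runtime.export_monitor_data(nodenet_uid, monitor_uid=monitor_uid)
```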
diff --git a/micropsi_core/_runtime_api_world.py b/micropsi_core/_runtime_api_world.py
index edf01e77..c8a088ed 100644
--- a/micropsi_core/_runtime_api_world.py
+++ b/micropsi_core/_runtime_api_world.py
@@ -49,16 +49,18 @@ def get_world_properties(world_uid):
     return data


-def get_worldadapters(world_uid):
-    """Returns the world adapters available in the given world"""
-
+def get_worldadapters(world_uid, nodenet_uid=None):
+    """ Returns the world adapters available in the given world. Provide an optional nodenet_uid of an agent
+    in the given world to obtain datasources and datatargets for the agent's worldadapter"""
     data = {}
     if world_uid in micropsi_core.runtime.worlds:
-        for name, worldadapter in micropsi_core.runtime.worlds[world_uid].supported_worldadapters.items():
-            data[name] = {
-                'datasources': worldadapter.supported_datasources,
-                'datatargets': worldadapter.supported_datatargets
-            }
+        world = micropsi_core.runtime.worlds[world_uid]
+        for name, worldadapter in world.supported_worldadapters.items():
+            data[name] = {'description': worldadapter.__doc__}
+        if nodenet_uid and nodenet_uid in world.agents:
+            agent = world.agents[nodenet_uid]
+            data[agent.__class__.__name__]['datasources'] = agent.get_available_datasources()
+            data[agent.__class__.__name__]['datatargets'] = agent.get_available_datatargets()
     return data
@@ -86,7 +88,7 @@
     return micropsi_core.runtime.worlds[world_uid].set_agent_properties(uid, position, orientation, name, parameters)


-def new_world(world_name, world_type, owner="", uid=None):
+def new_world(world_name, world_type, owner="", uid=None, config={}):
     """Creates a new world and registers it.

     Arguments:
@@ -101,10 +103,15 @@
     if uid is None:
         uid = tools.generate_uid()

-    filename = os.path.join(micropsi_core.runtime.RESOURCE_PATH, micropsi_core.runtime.WORLD_DIRECTORY, uid + ".json")
+    if world_type.startswith('Minecraft'):
+        for wuid in micropsi_core.runtime.worlds:
+            if micropsi_core.runtime.worlds[wuid].__class__.__name__.startswith('Minecraft'):
+                raise RuntimeError("Only one instance of a minecraft world is supported right now")
+
+    filename = os.path.join(micropsi_core.runtime.PERSISTENCY_PATH, micropsi_core.runtime.WORLD_DIRECTORY, uid + ".json")
     micropsi_core.runtime.world_data[uid] = Bunch(uid=uid, name=world_name, world_type=world_type, filename=filename,
                                                   version=1,
-                                                  owner=owner)
+                                                  owner=owner, config=config)
     with open(filename, 'w+') as fp:
         fp.write(json.dumps(micropsi_core.runtime.world_data[uid], sort_keys=True, indent=4))
     try:
@@ -118,9 +125,11 @@
 def delete_world(world_uid):
     """Removes the world with the given uid from the server (and unloads it from memory if it is running.)"""
     world = micropsi_core.runtime.worlds[world_uid]
-    for uid in world.agents:
-        world.unregister_agent(micropsi_core.runtime.nodenets[uid])
-        nodenets[uid].world = None
+    for uid in list(world.agents.keys()):
+        world.unregister_nodenet(uid)
+        micropsi_core.runtime.nodenets[uid].worldadapter_instance = None
+        micropsi_core.runtime.nodenets[uid].world = None
+    micropsi_core.runtime.worlds[world_uid].__del__()
     del micropsi_core.runtime.worlds[world_uid]
     os.remove(micropsi_core.runtime.world_data[world_uid].filename)
     del micropsi_core.runtime.world_data[world_uid]
@@ -149,16 +158,30 @@
     return True


+def set_world_data(world_uid, data):
+    """ Sets some data for the world. Whatever the world supports"""
+    if world_uid not in micropsi_core.runtime.worlds:
+        raise KeyError("World not found")
+    micropsi_core.runtime.worlds[world_uid].set_user_data(data)
+    return True
+
+
 def revert_world(world_uid):
     """Reverts the world to the last saved state."""
     data = micropsi_core.runtime.world_data[world_uid]
-    micropsi_core.runtime.worlds[world_uid] = get_world_class_from_name(data.world_type)(**data)
+    if world_uid in micropsi_core.runtime.worlds:
+        micropsi_core.runtime.worlds[world_uid].__del__()
+        del micropsi_core.runtime.worlds[world_uid]
+    if data.get('world_type'):
+        micropsi_core.runtime.worlds[world_uid] = get_world_class_from_name(data.world_type)(**data)
+    else:
+        micropsi_core.runtime.worlds[world_uid] = world.World(**data)
     return True


 def save_world(world_uid):
     """Stores the world state on the server."""
-    with open(os.path.join(micropsi_core.runtime.RESOURCE_PATH, micropsi_core.runtime.WORLD_DIRECTORY,
+    with open(os.path.join(micropsi_core.runtime.PERSISTENCY_PATH, micropsi_core.runtime.WORLD_DIRECTORY,
                            world_uid) + '.json', 'w+') as fp:
         fp.write(json.dumps(micropsi_core.runtime.worlds[world_uid].data, sort_keys=True, indent=4))
     return True
@@ -172,14 +195,14 @@ def export_world(world_uid):
 def import_world(worlddata, owner=None):
     """Imports a JSON string with world data. May not overwrite an existing world."""
     data = json.loads(worlddata)
-    if not 'uid' in data:
+    if 'uid' not in data:
         data['uid'] = tools.generate_uid()
     else:
         if data['uid'] in micropsi_core.runtime.worlds:
             raise RuntimeError("A world with this ID already exists.")
     if owner is not None:
         data['owner'] = owner
-    filename = os.path.join(micropsi_core.runtime.RESOURCE_PATH, micropsi_core.runtime.WORLD_DIRECTORY, data['uid'] + '.json')
+    filename = os.path.join(micropsi_core.runtime.PERSISTENCY_PATH, micropsi_core.runtime.WORLD_DIRECTORY, data['uid'] + '.json')
     data['filename'] = filename
     with open(filename, 'w+') as fp:
         fp.write(json.dumps(data))
@@ -199,16 +222,7 @@ def get_world_class_from_name(world_type):


 def get_available_world_types():
-    """Returns the names of the available world types"""
+    """Returns a mapping of the available world type names to their classes"""
     import importlib
     from micropsi_core.world.world import World
-    return [cls.__name__ for cls in tools.itersubclasses(vars()['World'])]
-    for cls in tools.itersubclasses(World):
-        if 'minecraft' in cls.__name__.toLower():
-            try:
-                import spock
-            except:
-                # requirement not satisfied, ignore
-                continue
-            available_worlds.append(cls.__name__)
-    return available_worlds
+    return dict((cls.__name__, cls) for cls in tools.itersubclasses(vars()['World']))
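Worlds can now carry a `config` dict at creation time and accept arbitrary runtime data via `set_world_data`. A short sketch of that flow using only calls shown in this patch (world name, type and data keys are illustrative):

```python
from micropsi_core import runtime

def make_configured_world():
    # new_world returns a (success, uid) pair, as used by conftest.py above
    success, world_uid = runtime.new_world("Playground", "Island", owner="admin", config={})
    runtime.set_world_data(world_uid, {"label": "demo"})  # forwarded to the world's set_user_data
    runtime.save_world(world_uid)
    return world_uid
```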
diff --git a/micropsi_core/config.py b/micropsi_core/config.py
index 0facde86..e5d62559 100644
--- a/micropsi_core/config.py
+++ b/micropsi_core/config.py
@@ -51,13 +51,14 @@ def __init__(self, config_path="config-data.json", auto_save=True):
         # check if we already have a configuration manager with this resource file
         absolute_path = os.path.abspath(config_path)
         if absolute_path in ConfigurationManager.instances:
-            raise RuntimeError("A configuration manager with this resource path already exists!")
+            logging.getLogger("system").warning("A configuration manager with this resource path already exists!")
+            # raise RuntimeError("A configuration manager with this resource path already exists!")
         ConfigurationManager.instances.append(absolute_path)
         self.key = absolute_path

         # set up persistence
-        micropsi_core.tools.mkdir(os.path.dirname(config_path))
+        os.makedirs(os.path.dirname(config_path), exist_ok=True)
         self.config_file_name = config_path
         self.auto_save = auto_save
diff --git a/micropsi_core/nodenet/dict_engine/dict_node.py b/micropsi_core/nodenet/dict_engine/dict_node.py
index 790757b3..e5ea9934 100644
--- a/micropsi_core/nodenet/dict_engine/dict_node.py
+++ b/micropsi_core/nodenet/dict_engine/dict_node.py
@@ -522,7 +522,7 @@ class DictSlot(Slot):
         type: a string that determines the type of the slot
         node: the parent node of the slot
         activation: a numerical value which is the sum of all incoming activations
-        current_step: the simulation step when the slot last received activation
+        current_step: the calculation step when the slot last received activation
         incoming: a dictionary of incoming links together with the respective activation received by them
     """
diff --git a/micropsi_core/nodenet/dict_engine/dict_nodenet.py b/micropsi_core/nodenet/dict_engine/dict_nodenet.py
index 91f93de7..15e185b5 100644
--- a/micropsi_core/nodenet/dict_engine/dict_nodenet.py
+++ b/micropsi_core/nodenet/dict_engine/dict_nodenet.py
@@ -145,7 +145,7 @@ class DictNodenet(Nodenet):
         world: an environment for the node net
         worldadapter: an actual world adapter object residing in a world implementation, provides interface
         owner: an id of the user who created the node net
-        step: the current simulation step of the node net
+        step: the current calculation step of the node net
     """

     @property
@@ -156,7 +156,7 @@ def engine(self):
     @property
     def current_step(self):
         return self._step

-    def __init__(self, name="", worldadapter="Default", world=None, owner="", uid=None, native_modules={}):
+    def __init__(self, name="", worldadapter="Default", world=None, owner="", uid=None, native_modules={}, use_modulators=True, worldadapter_instance=None):
         """Create a new MicroPsi agent.

         Arguments:
@@ -166,16 +166,15 @@ def __init__(self, name="", worldadapter="Default", world=None, owner="", uid=No
             uid (optional): unique handle of the agent; if none is given, it will be generated
         """

-        super(DictNodenet, self).__init__(name, worldadapter, world, owner, uid)
+        super(DictNodenet, self).__init__(name, worldadapter, world, owner, uid, use_modulators=use_modulators, worldadapter_instance=worldadapter_instance)

-        self.stepoperators = [DictPropagate(), DictCalculate(), DoernerianEmotionalModulators()]
+        self.stepoperators = [DictPropagate(), DictCalculate()]
+        if self.use_modulators:
+            self.stepoperators.append(DoernerianEmotionalModulators())
         self.stepoperators.sort(key=lambda op: op.priority)

         self._version = NODENET_VERSION  # used to check compatibility of the node net data
         self._step = 0
-        self._modulators = {
-            'por_ret_decay': 0.
-        }

         self._nodes = {}
         self._nodespaces = {}
@@ -195,7 +194,7 @@ def get_data(self, **params):
         data = super().get_data(**params)
         data['nodes'] = self.construct_nodes_dict(**params)
-        data['nodespaces'] = self.construct_nodespaces_dict("Root")
+        data['nodespaces'] = self.construct_nodespaces_dict("Root", transitive=True)
         data['version'] = self._version
         data['modulators'] = self.construct_modulators_dict()
         return data
@@ -205,6 +204,48 @@ def export_json(self):
         data['links'] = self.construct_links_list()
         return data

+    def get_nodes(self, nodespace_uids=[], include_links=True):
+        """
+        Returns a dict with contents for the given nodespaces
+        """
+        data = {}
+        data['nodes'] = {}
+        data['nodespaces'] = {}
+        followupnodes = []
+        fetch_all = False
+
+        if nodespace_uids == []:
+            nodespace_uids = self.get_nodespace_uids()
+            root = self.get_nodespace(None)
+            data['nodespaces'][root.uid] = root.get_data()
+            fetch_all = True
+        else:
+            nodespace_uids = [self.get_nodespace(uid).uid for uid in nodespace_uids]
+
+        for nodespace_uid in nodespace_uids:
+            data['nodespaces'].update(self.construct_nodespaces_dict(nodespace_uid))
+            nodespace = self.get_nodespace(nodespace_uid)
+            for uid in nodespace.get_known_ids(entitytype="nodes"):
+                node = self.get_node(uid)
+                data['nodes'][uid] = node.get_data(include_links=include_links)
+                if include_links and not fetch_all:
+                    followupnodes.extend(node.get_associated_node_uids())
+
+        if include_links:
+            for uid in set(followupnodes):
+                if uid not in data['nodes']:
+                    node = self.get_node(uid).get_data(include_links=True)
+                    for gate in list(node['links'].keys()):
+                        # filter instead of deleting while iterating, so no entry is skipped
+                        node['links'][gate] = [l for l in node['links'][gate]
+                                               if self._nodes[l['target_node_uid']].parent_nodespace in nodespace_uids]
+                        if len(node['links'][gate]) == 0:
+                            del node['links'][gate]
+                    data['nodes'][uid] = node
+
+        return data
+
     def save(self, filename):
         # dict_engine saves everything to json, just dump the json export
         with open(filename, 'w+') as fp:
@@ -270,11 +311,13 @@ def initialize_nodenet(self, initfrom):
             computation of the node net
         """

-        self._modulators = initfrom.get("modulators", {})
+        self._modulators.update(initfrom.get("modulators", {}))

         if initfrom.get('runner_condition'):
             self.set_runner_condition(initfrom['runner_condition'])

+        self._nodespace_ui_properties = initfrom.get('nodespace_ui_properties', {})
+
         # set up nodespaces; make sure that parent nodespaces exist before children are initialized
         self._nodespaces = {}
         self._nodespaces["Root"] = DictNodespace(self, None, [0, 0, 0], name="Root", uid="Root")
@@ -301,23 +344,28 @@ def construct_nodes_dict(self, max_nodes=-1, **params):
                 break
         return data

-    def construct_nodespaces_dict(self, nodespace_uid):
+    def construct_nodespaces_dict(self, nodespace_uid, transitive=False):
         data = {}
         if nodespace_uid is None:
             nodespace_uid = "Root"
-        for nodespace_candidate_uid in self.get_nodespace_uids():
-            is_in_hierarchy = False
-            if nodespace_candidate_uid == nodespace_uid:
-                is_in_hierarchy = True
-            else:
-                parent_uid = self.get_nodespace(nodespace_candidate_uid).parent_nodespace
-                while parent_uid is not None and parent_uid != nodespace_uid:
-                    parent_uid = self.get_nodespace(parent_uid).parent_nodespace
-                if parent_uid == nodespace_uid:
-                    is_in_hierarchy = True
-            if is_in_hierarchy:
-                data[nodespace_candidate_uid] = self.get_nodespace(nodespace_candidate_uid).get_data()
+        if transitive:
+            for nodespace_candidate_uid in self.get_nodespace_uids():
+                is_in_hierarchy = False
+                if nodespace_candidate_uid == nodespace_uid:
+                    is_in_hierarchy = True
+                else:
+                    parent_uid = self.get_nodespace(nodespace_candidate_uid).parent_nodespace
+                    while parent_uid is not None and parent_uid != nodespace_uid:
+                        parent_uid = self.get_nodespace(parent_uid).parent_nodespace
+                    if parent_uid == nodespace_uid:
+                        is_in_hierarchy = True
+
+                if is_in_hierarchy:
+                    data[nodespace_candidate_uid] = self.get_nodespace(nodespace_candidate_uid).get_data()
+        else:
+            for uid in self.get_nodespace(nodespace_uid).get_known_ids('nodespaces'):
+                data[uid] = self.get_nodespace(uid).get_data()
         return data

     def get_nodetype(self, type):
@@ -351,12 +399,15 @@ def get_nodespace_data(self, nodespace_uid, include_links):
                 data['nodes'][uid] = self.get_node(uid).get_data(include_links=include_links)
         return data

-    def get_activation_data(self, nodespace_uid=None, rounded=1):
+    def get_activation_data(self, nodespace_uids=[], rounded=1):
         activations = {}
-        if nodespace_uid is None:
+
+        node_ids = []
+        if nodespace_uids == []:
             node_ids = self._nodes.keys()
         else:
-            node_ids = self.get_nodespace(nodespace_uid).get_known_ids("nodes")
+            for nsuid in nodespace_uids:
+                node_ids.extend(self.get_nodespace(nsuid).get_known_ids("nodes"))

         for uid in node_ids:
             node = self.get_node(uid)
@@ -388,8 +439,9 @@
         del self._nodes[node_uid]
         self._track_deletion('nodes', node_uid)

-    def delete_nodespace(self, uid):
-        self.delete_node(uid)
+    def delete_nodespace(self, nodespace_uid):
+        self._nodespace_ui_properties.pop(nodespace_uid, None)
+        self.delete_node(nodespace_uid)

     def clear(self):
         super(DictNodenet, self).clear()
@@ -470,10 +522,7 @@ def merge_data(self, nodenet_data, keep_uids=False):
             self._monitors[mon.uid] = mon

     def step(self):
-        """perform a simulation step"""
-        if self.worldadapter_instance:
-            self.worldadapter_instance.snapshot()
-
+        """perform a calculation step"""
         with self.netlock:
             self._step += 1
@@ -623,30 +672,12 @@ def delete_link(self, source_node_uid, gate_type, target_node_uid, slot_type):
             source_node.unlink(gate_type, target_node_uid, slot_type)
         return True

-    def get_modulator(self, modulator):
-        """
-        Returns the numeric value of the given global modulator
-        """
-        return self._modulators.get(modulator, 1)
-
-    def change_modulator(self, modulator, diff):
-        """
-        Changes the value of the given global modulator by the value of diff
-        """
-        self._modulators[modulator] = self._modulators.get(modulator, 0) + diff
-
     def construct_modulators_dict(self):
         """
         Returns a new dict containing all modulators
         """
         return self._modulators.copy()

-    def set_modulator(self, modulator, value):
-        """
-        Changes the value of the given global modulator to the given value
-        """
-        self._modulators[modulator] = value
-
     def get_standard_nodetype_definitions(self):
         """
         Returns the standard node types supported by this nodenet
@@ -807,33 +838,45 @@ def get_available_gatefunctions(self):
         from micropsi_core.nodenet import gatefunctions
         return sorted([name for name, func in getmembers(gatefunctions, isfunction)])

-    def has_nodespace_changes(self, nodespace_uid, since_step):
-        return self.get_nodespace(nodespace_uid).contents_last_changed >= since_step
+    def has_nodespace_changes(self, nodespace_uids=[], since_step=0):
+        if nodespace_uids == []:
+            nodespace_uids = self.get_nodespace_uids()

-    def get_nodespace_changes(self, nodespace_uid, since_step):
-        ns = self.get_nodespace(nodespace_uid)
+        for nodespace_uid in nodespace_uids:
+            if self.get_nodespace(nodespace_uid).contents_last_changed >= since_step:
+                return True
+        return False
+
+    def get_nodespace_changes(self, nodespace_uids=[], since_step=0):
         result = {
             'nodes_dirty': {},
             'nodespaces_dirty': {},
             'nodes_deleted': [],
             'nodespaces_deleted': []
         }
+
+        if nodespace_uids == []:
+            nodespace_uids = self.get_nodespace_uids()
+        else:
+            nodespace_uids = [self.get_nodespace(uid).uid for uid in nodespace_uids]
+
         for i in range(since_step, self.current_step + 1):
             if i in self.deleted_items:
                 result['nodespaces_deleted'].extend(self.deleted_items[i].get('nodespaces_deleted', []))
                 result['nodes_deleted'].extend(self.deleted_items[i].get('nodes_deleted', []))
-        for uid in ns.get_known_ids():
-            if uid not in result['nodes_deleted'] and self.is_node(uid):
-                if self.get_node(uid).last_changed >= since_step:
-                    result['nodes_dirty'][uid] = self.get_node(uid).get_data(include_links=True)
-                    for assoc in self.get_node(uid).get_associated_node_uids():
-                        if self.get_node(assoc).parent_nodespace != ns.uid and assoc not in result['nodes_dirty']:
-                            result['nodes_dirty'][assoc] = self.get_node(assoc).get_data(include_links=True)
-
-            elif uid not in result['nodespaces_deleted'] and self.is_nodespace(uid):
-                if self.get_nodespace(uid).last_changed >= since_step:
-                    result['nodespaces_dirty'][uid] = self.get_nodespace(uid).get_data()
+        for nsuid in nodespace_uids:
+            for uid in self.get_nodespace(nsuid).get_known_ids():
+                if uid not in result['nodes_deleted'] and self.is_node(uid):
+                    if self.get_node(uid).last_changed >= since_step:
+                        result['nodes_dirty'][uid] = self.get_node(uid).get_data(include_links=True)
+                        for assoc in self.get_node(uid).get_associated_node_uids():
+                            if self.get_node(assoc).parent_nodespace not in nodespace_uids and assoc not in result['nodes_dirty']:
+                                result['nodes_dirty'][assoc] = self.get_node(assoc).get_data(include_links=True)
+
+                elif uid not in result['nodespaces_deleted'] and self.is_nodespace(uid):
+                    if self.get_nodespace(uid).last_changed >= since_step:
+                        result['nodespaces_dirty'][uid] = self.get_nodespace(uid).get_data()
         return result

     def get_dashboard(self):
diff --git a/micropsi_core/nodenet/dict_engine/dict_nodespace.py b/micropsi_core/nodenet/dict_engine/dict_nodespace.py
index f7f8a265..547f1e7f 100644
--- a/micropsi_core/nodenet/dict_engine/dict_nodespace.py
+++ b/micropsi_core/nodenet/dict_engine/dict_nodespace.py
@@ -33,15 +33,6 @@ def __init__(self, nodenet, parent_nodespace, position, name="", uid=None, index
         self.contents_last_changed = nodenet.current_step
         nodenet._register_nodespace(self)

-    def get_data(self):
-        return {
-            "uid": self.uid,
-            "index": self.index,
-            "name": self.name,
-            "position": self.position,
-            "parent_nodespace": self.parent_nodespace,
-        }
-
     def get_known_ids(self, entitytype=None):
         if entitytype:
             if entitytype not in self.__netentities:
diff --git a/micropsi_core/nodenet/native_modules.py b/micropsi_core/nodenet/native_modules.py
new file mode 100644
index 00000000..b8cc049a
--- /dev/null
+++ b/micropsi_core/nodenet/native_modules.py
@@ -0,0 +1,535 @@
+"""
+Builtin native modules
+
+Currently contains
+ * GradientDescent for 3 layers (input, hidden, output)
+ * GradientDescent for LSTMs
+"""
+
+import os
+
+nodetypes = {}
+
+try:
+    import numpy as np
+    import theano
+    numpy_installed = True
+except ImportError:
+    numpy_installed = False
+
+
+if numpy_installed:
+    # only register these native modules if we
+    # have theano and numpy installed.
+    nodetypes["GradientDescent"] = {
+        "name": "GradientDescent",
+        "engine": "theano_engine",
+        "slottypes": ["gen"],
+        "gatetypes": ["gen"],
+        "nodefunction_name": "gradient_descent",
+        "symbol": "☲",
+        "category": "nn_learning",
+        "path": os.path.abspath(__file__),
+        "parameters": [
+            "ae_type",
+            "adadelta_rho",
+            "adadelta_eps",
+            "check_grad",
+            "weight_decay",
+            "tied_weights",
+            "sparsity_value",
+            "sparsity_penalty",
+            "t",
+            "ctr",
+            "input_prefix",
+            "hidden_prefix",
+            "output_prefix",
+            "input_nodespace"
+        ],
+        "parameter_values": {
+            "ae_type": ["sparse", "denoising"],
+            "tied_weights": ["True", "False"],
+            "check_grad": ["yes", "no"]
+        },
+        "parameter_defaults": {
+            "ae_type": "denoising",
+            "tied_weights": "True",
+            "hidden_prefix": "hidden_1",
+            "output_prefix": "output_1"
+        }
+    }
netapi.logger.warn("Node net has no output names whose names start with '%s'.", output) + node.set_parameter('ctr', 0) + return + else: + netapi.logger.info("Initializing theano-based autoencoder backprop with layout: %i -> %i -> %i", + len_input, len_hidden, len_output) + + # get parameter values from node net + b_h_array = netapi.get_thetas(ns_hidden_uid, hidden) + b_o_array = netapi.get_thetas(ns_output_uid, output) + w_hi_array = netapi.get_link_weights(ns_input_uid, input_, ns_hidden_uid, hidden) + w_oh_array = netapi.get_link_weights(ns_hidden_uid, hidden, ns_output_uid, output) + + # declare shared variables ( shared b/w theano and node nets ) + a_i = node.a_i = theano.shared(value=a_i_array.astype(T.config.floatX), name="a_i", borrow=False) + a_h = node.a_h = theano.shared(value=a_h_array.astype(T.config.floatX), name="a_h", borrow=False) + a_o = node.a_o = theano.shared(value=a_o_array.astype(T.config.floatX), name="a_o", borrow=False) + b_h = node.b_h = theano.shared(value=b_h_array.astype(T.config.floatX), name="b_h", borrow=False) + b_o = node.b_o = theano.shared(value=b_o_array.astype(T.config.floatX), name="b_o", borrow=False) + w_hi = node.w_hi = theano.shared(value=w_hi_array.astype(T.config.floatX), name="w_hi", borrow=False) + w_oh = node.w_oh = theano.shared(value=w_oh_array.astype(T.config.floatX), name="w_oh", borrow=False) + + # write initial parameter values to shared variables + node.b_h.set_value(b_h_array, borrow=True) + node.b_o.set_value(b_o_array, borrow=True) + node.w_hi.set_value(w_hi_array, borrow=True) + node.w_oh.set_value(w_oh_array, borrow=True) + + # initialize accumulation variables for AdaDelta, ie. mean square gradients and mean square deltas + ms_grad_b_o = node.ms_grad_b_o = theano.shared(value=np.zeros_like(b_o_array), name="ms_grad_b_o", borrow=True) + ms_grad_w_oh = node.ms_grad_w_oh = theano.shared(value=np.zeros_like(w_oh_array), name="ms_grad_w_oh", borrow=True) + ms_grad_b_h = node.ms_grad_b_h = theano.shared(value=np.zeros_like(b_h_array), name="ms_grad_b_h", borrow=True) + ms_grad_w_hi = node.ms_grad_w_hi = theano.shared(value=np.zeros_like(w_hi_array), name="ms_grad_w_hi", borrow=True) + + ms_delta_b_o = node.ms_delta_b_o = theano.shared(value=np.zeros_like(b_o_array), name="ms_delta_b_o", borrow=True) + ms_delta_w_oh = node.ms_delta_w_oh = theano.shared(value=np.zeros_like(w_oh_array), name="ms_delta_w_oh", borrow=True) + ms_delta_b_h = node.ms_delta_b_h = theano.shared(value=np.zeros_like(b_h_array), name="ms_delta_b_h", borrow=True) + ms_delta_w_hi = node.ms_delta_w_hi = theano.shared(value=np.zeros_like(w_hi_array), name="ms_delta_w_hi", borrow=True) + + # make function parameters theano compatible + weight_decay = T.scalar("weight_decay", dtype=T.config.floatX) + sparsity_value = T.scalar("sparsity_value", dtype=T.config.floatX) + sparsity_penalty = T.scalar("sparsity_penalty", dtype=T.config.floatX) + ada_rho = T.scalar("ada_rho", dtype=T.config.floatX) + ada_eps = T.scalar("ada_eps", dtype=T.config.floatX) + + # declare the reconstruction error + error_term = T.sum(T.square(a_o - a_i)) / 2. # squared error + # error_term = -T.sum(a_i * T.log(a_o) + (1. - a_i) * T.log(1. - a_o)) # cross-entropy + + # use a weight constraint as a regularizer + weight_constraint = (weight_decay / 2.) 
* (T.sum(T.square(w_hi)) + T.sum(T.square(w_oh))) + + if sparse: # training criterion for a sparse autoencoder + + # save the average activation of hidden units; initialize to first activation received + avg_a_h = node.avg_a_h = theano.shared(value=a_h_array, name="avg_a_h", borrow=False) + new_avg_a_h = 0.95 * avg_a_h + (1 - 0.95) * a_h # for gradient checking, set new_avg_a_h = a_h + + rho = sparsity_value + information_gain = rho * T.log(rho / new_avg_a_h) + (1. - rho) * T.log((1. - rho) / (1. - new_avg_a_h)) + + sparsity_constraint = sparsity_penalty * T.sum(information_gain) + cost = error_term + weight_constraint + sparsity_constraint + + else: # training criterion for a denoising autoencoder + + cost = error_term + weight_constraint + + node.cost = theano.function([weight_decay, sparsity_value, sparsity_penalty], cost, on_unused_input='ignore') + node.error = theano.function([], error_term / len(b_h_array)) + + # compute gradients + sigmoid_deriv_a_o = a_o * (1. - a_o) + grad_o = (a_o - a_i) * sigmoid_deriv_a_o # squared error # T.grad(cost, z_o) + # grad_o = ((a_i - a_o) / (a_o - a_o**2)) * sigmoid_deriv_a_o # cross-entropy + + sigmoid_deriv_a_h = a_h * (1. - a_h) + + if sparse: + + grad_w_oh = T.dot(T.reshape(grad_o, (len_input, 1)), T.reshape(a_h, (1, len_hidden))) + weight_decay * w_oh + grad_sparsity = (- rho / new_avg_a_h + (1. - rho) / (1. - new_avg_a_h)).T + grad_h = (T.dot(w_oh.T, grad_o) + sparsity_penalty * grad_sparsity) * sigmoid_deriv_a_h + grad_w_hi = T.dot(T.reshape(grad_h, (len_hidden, 1)), T.reshape(a_i, (1, len_input))) + weight_decay * w_hi + + else: # denoising + + grad_w_oh = T.dot(T.reshape(grad_o, (len_input, 1)), T.reshape(a_h, (1, len_hidden))) + weight_decay * w_oh + grad_h = T.dot(w_oh.T, grad_o) * sigmoid_deriv_a_h + grad_w_hi = T.dot(T.reshape(grad_h, (len_hidden, 1)), T.reshape(a_i, (1, len_input))) + weight_decay * w_hi + + if tied_weights: + grad_w_oh = grad_w_oh + grad_w_hi.T + gradients = [grad_o, grad_w_oh, grad_h] + ms_grad = [ms_grad_b_o, ms_grad_w_oh, ms_grad_b_h] + ms_delta = [ms_delta_b_o, ms_delta_w_oh, ms_delta_b_h] + else: + gradients = [grad_o, grad_w_oh, grad_h, grad_w_hi] + ms_grad = [ms_grad_b_o, ms_grad_w_oh, ms_grad_b_h, ms_grad_w_hi] + ms_delta = [ms_delta_b_o, ms_delta_w_oh, ms_delta_b_h, ms_delta_w_hi] + + # update accumulation variables for AdaDelta and compute new deltas + # compute an exponentially decaying average of squared gradients + # ie. recent gradients are more important and the quantity doesn't continue to grow + # thereby allowing the learning rate to grow or shrink as time progresses ( rather than just shrink as in AdaGrad ) + new_ms_grad = [ada_rho * ms_g + (1 - ada_rho) * (g**2) for ms_g, g in zip(ms_grad, gradients)] + # Note: the square root of the mean squared gradients plus epsilon is effectively the RMS of the gradients + # epsilon is added ~"to start off the first iteration and to ensure progress when previous updates become small" + deltas = [(T.sqrt(ms_d + ada_eps) / T.sqrt(ms_g + ada_eps)) * g for ms_d, ms_g, g in zip(ms_delta, new_ms_grad, gradients)] + # compute an exponentially decaying average of squared deltas -- this is to ensure correct units + new_ms_delta = [ada_rho * ms_d + (1 - ada_rho) * (d**2) for ms_d, d in zip(ms_delta, deltas)] + + # update parameters, ie. 
old_value - learning_rate * delta_value + if tied_weights: + new_b_o, new_w_oh, new_b_h = (old - update for old, update in zip([b_o, w_oh, b_h], deltas)) + new_w_hi = new_w_oh.T + new_ms_grad.append(new_ms_grad[1].T) + new_ms_delta.append(new_ms_delta[1].T) + gradients.append(gradients[1].T) + else: + new_b_o, new_w_oh, new_b_h, new_w_hi = (old - update for old, update in zip([b_o, w_oh, b_h, w_hi], deltas)) + + if sparse: + + update_function = theano.function([weight_decay, sparsity_value, sparsity_penalty, ada_rho, ada_eps], + None, + updates=[(b_o, new_b_o), + (w_oh, new_w_oh), + (b_h, new_b_h), + (w_hi, new_w_hi), + (avg_a_h, new_avg_a_h), + (ms_grad_b_o, new_ms_grad[0]), + (ms_grad_w_oh, new_ms_grad[1]), + (ms_grad_b_h, new_ms_grad[2]), + (ms_grad_w_hi, new_ms_grad[3]), + (ms_delta_b_o, new_ms_delta[0]), + (ms_delta_w_oh, new_ms_delta[1]), + (ms_delta_b_h, new_ms_delta[2]), + (ms_delta_w_hi, new_ms_delta[3])], + on_unused_input='ignore') + + else: # denoising + + update_function = theano.function([weight_decay, sparsity_value, sparsity_penalty, ada_rho, ada_eps], + None, + updates=[(b_o, new_b_o), + (w_oh, new_w_oh), + (b_h, new_b_h), + (w_hi, new_w_hi), + (ms_grad_b_o, new_ms_grad[0]), + (ms_grad_w_oh, new_ms_grad[1]), + (ms_grad_b_h, new_ms_grad[2]), + (ms_grad_w_hi, new_ms_grad[3]), + (ms_delta_b_o, new_ms_delta[0]), + (ms_delta_w_oh, new_ms_delta[1]), + (ms_delta_b_h, new_ms_delta[2]), + (ms_delta_w_hi, new_ms_delta[3])], + on_unused_input='ignore') + + node.get_updated_parameters = update_function + + # for gradient checking use the following function: + node.get_gradients = theano.function([weight_decay, sparsity_value, sparsity_penalty, ada_rho, ada_eps], + [gradients[0], gradients[1], gradients[2], gradients[3]], on_unused_input='ignore') + + node.initialized = True + + # get input activations from node net + a_i_array = netapi.get_activations(ns_input_uid, input_) + + # learn only if activation on the input layer has been persistent for as many steps as your neural net has layers + # Note: since we're currently using denoising autoencoders, this means persistent up to Bernoulli noise + try: + # check if activation has changed since the last step ( by testing if there's any different activation value ) + bool_idx = node.prev_a_i != a_i_array + input_changed = np.any(bool_idx) + + # if deviating activations were 0 ( i.e most likely the effect of Bernoulli noising ), assume no change + is_zero = node.prev_a_i[bool_idx] == 0 + # if is_zero contains elements but not all input activations and their values are all zero, assume no change + if len(is_zero) and len(is_zero) < len(a_i_array) and np.all(is_zero): + input_changed = False + except: + input_changed = True + + node.prev_a_i = a_i_array + + if input_changed: + node.set_parameter('ctr', 1) + else: + node.set_parameter('ctr', int(node.get_parameter('ctr')) + 1) + + # until counter equals number of layers, ie. the same activation has reached all layers, don't compute + if node.get_parameter('ctr') < 3: + return + + # get other activations from node net + a_h_array = netapi.get_activations(ns_hidden_uid, hidden) + a_o_array = netapi.get_activations(ns_output_uid, output) + + # define learning parameters + param = node.get_parameter('weight_decay') + if param is None: + weight_decay = netapi.floatX(4e-06) # 0.0001 . 1e-07 assuming batches of size 1000 . 
4e-06 assuming batches of size 256 + node.set_parameter('weight_decay', str(weight_decay)) # store as regular float to appease the serializer + else: + weight_decay = netapi.floatX(param) + + param = node.get_parameter('sparsity_value') + if param is None: + sparsity_value = netapi.floatX(0.05) + node.set_parameter('sparsity_value', str(sparsity_value)) + else: + sparsity_value = netapi.floatX(param) + + param = node.get_parameter('sparsity_penalty') + if param is None: + sparsity_penalty = netapi.floatX(0.001) # 3.0 . 0.003 assuming batches of size 1000 . 0.01 assuming batches of size 256 + node.set_parameter('sparsity_penalty', str(sparsity_penalty)) + else: + sparsity_penalty = netapi.floatX(param) + + param = node.get_parameter('adadelta_rho') + if param is None: + ada_rho = netapi.floatX(0.95) + node.set_parameter('adadelta_rho', str(ada_rho)) + else: + ada_rho = netapi.floatX(param) + + param = node.get_parameter('adadelta_eps') + if param is None: + ada_eps = netapi.floatX(1e-6) + node.set_parameter('adadelta_eps', str(ada_eps)) + else: + ada_eps = netapi.floatX(param) + + param = node.get_parameter('ae_type') + if param is None: + ae_type = 'sparse' # options: 'sparse', 'denoising' + node.set_parameter('ae_type', 'sparse') + else: + ae_type = str(param) + + param = node.get_parameter('t') + if param is None: + t = 0 + node.set_parameter('t', t) + else: + t = int(param) + + # gradient checking + # Note: use double precision when running gradient checks + if node.get_parameter('check_grad') == 'yes': + + # get values of biases and weights from node net + b_h_array = netapi.get_thetas(ns_hidden_uid, hidden) + b_o_array = netapi.get_thetas(ns_output_uid, output) + w_hi_array = netapi.get_link_weights(ns_input_uid, input_, ns_hidden_uid, hidden) + w_oh_array = netapi.get_link_weights(ns_hidden_uid, hidden, ns_output_uid, output) + + # compute the analytical gradient + anal_grad = compute_analytic_gradient( + netapi, node, a_i_array, a_h_array, a_o_array, b_h_array, b_o_array, w_hi_array, w_oh_array, + weight_decay, sparsity_value, sparsity_penalty, ada_rho, ada_eps) + + # compute the numerical gradient + num_grad = compute_numeric_gradient( + netapi, node, a_i_array, a_h_array, a_o_array, b_h_array, b_o_array, w_hi_array, w_oh_array, + weight_decay, sparsity_value, sparsity_penalty) + + # compare them + diff = np.linalg.norm(num_grad - anal_grad) / np.linalg.norm(num_grad + anal_grad) + print("Gradient difference: %e" % diff) # %.10f" % diff + print("The norm of the difference between numerical and analytical gradient should be < 1e-9\n") + + # write values to shared variables + node.a_i.set_value(a_i_array, borrow=True) + node.a_h.set_value(a_h_array, borrow=True) + node.a_o.set_value(a_o_array, borrow=True) + + # update values in shared variables ( using backpropgation of the gradients ) + node.get_updated_parameters(weight_decay, sparsity_value, sparsity_penalty, ada_rho, ada_eps) + + # write new parameter values to node net + netapi.set_thetas(ns_output_uid, output, node.b_o.get_value(borrow=True)) + netapi.set_link_weights(ns_hidden_uid, hidden, ns_output_uid, output, node.w_oh.get_value(borrow=True)) + netapi.set_thetas(ns_hidden_uid, hidden, node.b_h.get_value(borrow=True)) + netapi.set_link_weights(ns_input_uid, input_, ns_hidden_uid, hidden, node.w_hi.get_value(borrow=True)) + + error = float(node.error()) + # save current error as node parameter + node.set_parameter('error', error) + node.set_state('cumulative_error', node.get_state('cumulative_error') + error) + + t = 
int(node.get_parameter('t')) + if t % 1000 == 0: + netapi.logger.debug("Number of backprop steps computed %d" % t) + netapi.logger.debug("Average Error %.6f (Latest: 0=%.6f)" % ((node.get_state('cumulative_error') / 1000), error)) + node.set_state('cumulative_error', 0.0) + + # reset counter after successful backprop step; cf. must wait for new sensory activation to reach output layer + node.set_parameter('ctr', 0) + node.set_parameter('t', t + 1) + + +def sigmoid(z): + """ The sigmoid ( activation ) function. """ + return 1. / (1. + np.exp(-z)) + + +def compute_analytic_gradient(netapi, node, a_i, a_h, a_o, b_h, b_o, w_hi, w_oh, weight_decay, + sparsity_value, sparsity_penalty, ada_rho, ada_eps): + + # make sure borrow is False here because otherwise the buffers are overwritten and + # compute_numerical_gradient(..) still needs these same input values for proper comparison + node.a_i.set_value(a_i, borrow=False) + node.a_h.set_value(a_h, borrow=False) + node.a_o.set_value(a_o, borrow=False) + node.b_h.set_value(b_h, borrow=False) + node.b_o.set_value(b_o, borrow=False) + node.w_hi.set_value(w_hi, borrow=False) + node.w_oh.set_value(w_oh, borrow=False) + + delta_o, delta_w_oh, delta_h, delta_w_hi = \ + node.get_gradients(weight_decay, sparsity_value, sparsity_penalty, ada_rho, ada_eps) + + gradient = np.concatenate((delta_o, np.ravel(delta_w_oh), delta_h, np.ravel(delta_w_hi))) + + return gradient + + +def compute_numeric_gradient(netapi, node, a_i, a_h, a_o, b_h, b_o, w_hi, w_oh, weight_decay, sparsity_value, sparsity_penalty): + """ Compute numerical gradient for validating backprop implementation above. """ + + from copy import deepcopy + + # helper variables + epsilon = netapi.floatX(1e-4) + ni = len(b_o) + nh = len(b_h) + nih = ni * nh + + theta = np.concatenate((b_o, np.ravel(w_oh), b_h, np.ravel(w_hi))) + + n = theta.shape[0] + I = np.eye(n, dtype=netapi.floatX) + gradient = np.zeros(theta.shape, dtype=netapi.floatX) + + for i in range(n): + + eps_vec = np.array(I[:, i] * epsilon, dtype=netapi.floatX) + eps_plus = theta + eps_vec + eps_minus = theta - eps_vec + + # split theta into parts, recompute activations, update shared variables, compute cost + b_o_plus = eps_plus[: ni] + w_oh_plus = eps_plus[ni: ni + nih].reshape((ni, nh)) + b_h_plus = eps_plus[ni + nih: ni + nih + nh] + w_hi_plus = eps_plus[ni + nih + nh:].reshape((nh, ni)) + a_i_plus = deepcopy(a_i) + a_h_plus = np.ravel(sigmoid(w_hi_plus.dot(a_i_plus) + b_h_plus)) + a_o_plus = np.ravel(sigmoid(w_oh_plus.dot(a_h_plus) + b_o_plus)) + + node.a_i.set_value(a_i_plus, borrow=True) + node.a_h.set_value(a_h_plus, borrow=True) + node.a_o.set_value(a_o_plus, borrow=True) + node.b_h.set_value(b_h_plus, borrow=True) + node.b_o.set_value(b_o_plus, borrow=True) + node.w_hi.set_value(w_hi_plus, borrow=True) + node.w_oh.set_value(w_oh_plus, borrow=True) + + cost = node.cost(weight_decay, sparsity_value, sparsity_penalty) + + # split theta into parts, recompute activations, update shared variables, compute cost + b_o_minus = eps_minus[: ni] + w_oh_minus = eps_minus[ni: ni + nih].reshape((ni, nh)) + b_h_minus = eps_minus[ni + nih: ni + nih + nh] + w_hi_minus = eps_minus[ni + nih + nh:].reshape((nh, ni)) + a_i_minus = deepcopy(a_i) + a_h_minus = np.ravel(sigmoid(w_hi_minus.dot(a_i_minus) + b_h_minus)) + a_o_minus = np.ravel(sigmoid(w_oh_minus.dot(a_h_minus) + b_o_minus)) + + node.a_i.set_value(a_i_minus, borrow=True) + node.a_h.set_value(a_h_minus, borrow=True) + node.a_o.set_value(a_o_minus, borrow=True) + node.b_h.set_value(b_h_minus, 
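For reference, the AdaDelta recipe implemented with theano above, restated as plain numpy (the same equations as Zeiler 2012, matching the `ms_grad`/`ms_delta` updates in `gradient_descent`; the parameter update is then `param -= delta`):

```python
import numpy as np

def adadelta_step(grad, ms_grad, ms_delta, rho=0.95, eps=1e-6):
    """One AdaDelta update; returns the delta to subtract from the parameter."""
    ms_grad = rho * ms_grad + (1 - rho) * grad ** 2                    # decaying avg of squared gradients
    delta = np.sqrt(ms_delta + eps) / np.sqrt(ms_grad + eps) * grad    # RMS-scaled step
    ms_delta = rho * ms_delta + (1 - rho) * delta ** 2                 # decaying avg of squared deltas
    return delta, ms_grad, ms_delta
```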
+        node.b_o.set_value(b_o_minus, borrow=True)
+        node.w_hi.set_value(w_hi_minus, borrow=True)
+        node.w_oh.set_value(w_oh_minus, borrow=True)
+
+        cost_ = node.cost(weight_decay, sparsity_value, sparsity_penalty)
+
+        # central difference quotient: (J(theta + eps) - J(theta - eps)) / (2 * eps)
+        gradient[i] = (cost - cost_) / (2. * epsilon)
+
+        if i % 1000 == 0:
+            print("Computed numeric gradient for %d parameters" % i)
+
+    return gradient
diff --git a/micropsi_core/nodenet/netapi.py b/micropsi_core/nodenet/netapi.py
index 3eec25c7..a35c6e52 100644
--- a/micropsi_core/nodenet/netapi.py
+++ b/micropsi_core/nodenet/netapi.py
@@ -52,7 +52,7 @@ def get_node(self, uid):
        """
        return self.__nodenet.get_node(uid)

-    def get_nodes(self, nodespace=None, node_name_prefix=None, nodetype=None):
+    def get_nodes(self, nodespace=None, node_name_prefix=None, nodetype=None, sortby='ids'):
        """
        Returns a list of nodes in the given nodespace (all Nodespaces if None) whose names start with
        the given prefix (all if None)
@@ -71,6 +71,12 @@ def get_nodes(self, nodespace=None, node_name_prefix=None, nodetype=None):
            if node_name_prefix is not None and not node.name.startswith(node_name_prefix):
                continue
            nodes.append(node)
+
+        if sortby == 'ids':
+            nodes = sorted(nodes, key=lambda node: node.uid)
+        elif sortby == 'names':
+            nodes = sorted(nodes, key=lambda node: node.name)
+
        return nodes

    def get_nodes_in_gate_field(self, node, gate=None, no_links_to=None, nodespace=None):
@@ -218,11 +224,27 @@ def unlink(self, source_node, source_gate=None, target_node=None, target_slot=No
        target_node_uid = target_node.uid if target_node is not None else None
        source_node.unlink(source_gate, target_node_uid, target_slot)

+    def unlink_gate(self, node, gate_name, target_node_uid=None, target_slot_name=None):
+        """
+        Deletes all links from the given gate, optionally filtered by target_node_uid or target_slot_name
+        """
+        node.unlink(gate_name, target_node_uid=target_node_uid, slot_name=target_slot_name)
+
+    def unlink_slot(self, node, slot_name, source_node_uid=None, source_gate_name=None):
+        """
+        Deletes all links to the given slot, optionally filtered by source_node_uid or source_gate_name
+        """
+        for l in node.get_slot(slot_name).get_links():
+            if source_node_uid is None or l.source_node.uid == source_node_uid:
+                if source_gate_name is None or l.source_gate.type == source_gate_name:
+                    l.source_node.unlink(l.source_gate.type, target_node_uid=node.uid, slot_name=slot_name)
+
    def unlink_direction(self, node, gateslot=None):
        """
-        Deletes all links from a node ending at the given gate or originating at the given slot
+        Deletes all links from a node ending at the given slot or originating at the given gate
        Read this as 'delete all por linkage from this node'
        """
+        self.logger.warn("unlink_direction is deprecated. use unlink_gate and unlink_slot")
        node.unlink(gateslot)

        links_to_delete = set()
@@ -232,15 +254,13 @@
            links_to_delete.add(link)

        for link in links_to_delete:
-            link.source_node.unlink(gateslot, node.uid)
+            link.source_node.unlink(target_node_uid=node.uid, slot_name=gateslot)

    def link_actor(self, node, datatarget, weight=1, certainty=1, gate='sub', slot='sur'):
        """
        Links a node to an actor.
If no actor exists in the node's nodespace for the given datatarget, a new actor will be created, otherwise the first actor found will be used """ - if datatarget not in self.worldadapter.get_available_datatargets(): - raise KeyError("Data target %s not found" % datatarget) actor = None for uid, candidate in self.__nodenet.get_actors(node.parent_nodespace).items(): if candidate.get_parameter('datatarget') == datatarget: @@ -252,13 +272,11 @@ def link_actor(self, node, datatarget, weight=1, certainty=1, gate='sub', slot=' self.link(node, gate, actor, 'gen', weight, certainty) # self.link(actor, 'gen', node, slot) - def link_sensor(self, node, datasource, slot='sur'): + def link_sensor(self, node, datasource, slot='sur', weight=1): """ Links a node to a sensor. If no sensor exists in the node's nodespace for the given datasource, a new sensor will be created, otherwise the first sensor found will be used """ - if datasource not in self.worldadapter.get_available_datasources(): - raise KeyError("Data source %s not found" % datasource) sensor = None for uid, candidate in self.__nodenet.get_sensors(node.parent_nodespace).items(): if candidate.get_parameter('datasource') == datasource: @@ -267,7 +285,7 @@ def link_sensor(self, node, datasource, slot='sur'): sensor = self.create_node("Sensor", node.parent_nodespace, datasource) sensor.set_parameter('datasource', datasource) - self.link(sensor, 'gen', node, slot) + self.link(sensor, 'gen', node, slot, weight) def import_actors(self, nodespace, datatarget_prefix=None): """ @@ -378,6 +396,12 @@ def autoalign_nodespace(self, nodespace): if nodespace in self.__nodenet.get_nodespace_uids(): align(self.__nodenet, nodespace) + def autoalign_entities(self, nodespace, entity_uids): + """ Calls the autoalignment on the given entities in the given nodespace """ + from micropsi_core.nodenet.node_alignment import align + if nodespace in self.__nodenet.get_nodespace_uids(): + align(self.__nodenet, nodespace, entity_uids) + def get_modulator(self, modulator): """ Returns the numeric value of the given global modulator @@ -497,31 +521,31 @@ def get_node_ids(self, nodespace_uid, group): def add_gate_monitor(self, node_uid, gate, sheaf=None, name=None, color=None): """Adds a continuous monitor to the activation of a gate. The monitor will collect the activation - value in every simulation step. + value in every calculation step. Returns the uid of the new monitor.""" return self.__nodenet.add_gate_monitor(node_uid, gate, sheaf=sheaf, name=name, color=color) def add_slot_monitor(self, node_uid, slot, sheaf=None, name=None, color=None): """Adds a continuous monitor to the activation of a slot. The monitor will collect the activation - value in every simulation step. + value in every calculation step. Returns the uid of the new monitor.""" return self.__nodenet.add_slot_monitor(node_uid, slot, sheaf=sheaf, name=name, color=color) def add_link_monitor(self, source_node_uid, gate_type, target_node_uid, slot_type, property=None, name=None, color=None): """Adds a continuous monitor to a link. You can choose to monitor either weight (default) or certainty - The monitor will collect respective value in every simulation step. + The monitor will collect respective value in every calculation step. 
Returns the uid of the new monitor."""
        return self.__nodenet.add_link_monitor(source_node_uid, gate_type, target_node_uid, slot_type, property=property, name=name, color=color)

    def add_modulator_monitor(self, modulator, name, color=None):
        """Adds a continuous monitor to a global modulator.
-        The monitor will collect respective value in every simulation step.
+        The monitor will collect respective value in every calculation step.
        Returns the uid of the new monitor."""
        return self.__nodenet.add_modulator_monitor(modulator, name, color=color)

    def add_custom_monitor(self, function, name, color=None):
        """Adds a continuous monitor, that evaluates the given python-code and collects the
-        return-value for every simulation step.
+        return-value for every calculation step.
        Returns the uid of the new monitor."""
        return self.__nodenet.add_custom_monitor(function, name, color=color)

@@ -539,15 +563,23 @@ def set_dashboard_value(self, name, value):

    def decay_por_links(self, nodespace_uid):
        """ Decays all por-links in the given nodespace """
-        decay_factor = self.__nodenet.get_modulator('base_porret_decay_factor')
+        porretdecay = self.__nodenet.get_modulator('base_porret_decay_factor')
        nodes = self.get_nodes(nodespace=nodespace_uid, nodetype="Pipe")
-        pordecay = (1 - self.__nodenet.get_modulator('por_ret_decay'))
-        if decay_factor and pordecay is not None and pordecay > 0:
+        decay_factor = (1 - porretdecay)
+        if porretdecay != 0:
            for node in nodes:
                porgate = node.get_gate('por')
                for link in porgate.get_links():
                    if link.weight > 0:
-                        link._set_weight(max(link.weight * pordecay, 0))
+                        link._set_weight(max(link.weight * decay_factor, 0))
+
+    def get_nodespace_properties(self, nodespace_uid=None):
+        """ retrieve the ui properties for the given nodespace"""
+        return self.__nodenet.get_nodespace_properties(nodespace_uid)
+
+    def set_nodespace_properties(self, nodespace_uid, properties):
+        """ sets the ui properties for the given nodespace"""
+        self.__nodenet.set_nodespace_properties(nodespace_uid, properties)

    def announce_nodes(self, nodespace_uid, numer_of_nodes, average_element_per_node):
        pass
diff --git a/micropsi_core/nodenet/node.py b/micropsi_core/nodenet/node.py
index 40f4a841..793706d9 100644
--- a/micropsi_core/nodenet/node.py
+++ b/micropsi_core/nodenet/node.py
@@ -482,6 +482,9 @@ def open_sheaf(self, input_activation, sheaf="default"):
        """
        pass  # pragma: no cover

+    def __repr__(self):
+        return "<Gate %s of node %s>" % (self.type, self.node)
+

class Slot(metaclass=ABCMeta):
    """
@@ -547,6 +550,9 @@ def get_links(self):
        """
        pass  # pragma: no cover

+    def __repr__(self):
+        return "<Slot %s of node %s>" % (self.type, self.node)
+

class Nodetype(object):
    """Every node has a type, which is defined by its slot types, gate types, its node function and a list of
@@ -593,22 +599,27 @@ def nodefunction_name(self):

    @nodefunction_name.setter
    def nodefunction_name(self, nodefunction_name):
+        import os
+        from importlib.machinery import SourceFileLoader
        self._nodefunction_name = nodefunction_name
        try:
-            from micropsi_core.nodenet import nodefunctions
-            if hasattr(nodefunctions, nodefunction_name):
-                self.nodefunction = getattr(nodefunctions, nodefunction_name)
+            if self.path:
+                module = SourceFileLoader("nodefunctions", self.path).load_module()
+                self.nodefunction = getattr(module, nodefunction_name)
            else:
-                import nodefunctions as custom_nodefunctions
-                self.nodefunction = getattr(custom_nodefunctions, nodefunction_name)
+                from micropsi_core.nodenet import nodefunctions
+                if hasattr(nodefunctions, nodefunction_name):
+                    self.nodefunction =
getattr(nodefunctions, nodefunction_name) + else: + self.logger.warning("Can not find definition of nodefunction %s" % nodefunction_name) except (ImportError, AttributeError) as err: - self.logger.warn("Import error while importing node function: nodefunctions.%s %s" % (nodefunction_name, err)) + self.logger.warning("Import error while importing node function: nodefunctions.%s %s" % (nodefunction_name, err)) raise err def __init__(self, name, nodenet, slottypes=None, gatetypes=None, parameters=None, nodefunction_definition=None, nodefunction_name=None, parameter_values=None, gate_defaults=None, - symbol=None, shape=None, engine=None, parameter_defaults=None): + symbol=None, shape=None, engine=None, parameter_defaults=None, path='', category=''): """Initializes or creates a nodetype. Arguments: @@ -627,6 +638,9 @@ def __init__(self, name, nodenet, slottypes=None, gatetypes=None, parameters=Non self.slottypes = slottypes or {} self.gatetypes = gatetypes or {} + self.path = path + self.category = category + self.logger = nodenet.logger self.gate_defaults = {} diff --git a/micropsi_core/nodenet/node_alignment.py b/micropsi_core/nodenet/node_alignment.py index c93739ad..91d155a1 100644 --- a/micropsi_core/nodenet/node_alignment.py +++ b/micropsi_core/nodenet/node_alignment.py @@ -18,11 +18,12 @@ PREFERRED_WIDTH = 8.0 -def align(nodenet, nodespace): +def align(nodenet, nodespace, entity_uids=False): """aligns the entities in the given nodenet. Arguments: nodenet: current node net nodespace: the nodespace in which the entities are to be aligned + entity_uids: optional list of entity uids that should be aligned. If set, other entities remain untouched Returns: True on success, False otherwise """ @@ -32,18 +33,30 @@ def align(nodenet, nodespace): key=lambda i: nodenet.get_nodespace(i).index) unaligned_nodes = sorted(nodenet.get_nodespace(nodespace).get_known_ids('nodes'), key=lambda i: nodenet.get_node(i).index) - sensors = [s for s in unaligned_nodes if nodenet.get_node(s).type == "Sensor"] - actors = [a for a in unaligned_nodes if nodenet.get_node(a).type == "Actor"] - activators = [a for a in unaligned_nodes if nodenet.get_node(a).type == "Activator"] - unaligned_nodes = [n for n in unaligned_nodes if not nodenet.get_node(n).type in ("Sensor", "Actor", "Activator")] + + if entity_uids: + unaligned_nodespaces = [id for id in unaligned_nodespaces if id in entity_uids] + unaligned_nodes = [id for id in unaligned_nodes if id in entity_uids] + sensors = [] + actors = [] + activators = [] + ymin = min(nodenet.get_node(n).position[1] for n in unaligned_nodes + unaligned_nodespaces) + xmin = min(nodenet.get_node(n).position[0] for n in unaligned_nodes + unaligned_nodespaces) + start_position = (xmin, ymin, 0) + + else: + sensors = [s for s in unaligned_nodes if nodenet.get_node(s).type == "Sensor"] + actors = [a for a in unaligned_nodes if nodenet.get_node(a).type == "Actor"] + activators = [a for a in unaligned_nodes if nodenet.get_node(a).type == "Activator"] + unaligned_nodes = [n for n in unaligned_nodes if not nodenet.get_node(n).type in ("Sensor", "Actor", "Activator")] + + start_position = (BORDER + GRID / 2, BORDER + (0.5 + math.ceil(len(unaligned_nodespaces) / PREFERRED_WIDTH)) * GRID, 0) # position nodespaces for i, id in enumerate(unaligned_nodespaces): nodenet.get_nodespace(id).position = calculate_grid_position(i) - start_position = (BORDER + GRID / 2, BORDER + (0.5 + math.ceil(len(unaligned_nodespaces) / PREFERRED_WIDTH)) * GRID, 0) - # simplify linkage group = unify_links(nodenet, 
unaligned_nodes) # connect all nodes that have por- and ret-links @@ -62,6 +75,7 @@ def align(nodenet, nodespace): return True + INVERSE_DIRECTIONS = {"s": "n", "w": "e", "nw": "se", "ne": "sw", "n": "s", "e": "w", "se": "nw", "sw": "ne", "o": "O", "O": "o", "b": "a", "a": "b"} @@ -258,6 +272,7 @@ def _fix_link_inheritance(group, excluded_nodes): The function adds the links as .directions to the group and its sub-groups, and carries a set of excluded_nodes to remember which links should not be inherited upwards""" + from copy import deepcopy if hasattr(group, "uid"): excluded_nodes.add(group) else: @@ -274,7 +289,7 @@ def _fix_link_inheritance(group, excluded_nodes): # now delete all links to excluded nodes dirs_copy = group.directions.copy() for d in dirs_copy: - for node in dirs_copy[d]: + for node in deepcopy(dirs_copy[d]): if node in excluded_nodes: group.directions[d].remove(node) if not group.directions[d]: diff --git a/micropsi_core/nodenet/nodefunctions.py b/micropsi_core/nodenet/nodefunctions.py index 66a0b6ed..2417fab4 100644 --- a/micropsi_core/nodenet/nodefunctions.py +++ b/micropsi_core/nodenet/nodefunctions.py @@ -19,8 +19,8 @@ def register(netapi, node=None, **params): def sensor(netapi, node=None, datasource=None, **params): - if datasource in netapi.worldadapter.get_available_datasources(): - datasource_value = netapi.worldadapter.get_datasource(datasource) + if netapi.worldadapter and datasource in netapi.worldadapter.get_available_datasources(): + datasource_value = netapi.worldadapter.get_datasource_value(datasource) else: datasource_value = netapi.get_modulator(datasource) node.activation = datasource_value @@ -28,12 +28,10 @@ def sensor(netapi, node=None, datasource=None, **params): def actor(netapi, node=None, datatarget=None, **params): - if not netapi.worldadapter: - return activation_to_set = node.get_slot("gen").activation - if datatarget in netapi.worldadapter.get_available_datatargets(): + if netapi.worldadapter and datatarget in netapi.worldadapter.get_available_datatargets(): netapi.worldadapter.add_to_datatarget(datatarget, activation_to_set) - feedback = netapi.worldadapter.get_datatarget_feedback(datatarget) + feedback = netapi.worldadapter.get_datatarget_feedback_value(datatarget) else: netapi.set_modulator(datatarget, activation_to_set) feedback = 1 @@ -201,7 +199,7 @@ def pipe(netapi, node=None, sheaf="default", **params): if exp == 0: exp += node.get_slot("sur").get_activation("default") # no activation in our sheaf, maybe from sensors? 
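        # note (not in the original patch): the change below gates the expectation
        # bookkeeping on node.nodenet.use_modulators, so nodenets created without
        # modulators skip the Doernerian statistics; a confirmed expectation (sur > 0)
        # counts as an expected event, a failed one (sur < 0) as an unexpected event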
if exp > 1: exp = 1 - if node.get_slot('sub').get_activation(sheaf) > 0: + if node.get_slot('sub').get_activation(sheaf) > 0 and node.nodenet.use_modulators: if sur > 0: netapi.change_modulator('base_number_of_expected_events', 1) elif sur < 0: diff --git a/micropsi_core/nodenet/nodenet.py b/micropsi_core/nodenet/nodenet.py index bee0e4a2..dcfbbf53 100644 --- a/micropsi_core/nodenet/nodenet.py +++ b/micropsi_core/nodenet/nodenet.py @@ -65,7 +65,9 @@ def metadata(self): 'world': self._world_uid, 'worldadapter': self._worldadapter_uid, 'version': NODENET_VERSION, - 'runner_condition': self._runner_condition + 'runner_condition': self._runner_condition, + 'use_modulators': self.use_modulators, + 'nodespace_ui_properties': self._nodespace_ui_properties } return data @@ -135,7 +137,7 @@ def worldadapter_instance(self, _worldadapter_instance): """ self._worldadapter_instance = _worldadapter_instance - def __init__(self, name="", worldadapter="Default", world=None, owner="", uid=None): + def __init__(self, name="", worldadapter="Default", world=None, owner="", uid=None, use_modulators=True, worldadapter_instance=None): """ Constructor for the abstract base class, must be called by implementations """ @@ -143,8 +145,9 @@ def __init__(self, name="", worldadapter="Default", world=None, owner="", uid=No self._name = name self._world_uid = world self._worldadapter_uid = worldadapter if world else None - self._worldadapter_instance = None + self._worldadapter_instance = worldadapter_instance self.is_active = False + self.use_modulators = use_modulators self._version = NODENET_VERSION # used to check compatibility of the node net data self._uid = uid @@ -152,6 +155,7 @@ def __init__(self, name="", worldadapter="Default", world=None, owner="", uid=No self.owner = owner self._monitors = {} + self._nodespace_ui_properties = {} self.netlock = Lock() @@ -166,6 +170,12 @@ def __init__(self, name="", worldadapter="Default", world=None, owner="", uid=No self.stepping_rate = [] self.dashboard_values = {} + self._modulators = {} + if use_modulators: + from micropsi_core.nodenet.stepoperators import DoernerianEmotionalModulators as emo + for modulator in emo.writeable_modulators + emo.readable_modulators: + self._modulators[modulator] = 1 + def get_data(self, complete=False, include_links=True): """ Returns a dict representing the whole node net. @@ -186,6 +196,13 @@ def get_data(self, complete=False, include_links=True): }) return data + @abstractmethod + def get_nodes(self, nodespaces=[], include_links=True): + """ + Returns a dict with contents for the given nodespaces + """ + pass # pragma: no cover + @abstractmethod def save(self, filename): """ @@ -281,6 +298,24 @@ def is_nodespace(self, uid): """ pass # pragma: no cover + def set_nodespace_properties(self, nodespace_uid, data): + """ + Sets a persistent property for UI purposes for the given nodespace + """ + nodespace_uid = self.get_nodespace(nodespace_uid).uid + if nodespace_uid not in self._nodespace_ui_properties: + self._nodespace_ui_properties[nodespace_uid] = {} + self._nodespace_ui_properties[nodespace_uid].update(data) + + def get_nodespace_properties(self, nodespace_uid=None): + """ + Return the nodespace properties of all or only the given nodespace + """ + if nodespace_uid: + return self._nodespace_ui_properties.get(nodespace_uid, {}) + else: + return self._nodespace_ui_properties + @abstractmethod def set_entity_positions(self, positions): """ Sets the position of nodes or nodespaces. 
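
Aside, not part of the patch: the nodespace UI-property store added above is a plain dict keyed
by nodespace uid, and updates merge into the existing entry rather than replace it. A minimal
standalone sketch of the same semantics; the uid and property names here are made up:

    props = {}  # corresponds to self._nodespace_ui_properties

    def set_nodespace_properties(uid, data):
        props.setdefault(uid, {}).update(data)  # merge new keys into any existing dict

    def get_nodespace_properties(uid=None):
        return props.get(uid, {}) if uid else props

    set_nodespace_properties('ns0001', {'renderlinks': 'no'})
    set_nodespace_properties('ns0001', {'comment': 'input layer'})
    assert get_nodespace_properties('ns0001') == {'renderlinks': 'no', 'comment': 'input layer'}
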
@@ -295,7 +330,7 @@ def create_nodespace(self, parent_uid, position, name="", uid=None): pass # pragma: no cover @abstractmethod - def delete_nodespace(self, uid): + def delete_nodespace(self, nodespace_uid): """ Deletes the nodespace with the given UID, and everything it contains """ @@ -375,7 +410,7 @@ def get_nodespace_data(self, nodespace_uid, include_links): """ pass # pragma: no cover - def get_activation_data(self, nodespace_uid=None, rounded=1): + def get_activation_data(self, nodespace_uids=[], rounded=1): """ Returns a dict of uids to lists of activation values. Callers need to know the types of nodes that these activations belong to. @@ -391,26 +426,23 @@ def merge_data(self, nodenet_data, keep_uids=False): """ pass # pragma: no cover - @abstractmethod def get_modulator(self, modulator): """ Returns the numeric value of the given global modulator """ - pass # pragma: no cover + return self._modulators.get(modulator, 1) - @abstractmethod def change_modulator(self, modulator, diff): """ Changes the value of the given global modulator by the value of diff """ - pass # pragma: no cover + self._modulators[modulator] = self._modulators.get(modulator, 0) + diff - @abstractmethod def set_modulator(self, modulator, value): """ Changes the value of the given global modulator to the given value """ - pass # pragma: no cover + self._modulators[modulator] = value @abstractmethod def get_standard_nodetype_definitions(self): @@ -505,7 +537,7 @@ def get_available_gatefunctions(self): pass # pragma: no cover @abstractmethod - def has_nodespace_changes(self, nodespace_uid, since_step): + def has_nodespace_changes(self, nodespace_uids=[], since_step=0): """ Returns true, if the structure of the nodespace has changed since the given step, false otherwise Structural changes include everything besides activation @@ -513,7 +545,7 @@ def has_nodespace_changes(self, nodespace_uid, since_step): pass # pragma: no cover @abstractmethod - def get_nodespace_changes(self, nodespace_uid, since_step): + def get_nodespace_changes(self, nodespace_uids=[], since_step=0): """ Returns a dictionary of structural changes that happened in the given nodespace since the given step @@ -545,7 +577,7 @@ def clear(self): def add_gate_monitor(self, node_uid, gate, sheaf=None, name=None, color=None): """Adds a continuous monitor to the activation of a gate. The monitor will collect the activation - value in every simulation step. + value in every calculation step. Returns the uid of the new monitor.""" mon = monitor.NodeMonitor(self, node_uid, 'gate', gate, sheaf=sheaf, name=name, color=color) self._monitors[mon.uid] = mon @@ -553,7 +585,7 @@ def add_gate_monitor(self, node_uid, gate, sheaf=None, name=None, color=None): def add_slot_monitor(self, node_uid, slot, sheaf=None, name=None, color=None): """Adds a continuous monitor to the activation of a slot. The monitor will collect the activation - value in every simulation step. + value in every calculation step. Returns the uid of the new monitor.""" mon = monitor.NodeMonitor(self, node_uid, 'slot', slot, sheaf=sheaf, name=name, color=color) self._monitors[mon.uid] = mon @@ -561,7 +593,7 @@ def add_slot_monitor(self, node_uid, slot, sheaf=None, name=None, color=None): def add_link_monitor(self, source_node_uid, gate_type, target_node_uid, slot_type, property=None, name=None, color=None): """Adds a continuous monitor to a link. You can choose to monitor either weight (default) or certainty - The monitor will collect respective value in every simulation step. 
+        The monitor will collect respective value in every calculation step.
        Returns the uid of the new monitor."""
        mon = monitor.LinkMonitor(self, source_node_uid, gate_type, target_node_uid, slot_type, property=property, name=name, color=color)
        self._monitors[mon.uid] = mon
@@ -569,7 +601,7 @@ def add_modulator_monitor(self, modulator, name, color=None):
        """Adds a continuous monitor to a global modulator.
-        The monitor will collect respective value in every simulation step.
+        The monitor will collect respective value in every calculation step.
        Returns the uid of the new monitor."""
        mon = monitor.ModulatorMonitor(self, modulator, name=name, color=color)
        self._monitors[mon.uid] = mon
@@ -577,7 +609,7 @@ def add_custom_monitor(self, function, name, color=None):
        """Adds a continuous monitor, that evaluates the given python-code and collects the
-        return-value for every simulation step.
+        return-value for every calculation step.
        Returns the uid of the new monitor."""
        mon = monitor.CustomMonitor(self, function=function, name=name, color=color)
        self._monitors[mon.uid] = mon
@@ -601,15 +633,6 @@ def remove_monitor(self, monitor_uid):

    def get_dashboard(self):
        data = self.dashboard_values.copy()
-        sensors = {}
-        actors = {}
-        if self.worldadapter_instance:
-            for s in self.worldadapter_instance.get_available_datasources():
-                sensors[s] = self.worldadapter_instance.get_datasource(s)
-            for uid, actor in self.get_actors().items():
-                actors[actor.get_parameter('datatarget')] = actor.activation
-        data['sensors'] = sensors
-        data['actors'] = actors
        data['is_active'] = self.is_active
        data['step'] = self.current_step
        if self.stepping_rate:
diff --git a/micropsi_core/nodenet/operations/__init__.py b/micropsi_core/nodenet/operations/__init__.py
new file mode 100644
index 00000000..89df238e
--- /dev/null
+++ b/micropsi_core/nodenet/operations/__init__.py
@@ -0,0 +1,34 @@
+"""
+Package for operations.
+
+You can add files here that define functions to operate on a selection of nodes or nodespaces.
+The filename will be used to group the operations by category.
+
+To define an operation-function, choose a good function name, and use the selectioninfo decorator to provide
+meta-information about your operation:
+* a list of nodetypes this operation can work with (or empty for all). defaults to empty
+* a mincount to define the minimum number of nodes needed for your operation. defaults to 0
+* a maxcount to define the maximum number of nodes your operation can work on, or -1 for no limit. defaults to -1
+
+Setting a short docstring is encouraged, and will be used in the frontend to clarify what your operation does.
+
+Operations can return a dict.
+If a key "error" is set in the dict, the frontend assumes the whole operation failed.
+Otherwise the result (if any) is displayed in the frontend.
+To return images that can be displayed in the frontend, base64-encode the image bytestring, and return it as data
+together with a content-type: {'content_type': 'data:image/png;base64', 'data': image}
+
+Operations that allow a mincount of one are assumed to be applicable to nodespaces, and are listed in a context
+menu without selection.
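+
+For illustration only, a hypothetical operation might look like this (the nodetype, the function
+name and the result key are invented for this sketch; real operations follow the same shape):
+
+    from micropsi_core.nodenet.operations import selectioninfo
+
+    @selectioninfo(nodetypes=["Pipe"], mincount=2)
+    def count_pipes(netapi, selection):
+        # a short docstring would normally go here
+        return {'count': len(selection)}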
+""" + + +def selectioninfo(nodetypes=[], mincount=0, maxcount=-1): + def _decorator(func): + func.selectioninfo = { + 'nodetypes': nodetypes if type(nodetypes) == list else [nodetypes], + 'mincount': mincount, + 'maxcount': maxcount + } + return func + return _decorator diff --git a/micropsi_core/nodenet/operations/layout.py b/micropsi_core/nodenet/operations/layout.py new file mode 100644 index 00000000..6474ed11 --- /dev/null +++ b/micropsi_core/nodenet/operations/layout.py @@ -0,0 +1,31 @@ + +from micropsi_core.nodenet.operations import selectioninfo + + +@selectioninfo(mincount=1) +def autoalign(netapi, selection): + """ Autoalign nodes or nodespaces.""" + if len(selection) == 1: + # if there's only one item selected, we assume it's a nodespace + # so we align its contents. If it's not, we return an error + try: + nodespace = netapi.get_nodespace(selection[0]) + except: + return {'error': 'nothing to align'} + netapi.autoalign_nodespace(nodespace.uid) + else: + # otherwise, we retrieve the parent nodespace from the first selected + # entity, and autoalign the selected nodes in the given nodespace + nodespace = None + if len(selection): + try: + nodespace = netapi.get_node(selection[0]).parent_nodespace + except: + pass + try: + nodespace = netapi.get_nodespace(selection[0]).parent_nodespace + except: + pass + if nodespace is None: + return {'error': 'unknown entity in selection'} + netapi.autoalign_entities(nodespace, selection) diff --git a/micropsi_core/nodenet/stepoperators.py b/micropsi_core/nodenet/stepoperators.py index fd8e0098..5636c015 100644 --- a/micropsi_core/nodenet/stepoperators.py +++ b/micropsi_core/nodenet/stepoperators.py @@ -88,6 +88,26 @@ class DoernerianEmotionalModulators(StepOperator): """ + writeable_modulators = [ + 'base_sum_importance_of_intentions', + 'base_sum_urgency_of_intentions', + 'base_competence_for_intention', + 'base_importance_of_intention', + 'base_urgency_of_intention', + 'base_number_of_active_motives', + 'base_number_of_expected_events', + 'base_number_of_unexpected_events', + 'base_urge_change', + 'base_age_influence_on_competence', + 'base_porret_decay_factor'] + readable_modulators = [ + 'emo_pleasure', + 'emo_activation', + 'emo_securing_rate', + 'emo_resolution', + 'emo_selection_threshold', + 'emo_competence'] + @property def priority(self): return 1000 @@ -168,7 +188,7 @@ def execute(self, nodenet, nodes, netapi): nodenet.set_modulator("base_number_of_expected_events", 0) nodenet.set_modulator("base_number_of_unexpected_events", 0) nodenet.set_modulator("base_urge_change", 0) - nodenet.set_modulator("base_porret_decay_factor", 1) + # nodenet.set_modulator("base_porret_decay_factor", 1) # setting emotional parameters nodenet.set_modulator("emo_pleasure", emo_pleasure) diff --git a/micropsi_core/nodenet/theano_engine/theano_netapi.py b/micropsi_core/nodenet/theano_engine/theano_netapi.py index ca5aa5e1..2d200c41 100644 --- a/micropsi_core/nodenet/theano_engine/theano_netapi.py +++ b/micropsi_core/nodenet/theano_engine/theano_netapi.py @@ -20,32 +20,6 @@ def floatX(self): def announce_nodes(self, nodespace_uid, numer_of_nodes, average_element_per_node): self.__nodenet.announce_nodes(nodespace_uid, numer_of_nodes, average_element_per_node) - def get_selectors(self, group): - """ - Returns The indices for the elements for the given group, as an ndarray of ints. - These indices are valid in a, w, and theta. 
- """ - return self.__nodenet.nodegroups[group] - - def get_a(self): - """ - Returns the theano shared variable with the activation vector - """ - return self.__nodenet.a - - def get_w(self): - """ - Returns the theano shared variable with the link weights - Caution: Changing non-zero values to zero or zero-values to non-zero will lead to inconsistencies. - """ - return self.__nodenet.w - - def get_theta(self): - """ - Returns the theano shared variable with the "theta" parameter values - """ - return self.__nodenet.g_theta - def decay_por_links(self, nodespace_uid): """ Decays all por-links in the given nodespace """ # por_cols = T.lvector("por_cols") @@ -54,7 +28,8 @@ def decay_por_links(self, nodespace_uid): # self.decay = theano.function([por_cols, por_rows], None, updates={nodenet.w: new_w}, accept_inplace=True) import numpy as np from .theano_definitions import node_from_id, PIPE, POR - porretdecay = self.__nodenet.get_modulator('por_ret_decay') + nodespace_uid = self.get_nodespace(nodespace_uid).uid + porretdecay = self.__nodenet.get_modulator('base_porret_decay_factor') ns = self.get_nodespace(nodespace_uid) partition = ns._partition if partition.has_pipes and porretdecay != 0: diff --git a/micropsi_core/nodenet/theano_engine/theano_node.py b/micropsi_core/nodenet/theano_engine/theano_node.py index f0bcccfd..b4192a03 100644 --- a/micropsi_core/nodenet/theano_engine/theano_node.py +++ b/micropsi_core/nodenet/theano_engine/theano_node.py @@ -235,7 +235,10 @@ def get_associated_node_uids(self): return ids def get_parameter(self, parameter): - return self.clone_parameters().get(parameter, None) + if self.type in self._nodenet.native_modules: + return self.parameters.get(parameter, self.nodetype.parameter_defaults.get(parameter, None)) + else: + return self.clone_parameters().get(parameter, None) def set_parameter(self, parameter, value): if value == '' or value is None: @@ -244,25 +247,50 @@ def set_parameter(self, parameter, value): else: value = None if self.type == "Sensor" and parameter == "datasource": - if self.uid in self._nodenet.inverted_sensor_map: - olddatasource = self._nodenet.inverted_sensor_map[self.uid] # first, clear old data source association - if self.uid in self._nodenet.sensormap.get(olddatasource, []): - self._nodenet.sensormap.get(olddatasource, []).remove(self.uid) - - connectedsensors = self._nodenet.sensormap.get(value, []) # then, set the new one - connectedsensors.append(self.uid) - self._nodenet.sensormap[value] = connectedsensors - self._nodenet.inverted_sensor_map[self.uid] = value + if value is not None and value != "": + datasources = self._nodenet.get_datasources() + sensor_element = self._partition.allocated_node_offsets[self._id] + GEN + old_datasource_index = np.where(self._partition.sensor_indices == sensor_element)[0] + + self._partition.sensor_indices[old_datasource_index] = 0 + if value not in datasources: + self.logger.warning("Datasource %s not known, will not be assigned." % value) + return + + datasource_index = datasources.index(value) + + if self._partition.sensor_indices[datasource_index] != sensor_element and \ + self._partition.sensor_indices[datasource_index] > 0: + + other_sensor_element = self._partition.sensor_indices[datasource_index] + other_sensor_id = node_to_id(self._partition.allocated_elements_to_nodes[other_sensor_element], self._partition.pid) + + self.logger.warning("Datasource %s had already been assigned to sensor %s, which will now be unassigned." 
% (value, other_sensor_id)) + + self._nodenet.sensormap[value] = self.uid + self._partition.sensor_indices[datasource_index] = sensor_element elif self.type == "Actor" and parameter == "datatarget": - if self.uid in self._nodenet.inverted_actuator_map: - olddatatarget = self._nodenet.inverted_actuator_map[self.uid] # first, clear old data target association - if self.uid in self._nodenet.actuatormap.get(olddatatarget, []): - self._nodenet.actuatormap.get(olddatatarget, []).remove(self.uid) - - connectedactuators = self._nodenet.actuatormap.get(value, []) # then, set the new one - connectedactuators.append(self.uid) - self._nodenet.actuatormap[value] = connectedactuators - self._nodenet.inverted_actuator_map[self.uid] = value + if value is not None and value != "": + datatargets = self._nodenet.get_datatargets() + actuator_element = self._partition.allocated_node_offsets[self._id] + GEN + old_datatarget_index = np.where(self._partition.actuator_indices == actuator_element)[0] + self._partition.actuator_indices[old_datatarget_index] = 0 + if value not in datatargets: + self.logger.warning("Datatarget %s not known, will not be assigned." % value) + return + + datatarget_index = datatargets.index(value) + + if self._partition.actuator_indices[datatarget_index] != actuator_element and \ + self._partition.actuator_indices[datatarget_index] > 0: + + other_actuator_element = self._partition.actuator_indices[datatarget_index] + other_actuator_id = node_to_id(self._partition.allocated_elements_to_nodes[other_actuator_element], self._partition.pid) + + self.logger.warning("Datatarget %s had already been assigned to actuator %s, which will now be unassigned." % (value, other_actuator_id)) + + self._nodenet.actuatormap[value] = self.uid + self._partition.actuator_indices[datatarget_index] = actuator_element elif self.type == "Activator" and parameter == "type": if value != "sampling": self._nodenet.set_nodespace_gatetype_activator(self.parent_nodespace, value, self.uid) @@ -291,9 +319,19 @@ def clear_parameter(self, parameter): def clone_parameters(self): parameters = {} if self.type == "Sensor": - parameters['datasource'] = self._nodenet.inverted_sensor_map.get(self.uid, None) + sensor_element = self._partition.allocated_node_offsets[self._id] + GEN + datasource_index = np.where(self._partition.sensor_indices == sensor_element)[0] + if len(datasource_index) == 0: + parameters['datasource'] = None + else: + parameters['datasource'] = self._nodenet.get_datasources()[datasource_index[0]] elif self.type == "Actor": - parameters['datatarget'] = self._nodenet.inverted_actuator_map.get(self.uid, None) + actuator_element = self._partition.allocated_node_offsets[self._id] + GEN + datatarget_index = np.where(self._partition.actuator_indices == actuator_element)[0] + if len(datatarget_index) == 0: + parameters['datatarget'] = None + else: + parameters['datatarget'] = self._nodenet.get_datatargets()[datatarget_index[0]] elif self.type == "Activator": activator_type = None if self._id in self._partition.allocated_nodespaces_por_activators: @@ -479,7 +517,10 @@ def node(self): def empty(self): w_matrix = self.__partition.w.get_value(borrow=True) slotrow = w_matrix[self.__partition.allocated_node_offsets[node_from_id(self.__node.uid)] + self.__numerictype] - return len(np.nonzero(slotrow)[1]) == 0 + if self.__partition.sparse: + return len(np.nonzero(slotrow)[1]) == 0 + else: + return len(np.nonzero(slotrow)[0]) == 0 @property def activation(self): diff --git a/micropsi_core/nodenet/theano_engine/theano_nodenet.py 
b/micropsi_core/nodenet/theano_engine/theano_nodenet.py index 50a13b15..ded96a22 100644 --- a/micropsi_core/nodenet/theano_engine/theano_nodenet.py +++ b/micropsi_core/nodenet/theano_engine/theano_nodenet.py @@ -166,11 +166,20 @@ class TheanoNodenet(Nodenet): def engine(self): return "theano_engine" + @property + def worldadapter_instance(self): + return self._worldadapter_instance + + @worldadapter_instance.setter + def worldadapter_instance(self, _worldadapter_instance): + self._worldadapter_instance = _worldadapter_instance + self._rebuild_sensor_actor_indices() + @property def current_step(self): return self._step - def __init__(self, name="", worldadapter="Default", world=None, owner="", uid=None, native_modules={}): + def __init__(self, name="", worldadapter="Default", world=None, owner="", uid=None, native_modules={}, use_modulators=True, worldadapter_instance=None): # map of string uids to positions. Not all nodes necessarily have an entry. self.positions = {} @@ -181,16 +190,10 @@ def __init__(self, name="", worldadapter="Default", world=None, owner="", uid=No # map of data sources to string node IDs self.sensormap = {} - # map of string node IDs to data sources - self.inverted_sensor_map = {} - # map of data targets to string node IDs self.actuatormap = {} - # map of string node IDs to data targets - self.inverted_actuator_map = {} - - super(TheanoNodenet, self).__init__(name, worldadapter, world, owner, uid) + super(TheanoNodenet, self).__init__(name, worldadapter, world, owner, uid, use_modulators=use_modulators, worldadapter_instance=worldadapter_instance) precision = settings['theano']['precision'] if precision == "32": @@ -203,7 +206,7 @@ def __init__(self, name="", worldadapter="Default", world=None, owner="", uid=No self.scipyfloatX = scipy.float64 self.numpyfloatX = np.float64 self.byte_per_float = 8 - else: + else: # pragma: no cover self.logger.warn("Unsupported precision value from configuration: %s, falling back to float64", precision) T.config.floatX = "float64" self.scipyfloatX = scipy.float64 @@ -226,14 +229,14 @@ def __init__(self, name="", worldadapter="Default", world=None, owner="", uid=No configured_elements_per_node_assumption = settings['theano']['elements_per_node_assumption'] try: average_elements_per_node_assumption = int(configured_elements_per_node_assumption) - except: + except: # pragma: no cover self.logger.warn("Unsupported elements_per_node_assumption value from configuration: %s, falling back to 4", configured_elements_per_node_assumption) initial_number_of_nodes = 2000 configured_initial_number_of_nodes = settings['theano']['initial_number_of_nodes'] try: initial_number_of_nodes = int(configured_initial_number_of_nodes) - except: + except: # pragma: no cover self.logger.warn("Unsupported initial_number_of_nodes value from configuration: %s, falling back to 2000", configured_initial_number_of_nodes) sparse = True @@ -242,7 +245,7 @@ def __init__(self, name="", worldadapter="Default", world=None, owner="", uid=No sparse = True elif configuredsparse == "False": sparse = False - else: + else: # pragma: no cover self.logger.warn("Unsupported sparse_weight_matrix value from configuration: %s, falling back to True", configuredsparse) sparse = True @@ -255,11 +258,10 @@ def __init__(self, name="", worldadapter="Default", world=None, owner="", uid=No self.rootpartition = rootpartition self.partitionmap = {} self.inverted_partitionmap = {} + self._rebuild_sensor_actor_indices(rootpartition) self._version = NODENET_VERSION # used to check compatibility of 
the node net data self._step = 0 - self._modulators = {} - self._modulators['por_ret_decay'] = 0. self.proxycache = {} @@ -284,7 +286,7 @@ def get_data(self, complete=False, include_links=True): data['nodes'] = self.construct_nodes_dict(complete=complete, include_links=include_links) # for uid in data['nodes']: # data['nodes'][uid]['gate_parameters'] = self.get_node(uid).clone_non_default_gate_parameters() - data['nodespaces'] = self.construct_nodespaces_dict(None) + data['nodespaces'] = self.construct_nodespaces_dict(None, transitive=True) data['version'] = self._version data['modulators'] = self.construct_modulators_dict() return data @@ -294,11 +296,64 @@ def export_json(self): data['links'] = self.construct_links_list() return data + def get_nodes(self, nodespace_uids=[], include_links=True): + """ + Returns a dict with contents for the given nodespaces + """ + data = {} + data['nodes'] = {} + data['nodespaces'] = {} + + if nodespace_uids == []: + nodespace_uids = False + else: + nodespace_uids = [self.get_nodespace(uid).uid for uid in nodespace_uids] + + if nodespace_uids: + nodespaces_by_partition = dict((spid, []) for spid in self.partitions) + for nodespace_uid in nodespace_uids: + data['nodespaces'].update(self.construct_nodespaces_dict(nodespace_uid)) + nodespaces_by_partition[self.get_partition(nodespace_uid).spid].append(nodespace_from_id(nodespace_uid)) + + followupuids = [] + for spid in nodespaces_by_partition: + if nodespaces_by_partition[spid]: + nodes, followups = self.partitions[spid].get_node_data(nodespace_ids=nodespaces_by_partition[spid], include_links=include_links) + data['nodes'].update(nodes) + followupuids.extend(followups) + + followups_by_partition = dict((spid, []) for spid in self.partitions) + for uid in followupuids: + followups_by_partition[self.get_partition(uid).spid].append(node_from_id(uid)) + + for spid in followups_by_partition: + if followups_by_partition[spid]: + nodes, _ = self.partitions[spid].get_node_data(ids=followups_by_partition[spid]) + for uid in nodes: + for gate in list(nodes[uid]['links'].keys()): + links = nodes[uid]['links'][gate] + for idx, l in enumerate(links): + p = self.get_partition(l['target_node_uid']) + if p.allocated_node_parents[node_from_id(l['target_node_uid'])] not in nodespaces_by_partition.get(p.spid, []): + del links[idx] + if len(nodes[uid]['links'][gate]) == 0: + del nodes[uid]['links'][gate] + data['nodes'].update(nodes) + + else: + data['nodespaces'] = self.construct_nodespaces_dict(None, transitive=True) + for partition in self.partitions.values(): + nodes, _ = partition.get_node_data(include_links=include_links, include_followupnodes=False) + data['nodes'].update(nodes) + + return data + def initialize_stepoperators(self): self.stepoperators = [ TheanoPropagate(), - TheanoCalculate(self), - DoernerianEmotionalModulators()] + TheanoCalculate(self)] + if self.use_modulators: + self.stepoperators.append(DoernerianEmotionalModulators()) self.stepoperators.sort(key=lambda op: op.priority) def save(self, filename): @@ -327,16 +382,15 @@ def load(self, filename): with self.netlock: initfrom = {} - datafile = None if os.path.isfile(filename): try: self.logger.info("Loading nodenet %s metadata from file %s", self.name, filename) with open(filename) as file: initfrom.update(json.load(file)) - except ValueError: + except ValueError: # pragma: no cover self.logger.warn("Could not read nodenet metadata from file %s", filename) return False - except IOError: + except IOError: # pragma: no cover self.logger.warn("Could not 
open nodenet metadata file %s", filename) return False @@ -359,16 +413,11 @@ def load(self, filename): # was saved). self.reload_native_modules(self.native_module_definitions) - for sensor, id_list in self.sensormap.items(): - for id in id_list: - self.inverted_sensor_map[id] = sensor - for actuator, id_list in self.actuatormap.items(): - for id in id_list: - self.inverted_actuator_map[id] = actuator - # re-initialize step operators for theano recompile to new shared variables self.initialize_stepoperators() + self._rebuild_sensor_actor_indices() + return True def remove(self, filename): @@ -384,9 +433,11 @@ def initialize_nodenet(self, initfrom): if initfrom.get('runner_condition'): self.set_runner_condition(initfrom['runner_condition']) + self._nodespace_ui_properties = initfrom.get('nodespace_ui_properties', {}) + if len(initfrom) != 0: # now merge in all init data (from the persisted file typically) - self.merge_data(initfrom, keep_uids=True) + self.merge_data(initfrom, keep_uids=True, native_module_instances_only=True) if 'names' in initfrom: self.names = initfrom['names'] if 'positions' in initfrom: @@ -403,9 +454,8 @@ def initialize_nodenet(self, initfrom): if 'current_step' in initfrom: self._step = initfrom['current_step'] - def merge_data(self, nodenet_data, keep_uids=False): + def merge_data(self, nodenet_data, keep_uids=False, native_module_instances_only=False): """merges the nodenet state with the current node net, might have to give new UIDs to some entities""" - uidmap = {} invalid_nodes = [] @@ -415,6 +465,12 @@ def merge_data(self, nodenet_data, keep_uids=False): # re-use the root nodespace uidmap[self.rootpartition.rootnodespace_uid] = self.rootpartition.rootnodespace_uid + # make sure we have the partition NoNs large enough to store the native modules: + indexes = [n['index'] for n in nodenet_data.get('nodes', {}).values()] + node_maxindex = max(indexes) if indexes else 10 + if self.rootpartition.NoN <= node_maxindex: + self.rootpartition.grow_number_of_nodes((node_maxindex - self.rootpartition.NoN) + 1) + # instantiate partitions partitions_to_instantiate = nodenet_data.get('partition_parents', {}) largest_pid = 0 @@ -424,10 +480,10 @@ def merge_data(self, nodenet_data, keep_uids=False): largest_pid = pid self.create_partition(pid, parent_uid, - True, - round(len(nodenet_data.get('nodes', {}).keys()) * 1.2 + 1), - 7, - round(len(set(nodenet_data.get('nodespaces', {}).keys())) * 1.2) + 1) + sparse=True, + initial_number_of_nodes=round(node_maxindex * 1.2 + 1), + average_elements_per_node_assumption=7, + initial_number_of_nodespaces=round(len(set(nodenet_data.get('nodespaces', {}).keys())) * 1.2) + 1) self.last_allocated_partition = largest_pid # merge in spaces, make sure that parent nodespaces exist before children are initialized @@ -451,15 +507,20 @@ def merge_data(self, nodenet_data, keep_uids=False): data['type'] = 'Comment' del data['gate_parameters'] invalid_nodes.append(uid) - new_uid = self.create_node( - data['type'], - parent_uid, - data['position'], - name=data['name'], - uid=id_to_pass, - parameters=data.get('parameters'), - gate_parameters=data.get('gate_parameters'), - gate_functions=data.get('gate_functions')) + if native_module_instances_only: + node = TheanoNode(self, self.get_partition(uid), parent_uid, uid, get_numerical_node_type(data['type'], nativemodules=self.native_modules), parameters=data.get('parameters')) + self.proxycache[node.uid] = node + new_uid = node.uid + else: + new_uid = self.create_node( + data['type'], + parent_uid, + 
data['position'], + name=data['name'], + uid=id_to_pass, + parameters=data.get('parameters'), + gate_parameters=data.get('gate_parameters'), + gate_functions=data.get('gate_functions')) uidmap[uid] = new_uid node_proxy = self.get_node(new_uid) for gatetype in data.get('gate_activations', {}): # todo: implement sheaves @@ -538,9 +599,6 @@ def merge_nodespace_data(self, nodespace_uid, data, uidmap, keep_uids=False): uidmap[nodespace_uid] = newuid def step(self): - if self.worldadapter_instance is not None: - self.worldadapter_instance.snapshot() - with self.netlock: self._step += 1 @@ -627,20 +685,10 @@ def create_node(self, nodetype, nodespace_uid, position, name=None, uid=None, pa if nodetype == "Sensor": if 'datasource' in parameters: - datasource = parameters['datasource'] - if datasource is not None: - connectedsensors = self.sensormap.get(datasource, []) - connectedsensors.append(uid) - self.sensormap[datasource] = connectedsensors - self.inverted_sensor_map[uid] = datasource + self.get_node(uid).set_parameter("datasource", parameters['datasource']) elif nodetype == "Actor": if 'datatarget' in parameters: - datatarget = parameters['datatarget'] - if datatarget is not None: - connectedactuators = self.actuatormap.get(datatarget, []) - connectedactuators.append(uid) - self.actuatormap[datatarget] = connectedactuators - self.inverted_actuator_map[uid] = datatarget + self.get_node(uid).set_parameter("datatarget", parameters['datatarget']) return uid @@ -723,22 +771,12 @@ def delete_node(self, uid): partition.delete_node(node_id) # remove sensor association if there should be one - if uid in self.inverted_sensor_map: - sensor = self.inverted_sensor_map[uid] - del self.inverted_sensor_map[uid] - if sensor in self.sensormap: - self.sensormap[sensor].remove(uid) - if len(self.sensormap[sensor]) == 0: - del self.sensormap[sensor] + if uid in self.sensormap.values(): + self.sensormap = {k: v for k, v in self.sensormap.items() if v != uid} # remove actuator association if there should be one - if uid in self.inverted_actuator_map: - actuator = self.inverted_actuator_map[uid] - del self.inverted_actuator_map[uid] - if actuator in self.actuatormap: - self.actuatormap[actuator].remove(uid) - if len(self.actuatormap[actuator]) == 0: - del self.actuatormap[actuator] + if uid in self.actuatormap.values(): + self.actuatormap = {k: v for k, v in self.actuatormap.items() if v != uid} self.clear_supplements(uid) @@ -786,6 +824,9 @@ def get_nodespace(self, uid): if uid is None: uid = nodespace_to_id(1, self.rootpartition.pid) + if not self.is_nodespace(uid): + raise KeyError("No nodespace with id %s exists", uid) + partition = self.get_partition(uid) if uid in self.proxycache: @@ -829,6 +870,7 @@ def create_partition(self, pid, parent_uid, sparse, initial_number_of_nodes, ave self.partitionmap[parent_uid] = [] self.partitionmap[parent_uid].append(partition) self.inverted_partitionmap[partition.spid] = parent_uid + self._rebuild_sensor_actor_indices(partition) return partition.spid def delete_partition(self, pid): @@ -848,6 +890,11 @@ def delete_partition(self, pid): for otherpartition in self.partitions.values(): if spid in otherpartition.inlinks: del otherpartition.inlinks[spid] + for uid, node in otherpartition.native_module_instances.items(): + for g in node.get_gate_types(): + node.get_gate(g).invalidate_caches() + for s in node.get_slot_types(): + node.get_slot(s).invalidate_caches() def create_nodespace(self, parent_uid, position, name="", uid=None, options=None): if options is None: @@ -880,7 +927,7 
@@ def create_nodespace(self, parent_uid, position, name="", uid=None, options=None try: average_elements_per_node_assumption = int(configured_elements_per_node_assumption) except: - self.logger.warn("Unsupported elements_per_node_assumption value from configuration: %s, falling back to 4", configured_elements_per_node_assumption) + self.logger.warn("Unsupported elements_per_node_assumption value from configuration: %s, falling back to 4", configured_elements_per_node_assumption) # pragma: no cover initial_number_of_nodes = 2000 if "initial_number_of_nodes" in options: @@ -890,7 +937,7 @@ def create_nodespace(self, parent_uid, position, name="", uid=None, options=None try: initial_number_of_nodes = int(configured_initial_number_of_nodes) except: - self.logger.warn("Unsupported initial_number_of_nodes value from configuration: %s, falling back to 2000", configured_initial_number_of_nodes) + self.logger.warn("Unsupported initial_number_of_nodes value from configuration: %s, falling back to 2000", configured_initial_number_of_nodes) # pragma: no cover sparse = True if "sparse" in options: @@ -902,7 +949,7 @@ def create_nodespace(self, parent_uid, position, name="", uid=None, options=None elif configuredsparse == "False": sparse = False else: - self.logger.warn("Unsupported sparse_weight_matrix value from configuration: %s, falling back to True", configuredsparse) + self.logger.warn("Unsupported sparse_weight_matrix value from configuration: %s, falling back to True", configuredsparse) # pragma: no cover sparse = True self.last_allocated_partition += 1 @@ -931,7 +978,7 @@ def create_nodespace(self, parent_uid, position, name="", uid=None, options=None def delete_nodespace(self, nodespace_uid): if nodespace_uid is None or nodespace_uid == self.get_nodespace(None).uid: raise ValueError("The root nodespace cannot be deleted.") - + self._nodespace_ui_properties.pop(nodespace_uid, None) partition = self.get_partition(nodespace_uid) nodespace_id = nodespace_from_id(nodespace_uid) if nodespace_id == 1 and partition.pid != self.rootpartition.pid: @@ -954,10 +1001,9 @@ def get_sensors(self, nodespace=None, datasource=None): sensors = {} sensorlist = [] if datasource is None: - for ds_sensors in self.sensormap.values(): - sensorlist.extend(ds_sensors) + sensorlist = self.sensormap.values() elif datasource in self.sensormap: - sensorlist = self.sensormap[datasource] + sensorlist.append(self.sensormap[datasource]) for uid in sensorlist: if nodespace is None or self.get_partition(uid).allocated_node_parents[node_from_id(uid)] == nodespace_from_id(nodespace): sensors[uid] = self.get_node(uid) @@ -967,10 +1013,9 @@ def get_actors(self, nodespace=None, datatarget=None): actuators = {} actuatorlist = [] if datatarget is None: - for dt_actuators in self.actuatormap.values(): - actuatorlist.extend(dt_actuators) + actuatorlist = self.actuatormap.values() elif datatarget in self.actuatormap: - actuatorlist = self.actuatormap[datatarget] + actuatorlist.append(self.actuatormap[datatarget]) for uid in actuatorlist: if nodespace is None or self.get_partition(uid).allocated_node_parents[node_from_id(uid)] == nodespace_from_id(nodespace): actuators[uid] = self.get_node(uid) @@ -1046,7 +1091,7 @@ def reload_native_modules(self, native_modules): numeric_id = node_from_id(uid) number_of_elements = len(np.where(partition.allocated_elements_to_nodes == numeric_id)[0]) - new_numer_of_elements = max(len(native_modules[instance.type]['slottypes']), len(native_modules[instance.type]['gatetypes'])) + new_numer_of_elements = 
max(len(native_modules[instance.type].get('slottypes', [])), len(native_modules[instance.type].get('gatetypes', []))) if number_of_elements != new_numer_of_elements: self.logger.warn("Number of elements changed for node type %s from %d to %d, recreating instance %s" % (instance.type, number_of_elements, new_numer_of_elements, uid)) @@ -1118,11 +1163,11 @@ def get_nodespace_data(self, nodespace_uid, include_links=True): return data - def get_activation_data(self, nodespace_uid=None, rounded=1): + def get_activation_data(self, nodespace_uids=[], rounded=1): if rounded is not None: mult = math.pow(10, rounded) activations = {} - if nodespace_uid is None: + if nodespace_uids == []: for partition in self.partitions.values(): ids = np.nonzero(partition.allocated_nodes)[0] for id in ids: @@ -1133,27 +1178,20 @@ def get_activation_data(self, nodespace_uid=None, rounded=1): else: activations[node_to_id(id, partition.pid)] = [n.item() / mult for n in np.rint(partition.a.get_value()[offset:offset+elements]*mult)] else: - partition = self.get_nodespace(nodespace_uid).partition - nodespace_id = nodespace_from_id(nodespace_uid) - ids = np.where(partition.allocated_node_parents == nodespace_id)[0] - for id in ids: - elements = get_elements_per_type(partition.allocated_nodes[id], self.native_modules) - offset = partition.allocated_node_offsets[id] - if rounded is None: - activations[node_to_id(id, partition.pid)] = [n.item() for n in partition.a.get_value()[offset:offset+elements]] - else: - activations[node_to_id(id, partition.pid)] = [n.item() / mult for n in np.rint(partition.a.get_value()[offset:offset+elements]*mult)] + for nsuid in nodespace_uids: + nodespace = self.get_nodespace(nsuid) + partition = self.get_nodespace(nodespace.uid).partition + nodespace_id = nodespace_from_id(nodespace.uid) + ids = np.where(partition.allocated_node_parents == nodespace_id)[0] + for id in ids: + elements = get_elements_per_type(partition.allocated_nodes[id], self.native_modules) + offset = partition.allocated_node_offsets[id] + if rounded is None: + activations[node_to_id(id, partition.pid)] = [n.item() for n in partition.a.get_value()[offset:offset+elements]] + else: + activations[node_to_id(id, partition.pid)] = [n.item() / mult for n in np.rint(partition.a.get_value()[offset:offset+elements]*mult)] return activations - def get_modulator(self, modulator): - return self._modulators.get(modulator, 1) - - def change_modulator(self, modulator, diff): - self._modulators[modulator] = self._modulators.get(modulator, 0) + diff - - def set_modulator(self, modulator, value): - self._modulators[modulator] = value - def get_nodetype(self, type): if type in self._nodetypes: return self._nodetypes[type] @@ -1263,33 +1301,38 @@ def construct_nodes_dict(self, nodespace_uid=None, max_nodes=-1, complete=False, break return data - def construct_nodespaces_dict(self, nodespace_uid): + def construct_nodespaces_dict(self, nodespace_uid, transitive=False): data = {} if nodespace_uid is None: nodespace_uid = self.get_nodespace(None).uid - for partition in self.partitions.values(): - nodespace_id = nodespace_from_id(nodespace_uid) - nodespace_ids = np.nonzero(partition.allocated_nodespaces)[0] - nodespace_ids = np.append(nodespace_ids, 1) - for candidate_id in nodespace_ids: - is_in_hierarchy = False - if candidate_id == nodespace_id: - is_in_hierarchy = True - else: - parent_id = partition.allocated_nodespaces[candidate_id] - while parent_id > 0 and parent_id != nodespace_id: - parent_id = partition.allocated_nodespaces[parent_id] - if 
parent_id == nodespace_id: + if transitive: + for partition in self.partitions.values(): + nodespace_id = nodespace_from_id(nodespace_uid) + nodespace_ids = np.nonzero(partition.allocated_nodespaces)[0] + nodespace_ids = np.append(nodespace_ids, 1) + for candidate_id in nodespace_ids: + is_in_hierarchy = False + if candidate_id == nodespace_id: is_in_hierarchy = True + else: + parent_id = partition.allocated_nodespaces[candidate_id] + while parent_id > 0 and parent_id != nodespace_id: + parent_id = partition.allocated_nodespaces[parent_id] + if parent_id == nodespace_id: + is_in_hierarchy = True - if is_in_hierarchy: - data[nodespace_to_id(candidate_id, partition.pid)] = self.get_nodespace(nodespace_to_id(candidate_id, partition.pid)).get_data() + if is_in_hierarchy: + data[nodespace_to_id(candidate_id, partition.pid)] = self.get_nodespace(nodespace_to_id(candidate_id, partition.pid)).get_data() - if nodespace_uid in self.partitionmap: - for partition in self.partitionmap[nodespace_uid]: - partition_root_uid = partition.rootnodespace_uid - data[partition_root_uid] = self.get_nodespace(partition_root_uid).get_data() + if nodespace_uid in self.partitionmap: + for partition in self.partitionmap[nodespace_uid]: + partition_root_uid = partition.rootnodespace_uid + data[partition_root_uid] = self.get_nodespace(partition_root_uid).get_data() + + else: + for uid in self.get_nodespace(nodespace_uid).get_known_ids('nodespaces'): + data[uid] = self.get_nodespace(uid).get_data() return data @@ -1302,68 +1345,107 @@ def get_standard_nodetype_definitions(self): """ return copy.deepcopy(STANDARD_NODETYPES) - def set_sensors_and_actuator_feedback_to_values(self, datasource_to_value_map, datatarget_to_value_map): + def set_sensors_and_actuator_feedback_values(self): """ - Sets the sensors for the given data sources to the given values + Sets the values for sensors and actuator_feedback from the worldadapter """ + # convert from python types: + sensor_values = np.array([]) + actuator_feedback_values = np.array([]) + if self._worldadapter_instance: + sensor_values = np.concatenate((sensor_values, np.asarray(self._worldadapter_instance.get_datasource_values()))) + actuator_feedback_values = np.concatenate((actuator_feedback_values, np.asarray(self._worldadapter_instance.get_datatarget_feedback_values()))) + if self.use_modulators: + # include modulators + readables = [0 for _ in DoernerianEmotionalModulators.readable_modulators] + for idx, key in enumerate(sorted(DoernerianEmotionalModulators.readable_modulators)): + readables[idx] = self.get_modulator(key) + sensor_values = np.concatenate((sensor_values, np.asarray(readables))) + writeables = [0 for _ in DoernerianEmotionalModulators.writeable_modulators] + for idx, key in enumerate(sorted(DoernerianEmotionalModulators.writeable_modulators)): + writeables[idx] = 1 + actuator_feedback_values = np.concatenate((actuator_feedback_values, np.asarray(writeables))) for partition in self.partitions.values(): a_array = partition.a.get_value(borrow=True) - - for datasource in datasource_to_value_map: - value = datasource_to_value_map.get(datasource) - sensor_uids = self.sensormap.get(datasource, []) - - for sensor_uid in sensor_uids: - if self.get_partition(sensor_uid).pid == partition.pid: - a_array[partition.allocated_node_offsets[node_from_id(sensor_uid)] + GEN] = value - - for datatarget in datatarget_to_value_map: - value = datatarget_to_value_map.get(datatarget) - actuator_uids = self.actuatormap.get(datatarget, []) - - for actuator_uid in actuator_uids: - if 
self.get_partition(actuator_uid).pid == partition.pid:
-                    a_array[partition.allocated_node_offsets[node_from_id(actuator_uid)] + GEN] = value
-
+        a_array[partition.sensor_indices] = sensor_values
+        a_array[partition.actuator_indices] = actuator_feedback_values
         partition.a.set_value(a_array, borrow=True)

-    def read_actuators(self):
+    def set_actuator_values(self):
         """
-        Returns a map of datatargets to values for writing back to the world adapter
+        Writes the values from the actuators to datatargets and modulators
         """
-
-        actuator_values_to_write = {}
-
+        actuator_values_to_write = np.zeros_like(self.rootpartition.actuator_indices)
         for partition in self.partitions.values():
             a_array = partition.a.get_value(borrow=True)
-
-            for datatarget in self.actuatormap:
-                if datatarget not in actuator_values_to_write:
-                    actuator_values_to_write[datatarget] = 0
-                actuator_node_activations = 0
-                for actuator_uid in self.actuatormap[datatarget]:
-                    if self.get_partition(actuator_uid).pid == partition.pid:
-                        actuator_node_activations += a_array[partition.allocated_node_offsets[node_from_id(actuator_uid)] + GEN]
-
-                actuator_values_to_write[datatarget] += actuator_node_activations
-
-            partition.a.set_value(a_array, borrow=True)
-
-        return actuator_values_to_write
+            actuator_values_to_write = actuator_values_to_write + a_array[partition.actuator_indices]
+        if self.use_modulators and bool(self.actuatormap):
+            writeables = sorted(DoernerianEmotionalModulators.writeable_modulators)
+            # split off the trailing modulator entries from the actuator values
+            modulator_values = actuator_values_to_write[-len(writeables):]
+            actuator_values_to_write = actuator_values_to_write[:-len(writeables)]
+            for idx, key in enumerate(writeables):
+                if key in self.actuatormap:
+                    self.set_modulator(key, modulator_values[idx])
+        if self._worldadapter_instance:
+            self._worldadapter_instance.set_datatarget_values(actuator_values_to_write)
+
+    def _rebuild_sensor_actor_indices(self, partition=None):
+        """
+        Rebuilds the sensor and actuator indices of the given partition, or of all partitions if None is given
+        """
+        if partition is not None:
+            partitions = [partition]
+        else:
+            partitions = self.partitions.values()
+        for partition in partitions:
+            partition.sensor_indices = np.zeros(len(self.get_datasources()), np.int32)
+            partition.actuator_indices = np.zeros(len(self.get_datatargets()), np.int32)
+            for datatarget, node_id in self.actuatormap.items():
+                if not isinstance(node_id, str):
+                    node_id = node_id[0]
+                if self.get_partition(node_id) == partition:
+                    self.get_node(node_id).set_parameter("datatarget", datatarget)
+
+            for datasource, node_id in self.sensormap.items():
+                if not isinstance(node_id, str):
+                    node_id = node_id[0]
+                if self.get_partition(node_id) == partition:
+                    self.get_node(node_id).set_parameter("datasource", datasource)
+
+    def get_datasources(self):
+        """ Returns a list of available datasources: the worldadapter's datasources,
+        followed by the readable modulators in sorted order"""
+        datasources = self.worldadapter_instance.get_available_datasources() if self.worldadapter_instance else []
+        if self.use_modulators:
+            for item in sorted(DoernerianEmotionalModulators.readable_modulators):
+                datasources.append(item)
+        return datasources
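# Illustrative sketch (not part of the patch): sensor and actuator values now
# travel between worldadapter and nodenet as whole numpy vectors addressed via
# per-partition index arrays, replacing the per-node dictionary lookups above.
# A runnable miniature of that idiom, with made-up sizes and indices:

import numpy as np

a = np.zeros(8)                            # hypothetical activation vector
sensor_indices = np.array([2, 5, 6])       # one element index per datasource
sensor_values = np.array([0.1, 0.9, 0.3])  # values read from the worldadapter
a[sensor_indices] = sensor_values          # one vectorized write per step
actuator_values = a[np.array([1, 3])]      # and one vectorized read

+
+    def get_datatargets(self):
+        """ Returns a list of available datatargets: the worldadapter's datatargets,
+        followed by the writeable modulators in sorted order"""
+        datatargets = self.worldadapter_instance.get_available_datatargets() if self.worldadapter_instance else []
+        if self.use_modulators:
+            for item in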
sorted(DoernerianEmotionalModulators.writeable_modulators): + datatargets.append(item) + return datatargets def group_nodes_by_names(self, nodespace_uid, node_name_prefix=None, gatetype="gen", sortby='id', group_name=None): if nodespace_uid is None: nodespace_uid = self.get_nodespace(None).uid + partition = self.get_partition(nodespace_uid) + if group_name is None: group_name = node_name_prefix ids = [] for uid, name in self.names.items(): - partition = self.get_partition(uid) - if self.is_node(uid) and name.startswith(node_name_prefix) and \ - (partition.allocated_node_parents[node_from_id(uid)] == nodespace_from_id(nodespace_uid)): + parentpartition = self.get_partition(uid) + if parentpartition == partition and self.is_node(uid) and name.startswith(node_name_prefix) and \ + (parentpartition.allocated_node_parents[node_from_id(uid)] == nodespace_from_id(nodespace_uid)): ids.append(uid) self.group_nodes_by_ids(nodespace_uid, ids, group_name, gatetype, sortby) @@ -1466,12 +1548,14 @@ def set_link_weights(self, nodespace_from_uid, group_from, nodespace_to_uid, gro else: partition_from.set_link_weights(nodespace_from_uid, group_from, nodespace_to_uid, group_to, new_w) - uids_to_invalidate = self.get_node_uids(nodespace_from_uid, group_from) - uids_to_invalidate.extend(self.get_node_uids(nodespace_to_uid, group_to)) + self.proxycache.clear() - for uid in uids_to_invalidate: - if uid in self.proxycache: - del self.proxycache[uid] + # uids_to_invalidate = self.get_node_uids(nodespace_from_uid, group_from) + # uids_to_invalidate.extend(self.get_node_uids(nodespace_to_uid, group_to)) + + # for uid in uids_to_invalidate: + # if uid in self.proxycache: + # del self.proxycache[uid] def get_available_gatefunctions(self): return ["identity", "absolute", "sigmoid", "tanh", "rect", "one_over_x"] @@ -1479,14 +1563,18 @@ def get_available_gatefunctions(self): def add_slot_monitor(self, node_uid, slot, **_): raise RuntimeError("Theano engine does not support slot monitors") - def has_nodespace_changes(self, nodespace_uid, since_step): - partition = self.get_partition(nodespace_uid) - nodespace = self.get_nodespace(nodespace_uid) - return partition.has_nodespace_changes(nodespace.uid, since_step) + def has_nodespace_changes(self, nodespace_uids=[], since_step=0): + if nodespace_uids == []: + nodespace_uids = self.get_nodespace_uids() - def get_nodespace_changes(self, nodespace_uid, since_step): - partition = self.get_partition(nodespace_uid) - nodespace = self.get_nodespace(nodespace_uid) + for nodespace_uid in nodespace_uids: + nodespace = self.get_nodespace(nodespace_uid) + partition = self.get_partition(nodespace.uid) + if partition.has_nodespace_changes(nodespace.uid, since_step): + return True + return False + + def get_nodespace_changes(self, nodespace_uids=[], since_step=0): result = { 'nodes_dirty': {}, 'nodespaces_dirty': {}, @@ -1494,18 +1582,23 @@ def get_nodespace_changes(self, nodespace_uid, since_step): 'nodespaces_deleted': [] } - for i in range(since_step, self.current_step + 1): - if i in self.deleted_items: - result['nodespaces_deleted'].extend(self.deleted_items[i].get('nodespaces_deleted', [])) - result['nodes_deleted'].extend(self.deleted_items[i].get('nodes_deleted', [])) - - changed_nodes, changed_nodespaces = partition.get_nodespace_changes(nodespace.uid, since_step) - for uid in changed_nodes: - uid = node_to_id(uid, partition.pid) - result['nodes_dirty'][uid] = self.get_node(uid).get_data(include_links=True) - for uid in changed_nodespaces: - uid = nodespace_to_id(uid, 
partition.pid) - result['nodespaces_dirty'][uid] = self.get_nodespace(uid).get_data() + if nodespace_uids == []: + nodespace_uids = self.get_nodespace_uids() + + for nsuid in nodespace_uids: + nodespace = self.get_nodespace(nsuid) + partition = self.get_partition(nodespace.uid) + for i in range(since_step, self.current_step + 1): + if i in self.deleted_items: + result['nodespaces_deleted'].extend(self.deleted_items[i].get('nodespaces_deleted', [])) + result['nodes_deleted'].extend(self.deleted_items[i].get('nodes_deleted', [])) + changed_nodes, changed_nodespaces = partition.get_nodespace_changes(nodespace.uid, since_step) + for uid in changed_nodes: + uid = node_to_id(uid, partition.pid) + result['nodes_dirty'][uid] = self.get_node(uid).get_data(include_links=True) + for uid in changed_nodespaces: + uid = nodespace_to_id(uid, partition.pid) + result['nodespaces_dirty'][uid] = self.get_nodespace(uid).get_data() return result def get_dashboard(self): diff --git a/micropsi_core/nodenet/theano_engine/theano_nodespace.py b/micropsi_core/nodenet/theano_engine/theano_nodespace.py index 6cfa461e..5330bd70 100644 --- a/micropsi_core/nodenet/theano_engine/theano_nodespace.py +++ b/micropsi_core/nodenet/theano_engine/theano_nodespace.py @@ -77,7 +77,7 @@ def get_known_ids(self, entitytype=None): for partition in self._nodenet.partitionmap[self.uid]: uids.append(partition.rootnodespace_uid) return uids - elif entitytype == None: + elif entitytype is None: ids = self.get_known_ids('nodes') ids.extend(self.get_known_ids('nodespaces')) return ids diff --git a/micropsi_core/nodenet/theano_engine/theano_partition.py b/micropsi_core/nodenet/theano_engine/theano_partition.py index 911a0088..a7adbac0 100644 --- a/micropsi_core/nodenet/theano_engine/theano_partition.py +++ b/micropsi_core/nodenet/theano_engine/theano_partition.py @@ -237,6 +237,9 @@ def __init__(self, nodenet, pid, sparse=True, initial_number_of_nodes=2000, aver self.allocated_elements_to_activators = np.zeros(self.NoE, dtype=np.int32) + self.sensor_indices = np.zeros(0, dtype=np.int32) # index := datasource, value:=node_id + self.actuator_indices = np.zeros(0, dtype=np.int32) # index := datatarget, value:=node_id + self.inlinks = {} self.deleted_items = {} @@ -668,7 +671,6 @@ def rebuild_ret_linked(self): self.n_node_retlinked.set_value(n_node_retlinked_array) def grow_number_of_nodes(self, growby): - new_NoN = int(self.NoN + growby) new_allocated_nodes = np.zeros(new_NoN, dtype=np.int32) @@ -808,10 +810,10 @@ def load_data(self, datafilename, nodes_data): try: self.logger.info("Loading nodenet %s partition %i bulk data from file %s" % (self.nodenet.name, self.pid, datafilename)) datafile = np.load(datafilename) - except ValueError: + except ValueError: # pragma: no cover self.logger.warn("Could not read nodenet data from file %s" % datafile) return False - except IOError: + except IOError: # pragma: no cover self.logger.warn("Could not open nodenet file %s" % datafile) return False @@ -832,73 +834,73 @@ def load_data(self, datafilename, nodes_data): self.a_prev = theano.shared(value=a_prev_array.astype(T.config.floatX), name="a_prev", borrow=True) else: - self.logger.warn("no sizeinformation in file, falling back to defaults") + self.logger.warn("no sizeinformation in file, falling back to defaults") # pragma: no cover # the load bulk data into numpy arrays if 'allocated_nodes' in datafile: self.allocated_nodes = datafile['allocated_nodes'] else: - self.logger.warn("no allocated_nodes in file, falling back to defaults") + 
self.logger.warn("no allocated_nodes in file, falling back to defaults") # pragma: no cover if 'allocated_node_offsets' in datafile: self.allocated_node_offsets = datafile['allocated_node_offsets'] else: - self.logger.warn("no allocated_node_offsets in file, falling back to defaults") + self.logger.warn("no allocated_node_offsets in file, falling back to defaults") # pragma: no cover if 'allocated_elements_to_nodes' in datafile: self.allocated_elements_to_nodes = datafile['allocated_elements_to_nodes'] else: - self.logger.warn("no allocated_elements_to_nodes in file, falling back to defaults") + self.logger.warn("no allocated_elements_to_nodes in file, falling back to defaults") # pragma: no cover if 'allocated_nodespaces' in datafile: self.allocated_nodespaces = datafile['allocated_nodespaces'] else: - self.logger.warn("no allocated_nodespaces in file, falling back to defaults") + self.logger.warn("no allocated_nodespaces in file, falling back to defaults") # pragma: no cover if 'allocated_node_parents' in datafile: self.allocated_node_parents = datafile['allocated_node_parents'] else: - self.logger.warn("no allocated_node_parents in file, falling back to defaults") + self.logger.warn("no allocated_node_parents in file, falling back to defaults") # pragma: no cover if 'allocated_elements_to_activators' in datafile: self.allocated_elements_to_activators = datafile['allocated_elements_to_activators'] else: - self.logger.warn("no allocated_elements_to_activators in file, falling back to defaults") + self.logger.warn("no allocated_elements_to_activators in file, falling back to defaults") # pragma: no cover if 'allocated_nodespaces_por_activators' in datafile: self.allocated_nodespaces_por_activators = datafile['allocated_nodespaces_por_activators'] else: - self.logger.warn("no allocated_nodespaces_por_activators in file, falling back to defaults") + self.logger.warn("no allocated_nodespaces_por_activators in file, falling back to defaults") # pragma: no cover if 'allocated_nodespaces_ret_activators' in datafile: self.allocated_nodespaces_ret_activators = datafile['allocated_nodespaces_ret_activators'] else: - self.logger.warn("no allocated_nodespaces_ret_activators in file, falling back to defaults") + self.logger.warn("no allocated_nodespaces_ret_activators in file, falling back to defaults") # pragma: no cover if 'allocated_nodespaces_sub_activators' in datafile: self.allocated_nodespaces_sub_activators = datafile['allocated_nodespaces_sub_activators'] else: - self.logger.warn("no allocated_nodespaces_sub_activators in file, falling back to defaults") + self.logger.warn("no allocated_nodespaces_sub_activators in file, falling back to defaults") # pragma: no cover if 'allocated_nodespaces_sur_activators' in datafile: self.allocated_nodespaces_sur_activators = datafile['allocated_nodespaces_sur_activators'] else: - self.logger.warn("no allocated_nodespaces_sur_activators in file, falling back to defaults") + self.logger.warn("no allocated_nodespaces_sur_activators in file, falling back to defaults") # pragma: no cover if 'allocated_nodespaces_cat_activators' in datafile: self.allocated_nodespaces_cat_activators = datafile['allocated_nodespaces_cat_activators'] else: - self.logger.warn("no allocated_nodespaces_cat_activators in file, falling back to defaults") + self.logger.warn("no allocated_nodespaces_cat_activators in file, falling back to defaults") # pragma: no cover if 'allocated_nodespaces_exp_activators' in datafile: self.allocated_nodespaces_exp_activators = 
datafile['allocated_nodespaces_exp_activators']
         else:
-            self.logger.warn("no allocated_nodespaces_exp_activators in file, falling back to defaults")
+            self.logger.warn("no allocated_nodespaces_exp_activators in file, falling back to defaults")  # pragma: no cover

         if 'allocated_nodespaces_sampling_activators' in datafile:
             self.allocated_nodespaces_sampling_activators = datafile['allocated_nodespaces_sampling_activators']
         else:
-            self.logger.warn("no allocated_nodespaces_por_activators in file, falling back to defaults")
+            self.logger.warn("no allocated_nodespaces_sampling_activators in file, falling back to defaults")  # pragma: no cover

         if 'w_data' in datafile and 'w_indices' in datafile and 'w_indptr' in datafile:
             w = sp.csr_matrix((datafile['w_data'], datafile['w_indices'], datafile['w_indptr']), shape = (self.NoE, self.NoE))
@@ -909,62 +911,62 @@ def load_data(self, datafilename, nodes_data):
             self.a = theano.shared(value=datafile['a'].astype(T.config.floatX), name="a", borrow=False)
             self.a_in = theano.shared(value=np.zeros_like(datafile['a']).astype(T.config.floatX), name="a_in", borrow=False)
         else:
-            self.logger.warn("no w_data, w_indices or w_indptr in file, falling back to defaults")
+            self.logger.warn("no w_data, w_indices or w_indptr in file, falling back to defaults")  # pragma: no cover

         if 'g_theta' in datafile:
             self.g_theta = theano.shared(value=datafile['g_theta'].astype(T.config.floatX), name="theta", borrow=False)
         else:
-            self.logger.warn("no g_theta in file, falling back to defaults")
+            self.logger.warn("no g_theta in file, falling back to defaults")  # pragma: no cover

         if 'g_factor' in datafile:
             self.g_factor = theano.shared(value=datafile['g_factor'].astype(T.config.floatX), name="g_factor", borrow=False)
         else:
-            self.logger.warn("no g_factor in file, falling back to defaults")
+            self.logger.warn("no g_factor in file, falling back to defaults")  # pragma: no cover

         if 'g_threshold' in datafile:
             self.g_threshold = theano.shared(value=datafile['g_threshold'].astype(T.config.floatX), name="g_threshold", borrow=False)
         else:
-            self.logger.warn("no g_threshold in file, falling back to defaults")
+            self.logger.warn("no g_threshold in file, falling back to defaults")  # pragma: no cover

         if 'g_amplification' in datafile:
             self.g_amplification = theano.shared(value=datafile['g_amplification'].astype(T.config.floatX), name="g_amplification", borrow=False)
         else:
-            self.logger.warn("no g_amplification in file, falling back to defaults")
+            self.logger.warn("no g_amplification in file, falling back to defaults")  # pragma: no cover

         if 'g_min' in datafile:
             self.g_min = theano.shared(value=datafile['g_min'].astype(T.config.floatX), name="g_min", borrow=False)
         else:
-            self.logger.warn("no g_min in file, falling back to defaults")
+            self.logger.warn("no g_min in file, falling back to defaults")  # pragma: no cover

         if 'g_max' in datafile:
             self.g_max = theano.shared(value=datafile['g_max'].astype(T.config.floatX), name="g_max", borrow=False)
         else:
-            self.logger.warn("no g_max in file, falling back to defaults")
+            self.logger.warn("no g_max in file, falling back to defaults")  # pragma: no cover

         if 'g_function_selector' in datafile:
             self.g_function_selector = theano.shared(value=datafile['g_function_selector'], name="gatefunction", borrow=False)
         else:
-            self.logger.warn("no g_function_selector in file, falling back to defaults")
+            self.logger.warn("no g_function_selector in file, falling back to defaults")  # pragma: no cover
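# Illustrative note (not part of the patch): the `# pragma: no cover` markers
# exclude these load-time fallback branches from coverage reports. coverage.py
# honours this marker by default; spelling the same rule out explicitly in an
# ini file would look roughly like this sketch:
#
#     [report]
#     exclude_lines =
#         pragma: no cover

         if 'g_expect' in datafile:
             self.g_expect = theano.shared(value=datafile['g_expect'],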
name="expectation", borrow=False) else: - self.logger.warn("no g_expect in file, falling back to defaults") + self.logger.warn("no g_expect in file, falling back to defaults") # pragma: no cover if 'g_countdown' in datafile: self.g_countdown = theano.shared(value=datafile['g_countdown'], name="countdown", borrow=False) else: - self.logger.warn("no g_countdown in file, falling back to defaults") + self.logger.warn("no g_countdown in file, falling back to defaults") # pragma: no cover if 'g_wait' in datafile: self.g_wait = theano.shared(value=datafile['g_wait'], name="wait", borrow=False) else: - self.logger.warn("no g_wait in file, falling back to defaults") + self.logger.warn("no g_wait in file, falling back to defaults") # pragma: no cover if 'n_function_selector' in datafile: self.n_function_selector = theano.shared(value=datafile['n_function_selector'], name="nodefunction_per_gate", borrow=False) else: - self.logger.warn("no n_function_selector in file, falling back to defaults") + self.logger.warn("no n_function_selector in file, falling back to defaults") # pragma: no cover # reconstruct other states self.por_ret_dirty = True @@ -1026,10 +1028,10 @@ def load_inlinks(self, datafilename): if os.path.isfile(datafilename): try: datafile = np.load(datafilename) - except ValueError: + except ValueError: # pragma: no cover self.logger.warn("Could not read nodenet data from file %s" % datafile) return False - except IOError: + except IOError: # pragma: no cover self.logger.warn("Could not open nodenet file %s" % datafile) return False @@ -1068,149 +1070,147 @@ def load_inlinks(self, datafilename): inlink_from_offset += inlink_from_lengths[i] inlink_to_offset += inlink_to_lengths[i] else: - self.logger.warn("no or incomplete inlink information in file, no inter-partition links will be loaded") + self.logger.warn("no or incomplete inlink information in file, no inter-partition links will be loaded") # pragma: no cover def grow_number_of_nodespaces(self, growby): new_NoNS = int(self.NoNS + growby) - with self.nodenet.netlock: - new_allocated_nodespaces = np.zeros(new_NoNS, dtype=np.int32) - new_allocated_nodespaces[0:self.NoNS] = self.allocated_nodespaces - self.allocated_nodespaces = new_allocated_nodespaces + new_allocated_nodespaces = np.zeros(new_NoNS, dtype=np.int32) + new_allocated_nodespaces[0:self.NoNS] = self.allocated_nodespaces + self.allocated_nodespaces = new_allocated_nodespaces - new_allocated_nodespaces_por_activators = np.zeros(new_NoNS, dtype=np.int32) - new_allocated_nodespaces_por_activators[0:self.NoNS] = self.allocated_nodespaces_por_activators - self.allocated_nodespaces_por_activators = new_allocated_nodespaces_por_activators + new_allocated_nodespaces_por_activators = np.zeros(new_NoNS, dtype=np.int32) + new_allocated_nodespaces_por_activators[0:self.NoNS] = self.allocated_nodespaces_por_activators + self.allocated_nodespaces_por_activators = new_allocated_nodespaces_por_activators - new_allocated_nodespaces_ret_activators = np.zeros(new_NoNS, dtype=np.int32) - new_allocated_nodespaces_ret_activators[0:self.NoNS] = self.allocated_nodespaces_ret_activators - self.allocated_nodespaces_ret_activators = new_allocated_nodespaces_ret_activators + new_allocated_nodespaces_ret_activators = np.zeros(new_NoNS, dtype=np.int32) + new_allocated_nodespaces_ret_activators[0:self.NoNS] = self.allocated_nodespaces_ret_activators + self.allocated_nodespaces_ret_activators = new_allocated_nodespaces_ret_activators - new_allocated_nodespaces_sub_activators = np.zeros(new_NoNS, 
dtype=np.int32) - new_allocated_nodespaces_sub_activators[0:self.NoNS] = self.allocated_nodespaces_sub_activators - self.allocated_nodespaces_sub_activators = new_allocated_nodespaces_sub_activators + new_allocated_nodespaces_sub_activators = np.zeros(new_NoNS, dtype=np.int32) + new_allocated_nodespaces_sub_activators[0:self.NoNS] = self.allocated_nodespaces_sub_activators + self.allocated_nodespaces_sub_activators = new_allocated_nodespaces_sub_activators - new_allocated_nodespaces_sur_activators = np.zeros(new_NoNS, dtype=np.int32) - new_allocated_nodespaces_sur_activators[0:self.NoNS] = self.allocated_nodespaces_sur_activators - self.allocated_nodespaces_sur_activators = new_allocated_nodespaces_sur_activators + new_allocated_nodespaces_sur_activators = np.zeros(new_NoNS, dtype=np.int32) + new_allocated_nodespaces_sur_activators[0:self.NoNS] = self.allocated_nodespaces_sur_activators + self.allocated_nodespaces_sur_activators = new_allocated_nodespaces_sur_activators - new_allocated_nodespaces_cat_activators = np.zeros(new_NoNS, dtype=np.int32) - new_allocated_nodespaces_cat_activators[0:self.NoNS] = self.allocated_nodespaces_cat_activators - self.allocated_nodespaces_cat_activators = new_allocated_nodespaces_cat_activators + new_allocated_nodespaces_cat_activators = np.zeros(new_NoNS, dtype=np.int32) + new_allocated_nodespaces_cat_activators[0:self.NoNS] = self.allocated_nodespaces_cat_activators + self.allocated_nodespaces_cat_activators = new_allocated_nodespaces_cat_activators - new_allocated_nodespaces_exp_activators = np.zeros(new_NoNS, dtype=np.int32) - new_allocated_nodespaces_exp_activators[0:self.NoNS] = self.allocated_nodespaces_exp_activators - self.allocated_nodespaces_exp_activators = new_allocated_nodespaces_exp_activators + new_allocated_nodespaces_exp_activators = np.zeros(new_NoNS, dtype=np.int32) + new_allocated_nodespaces_exp_activators[0:self.NoNS] = self.allocated_nodespaces_exp_activators + self.allocated_nodespaces_exp_activators = new_allocated_nodespaces_exp_activators - new_allocated_nodespaces_sampling_activators = np.zeros(new_NoNS, dtype=np.int32) - new_allocated_nodespaces_sampling_activators[0:self.NoNS] = self.allocated_nodespaces_sampling_activators - self.allocated_nodespaces_sampling_activators = new_allocated_nodespaces_sampling_activators + new_allocated_nodespaces_sampling_activators = np.zeros(new_NoNS, dtype=np.int32) + new_allocated_nodespaces_sampling_activators[0:self.NoNS] = self.allocated_nodespaces_sampling_activators + self.allocated_nodespaces_sampling_activators = new_allocated_nodespaces_sampling_activators - new_nodespaces_last_changed = np.zeros(new_NoNS, dtype=np.int32) - new_nodespaces_last_changed[0:self.NoNS] = self.nodespaces_last_changed - self.nodespaces_last_changed = new_nodespaces_last_changed + new_nodespaces_last_changed = np.zeros(new_NoNS, dtype=np.int32) + new_nodespaces_last_changed[0:self.NoNS] = self.nodespaces_last_changed + self.nodespaces_last_changed = new_nodespaces_last_changed - new_nodespaces_contents_last_changed = np.zeros(new_NoNS, dtype=np.int32) - new_nodespaces_contents_last_changed[0:self.NoNS] = self.nodespaces_contents_last_changed - self.nodespaces_contents_last_changed = new_nodespaces_contents_last_changed + new_nodespaces_contents_last_changed = np.zeros(new_NoNS, dtype=np.int32) + new_nodespaces_contents_last_changed[0:self.NoNS] = self.nodespaces_contents_last_changed + self.nodespaces_contents_last_changed = new_nodespaces_contents_last_changed - self.has_new_usages = True - self.NoNS = 
new_NoNS + self.has_new_usages = True + self.NoNS = new_NoNS def grow_number_of_elements(self, growby): new_NoE = int(self.NoE + growby) - with self.nodenet.netlock: - new_allocated_elements_to_nodes = np.zeros(new_NoE, dtype=np.int32) - new_allocated_elements_to_nodes[0:self.NoE] = self.allocated_elements_to_nodes - self.allocated_elements_to_nodes = new_allocated_elements_to_nodes + new_allocated_elements_to_nodes = np.zeros(new_NoE, dtype=np.int32) + new_allocated_elements_to_nodes[0:self.NoE] = self.allocated_elements_to_nodes + self.allocated_elements_to_nodes = new_allocated_elements_to_nodes - new_allocated_elements_to_activators = np.zeros(new_NoE, dtype=np.int32) - new_allocated_elements_to_activators[0:self.NoE] = self.allocated_elements_to_activators - self.allocated_elements_to_activators = new_allocated_elements_to_activators + new_allocated_elements_to_activators = np.zeros(new_NoE, dtype=np.int32) + new_allocated_elements_to_activators[0:self.NoE] = self.allocated_elements_to_activators + self.allocated_elements_to_activators = new_allocated_elements_to_activators - if self.sparse: - new_w = sp.csr_matrix((new_NoE, new_NoE), dtype=self.nodenet.scipyfloatX) - else: - new_w = np.zeros((new_NoE, new_NoE), dtype=self.nodenet.scipyfloatX) - new_w[0:self.NoE, 0:self.NoE] = self.w.get_value(borrow=True) - self.w.set_value(new_w, borrow=True) + if self.sparse: + new_w = sp.csr_matrix((new_NoE, new_NoE), dtype=self.nodenet.scipyfloatX) + else: + new_w = np.zeros((new_NoE, new_NoE), dtype=self.nodenet.scipyfloatX) + new_w[0:self.NoE, 0:self.NoE] = self.w.get_value(borrow=True) + self.w.set_value(new_w, borrow=True) - new_a = np.zeros(new_NoE, dtype=self.nodenet.numpyfloatX) - new_a[0:self.NoE] = self.a.get_value(borrow=True) - self.a.set_value(new_a, borrow=True) + new_a = np.zeros(new_NoE, dtype=self.nodenet.numpyfloatX) + new_a[0:self.NoE] = self.a.get_value(borrow=True) + self.a.set_value(new_a, borrow=True) - new_a_shifted = np.lib.stride_tricks.as_strided(new_a, shape=(new_NoE, 7), strides=(self.nodenet.byte_per_float, self.nodenet.byte_per_float)) - self.a_shifted.set_value(new_a_shifted, borrow=True) + new_a_shifted = np.lib.stride_tricks.as_strided(new_a, shape=(new_NoE, 7), strides=(self.nodenet.byte_per_float, self.nodenet.byte_per_float)) + self.a_shifted.set_value(new_a_shifted, borrow=True) - new_a_in = np.zeros(new_NoE, dtype=self.nodenet.numpyfloatX) - new_a_in[0:self.NoE] = self.a_in.get_value(borrow=True) - self.a_in.set_value(new_a_in, borrow=True) + new_a_in = np.zeros(new_NoE, dtype=self.nodenet.numpyfloatX) + new_a_in[0:self.NoE] = self.a_in.get_value(borrow=True) + self.a_in.set_value(new_a_in, borrow=True) - new_a_prev = np.zeros(new_NoE, dtype=self.nodenet.numpyfloatX) - new_a_prev[0:self.NoE] = self.a_prev.get_value(borrow=True) - self.a_prev.set_value(new_a_prev, borrow=True) + new_a_prev = np.zeros(new_NoE, dtype=self.nodenet.numpyfloatX) + new_a_prev[0:self.NoE] = self.a_prev.get_value(borrow=True) + self.a_prev.set_value(new_a_prev, borrow=True) - new_g_theta = np.zeros(new_NoE, dtype=self.nodenet.numpyfloatX) - new_g_theta[0:self.NoE] = self.g_theta.get_value(borrow=True) - self.g_theta.set_value(new_g_theta, borrow=True) + new_g_theta = np.zeros(new_NoE, dtype=self.nodenet.numpyfloatX) + new_g_theta[0:self.NoE] = self.g_theta.get_value(borrow=True) + self.g_theta.set_value(new_g_theta, borrow=True) - new_g_theta_shifted = np.lib.stride_tricks.as_strided(new_g_theta, shape=(self.NoE, 7), strides=(self.nodenet.byte_per_float, self.nodenet.byte_per_float)) 
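# Illustrative note (not part of the patch): the only change in these grow_*
# hunks is that the `with self.nodenet.netlock:` block is dropped and its body
# dedented, presumably because callers are now expected to hold the lock; a
# plain threading.Lock is not reentrant, so re-acquiring it here would
# deadlock. A runnable miniature of the call-site locking this implies:

import threading

netlock = threading.Lock()               # not reentrant

def grow_number_of_elements(growby):
    print("growing by", growby)          # hypothetical body; caller must lock

with netlock:                            # lock once, at the call site ...
    grow_number_of_elements(1000)        # ... and never again inside the callee

# One hedged observation: the new_g_theta_shifted view above keeps
# shape=(self.NoE, 7) on both sides of the hunk, while the analogous
# new_a_shifted view uses (new_NoE, 7); whether that asymmetry is intentional
# is not clear from this diff alone.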
- self.g_theta_shifted.set_value(new_g_theta_shifted, borrow=True) + new_g_theta_shifted = np.lib.stride_tricks.as_strided(new_g_theta, shape=(self.NoE, 7), strides=(self.nodenet.byte_per_float, self.nodenet.byte_per_float)) + self.g_theta_shifted.set_value(new_g_theta_shifted, borrow=True) - new_g_factor = np.ones(new_NoE, dtype=self.nodenet.numpyfloatX) - new_g_factor[0:self.NoE] = self.g_factor.get_value(borrow=True) - self.g_factor.set_value(new_g_factor, borrow=True) + new_g_factor = np.ones(new_NoE, dtype=self.nodenet.numpyfloatX) + new_g_factor[0:self.NoE] = self.g_factor.get_value(borrow=True) + self.g_factor.set_value(new_g_factor, borrow=True) - new_g_threshold = np.zeros(new_NoE, dtype=self.nodenet.numpyfloatX) - new_g_threshold[0:self.NoE] = self.g_threshold.get_value(borrow=True) - self.g_threshold.set_value(new_g_threshold, borrow=True) + new_g_threshold = np.zeros(new_NoE, dtype=self.nodenet.numpyfloatX) + new_g_threshold[0:self.NoE] = self.g_threshold.get_value(borrow=True) + self.g_threshold.set_value(new_g_threshold, borrow=True) - new_g_amplification = np.ones(new_NoE, dtype=self.nodenet.numpyfloatX) - new_g_amplification[0:self.NoE] = self.g_amplification.get_value(borrow=True) - self.g_amplification.set_value(new_g_amplification, borrow=True) + new_g_amplification = np.ones(new_NoE, dtype=self.nodenet.numpyfloatX) + new_g_amplification[0:self.NoE] = self.g_amplification.get_value(borrow=True) + self.g_amplification.set_value(new_g_amplification, borrow=True) - new_g_min = np.zeros(new_NoE, dtype=self.nodenet.numpyfloatX) - new_g_min[0:self.NoE] = self.g_min.get_value(borrow=True) - self.g_min.set_value(new_g_min, borrow=True) + new_g_min = np.zeros(new_NoE, dtype=self.nodenet.numpyfloatX) + new_g_min[0:self.NoE] = self.g_min.get_value(borrow=True) + self.g_min.set_value(new_g_min, borrow=True) - new_g_max = np.ones(new_NoE, dtype=self.nodenet.numpyfloatX) - new_g_max[0:self.NoE] = self.g_max.get_value(borrow=True) - self.g_max.set_value(new_g_max, borrow=True) + new_g_max = np.ones(new_NoE, dtype=self.nodenet.numpyfloatX) + new_g_max[0:self.NoE] = self.g_max.get_value(borrow=True) + self.g_max.set_value(new_g_max, borrow=True) - new_g_function_selector = np.zeros(new_NoE, dtype=np.int8) - new_g_function_selector[0:self.NoE] = self.g_function_selector.get_value(borrow=True) - self.g_function_selector.set_value(new_g_function_selector, borrow=True) + new_g_function_selector = np.zeros(new_NoE, dtype=np.int8) + new_g_function_selector[0:self.NoE] = self.g_function_selector.get_value(borrow=True) + self.g_function_selector.set_value(new_g_function_selector, borrow=True) - new_g_expect = np.ones(new_NoE, dtype=self.nodenet.numpyfloatX) - new_g_expect[0:self.NoE] = self.g_expect.get_value(borrow=True) - self.g_expect.set_value(new_g_expect, borrow=True) + new_g_expect = np.ones(new_NoE, dtype=self.nodenet.numpyfloatX) + new_g_expect[0:self.NoE] = self.g_expect.get_value(borrow=True) + self.g_expect.set_value(new_g_expect, borrow=True) - new_g_countdown = np.zeros(new_NoE, dtype=np.int16) - new_g_countdown[0:self.NoE] = self.g_countdown.get_value(borrow=True) - self.g_countdown.set_value(new_g_countdown, borrow=True) + new_g_countdown = np.zeros(new_NoE, dtype=np.int16) + new_g_countdown[0:self.NoE] = self.g_countdown.get_value(borrow=True) + self.g_countdown.set_value(new_g_countdown, borrow=True) - new_g_wait = np.ones(new_NoE, dtype=np.int16) - new_g_wait[0:self.NoE] = self.g_wait.get_value(borrow=True) - self.g_wait.set_value(new_g_wait, borrow=True) + new_g_wait = 
np.ones(new_NoE, dtype=np.int16) + new_g_wait[0:self.NoE] = self.g_wait.get_value(borrow=True) + self.g_wait.set_value(new_g_wait, borrow=True) - new_n_function_selector = np.zeros(new_NoE, dtype=np.int8) - new_n_function_selector[0:self.NoE] = self.n_function_selector.get_value(borrow=True) - self.n_function_selector.set_value(new_n_function_selector, borrow=True) + new_n_function_selector = np.zeros(new_NoE, dtype=np.int8) + new_n_function_selector[0:self.NoE] = self.n_function_selector.get_value(borrow=True) + self.n_function_selector.set_value(new_n_function_selector, borrow=True) - new_n_node_porlinked = np.zeros(new_NoE, dtype=np.int8) - self.n_node_porlinked.set_value(new_n_node_porlinked, borrow=True) + new_n_node_porlinked = np.zeros(new_NoE, dtype=np.int8) + self.n_node_porlinked.set_value(new_n_node_porlinked, borrow=True) - new_n_node_retlinked = np.zeros(new_NoE, dtype=np.int8) - self.n_node_retlinked.set_value(new_n_node_retlinked, borrow=True) + new_n_node_retlinked = np.zeros(new_NoE, dtype=np.int8) + self.n_node_retlinked.set_value(new_n_node_retlinked, borrow=True) - self.NoE = new_NoE - self.has_new_usages = True + self.NoE = new_NoE + self.has_new_usages = True - if self.has_pipes: - self.por_ret_dirty = True + if self.has_pipes: + self.por_ret_dirty = True def announce_nodes(self, number_of_nodes, average_elements_per_node): @@ -1429,6 +1429,14 @@ def delete_node(self, node_id): self.g_function_selector.set_value(g_function_selector_array, borrow=True) self.allocated_elements_to_nodes[np.where(self.allocated_elements_to_nodes == node_id)[0]] = 0 + if type == SENSOR: + sensor_index = np.where(self.sensor_indices == node_id)[0] + self.sensor_indices[sensor_index] = 0 + + if type == ACTUATOR: + actuator_index = np.where(self.actuator_indices == node_id)[0] + self.actuator_indices[actuator_index] = 0 + if type == PIPE: n_function_selector_array = self.n_function_selector.get_value(borrow=True) n_function_selector_array[offset + GEN] = NFPG_PIPE_NON @@ -1551,10 +1559,10 @@ def create_nodespace(self, parent_id, id=None): def delete_nodespace(self, nodespace_id): children_ids = np.where(self.allocated_nodespaces == nodespace_id)[0] for child_id in children_ids: - self.delete_nodespace(child_id) + self.nodenet.delete_nodespace(nodespace_to_id(child_id, self.pid)) node_ids = np.where(self.allocated_node_parents == nodespace_id)[0] for node_id in node_ids: - self.delete_node(node_id) + self.nodenet.delete_node(node_to_id(node_id, self.pid)) self.nodenet.clear_supplements(node_to_id(node_id, self.pid)) self.nodenet.clear_supplements(nodespace_to_id(nodespace_id, self.pid)) @@ -1760,14 +1768,14 @@ def get_link_weights(self, nodespace_from_uid, group_from, nodespace_to_uid, gro return w_matrix[rows,cols] def set_link_weights(self, nodespace_from_uid, group_from, nodespace_to_uid, group_to, new_w): - if nodespace_from_uid not in self.nodegroups or group_from not in self.nodegroups[nodespace_from_uid]: - raise ValueError("Group %s does not exist in nodespace %s." % (group_from, nodespace_from_uid)) - if nodespace_to_uid not in self.nodegroups or group_to not in self.nodegroups[nodespace_to_uid]: - raise ValueError("Group %s does not exist in nodespace %s." 
% (group_to, nodespace_to_uid)) - if len(self.nodegroups[nodespace_from_uid][group_from]) != new_w.shape[1]: - raise ValueError("group_from %s has length %i, but new_w.shape[1] is %i" % (group_from, len(self.nodegroups[nodespace_from_uid][group_from]), new_w.shape[1])) - if len(self.nodegroups[nodespace_to_uid][group_to]) != new_w.shape[0]: - raise ValueError("group_to %s has length %i, but new_w.shape[0] is %i" % (group_to, len(self.nodegroups[nodespace_to_uid][group_to]), new_w.shape[0])) + #if nodespace_from_uid not in self.nodegroups or group_from not in self.nodegroups[nodespace_from_uid]: + # raise ValueError("Group %s does not exist in nodespace %s." % (group_from, nodespace_from_uid)) + #if nodespace_to_uid not in self.nodegroups or group_to not in self.nodegroups[nodespace_to_uid]: + # raise ValueError("Group %s does not exist in nodespace %s." % (group_to, nodespace_to_uid)) + #if len(self.nodegroups[nodespace_from_uid][group_from]) != new_w.shape[1]: + # raise ValueError("group_from %s has length %i, but new_w.shape[1] is %i" % (group_from, len(self.nodegroups[nodespace_from_uid][group_from]), new_w.shape[1])) + #if len(self.nodegroups[nodespace_to_uid][group_to]) != new_w.shape[0]: + # raise ValueError("group_to %s has length %i, but new_w.shape[0] is %i" % (group_to, len(self.nodegroups[nodespace_to_uid][group_to]), new_w.shape[0])) w_matrix = self.w.get_value(borrow=True) grp_from = self.nodegroups[nodespace_from_uid][group_from] @@ -1776,18 +1784,16 @@ def set_link_weights(self, nodespace_from_uid, group_from, nodespace_to_uid, gro w_matrix[rows, cols] = new_w self.w.set_value(w_matrix, borrow=True) - for id in self.allocated_elements_to_nodes[grp_from]: - self.nodes_last_changed[id] = self.nodenet.current_step - self.nodespaces_contents_last_changed[self.allocated_node_parents[id]] = self.nodenet.current_step - for id in self.allocated_elements_to_nodes[grp_to]: - self.nodes_last_changed[id] = self.nodenet.current_step - self.nodespaces_contents_last_changed[self.allocated_node_parents[id]] = self.nodenet.current_step + cstep = self.nodenet.current_step + self.nodes_last_changed[self.allocated_elements_to_nodes[grp_from]] = cstep + self.nodespaces_contents_last_changed[self.allocated_node_parents[self.allocated_elements_to_nodes[grp_from]]] = cstep + self.nodes_last_changed[self.allocated_elements_to_nodes[grp_to]] = cstep + self.nodespaces_contents_last_changed[self.allocated_node_parents[self.allocated_elements_to_nodes[grp_to]]] = cstep - # todo: only set this if one of the groups is por/ret relevant - if self.has_pipes: - self.por_ret_dirty = True + self.por_ret_dirty = self.has_pipes def set_inlink_weights(self, partition_from_spid, new_from_elements, new_to_elements, new_weights): + from_partition = self.nodenet.partitions[partition_from_spid] if partition_from_spid in self.inlinks: theano_from_elements = self.inlinks[partition_from_spid][0] theano_to_elements = self.inlinks[partition_from_spid][1] @@ -1808,8 +1814,6 @@ def set_inlink_weights(self, partition_from_spid, new_from_elements, new_to_elem theano_to_elements = theano.shared(value=old_to_elements, name=toname, borrow=True) theano_weights = theano.shared(value=old_weights.astype(T.config.floatX), name=weightsname, borrow=True) - from_partition = self.nodenet.partitions[partition_from_spid] - propagation_function = self.get_compiled_propagate_inlinks( from_partition, theano_from_elements, @@ -1834,9 +1838,9 @@ def set_inlink_weights(self, partition_from_spid, new_from_elements, new_to_elem 
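# Illustrative note (not part of the patch): in the set_link_weights hunk
# above, the per-id Python loops that stamped change-tracking rows are
# replaced by numpy fancy indexing, which writes the current step into all
# affected rows at once (the group-existence checks are left commented out,
# and the conditional dirty flag collapses to `self.por_ret_dirty =
# self.has_pipes`). A runnable miniature of the fancy-indexing idiom, with
# made-up data:

import numpy as np

nodes_last_changed = np.zeros(8, dtype=np.int32)            # hypothetical
allocated_elements_to_nodes = np.array([0, 1, 1, 2, 5, 5, 6, 7])
grp = np.array([2, 4, 6])                                   # one node group
nodes_last_changed[allocated_elements_to_nodes[grp]] = 42   # one write, no loop
print(nodes_last_changed)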
theano_to_elements.set_value(to_elements, borrow=True) theano_weights.set_value(weights, borrow=True) - for id in self.allocated_elements_to_nodes[theano_from_elements.get_value()]: - self.nodes_last_changed[id] = self.nodenet.current_step - self.nodespaces_contents_last_changed[self.allocated_node_parents[id]] = self.nodenet.current_step + for id in from_partition.allocated_elements_to_nodes[theano_from_elements.get_value()]: + from_partition.nodes_last_changed[id] = self.nodenet.current_step + from_partition.nodespaces_contents_last_changed[from_partition.allocated_node_parents[id]] = self.nodenet.current_step for id in self.allocated_elements_to_nodes[theano_to_elements.get_value()]: self.nodes_last_changed[id] = self.nodenet.current_step self.nodespaces_contents_last_changed[self.allocated_node_parents[id]] = self.nodenet.current_step @@ -1859,6 +1863,225 @@ def get_nodespace_changes(self, nodespace_uid, since_step): nodespace_ids = nodespace_ids[np.where(self.allocated_nodespaces[nodespace_ids] == ns_id)[0]] return node_ids, nodespace_ids + def get_node_data(self, ids=None, nodespace_ids=None, complete=False, include_links=True, include_followupnodes=True): + + a = self.a.get_value(borrow=True) + g_threshold_array = self.g_threshold.get_value(borrow=True) + g_amplification_array = self.g_amplification.get_value(borrow=True) + g_min_array = self.g_min.get_value(borrow=True) + g_max_array = self.g_max.get_value(borrow=True) + g_theta = self.g_theta.get_value(borrow=True) + g_function_selector = self.g_function_selector.get_value(borrow=True) + w = self.w.get_value(borrow=True) + + if nodespace_ids is not None: + node_ids = np.where(self.allocated_node_parents == nodespace_ids)[0] + else: + node_ids = np.nonzero(self.allocated_nodes)[0] + + if ids is not None: + node_ids = np.intersect1d(node_ids, ids) + + nodes = {} + followupuids = set() + for id in node_ids: + uid = node_to_id(id, self.pid) + strtype = get_string_node_type(self.allocated_nodes[id], self.nodenet.native_modules) + nodetype = self.nodenet.get_nodetype(strtype) + + gate_functions = {} + gate_parameters = {} + gate_activations = {} + links = {} + for gate in self.nodenet.get_nodetype(strtype).gatetypes: + numericalgate = get_numerical_gate_type(gate, self.nodenet.get_nodetype(strtype)) + element = self.allocated_node_offsets[id] + numericalgate + gate_functions[gate] = get_string_gatefunction_type(g_function_selector[element]) + + parameters = {} + threshold = g_threshold_array[element].item() + if 'threshold' not in nodetype.gate_defaults[gate] or threshold != nodetype.gate_defaults[gate]['threshold']: + parameters['threshold'] = float(threshold) + + amplification = g_amplification_array[element].item() + if 'amplification' not in nodetype.gate_defaults[gate] or amplification != nodetype.gate_defaults[gate]['amplification']: + parameters['amplification'] = float(amplification) + + minimum = g_min_array[element].item() + if 'minimum' not in nodetype.gate_defaults[gate] or minimum != nodetype.gate_defaults[gate]['minimum']: + parameters['minimum'] = float(minimum) + + maximum = g_max_array[element].item() + if 'maximum' not in nodetype.gate_defaults[gate] or maximum != nodetype.gate_defaults[gate]['maximum']: + parameters['maximum'] = float(maximum) + + theta = g_theta[element].item() + if 'theta' not in nodetype.gate_defaults[gate] or theta != nodetype.gate_defaults[gate]['theta']: + parameters['theta'] = float(theta) + + if not len(parameters) == 0: + gate_parameters[gate] = parameters + + gate_activations[gate] = 
{"default": { + "name": "default", + "uid": "default", + "activation": float(a[element])}} + + state = None + if uid in self.native_module_instances: + state = self.native_module_instances.get(uid).clone_state() + + parameters = {} + if strtype == "Sensor": + sensor_element = self.allocated_node_offsets[id] + GEN + datasource_index = np.where(self.sensor_indices == sensor_element)[0] + if len(datasource_index) == 0: + parameters['datasource'] = None + else: + parameters['datasource'] = self.nodenet.get_datasources()[datasource_index[0]] + elif strtype == "Actor": + actuator_element = self.allocated_node_offsets[id] + GEN + datatarget_index = np.where(self.actuator_indices == actuator_element)[0] + if len(datatarget_index) == 0: + parameters['datatarget'] = None + else: + parameters['datatarget'] = self.nodenet.get_datatargets()[datatarget_index[0]] + elif strtype == "Activator": + activator_type = None + if id in self.allocated_nodespaces_por_activators: + activator_type = "por" + elif id in self.allocated_nodespaces_ret_activators: + activator_type = "ret" + elif id in self.allocated_nodespaces_sub_activators: + activator_type = "sub" + elif id in self.allocated_nodespaces_sur_activators: + activator_type = "sur" + elif id in self.allocated_nodespaces_cat_activators: + activator_type = "cat" + elif id in self.allocated_nodespaces_exp_activators: + activator_type = "exp" + elif id in self.allocated_nodespaces_sampling_activators: + activator_type = "sampling" + parameters['type'] = activator_type + elif strtype == "Pipe": + g_expect_array = self.g_expect.get_value(borrow=True) + value = g_expect_array[self.allocated_node_offsets[id] + get_numerical_gate_type("sur")].item() + parameters['expectation'] = value + g_wait_array = self.g_wait.get_value(borrow=True) + parameters['wait'] = g_wait_array[self.allocated_node_offsets[id] + get_numerical_gate_type("sur")].item() + elif strtype == "Comment": + parameters = self.comment_instances.get(uid).clone_parameters() + elif strtype in self.nodenet.native_modules: + parameters = self.native_module_instances.get(uid).clone_parameters() + + data = {"uid": uid, + "name": self.nodenet.names.get(uid, uid), + "position": self.nodenet.positions.get(uid, (10, 10, 10)), + "parent_nodespace": nodespace_to_id(self.allocated_node_parents[id], self.pid), + "type": strtype, + "parameters": parameters, + "state": state, + "gate_parameters": gate_parameters, + "sheaves": {"default": {"name": "default", + "uid": "default", + "activation": float(a[self.allocated_node_offsets[id] + GEN])}}, + "activation": float(a[self.allocated_node_offsets[id] + GEN]), + "gate_activations": gate_activations, + "gate_functions": gate_functions} + if complete: + data['index'] = id + if include_links: + data['links'] = {} + + nodes[uid] = data + + # fill in links if requested + if include_links: + slots, gates = np.nonzero(w) + for index, gate_index in enumerate(gates): + source_id = self.allocated_elements_to_nodes[gate_index] + source_uid = node_to_id(source_id, self.pid) + if source_uid not in nodes: + continue + + source_type = self.allocated_nodes[source_id] + source_nodetype = self.nodenet.get_nodetype(get_string_node_type(source_type, self.nodenet.native_modules)) + source_gate_numerical = gate_index - self.allocated_node_offsets[source_id] + source_gate_type = get_string_gate_type(source_gate_numerical, source_nodetype) + + slot_index = slots[index] + target_id = self.allocated_elements_to_nodes[slot_index] + target_uid = node_to_id(target_id, self.pid) + target_type = 
self.allocated_nodes[target_id] + target_nodetype = self.nodenet.get_nodetype(get_string_node_type(target_type, self.nodenet.native_modules)) + target_slot_numerical = slot_index - self.allocated_node_offsets[target_id] + target_slot_type = get_string_slot_type(target_slot_numerical, target_nodetype) + linkdict = {"weight": float(w[slot_index, gate_index]), + "certainty": 1, + "target_slot_name": target_slot_type, + "target_node_uid": target_uid} + if source_gate_type not in nodes[source_uid]["links"]: + nodes[source_uid]["links"][source_gate_type] = [] + nodes[source_uid]["links"][source_gate_type].append(linkdict) + followupuids.add(target_uid) + + # outgoing cross-partition links + for partition_to_spid, to_partition in self.nodenet.partitions.items(): + if self.spid in to_partition.inlinks: + inlinks = to_partition.inlinks[self.spid] + from_elements = inlinks[0].get_value(borrow=True) + to_elements = inlinks[1].get_value(borrow=True) + w = inlinks[2].get_value(borrow=True) + slots, gates = np.nonzero(w) + for index, gate_index in enumerate(gates): + source_id = self.allocated_elements_to_nodes[from_elements[gate_index]] + source_uid = node_to_id(source_id, self.pid) + if source_uid not in nodes: + continue + + source_type = self.allocated_nodes[source_id] + source_nodetype = self.nodenet.get_nodetype(get_string_node_type(source_type, self.nodenet.native_modules)) + source_gate_numerical = from_elements[gate_index] - self.allocated_node_offsets[source_id] + source_gate_type = get_string_gate_type(source_gate_numerical, source_nodetype) + + slot_index = slots[index] + target_id = to_partition.allocated_elements_to_nodes[to_elements[slot_index]] + target_uid = node_to_id(target_id, to_partition.pid) + target_type = to_partition.allocated_nodes[target_id] + target_nodetype = to_partition.nodenet.get_nodetype(get_string_node_type(target_type, to_partition.nodenet.native_modules)) + target_slot_numerical = to_elements[slot_index] - to_partition.allocated_node_offsets[target_id] + target_slot_type = get_string_slot_type(target_slot_numerical, target_nodetype) + linkdict = {"weight": float(w[slot_index, gate_index]), + "certainty": 1, + "target_slot_name": target_slot_type, + "target_node_uid": target_uid} + if source_gate_type not in nodes[source_uid]["links"]: + nodes[source_uid]["links"][source_gate_type] = [] + nodes[source_uid]["links"][source_gate_type].append(linkdict) + followupuids.add(target_uid) + + # incoming cross-partition links need to be checked for followup nodes in the other partition + # even though we're not interested in the links themselves as they will be delivered with the nodes + # in the other partition. + # having to deliver followupnodes for links that aren't even our business is really annoying. 
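# Illustrative note (not part of the patch): self.inlinks maps a source
# partition's spid to a triple of theano shared variables (from_elements,
# to_elements, weights); the loop below scans these triples for incoming
# cross-partition links. np.nonzero on the weight matrix yields the
# (slot, gate) index pairs of every nonzero weight, as in this runnable
# miniature with made-up data:

import numpy as np

w = np.array([[0.0, 0.5],
              [0.8, 0.0]])               # hypothetical inlink weight matrix
slots, gates = np.nonzero(w)             # rows -> slots, columns -> gates
for slot, gate in zip(slots, gates):
    print("gate", gate, "-> slot", slot, "weight", w[slot, gate])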
+ for from_partition_id, inlinks in self.inlinks.items(): + from_partition = self.nodenet.partitions[from_partition_id] + from_elements = inlinks[0].get_value(borrow=True) + to_elements = inlinks[1].get_value(borrow=True) + w = inlinks[2].get_value(borrow=True) + slots, gates = np.nonzero(w) + for index, gate_index in enumerate(gates): + source_id = from_partition.allocated_elements_to_nodes[from_elements[gate_index]] + source_uid = node_to_id(source_id, from_partition.pid) + + slot_index = slots[index] + target_id = self.allocated_elements_to_nodes[to_elements[slot_index]] + target_uid = node_to_id(target_id, self.pid) + if target_uid in nodes: + followupuids.add(source_uid) + + return nodes, followupuids + def integrity_check(self): for nid in range(self.NoN): diff --git a/micropsi_core/nodenet/theano_engine/theano_stepoperators.py b/micropsi_core/nodenet/theano_engine/theano_stepoperators.py index 36078c50..56adbb5e 100644 --- a/micropsi_core/nodenet/theano_engine/theano_stepoperators.py +++ b/micropsi_core/nodenet/theano_engine/theano_stepoperators.py @@ -41,26 +41,10 @@ def __init__(self, nodenet): self.nodenet = nodenet def read_sensors_and_actuator_feedback(self): - if self.worldadapter is None: - return - - datasource_to_value_map = {} - for datasource in self.worldadapter.get_available_datasources(): - datasource_to_value_map[datasource] = self.worldadapter.get_datasource(datasource) - - datatarget_to_value_map = {} - for datatarget in self.worldadapter.get_available_datatargets(): - datatarget_to_value_map[datatarget] = self.worldadapter.get_datatarget_feedback(datatarget) - - self.nodenet.set_sensors_and_actuator_feedback_to_values(datasource_to_value_map, datatarget_to_value_map) + self.nodenet.set_sensors_and_actuator_feedback_values() def write_actuators(self): - if self.worldadapter is None: - return - - values_to_write = self.nodenet.read_actuators() - for datatarget in values_to_write: - self.worldadapter.add_to_datatarget(datatarget, values_to_write[datatarget]) + self.nodenet.set_actuator_values() def count_success_and_failure(self, nodenet): nays = 0 @@ -79,4 +63,5 @@ def execute(self, nodenet, nodes, netapi): self.read_sensors_and_actuator_feedback() for partition in nodenet.partitions.values(): partition.calculate() - self.count_success_and_failure(nodenet) + if nodenet.use_modulators: + self.count_success_and_failure(nodenet) diff --git a/micropsi_core/runtime.py b/micropsi_core/runtime.py index 6d8dbb29..664e0212 100755 --- a/micropsi_core/runtime.py +++ b/micropsi_core/runtime.py @@ -35,30 +35,77 @@ NODENET_DIRECTORY = "nodenets" WORLD_DIRECTORY = "worlds" -RESOURCE_PATH = cfg['paths']['resource_path'] -configs = config.ConfigurationManager(cfg['paths']['server_settings_path']) +signal_handler_registry = [] +runner = {'timestep': 1000, 'runner': None, 'factor': 1} + +nodenet_lock = threading.Lock() + +# global variables set by intialize() +RESOURCE_PATH = None +PERSISTENCY_PATH = None + +configs = None +logger = None worlds = {} nodenets = {} native_modules = {} custom_recipes = {} +custom_operations = {} -runner = {'timestep': 1000, 'runner': None, 'factor': 1} +netapi_consoles = {} -signal_handler_registry = [] +from code import InteractiveConsole -logger = MicropsiLogger({ - 'system': cfg['logging']['level_system'], - 'world': cfg['logging']['level_world'] -}, cfg['logging'].get('logfile')) -nodenet_lock = threading.Lock() +class FileCacher(): + "Cache the stdout text so we can analyze it before returning it" + def __init__(self): + self.reset() + + def 
reset(self): + self.out = [] + + def write(self,line): + self.out.append(line) + + def flush(self): + output = '\n'.join(self.out) + self.reset() + return output + + +class NetapiShell(InteractiveConsole): + "Wrapper around Python that can filter input/output to the shell" + def __init__(self, netapi): + self.stdout = sys.stdout + self.stderr = sys.stderr + self.outcache = FileCacher() + self.errcache = FileCacher() + InteractiveConsole.__init__(self, locals={'netapi': netapi}) + return + + def get_output(self): + sys.stdout = self.outcache + sys.stderr = self.errcache + + def return_output(self): + sys.stdout = self.stdout + sys.stderr = self.stderr -if cfg['micropsi2'].get('profile_runner'): - import cProfile - import pstats - import io + def push(self,line): + self.get_output() + incomplete = InteractiveConsole.push(self,line) + if incomplete: + InteractiveConsole.push(self,'\n') + self.return_output() + err = self.errcache.flush() + if err and err.startswith('Traceback'): + parts = err.strip().split('\n') + return False, "%s: %s" % (parts[-3], parts[-1]) + out = self.outcache.flush() + return True, out.strip() def add_signal_handler(handler): @@ -82,6 +129,7 @@ class MicropsiRunner(threading.Thread): def __init__(self): threading.Thread.__init__(self) if cfg['micropsi2'].get('profile_runner'): + import cProfile self.profiler = cProfile.Profile() else: self.profiler = None @@ -142,6 +190,8 @@ def run(self): average_duration = self.sum_of_durations / self.number_of_samples if self.total_steps % self.granularity == 0: if self.profiler: + import pstats + import io s = io.StringIO() sortby = 'cumtime' ps = pstats.Stats(self.profiler, stream=s).sort_stats(sortby) @@ -249,7 +299,7 @@ def get_nodenet(nodenet_uid): if nodenet_uid in get_available_nodenets(): load_nodenet(nodenet_uid) else: - return None + raise KeyError("Unknown nodenet") return nodenets[nodenet_uid] @@ -265,126 +315,143 @@ def load_nodenet(nodenet_uid): if nodenet_uid in nodenet_data: world_uid = worldadapter = None - nodenet_lock.acquire() + with nodenet_lock: - if cfg['micropsi2'].get('single_agent_mode'): - # unload all other nodenets if single_agent_mode is selected - for uid in list(nodenets.keys()): - if uid != nodenet_uid: - unload_nodenet(uid) + if cfg['micropsi2'].get('single_agent_mode'): + # unload all other nodenets if single_agent_mode is selected + for uid in list(nodenets.keys()): + if uid != nodenet_uid: + unload_nodenet(uid) - if nodenet_uid not in nodenets: - data = nodenet_data[nodenet_uid] + if nodenet_uid not in nodenets: + data = nodenet_data[nodenet_uid] - if hasattr(data, 'world') and data.world: - if data.world in worlds: - world_uid = data.world - worldadapter = data.get('worldadapter') + worldadapter_instance = None + if hasattr(data, 'world') and data.world: + if data.world in worlds: + world_uid = data.world + worldadapter = data.get('worldadapter') + else: + logging.getLogger("system").warn("World %s for nodenet %s not found" % (data.world, data.uid)) + + if world_uid: + result, worldadapter_instance = worlds[world_uid].register_nodenet(worldadapter, nodenet_uid) + if not result: + logging.getLogger('system').warn(worldadapter_instance) + worldadapter_instance = None + worldadapter = None + world_uid = None + + engine = data.get('engine') or 'dict_engine' + + logger.register_logger("agent.%s" % nodenet_uid, cfg['logging']['level_agent']) + + params = { + 'name': data.name, + 'worldadapter': worldadapter, + 'worldadapter_instance': worldadapter_instance, + 'world': world_uid, + 'owner': data.owner, 
+ 'uid': data.uid, + 'native_modules': filter_native_modules(engine), + 'use_modulators': data.get('use_modulators', True) # getter for compatibility + } + if engine == 'dict_engine': + from micropsi_core.nodenet.dict_engine.dict_nodenet import DictNodenet + nodenets[nodenet_uid] = DictNodenet(**params) + elif engine == 'theano_engine': + from micropsi_core.nodenet.theano_engine.theano_nodenet import TheanoNodenet + nodenets[nodenet_uid] = TheanoNodenet(**params) + # Add additional engine types here else: - logging.getLogger("system").warn("World %s for nodenet %s not found" % (data.world, data.uid)) - - engine = data.get('engine', 'dict_engine') - - logger.register_logger("agent.%s" % nodenet_uid, cfg['logging']['level_agent']) - - if engine == 'dict_engine': - from micropsi_core.nodenet.dict_engine.dict_nodenet import DictNodenet - nodenets[nodenet_uid] = DictNodenet( - name=data.name, worldadapter=worldadapter, - world=world_uid, owner=data.owner, uid=data.uid, - native_modules=filter_native_modules(engine)) - elif engine == 'theano_engine': - from micropsi_core.nodenet.theano_engine.theano_nodenet import TheanoNodenet - nodenets[nodenet_uid] = TheanoNodenet( - name=data.name, worldadapter=worldadapter, - world=world_uid, owner=data.owner, uid=data.uid, - native_modules=filter_native_modules(engine)) - # Add additional engine types here - else: - nodenet_lock.release() - return False, "Nodenet %s requires unknown engine %s" % (nodenet_uid, engine) + return False, "Nodenet %s requires unknown engine %s" % (nodenet_uid, engine) + + nodenets[nodenet_uid].load(os.path.join(PERSISTENCY_PATH, NODENET_DIRECTORY, nodenet_uid + ".json")) - nodenets[nodenet_uid].load(os.path.join(RESOURCE_PATH, NODENET_DIRECTORY, nodenet_uid + ".json")) + netapi_consoles[nodenet_uid] = NetapiShell(nodenets[nodenet_uid].netapi) - if "settings" in data: - nodenets[nodenet_uid].settings = data["settings"].copy() + if "settings" in data: + nodenets[nodenet_uid].settings = data["settings"].copy() + else: + nodenets[nodenet_uid].settings = {} else: - nodenets[nodenet_uid].settings = {} - else: - world_uid = nodenets[nodenet_uid].world or None - worldadapter = nodenets[nodenet_uid].worldadapter - if world_uid: - worlds[world_uid].register_nodenet(worldadapter, nodenets[nodenet_uid]) + world_uid = nodenets[nodenet_uid].world or None + worldadapter = nodenets[nodenet_uid].worldadapter - nodenet_lock.release() return True, nodenet_uid - return False, "Nodenet %s not found in %s" % (nodenet_uid, RESOURCE_PATH) + return False, "Nodenet %s not found in %s" % (nodenet_uid, PERSISTENCY_PATH) -def get_nodenet_data(nodenet_uid, nodespace, step=0, include_links=True): - """ returns the current state of the nodenet """ +def get_nodenet_metadata(nodenet_uid): + """ returns the given nodenet's metadata""" nodenet = get_nodenet(nodenet_uid) data = nodenet.metadata - if step > nodenet.current_step: - return data - with nodenet.netlock: - if not nodenets[nodenet_uid].is_nodespace(nodespace): - nodespace = nodenets[nodenet_uid].get_nodespace(None).uid - data.update(nodenets[nodenet_uid].get_nodespace_data(nodespace, include_links)) - data['nodespace'] = nodespace - data.update({ - 'nodetypes': nodenet.get_standard_nodetype_definitions(), - 'native_modules': filter_native_modules(nodenet.engine), - 'monitors': nodenet.construct_monitors_dict() - }) + data.update({ + 'nodetypes': nodenet.get_standard_nodetype_definitions(), + 'nodespaces': nodenet.construct_nodespaces_dict(None, transitive=True), + 'native_modules': 
filter_native_modules(nodenet.engine), + 'monitors': nodenet.construct_monitors_dict(), + 'rootnodespace': nodenet.get_nodespace(None).uid + }) return data -def get_nodenet_activation_data(nodenet_uid, nodespace=None, last_call_step=-1): +def get_nodenet_activation_data(nodenet_uid, nodespaces=[], last_call_step=-1): nodenet = get_nodenet(nodenet_uid) with nodenet.netlock: data = { - 'activations': nodenet.get_activation_data(nodespace, rounded=1), - 'has_changes': nodenet.has_nodespace_changes(nodespace, last_call_step) + 'activations': nodenet.get_activation_data(nodespaces, rounded=1), + 'has_changes': nodenet.has_nodespace_changes(nodespaces, last_call_step) } return data -def get_current_state(nodenet_uid, nodenet=None, nodenet_diff=None, world=None, monitors=None, dashboard=None): - """ returns the current state of the nodenet - TODO: maybe merge with above get_nodenet_data? +def get_nodes(nodenet_uid, nodespaces=[], include_links=True): + """Return data for the given nodespaces""" + nodenet = get_nodenet(nodenet_uid) + return nodenet.get_nodes(nodespaces, include_links) + + +def get_calculation_state(nodenet_uid, nodenet=None, nodenet_diff=None, world=None, monitors=None, dashboard=None): + """ returns the current state of the calculation """ data = {} nodenet_obj = get_nodenet(nodenet_uid) if nodenet_obj is not None: condition = nodenet_obj.get_runner_condition() if condition: - data['simulation_condition'] = condition + data['calculation_condition'] = condition if 'monitor' in condition: monitor = nodenet_obj.get_monitor(condition['monitor']['uid']) if monitor: - data['simulation_condition']['monitor']['color'] = monitor.color + data['calculation_condition']['monitor']['color'] = monitor.color else: - del data['simulation_condition']['monitor'] - data['simulation_running'] = nodenet_obj.is_active + del data['calculation_condition']['monitor'] + data['calculation_running'] = nodenet_obj.is_active data['current_nodenet_step'] = nodenet_obj.current_step data['current_world_step'] = worlds[nodenet_obj.world].current_step if nodenet_obj.world else 0 if nodenet is not None: - data['nodenet'] = get_nodenet_data(nodenet_uid=nodenet_uid, **nodenet) + if not type(nodenet) == dict: + nodenet = {} + data['nodenet'] = get_nodes(nodenet_uid, nodespaces=nodenet.get('nodespaces', []), include_links=nodenet.get('include_links', True)) if nodenet_diff is not None: - activations = get_nodenet_activation_data(nodenet_uid, last_call_step=nodenet_diff['step'], nodespace=nodenet_diff.get('nodespace')) + activations = get_nodenet_activation_data(nodenet_uid, last_call_step=nodenet_diff['step'], nodespaces=nodenet_diff.get('nodespaces', [])) data['nodenet_diff'] = { 'activations': activations['activations'], 'modulators': nodenet_obj.construct_modulators_dict() } if activations['has_changes']: - data['nodenet_diff']['changes'] = nodenet_obj.get_nodespace_changes(nodenet_diff.get('nodespace'), nodenet_diff['step']) + data['nodenet_diff']['changes'] = nodenet_obj.get_nodespace_changes(nodenet_diff.get('nodespaces', []), nodenet_diff['step']) if nodenet_obj.user_prompt: data['user_prompt'] = nodenet_obj.user_prompt nodenet_obj.user_prompt = None if world is not None and nodenet_obj.world: + if not type(world) == dict: + world = {} data['world'] = get_world_view(world_uid=nodenet_obj.world, **world) if monitors is not None: + if not type(monitors) == dict: + monitors = {} data['monitors'] = get_monitoring_info(nodenet_uid=nodenet_uid, **monitors) if dashboard is not None: data['dashboard'] = 
get_agent_dashboard(nodenet_uid) @@ -402,15 +469,17 @@ def unload_nodenet(nodenet_uid): """ if nodenet_uid not in nodenets: return False + if nodenet_uid in netapi_consoles: + del netapi_consoles[nodenet_uid] nodenet = nodenets[nodenet_uid] if nodenet.world: - worlds[nodenet.world].unregister_nodenet(nodenet) + worlds[nodenet.world].unregister_nodenet(nodenet.uid) del nodenets[nodenet_uid] logger.unregister_logger('agent.%s' % nodenet_uid) return True -def new_nodenet(nodenet_name, engine="dict_engine", worldadapter=None, template=None, owner="", world_uid=None, uid=None): +def new_nodenet(nodenet_name, engine="dict_engine", worldadapter=None, template=None, owner="", world_uid=None, uid=None, use_modulators=True): """Creates a new node net manager and registers it. Arguments: @@ -436,14 +505,15 @@ def new_nodenet(nodenet_name, engine="dict_engine", worldadapter=None, template= owner=owner, world=world_uid, settings={}, - engine=engine) + engine=engine, + use_modulators=use_modulators) - filename = os.path.join(RESOURCE_PATH, NODENET_DIRECTORY, data['uid'] + ".json") + filename = os.path.join(PERSISTENCY_PATH, NODENET_DIRECTORY, data['uid'] + ".json") nodenet_data[data['uid']] = Bunch(**data) load_nodenet(data['uid']) if template is not None and template in nodenet_data: load_nodenet(template) - data_to_merge = nodenets[template].export_json() + data_to_merge = get_nodenet(template).export_json() data_to_merge.update(data) load_nodenet(uid) nodenets[uid].merge_data(data_to_merge) @@ -457,7 +527,7 @@ def delete_nodenet(nodenet_uid): Simple unloading is maintained automatically when a nodenet is suspended and another one is accessed. """ - filename = os.path.join(RESOURCE_PATH, NODENET_DIRECTORY, nodenet_uid + '.json') + filename = os.path.join(PERSISTENCY_PATH, NODENET_DIRECTORY, nodenet_uid + '.json') nodenet = get_nodenet(nodenet_uid) nodenet.remove(filename) unload_nodenet(nodenet_uid) @@ -468,9 +538,9 @@ def delete_nodenet(nodenet_uid): def set_nodenet_properties(nodenet_uid, nodenet_name=None, worldadapter=None, world_uid=None, owner=None): """Sets the supplied parameters (and only those) for the nodenet with the given uid.""" - nodenet = nodenets[nodenet_uid] - if nodenet.world and nodenet.world != world_uid: - worlds[nodenet.world].unregister_nodenet(nodenet) + nodenet = get_nodenet(nodenet_uid) + if nodenet.world and (nodenet.world != world_uid or nodenet.worldadapter != worldadapter): + worlds[nodenet.world].unregister_nodenet(nodenet.uid) nodenet.world = None if worldadapter is None: worldadapter = nodenet.worldadapter @@ -478,7 +548,9 @@ def set_nodenet_properties(nodenet_uid, nodenet_name=None, worldadapter=None, wo assert worldadapter in worlds[world_uid].supported_worldadapters nodenet.world = world_uid nodenet.worldadapter = worldadapter - worlds[world_uid].register_nodenet(worldadapter, nodenet) + result, wa_instance = worlds[world_uid].register_nodenet(worldadapter, nodenet.uid) + if result: + nodenet.worldadapter_instance = wa_instance if nodenet_name: nodenet.name = nodenet_name if owner: @@ -496,10 +568,10 @@ def start_nodenetrunner(nodenet_uid): def set_runner_properties(timestep, factor): - """Sets the speed of the nodenet simulation in ms. + """Sets the speed of the nodenet calculation in ms. Argument: - timestep: sets the simulation speed. + timestep: sets the calculation speed. 
""" configs['runner_timestep'] = timestep runner['timestep'] = timestep @@ -510,19 +582,20 @@ def set_runner_properties(timestep, factor): def set_runner_condition(nodenet_uid, monitor=None, steps=None): """ registers a condition that stops the runner if it is fulfilled""" + nodenet = get_nodenet(nodenet_uid) condition = {} if monitor is not None: condition['monitor'] = monitor if steps is not None: - condition['step'] = nodenets[nodenet_uid].current_step + steps + condition['step'] = nodenet.current_step + steps condition['step_amount'] = steps if condition: - nodenets[nodenet_uid].set_runner_condition(condition) + nodenet.set_runner_condition(condition) return True, condition def remove_runner_condition(nodenet_uid): - nodenets[nodenet_uid].unset_runner_condition() + get_nodenet(nodenet_uid).unset_runner_condition() return True @@ -536,12 +609,13 @@ def get_runner_properties(): def get_is_nodenet_running(nodenet_uid): """Returns True if a nodenet runner is active for the given nodenet, False otherwise.""" - return nodenets[nodenet_uid].is_active + return get_nodenet(nodenet_uid).is_active def stop_nodenetrunner(nodenet_uid): """Stops the thread for the given nodenet.""" - nodenets[nodenet_uid].is_active = False + nodenet = get_nodenet(nodenet_uid) + nodenet.is_active = False test = {nodenets[uid].is_active for uid in nodenets} if True not in test: test = {worlds[uid].is_active for uid in worlds} @@ -552,20 +626,43 @@ def stop_nodenetrunner(nodenet_uid): def step_nodenet(nodenet_uid): - """Advances the given nodenet by one simulation step. + """Advances the given nodenet by one calculation step. Arguments: nodenet_uid: The uid of the nodenet """ - nodenets[nodenet_uid].timed_step() - nodenets[nodenet_uid].update_monitors() - if nodenets[nodenet_uid].world and nodenets[nodenet_uid].current_step % configs['runner_factor'] == 0: - worlds[nodenets[nodenet_uid].world].step() - return nodenets[nodenet_uid].current_step + nodenet = get_nodenet(nodenet_uid) + nodenet.timed_step() + nodenet.update_monitors() + if nodenet.world and nodenet.current_step % configs['runner_factor'] == 0: + worlds[nodenet.world].step() + return nodenet.current_step + + +def step_nodenets_in_world(world_uid, nodenet_uid=None, steps=1): + """ Advances all nodenets registered in the given world + (or, only the given nodenet) by the given number of steps""" + nodenet = None + if nodenet_uid is not None: + nodenet = get_nodenet(nodenet_uid) + if nodenet and nodenet.world == world_uid: + for i in range(steps): + nodenet.timed_step() + nodenet.update_monitors() + else: + for i in range(steps): + for uid in worlds[world_uid].agents: + nodenet = get_nodenet(uid) + nodenet.timed_step() + nodenet.update_monitors() + return True -def revert_nodenet(nodenet_uid): +def revert_nodenet(nodenet_uid, also_revert_world=False): """Returns the nodenet to the last saved state.""" + nodenet = get_nodenet(nodenet_uid) + if also_revert_world and nodenet_uid in nodenets and nodenet.world: + revert_world(nodenet.world) unload_nodenet(nodenet_uid) load_nodenet(nodenet_uid) return True @@ -573,8 +670,8 @@ def revert_nodenet(nodenet_uid): def save_nodenet(nodenet_uid): """Stores the nodenet on the server (but keeps it open).""" - nodenet = nodenets[nodenet_uid] - nodenet.save(os.path.join(RESOURCE_PATH, NODENET_DIRECTORY, nodenet_uid + '.json')) + nodenet = get_nodenet(nodenet_uid) + nodenet.save(os.path.join(PERSISTENCY_PATH, NODENET_DIRECTORY, nodenet_uid + '.json')) nodenet_data[nodenet_uid] = Bunch(**nodenet.metadata) return True @@ -584,7 
+681,7 @@ def export_nodenet(nodenet_uid): Returns a string that contains the nodenet state in JSON format. """ - return json.dumps(nodenets[nodenet_uid].export_json(), sort_keys=True, indent=4) + return json.dumps(get_nodenet(nodenet_uid).export_json(), sort_keys=True, indent=4) def import_nodenet(string, owner=None): @@ -603,13 +700,17 @@ def import_nodenet(string, owner=None): raise RuntimeError("A nodenet with this ID already exists.") if 'owner': import_data['owner'] = owner + nodenet_uid = import_data['uid'] + filename = os.path.join(PERSISTENCY_PATH, NODENET_DIRECTORY, import_data['uid'] + '.json') + meta = parse_definition(import_data, filename) + nodenet_data[nodenet_uid] = meta # assert import_data['world'] in worlds - filename = os.path.join(RESOURCE_PATH, NODENET_DIRECTORY, import_data['uid'] + '.json') with open(filename, 'w+') as fp: - fp.write(json.dumps(import_data)) - nodenet_data[import_data['uid']] = parse_definition(import_data, filename) - load_nodenet(import_data['uid']) - return import_data['uid'] + fp.write(json.dumps(meta)) + load_nodenet(nodenet_uid) + merge_nodenet(nodenet_uid, string, keep_uids=True) + save_nodenet(nodenet_uid) + return nodenet_uid def merge_nodenet(nodenet_uid, string, keep_uids=False): @@ -620,7 +721,7 @@ def merge_nodenet(nodenet_uid, string, keep_uids=False): string: a string that contains the nodenet data that is to be merged in JSON format. keep_uids: if true, no uid replacement will be performed. Dangerous. """ - nodenet = nodenets[nodenet_uid] + nodenet = get_nodenet(nodenet_uid) data = json.loads(string) nodenet.merge_data(data, keep_uids) return True @@ -634,7 +735,7 @@ def get_nodespace_list(nodenet_uid): - nodespace parent - a list of nodes (uid, name, and type) residing in that nodespace """ - nodenet = nodenets[nodenet_uid] + nodenet = get_nodenet(nodenet_uid) data = {} for uid in nodenet.get_nodespace_uids(): nodespace = nodenet.get_nodespace(uid) @@ -643,6 +744,7 @@ def get_nodespace_list(nodenet_uid): 'name': nodespace.name, 'parent': nodespace.parent_nodespace, 'nodes': {}, + 'properties': nodenet.get_nodespace_properties(uid) } for nid in nodespace.get_known_ids('nodes'): data[uid]['nodes'][nid] = { @@ -672,10 +774,11 @@ def get_node(nodenet_uid, node_uid, include_links=True): "parent_nodespace" (str): the uid of the nodespace this node lives in } """ - if nodenets[nodenet_uid].is_node(node_uid): - return True, nodenets[nodenet_uid].get_node(node_uid).get_data(include_links=include_links) - elif nodenets[nodenet_uid].is_nodespace(node_uid): - data = nodenets[nodenet_uid].get_nodespace(node_uid).get_data() + nodenet = get_nodenet(nodenet_uid) + if nodenet.is_node(node_uid): + return True, nodenet.get_node(node_uid).get_data(include_links=include_links) + elif nodenet.is_nodespace(node_uid): + data = nodenet.get_nodespace(node_uid).get_data() data['type'] = 'Nodespace' return True, data else: @@ -783,11 +886,21 @@ def clone_nodes(nodenet_uid, node_uids, clonemode, nodespace=None, offset=[50, 5 return False, "Could not clone nodes. See log for details." -def get_nodespace_changes(nodenet_uid, nodespace_uid, since_step): +def get_nodespace_changes(nodenet_uid, nodespaces, since_step): """ Returns a dict of changes that happened in the nodenet in the given nodespace since the given step. 
Contains uids of deleted nodes and nodespaces and the datadicts for changed or added nodes and nodespaces """ - return nodenets[nodenet_uid].get_nodespace_changes(nodespace_uid, since_step) + return get_nodenet(nodenet_uid).get_nodespace_changes(nodespaces, since_step) + + +def get_nodespace_properties(nodenet_uid, nodespace_uid=None): + """ retrieve the ui properties for the given nodespace""" + return get_nodenet(nodenet_uid).get_nodespace_properties(nodespace_uid) + + +def set_nodespace_properties(nodenet_uid, nodespace_uid, properties): + """ sets the ui properties for the given nodespace""" + return get_nodenet(nodenet_uid).set_nodespace_properties(nodespace_uid, properties) def __pythonify(name): @@ -825,9 +938,9 @@ def generate_netapi_fragment(nodenet_uid, node_uids): else: lines.append("%s = netapi.create_nodespace(None)" % (varname)) idmap[nodespace.uid] = varname - xpos.append(node.position[0]) - ypos.append(node.position[1]) - zpos.append(node.position[2]) + xpos.append(nodespace.position[0]) + ypos.append(nodespace.position[1]) + zpos.append(nodespace.position[2]) # nodes and gates for i, node in enumerate(nodes): @@ -925,28 +1038,27 @@ def generate_netapi_fragment(nodenet_uid, node_uids): lines.append("") # positions - origin = [100, 100, 100] + origin = [100, 100, 0] factor = [int(min(xpos)), int(min(ypos)), int(min(zpos))] - lines.append("origin_pos = (%d, %d)" % origin) + lines.append("origin_pos = (%d, %d, %d)" % (origin[0], origin[1], origin[2])) for node in nodes + nodespaces: x = int(node.position[0] - factor[0]) y = int(node.position[1] - factor[1]) z = int(node.position[2] - factor[2]) - lines.append("%s.position = [origin_pos[0] + %i, origin_pos[1] + %i, origin_pos[2] + %i]" % (idmap[node.uid], x, y, z)) + lines.append("%s.position = (origin_pos[0] + %i, origin_pos[1] + %i, origin_pos[2] + %i)" % (idmap[node.uid], x, y, z)) return "\n".join(lines) def set_entity_positions(nodenet_uid, positions): """ Takes a dict with node_uids as keys and new positions for the nodes as values """ - nodenet = nodenets[nodenet_uid] - nodenet.set_entity_positions(positions) + get_nodenet(nodenet_uid).set_entity_positions(positions) return True def set_node_name(nodenet_uid, node_uid, name): """Sets the display name of the node""" - nodenet = nodenets[nodenet_uid] + nodenet = get_nodenet(nodenet_uid) if nodenet.is_node(node_uid): nodenet.get_node(node_uid).name = name elif nodenet.is_nodespace(node_uid): @@ -956,30 +1068,30 @@ def set_node_name(nodenet_uid, node_uid, name): def set_node_state(nodenet_uid, node_uid, state): """ Sets the state of the given node to the given state""" - node = nodenets[nodenet_uid].get_node(node_uid) + node = get_nodenet(nodenet_uid).get_node(node_uid) for key in state: node.set_state(key, state[key]) return True def set_node_activation(nodenet_uid, node_uid, activation): - nodenets[nodenet_uid].get_node(node_uid).activation = activation + get_nodenet(nodenet_uid).get_node(node_uid).activation = activation return True def delete_nodes(nodenet_uid, node_uids): """Removes the nodes with the given uids""" - nodenet = nodenets[nodenet_uid] + nodenet = get_nodenet(nodenet_uid) with nodenet.netlock: for uid in node_uids: if nodenet.is_node(uid): - nodenets[nodenet_uid].delete_node(uid) + nodenet.delete_node(uid) return True def delete_nodespace(nodenet_uid, nodespace_uid): """ Removes the given node space and all its contents""" - nodenet = nodenets[nodenet_uid] + nodenet = get_nodenet(nodenet_uid) with nodenet.netlock: if nodenet.is_nodespace(nodespace_uid): 
nodenet.delete_nodespace(nodespace_uid) @@ -989,22 +1101,24 @@ def delete_nodespace(nodenet_uid, nodespace_uid): def get_available_node_types(nodenet_uid): """Returns a list of available node types. (Including native modules.)""" - nodenet = nodenets[nodenet_uid] - all_nodetypes = filter_native_modules(nodenet.engine) - all_nodetypes.update(nodenet.get_standard_nodetype_definitions()) - return all_nodetypes + nodenet = get_nodenet(nodenet_uid) + return { + 'nodetypes': nodenet.get_standard_nodetype_definitions(), + 'native_modules': filter_native_modules(nodenet.engine) + } def get_available_native_module_types(nodenet_uid): """Returns a list of native modules. If an nodenet uid is supplied, filter for node types defined within this nodenet.""" - return filter_native_modules(nodenets[nodenet_uid].engine) + return filter_native_modules(get_nodenet(nodenet_uid).engine) def set_node_parameters(nodenet_uid, node_uid, parameters): """Sets a dict of arbitrary values to make the node stateful.""" + nodenet = get_nodenet(nodenet_uid) for key, value in parameters.items(): - nodenets[nodenet_uid].get_node(node_uid).set_parameter(key, value) + nodenet.get_node(node_uid).set_parameter(key, value) return True @@ -1012,14 +1126,14 @@ def get_gatefunction(nodenet_uid, node_uid, gate_type): """ Returns the name of the gate function configured for that given node and gate """ - return nodenets[nodenet_uid].get_node(node_uid).get_gatefunction_name(gate_type) + return get_nodenet(nodenet_uid).get_node(node_uid).get_gatefunction_name(gate_type) def set_gatefunction(nodenet_uid, node_uid, gate_type, gatefunction=None): """ Sets the gate function of the given node and gate. """ - nodenets[nodenet_uid].get_node(node_uid).set_gatefunction_name(gate_type, gatefunction) + get_nodenet(nodenet_uid).get_node(node_uid).set_gatefunction_name(gate_type, gatefunction) return True @@ -1027,33 +1141,36 @@ def get_available_gatefunctions(nodenet_uid): """ Returns a list of names of the available gatefunctions """ - return nodenets[nodenet_uid].get_available_gatefunctions() + return get_nodenet(nodenet_uid).get_available_gatefunctions() def set_gate_parameters(nodenet_uid, node_uid, gate_type, parameters): """Sets the gate parameters of the given gate of the given node to the supplied dictionary.""" + nodenet = get_nodenet(nodenet_uid) for key, value in parameters.items(): - nodenets[nodenet_uid].get_node(node_uid).set_gate_parameter(gate_type, key, value) + nodenet.get_node(node_uid).set_gate_parameter(gate_type, key, value) return True def get_available_datasources(nodenet_uid): """Returns a list of available datasource types for the given nodenet.""" - if nodenets[nodenet_uid].worldadapter_instance: - return nodenets[nodenet_uid].worldadapter_instance.get_available_datasources() + nodenet = get_nodenet(nodenet_uid) + if nodenet.worldadapter_instance: + return nodenet.worldadapter_instance.get_available_datasources() return [] def get_available_datatargets(nodenet_uid): """Returns a list of available datatarget types for the given nodenet.""" - if nodenets[nodenet_uid].worldadapter_instance: - return nodenets[nodenet_uid].worldadapter_instance.get_available_datatargets() + nodenet = get_nodenet(nodenet_uid) + if nodenet.worldadapter_instance: + return nodenet.worldadapter_instance.get_available_datatargets() return [] def bind_datasource_to_sensor(nodenet_uid, sensor_uid, datasource): """Associates the datasource type to the sensor node with the given uid.""" - node = nodenets[nodenet_uid].get_node(sensor_uid) + node = 
get_nodenet(nodenet_uid).get_node(sensor_uid) if node.type == "Sensor": node.set_parameter('datasource', datasource) return True @@ -1062,7 +1179,7 @@ def bind_datasource_to_sensor(nodenet_uid, sensor_uid, datasource): def bind_datatarget_to_actor(nodenet_uid, actor_uid, datatarget): """Associates the datatarget type to the actor node with the given uid.""" - node = nodenets[nodenet_uid].get_node(actor_uid) + node = get_nodenet(nodenet_uid).get_node(actor_uid) if node.type == "Actor": node.set_parameter('datatarget', datatarget) return True @@ -1080,7 +1197,7 @@ def add_link(nodenet_uid, source_node_uid, gate_type, target_node_uid, slot_type weight: the weight of the link (a float) certainty (optional): a probabilistic parameter for the link """ - nodenet = nodenets[nodenet_uid] + nodenet = get_nodenet(nodenet_uid) with nodenet.netlock: success = nodenet.create_link(source_node_uid, gate_type, target_node_uid, slot_type, weight, certainty) uid = None @@ -1091,14 +1208,13 @@ def add_link(nodenet_uid, source_node_uid, gate_type, target_node_uid, slot_type def set_link_weight(nodenet_uid, source_node_uid, gate_type, target_node_uid, slot_type, weight=1, certainty=1): """Set weight of the given link.""" - nodenet = nodenets[nodenet_uid] - return nodenet.set_link_weight(source_node_uid, gate_type, target_node_uid, slot_type, weight, certainty) + return get_nodenet(nodenet_uid).set_link_weight(source_node_uid, gate_type, target_node_uid, slot_type, weight, certainty) def get_links_for_nodes(nodenet_uid, node_uids): """ Returns a list of links connected to the given nodes, and their connected nodes, if they are not in the same nodespace""" - nodenet = nodenets[nodenet_uid] + nodenet = get_nodenet(nodenet_uid) source_nodes = [nodenet.get_node(uid) for uid in node_uids] links = {} nodes = {} @@ -1115,21 +1231,21 @@ def get_links_for_nodes(nodenet_uid, node_uids): def delete_link(nodenet_uid, source_node_uid, gate_type, target_node_uid, slot_type): """Delete the given link.""" - nodenet = nodenets[nodenet_uid] - return nodenet.delete_link(source_node_uid, gate_type, target_node_uid, slot_type) + return get_nodenet(nodenet_uid).delete_link(source_node_uid, gate_type, target_node_uid, slot_type) def align_nodes(nodenet_uid, nodespace): """Perform auto-alignment of nodes in the current nodespace""" - result = node_alignment.align(nodenets[nodenet_uid], nodespace) + result = node_alignment.align(get_nodenet(nodenet_uid), nodespace) return result def user_prompt_response(nodenet_uid, node_uid, values, resume_nodenet): + nodenet = get_nodenet(nodenet_uid) for key, value in values.items(): - nodenets[nodenet_uid].get_node(node_uid).set_parameter(key, value) - nodenets[nodenet_uid].is_active = resume_nodenet - nodenets[nodenet_uid].user_prompt = None + nodenet.get_node(node_uid).set_parameter(key, value) + nodenet.is_active = resume_nodenet + nodenet.user_prompt = None def get_available_recipes(): @@ -1140,14 +1256,30 @@ def get_available_recipes(): recipes[name] = { 'name': name, 'parameters': data['parameters'], - 'docstring': data['docstring'] + 'docstring': data['docstring'], + 'category': data['category'] } return recipes +def get_available_operations(): + """ Returns a dict of available user-operations """ + operations = {} + for name, data in custom_operations.items(): + if not name.startswith('_'): + operations[name] = { + 'name': name, + 'parameters': data['parameters'], + 'docstring': data['docstring'], + 'category': data['category'], + 'selection': data['selectioninfo'] + } + return operations + 
+
 def run_recipe(nodenet_uid, name, parameters):
     """ Calls the given recipe with the provided parameters, and returns the output, if any """
-    netapi = nodenets[nodenet_uid].netapi
+    netapi = get_nodenet(nodenet_uid).netapi
     params = {}
     for key in parameters:
         if parameters[key] != '':
@@ -1155,6 +1287,7 @@
             params[key] = parameters[key]
     if name in custom_recipes:
         func = custom_recipes[name]['function']
         if cfg['micropsi2'].get('profile_runner'):
+            import cProfile
             profiler = cProfile.Profile()
             profiler.enable()
         result = {'reload': True}
@@ -1163,6 +1296,8 @@
             result.update(ret)
         if cfg['micropsi2'].get('profile_runner'):
             profiler.disable()
+            import pstats
+            import io
             s = io.StringIO()
             sortby = 'cumtime'
             ps = pstats.Stats(profiler, stream=s).sort_stats(sortby)
@@ -1173,15 +1308,111 @@
         return False, "Script not found"
+
+def run_operation(nodenet_uid, name, parameters, selection_uids):
+    """ Calls the given operation on the selection """
+    netapi = get_nodenet(nodenet_uid).netapi
+    params = {}
+    for key in parameters:
+        if parameters[key] != '':
+            params[key] = parameters[key]
+    if name in custom_operations:
+        func = custom_operations[name]['function']
+        result = {}
+        ret = func(netapi, selection_uids, **params)
+        if ret:
+            result.update(ret)
+        return True, result
+    else:
+        return False, "Operation not found"
+
+
 def get_agent_dashboard(nodenet_uid):
     from .emoexpression import calc_emoexpression_parameters
-    net = nodenets[nodenet_uid]
+    net = get_nodenet(nodenet_uid)
     with net.netlock:
         data = net.get_dashboard()
         data['face'] = calc_emoexpression_parameters(net)
         return data
+
+
+def run_netapi_command(nodenet_uid, command):
+    get_nodenet(nodenet_uid)
+    shell = netapi_consoles[nodenet_uid]
+    return shell.push(command)
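`run_netapi_command` above pushes one line at a time into the per-agent `NetapiShell` and returns an `(ok, output)` tuple, so a console client can drive it exactly like an interactive interpreter. A rough usage sketch against the runtime API; the nodenet name and the pushed commands are made up for illustration:

```python
# Sketch: driving the netapi console programmatically.
from micropsi_core import runtime

ok, uid = runtime.new_nodenet("console-demo", engine="dict_engine")

# a successful push returns (True, captured stdout) ...
ok, out = runtime.run_netapi_command(uid, "reg = netapi.create_node('Register', None, 'reg')")
ok, out = runtime.run_netapi_command(uid, "reg.name")
print(out)  # "'reg'"

# ... while a command that raises returns ok == False plus a condensed traceback
ok, err = runtime.run_netapi_command(uid, "netapi.get_node('no-such-uid')")
assert ok is False
```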
+
+
+def get_netapi_autocomplete_data(nodenet_uid, name=None):
+    import inspect
+    nodenet = get_nodenet(nodenet_uid)
+    if nodenet is None or nodenet_uid not in netapi_consoles:
+        return {}
+    nodetypes = get_available_node_types(nodenet_uid)
+
+    shell = netapi_consoles[nodenet_uid]
+    res, locs = shell.push("[k for k in locals() if not k.startswith('_')]")
+    locs = eval(locs)
+
+    def parsemembers(members):
+        data = {}
+        for name, thing in members:
+            if name.startswith('_'):
+                continue
+            if inspect.isroutine(thing):
+                argspec = inspect.getargspec(thing)
+                arguments = argspec.args[1:]
+                defaults = argspec.defaults or []
+                params = []
+                diff = len(arguments) - len(defaults)
+                for i, arg in enumerate(arguments):
+                    if i >= diff:
+                        params.append({
+                            'name': arg,
+                            'default': defaults[i - diff]
+                        })
+                    else:
+                        params.append({'name': arg})
+                data[name] = params
+            else:
+                data[name] = None
+        return data
+
+    data = {
+        'types': {},
+        'autocomplete_options': {}
+    }
+
+    for n in locs:
+        if name is None or n == name:
+            res, typedescript = shell.push(n)
+            if 'netapi' in typedescript:
+                data['types'][n] = 'netapi'
+            else:
+                # get type of thing.
+                match = re.search('^<([A-Za-z]+) ', typedescript)
+                if match:
+                    typename = match.group(1)
+                    if typename in ['Nodespace', 'Node', 'Gate', 'Slot']:
+                        data['types'][n] = typename
+                    elif typename in nodetypes['nodetypes'] or typename in nodetypes['native_modules']:
+                        data['types'][n] = 'Node'
+
+    for t in set(data['types'].values()):
+        if t == 'netapi':
+            netapi = nodenet.netapi
+            methods = inspect.getmembers(netapi, inspect.ismethod)
+            data['autocomplete_options']['netapi'] = parsemembers(methods)
+        elif t == 'Nodespace':
+            from micropsi_core.nodenet.nodespace import Nodespace
+            data['autocomplete_options']['Nodespace'] = parsemembers(inspect.getmembers(Nodespace))
+        elif t in ['Node', 'Gate', 'Slot']:
+            from micropsi_core.nodenet import node
+            cls = getattr(node, t)
+            data['autocomplete_options'][t] = parsemembers(inspect.getmembers(cls))
+
+    return data
+
+
 # --- end of API


 def filter_native_modules(engine=None):
@@ -1198,7 +1429,7 @@
     """
     result = {}
-    tools.mkdir(path)
+    os.makedirs(path, exist_ok=True)

     for user_directory_name, user_directory_names, file_names in os.walk(path):
         for definition_file_name in file_names:
@@ -1231,18 +1462,22 @@
         result['world_type'] = json['world_type']
     if "settings" in json:
         result['settings'] = json['settings']
+    if "config" in json:
+        result['config'] = json['config']
+    if 'use_modulators' in json:
+        result['use_modulators'] = json['use_modulators']
     return Bunch(**result)


 # Set up the MicroPsi runtime
 def load_definitions():
     global nodenet_data, world_data
-    nodenet_data = crawl_definition_files(path=os.path.join(RESOURCE_PATH, NODENET_DIRECTORY), type="nodenet")
-    world_data = crawl_definition_files(path=os.path.join(RESOURCE_PATH, WORLD_DIRECTORY), type="world")
+    nodenet_data = crawl_definition_files(path=os.path.join(PERSISTENCY_PATH, NODENET_DIRECTORY), type="nodenet")
+    world_data = crawl_definition_files(path=os.path.join(PERSISTENCY_PATH, WORLD_DIRECTORY), type="world")
     if not world_data:
         # create a default world for convenience.
         uid = tools.generate_uid()
-        filename = os.path.join(RESOURCE_PATH, WORLD_DIRECTORY, uid + '.json')
+        filename = os.path.join(PERSISTENCY_PATH, WORLD_DIRECTORY, uid + '.json')
         world_data[uid] = Bunch(uid=uid, name="default", version=1, filename=filename)
         with open(filename, 'w+') as fp:
             fp.write(json.dumps(world_data[uid], sort_keys=True, indent=4))
@@ -1267,41 +1502,80 @@
     return worlds


-def load_user_files(do_reload=False):
-    # see if we have additional nodetypes defined by the user.
-    import sys
+def load_user_files(path, reload_nodefunctions=False, errors=[]):
+    global native_modules, custom_recipes
+    for f in os.listdir(path):
+        if not f.startswith('.') and f != '__pycache__':
+            abspath = os.path.join(path, f)
+            err = None
+            if os.path.isdir(abspath):
+                errors.extend(load_user_files(path=abspath, reload_nodefunctions=reload_nodefunctions, errors=[]))
+            elif f == 'nodetypes.json':
+                err = parse_native_module_file(abspath)
+            elif f == 'recipes.py':
+                err = parse_recipe_or_operations_file(abspath, reload_nodefunctions)
+            elif f == 'nodefunctions.py' and reload_nodefunctions:
+                err = reload_nodefunctions_file(abspath)
+            elif f == 'operations.py':
+                err = parse_recipe_or_operations_file(abspath, reload_nodefunctions)
+            if err:
+                errors.append(err)
+    return errors
+
+
+def parse_native_module_file(path):
     global native_modules
-    native_modules = {}
-    custom_nodetype_file = os.path.join(RESOURCE_PATH, 'nodetypes.json')
-    if os.path.isfile(custom_nodetype_file):
+    with open(path) as fp:
+        category = os.path.relpath(os.path.dirname(path), start=RESOURCE_PATH)
         try:
-            with open(custom_nodetype_file) as fp:
-                native_modules = json.load(fp)
+            modules = json.load(fp)
         except ValueError:
-            logging.getLogger('system').warn("Nodetype data in %s not well-formed." % custom_nodetype_file)
+            return "Nodetype data in %s/nodetypes.json not well-formed." % category
+        for key in modules:
+            modules[key]['path'] = os.path.join(os.path.dirname(path), 'nodefunctions.py')
+            modules[key]['category'] = category
+            if key in native_modules:
+                logging.getLogger("system").warning("Native module names must be unique. %s is not." % key)
+            native_modules[key] = modules[key]

-    sys.path.append(RESOURCE_PATH)
-    parse_recipe_file()
-    return native_modules

+def parse_recipe_or_operations_file(path, reload=False, category_overwrite=False):
+    global custom_recipes
+    import importlib
+    import inspect

-def parse_recipe_file():
-    custom_recipe_file = os.path.join(RESOURCE_PATH, 'recipes.py')
-    if not os.path.isfile(custom_recipe_file):
-        return
+    category = category_overwrite or os.path.relpath(os.path.dirname(path), start=RESOURCE_PATH)
+    if category == '.':
+        category = ''  # relative path is the root folder
+    relpath = os.path.relpath(path, start=RESOURCE_PATH)
+    name = os.path.basename(path)[:-3]

-    import importlib.machinery
-    import inspect
-    global custom_recipes
+    mode = 'recipes' if os.path.basename(path).startswith('recipes') else 'operations'
+
+    try:
+        loader = importlib.machinery.SourceFileLoader(name, path)
+        recipes = loader.load_module()
+        # recipes = __import__(pyname, fromlist=['recipes'])
+        # importlib.reload(sys.modules[pyname])
+    except SyntaxError as e:
+        return "%s in %s file %s, line %d" % (e.__class__.__name__, mode, relpath, e.lineno)
+
+    for name, module in inspect.getmembers(recipes, inspect.ismodule):
+        if hasattr(module, '__file__') and module.__file__.startswith(RESOURCE_PATH):
+            module = importlib.reload(module)

-    loader = importlib.machinery.SourceFileLoader("recipes", custom_recipe_file)
-    recipes = loader.load_module()
-    # import recipes
-    custom_recipes = {}
     all_functions = inspect.getmembers(recipes, inspect.isfunction)
     for name, func in all_functions:
+        filename = os.path.realpath(func.__code__.co_filename)
+        if filename != os.path.realpath(path) and os.path.basename(filename) == os.path.basename(path):
+            # import from another file of the same mode.
ignore, to avoid + # false duplicate-function-name alerts + continue argspec = inspect.getargspec(func) - arguments = argspec.args[1:] + if mode == 'recipes': + arguments = argspec.args[1:] + elif mode == 'operations': + arguments = argspec.args[2:] defaults = argspec.defaults or [] params = [] diff = len(arguments) - len(defaults) @@ -1314,60 +1588,124 @@ def parse_recipe_file(): 'name': arg, 'default': default }) - custom_recipes[name] = { + if mode == 'recipes' and name in custom_recipes and id(func) != id(custom_recipes[name]['function']): + logging.getLogger("system").warning("Recipe function names must be unique. %s is not." % name) + elif mode == 'operations' and name in custom_operations and id(func) != id(custom_operations[name]['function']): + logging.getLogger("system").warning("Operations function names must be unique. %s is not." % name) + data = { 'name': name, 'parameters': params, 'function': func, - 'docstring': inspect.getdoc(func) + 'docstring': inspect.getdoc(func), + 'category': category, + 'path': path } + if mode == 'recipes': + custom_recipes[name] = data + elif mode == 'operations': + if hasattr(func, 'selectioninfo'): + data['selectioninfo'] = func.selectioninfo + custom_operations[name] = data + + +def reload_nodefunctions_file(path): + import importlib + import inspect + + try: + loader = importlib.machinery.SourceFileLoader("nodefunctions", path) + nodefuncs = loader.load_module() + for name, module in inspect.getmembers(nodefuncs, inspect.ismodule): + if hasattr(module, '__file__') and module.__file__.startswith(RESOURCE_PATH): + loader = importlib.machinery.SourceFileLoader(name, module.__file__) + loader.load_module() + except SyntaxError as e: + relpath = os.path.relpath(path, start=RESOURCE_PATH) + return "%s in nodefunction file %s, line %d" % (e.__class__.__name__, relpath, e.lineno) + def reload_native_modules(): # stop nodenets, save state + global native_modules, custom_recipes, custom_operations + native_modules = {} + custom_recipes = {} + custom_operations = {} runners = {} + errors = [] + # load builtins: + from micropsi_core.nodenet.native_modules import nodetypes + native_modules.update(nodetypes) + operationspath = os.path.abspath('micropsi_core/nodenet/operations/') + for file in os.listdir(operationspath): + import micropsi_core.nodenet.operations + if file != '__init__.py' and not file.startswith('.') and os.path.isfile(os.path.join(operationspath, file)): + err = parse_recipe_or_operations_file(os.path.join(operationspath, file), category_overwrite=file[:-3]) + if err: + errors.append(err) + for uid in nodenets: if nodenets[uid].is_active: runners[uid] = True nodenets[uid].is_active = False - load_user_files(True) - import importlib - import inspect - custom_nodefunctions_file = os.path.join(RESOURCE_PATH, 'nodefunctions.py') - if os.path.isfile(custom_nodefunctions_file): - loader = importlib.machinery.SourceFileLoader("nodefunctions", custom_nodefunctions_file) - nodefuncs = loader.load_module() - for key, obj in inspect.getmembers(nodefuncs): - if inspect.ismodule(obj): - if obj.__file__.startswith(RESOURCE_PATH): - loader = importlib.machinery.SourceFileLoader(key, obj.__file__) - loader.load_module() + errors.extend(load_user_files(RESOURCE_PATH, reload_nodefunctions=True, errors=[])) for nodenet_uid in nodenets: nodenets[nodenet_uid].reload_native_modules(filter_native_modules(nodenets[nodenet_uid].engine)) # restart previously active nodenets for uid in runners: nodenets[uid].is_active = True - return True + if len(errors) == 0: + 
return True, [] + else: + return False, errors + + +def initialize(persistency_path=None, resource_path=None): + global PERSISTENCY_PATH, RESOURCE_PATH, configs, logger, runner + + if persistency_path is None: + persistency_path = cfg['paths']['data_directory'] + + if resource_path is None: + resource_path = persistency_path + + PERSISTENCY_PATH = persistency_path + RESOURCE_PATH = resource_path + + sys.path.append(resource_path) + + configs = config.ConfigurationManager(cfg['paths']['server_settings_path']) + + if logger is None: + logger = MicropsiLogger({ + 'system': cfg['logging']['level_system'], + 'world': cfg['logging']['level_world'] + }, cfg['logging'].get('logfile')) -load_definitions() -init_worlds(world_data) -load_user_files() + load_definitions() + init_worlds(world_data) + result, errors = reload_native_modules() + for e in errors: + logging.getLogger("system").error(e) -# initialize runners -# Initialize the threads for the continuous simulation of nodenets and worlds -if 'runner_timestep' not in configs: - configs['runner_timestep'] = 200 - configs.save_configs() -if 'runner_factor' not in configs: - configs['runner_factor'] = 2 - configs.save_configs() + # initialize runners + # Initialize the threads for the continuous calculation of nodenets and worlds + if 'runner_timestep' not in configs: + configs['runner_timestep'] = 200 + configs.save_configs() + if 'runner_factor' not in configs: + configs['runner_factor'] = 2 + configs.save_configs() -set_runner_properties(configs['runner_timestep'], configs['runner_factor']) + set_runner_properties(configs['runner_timestep'], configs['runner_factor']) -runner['running'] = True -runner['runner'] = MicropsiRunner() + runner['running'] = True + if runner.get('runner') is None: + runner['runner'] = MicropsiRunner() -add_signal_handler(kill_runners) + if kill_runners not in signal_handler_registry: + add_signal_handler(kill_runners) -signal.signal(signal.SIGINT, signal_handler) -signal.signal(signal.SIGTERM, signal_handler) + signal.signal(signal.SIGINT, signal_handler) + signal.signal(signal.SIGTERM, signal_handler) diff --git a/micropsi_core/tests/conftest.py b/micropsi_core/tests/conftest.py index 6a5629cb..736095e4 100644 --- a/micropsi_core/tests/conftest.py +++ b/micropsi_core/tests/conftest.py @@ -10,8 +10,25 @@ nn_uid = 'Testnet' -@pytest.fixture(scope="function") +@pytest.yield_fixture(scope="function") def fixed_nodenet(request, test_world, engine): + """ + A test nodenet filled with some example data (nodenet_data.py) + Structure: + + -> A1 -> A2 + / + S ACTA + \ + -> B1 -> B2 + + S: Sensor, brightness_l + A1: Pipe + A2: Pipe + B1: Pipe + B2: Pipe + ACTA: Activator, por + """ from micropsi_core.tests.nodenet_data import fixed_nodenet_data if engine == "theano_engine": fixed_nodenet_data = fixed_nodenet_data.replace('Root', 's0001') @@ -19,4 +36,8 @@ def fixed_nodenet(request, test_world, engine): micropsi.get_nodenet(uid) micropsi.merge_nodenet(uid, fixed_nodenet_data, keep_uids=True) micropsi.save_nodenet(uid) - return uid + yield uid + try: + micropsi.delete_nodenet(uid) + except: + pass diff --git a/micropsi_core/tests/test_configurationManager.py b/micropsi_core/tests/test_configurationManager.py index 5a4cd824..f6a84827 100644 --- a/micropsi_core/tests/test_configurationManager.py +++ b/micropsi_core/tests/test_configurationManager.py @@ -64,5 +64,4 @@ def test_create_configs(path, path2): assert conf_mgr["record"]["i"] == 12 assert conf2["color"] == "blue" - with pytest.raises(RuntimeError): - 
ConfigurationManager(path) # we cannot have more than one config manager at a single path + ConfigurationManager(path) # we can now have more than one config manager at a single path diff --git a/micropsi_core/tests/test_node_logic.py b/micropsi_core/tests/test_node_logic.py index c996420a..5bc15f2e 100644 --- a/micropsi_core/tests/test_node_logic.py +++ b/micropsi_core/tests/test_node_logic.py @@ -6,37 +6,10 @@ """ from micropsi_core import runtime as micropsi -from micropsi_core.world.world import World -from micropsi_core.world.worldadapter import WorldAdapter -class DummyWorld(World): - - supported_worldadapters = ['DummyWorldAdapter'] - - def __init__(self, filename, world_type="DummyWorld", name="", owner="", uid=None, version=1): - World.__init__(self, filename, world_type=world_type, name=name, owner=owner, uid=uid, version=version) - self.current_step = 0 - self.data['assets'] = {} - - -class DummyWorldAdapter(WorldAdapter): - - supported_datasources = ['test_source'] - supported_datatargets = ['test_target'] - - def __init__(self, world, uid=None, **data): - WorldAdapter.__init__(self, world, uid=uid, **data) - self.datasources = {'test_source': 0.7} - self.datatargets = {'test_target': 0} - self.datatarget_feedback = {'test_target': 0.3} - - def update_data_sources_and_targets(self): - self.world.test_target_value = self.datatargets['test_target'] - - -def prepare(fixed_nodenet): - nodenet = micropsi.get_nodenet(fixed_nodenet) +def prepare(test_nodenet): + nodenet = micropsi.get_nodenet(test_nodenet) netapi = nodenet.netapi source = netapi.create_node("Register", None, "Source") netapi.link(source, "gen", source, "gen") @@ -45,22 +18,9 @@ def prepare(fixed_nodenet): return nodenet, netapi, source -def add_dummyworld(fixed_nodenet): - nodenet = micropsi.get_nodenet(fixed_nodenet) - if nodenet.world: - micropsi.worlds[nodenet.world].unregister_nodenet(nodenet) - - worlduid = micropsi.new_world("DummyWorld", "DummyWorld", "DummyOwner")[1] - - nodenet.world = worlduid - nodenet.worldadapter = "DummyWorldAdapter" - micropsi.worlds[worlduid].register_nodenet("DummyWorldAdapter", nodenet) - return micropsi.worlds[worlduid] - - -def test_node_logic_loop(fixed_nodenet): +def test_node_logic_loop(test_nodenet): # test gen looping behaviour - net, netapi, source = prepare(fixed_nodenet) + net, netapi, source = prepare(test_nodenet) net.step() assert source.get_gate("gen").activation == 1 net.step() @@ -70,18 +30,18 @@ def test_node_logic_loop(fixed_nodenet): assert source.get_gate("gen").activation == 0.5 -def test_node_logic_die(fixed_nodenet): +def test_node_logic_die(test_nodenet): # without the link, activation ought to drop to 0 - net, netapi, source = prepare(fixed_nodenet) + net, netapi, source = prepare(test_nodenet) netapi.unlink(source, "gen", source, "gen") net.step() assert source.get_gate("gen").activation == 0 -def test_node_logic_sum(fixed_nodenet): +def test_node_logic_sum(test_nodenet): # propagate positive activation, expect sum - net, netapi, source = prepare(fixed_nodenet) + net, netapi, source = prepare(test_nodenet) reg_a = netapi.create_node("Register", None, "RegA") reg_b = netapi.create_node("Register", None, "RegB") @@ -97,9 +57,9 @@ def test_node_logic_sum(fixed_nodenet): assert reg_result.get_gate("gen").activation == 1 -def test_node_logic_cancel(fixed_nodenet): +def test_node_logic_cancel(test_nodenet): # propagate positive and negative activation, expect cancellation - net, netapi, source = prepare(fixed_nodenet) + net, netapi, source = prepare(test_nodenet) 
reg_a = netapi.create_node("Register", None, "RegA") reg_b = netapi.create_node("Register", None, "RegB") @@ -116,9 +76,9 @@ def test_node_logic_cancel(fixed_nodenet): assert reg_result.get_gate("gen").activation == 0 -def test_node_logic_store_and_forward(fixed_nodenet): +def test_node_logic_store_and_forward(test_nodenet): # collect activation in one node, go forward only if both dependencies are met - net, netapi, source = prepare(fixed_nodenet) + net, netapi, source = prepare(test_nodenet) reg_a = netapi.create_node("Register", None, "RegA") reg_b = netapi.create_node("Register", None, "RegB") @@ -137,8 +97,8 @@ def test_node_logic_store_and_forward(fixed_nodenet): assert reg_result.get_gate("gen").activation == 1 -def test_node_logic_activators(fixed_nodenet): - net, netapi, source = prepare(fixed_nodenet) +def test_node_logic_activators(test_nodenet): + net, netapi, source = prepare(test_nodenet) activator = netapi.create_node('Activator', None) activator.set_parameter('type', 'sub') activator.activation = 1 @@ -150,30 +110,64 @@ def test_node_logic_activators(fixed_nodenet): assert testpipe.get_gate("sub").activation == 0 -def test_node_logic_sensor(fixed_nodenet): - # read a sensor value from the dummy world adapter - net, netapi, source = prepare(fixed_nodenet) - world = add_dummyworld(fixed_nodenet) +def test_node_logic_sensor_modulator(test_nodenet, default_world): + net, netapi, source = prepare(test_nodenet) + register = netapi.create_node("Register", None) + netapi.link_sensor(register, "emo_activation", "gen") + micropsi.step_nodenet(test_nodenet) + micropsi.step_nodenet(test_nodenet) + micropsi.step_nodenet(test_nodenet) + assert round(netapi.get_modulator("emo_activation"), 3) == round(register.activation, 3) + +def test_node_logic_sensor_datasource(test_nodenet, default_world): + net, netapi, source = prepare(test_nodenet) + micropsi.set_nodenet_properties(test_nodenet, worldadapter="Default", world_uid=default_world) register = netapi.create_node("Register", None) - netapi.link_sensor(register, "test_source", "gen") - world.step() - net.step() - net.step() - assert round(register.get_gate("gen").activation, 1) == 0.7 + netapi.link_sensor(register, "static_on", "gen", weight=0.35) + micropsi.step_nodenet(test_nodenet) + micropsi.step_nodenet(test_nodenet) + assert round(register.get_gate("gen").activation, 3) == 0.35 + +def test_node_logic_actor_modulator(test_nodenet, default_world): + net, netapi, source = prepare(test_nodenet) + netapi.link_actor(source, "base_porret_decay_factor", weight=0.3, gate="gen") + micropsi.step_nodenet(test_nodenet) + assert round(netapi.get_modulator("base_porret_decay_factor"), 3) == 0.3 -def test_node_logic_actor(fixed_nodenet): - # write a value to the dummy world adapter - net, netapi, source = prepare(fixed_nodenet) - world = add_dummyworld(fixed_nodenet) +def test_node_logic_actor_datatarget(test_nodenet, default_world): + net, netapi, source = prepare(test_nodenet) + micropsi.set_nodenet_properties(test_nodenet, worldadapter="Default", world_uid=default_world) + netapi.link_actor(source, "echo", weight=0.5, gate="gen") register = netapi.create_node("Register", None) - netapi.link_actor(source, "test_target", 0.5, 1, "gen", "gen") - actor = netapi.get_nodes(node_name_prefix="test_target")[0] + actor = netapi.get_nodes(node_name_prefix="echo")[0] netapi.link(actor, "gen", register, "gen") - net.step() - world.step() - assert world.test_target_value == 0.5 - net.step() - assert round(register.get_gate("gen").activation, 1) == 0.3 + 
micropsi.step_nodenet(test_nodenet) + micropsi.step_nodenet(test_nodenet) + micropsi.step_nodenet(test_nodenet) + assert round(register.get_gate("gen").activation, 1) == 0.5 + + +def test_node_logic_sensor_nomodulators(engine, default_world): + result, nnuid = micropsi.new_nodenet("adf", engine, "Default", world_uid=default_world, use_modulators=False) + net, netapi, source = prepare(nnuid) + register = netapi.create_node("Register", None) + netapi.link_sensor(register, "static_on", "gen", weight=0.4) + micropsi.step_nodenet(nnuid) + micropsi.step_nodenet(nnuid) + assert round(register.get_gate("gen").activation, 1) == 0.4 + + +def test_node_logic_actor_nomodulators(engine, default_world): + result, nnuid = micropsi.new_nodenet("adf", engine, "Default", world_uid=default_world, use_modulators=False) + net, netapi, source = prepare(nnuid) + netapi.link_actor(source, "echo", weight=0.7, gate="gen") + register = netapi.create_node("Register", None) + actor = netapi.get_nodes(node_name_prefix="echo")[0] + netapi.link(actor, "gen", register, "gen") + micropsi.step_nodenet(nnuid) + micropsi.step_nodenet(nnuid) + micropsi.step_nodenet(nnuid) + assert round(register.get_gate("gen").activation, 1) == 0.7 diff --git a/micropsi_core/tests/test_node_netapi.py b/micropsi_core/tests/test_node_netapi.py index 434acc5b..d7ae7d86 100644 --- a/micropsi_core/tests/test_node_netapi.py +++ b/micropsi_core/tests/test_node_netapi.py @@ -19,20 +19,6 @@ def prepare(fixed_nodenet): return nodenet, netapi, source -def add_dummyworld(fixed_nodenet): - nodenet = micropsi.get_nodenet(fixed_nodenet) - if nodenet.world: - nodenet.world.unregister_nodenet(nodenet) - - worlduid = micropsi.new_world("DummyWorld", "DummyWorld", "DummyOwner")[1] - - nodenet.world = worlduid - nodenet.worldadapter = "DummyWorldAdapter" - - micropsi.worlds[worlduid].register_nodenet("DummyWorldAdapter", nodenet) - return micropsi.worlds[worlduid] - - def test_node_netapi_create_register_node(fixed_nodenet): # test register node creation net, netapi, source = prepare(fixed_nodenet) @@ -774,7 +760,7 @@ def test_autoalign_updates_last_changed(fixed_nodenet): net.step() net.step() netapi.autoalign_nodespace(netapi.get_nodespace(None).uid) - changes = net.get_nodespace_changes(None, 2) + changes = net.get_nodespace_changes([None], 2) for uid in net.get_node_uids(): if net.get_node(uid).position != [12, 13, 11]: assert uid in changes['nodes_dirty'] @@ -1074,3 +1060,93 @@ def test_set_dashboard_value(test_nodenet, node): netapi = nodenet.netapi netapi.set_dashboard_value('foo', 'bar') assert nodenet.dashboard_values['foo'] == 'bar' + + +def test_decay_porret_links(test_nodenet): + nodenet = micropsi.get_nodenet(test_nodenet) + netapi = nodenet.netapi + pipes = [] + netapi.set_modulator('base_porret_decay_factor', 0.1) + for i in range(10): + node = netapi.create_node("Pipe", None, "P%d" % i) + pipes.append(node) + if i > 0: + netapi.link_with_reciprocal(pipes[i - 1], node, 'porret', weight=0.1 * i) + + netapi.link_with_reciprocal(pipes[0], pipes[1], 'subsur', weight=0.5) + reg = netapi.create_node("Register", None, "source") + netapi.link(reg, 'gen', pipes[0], 'gen', 0.4) + netapi.decay_por_links(None) + for i in range(9): + assert round(pipes[i].get_gate('por').get_links()[0].weight, 3) == round(0.1 * (i + 1) * 0.9, 3) + # sub/sur/ret/gen links unchanged + assert round(reg.get_gate('gen').get_links()[0].weight, 3) == 0.4 + assert round(pipes[0].get_gate('sub').get_links()[0].weight, 3) == 0.5 + assert 
round(pipes[7].get_gate('ret').get_links()[0].weight, 3) == 0.7 + + +def test_unlink_gate(test_nodenet): + nodenet = micropsi.get_nodenet(test_nodenet) + netapi = nodenet.netapi + node = netapi.create_node("Pipe", None) + pipe1 = netapi.create_node("Pipe", None) + pipe2 = netapi.create_node("Pipe", None) + netapi.link_with_reciprocal(node, pipe1, 'subsur') + netapi.link_with_reciprocal(node, pipe2, 'subsur') + netapi.link(node, 'por', pipe1, 'gen') + netapi.link(node, 'por', pipe2, 'gen') + netapi.link(node, 'por', pipe1, 'sur') + micropsi.save_nodenet(test_nodenet) + netapi.unlink_gate(node, 'por') + assert node.get_gate('por').empty + assert not node.get_gate('sub').empty + micropsi.revert_nodenet(test_nodenet) + netapi = micropsi.nodenets[test_nodenet].netapi + node = netapi.get_node(node.uid) + netapi.unlink_gate(node, 'por', target_node_uid=pipe1.uid) + assert len(node.get_gate('por').get_links()) == 1 + assert node.get_gate('por').get_links()[0].target_node.uid == pipe2.uid + micropsi.revert_nodenet(test_nodenet) + netapi = micropsi.nodenets[test_nodenet].netapi + node = netapi.get_node(node.uid) + netapi.unlink_gate(node, 'por', target_slot_name='sur') + assert len(node.get_gate('por').get_links()) == 2 # pipe1:gen, pipe2:gen + assert len(node.get_gate('sub').get_links()) == 2 # only por->sub unlinked + + +def test_unlink_slot(test_nodenet): + nodenet = micropsi.get_nodenet(test_nodenet) + netapi = nodenet.netapi + node = netapi.create_node("Pipe", None) + pipe1 = netapi.create_node("Pipe", None) + pipe2 = netapi.create_node("Pipe", None) + netapi.link_with_reciprocal(node, pipe1, 'subsur') + netapi.link_with_reciprocal(node, pipe2, 'subsur') + netapi.link(pipe1, 'gen', node, 'por') + netapi.link(pipe2, 'gen', node, 'por') + netapi.link(pipe1, 'sur', node, 'por') + micropsi.save_nodenet(test_nodenet) + netapi.unlink_slot(node, 'por') + assert node.get_slot('por').empty + assert not node.get_slot('sur').empty + micropsi.revert_nodenet(test_nodenet) + netapi = micropsi.nodenets[test_nodenet].netapi + node = netapi.get_node(node.uid) + netapi.unlink_slot(node, 'por', source_node_uid=pipe1.uid) + assert len(node.get_slot('por').get_links()) == 1 + assert node.get_slot('por').get_links()[0].source_node.uid == pipe2.uid + micropsi.revert_nodenet(test_nodenet) + netapi = micropsi.nodenets[test_nodenet].netapi + node = netapi.get_node(node.uid) + netapi.unlink_slot(node, 'por', source_gate_name='sur') + assert len(node.get_slot('por').get_links()) == 2 # pipe1:gen, pipe2:gen + assert len(node.get_slot('sur').get_links()) == 2 # only sur->por unlinked + + +def test_nodespace_properties(test_nodenet): + nodenet = micropsi.get_nodenet(test_nodenet) + netapi = nodenet.netapi + rootns = netapi.get_nodespace(None) + netapi.set_nodespace_properties(None, {'foo': 'bar'}) + data = netapi.get_nodespace_properties() + assert data[rootns.uid] == {'foo': 'bar'} diff --git a/micropsi_core/tests/test_nodenet_partitions.py b/micropsi_core/tests/test_nodenet_partitions.py new file mode 100644 index 00000000..b3fbaecc --- /dev/null +++ b/micropsi_core/tests/test_nodenet_partitions.py @@ -0,0 +1,301 @@ + +import pytest +from micropsi_core import runtime as micropsi + + +def prepare(netapi, partition_options={}): + partition_options.update({'new_partition': True}) + nodespace = netapi.create_nodespace(None, name="partition", options=partition_options) + source = netapi.create_node('Register', None, "Source") + register = netapi.create_node('Register', nodespace.uid, "Register") + netapi.link(source, 'gen', 
register, 'gen') + netapi.link(source, 'gen', source, 'gen') + source.activation = 1 + return nodespace, source, register + + +@pytest.mark.engine("theano_engine") +def test_partition_creation(test_nodenet): + nodenet = micropsi.get_nodenet(test_nodenet) + netapi = nodenet.netapi + netapi.create_nodespace(None, name="partition", options={'new_partition': True}) + assert len(nodenet.partitions.keys()) == 2 + + +@pytest.mark.engine("theano_engine") +def test_cross_partition_links(test_nodenet): + nodenet = micropsi.get_nodenet(test_nodenet) + netapi = nodenet.netapi + nodespace, source, register = prepare(netapi) + nodenet.step() + assert register.activation == 1 + # change link weight + netapi.link(source, 'gen', register, 'gen', weight=0.7) + + assert register.uid in netapi.get_node(source.uid).get_associated_node_uids() + assert source.uid in netapi.get_node(register.uid).get_associated_node_uids() + + link = register.get_slot('gen').get_links()[0] + assert round(link.weight, 3) == 0.7 + nodenet.step() + assert round(register.activation, 3) == 0.7 + netapi.unlink(source, 'gen', register, 'gen') + assert len(source.get_gate('gen').get_links()) == 1 + assert netapi.get_node(register.uid).get_gate('gen').empty + assert netapi.get_node(register.uid).get_slot('gen').empty + nodenet.step() + assert register.activation == 0 + + +@pytest.mark.engine("theano_engine") +def test_partition_persistence(test_nodenet): + nodenet = micropsi.get_nodenet(test_nodenet) + netapi = nodenet.netapi + nodespace, source, register = prepare(netapi) + micropsi.save_nodenet(test_nodenet) + micropsi.revert_nodenet(test_nodenet) + nodenet.step() + assert register.activation == 1 + + +@pytest.mark.engine("theano_engine") +def test_delete_node_deletes_inlinks(test_nodenet): + nodenet = micropsi.get_nodenet(test_nodenet) + netapi = nodenet.netapi + nodespace, source, register = prepare(netapi) + target = netapi.create_node("Register", None, "target") + netapi.link(register, 'gen', target, 'gen') + netapi.delete_node(register) + links = netapi.get_node(source.uid).get_gate('gen').get_links() + assert len(links) == 1 + assert links[0].target_node.uid == source.uid + assert target.get_slot('gen').empty + assert nodespace.partition.inlinks == {} + assert len(nodenet.rootpartition.inlinks[nodespace.partition.spid][1].get_value()) == 0 + + +@pytest.mark.engine("theano_engine") +def test_delete_node_modifies_inlinks(test_nodenet): + nodenet = micropsi.get_nodenet(test_nodenet) + netapi = nodenet.netapi + nodespace, source, register = prepare(netapi) + target = netapi.create_node("Register", None, "target") + + register2 = netapi.create_node("Register", nodespace.uid, "reg2") + netapi.link(register, 'gen', target, 'gen') + netapi.link(register2, 'gen', target, 'gen') + netapi.link(source, 'gen', register2, 'gen') + + netapi.delete_node(register) + assert len(source.get_gate('gen').get_links()) == 2 + assert len(target.get_slot('gen').get_links()) == 1 + + assert list(nodespace.partition.inlinks.keys()) == [nodenet.rootpartition.spid] + assert list(nodenet.rootpartition.inlinks.keys()) == [nodespace.partition.spid] + assert len(nodespace.partition.inlinks[nodenet.rootpartition.spid][1].get_value()) == 1 + assert len(nodenet.rootpartition.inlinks[nodespace.partition.spid][1].get_value()) == 1 + + +@pytest.mark.engine("theano_engine") +def test_grow_partitions(test_nodenet): + nodenet = micropsi.get_nodenet(test_nodenet) + netapi = nodenet.netapi + nodespace = netapi.create_nodespace(None, name="partition", options={ + 
"new_partition": True, + "initial_number_of_nodes": 2, + "average_elements_per_node_assumption": 4, + "initial_number_of_nodespaces": 1 + }) + + for i in range(20): + netapi.create_node("Pipe", nodespace.uid, "N %d" % i) + + partition = nodespace.partition + + # growby (NoN // 2): 2,3,4,6,9,13,19,28 + assert len(partition.allocated_nodes) == 28 + assert partition.NoE > 28 * 4 + + for i in range(2): + netapi.create_nodespace(nodespace.uid, name="NS %d" % i) + + assert len(partition.allocated_nodespaces) == 4 + + # step, save, and load the net to make sure all data structures have been grown properly + micropsi.step_nodenet(test_nodenet) + micropsi.save_nodenet(test_nodenet) + micropsi.revert_nodenet(test_nodenet) + micropsi.step_nodenet(test_nodenet) + + +@pytest.mark.engine("theano_engine") +def test_announce_nodes(test_nodenet): + nodenet = micropsi.get_nodenet(test_nodenet) + netapi = nodenet.netapi + nodespace = netapi.create_nodespace(None, name="partition", options={ + "new_partition": True, + "initial_number_of_nodes": 2, + "average_elements_per_node_assumption": 4, + "initial_number_of_nodespaces": 1 + }) + + # announce 20 pipe nodes + netapi.announce_nodes(nodespace.uid, 20, 8) + + partition = nodespace.partition + + # 18 nodes needed + assert partition.NoN == 26 # growby: 18 + 18//3 + # 152 elements needed + assert partition.NoE == 210 # growby: 152 + 152//3 + + for i in range(20): + netapi.create_node("Pipe", nodespace.uid, "N %d" % i) + + # assert that we did not grow again + assert partition.NoN == 26 + assert partition.NoE == 210 + + +@pytest.mark.engine("theano_engine") +def test_delete_partition(test_nodenet): + nodenet = micropsi.get_nodenet(test_nodenet) + netapi = nodenet.netapi + nodespace, source, register = prepare(netapi) + netapi.delete_nodespace(nodespace) + links = source.get_gate('gen').get_links() + assert len(links) == 1 + assert links[0].target_node == source + + +@pytest.mark.engine("theano_engine") +def test_delete_partition_unlinks_native_module(test_nodenet, resourcepath): + nodenet = micropsi.get_nodenet(test_nodenet) + netapi = nodenet.netapi + nodespace, source, register = prepare(netapi) + import os + nodetype_file = os.path.join(resourcepath, 'Test', 'nodetypes.json') + nodefunc_file = os.path.join(resourcepath, 'Test', 'nodefunctions.py') + with open(nodetype_file, 'w') as fp: + fp.write('{"Testnode": {\ + "name": "Testnode",\ + "slottypes": ["gen", "foo", "bar"],\ + "nodefunction_name": "testnodefunc",\ + "gatetypes": ["gen", "foo", "bar"]}}') + with open(nodefunc_file, 'w') as fp: + fp.write("def testnodefunc(netapi, node=None, **prams):\r\n return 17") + micropsi.reload_native_modules() + testnode = netapi.create_node("Testnode", None, "test") + netapi.link(testnode, 'foo', register, 'gen') + netapi.link(register, 'gen', testnode, 'bar') + netapi.delete_nodespace(nodespace) + data = testnode.get_data(include_links=True) + assert data['links'] == {} + + +@pytest.mark.engine("theano_engine") +def test_delete_nodespace_unlinks_native_module(test_nodenet, resourcepath): + nodenet = micropsi.get_nodenet(test_nodenet) + netapi = nodenet.netapi + nodespace = netapi.create_nodespace(None, "foo") + foopipe = netapi.create_node("Pipe", nodespace.uid, 'foopipe') + import os + nodetype_file = os.path.join(resourcepath, 'Test', 'nodetypes.json') + nodefunc_file = os.path.join(resourcepath, 'Test', 'nodefunctions.py') + with open(nodetype_file, 'w') as fp: + fp.write('{"Testnode": {\ + "name": "Testnode",\ + "slottypes": ["gen", "foo", "bar"],\ + 
"nodefunction_name": "testnodefunc",\ + "gatetypes": ["gen", "foo", "bar"]}}') + with open(nodefunc_file, 'w') as fp: + fp.write("def testnodefunc(netapi, node=None, **prams):\r\n return 17") + micropsi.reload_native_modules() + testnode = netapi.create_node("Testnode", None, "test") + netapi.link(testnode, 'foo', foopipe, 'sub') + netapi.link(foopipe, 'sur', testnode, 'bar') + micropsi.save_nodenet(test_nodenet) + # I don't understand why, but this is necessary. + micropsi.revert_nodenet(test_nodenet) + netapi.delete_nodespace(nodespace) + data = netapi.get_node(testnode.uid).get_data(include_links=True) + assert data['links'] == {} + + +@pytest.mark.engine("theano_engine") +def test_delete_subnodespace_removes_x_partition_links(test_nodenet, resourcepath): + nodenet = micropsi.get_nodenet(test_nodenet) + netapi = nodenet.netapi + nodespace = netapi.create_nodespace(None, "partition", options={'new_partition': True}) + subnodespace = netapi.create_nodespace(nodespace.uid, "foo") + r1 = netapi.create_node("Register", None) + r2 = netapi.create_node("Register", subnodespace.uid) + r3 = netapi.create_node("Register", None) + netapi.link(r1, 'gen', r2, 'gen') + netapi.link(r2, 'gen', r3, 'gen') + netapi.delete_nodespace(subnodespace) + data = netapi.get_node(r1.uid).get_data({'include_links': True}) + assert data['links'] == {} + for key in nodenet.rootpartition.inlinks: + for i in range(3): + assert len(nodenet.rootpartition.inlinks[key][i].get_value()) == 0 + + +@pytest.mark.engine("theano_engine") +def test_sensor_actuator_indices(test_nodenet): + nodenet = micropsi.get_nodenet(test_nodenet) + netapi = nodenet.netapi + result, world_uid = micropsi.new_world('default', 'World') + micropsi.set_nodenet_properties(test_nodenet, worldadapter='Default', world_uid=world_uid) + sensor = netapi.create_node("Sensor", None, "static_sensor") + sensor.set_parameter("datasource", "static_on") + actor = netapi.create_node("Actor", None, "echo_actor") + actor.set_parameter("datatarget", "echo") + register = netapi.create_node("Register", None, "source") + register.activation = 0.8 + netapi.link(register, 'gen', register, 'gen', weight=0.5) + netapi.link(register, 'gen', actor, 'gen') + assert sensor.activation == 0 + assert actor.get_gate('gen').activation == 0 + micropsi.step_nodenet(test_nodenet) + micropsi.step_nodenet(test_nodenet) + assert sensor.activation == 1 + assert round(actor.get_gate('gen').activation, 3) == 0.8 + netapi.delete_node(sensor) + netapi.delete_node(actor) + assert set(nodenet.rootpartition.actuator_indices) == {0} + assert set(nodenet.rootpartition.sensor_indices) == {0} + + +def test_partition_get_node_data(test_nodenet): + nodenet = micropsi.get_nodenet(test_nodenet) + netapi = nodenet.netapi + nodespace, source, register = prepare(netapi) + + nodes = [] + for i in range(10): + n = netapi.create_node("Pipe", nodespace.uid if i > 4 else None, "node %d" % i) + nodes.append(n) + + for i in range(4): + netapi.link(nodes[i], 'gen', nodes[5], 'gen', weight=((i + 2) / 10)) + netapi.link(nodes[9], 'gen', nodes[4], 'gen', 0.375) + + third_ns = netapi.create_nodespace(None, "third") + third = netapi.create_node("Register", third_ns.uid, "third") + netapi.link(nodes[4], 'gen', third, 'gen') + + node_data = nodenet.get_nodes(nodespace_uids=[None])['nodes'] + assert set(node_data.keys()) == set([n.uid for n in nodes[:5]] + [source.uid, register.uid, third.uid] + [nodes[9].uid, nodes[5].uid]) + + node_data = nodenet.get_nodes()['nodes'] + n1, n3, n4, n9 = nodes[1], nodes[3], nodes[4], 
nodes[9] + assert round(node_data[n1.uid]['links']['gen'][0]['weight'], 3) == 0.3 + assert round(node_data[n3.uid]['links']['gen'][0]['weight'], 3) == 0.5 + assert round(node_data[n9.uid]['links']['gen'][0]['weight'], 3) == 0.375 + # assert node_data[n4.uid]['links'] == {} + + node_data = nodenet.get_nodes(nodespace_uids=[nodespace.uid])['nodes'] + assert len(node_data.keys()) == 12 + assert node_data[n4.uid]['links'] == {} + assert third.uid not in node_data diff --git a/micropsi_core/tests/test_operations.py b/micropsi_core/tests/test_operations.py new file mode 100644 index 00000000..b9cef870 --- /dev/null +++ b/micropsi_core/tests/test_operations.py @@ -0,0 +1,54 @@ + +from micropsi_core import runtime + + +def test_user_operation(test_nodenet, resourcepath): + import os + os.makedirs(os.path.join(resourcepath, 'foobar')) + with open(os.path.join(resourcepath, 'foobar', 'operations.py'), 'w+') as fp: + fp.write(""" +def delete_nodes(netapi, selection): + for uid in selection: + netapi.delete_node(netapi.get_node(uid)) + +delete_nodes.selectioninfo = { + 'nodetypes': [], + 'mincount': 1, + 'maxcount': -1 +}""") + runtime.reload_native_modules() + ops = runtime.get_available_operations() + assert ops['delete_nodes']['category'] == 'foobar' + res, uid = runtime.add_node(test_nodenet, "Register", [10, 10], None) + runtime.run_operation(test_nodenet, "delete_nodes", {}, [uid]) + assert uid not in runtime.nodenets[test_nodenet].get_node_uids() + + +def test_autoalign_operation(test_nodenet): + ops = runtime.get_available_operations() + assert ops['autoalign']['selection']['nodetypes'] == [] + assert ops['autoalign']['selection']['mincount'] == 1 + assert ops['autoalign']['selection']['maxcount'] == -1 + assert ops['autoalign']['category'] == 'layout' + assert ops['autoalign']['parameters'] == [] + + api = runtime.nodenets[test_nodenet].netapi + ns1 = api.create_nodespace(None, "foo") + p1 = api.create_node("Pipe", None, "p1") + p2 = api.create_node("Pipe", None, "p2") + p3 = api.create_node("Pipe", None, "p3") + api.link_with_reciprocal(p1, p2, 'subsur') + api.link_with_reciprocal(p1, p3, 'subsur') + api.link_with_reciprocal(p2, p3, 'porret') + runtime.save_nodenet(test_nodenet) + runtime.run_operation(test_nodenet, "autoalign", {}, [p1.uid, p2.uid, p3.uid, ns1.uid]) + assert p1.position[0] == p2.position[0] + assert p1.position[1] < p2.position[1] + assert p2.position[1] == p3.position[1] + runtime.revert_nodenet(test_nodenet) + runtime.run_operation(test_nodenet, "autoalign", {}, [api.get_nodespace(None).uid]) + assert p1.position[0] == p2.position[0] + assert p1.position[1] < p2.position[1] + assert p2.position[1] == p3.position[1] + result, data = runtime.run_operation(test_nodenet, "autoalign", {}, [p1.uid]) + assert 'error' in data \ No newline at end of file diff --git a/micropsi_core/tests/test_runtime.py b/micropsi_core/tests/test_runtime.py index 855d1e29..ac7b216b 100644 --- a/micropsi_core/tests/test_runtime.py +++ b/micropsi_core/tests/test_runtime.py @@ -85,7 +85,6 @@ def test_get_multiple_logger_messages_are_sorted(): def test_register_runner_condition_step(test_nodenet): import time - micropsi.set_runner_properties(1, 1) success, data = micropsi.set_runner_condition(test_nodenet, steps=7) assert data['step'] == 7 assert data['step_amount'] == 7 @@ -104,7 +103,6 @@ def test_register_runner_condition_monitor(test_nodenet): import time - micropsi.set_runner_properties(1, 1) nn = micropsi.nodenets[test_nodenet] node = 
nn.netapi.create_node('Register', None) nn.netapi.link(node, 'gen', node, 'gen', weight=2) @@ -155,7 +153,7 @@ def test_create_nodenet_from_template(test_nodenet, node, engine): api.link(node1, 'gen', node2, 'gen') micropsi.save_nodenet(test_nodenet) result, uid = micropsi.new_nodenet('copynet', engine=engine, template=test_nodenet) - data = micropsi.get_nodenet_data(uid, None) + data = micropsi.get_nodes(uid) for uid, n in data['nodes'].items(): if n['name'] == node1.name: assert len(n['links']['gen']) == 2 @@ -168,3 +166,103 @@ def test_export_json_does_not_send_duplicate_links(fixed_nodenet): import json result = json.loads(micropsi.export_nodenet(fixed_nodenet)) assert len(result['links']) == 4 + + +def test_generate_netapi_fragment(test_nodenet, resourcepath): + import os + netapi = micropsi.nodenets[test_nodenet].netapi + # create a bunch of nodes and link them + linktypes = ['subsur', 'porret', 'catexp'] + nodes = [] + for t in linktypes: + p1 = netapi.create_node('Pipe', None, t) + p2 = netapi.create_node('Pipe', None, t + '2') + nodes.extend([p1, p2]) + netapi.link_with_reciprocal(p1, p2, t) + reg = netapi.create_node('Register', None, 'reg') + netapi.link(reg, 'gen', nodes[0], 'gen') + ns = netapi.create_nodespace(None, 'ns1') + nodes.extend([reg, ns]) + # remember their names + names = [n.name for n in nodes] + fragment = micropsi.generate_netapi_fragment(test_nodenet, [n.uid for n in nodes]) + micropsi.nodenets[test_nodenet].clear() + code = "def foo(netapi):\n " + "\n ".join(fragment.split('\n')) + # save the fragment as recipe & run + with open(os.path.join(resourcepath, 'recipes.py'), 'w+') as fp: + fp.write(code) + micropsi.reload_native_modules() + micropsi.run_recipe(test_nodenet, 'foo', {}) + # assert that all the nodes are there again + assert set(names) == set([n.name for n in netapi.get_nodes()] + ['ns1']) + + +def test_get_nodes(test_nodenet): + nodenet = micropsi.nodenets[test_nodenet] + netapi = nodenet.netapi + ns1 = netapi.create_nodespace(None, "ns1") + ns2 = netapi.create_nodespace(None, "ns2") + ns3 = netapi.create_nodespace(ns1.uid, "ns3") + n1 = netapi.create_node("Pipe", ns1.uid, "n1") + n2 = netapi.create_node("Pipe", ns2.uid, "n2") + n3 = netapi.create_node("Pipe", ns3.uid, "n3") + result = micropsi.get_nodes(test_nodenet) + rootuid = nodenet.get_nodespace(None).uid + assert set(result['nodes'].keys()) == {n1.uid, n2.uid, n3.uid} + assert set(result['nodespaces'].keys()) == {rootuid, ns1.uid, ns2.uid, ns3.uid} + result = micropsi.get_nodes(test_nodenet, [None]) + assert result['nodes'] == {} + assert set(result['nodespaces'].keys()) == {ns1.uid, ns2.uid} + result = micropsi.get_nodes(test_nodenet, [ns1.uid]) + assert set(result['nodes'].keys()) == {n1.uid} + assert set(result['nodespaces'].keys()) == {ns3.uid} + + +def test_run_netapi_command(test_nodenet): + nodenet = micropsi.nodenets[test_nodenet] + netapi = nodenet.netapi + command = "foo = netapi.create_node('Pipe', None, 'foo')" + result, _ = micropsi.run_netapi_command(test_nodenet, command) + assert result + command = "netapi.link(foo, 'gen', foo, 'gen')" + result, _ = micropsi.run_netapi_command(test_nodenet, command) + assert result + nodes = netapi.get_nodes() + assert len(nodes) == 1 + assert nodes[0].get_gate('gen').get_links()[0].target_node == nodes[0] + command = "netapi.get_node('%s')" % nodes[0].uid + result, node = micropsi.run_netapi_command(test_nodenet, command) + assert node == str(nodes[0]) + command = "[n.name for n in netapi.get_nodes()]" + result, node = 
micropsi.run_netapi_command(test_nodenet, command) + assert node == "['foo']" + command = "netapi.create_node()" + result, msg = micropsi.run_netapi_command(test_nodenet, command) + assert not result + assert msg.startswith("TypeError") + command = "for i in range(3): netapi.create_node('Register', None, 'test%d' % i)" + result, msg = micropsi.run_netapi_command(test_nodenet, command) + assert result + assert len(netapi.get_nodes()) == 4 + + +def test_get_netapi_autocomplete(test_nodenet): + micropsi.run_netapi_command(test_nodenet, "foonode = netapi.create_node('Pipe', None, 'foo')") + micropsi.run_netapi_command(test_nodenet, "foogate = foonode.get_gate('gen')") + micropsi.run_netapi_command(test_nodenet, "fooslot = foonode.get_slot('gen')") + micropsi.run_netapi_command(test_nodenet, "nodespace = netapi.create_nodespace(None, 'foospace')") + micropsi.run_netapi_command(test_nodenet, "barnode = netapi.create_node('Register', None, 'foo')") + data = micropsi.get_netapi_autocomplete_data(test_nodenet) + assert data['types']['foonode'] == 'Node' + assert data['types']['foogate'] == 'Gate' + assert data['types']['fooslot'] == 'Slot' + assert data['types']['nodespace'] == 'Nodespace' + assert data['types']['barnode'] == 'Node' + assert data['autocomplete_options']['Node']["get_gate"][0]['name'] == 'type' + assert data['autocomplete_options']['Gate']["get_links"] == [] + assert data['autocomplete_options']['Slot']["get_links"] == [] + assert data['autocomplete_options']['Nodespace']["get_known_ids"][0]['name'] == 'entitytype' + assert data['autocomplete_options']['Node']['name'] is None + data = micropsi.get_netapi_autocomplete_data(test_nodenet, name='foonode') + assert list(data['types'].keys()) == ['foonode'] + assert list(data['autocomplete_options'].keys()) == ['Node'] diff --git a/micropsi_core/tests/test_runtime_nodenet_basics.py b/micropsi_core/tests/test_runtime_nodenet_basics.py index dcc227a4..ac8eee83 100644 --- a/micropsi_core/tests/test_runtime_nodenet_basics.py +++ b/micropsi_core/tests/test_runtime_nodenet_basics.py @@ -49,8 +49,11 @@ def test_nodenet_data_gate_parameters(fixed_nodenet): assert data == {'gen': {'threshold': 1}} -def test_user_prompt(fixed_nodenet, nodetype_def, nodefunc_def): - with open(nodetype_def, 'w') as fp: +def test_user_prompt(fixed_nodenet, resourcepath): + import os + nodetype_file = os.path.join(resourcepath, 'Test', 'nodetypes.json') + nodefunc_file = os.path.join(resourcepath, 'Test', 'nodefunctions.py') + with open(nodetype_file, 'w') as fp: fp.write('{"Testnode": {\ "name": "Testnode",\ "slottypes": ["gen", "foo", "bar"],\ @@ -61,7 +64,7 @@ def test_user_prompt(fixed_nodenet, nodetype_def, nodefunc_def): "testparam": 13\ }\ }}') - with open(nodefunc_def, 'w') as fp: + with open(nodefunc_file, 'w') as fp: fp.write("def testnodefunc(netapi, node=None, **prams):\r\n return 17") micropsi.reload_native_modules() @@ -74,7 +77,7 @@ def test_user_prompt(fixed_nodenet, nodetype_def, nodefunc_def): "foobar", options ) - result, data = micropsi.get_current_state(fixed_nodenet, nodenet={'nodespace': 'Root'}) + result, data = micropsi.get_calculation_state(fixed_nodenet, nodenet={}) assert 'user_prompt' in data assert data['user_prompt']['msg'] == 'foobar' assert data['user_prompt']['node']['uid'] == uid @@ -100,7 +103,7 @@ def test_user_notification(test_nodenet, node): api = micropsi.nodenets[test_nodenet].netapi node_obj = api.get_node(node) api.notify_user(node_obj, "Hello there") - result, data = micropsi.get_current_state(test_nodenet, nodenet={'nodespace': 'Root'}) + result, data = 
micropsi.get_calculation_state(test_nodenet, nodenet={'nodespaces': [None]}) + assert 'user_prompt' in data + assert data['user_prompt']['node']['uid'] == node + assert data['user_prompt']['msg'] == "Hello there" @@ -231,18 +234,70 @@ def test_clone_nodes_copies_gate_params(fixed_nodenet): assert round(copy.get_gate_parameters()['gen']['maximum'], 2) == 0.1 -def test_modulators(fixed_nodenet): +def test_modulators(fixed_nodenet, engine): nodenet = micropsi.get_nodenet(fixed_nodenet) + # assert modulators are instantiated from the beginning + assert nodenet._modulators != {} + assert nodenet.get_modulator('emo_activation') is not None - nodenet.netapi.change_modulator("test_modulator", 0.42) - assert nodenet.netapi.get_modulator("test_modulator") == 0.42 - + # set a modulator nodenet.set_modulator("test_modulator", -1) assert nodenet.netapi.get_modulator("test_modulator") == -1 - -def test_node_parameters(fixed_nodenet, nodetype_def, nodefunc_def): - with open(nodetype_def, 'w') as fp: + # change_modulator applies a relative change (a diff), not an absolute value + nodenet.netapi.change_modulator("test_modulator", 0.42) + assert round(nodenet.netapi.get_modulator("test_modulator"), 4) == -0.58 + + # no modulators should be set if we disable the emotional_parameter module + res, uid = micropsi.new_nodenet('foobar', engine, use_modulators=False) + new_nodenet = micropsi.get_nodenet(uid) + assert new_nodenet._modulators == {} + # and no Emo-stepoperator should be set. + for item in new_nodenet.stepoperators: + assert 'Emotional' not in item.__class__.__name__ + + +def test_modulators_sensor_actor_connection(test_nodenet, test_world): + nodenet = micropsi.get_nodenet(test_nodenet) + micropsi.set_nodenet_properties(test_nodenet, worldadapter="Braitenberg", world_uid=test_world) + res, s1_id = micropsi.add_node(test_nodenet, "Sensor", [10, 10], None, name="brightness_l", parameters={'datasource': 'brightness_l'}) + res, s2_id = micropsi.add_node(test_nodenet, "Sensor", [20, 20], None, name="emo_activation", parameters={'datasource': 'emo_activation'}) + res, a1_id = micropsi.add_node(test_nodenet, "Actor", [30, 30], None, name="engine_l", parameters={'datatarget': 'engine_l'}) + res, a2_id = micropsi.add_node(test_nodenet, "Actor", [40, 40], None, name="base_importance_of_intention", parameters={'datatarget': 'base_importance_of_intention'}) + res, r1_id = micropsi.add_node(test_nodenet, "Register", [10, 30], None, name="r1") + res, r2_id = micropsi.add_node(test_nodenet, "Register", [10, 30], None, name="r2") + s1 = nodenet.get_node(s1_id) + s2 = nodenet.get_node(s2_id) + r1 = nodenet.get_node(r1_id) + r2 = nodenet.get_node(r2_id) + s2.set_gate_parameter('gen', 'maximum', 999) + micropsi.add_link(test_nodenet, r1_id, 'gen', a1_id, 'gen') + micropsi.add_link(test_nodenet, r2_id, 'gen', a2_id, 'gen') + r1.activation = 0.3 + r2.activation = 0.7 + emo_val = nodenet.get_modulator("emo_activation") + + # patch the reset method to check whether the datatarget was written + def nothing(): + pass + nodenet.worldadapter_instance.reset_datatargets = nothing + + nodenet.step() + assert round(nodenet.worldadapter_instance.datatargets['engine_l'], 3) == 0.3 + assert round(s1.activation, 3) == round(nodenet.worldadapter_instance.get_datasource_value('brightness_l'), 3) + assert round(s2.activation, 3) == round(emo_val, 3) + assert round(nodenet.get_modulator('base_importance_of_intention'), 3) == 0.7 + assert round(nodenet.worldadapter_instance.datatargets['engine_l'], 3) == 0.3 + emo_val = nodenet.get_modulator("emo_activation") + nodenet.step() + assert 
round(s2.activation, 3) == round(emo_val, 3) + + +def test_node_parameters(fixed_nodenet, resourcepath): + import os + nodetype_file = os.path.join(resourcepath, 'Test', 'nodetypes.json') + nodefunc_file = os.path.join(resourcepath, 'Test', 'nodefunctions.py') + with open(nodetype_file, 'w') as fp: fp.write('{"Testnode": {\ "name": "Testnode",\ "slottypes": ["gen", "foo", "bar"],\ @@ -258,7 +313,7 @@ def test_node_parameters(fixed_nodenet, nodetype_def, nodefunc_def): "protocol_mode": "all_active"\ }}\ }') - with open(nodefunc_def, 'w') as fp: + with open(nodefunc_file, 'w') as fp: fp.write("def testnodefunc(netapi, node=None, **prams):\r\n return 17") assert micropsi.reload_native_modules() @@ -294,15 +349,18 @@ def test_delete_linked_nodes(fixed_nodenet): netapi.delete_node(evil_two) -def test_multiple_nodenet_interference(engine, nodetype_def, nodefunc_def): - with open(nodetype_def, 'w') as fp: +def test_multiple_nodenet_interference(engine, resourcepath): + import os + nodetype_file = os.path.join(resourcepath, 'Test', 'nodetypes.json') + nodefunc_file = os.path.join(resourcepath, 'Test', 'nodefunctions.py') + with open(nodetype_file, 'w') as fp: fp.write('{"Testnode": {\ "name": "Testnode",\ "slottypes": ["gen", "foo", "bar"],\ "gatetypes": ["gen", "foo", "bar"],\ "nodefunction_name": "testnodefunc"\ }}') - with open(nodefunc_def, 'w') as fp: + with open(nodefunc_file, 'w') as fp: fp.write("def testnodefunc(netapi, node=None, **prams):\r\n node.get_gate('gen').gate_function(17)") micropsi.reload_native_modules() @@ -346,7 +404,7 @@ def test_multiple_nodenet_interference(engine, nodetype_def, nodefunc_def): def test_get_nodespace_changes(fixed_nodenet): net = micropsi.nodenets[fixed_nodenet] net.step() - result = micropsi.get_nodespace_changes(fixed_nodenet, None, 0) + result = micropsi.get_nodespace_changes(fixed_nodenet, [None], 0) assert set(result['nodes_dirty'].keys()) == set(net.get_node_uids()) assert result['nodes_deleted'] == [] assert result['nodespaces_dirty'] == {} @@ -360,9 +418,9 @@ def test_get_nodespace_changes(fixed_nodenet): net.netapi.link(newnode, 'gen', nodes['B1'], 'gen') newspace = net.netapi.create_nodespace(None, "nodespace") net.step() - test = micropsi.get_nodenet_activation_data(fixed_nodenet, None, 1) + test = micropsi.get_nodenet_activation_data(fixed_nodenet, [None], 1) assert test['has_changes'] - result = micropsi.get_nodespace_changes(fixed_nodenet, None, 1) + result = micropsi.get_nodespace_changes(fixed_nodenet, [None], 1) assert nodes['B2'].uid in result['nodes_deleted'] assert nodes['A1'].uid in result['nodes_dirty'] assert nodes['A2'].uid in result['nodes_dirty'] @@ -373,7 +431,7 @@ def test_get_nodespace_changes(fixed_nodenet): assert len(result['nodes_dirty'].keys()) == 4 assert len(result['nodespaces_dirty'].keys()) == 1 net.step() - test = micropsi.get_nodenet_activation_data(fixed_nodenet, None, 2) + test = micropsi.get_nodenet_activation_data(fixed_nodenet, [None], 2) assert not test['has_changes'] @@ -385,9 +443,22 @@ def test_get_nodespace_changes_cycles(fixed_nodenet): nodes[n.name] = n net.netapi.delete_node(nodes['B2']) net.step() - result = micropsi.get_nodespace_changes(fixed_nodenet, None, 1) + result = micropsi.get_nodespace_changes(fixed_nodenet, [None], 1) assert nodes['B2'].uid in result['nodes_deleted'] for i in range(101): net.step() - result = micropsi.get_nodespace_changes(fixed_nodenet, None, 1) + result = micropsi.get_nodespace_changes(fixed_nodenet, [None], 1) assert nodes['B2'].uid not in result['nodes_deleted'] + + +def 
test_nodespace_properties(test_nodenet): + data = {'testvalue': 'foobar'} + rootns = micropsi.get_nodenet(test_nodenet).get_nodespace(None) + micropsi.set_nodespace_properties(test_nodenet, rootns.uid, data) + assert micropsi.nodenets[test_nodenet].metadata['nodespace_ui_properties'][rootns.uid] == data + assert micropsi.get_nodespace_properties(test_nodenet, rootns.uid) == data + micropsi.save_nodenet(test_nodenet) + micropsi.revert_nodenet(test_nodenet) + assert micropsi.get_nodespace_properties(test_nodenet, rootns.uid) == data + properties = micropsi.get_nodespace_properties(test_nodenet) + assert properties[rootns.uid] == data diff --git a/micropsi_core/tests/test_runtime_nodes.py b/micropsi_core/tests/test_runtime_nodes.py index bea5c2c3..7a950a46 100644 --- a/micropsi_core/tests/test_runtime_nodes.py +++ b/micropsi_core/tests/test_runtime_nodes.py @@ -27,17 +27,17 @@ def prepare_nodenet(test_nodenet): def test_add_node(test_nodenet): micropsi.load_nodenet(test_nodenet) # make sure nodenet is empty - nodespace = micropsi.get_nodenet_data(test_nodenet, None) + nodespace = micropsi.get_nodes(test_nodenet) try: for i in nodespace["nodes"]: micropsi.delete_node(test_nodenet, i) except: pass - nodespace = micropsi.get_nodenet_data(test_nodenet, None) + nodespace = micropsi.get_nodes(test_nodenet) assert len(nodespace.get("nodes", [])) == 0 res, uid = micropsi.add_node(test_nodenet, "Pipe", [200, 250, 10], None, state=None, name="A") - nodespace = micropsi.get_nodenet_data(test_nodenet, None) + nodespace = micropsi.get_nodes(test_nodenet) assert len(nodespace["nodes"]) == 1 node1 = nodespace["nodes"][uid] assert node1["name"] == "A" @@ -45,10 +45,9 @@ def test_add_node(test_nodenet): def test_position_always_3d(test_nodenet): - micropsi.load_nodenet(test_nodenet) res, nuid = micropsi.add_node(test_nodenet, "Pipe", [200], None, state=None, name="A") res, nsuid = micropsi.add_nodespace(test_nodenet, [200, 125, 0, 134], None, name="NS") - data = micropsi.get_nodenet_data(test_nodenet, None) + data = micropsi.get_nodes(test_nodenet) assert data['nodes'][nuid]['position'] == [200, 0, 0] assert data['nodespaces'][nsuid]['position'] == [200, 125, 0] @@ -56,7 +55,7 @@ def test_position_always_3d(test_nodenet): def test_get_nodenet_activation_data(test_nodenet): nodes = prepare_nodenet(test_nodenet) uid = nodes['a'] - activation_data = micropsi.get_nodenet_activation_data(test_nodenet, None) + activation_data = micropsi.get_nodenet_activation_data(test_nodenet, [None]) assert activation_data["activations"][uid][0] == 0 assert activation_data["activations"][uid][1] == 0 assert activation_data["activations"][uid][2] == 0 @@ -67,7 +66,7 @@ def test_get_nodenet_activation_data(test_nodenet): micropsi.set_node_activation(test_nodenet, nodes['a'], 0.34556865) - activation_data = micropsi.get_nodenet_activation_data(test_nodenet, None) + activation_data = micropsi.get_nodenet_activation_data(test_nodenet, [None]) assert activation_data["activations"][uid][0] == 0.3 @@ -75,13 +74,13 @@ def test_get_nodenet_activation_data_for_nodespace(test_nodenet): nodes = prepare_nodenet(test_nodenet) uid = nodes['a'] nodespace = micropsi.nodenets[test_nodenet].get_nodespace_uids()[0] - activation_data = micropsi.get_nodenet_activation_data(test_nodenet, nodespace) + activation_data = micropsi.get_nodenet_activation_data(test_nodenet, [nodespace]) assert activation_data["activations"][uid][0] == 0 def test_get_nodespace(test_nodenet): nodes = prepare_nodenet(test_nodenet) - nodespace = 
micropsi.get_nodenet_data(test_nodenet, None) + nodespace = micropsi.get_nodes(test_nodenet) assert len(nodespace["nodes"]) == 4 node1 = nodespace["nodes"][nodes['a']] assert node1["name"] == "A" @@ -111,7 +110,7 @@ def test_add_link(test_nodenet): micropsi.add_link(test_nodenet, nodes['a'], "por", nodes['b'], "gen", 1, 0.1) micropsi.add_link(test_nodenet, nodes['c'], "ret", nodes['b'], "gen", 1, 1) - nodespace = micropsi.get_nodenet_data(test_nodenet, None) + nodespace = micropsi.get_nodes(test_nodenet) assert len(nodespace["nodes"]) == 4 link_a_b = nodespace["nodes"][nodes['a']]['links']['por'][0] @@ -132,7 +131,7 @@ def test_delete_link(test_nodenet): success, link = micropsi.add_link(test_nodenet, nodes['a'], "por", nodes['b'], "gen", 0.5, 1) assert success micropsi.delete_link(test_nodenet, nodes['a'], "por", nodes['b'], "gen") - nodespace = micropsi.get_nodenet_data(test_nodenet, None) + nodespace = micropsi.get_nodes(test_nodenet) assert nodespace['nodes'][nodes['a']]['links'] == {} @@ -143,13 +142,13 @@ def test_save_nodenet(test_nodenet): # unload_nodenet micropsi.unload_nodenet(test_nodenet) try: - micropsi.get_nodenet_data(test_nodenet, None) + micropsi.get_nodes(test_nodenet) assert False, "could fetch a Nodespace that should not have been in memory" except: pass # load_nodenet - micropsi.load_nodenet(test_nodenet) - nodespace = micropsi.get_nodenet_data(test_nodenet, None) + micropsi.get_nodenet(test_nodenet) + nodespace = micropsi.get_nodes(test_nodenet) assert len(nodespace["nodes"]) == 4 micropsi.delete_nodenet(test_nodenet) @@ -166,15 +165,42 @@ def hashlink(l): assert links_before == links_after +def test_native_module_and_recipe_categories(fixed_nodenet, resourcepath): + import os + os.mkdir(os.path.join(resourcepath, 'Test', 'Test2')) + nodetype_file = os.path.join(resourcepath, 'Test', 'nodetypes.json') + nodefunc_file = os.path.join(resourcepath, 'Test', 'nodefunctions.py') + recipe_file = os.path.join(resourcepath, 'Test', 'Test2', 'recipes.py') + with open(nodetype_file, 'w') as fp: + fp.write('{"Testnode": {\ + "name": "Testnode",\ + "slottypes": ["gen", "foo", "bar"],\ + "nodefunction_name": "testnodefunc",\ + "gatetypes": ["gen", "foo", "bar"]\ + }}') + with open(nodefunc_file, 'w') as fp: + fp.write("def testnodefunc(netapi, node=None, **prams):\r\n return 17") + with open(recipe_file, 'w') as fp: + fp.write("def testrecipe(netapi):\r\n pass") + micropsi.reload_native_modules() + res = micropsi.get_available_native_module_types(fixed_nodenet) + assert res['Testnode']['category'] == 'Test' + res = micropsi.get_available_recipes() + assert res['testrecipe']['category'] == 'Test/Test2' + + @pytest.mark.engine("dict_engine") # This behavior is not available in theano_engine: Default inheritance at runtime is not implemented for # performance reasons, changed defaults will only affect newly created nodes. # This test will have to be replaced when the generic solution proposed in TOL-90 has been # implemented. 
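# (Sketch of what "default inheritance at runtime" means here, inferred from the
# comment above rather than stated by the patch: dict_engine resolves missing
# gate parameters through the node's nodetype on access, so after
#     with open(nodetype_file, 'w') as fp: fp.write(...)   # changed defaults
#     micropsi.reload_native_modules()
# an existing node reports the new values, while theano_engine copies the
# defaults into each node at creation time, so only nodes created afterwards
# would see them.)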
-def test_gate_defaults_change_with_nodetype(fixed_nodenet, resourcepath, nodetype_def, nodefunc_def): +def test_gate_defaults_change_with_nodetype(fixed_nodenet, resourcepath,): # gate_parameters are a property of the nodetype, and should change with # the nodetype definition if not explicitly overwritten for a given node - with open(nodetype_def, 'w') as fp: + import os + nodetype_file = os.path.join(resourcepath, 'Test', 'nodetypes.json') + nodefunc_file = os.path.join(resourcepath, 'Test', 'nodefunctions.py') + with open(nodetype_file, 'w') as fp: fp.write('{"Testnode": {\ "name": "Testnode",\ "slottypes": ["gen", "foo", "bar"],\ @@ -186,11 +212,11 @@ def test_gate_defaults_change_with_nodetype(fixed_nodenet, resourcepath, nodetyp "amplification": 13\ }\ }}}') - with open(nodefunc_def, 'w') as fp: + with open(nodefunc_file, 'w') as fp: fp.write("def testnodefunc(netapi, node=None, **prams):\r\n return 17") micropsi.reload_native_modules() res, uid = micropsi.add_node(fixed_nodenet, "Testnode", [10, 10], name="Testnode") - with open(nodetype_def, 'w') as fp: + with open(nodetype_file, 'w') as fp: fp.write('{"Testnode": {\ "name": "Testnode",\ "slottypes": ["gen", "foo", "bar"],\ @@ -207,16 +233,16 @@ def test_gate_defaults_change_with_nodetype(fixed_nodenet, resourcepath, nodetyp assert params["foo"]["amplification"] == 5 -def test_non_standard_gate_defaults(fixed_nodenet): - nodenet = micropsi.nodenets[fixed_nodenet] - res, uid = micropsi.add_node(fixed_nodenet, 'Register', [30, 30, 10], name='test') +def test_non_standard_gate_defaults(test_nodenet): + nodenet = micropsi.nodenets[test_nodenet] + res, uid = micropsi.add_node(test_nodenet, 'Register', [30, 30, 10], name='test') node = nodenet.netapi.get_node(uid) genparams = {'maximum': 0.5} micropsi.set_gate_parameters(nodenet.uid, node.uid, 'gen', genparams) assert node.clone_non_default_gate_parameters()['gen']['maximum'] == 0.5 assert node.get_data()['gate_parameters'] == {'gen': {'maximum': 0.5}} assert nodenet.get_data()['nodes'][uid]['gate_parameters'] == {'gen': {'maximum': 0.5}} - data = micropsi.get_nodenet_data(fixed_nodenet, None, step=-1) + data = micropsi.get_nodes(test_nodenet) assert data['nodes'][uid]['gate_parameters'] == {'gen': {'maximum': 0.5}} @@ -224,18 +250,20 @@ def test_ignore_links(test_nodenet): nodes = prepare_nodenet(test_nodenet) micropsi.add_link(test_nodenet, nodes['a'], "por", nodes['b'], "gen", 0.5, 1) - nodespace = micropsi.get_nodenet_data(test_nodenet, None) + nodespace = micropsi.get_nodes(test_nodenet, []) assert len(nodespace["nodes"]) == 4 assert 'links' not in nodespace assert len(nodespace["nodes"][nodes['a']]['links']['por']) == 1 - nodespace = micropsi.get_nodenet_data(test_nodenet, None, include_links=False) + nodespace = micropsi.get_nodes(test_nodenet, [], include_links=False) assert 'links' not in nodespace["nodes"][nodes['a']] -def test_remove_and_reload_native_module(fixed_nodenet, resourcepath, nodetype_def, nodefunc_def): - from os import remove - with open(nodetype_def, 'w') as fp: +def test_remove_and_reload_native_module(fixed_nodenet, resourcepath): + import os + nodetype_file = os.path.join(resourcepath, 'Test', 'nodetypes.json') + nodefunc_file = os.path.join(resourcepath, 'Test', 'nodefunctions.py') + with open(nodetype_file, 'w') as fp: fp.write('{"Testnode": {\ "name": "Testnode",\ "slottypes": ["gen", "foo", "bar"],\ @@ -247,20 +275,23 @@ def test_remove_and_reload_native_module(fixed_nodenet, resourcepath, nodetype_d "amplification": 13\ }\ }}}') - with open(nodefunc_def, 
'w') as fp: + with open(nodefunc_file, 'w') as fp: fp.write("def testnodefunc(netapi, node=None, **prams):\r\n return 17") micropsi.reload_native_modules() res, uid = micropsi.add_node(fixed_nodenet, "Testnode", [10, 10, 10], name="Testnode") - remove(nodetype_def) - remove(nodefunc_def) + os.remove(nodetype_file) + os.remove(nodefunc_file) micropsi.reload_native_modules() - assert micropsi.get_available_native_module_types(fixed_nodenet) == {} + assert 'Testnode' not in micropsi.get_available_native_module_types(fixed_nodenet) @pytest.mark.engine("dict_engine") -def test_engine_specific_nodetype_dict(fixed_nodenet, resourcepath, nodetype_def, nodefunc_def): - with open(nodetype_def, 'w') as fp: +def test_engine_specific_nodetype_dict(fixed_nodenet, resourcepath): + import os + nodetype_file = os.path.join(resourcepath, 'Test', 'nodetypes.json') + nodefunc_file = os.path.join(resourcepath, 'Test', 'nodefunctions.py') + with open(nodetype_file, 'w') as fp: fp.write('{"Testnode": {\ "engine": "theano_engine",\ "name": "Testnode",\ @@ -273,17 +304,20 @@ def test_engine_specific_nodetype_dict(fixed_nodenet, resourcepath, nodetype_def "amplification": 13\ }\ }}}') - with open(nodefunc_def, 'w') as fp: + with open(nodefunc_file, 'w') as fp: fp.write("def testnodefunc(netapi, node=None, **prams):\r\n return 17") micropsi.reload_native_modules() - data = micropsi.get_nodenet_data(fixed_nodenet, nodespace='Root') + data = micropsi.get_nodenet_metadata(fixed_nodenet) assert "Testnode" not in data['native_modules'] @pytest.mark.engine("theano_engine") -def test_engine_specific_nodetype_theano(fixed_nodenet, resourcepath, nodetype_def, nodefunc_def): - with open(nodetype_def, 'w') as fp: +def test_engine_specific_nodetype_theano(fixed_nodenet, resourcepath): + import os + nodetype_file = os.path.join(resourcepath, 'Test', 'nodetypes.json') + nodefunc_file = os.path.join(resourcepath, 'Test', 'nodefunctions.py') + with open(nodetype_file, 'w') as fp: fp.write('{"Testnode": {\ "engine": "dict_engine",\ "name": "Testnode",\ @@ -296,11 +330,11 @@ def test_engine_specific_nodetype_theano(fixed_nodenet, resourcepath, nodetype_d "amplification": 13\ }\ }}}') - with open(nodefunc_def, 'w') as fp: + with open(nodefunc_file, 'w') as fp: fp.write("def testnodefunc(netapi, node=None, **prams):\r\n return 17") micropsi.reload_native_modules() - data = micropsi.get_nodenet_data(fixed_nodenet, nodespace='Root') + data = micropsi.get_nodenet_metadata(fixed_nodenet) assert "Testnode" not in data['native_modules'] @@ -313,8 +347,10 @@ def test_node_parameters_none_resets_to_default(fixed_nodenet): assert node.get_parameter('wait') == 0 -def test_get_recipes(fixed_nodenet, resourcepath, recipes_def): - with open(recipes_def, 'w') as fp: +def test_get_recipes(fixed_nodenet, resourcepath): + import os + recipe_file = os.path.join(resourcepath, 'Test', 'recipes.py') + with open(recipe_file, 'w') as fp: fp.write(""" def testfoo(netapi, count=23): return {'count':count} @@ -327,8 +363,10 @@ def testfoo(netapi, count=23): assert recipes['testfoo']['parameters'][0]['default'] == 23 -def test_run_recipe(fixed_nodenet, resourcepath, recipes_def): - with open(recipes_def, 'w') as fp: +def test_run_recipe(fixed_nodenet, resourcepath): + import os + recipe_file = os.path.join(resourcepath, 'Test', 'recipes.py') + with open(recipe_file, 'w') as fp: fp.write(""" def testfoo(netapi, count=23): return {'count':count} @@ -339,8 +377,11 @@ def testfoo(netapi, count=23): assert result['count'] == 42 -def 
test_node_parameter_defaults(fixed_nodenet, resourcepath, nodetype_def, nodefunc_def): - with open(nodetype_def, 'w') as fp: +def test_node_parameter_defaults(fixed_nodenet, resourcepath): + import os + nodetype_file = os.path.join(resourcepath, 'Test', 'nodetypes.json') + nodefunc_file = os.path.join(resourcepath, 'Test', 'nodefunctions.py') + with open(nodetype_file, 'w') as fp: fp.write('{"Testnode": {\ "name": "Testnode",\ "slottypes": ["gen", "foo", "bar"],\ @@ -351,18 +392,20 @@ def test_node_parameter_defaults(fixed_nodenet, resourcepath, nodetype_def, node "testparam": 13\ }\ }}') - with open(nodefunc_def, 'w') as fp: + with open(nodefunc_file, 'w') as fp: fp.write("def testnodefunc(netapi, node=None, **prams):\r\n return 17") micropsi.reload_native_modules() - data = micropsi.get_nodenet_data(fixed_nodenet, None) res, uid = micropsi.add_node(fixed_nodenet, "Testnode", [10, 10, 10], name="Test") node = micropsi.nodenets[fixed_nodenet].get_node(uid) assert node.get_parameter("testparam") == 13 -def test_node_parameters_from_persistence(fixed_nodenet, resourcepath, nodetype_def, nodefunc_def): - with open(nodetype_def, 'w') as fp: +def test_node_parameters_from_persistence(fixed_nodenet, resourcepath): + import os + nodetype_file = os.path.join(resourcepath, 'Test', 'nodetypes.json') + nodefunc_file = os.path.join(resourcepath, 'Test', 'nodefunctions.py') + with open(nodetype_file, 'w') as fp: fp.write('{"Testnode": {\ "name": "Testnode",\ "slottypes": ["gen", "foo", "bar"],\ @@ -373,7 +416,7 @@ def test_node_parameters_from_persistence(fixed_nodenet, resourcepath, nodetype_ "testparam": 13\ }\ }}') - with open(nodefunc_def, 'w') as fp: + with open(nodefunc_file, 'w') as fp: fp.write("def testnodefunc(netapi, node=None, **prams):\r\n return 17") micropsi.reload_native_modules() res, uid = micropsi.add_node(fixed_nodenet, "Testnode", [10, 10, 10], name="Test") diff --git a/micropsi_core/tests/test_runtime_world_basics.py b/micropsi_core/tests/test_runtime_world_basics.py index cb52ae57..dc0b478c 100644 --- a/micropsi_core/tests/test_runtime_world_basics.py +++ b/micropsi_core/tests/test_runtime_world_basics.py @@ -42,12 +42,15 @@ def test_get_world_properties(test_world): assert test_world == wp["uid"] -def test_get_worldadapters(test_world): +def test_get_worldadapters(test_world, test_nodenet): wa = micropsi.get_worldadapters(test_world) - assert 'engine_l' in wa['Braitenberg']['datatargets'] - assert 'engine_r' in wa['Braitenberg']['datatargets'] - assert 'brightness_l' in wa['Braitenberg']['datasources'] - assert 'brightness_r' in wa['Braitenberg']['datasources'] + assert 'Braitenberg' in wa + assert 'description' in wa['Braitenberg'] + assert 'datasources' not in wa['Braitenberg'] + runtime.set_nodenet_properties(test_nodenet, worldadapter='Braitenberg', world_uid=test_world) + wa = micropsi.get_worldadapters(test_world, test_nodenet) + assert wa['Braitenberg']['datatargets'] == ['engine_l', 'engine_r'] + assert wa['Braitenberg']['datasources'] == ['brightness_l', 'brightness_r'] def test_add_worldobject(test_world): @@ -83,7 +86,6 @@ def test_register_agent(test_world, test_nodenet): world = runtime.worlds[test_world] nodenet = runtime.get_nodenet(test_nodenet) assert nodenet.uid not in world.data['agents'] - runtime.load_nodenet(test_nodenet) nodenet.world = test_world runtime.set_nodenet_properties(nodenet.uid, worldadapter='Braitenberg', world_uid=world.uid) assert nodenet.uid in world.data['agents'] @@ -114,7 +116,6 @@ def test_set_agent_properties(test_world, test_nodenet): 
def test_agent_dying_unregisters_agent(test_world, test_nodenet): world = runtime.worlds[test_world] nodenet = runtime.get_nodenet(test_nodenet) - runtime.load_nodenet(test_nodenet) nodenet.world = test_world runtime.set_nodenet_properties(nodenet.uid, worldadapter='Braitenberg', world_uid=world.uid) assert nodenet.uid in world.agents @@ -153,7 +154,6 @@ def test_world_does_not_spawn_deleted_agents(test_world, resourcepath): def test_reset_datatargets(test_world, test_nodenet): world = runtime.worlds[test_world] nodenet = runtime.get_nodenet(test_nodenet) - runtime.load_nodenet(test_nodenet) nodenet.world = test_world runtime.set_nodenet_properties(nodenet.uid, worldadapter='Braitenberg', world_uid=world.uid) world.agents[test_nodenet].datatargets['engine_r'] = 0.7 @@ -163,40 +163,25 @@ def test_reset_datatargets(test_world, test_nodenet): assert world.agents[test_nodenet].datatargets['engine_r'] == 0 -def test_actuators_do_not_reset_each_others_datatarget(test_world, test_nodenet): - world = runtime.worlds[test_world] - nodenet = runtime.get_nodenet(test_nodenet) - runtime.load_nodenet(test_nodenet) - nodenet.world = test_world - runtime.set_runner_properties(200, 1) - runtime.set_nodenet_properties(nodenet.uid, worldadapter='Braitenberg', world_uid=world.uid) - actor1 = nodenet.netapi.create_node("Actor", None) - actor2 = nodenet.netapi.create_node("Actor", None) - actor1.set_parameter('datatarget', 'engine_r') - actor2.set_parameter('datatarget', 'engine_r') - reg1 = nodenet.netapi.create_node("Register", None) - reg2 = nodenet.netapi.create_node("Register", None) - nodenet.netapi.link(reg1, 'gen', actor1, 'gen') - nodenet.netapi.link(reg2, 'gen', actor2, 'gen') - reg1.activation = 0.7 - reg2.activation = 0.3 - mock_reset = mock.Mock(return_value=None) - world.agents[test_nodenet].reset_datatargets = mock_reset - runtime.step_nodenet(test_nodenet) - assert world.agents[test_nodenet].datatargets['engine_r'] == 1 - - def test_worldadapter_update_calls_reset_datatargets(test_world, test_nodenet): world = runtime.worlds[test_world] nodenet = runtime.get_nodenet(test_nodenet) - runtime.load_nodenet(test_nodenet) nodenet.world = test_world runtime.set_nodenet_properties(nodenet.uid, worldadapter='Braitenberg', world_uid=world.uid) - runtime.set_runner_properties(1, 1) world.agents[test_nodenet].reset_datatargets = mock.MagicMock(name='reset') runtime.step_nodenet(test_nodenet) world.agents[test_nodenet].reset_datatargets.assert_called_once_with() + +def test_worlds_are_configurable(): + res, uid = runtime.new_world('testworld', 'Island', config={'foo': 'bar', '42': '23'}) + assert uid in runtime.worlds + assert runtime.worlds[uid].data['config']['foo'] == 'bar' + runtime.revert_world(uid) + assert runtime.worlds[uid].data['config']['foo'] == 'bar' + assert runtime.worlds[uid].data['config']['42'] == '23' + + """ def test_get_world_view(micropsi, test_world): assert 0 diff --git a/micropsi_core/tests/test_vizapi.py b/micropsi_core/tests/test_vizapi.py index 674c3e74..95aea62e 100644 --- a/micropsi_core/tests/test_vizapi.py +++ b/micropsi_core/tests/test_vizapi.py @@ -47,7 +47,7 @@ def test_save_file(test_nodenet, resourcepath): assert os.path.isfile(filepath) -def test_plot_from_nodefunc(test_nodenet, resourcepath, nodetype_def, nodefunc_def): +def test_plot_from_nodefunc(test_nodenet, resourcepath): import os from random import random from time import sleep @@ -61,14 +61,17 @@ def test_plot_from_nodefunc(test_nodenet, resourcepath, nodetype_def, nodefunc_d assert os.path.abspath(returnpath) 
== os.path.abspath(filepath) assert os.path.isfile(filepath) os.remove(filepath) - with open(nodetype_def, 'w') as fp: + os.mkdir(os.path.join(resourcepath, 'plotter')) + nodetype_file = os.path.join(resourcepath, "plotter", "nodetypes.json") + nodefunc_file = os.path.join(resourcepath, "plotter", "nodefunctions.py") + with open(nodetype_file, 'w') as fp: fp.write("""{"Plotter": { "name": "Plotter", "slottypes": [], "nodefunction_name": "plotfunc", "gatetypes": [], "parameters": ["plotpath"]}}""") - with open(nodefunc_def, 'w') as fp: + with open(nodefunc_file, 'w') as fp: fp.write(""" def plotfunc(netapi, node=None, **params): import os @@ -82,10 +85,8 @@ def plotfunc(netapi, node=None, **params): micropsi.reload_native_modules() node = nodenet.netapi.create_node("Plotter", None, name="Plotter") node.set_parameter("plotpath", resourcepath) - micropsi.set_runner_properties(1000, 1) micropsi.start_nodenetrunner(test_nodenet) sleep(2) micropsi.stop_nodenetrunner(test_nodenet) - print(micropsi.MicropsiRunner.last_nodenet_exception) assert micropsi.MicropsiRunner.last_nodenet_exception == {} assert os.path.isfile(os.path.join(resourcepath, "plot.png")) diff --git a/micropsi_core/tools.py b/micropsi_core/tools.py index 7eba4824..981febf8 100644 --- a/micropsi_core/tools.py +++ b/micropsi_core/tools.py @@ -9,27 +9,14 @@ __date__ = '29.06.12' import uuid -import os + def generate_uid(): """produce a unique identifier, restricted to an ASCII string""" return uuid.uuid1().hex -def mkdir(new_directory_name): - """if the directory does not exist, create it; otherwise, exit quietly""" - - if os.path.isdir(new_directory_name): - pass - elif os.path.isfile(new_directory_name): - raise OSError("a file with the same name as the desired directory, '%s', already exists." % new_directory_name) - else: - head, tail = os.path.split(new_directory_name) - if head and not os.path.isdir(head): - mkdir(head) - if tail: - os.mkdir(new_directory_name) -def check_for_url_proof_id(id, existing_ids = None, min_id_length = 1, max_id_length = 21): +def check_for_url_proof_id(id, existing_ids=None, min_id_length=1, max_id_length=21): """Returns (True, id) if id is permissible, and (False, error message) otherwise. 
Since we strip the id, you should use the returned one, not the original one""" @@ -38,9 +25,10 @@ def check_for_url_proof_id(id, existing_ids = None, min_id_length = 1, max_id_le # maybe this is too restrictive, but I want to use the id directly in urls for c in id: if not c.lower() in "0123456789abcdefghijklmnopqrstuvwxyz@._-": - return False, "The character '%s' is not allowed" %c + return False, "The character '%s' is not allowed" % c - if existing_ids and id.lower() in existing_ids: return False, "ID already exists" + if existing_ids and id.lower() in existing_ids: + return False, "ID already exists" if len(id) < min_id_length: return False, "Must at least have %s characters" % min_id_length if len(id) > max_id_length: @@ -49,7 +37,6 @@ def check_for_url_proof_id(id, existing_ids = None, min_id_length = 1, max_id_le return True, id - # Global parameters for all created functions # symbols that are included by default in the generated function's environment @@ -61,12 +48,15 @@ def check_for_url_proof_id(id, existing_ids = None, min_id_length = 1, max_id_le # add standard exceptions __bi = __builtins__ -if type(__bi) is not dict: __bi = __bi.__dict__ +if type(__bi) is not dict: + __bi = __bi.__dict__ for k in __bi: - if k.endswith("Error") or k.endswith("Warning"): SAFE_SYMBOLS.append(k) + if k.endswith("Error") or k.endswith("Warning"): + SAFE_SYMBOLS.append(k) del __bi -def create_function(source_string, parameters = "", additional_symbols = None): + +def create_function(source_string, parameters="", additional_symbols=None): """Create a python function from the given source code. Arguments: @@ -86,7 +76,7 @@ def create_function(source_string, parameters = "", additional_symbols = None): get_user = create_function(my_function_source, parameters="index = 0", additional_symbols = {'usermanager': usermanager}) print get_user("klaus") - + (This function is inspired by a recipe by David Decotigny.) """ @@ -99,12 +89,12 @@ def create_function(source_string, parameters = "", additional_symbols = None): # Setup the local and global dictionaries of the execution # environment for __my_function__ - bis = dict() # builtins + bis = dict() # builtins globs = dict() - locs = dict() + locs = dict() # Setup a standard-compatible python environment - bis["locals"] = lambda: locs + bis["locals"] = lambda: locs bis["globals"] = lambda: globs globs["__builtins__"] = bis globs["__name__"] = "SUBENV" @@ -159,8 +149,10 @@ def __init__(self, **kwargs): for i in kwargs: self[i] = kwargs[i] + import collections + class OrderedSet(collections.OrderedDict, collections.MutableSet): def update(self, *args, **kwargs): @@ -205,6 +197,7 @@ def __str__(self): symmetric_difference_update = property(lambda self: self.__ixor__) union = property(lambda self: self.__or__) + def itersubclasses(cls, folder=None, _seen=None): """ Generator over all subclasses of a given class, in depth first order. 
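# A minimal usage sketch for itersubclasses, assuming the semantics of the code
# in this hunk (the classes below are illustrative, not part of the patch):
#
#     class A: pass
#     class B(A): pass
#     class C(B): pass
#     assert list(itersubclasses(A)) == [B, C]   # depth first
#
# The optional folder argument filters the yielded classes by their
# __module__ prefix.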
@@ -216,15 +209,16 @@ def itersubclasses(cls, folder=None, _seen=None): if not isinstance(cls, type): raise TypeError('itersubclasses must be called with new-style classes, not %.100r' % cls) - if _seen is None: _seen = set() + if _seen is None: + _seen = set() try: subs = cls.__subclasses__() - except TypeError: # fails only when cls is type + except TypeError: # fails only when cls is type subs = cls.__subclasses__(cls) for sub in subs: if sub not in _seen: if folder is None or sub.__module__.startswith(folder): _seen.add(sub) yield sub - for sub in itersubclasses(sub, folder=folder, _seen=_seen): - yield sub + for sub in itersubclasses(sub, folder=folder, _seen=_seen): + yield sub diff --git a/micropsi_core/world/island/island.py b/micropsi_core/world/island/island.py index d07061c1..21878606 100644 --- a/micropsi_core/world/island/island.py +++ b/micropsi_core/world/island/island.py @@ -8,8 +8,8 @@ class Island(World): + """ A simple Doerner Island-World""" - """ mandatory: list of world adapters that are supported""" supported_worldadapters = ['Braitenberg', 'Survivor', 'StructuredObjects'] groundmap = { @@ -21,7 +21,7 @@ class Island(World): assets = { 'background': "island/psi_1.png", 'template': 'island/island.tpl', - 'js': "island/island.js", + 'paperjs': "island/island.js", 'x': 2048, 'y': 2048, 'icons': { @@ -43,7 +43,7 @@ class Island(World): } } - def __init__(self, filename, world_type="Island", name="", owner="", engine=None, uid=None, version=1): + def __init__(self, filename, world_type="Island", name="", owner="", engine=None, uid=None, version=1, config={}): World.__init__(self, filename, world_type=world_type, name=name, owner=owner, uid=uid, version=version) self.load_groundmap() # self.current_step = 0 @@ -291,12 +291,13 @@ def action_drink(self): class Survivor(WorldAdapter): - supported_datasources = ['body-energy', 'body-water', 'body-integrity'] - supported_datatargets = ['action_eat', 'action_drink', 'loco_north', 'loco_south', 'loco_east', 'loco_west'] def __init__(self, world, uid=None, **data): super(Survivor, self).__init__(world, uid, **data) + self.datasources = dict((s, 0) for s in ['body-energy', 'body-water', 'body-integrity']) + self.datatargets = dict((t, 0) for t in ['action_eat', 'action_drink', 'loco_north', 'loco_south', 'loco_east', 'loco_west']) + self.currentobject = None self.energy = 1.0 @@ -311,11 +312,11 @@ def __init__(self, world, uid=None, **data): self.datasources['body-integrity'] = self.integrity def initialize_worldobject(self, data): - if not "position" in data: + if "position" not in data: self.position = self.world.groundmap['start_position'] def update_data_sources_and_targets(self): - """called on every world simulation step to advance the life of the agent""" + """called on every world calculation step to advance the life of the agent""" if self.is_dead: return @@ -417,19 +418,19 @@ class Braitenberg(WorldAdapter): # maximum speed speed_limit = 1. 
- supported_datasources = ['brightness_l', 'brightness_r'] - supported_datatargets = ['engine_l', 'engine_r'] - def __init__(self, world, uid=None, **data): super(Braitenberg, self).__init__(world, uid, **data) + + self.datasources = {'brightness_l': 0, 'brightness_r': 0} + self.datatargets = {'engine_l': 0, 'engine_r': 0} self.datatarget_feedback = {'engine_l': 0, 'engine_r': 0} def initialize_worldobject(self, data): - if not "position" in data: + if "position" not in data: self.position = self.world.groundmap['start_position'] def update_data_sources_and_targets(self): - """called on every world simulation step to advance the life of the agent""" + """called on every world calculation step to advance the life of the agent""" # drive engines l_wheel_speed = self.datatargets["engine_l"] diff --git a/micropsi_core/world/island/structured_objects/structured_objects.py b/micropsi_core/world/island/structured_objects/structured_objects.py index 0b6761a2..7159bea1 100644 --- a/micropsi_core/world/island/structured_objects/structured_objects.py +++ b/micropsi_core/world/island/structured_objects/structured_objects.py @@ -10,30 +10,30 @@ class StructuredObjects(WorldAdapter): """A world adapter exposing objects composed of basic shapes and colors to the agent""" - shapetypes = [] - shapecolors = [] + def __init__(self, world, uid=None, **data): + super(StructuredObjects, self).__init__(world, uid, **data) - supported_datasources = ['fov-x', 'fov-y', 'major-newscene'] - supported_datatargets = ['fov_x', 'fov_y', 'fov_reset'] + self.datasources = {'fov-x': 0, 'fov-y': 0, 'major-newscene': 0} + self.datatargets = {'fov_x': 0, 'fov_y': 0, 'fov_reset': 0} - for key, objecttype in OBJECTS.items(): - for shapeline in objecttype['shape_grid']: - for shape in shapeline: - if shape is not None and shape.type not in shapetypes: - shapetypes.append(shape.type) - if shape is not None and shape.color not in shapecolors: - shapecolors.append(shape.color) + self.shapetypes = [] + self.shapecolors = [] - for shapetype in shapetypes: - supported_datasources.append('fovea-'+shapetype) - supported_datasources.append('presence-'+shapetype) + for key, objecttype in OBJECTS.items(): + for shapeline in objecttype['shape_grid']: + for shape in shapeline: + if shape is not None and shape.type not in self.shapetypes: + self.shapetypes.append(shape.type) + if shape is not None and shape.color not in self.shapecolors: + self.shapecolors.append(shape.color) - for shapecolor in shapecolors: - supported_datasources.append("fovea-"+shapecolor) - supported_datasources.append("presence-"+shapecolor) + for shapetype in self.shapetypes: + self.datasources['fovea-' + shapetype] = 0 + self.datasources['presence-' + shapetype] = 0 - def __init__(self, world, uid=None, **data): - super(StructuredObjects, self).__init__(world, uid, **data) + for shapecolor in self.shapecolors: + self.datasources["fovea-" + shapecolor] = 0 + self.datasources["presence-" + shapecolor] = 0 self.currentobject = None self.scene = None @@ -42,7 +42,7 @@ def __init__(self, world, uid=None, **data): self.scene.load_object("PalmTree", OBJECTS["PalmTree"]["shape_grid"]) def initialize_worldobject(self, data): - if not "position" in data: + if "position" not in data: self.position = self.world.groundmap['start_position'] def get_datasource(self, key): @@ -58,7 +58,7 @@ def get_datasource(self, key): return WorldAdapter.get_datasource(self, key) def update_data_sources_and_targets(self): - """called on every world simulation step to advance the life of the agent""" 
+ """called on every world calculation step to advance the life of the agent""" # we don't move, for now self.position = self.world.get_movement_result(self.position, (0, 0)) diff --git a/micropsi_core/world/minecraft/minecraft.py b/micropsi_core/world/minecraft/minecraft.py index 0f84bcd2..262e3f50 100644 --- a/micropsi_core/world/minecraft/minecraft.py +++ b/micropsi_core/world/minecraft/minecraft.py @@ -1,12 +1,12 @@ from threading import Thread +from spock import plugins as spockplugins from spock.client import Client -from spock.plugins import DefaultPlugins from spock.plugins.core.event import EventPlugin from spock.plugins.helpers.clientinfo import ClientInfoPlugin from spock.plugins.helpers.move import MovementPlugin -from spock.plugins.helpers.world import WorldPlugin from spock.plugins.helpers.reconnect import ReConnectPlugin +from spock.plugins.helpers.world import WorldPlugin from micropsi_core.world.world import World from micropsi_core.world.worldadapter import WorldAdapter @@ -18,8 +18,10 @@ class Minecraft(World): """ - mandatory: list of world adapters that are supported + A minecraft world. + Connects to a minecraft server and serves as a bridge between agent and server. See config.ini for configuration """ + supported_worldadapters = [ 'MinecraftWorldAdapter', 'MinecraftBraitenberg', @@ -30,29 +32,26 @@ class Minecraft(World): assets = { 'template': 'minecraft/minecraft.tpl', - 'js': 'minecraft/minecraft.js', + 'paperjs': 'minecraft/minecraft.js', 'x': 256, 'y': 256, } - # thread and spock only exist once - instances = { - 'spock': None, - 'thread': None - } - - def __init__(self, filename, world_type="Minecraft", name="", owner="", engine=None, uid=None, version=1): + def __init__(self, filename, world_type="Minecraft", name="", owner="", engine=None, uid=None, version=1, config={}): """ Initializes spock client including MicropsiPlugin, starts minecraft communication thread. """ from micropsi_core.runtime import add_signal_handler + self.instances = { + 'spock': None, + 'thread': None + } # do spock things first, then initialize micropsi world because the latter requires self.spockplugin - # register all necessary spock plugins # DefaultPlugins contain EventPlugin, NetPlugin, TimerPlugin, AuthPlugin, # ThreadPoolPlugin, StartPlugin and KeepalivePlugin - plugins = DefaultPlugins + plugins = spockplugins.DefaultPlugins plugins.append(ClientInfoPlugin) plugins.append(MovementPlugin) plugins.append(WorldPlugin) @@ -127,13 +126,20 @@ def get_config(self): def kill_minecraft_thread(self, *args): """ """ - self.spockplugin.event.kill() - self.instances['thread'].join() + if hasattr(self, 'spockplugin'): + self.spockplugin.event.kill() + self.instances['thread'].join() # self.spockplugin.threadpool.shutdown(False) + def __del__(self): + from importlib import reload + self.kill_minecraft_thread() + reload(spockplugins) + class Minecraft2D(Minecraft): - """ mandatory: list of world adapters that are supported""" + """ A Minecraft world that offers a 2d visualization of the agent's perspective""" + supported_worldadapters = [ 'MinecraftWorldAdapter', 'MinecraftGraphLocomotion' @@ -141,18 +147,28 @@ class Minecraft2D(Minecraft): assets = { 'template': 'minecraft/minecraft.tpl', - 'js': 'minecraft/minecraft2d.js', + 'paperjs': 'minecraft/minecraft2d.js', } def step(self): """ - Is called on every world step to advance the simulation. + Is called on every world step to advance the calculation. 
""" World.step(self) # a 2D perspective projection self.get_perspective_projection(self.spockplugin.clientinfo.position) + def get_world_view(self, step): + """ returns a list of world objects, and the current step of the calculation """ + return { + 'objects': self.get_world_objects(), + 'agents': self.data.get('agents', {}), + 'current_step': self.current_step, + 'projection': self.data['projection'], + 'assets': self.assets + } + def get_perspective_projection(self, agent_info): """ """ @@ -322,9 +338,6 @@ class MinecraftWorldAdapter(WorldAdapter): moves into one of the four cardinal directions ( until it dies ). """ - supported_datasources = ['x', 'y', 'z', 'yaw', 'pitch', 'groundtype'] - supported_datatargets = ['go_north', 'go_east', 'go_west', 'go_south', 'yaw', 'pitch'] - spawn_position = { 'x': -105, 'y': 63, @@ -334,6 +347,9 @@ class MinecraftWorldAdapter(WorldAdapter): def __init__(self, world, uid=None, **data): world.spockplugin.clientinfo.spawn_position = self.spawn_position WorldAdapter.__init__(self, world, uid=uid, **data) + self.datasources = dict((i, 0) for i in ['x', 'y', 'z', 'yaw', 'pitch', 'groundtype']) + self.datatargets = dict((i, 0) for i in ['go_north', 'go_east', 'go_west', 'go_south', 'yaw', 'pitch']) + def initialize_worldobject(self, data): @@ -345,7 +361,7 @@ def initialize_worldobject(self, data): self.datasources['groundtype'] = self.get_groundtype() def update_data_sources_and_targets(self): - """ Advances the agent's life on every cycle of the world simulation. """ + """ Advances the agent's life on every cycle of the world calculation. """ import random # translate data targets @@ -405,25 +421,27 @@ def get_groundtype(self): class MinecraftBraitenberg(WorldAdapter): - supported_datasources = [ - 'diamond_offset_x', - 'diamond_offset_z', - 'grd_stone', - 'grd_dirt', - 'grd_wood', - 'grd_coal', - 'obstcl_x+', - 'obstcl_x-', - 'obstcl_z+', - 'obstcl_z-' - ] - supported_datatargets = [ - 'move_x', - 'move_z' - ] + def __init__(self, world, uid=None, **data): + super().__init__(world, uid, **data) + self.datasources = { + 'diamond_offset_x': 0, + 'diamond_offset_z': 0, + 'grd_stone': 0, + 'grd_dirt': 0, + 'grd_wood': 0, + 'grd_coal': 0, + 'obstcl_x+': 0, + 'obstcl_x-': 0, + 'obstcl_z+': 0, + 'obstcl_z-': 0 + } + self.datatargets = { + 'move_x': 0, + 'move_z': 0 + } def update_data_sources_and_targets(self): - """called on every world simulation step to advance the life of the agent""" + """called on every world calculation step to advance the life of the agent""" # find diamond bot_x = self.world.spockplugin.clientinfo.position['x'] bot_y = self.world.spockplugin.clientinfo.position['y'] diff --git a/micropsi_core/world/minecraft/minecraft_graph_locomotion.py b/micropsi_core/world/minecraft/minecraft_graph_locomotion.py index a6a38c9f..75b6faa8 100644 --- a/micropsi_core/world/minecraft/minecraft_graph_locomotion.py +++ b/micropsi_core/world/minecraft/minecraft_graph_locomotion.py @@ -9,26 +9,6 @@ class MinecraftGraphLocomotion(WorldAdapter): - supported_datasources = [ - 'health', - 'food', - 'temperature', - 'food_supply', - 'fatigue', - 'awake', - 'current_location_index' - ] - - supported_datatargets = [ - 'take_exit_one', - 'take_exit_two', - 'take_exit_three', - 'pitch', - 'yaw', - 'eat', - 'sleep' - ] - loco_node_template = { 'uid': "", 'name': "", @@ -168,16 +148,22 @@ class MinecraftGraphLocomotion(WorldAdapter): def __init__(self, world, uid=None, **data): super().__init__(world, uid, **data) - self.datatarget_feedback = {} - 
self.datatarget_history = {} - for key in self.datatargets: - self.datatarget_feedback[key] = 0 - self.datatarget_history[key] = 0 + self.datasources = { + 'health': 1, + 'food': 1, + 'temperature': 0.5, + 'food_supply': 0, + 'fatigue': 0, + 'awake': 1, + 'current_location_index': 0 + } - self.datasources['health'] = 1 - self.datasources['food'] = 1 - self.datasources['temperature'] = 0.5 - self.datasources['awake'] = 1 + targets = ['take_exit_one', 'take_exit_two', 'take_exit_three', 'pitch', 'yaw', 'eat', 'sleep'] + self.datatarget_history = {} + for t in targets: + self.datatargets[t] = 0 + self.datatarget_feedback[t] = 0 + self.datatarget_history[t] = 0 # a collection of conditions to check on every update(..), eg., for action feedback self.waiting_list = [] @@ -218,7 +204,7 @@ def set_datasources(self, event, data): self.datasources['food'] = self.spockplugin.clientinfo.health['food'] / 20 def update_data_sources_and_targets(self): - """called on every world simulation step to advance the life of the agent""" + """called on every world calculation step to advance the life of the agent""" self.datasources['awake'] = 0 if self.sleeping else 1 diff --git a/micropsi_core/world/minecraft/minecraft_histogram_vision.py b/micropsi_core/world/minecraft/minecraft_histogram_vision.py index e3ed3a6b..0d110739 100644 --- a/micropsi_core/world/minecraft/minecraft_histogram_vision.py +++ b/micropsi_core/world/minecraft/minecraft_histogram_vision.py @@ -6,32 +6,6 @@ class MinecraftHistogramVision(MinecraftGraphLocomotion, MinecraftProjectionMixin): - supported_datasources = MinecraftGraphLocomotion.supported_datasources + [ - 'fov_x', # fovea sensors receive their input from the fovea actors - 'fov_y', - 'fov_hist__-01', # these names must be the most commonly observed block types - 'fov_hist__000', - 'fov_hist__001', - 'fov_hist__002', - 'fov_hist__003', - 'fov_hist__004', - 'fov_hist__009', - 'fov_hist__012', - 'fov_hist__017', - 'fov_hist__018', - 'fov_hist__020', - 'fov_hist__026', - 'fov_hist__031', - 'fov_hist__064', - 'fov_hist__106', - ] - - supported_datatargets = MinecraftGraphLocomotion.supported_datatargets + [ - 'orientation', - 'fov_x', - 'fov_y' - ] - # specs for vision /fovea # focal length larger 0 means zoom in, smaller 0 means zoom out # ( small values of focal length distort the image if things are close ) @@ -56,13 +30,40 @@ class MinecraftHistogramVision(MinecraftGraphLocomotion, MinecraftProjectionMixi def __init__(self, world, uid=None, **data): super().__init__(world, uid, **data) + self.datasources.update({ + 'fov_x': 0, # fovea sensors receive their input from the fovea actors + 'fov_y': 0, + 'fov_hist__-01': 0, # these names must be the most commonly observed block types + 'fov_hist__000': 0, + 'fov_hist__001': 0, + 'fov_hist__002': 0, + 'fov_hist__003': 0, + 'fov_hist__004': 0, + 'fov_hist__009': 0, + 'fov_hist__012': 0, + 'fov_hist__017': 0, + 'fov_hist__018': 0, + 'fov_hist__020': 0, + 'fov_hist__026': 0, + 'fov_hist__031': 0, + 'fov_hist__064': 0, + 'fov_hist__106': 0, + }) + + targets = { + 'orientation': 0, + 'fov_x': 0, + 'fov_y': 0 + } + + self.datatargets.update(targets) + self.datatarget_feedback.update(targets) # add datasources for fovea for i in range(self.num_fov): for j in range(self.num_fov): name = "fov__%02d_%02d" % (i, j) self.datasources[name] = 0. 
- MinecraftHistogramVision.supported_datasources.append(name) self.simulated_vision = False if 'simulate_vision' in cfg['minecraft']: @@ -83,7 +84,7 @@ def __init__(self, world, uid=None, **data): self.record_file = open(cfg['minecraft']['record_vision'], 'a') def update_data_sources_and_targets(self): - """called on every world simulation step to advance the life of the agent""" + """called on every world calculation step to advance the life of the agent""" if self.waiting_for_spock: super().update_data_sources_and_targets() diff --git a/micropsi_core/world/minecraft/minecraft_vision.py b/micropsi_core/world/minecraft/minecraft_vision.py index 51a98f71..b6052c77 100644 --- a/micropsi_core/world/minecraft/minecraft_vision.py +++ b/micropsi_core/world/minecraft/minecraft_vision.py @@ -47,14 +47,12 @@ def __init__(self, world, uid=None, **data): for j in range(self.len_y): name = "fov__%02d_%02d" % (i, j) self.datasources[name] = 0. - self.supported_datasources.append(name) # add datasources for fovea position sensors aka fov_pos__*_* for x in range(self.tiling_x): for y in range(self.tiling_y): name = "fov_pos__%02d_%02d" % (y, x) self.datasources[name] = 0. - self.supported_datasources.append(name) # add fovea actors to datatargets, datatarget_feedback, datatarget_history, and actions for x in range(self.tiling_x): @@ -63,7 +61,6 @@ def __init__(self, world, uid=None, **data): self.datatargets[name] = 0. self.datatarget_feedback[name] = 0. self.datatarget_history[name] = 0. - self.supported_datatargets.append(name) self.actions.append(name) self.simulated_vision = False @@ -87,7 +84,7 @@ def __init__(self, world, uid=None, **data): self.visual_field = {} def update_data_sources_and_targets(self): - """called on every world simulation step to advance the life of the agent""" + """called on every world calculation step to advance the life of the agent""" # first thing when spock initialization is done, determine current loco node if self.simulated_vision: diff --git a/micropsi_core/world/timeseries/timeseries.py b/micropsi_core/world/timeseries/timeseries.py new file mode 100644 index 00000000..f1fc158a --- /dev/null +++ b/micropsi_core/world/timeseries/timeseries.py @@ -0,0 +1,198 @@ +""" +Worlds and bodies for agents whose habitats are ordered sequences of vectors. +""" +import os +from configuration import config as cfg +from micropsi_core.world.world import World +from micropsi_core.world.worldadapter import WorldAdapter, ArrayWorldAdapter +import numpy as np +from datetime import datetime + + +class TimeSeries(World): + """ A world that cycles through a fixed time series loaded from a file. + The file should be a numpy archive with the following fields: + 'data': numpy array of shape (nr of ids) x (nr of timestamps) + 'timestamps', a list of timestamps - the legend for the data's second axis + 'ids': a list of IDs - the legend for data's first axis. 
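+
+        A compatible archive can be created, for instance, like this (shapes
+        and names are illustrative):
+
+            import numpy as np
+            from datetime import datetime
+            np.savez("timeseries.npz",
+                     data=np.random.rand(3, 100),
+                     timestamps=[datetime(2016, 4, 22)] * 100,
+                     ids=["id_a", "id_b", "id_c"])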
+ """ + supported_worldadapters = ['TimeSeriesRunner'] + + assets = { + 'js': "timeseries/timeseries.js", + 'template': 'timeseries/timeseries.tpl' + } + + def __init__(self, filename, world_type="TimeSeries", name="", owner="", engine=None, uid=None, version=1, config={}): + World.__init__(self, filename, world_type=world_type, name=name, owner=owner, uid=uid, version=version, config=config) + + self.data['assets'] = self.assets + + filename = config.get('time_series_data_file', "timeseries.npz") + if os.path.isabs(filename): + path = filename + else: + path = os.path.join(cfg['micropsi2']['data_directory'], filename) + self.logger.info("loading timeseries from %s for world %s" % (path, uid)) + + self.realtime_per_entry = int(config['realtime_per_entry']) + self.last_realtime_step = datetime.utcnow().timestamp() * 1000 + + try: + with np.load(path) as f: + self.timeseries = f['data'] + self.ids = f['ids'] + self.timestamps = f['timestamps'] + except IOError as error: + self.logger.error("Could not load data file %s, error was: %s" % (path, str(error))) + self.ids = [0] + self.timeseries[[0, 0, 0]] + self.timestamps = [0] + self.len_ts = 1 + return + + # todo use the new configurable world options. + dummydata = config['dummy_data'] == "True" + z_transform = config['z_transform'] == "True" + clip_and_scale = config['clip_and_scale'] == "True" + sigmoid = config['sigmoid'] == "True" + self.shuffle = config['shuffle'] == "True" + + if clip_and_scale and sigmoid: + self.logger.warn("clip_and_scale and sigmoid cannot both be configured, choosing sigmoid") + clip_and_scale = False + + def sigm(X): + """ sigmoid that avoids float overflows for very small inputs. + expects a numpy float array. + """ + cutoff = np.log(np.finfo(X.dtype).max) - 1 + X[np.nan_to_num(X) <= -cutoff] = -cutoff + return 1. / (1. + np.exp(-X)) + + if (z_transform or clip_and_scale or sigmoid) and not dummydata: + data_z = np.empty_like(self.timeseries) + data_z[:] = np.nan + pstds = [] + for i, row in enumerate(self.timeseries): + if not np.all(np.isnan(row)): + std = np.sqrt(np.nanvar(row)) + if std > 0: + if not clip_and_scale: + row_z = (row - np.nanmean(row)) / std + if clip_and_scale: + row_z = row - np.nanmean(row) + pstd = std * 4 + row_z[np.nan_to_num(row_z) > pstd] = pstd + row_z[np.nan_to_num(row_z) < -pstd] = -pstd + row_z = ((row_z / pstd) + 1) * 0.5 + data_z[i,:] = row_z + self.timeseries = data_z if not sigmoid else sigm(data_z) + + if dummydata: + self.logger.warn("! Using dummy data") + n_ids = self.timeseries.shape[0] + self.timeseries = np.tile(np.random.rand(n_ids,1),(1,10)) + + self.len_ts = self.timeseries.shape[1] + + # todo: option to use only a subset of the data (e.g. for training/test) + + def step(self): + now = datetime.utcnow().timestamp() * 1000 + if now - self.realtime_per_entry > self.last_realtime_step: + self.current_step += 1 + for uid in self.agents: + with self.agents[uid].datasource_lock: + self.agents[uid].update() + self.last_realtime_step = now + + @property + def state(self): + t = (self.current_step - 1) % self.len_ts + if self.shuffle: + if t == 0: + idxs = np.arange(self.len_ts) + self.permutation = np.random.permutation(idxs) + t = self.permutation[t] + return self.timeseries[:, t] + + @staticmethod + def get_config_options(): + """ Returns a list of configuration-options for this world. 
+ Expected format: + [{ + 'name': 'param1', + 'description': 'this is just an example', + 'options': ['value1', 'value2'], + 'default': 'value1' + }] + description, options and default are optional settings + """ + return [ + {'name': 'time_series_data_file', + 'description': 'The data file with the time series', + 'default': 'timeseries.npz'}, + {'name': 'shuffle', + 'description': 'Randomize order of presentation', + 'default': 'True', + 'options': ["True", "False"]}, + {'name': 'z_transform', + 'description': 'For each ID, center on mean & normalize by standard deviation', + 'default': 'False', + 'options': ["True", "False"]}, + {'name': 'clip_and_scale', + 'description': 'For each ID, center on mean & clip to 4 standard deviations and rescale to [0,1].', + 'default': 'False', + 'options': ["True", "False"]}, + {'name': 'sigmoid', + 'description': 'For each ID, z-transform and apply a sigmoid activation function', + 'default': 'False', + 'options': ["True", "False"]}, + {'name': 'realtime_per_entry', + 'description': 'Present each pattern from the data for this number of milliseconds', + 'default': '0'}, + {'name': 'dummy_data', + 'description': 'Present the same random pattern in each step (instead of the actual time series data)', + 'default': 'False', + 'options': ["True", "False"]} + ] + + def set_user_data(self, data): + """ Allow the user to set the step of this world""" + if 'step' in data: + self.last_realtime_step = datetime.utcnow().timestamp() * 1000 + self.current_step = data['step'] + for uid in self.agents: + with self.agents[uid].datasource_lock: + self.agents[uid].update() + + def get_world_view(self, step): + return { + 'first_timestamp': self.timestamps[0].isoformat(), + 'last_timestamp': self.timestamps[-1].isoformat(), + 'total_timestamps': len(self.timestamps), + 'current_timestamp': self.timestamps[self.current_step].isoformat(), + 'current_step': self.current_step, + } + + +class TimeSeriesRunner(ArrayWorldAdapter): + + def __init__(self, world, uid=None, **data): + super().__init__(world, uid, **data) + + self.available_datatargets = [] + self.available_datasources = ["update"] + + for idx, ID in enumerate(self.world.ids): + self.available_datasources.append(str(ID)) + + def get_available_datasources(self): + return self.available_datasources + + def get_available_datatargets(self): + return self.available_datatargets + + def update_data_sources_and_targets(self): + self.datasource_values = self.world.state \ No newline at end of file diff --git a/micropsi_core/world/world.py b/micropsi_core/world/world.py index 6517dbdd..39503e3d 100644 --- a/micropsi_core/world/world.py +++ b/micropsi_core/world/world.py @@ -65,10 +65,24 @@ def is_active(self): def is_active(self, is_active): self.data['is_active'] = is_active + @staticmethod + def get_config_options(): + """ Returns a list of configuration-options for this world. + Expected format: + [{ + 'name': 'param1', + 'description': 'this is just an example', + 'options': ['value1', 'value2'], + 'default': 'value1' + }] + description, options and default are optional settings + """ + return [] + supported_worldadapters = ['Default'] - def __init__(self, filename, world_type="", name="", owner="", uid=None, engine=None, version=WORLD_VERSION): - """Create a new MicroPsi simulation environment. + def __init__(self, filename, world_type="", name="", owner="", uid=None, engine=None, version=WORLD_VERSION, config={}): + """Create a new MicroPsi world environment. 
Arguments: filename: the path and filename of the world data @@ -84,7 +98,8 @@ def __init__(self, filename, world_type="", name="", owner="", uid=None, engine= "version": WORLD_VERSION, # used to check compatibility of the world data "objects": {}, "agents": {}, - "current_step": 0 + "current_step": 0, + "config": config } folder = self.__module__.split('.') @@ -105,7 +120,7 @@ def __init__(self, filename, world_type="", name="", owner="", uid=None, engine= self.agents = {} self.objects = {} - #self.the_image = None + # self.the_image = None self.load() @@ -172,7 +187,7 @@ def step(self): del self.agents[uid] def get_world_view(self, step): - """ returns a list of world objects, and the current step of the simulation """ + """ returns a list of world objects, and the current step of the calculation """ return { 'objects': self.get_world_objects(), 'agents': self.data.get('agents', {}), @@ -219,10 +234,10 @@ def get_world_objects(self, type=None): objects[uid] = obj return objects - def register_nodenet(self, worldadapter, nodenet): + def register_nodenet(self, worldadapter, nodenet_uid, nodenet_name=None): """Attempts to register a nodenet at this world. - Returns True, nodenet_uid if successful, + Returns True, spawned_agent_instance if successful, Returns False, error_message if not successful The methods checks if an existing worldadapterish object without a bound nodenet exists, and if not, @@ -232,42 +247,41 @@ def register_nodenet(self, worldadapter, nodenet): We don't do it the other way around, because the soulless agent body may have been loaded as part of the world definition itself. """ - if nodenet.uid in self.agents: - if self.agents[nodenet.uid].__class__.__name__ == worldadapter: - return True, nodenet.uid + if nodenet_uid in self.agents: + if self.agents[nodenet_uid].__class__.__name__ == worldadapter: + return True, self.agents[nodenet_uid] else: return False, "Nodenet agent already exists in this world, but has the wrong type" - return self.spawn_agent(worldadapter, nodenet, name=nodenet.name) + return self.spawn_agent(worldadapter, nodenet_uid, nodenet_name=nodenet_name) - def unregister_nodenet(self, nodenet): + def unregister_nodenet(self, nodenet_uid): """Removes the connection between a nodenet and its incarnation in this world; may remove the corresponding agent object """ - if nodenet.uid in self.agents: + if nodenet_uid in self.agents: # stop corresponding nodenet - micropsi_core.runtime.stop_nodenetrunner(nodenet.uid) + micropsi_core.runtime.stop_nodenetrunner(nodenet_uid) + del self.agents[nodenet_uid] + if nodenet_uid in self.data['agents']: + del self.data['agents'][nodenet_uid] - # remove agent - nodenet.worldadapter_instance = None - del self.agents[nodenet.uid] - if nodenet.uid in self.data['agents']: - del self.data['agents'][nodenet.uid] - - def spawn_agent(self, worldadapter_name, nodenet, **options): + def spawn_agent(self, worldadapter_name, nodenet_uid, **options): """Creates an agent object, - Returns True, nodenet_uid if successful, + Returns True, spawned_agent_instance if successful, Returns False, error_message if not successful """ if worldadapter_name in self.supported_worldadapters: - self.agents[nodenet.uid] = self.supported_worldadapters[worldadapter_name](self, uid=nodenet.uid, **options) - nodenet.worldadapter_instance = self.agents[nodenet.uid] - return True, nodenet.uid + self.agents[nodenet_uid] = self.supported_worldadapters[worldadapter_name]( + self, + uid=nodenet_uid, + name=options.get('nodenet_name', worldadapter_name), + 
**options) + return True, self.agents[nodenet_uid] else: self.logger.error("World %s does not support Worldadapter %s" % (self.name, worldadapter_name)) return False, "World %s does not support Worldadapter %s" % (self.name, worldadapter_name) - def set_object_properties(self, uid, position=None, orientation=None, name=None, parameters=None): """set attributes of the world object 'uid'; only supplied attributes will be changed. @@ -307,6 +321,15 @@ def set_agent_properties(self, uid, position=None, orientation=None, name=None, return True return False + def set_user_data(self, data): + """ Sets some data from the user. Implement this in your worldclass to allow + the user to set certain properties of this world""" + pass + + def __del__(self): + """Empty destructor""" + pass + # imports of individual world types: try: @@ -327,3 +350,12 @@ def set_agent_properties(self, uid, position=None, orientation=None, name=None, pass else: sys.stdout.write("Could not import minecraft world.\nError: %s \n\n" % e.msg) + +try: + from micropsi_core.world.timeseries import timeseries +except ImportError as e: + if e.msg == "No module named 'numpy'": + # ignore silently + pass + else: + sys.stdout.write("Could not import timeseries world.\nError: %s \n\n" % e.msg) diff --git a/micropsi_core/world/worldadapter.py b/micropsi_core/world/worldadapter.py index 147a302b..073e56f6 100644 --- a/micropsi_core/world/worldadapter.py +++ b/micropsi_core/world/worldadapter.py @@ -11,7 +11,7 @@ between actions and sensory confirmation (among other things). During the initialization of the agent type, it might want to register an agent body object within the -world simulation (for robotic bodies, the equivalent might consist in powering up/setup/boot operations. +world environment (for robotic bodies, the equivalent might consist in powering up/setup/boot operations. Thus, agent types should be instantiated by the world, inherit from a moving object class of some kind and treated as parts of the world. """ @@ -21,29 +21,22 @@ from threading import Lock from micropsi_core.world.worldobject import WorldObject +from abc import ABCMeta, abstractmethod -class WorldAdapter(WorldObject): +class WorldAdapter(WorldObject, metaclass=ABCMeta): """Transmits data between agent and environment. The agent writes activation values into data targets, and receives it from data sources. The world adapter takes care of translating between the world and these values at each world cycle. 
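+
+    A rough sketch of one agent/world cycle in terms of the methods below
+    ('engine_l' and 'brightness_l' are example names from the island world):
+
+        adapter.add_to_datatarget('engine_l', 0.5)            # agent writes an actuator value
+        adapter.update()                                      # world advances the adapter
+        value = adapter.get_datasource_value('brightness_l')  # agent reads a sensor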
""" - supported_datasources = [] - supported_datatargets = [] def __init__(self, world, uid=None, **data): self.datasources = {} - for key in self.supported_datasources: - self.datasources[key] = 0 self.datatargets = {} - for key in self.supported_datatargets: - self.datatargets[key] = 0 self.datatarget_feedback = {} self.datasource_lock = Lock() - self.datasource_snapshots = {} WorldObject.__init__(self, world, category='agents', uid=uid, **data) - self.snapshot() def initialize_worldobject(self, data): for key in self.datasources: @@ -54,33 +47,40 @@ def initialize_worldobject(self, data): self.datatargets[key] = data['datatargets'][key] self.datatarget_feedback[key] = 0 - # agent facing methods: - def snapshot(self): - """called by the agent every netstep to create a consistent set of sensory input""" - with self.datasource_lock: - self.datasource_snapshots = self.datasources.copy() - def get_available_datasources(self): """returns a list of identifiers of the datasources available for this world adapter""" - return list(self.datasources.keys()) + return sorted(list(self.datasources.keys())) def get_available_datatargets(self): """returns a list of identifiers of the datatargets available for this world adapter""" - return list(self.datatargets.keys()) + return sorted(list(self.datatargets.keys())) - def get_datasource(self, key): + def get_datasource_value(self, key): """allows the agent to read a value from a datasource""" - return self.datasource_snapshots.get(key) + return self.datasources.get(key) + + def get_datasource_values(self): + """allows the agent to read all datasource values""" + return [float(self.datasources[x]) for x in self.get_available_datasources()] def add_to_datatarget(self, key, value): """allows the agent to write a value to a datatarget""" if key in self.datatargets: self.datatargets[key] += value - def get_datatarget_feedback(self, key): + def set_datatarget_values(self, values): + """allows the agent to write a list of value to the datatargets""" + for i, key in enumerate(self.get_available_datatargets()): + self.datatargets[key] = values[i] + + def get_datatarget_feedback_value(self, key): """get feedback whether the actor-induced action succeeded""" return self.datatarget_feedback.get(key, 0) + def get_datatarget_feedback_values(self): + """allows the agent to read all datasource values""" + return [float(self.datatarget_feedback[x]) for x in self.get_available_datatargets()] + def set_datatarget_feedback(self, key, value): """set feedback for the given datatarget""" self.datatarget_feedback[key] = value @@ -92,9 +92,10 @@ def update(self): def reset_datatargets(self): """ resets (zeros) the datatargets """ - for datatarget in self.supported_datatargets: + for datatarget in self.datatargets: self.datatargets[datatarget] = 0 + @abstractmethod def update_data_sources_and_targets(self): """must be implemented by concrete world adapters to read datatargets and fill datasources""" pass @@ -105,9 +106,15 @@ def is_alive(self): class Default(WorldAdapter): - - supported_datasources = ['static_on', 'random', 'static_off'] - supported_datatargets = ['echo'] + """ + A default Worldadapter, that provides example-datasources and -targets + """ + def __init__(self, world, uid=None, **data): + super().__init__(world, uid=uid, **data) + self.datasources = dict((s, 0) for s in ['static_on', 'random', 'static_off']) + self.datatargets = {'echo': 0} + self.datatarget_feedback = {'echo': 0} + self.update_data_sources_and_targets() def update_data_sources_and_targets(self): 
         import random
@@ -115,3 +122,83 @@ def update_data_sources_and_targets(self):
         self.datatarget_feedback['echo'] = self.datatargets['echo']
         self.datasources['static_on'] = 1
         self.datasources['random'] = random.uniform(0, 1)
+
+
+class ArrayWorldAdapter(WorldAdapter, metaclass=ABCMeta):
+    """
+    The ArrayWorldAdapter base class avoids python dictionaries and loops when transmitting values
+    to nodenet engines.
+    Engines that bulk-query values, such as the theano_engine, will be faster.
+    Numpy arrays can be passed directly into the engine.
+    """
+    def __init__(self, world, uid=None, **data):
+        WorldAdapter.__init__(self, world, uid=uid, **data)
+        self.datasource_values = []
+        self.datatarget_values = []
+        self.datatarget_feedback_values = []
+
+    def get_datasource_value(self, key):
+        """allows the agent to read a value from a datasource"""
+        index = self.get_available_datasources().index(key)
+        return self.datasource_values[index]
+
+    def get_datasource_values(self):
+        """allows the agent to read all datasource values"""
+        return self.datasource_values
+
+    def add_to_datatarget(self, key, value):
+        """allows the agent to write a value to a datatarget"""
+        index = self.get_available_datatargets().index(key)
+        self.datatarget_values[index] += value
+
+    def get_datatarget_feedback_value(self, key):
+        """get feedback whether the actor-induced action succeeded"""
+        index = self.get_available_datatargets().index(key)
+        return self.datatarget_feedback_values[index]
+
+    def get_datatarget_feedback_values(self):
+        """allows the agent to read all datatarget feedback values"""
+        return self.datatarget_feedback_values
+
+    def set_datatarget_feedback(self, key, value):
+        """set feedback for the given datatarget"""
+        index = self.get_available_datatargets().index(key)
+        self.datatarget_feedback_values[index] = value
+
+    def set_datatarget_values(self, values):
+        """allows the agent to write a list of values to the datatargets"""
+        self.datatarget_values = values
+
+    def reset_datatargets(self):
+        """ resets (zeros) the datatargets """
+        pass
+
+    @abstractmethod
+    def get_available_datasources(self):
+        """
+        must be implemented by the concrete world adapter and return a list of datasource name strings,
+        in the same order as the values returned by get_datasource_values()
+        """
+        pass
+
+    @abstractmethod
+    def get_available_datatargets(self):
+        """
+        must be implemented by the concrete world adapter and return a list of datatarget name strings,
+        in the same order as the values returned by get_datatarget_feedback_values()
+        """
+        pass
+
+    @abstractmethod
+    def update_data_sources_and_targets(self):
+        """
+        must be implemented by concrete world adapters to read and set the following arrays:
+        datasource_values
+        datatarget_values
+        datatarget_feedback_values
+
+        Array sizes need to be equal to the corresponding responses of get_available_datasources() and
+        get_available_datatargets().
+        Values of the superclass' dict objects will be bypassed and ignored.
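+
+        A minimal conforming sketch (class and value names are illustrative,
+        not part of this module):
+
+            class NoiseAdapter(ArrayWorldAdapter):
+                def get_available_datasources(self):
+                    return ['noise_1', 'noise_2']
+                def get_available_datatargets(self):
+                    return ['echo']
+                def update_data_sources_and_targets(self):
+                    import random
+                    # fill the sensor array, acknowledge actuators one-to-one
+                    self.datasource_values = [random.random(), random.random()]
+                    self.datatarget_feedback_values = list(self.datatarget_values)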
+ """ + pass diff --git a/micropsi_server/micropsi_app.py b/micropsi_server/micropsi_app.py index 5fc6e815..10c22f60 100755 --- a/micropsi_server/micropsi_app.py +++ b/micropsi_server/micropsi_app.py @@ -33,6 +33,8 @@ VERSION = cfg['micropsi2']['version'] APPTITLE = cfg['micropsi2']['apptitle'] +INCLUDE_CONSOLE = cfg['micropsi2']['host'] == 'localhost' + APP_PATH = os.path.dirname(__file__) micropsi_app = Bottle() @@ -170,13 +172,13 @@ def server_static(filepath): def index(): first_user = usermanager.users == {} user_id, permissions, token = get_request_data() - return _add_world_list("viewer", mode="all", first_user=first_user, logging_levels=runtime.get_logging_levels(), version=VERSION, user_id=user_id, permissions=permissions) + return _add_world_list("viewer", mode="all", first_user=first_user, logging_levels=runtime.get_logging_levels(), version=VERSION, user_id=user_id, permissions=permissions, console=INCLUDE_CONSOLE) @micropsi_app.route("/nodenet") def nodenet(): user_id, permissions, token = get_request_data() - return template("viewer", mode="nodenet", version=VERSION, user_id=user_id, permissions=permissions) + return template("viewer", mode="nodenet", version=VERSION, user_id=user_id, permissions=permissions, console=INCLUDE_CONSOLE) @micropsi_app.route("/monitors") @@ -485,7 +487,7 @@ def select_nodenet_from_console(nodenet_uid): result, uid = runtime.load_nodenet(nodenet_uid) if not result: return template("error", msg="Could not select nodenet") - response.set_cookie("selected_nodenet", nodenet_uid, path="/") + response.set_cookie("selected_nodenet", nodenet_uid + "/", path="/") redirect("/") @@ -578,7 +580,7 @@ def write_nodenet(): user_id, permissions, token = get_request_data() params = dict((key, request.forms.getunicode(key)) for key in request.forms) if "manage nodenets" in permissions: - result, nodenet_uid = runtime.new_nodenet(params['nn_name'], engine=params['nn_engine'], worldadapter=params['nn_worldadapter'], template=params.get('nn_template'), owner=user_id, world_uid=params.get('nn_world')) + result, nodenet_uid = runtime.new_nodenet(params['nn_name'], engine=params['nn_engine'], worldadapter=params['nn_worldadapter'], template=params.get('nn_template'), owner=user_id, world_uid=params.get('nn_world'), use_modulators=params.get('nn_modulators', False)) if result: return dict(status="success", msg="Nodenet created", nodenet_uid=nodenet_uid) else: @@ -617,7 +619,9 @@ def edit_world_form(): token = request.get_cookie("token") id = request.params.get('id', None) title = 'Edit World' if id is not None else 'New World' - return template("world_form.tpl", title=title, worldtypes=runtime.get_available_world_types(), + worldtypes = runtime.get_available_world_types() + return template("world_form.tpl", title=title, + worldtypes=worldtypes, version=VERSION, user_id=usermanager.get_user_id_for_session_token(token), permissions=usermanager.get_permissions_for_session_token(token)) @@ -626,9 +630,14 @@ def edit_world_form(): @micropsi_app.route("/world/edit", method="POST") def edit_world(): params = dict((key, request.forms.getunicode(key)) for key in request.forms) + type = params['world_type'] + config = {} + for p in params: + if p.startswith(type + '_'): + config[p[len(type) + 1:]] = params[p] user_id, permissions, token = get_request_data() if "manage worlds" in permissions: - result, uid = runtime.new_world(params['world_name'], params['world_type'], user_id) + result, uid = runtime.new_world(params['world_name'], params['world_type'], user_id, config=config) if 
result: return dict(status="success", msg="World created", world_uid=uid) else: @@ -699,31 +708,24 @@ def show_dashboard(): # ## ## ## ## ### ## # ## ###### ## ## ## # ## # ## ## ## ## ## ### -# ## ##### ##### ## ## +# ## ##### ##### ## ## JSON # # ################################################################# -@rpc("select_nodenet") -def select_nodenet(nodenet_uid): - return runtime.load_nodenet(nodenet_uid) +@rpc("get_nodenet_metadata") +def get_nodenet_metadata(nodenet_uid, nodespace='Root', include_links=True): + return True, runtime.get_nodenet_metadata(nodenet_uid) -@rpc("load_nodenet") -def load_nodenet(nodenet_uid, nodespace='Root', include_links=True): - result, uid = runtime.load_nodenet(nodenet_uid) - if result: - data = runtime.get_nodenet_data(nodenet_uid, nodespace, -1, include_links) - data['nodetypes'] = runtime.get_available_node_types(nodenet_uid) - data['recipes'] = runtime.get_available_recipes() - return True, data - else: - return False, uid +@rpc("get_nodes") +def get_nodes(nodenet_uid, nodespaces=[], include_links=True): + return True, runtime.get_nodes(nodenet_uid, nodespaces, include_links) @rpc("new_nodenet") -def new_nodenet(name, owner=None, engine='dict_engine', template=None, worldadapter=None, world_uid=None): +def new_nodenet(name, owner=None, engine='dict_engine', template=None, worldadapter=None, world_uid=None, use_modulators=None): if owner is None: owner, _, _ = get_request_data() return runtime.new_nodenet( @@ -732,12 +734,18 @@ def new_nodenet(name, owner=None, engine='dict_engine', template=None, worldadap worldadapter=worldadapter, template=template, owner=owner, - world_uid=world_uid) + world_uid=world_uid, + use_modulators=use_modulators) + + +@rpc("get_calculation_state") +def get_calculation_state(nodenet_uid, nodenet=None, nodenet_diff=None, world=None, monitors=None, dashboard=None): + return runtime.get_calculation_state(nodenet_uid, nodenet=nodenet, nodenet_diff=nodenet_diff, world=world, monitors=monitors, dashboard=dashboard) -@rpc("get_current_state") -def get_current_state(nodenet_uid, nodenet=None, nodenet_diff=None, world=None, monitors=None, dashboard=None): - return runtime.get_current_state(nodenet_uid, nodenet=nodenet, nodenet_diff=nodenet_diff, world=world, monitors=monitors, dashboard=dashboard) +@rpc("get_nodenet_changes") +def get_nodenet_changes(nodenet_uid, nodespaces=[], since_step=0): + return runtime.get_nodenet_changes(nodenet_uid, nodespaces=nodespaces, since_step=since_step) @rpc("generate_uid") @@ -745,11 +753,29 @@ def generate_uid(): return True, tools.generate_uid() +@rpc("create_auth_token") +def create_auth_token(user, password, remember=True): + # log in new user + token = usermanager.start_session(user, password, remember) + if token: + return True, token + else: + if user in usermanager.users: + return False, "User name and password do not match" + else: + return False, "User unknown" + +@rpc("invalidate_auth_token") +def invalidate_auth_token(token): + usermanager.end_session(token) + return True + + @rpc("get_available_nodenets") -def get_available_nodenets(user_id): - if user_id not in usermanager.users: +def get_available_nodenets(user_id=None): + if user_id and user_id not in usermanager.users: return False, 'User not found' - return True, runtime.get_available_nodenets(user_id) + return True, runtime.get_available_nodenets(owner=user_id) @rpc("delete_nodenet", permission_required="manage nodenets") @@ -774,8 +800,8 @@ def set_node_activation(nodenet_uid, node_uid, activation): return 
runtime.set_node_activation(nodenet_uid, node_uid, activation) -@rpc("start_simulation", permission_required="manage nodenets") -def start_simulation(nodenet_uid): +@rpc("start_calculation", permission_required="manage nodenets") +def start_calculation(nodenet_uid): return runtime.start_nodenetrunner(nodenet_uid) @@ -805,21 +831,26 @@ def get_runner_properties(): return True, runtime.get_runner_properties() -@rpc("get_is_simulation_running") -def get_is_simulation_running(nodenet_uid): +@rpc("get_is_calculation_running") +def get_is_calculation_running(nodenet_uid): return True, runtime.get_is_nodenet_running(nodenet_uid) -@rpc("stop_simulation", permission_required="manage nodenets") -def stop_simulation(nodenet_uid): +@rpc("stop_calculation", permission_required="manage nodenets") +def stop_calculation(nodenet_uid): return runtime.stop_nodenetrunner(nodenet_uid) -@rpc("step_simulation", permission_required="manage nodenets") -def step_simulation(nodenet_uid): +@rpc("step_calculation", permission_required="manage nodenets") +def step_calculation(nodenet_uid): return True, runtime.step_nodenet(nodenet_uid) +@rpc("revert_calculation", permission_required="manage nodenets") +def revert_calculation(nodenet_uid): + return runtime.revert_nodenet(nodenet_uid, True) + + @rpc("revert_nodenet", permission_required="manage nodenets") def revert_nodenet(nodenet_uid): return runtime.revert_nodenet(nodenet_uid) @@ -847,6 +878,12 @@ def merge_nodenet_rpc(nodenet_uid, nodenet_data): # World + +@rpc("step_nodenets_in_world") +def step_nodenets_in_world(world_uid, nodenet_uid=None, steps=1): + return runtime.step_nodenets_in_world(world_uid, nodenet_uid=nodenet_uid, steps=steps) + + @rpc("get_available_worlds") def get_available_worlds(user_id=None): data = {} @@ -864,9 +901,9 @@ def get_world_properties(world_uid): @rpc("get_worldadapters") -def get_worldadapters(world_uid): +def get_worldadapters(world_uid, nodenet_uid=None): try: - return True, runtime.get_worldadapters(world_uid) + return True, runtime.get_worldadapters(world_uid, nodenet_uid=nodenet_uid) except KeyError: return False, 'World %s not found' % world_uid @@ -914,7 +951,7 @@ def new_world(world_name, world_type, owner=None): @rpc("get_available_world_types") def get_available_world_types(): - return True, runtime.get_available_world_types() + return True, sorted(runtime.get_available_world_types().keys()) @rpc("delete_world", permission_required="manage worlds") @@ -928,10 +965,15 @@ def get_world_view(world_uid, step): @rpc("set_world_properties", permission_required="manage worlds") -def set_world_data(world_uid, world_name=None, owner=None): +def set_world_properties(world_uid, world_name=None, owner=None): return runtime.set_world_properties(world_uid, world_name, owner) +@rpc("set_world_data") +def set_world_data(world_uid, data): + return runtime.set_world_data(world_uid, data) + + @rpc("revert_world", permission_required="manage worlds") def revert_world(world_uid): return runtime.revert_world(world_uid) @@ -1016,19 +1058,24 @@ def get_nodespace_list(nodenet_uid): return True, runtime.get_nodespace_list(nodenet_uid) -@rpc("get_nodespace") -def get_nodespace(nodenet_uid, nodespace, step, include_links=True): - return True, runtime.get_nodenet_data(nodenet_uid, nodespace, step, include_links) - - @rpc("get_nodespace_activations") -def get_nodespace_activations(nodenet_uid, nodespace, last_call_step=-1): - return True, runtime.get_nodenet_activation_data(nodenet_uid, nodespace, last_call_step) +def 
get_nodespace_activations(nodenet_uid, nodespaces, last_call_step=-1):
+    return True, runtime.get_nodenet_activation_data(nodenet_uid, nodespaces, last_call_step)
 
 
 @rpc("get_nodespace_changes")
-def get_nodespace_changes(nodenet_uid, nodespace_uid, since_step):
-    return runtime.get_nodespace_changes(nodenet_uid, nodespace_uid, since_step)
+def get_nodespace_changes(nodenet_uid, nodespaces, since_step):
+    return runtime.get_nodespace_changes(nodenet_uid, nodespaces, since_step)
+
+
+@rpc("get_nodespace_properties")
+def get_nodespace_properties(nodenet_uid, nodespace_uid=None):
+    return True, runtime.get_nodespace_properties(nodenet_uid, nodespace_uid)
+
+
+@rpc("set_nodespace_properties")
+def set_nodespace_properties(nodenet_uid, nodespace_uid, properties):
+    return True, runtime.set_nodespace_properties(nodenet_uid, nodespace_uid, properties)
 
 
 @rpc("get_node")
@@ -1067,8 +1114,8 @@ def delete_nodes(nodenet_uid, node_uids):
 
 
 @rpc("delete_nodespace", permission_required="manage nodenets")
-def delete_nodespace(nodenet_uid, nodespace_uid):
-    return runtime.delete_nodespace(nodenet_uid, nodespace_uid)
+def delete_nodespace(nodenet_uid, nodespace):
+    return runtime.delete_nodespace(nodenet_uid, nodespace)
 
 
 @rpc("align_nodes", permission_required="manage nodenets")
@@ -1205,11 +1252,34 @@ def get_available_recipes():
     return True, runtime.get_available_recipes()
 
 
+@rpc("run_operation")
+def run_operation(nodenet_uid, name, parameters, selection_uids):
+    return runtime.run_operation(nodenet_uid, name, parameters, selection_uids)
+
+
+@rpc('get_available_operations')
+def get_available_operations():
+    return True, runtime.get_available_operations()
+
+
 @rpc('get_agent_dashboard')
 def get_agent_dashboard(nodenet_uid):
     return True, runtime.get_agent_dashboard(nodenet_uid)
 
 
+@rpc("run_netapi_command", permission_required="manage nodenets")
+def run_netapi_command(nodenet_uid, command):
+    if INCLUDE_CONSOLE:
+        return runtime.run_netapi_command(nodenet_uid, command)
+    else:
+        raise RuntimeError("Netapi console is only available when serving to localhost")
+
+
+@rpc("get_netapi_signatures")
+def get_netapi_autocomplete_data(nodenet_uid, name=None):
+    return True, runtime.get_netapi_autocomplete_data(nodenet_uid, name=name)
+
+
 # -----------------------------------------------------------------------------------------------
 
 def main(host=None, port=None):
@@ -1217,6 +1287,7 @@ def main(host=None, port=None):
     port = port or cfg['micropsi2']['port']
     server = cfg['micropsi2']['server']
     print("Starting App on Port " + str(port))
+    runtime.initialize()
     run(micropsi_app, host=host, port=port, quiet=True, server=server)
 
 if __name__ == "__main__":
diff --git a/micropsi_server/static/css/chosen-sprite.png b/micropsi_server/static/css/chosen-sprite.png
new file mode 100644
index 00000000..c57da70b
Binary files /dev/null and b/micropsi_server/static/css/chosen-sprite.png differ
diff --git a/micropsi_server/static/css/chosen-sprite@2x.png b/micropsi_server/static/css/chosen-sprite@2x.png
new file mode 100644
index 00000000..6b505452
Binary files /dev/null and b/micropsi_server/static/css/chosen-sprite@2x.png differ
diff --git a/micropsi_server/static/css/chosen.min.css b/micropsi_server/static/css/chosen.min.css
new file mode 100755
index 00000000..9484b570
--- /dev/null
+++ b/micropsi_server/static/css/chosen.min.css
@@ -0,0 +1,3 @@
+/* Chosen v1.5.0 | (c) 2011-2016 by Harvest | MIT License, https://github.com/harvesthq/chosen/blob/master/LICENSE.md */
+
+.chosen-container{position:relative;display:inline-block;vertical-align:middle;font-size:13px;-webkit-user-select:none;-moz-user-select:none;user-select:none}.chosen-container *{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}.chosen-container .chosen-drop{position:absolute;top:100%;left:-9999px;z-index:1010;width:100%;border:1px solid #aaa;border-top:0;background:#fff;box-shadow:0 4px 5px rgba(0,0,0,.15)}.chosen-container.chosen-with-drop .chosen-drop{left:0}.chosen-container a{cursor:pointer}.chosen-container .search-choice .group-name,.chosen-container .chosen-single .group-name{margin-right:4px;overflow:hidden;white-space:nowrap;text-overflow:ellipsis;font-weight:400;color:#999}.chosen-container .search-choice .group-name:after,.chosen-container .chosen-single .group-name:after{content:":";padding-left:2px;vertical-align:top}.chosen-container-single .chosen-single{position:relative;display:block;overflow:hidden;padding:0 0 0 8px;height:25px;border:1px solid #aaa;border-radius:5px;background-color:#fff;background:-webkit-gradient(linear,50% 0,50% 100%,color-stop(20%,#fff),color-stop(50%,#f6f6f6),color-stop(52%,#eee),color-stop(100%,#f4f4f4));background:-webkit-linear-gradient(#fff 20%,#f6f6f6 50%,#eee 52%,#f4f4f4 100%);background:-moz-linear-gradient(#fff 20%,#f6f6f6 50%,#eee 52%,#f4f4f4 100%);background:-o-linear-gradient(#fff 20%,#f6f6f6 50%,#eee 52%,#f4f4f4 100%);background:linear-gradient(#fff 20%,#f6f6f6 50%,#eee 52%,#f4f4f4 100%);background-clip:padding-box;box-shadow:0 0 3px #fff inset,0 1px 1px rgba(0,0,0,.1);color:#444;text-decoration:none;white-space:nowrap;line-height:24px}.chosen-container-single .chosen-default{color:#999}.chosen-container-single .chosen-single span{display:block;overflow:hidden;margin-right:26px;text-overflow:ellipsis;white-space:nowrap}.chosen-container-single .chosen-single-with-deselect span{margin-right:38px}.chosen-container-single .chosen-single abbr{position:absolute;top:6px;right:26px;display:block;width:12px;height:12px;background:url(chosen-sprite.png) -42px 1px no-repeat;font-size:1px}.chosen-container-single .chosen-single abbr:hover{background-position:-42px -10px}.chosen-container-single.chosen-disabled .chosen-single abbr:hover{background-position:-42px -10px}.chosen-container-single .chosen-single div{position:absolute;top:0;right:0;display:block;width:18px;height:100%}.chosen-container-single .chosen-single div b{display:block;width:100%;height:100%;background:url(chosen-sprite.png) no-repeat 0 2px}.chosen-container-single .chosen-search{position:relative;z-index:1010;margin:0;padding:3px 4px;white-space:nowrap}.chosen-container-single .chosen-search input[type=text]{margin:1px 0;padding:4px 20px 4px 5px;width:100%;height:auto;outline:0;border:1px solid #aaa;background:#fff url(chosen-sprite.png) no-repeat 100% -20px;background:url(chosen-sprite.png) no-repeat 100% -20px;font-size:1em;font-family:sans-serif;line-height:normal;border-radius:0}.chosen-container-single .chosen-drop{margin-top:-1px;border-radius:0 0 4px 4px;background-clip:padding-box}.chosen-container-single.chosen-container-single-nosearch .chosen-search{position:absolute;left:-9999px}.chosen-container .chosen-results{color:#444;position:relative;overflow-x:hidden;overflow-y:auto;margin:0 4px 4px 0;padding:0 0 0 4px;max-height:240px;-webkit-overflow-scrolling:touch}.chosen-container .chosen-results li{display:none;margin:0;padding:5px 6px;list-style:none;line-height:15px;word-wrap:break-word;-webkit-touch-callout:none}.chosen-container 
.chosen-results li.active-result{display:list-item;cursor:pointer}.chosen-container .chosen-results li.disabled-result{display:list-item;color:#ccc;cursor:default}.chosen-container .chosen-results li.highlighted{background-color:#3875d7;background-image:-webkit-gradient(linear,50% 0,50% 100%,color-stop(20%,#3875d7),color-stop(90%,#2a62bc));background-image:-webkit-linear-gradient(#3875d7 20%,#2a62bc 90%);background-image:-moz-linear-gradient(#3875d7 20%,#2a62bc 90%);background-image:-o-linear-gradient(#3875d7 20%,#2a62bc 90%);background-image:linear-gradient(#3875d7 20%,#2a62bc 90%);color:#fff}.chosen-container .chosen-results li.no-results{color:#777;display:list-item;background:#f4f4f4}.chosen-container .chosen-results li.group-result{display:list-item;font-weight:700;cursor:default}.chosen-container .chosen-results li.group-option{padding-left:15px}.chosen-container .chosen-results li em{font-style:normal;text-decoration:underline}.chosen-container-multi .chosen-choices{position:relative;overflow:hidden;margin:0;padding:0 5px;width:100%;height:auto!important;height:1%;border:1px solid #aaa;background-color:#fff;background-image:-webkit-gradient(linear,50% 0,50% 100%,color-stop(1%,#eee),color-stop(15%,#fff));background-image:-webkit-linear-gradient(#eee 1%,#fff 15%);background-image:-moz-linear-gradient(#eee 1%,#fff 15%);background-image:-o-linear-gradient(#eee 1%,#fff 15%);background-image:linear-gradient(#eee 1%,#fff 15%);cursor:text}.chosen-container-multi .chosen-choices li{float:left;list-style:none}.chosen-container-multi .chosen-choices li.search-field{margin:0;padding:0;white-space:nowrap}.chosen-container-multi .chosen-choices li.search-field input[type=text]{margin:1px 0;padding:0;height:25px;outline:0;border:0!important;background:transparent!important;box-shadow:none;color:#999;font-size:100%;font-family:sans-serif;line-height:normal;border-radius:0}.chosen-container-multi .chosen-choices li.search-choice{position:relative;margin:3px 5px 3px 0;padding:3px 20px 3px 5px;border:1px solid #aaa;max-width:100%;border-radius:3px;background-color:#eee;background-image:-webkit-gradient(linear,50% 0,50% 100%,color-stop(20%,#f4f4f4),color-stop(50%,#f0f0f0),color-stop(52%,#e8e8e8),color-stop(100%,#eee));background-image:-webkit-linear-gradient(#f4f4f4 20%,#f0f0f0 50%,#e8e8e8 52%,#eee 100%);background-image:-moz-linear-gradient(#f4f4f4 20%,#f0f0f0 50%,#e8e8e8 52%,#eee 100%);background-image:-o-linear-gradient(#f4f4f4 20%,#f0f0f0 50%,#e8e8e8 52%,#eee 100%);background-image:linear-gradient(#f4f4f4 20%,#f0f0f0 50%,#e8e8e8 52%,#eee 100%);background-size:100% 19px;background-repeat:repeat-x;background-clip:padding-box;box-shadow:0 0 2px #fff inset,0 1px 0 rgba(0,0,0,.05);color:#333;line-height:13px;cursor:default}.chosen-container-multi .chosen-choices li.search-choice span{word-wrap:break-word}.chosen-container-multi .chosen-choices li.search-choice .search-choice-close{position:absolute;top:4px;right:3px;display:block;width:12px;height:12px;background:url(chosen-sprite.png) -42px 1px no-repeat;font-size:1px}.chosen-container-multi .chosen-choices li.search-choice .search-choice-close:hover{background-position:-42px -10px}.chosen-container-multi .chosen-choices li.search-choice-disabled{padding-right:5px;border:1px solid #ccc;background-color:#e4e4e4;background-image:-webkit-gradient(linear,50% 0,50% 100%,color-stop(20%,#f4f4f4),color-stop(50%,#f0f0f0),color-stop(52%,#e8e8e8),color-stop(100%,#eee));background-image:-webkit-linear-gradient(#f4f4f4 20%,#f0f0f0 50%,#e8e8e8 52%,#eee 
100%);background-image:-moz-linear-gradient(#f4f4f4 20%,#f0f0f0 50%,#e8e8e8 52%,#eee 100%);background-image:-o-linear-gradient(#f4f4f4 20%,#f0f0f0 50%,#e8e8e8 52%,#eee 100%);background-image:linear-gradient(#f4f4f4 20%,#f0f0f0 50%,#e8e8e8 52%,#eee 100%);color:#666}.chosen-container-multi .chosen-choices li.search-choice-focus{background:#d4d4d4}.chosen-container-multi .chosen-choices li.search-choice-focus .search-choice-close{background-position:-42px -10px}.chosen-container-multi .chosen-results{margin:0;padding:0}.chosen-container-multi .chosen-drop .result-selected{display:list-item;color:#ccc;cursor:default}.chosen-container-active .chosen-single{border:1px solid #5897fb;box-shadow:0 0 5px rgba(0,0,0,.3)}.chosen-container-active.chosen-with-drop .chosen-single{border:1px solid #aaa;-moz-border-radius-bottomright:0;border-bottom-right-radius:0;-moz-border-radius-bottomleft:0;border-bottom-left-radius:0;background-image:-webkit-gradient(linear,50% 0,50% 100%,color-stop(20%,#eee),color-stop(80%,#fff));background-image:-webkit-linear-gradient(#eee 20%,#fff 80%);background-image:-moz-linear-gradient(#eee 20%,#fff 80%);background-image:-o-linear-gradient(#eee 20%,#fff 80%);background-image:linear-gradient(#eee 20%,#fff 80%);box-shadow:0 1px 0 #fff inset}.chosen-container-active.chosen-with-drop .chosen-single div{border-left:0;background:transparent}.chosen-container-active.chosen-with-drop .chosen-single div b{background-position:-18px 2px}.chosen-container-active .chosen-choices{border:1px solid #5897fb;box-shadow:0 0 5px rgba(0,0,0,.3)}.chosen-container-active .chosen-choices li.search-field input[type=text]{color:#222!important}.chosen-disabled{opacity:.5!important;cursor:default}.chosen-disabled .chosen-single{cursor:default}.chosen-disabled .chosen-choices .search-choice .search-choice-close{cursor:default}.chosen-rtl{text-align:right}.chosen-rtl .chosen-single{overflow:visible;padding:0 8px 0 0}.chosen-rtl .chosen-single span{margin-right:0;margin-left:26px;direction:rtl}.chosen-rtl .chosen-single-with-deselect span{margin-left:38px}.chosen-rtl .chosen-single div{right:auto;left:3px}.chosen-rtl .chosen-single abbr{right:auto;left:26px}.chosen-rtl .chosen-choices li{float:right}.chosen-rtl .chosen-choices li.search-field input[type=text]{direction:rtl}.chosen-rtl .chosen-choices li.search-choice{margin:3px 5px 3px 0;padding:3px 5px 3px 19px}.chosen-rtl .chosen-choices li.search-choice .search-choice-close{right:auto;left:4px}.chosen-rtl.chosen-container-single-nosearch .chosen-search,.chosen-rtl .chosen-drop{left:9999px}.chosen-rtl.chosen-container-single .chosen-results{margin:0 0 4px 4px;padding:0 4px 0 0}.chosen-rtl .chosen-results li.group-option{padding-right:15px;padding-left:0}.chosen-rtl.chosen-container-active.chosen-with-drop .chosen-single div{border-right:0}.chosen-rtl .chosen-search input[type=text]{padding:4px 5px 4px 20px;background:#fff url(chosen-sprite.png) no-repeat -30px -20px;background:url(chosen-sprite.png) no-repeat -30px -20px;direction:rtl}.chosen-rtl.chosen-container-single .chosen-single div b{background-position:6px 2px}.chosen-rtl.chosen-container-single.chosen-with-drop .chosen-single div b{background-position:-12px 2px}@media only screen and (-webkit-min-device-pixel-ratio:1.5),only screen and (min-resolution:144dpi),only screen and (min-resolution:1.5dppx){.chosen-rtl .chosen-search input[type=text],.chosen-container-single .chosen-single abbr,.chosen-container-single .chosen-single div b,.chosen-container-single .chosen-search 
input[type=text],.chosen-container-multi .chosen-choices .search-choice .search-choice-close,.chosen-container .chosen-results-scroll-down span,.chosen-container .chosen-results-scroll-up span{background-image:url(chosen-sprite@2x.png)!important;background-size:52px 37px!important;background-repeat:no-repeat!important}} \ No newline at end of file diff --git a/micropsi_server/static/css/highlight_styles/github.css b/micropsi_server/static/css/highlight_styles/github.css new file mode 100644 index 00000000..791932b8 --- /dev/null +++ b/micropsi_server/static/css/highlight_styles/github.css @@ -0,0 +1,99 @@ +/* + +github.com style (c) Vasily Polovnyov + +*/ + +.hljs { + display: block; + overflow-x: auto; + padding: 0.5em; + color: #333; + background: #f8f8f8; +} + +.hljs-comment, +.hljs-quote { + color: #998; + font-style: italic; +} + +.hljs-keyword, +.hljs-selector-tag, +.hljs-subst { + color: #333; + font-weight: bold; +} + +.hljs-number, +.hljs-literal, +.hljs-variable, +.hljs-template-variable, +.hljs-tag .hljs-attr { + color: #008080; +} + +.hljs-string, +.hljs-doctag { + color: #d14; +} + +.hljs-title, +.hljs-section, +.hljs-selector-id { + color: #900; + font-weight: bold; +} + +.hljs-subst { + font-weight: normal; +} + +.hljs-type, +.hljs-class .hljs-title { + color: #458; + font-weight: bold; +} + +.hljs-tag, +.hljs-name, +.hljs-attribute { + color: #000080; + font-weight: normal; +} + +.hljs-regexp, +.hljs-link { + color: #009926; +} + +.hljs-symbol, +.hljs-bullet { + color: #990073; +} + +.hljs-built_in, +.hljs-builtin-name { + color: #0086b3; +} + +.hljs-meta { + color: #999; + font-weight: bold; +} + +.hljs-deletion { + background: #fdd; +} + +.hljs-addition { + background: #dfd; +} + +.hljs-emphasis { + font-style: italic; +} + +.hljs-strong { + font-weight: bold; +} diff --git a/micropsi_server/static/css/micropsi-styles.css b/micropsi_server/static/css/micropsi-styles.css index 2164adfa..e2b5c4c0 100644 --- a/micropsi_server/static/css/micropsi-styles.css +++ b/micropsi_server/static/css/micropsi-styles.css @@ -274,7 +274,7 @@ graphs margin-top: -30px; } -.dropdown-menu li:hover .sub-menu { +.dropdown-menu li:hover > .sub-menu { visibility: visible; display: block; } @@ -451,11 +451,11 @@ table.emoexpression td { top: 100px !important; } -#simulation_controls{ +#calculation_controls{ margin-right: 20px; } -#simulation_controls .step_counters { +#calculation_controls .step_counters { font-size: 8px; line-height: 9px; text-align: left; @@ -469,7 +469,7 @@ table.emoexpression td { border-right: 1px solid #333; margin-right: 1px; } -#simulation_controls .btn-expand { +#calculation_controls .btn-expand { font-size: 8px; padding-left: 1px; padding-right: 1px; @@ -626,4 +626,48 @@ p.clear { .docstring { white-space: pre-line; +} + +.hint.small { + font-size: 0.9em; +} + +#console_input { + border-top: 0px none; + border-left: 0px none; + border-right: 0px none; + background: transparent; + box-shadow: none; + webkit-box-shadow: none; + border-radius: 0; + font-family: inherit; + font-size: inherit; + color: inherit; + width: 50%; + padding-top: 4px; + vertical-align: middle; + margin: 0; +} +#netapi_console .code_container { + overflow-y: scroll; +} +#netapi_console .seperator{ + margin-top: -17px; +} +#console_autocomplete { + font-size: 10px; + max-height: 120px; + overflow-y: scroll; +} +#console_autocomplete li a.selected { + color: #ffffff !important; + text-decoration: none; + background-color: #0081c2 !important; +} + +#timeseries_controls .slider.slider-horizontal { + 
width: 100% !important; +} +#timeseries_controls .tooltip.top { + opacity: 0.8; } \ No newline at end of file diff --git a/micropsi_server/static/js/chosen.jquery.min.js b/micropsi_server/static/js/chosen.jquery.min.js new file mode 100755 index 00000000..caf5ce7a --- /dev/null +++ b/micropsi_server/static/js/chosen.jquery.min.js @@ -0,0 +1,2 @@ +/* Chosen v1.5.0 | (c) 2011-2016 by Harvest | MIT License, https://github.com/harvesthq/chosen/blob/master/LICENSE.md */ +(function(){var a,AbstractChosen,Chosen,SelectParser,b,c={}.hasOwnProperty,d=function(a,b){function d(){this.constructor=a}for(var e in b)c.call(b,e)&&(a[e]=b[e]);return d.prototype=b.prototype,a.prototype=new d,a.__super__=b.prototype,a};SelectParser=function(){function SelectParser(){this.options_index=0,this.parsed=[]}return SelectParser.prototype.add_node=function(a){return"OPTGROUP"===a.nodeName.toUpperCase()?this.add_group(a):this.add_option(a)},SelectParser.prototype.add_group=function(a){var b,c,d,e,f,g;for(b=this.parsed.length,this.parsed.push({array_index:b,group:!0,label:this.escapeExpression(a.label),title:a.title?a.title:void 0,children:0,disabled:a.disabled,classes:a.className}),f=a.childNodes,g=[],d=0,e=f.length;e>d;d++)c=f[d],g.push(this.add_option(c,b,a.disabled));return g},SelectParser.prototype.add_option=function(a,b,c){return"OPTION"===a.nodeName.toUpperCase()?(""!==a.text?(null!=b&&(this.parsed[b].children+=1),this.parsed.push({array_index:this.parsed.length,options_index:this.options_index,value:a.value,text:a.text,html:a.innerHTML,title:a.title?a.title:void 0,selected:a.selected,disabled:c===!0?c:a.disabled,group_array_index:b,group_label:null!=b?this.parsed[b].label:null,classes:a.className,style:a.style.cssText})):this.parsed.push({array_index:this.parsed.length,options_index:this.options_index,empty:!0}),this.options_index+=1):void 0},SelectParser.prototype.escapeExpression=function(a){var b,c;return null==a||a===!1?"":/[\&\<\>\"\'\`]/.test(a)?(b={"<":"&lt;",">":"&gt;",'"':"&quot;","'":"&#x27;","`":"&#x60;"},c=/&(?!\w+;)|[\<\>\"\'\`]/g,a.replace(c,function(a){return b[a]||"&amp;"})):a},SelectParser}(),SelectParser.select_to_array=function(a){var b,c,d,e,f;for(c=new SelectParser,f=a.childNodes,d=0,e=f.length;e>d;d++)b=f[d],c.add_node(b);return c.parsed},AbstractChosen=function(){function AbstractChosen(a,b){this.form_field=a,this.options=null!=b?b:{},AbstractChosen.browser_is_supported()&&(this.is_multiple=this.form_field.multiple,this.set_default_text(),this.set_default_values(),this.setup(),this.set_up_html(),this.register_observers(),this.on_ready())}return AbstractChosen.prototype.set_default_values=function(){var a=this;return this.click_test_action=function(b){return a.test_active_click(b)},this.activate_action=function(b){return
a.activate_field(b)},this.active_field=!1,this.mouse_on_container=!1,this.results_showing=!1,this.result_highlighted=null,this.allow_single_deselect=null!=this.options.allow_single_deselect&&null!=this.form_field.options[0]&&""===this.form_field.options[0].text?this.options.allow_single_deselect:!1,this.disable_search_threshold=this.options.disable_search_threshold||0,this.disable_search=this.options.disable_search||!1,this.enable_split_word_search=null!=this.options.enable_split_word_search?this.options.enable_split_word_search:!0,this.group_search=null!=this.options.group_search?this.options.group_search:!0,this.search_contains=this.options.search_contains||!1,this.single_backstroke_delete=null!=this.options.single_backstroke_delete?this.options.single_backstroke_delete:!0,this.max_selected_options=this.options.max_selected_options||1/0,this.inherit_select_classes=this.options.inherit_select_classes||!1,this.display_selected_options=null!=this.options.display_selected_options?this.options.display_selected_options:!0,this.display_disabled_options=null!=this.options.display_disabled_options?this.options.display_disabled_options:!0,this.include_group_label_in_selected=this.options.include_group_label_in_selected||!1,this.max_shown_results=this.options.max_shown_results||Number.POSITIVE_INFINITY},AbstractChosen.prototype.set_default_text=function(){return this.form_field.getAttribute("data-placeholder")?this.default_text=this.form_field.getAttribute("data-placeholder"):this.is_multiple?this.default_text=this.options.placeholder_text_multiple||this.options.placeholder_text||AbstractChosen.default_multiple_text:this.default_text=this.options.placeholder_text_single||this.options.placeholder_text||AbstractChosen.default_single_text,this.results_none_found=this.form_field.getAttribute("data-no_results_text")||this.options.no_results_text||AbstractChosen.default_no_result_text},AbstractChosen.prototype.choice_label=function(a){return this.include_group_label_in_selected&&null!=a.group_label?'<b class="group-name">'+a.group_label+"</b>"+a.html:a.html},AbstractChosen.prototype.mouse_enter=function(){return this.mouse_on_container=!0},AbstractChosen.prototype.mouse_leave=function(){return this.mouse_on_container=!1},AbstractChosen.prototype.input_focus=function(a){var b=this;if(this.is_multiple){if(!this.active_field)return setTimeout(function(){return b.container_mousedown()},50)}else if(!this.active_field)return this.activate_field()},AbstractChosen.prototype.input_blur=function(a){var b=this;return this.mouse_on_container?void 0:(this.active_field=!1,setTimeout(function(){return b.blur_test()},100))},AbstractChosen.prototype.results_option_build=function(a){var b,c,d,e,f,g,h;for(b="",e=0,h=this.results_data,f=0,g=h.length;g>f&&(c=h[f],d="",d=c.group?this.result_add_group(c):this.result_add_option(c),""!==d&&(e++,b+=d),(null!=a?a.first:void 0)&&(c.selected&&this.is_multiple?this.choice_build(c):c.selected&&!this.is_multiple&&this.single_set_selected_text(this.choice_label(c))),!(e>=this.max_shown_results));f++);return b},AbstractChosen.prototype.result_add_option=function(a){var b,c;return a.search_match&&this.include_option_in_results(a)?(b=[],a.disabled||a.selected&&this.is_multiple||b.push("active-result"),!a.disabled||a.selected&&this.is_multiple||b.push("disabled-result"),a.selected&&b.push("result-selected"),null!=a.group_array_index&&b.push("group-option"),""!==a.classes&&b.push(a.classes),c=document.createElement("li"),c.className=b.join("
"),c.style.cssText=a.style,c.setAttribute("data-option-array-index",a.array_index),c.innerHTML=a.search_text,a.title&&(c.title=a.title),this.outerHTML(c)):""},AbstractChosen.prototype.result_add_group=function(a){var b,c;return(a.search_match||a.group_match)&&a.active_options>0?(b=[],b.push("group-result"),a.classes&&b.push(a.classes),c=document.createElement("li"),c.className=b.join(" "),c.innerHTML=a.search_text,a.title&&(c.title=a.title),this.outerHTML(c)):""},AbstractChosen.prototype.results_update_field=function(){return this.set_default_text(),this.is_multiple||this.results_reset_cleanup(),this.result_clear_highlight(),this.results_build(),this.results_showing?this.winnow_results():void 0},AbstractChosen.prototype.reset_single_select_options=function(){var a,b,c,d,e;for(d=this.results_data,e=[],b=0,c=d.length;c>b;b++)a=d[b],a.selected?e.push(a.selected=!1):e.push(void 0);return e},AbstractChosen.prototype.results_toggle=function(){return this.results_showing?this.results_hide():this.results_show()},AbstractChosen.prototype.results_search=function(a){return this.results_showing?this.winnow_results():this.results_show()},AbstractChosen.prototype.winnow_results=function(){var a,b,c,d,e,f,g,h,i,j,k,l;for(this.no_results_clear(),d=0,f=this.get_search_text(),a=f.replace(/[-[\]{}()*+?.,\\^$|#\s]/g,"\\$&"),i=new RegExp(a,"i"),c=this.get_search_regex(a),l=this.results_data,j=0,k=l.length;k>j;j++)b=l[j],b.search_match=!1,e=null,this.include_option_in_results(b)&&(b.group&&(b.group_match=!1,b.active_options=0),null!=b.group_array_index&&this.results_data[b.group_array_index]&&(e=this.results_data[b.group_array_index],0===e.active_options&&e.search_match&&(d+=1),e.active_options+=1),b.search_text=b.group?b.label:b.html,(!b.group||this.group_search)&&(b.search_match=this.search_string_match(b.search_text,c),b.search_match&&!b.group&&(d+=1),b.search_match?(f.length&&(g=b.search_text.search(i),h=b.search_text.substr(0,g+f.length)+""+b.search_text.substr(g+f.length),b.search_text=h.substr(0,g)+""+h.substr(g)),null!=e&&(e.group_match=!0)):null!=b.group_array_index&&this.results_data[b.group_array_index].search_match&&(b.search_match=!0)));return this.result_clear_highlight(),1>d&&f.length?(this.update_results_content(""),this.no_results(f)):(this.update_results_content(this.results_option_build()),this.winnow_results_set_highlight())},AbstractChosen.prototype.get_search_regex=function(a){var b;return b=this.search_contains?"":"^",new RegExp(b+a,"i")},AbstractChosen.prototype.search_string_match=function(a,b){var c,d,e,f;if(b.test(a))return!0;if(this.enable_split_word_search&&(a.indexOf(" ")>=0||0===a.indexOf("["))&&(d=a.replace(/\[|\]/g,"").split(" "),d.length))for(e=0,f=d.length;f>e;e++)if(c=d[e],b.test(c))return!0},AbstractChosen.prototype.choices_count=function(){var a,b,c,d;if(null!=this.selected_option_count)return this.selected_option_count;for(this.selected_option_count=0,d=this.form_field.options,b=0,c=d.length;c>b;b++)a=d[b],a.selected&&(this.selected_option_count+=1);return this.selected_option_count},AbstractChosen.prototype.choices_click=function(a){return a.preventDefault(),this.results_showing||this.is_disabled?void 0:this.results_show()},AbstractChosen.prototype.keyup_checker=function(a){var b,c;switch(b=null!=(c=a.which)?c:a.keyCode,this.search_field_scale(),b){case 8:if(this.is_multiple&&this.backstroke_length<1&&this.choices_count()>0)return this.keydown_backstroke();if(!this.pending_backstroke)return this.result_clear_highlight(),this.results_search();break;case 
13:if(a.preventDefault(),this.results_showing)return this.result_select(a);break;case 27:return this.results_showing&&this.results_hide(),!0;case 9:case 38:case 40:case 16:case 91:case 17:case 18:break;default:return this.results_search()}},AbstractChosen.prototype.clipboard_event_checker=function(a){var b=this;return setTimeout(function(){return b.results_search()},50)},AbstractChosen.prototype.container_width=function(){return null!=this.options.width?this.options.width:""+this.form_field.offsetWidth+"px"},AbstractChosen.prototype.include_option_in_results=function(a){return this.is_multiple&&!this.display_selected_options&&a.selected?!1:!this.display_disabled_options&&a.disabled?!1:a.empty?!1:!0},AbstractChosen.prototype.search_results_touchstart=function(a){return this.touch_started=!0,this.search_results_mouseover(a)},AbstractChosen.prototype.search_results_touchmove=function(a){return this.touch_started=!1,this.search_results_mouseout(a)},AbstractChosen.prototype.search_results_touchend=function(a){return this.touch_started?this.search_results_mouseup(a):void 0},AbstractChosen.prototype.outerHTML=function(a){var b;return a.outerHTML?a.outerHTML:(b=document.createElement("div"),b.appendChild(a),b.innerHTML)},AbstractChosen.browser_is_supported=function(){return/iP(od|hone)/i.test(window.navigator.userAgent)?!1:/Android/i.test(window.navigator.userAgent)&&/Mobile/i.test(window.navigator.userAgent)?!1:/IEMobile/i.test(window.navigator.userAgent)?!1:/Windows Phone/i.test(window.navigator.userAgent)?!1:/BlackBerry/i.test(window.navigator.userAgent)?!1:/BB10/i.test(window.navigator.userAgent)?!1:"Microsoft Internet Explorer"===window.navigator.appName?document.documentMode>=8:!0},AbstractChosen.default_multiple_text="Select Some Options",AbstractChosen.default_single_text="Select an Option",AbstractChosen.default_no_result_text="No results match",AbstractChosen}(),a=jQuery,a.fn.extend({chosen:function(b){return AbstractChosen.browser_is_supported()?this.each(function(c){var d,e;return d=a(this),e=d.data("chosen"),"destroy"===b?void(e instanceof Chosen&&e.destroy()):void(e instanceof Chosen||d.data("chosen",new Chosen(this,b)))}):this}}),Chosen=function(c){function Chosen(){return b=Chosen.__super__.constructor.apply(this,arguments)}return d(Chosen,c),Chosen.prototype.setup=function(){return this.form_field_jq=a(this.form_field),this.current_selectedIndex=this.form_field.selectedIndex,this.is_rtl=this.form_field_jq.hasClass("chosen-rtl")},Chosen.prototype.set_up_html=function(){var b,c;return b=["chosen-container"],b.push("chosen-container-"+(this.is_multiple?"multi":"single")),this.inherit_select_classes&&this.form_field.className&&b.push(this.form_field.className),this.is_rtl&&b.push("chosen-rtl"),c={"class":b.join(" "),style:"width: "+this.container_width()+";",title:this.form_field.title},this.form_field.id.length&&(c.id=this.form_field.id.replace(/[^\w]/g,"_")+"_chosen"),this.container=a("
",c),this.is_multiple?this.container.html('
    '):this.container.html(''+this.default_text+'
      '),this.form_field_jq.hide().after(this.container),this.dropdown=this.container.find("div.chosen-drop").first(),this.search_field=this.container.find("input").first(),this.search_results=this.container.find("ul.chosen-results").first(),this.search_field_scale(),this.search_no_results=this.container.find("li.no-results").first(),this.is_multiple?(this.search_choices=this.container.find("ul.chosen-choices").first(),this.search_container=this.container.find("li.search-field").first()):(this.search_container=this.container.find("div.chosen-search").first(),this.selected_item=this.container.find(".chosen-single").first()),this.results_build(),this.set_tab_index(),this.set_label_behavior()},Chosen.prototype.on_ready=function(){return this.form_field_jq.trigger("chosen:ready",{chosen:this})},Chosen.prototype.register_observers=function(){var a=this;return this.container.bind("touchstart.chosen",function(b){return a.container_mousedown(b),b.preventDefault()}),this.container.bind("touchend.chosen",function(b){return a.container_mouseup(b),b.preventDefault()}),this.container.bind("mousedown.chosen",function(b){a.container_mousedown(b)}),this.container.bind("mouseup.chosen",function(b){a.container_mouseup(b)}),this.container.bind("mouseenter.chosen",function(b){a.mouse_enter(b)}),this.container.bind("mouseleave.chosen",function(b){a.mouse_leave(b)}),this.search_results.bind("mouseup.chosen",function(b){a.search_results_mouseup(b)}),this.search_results.bind("mouseover.chosen",function(b){a.search_results_mouseover(b)}),this.search_results.bind("mouseout.chosen",function(b){a.search_results_mouseout(b)}),this.search_results.bind("mousewheel.chosen DOMMouseScroll.chosen",function(b){a.search_results_mousewheel(b)}),this.search_results.bind("touchstart.chosen",function(b){a.search_results_touchstart(b)}),this.search_results.bind("touchmove.chosen",function(b){a.search_results_touchmove(b)}),this.search_results.bind("touchend.chosen",function(b){a.search_results_touchend(b)}),this.form_field_jq.bind("chosen:updated.chosen",function(b){a.results_update_field(b)}),this.form_field_jq.bind("chosen:activate.chosen",function(b){a.activate_field(b)}),this.form_field_jq.bind("chosen:open.chosen",function(b){a.container_mousedown(b)}),this.form_field_jq.bind("chosen:close.chosen",function(b){a.input_blur(b)}),this.search_field.bind("blur.chosen",function(b){a.input_blur(b)}),this.search_field.bind("keyup.chosen",function(b){a.keyup_checker(b)}),this.search_field.bind("keydown.chosen",function(b){a.keydown_checker(b)}),this.search_field.bind("focus.chosen",function(b){a.input_focus(b)}),this.search_field.bind("cut.chosen",function(b){a.clipboard_event_checker(b)}),this.search_field.bind("paste.chosen",function(b){a.clipboard_event_checker(b)}),this.is_multiple?this.search_choices.bind("click.chosen",function(b){a.choices_click(b)}):this.container.bind("click.chosen",function(a){a.preventDefault()})},Chosen.prototype.destroy=function(){return a(this.container[0].ownerDocument).unbind("click.chosen",this.click_test_action),this.search_field[0].tabIndex&&(this.form_field_jq[0].tabIndex=this.search_field[0].tabIndex),this.container.remove(),this.form_field_jq.removeData("chosen"),this.form_field_jq.show()},Chosen.prototype.search_field_disabled=function(){return 
this.is_disabled=this.form_field_jq[0].disabled,this.is_disabled?(this.container.addClass("chosen-disabled"),this.search_field[0].disabled=!0,this.is_multiple||this.selected_item.unbind("focus.chosen",this.activate_action),this.close_field()):(this.container.removeClass("chosen-disabled"),this.search_field[0].disabled=!1,this.is_multiple?void 0:this.selected_item.bind("focus.chosen",this.activate_action))},Chosen.prototype.container_mousedown=function(b){return this.is_disabled||(b&&"mousedown"===b.type&&!this.results_showing&&b.preventDefault(),null!=b&&a(b.target).hasClass("search-choice-close"))?void 0:(this.active_field?this.is_multiple||!b||a(b.target)[0]!==this.selected_item[0]&&!a(b.target).parents("a.chosen-single").length||(b.preventDefault(),this.results_toggle()):(this.is_multiple&&this.search_field.val(""),a(this.container[0].ownerDocument).bind("click.chosen",this.click_test_action),this.results_show()),this.activate_field())},Chosen.prototype.container_mouseup=function(a){return"ABBR"!==a.target.nodeName||this.is_disabled?void 0:this.results_reset(a)},Chosen.prototype.search_results_mousewheel=function(a){var b;return a.originalEvent&&(b=a.originalEvent.deltaY||-a.originalEvent.wheelDelta||a.originalEvent.detail),null!=b?(a.preventDefault(),"DOMMouseScroll"===a.type&&(b=40*b),this.search_results.scrollTop(b+this.search_results.scrollTop())):void 0},Chosen.prototype.blur_test=function(a){return!this.active_field&&this.container.hasClass("chosen-container-active")?this.close_field():void 0},Chosen.prototype.close_field=function(){return a(this.container[0].ownerDocument).unbind("click.chosen",this.click_test_action),this.active_field=!1,this.results_hide(),this.container.removeClass("chosen-container-active"),this.clear_backstroke(),this.show_search_field_default(),this.search_field_scale()},Chosen.prototype.activate_field=function(){return this.container.addClass("chosen-container-active"),this.active_field=!0,this.search_field.val(this.search_field.val()),this.search_field.focus()},Chosen.prototype.test_active_click=function(b){var c;return c=a(b.target).closest(".chosen-container"),c.length&&this.container[0]===c[0]?this.active_field=!0:this.close_field()},Chosen.prototype.results_build=function(){return this.parsing=!0,this.selected_option_count=null,this.results_data=SelectParser.select_to_array(this.form_field),this.is_multiple?this.search_choices.find("li.search-choice").remove():this.is_multiple||(this.single_set_selected_text(),this.disable_search||this.form_field.options.length<=this.disable_search_threshold?(this.search_field[0].readOnly=!0,this.container.addClass("chosen-container-single-nosearch")):(this.search_field[0].readOnly=!1,this.container.removeClass("chosen-container-single-nosearch"))),this.update_results_content(this.results_option_build({first:!0})),this.search_field_disabled(),this.show_search_field_default(),this.search_field_scale(),this.parsing=!1},Chosen.prototype.result_do_highlight=function(a){var b,c,d,e,f;if(a.length){if(this.result_clear_highlight(),this.result_highlight=a,this.result_highlight.addClass("highlighted"),d=parseInt(this.search_results.css("maxHeight"),10),f=this.search_results.scrollTop(),e=d+f,c=this.result_highlight.position().top+this.search_results.scrollTop(),b=c+this.result_highlight.outerHeight(),b>=e)return this.search_results.scrollTop(b-d>0?b-d:0);if(f>c)return this.search_results.scrollTop(c)}},Chosen.prototype.result_clear_highlight=function(){return 
this.result_highlight&&this.result_highlight.removeClass("highlighted"),this.result_highlight=null},Chosen.prototype.results_show=function(){return this.is_multiple&&this.max_selected_options<=this.choices_count()?(this.form_field_jq.trigger("chosen:maxselected",{chosen:this}),!1):(this.container.addClass("chosen-with-drop"),this.results_showing=!0,this.search_field.focus(),this.search_field.val(this.search_field.val()),this.winnow_results(),this.form_field_jq.trigger("chosen:showing_dropdown",{chosen:this}))},Chosen.prototype.update_results_content=function(a){return this.search_results.html(a)},Chosen.prototype.results_hide=function(){return this.results_showing&&(this.result_clear_highlight(),this.container.removeClass("chosen-with-drop"),this.form_field_jq.trigger("chosen:hiding_dropdown",{chosen:this})),this.results_showing=!1},Chosen.prototype.set_tab_index=function(a){var b;return this.form_field.tabIndex?(b=this.form_field.tabIndex,this.form_field.tabIndex=-1,this.search_field[0].tabIndex=b):void 0},Chosen.prototype.set_label_behavior=function(){var b=this;return this.form_field_label=this.form_field_jq.parents("label"),!this.form_field_label.length&&this.form_field.id.length&&(this.form_field_label=a("label[for='"+this.form_field.id+"']")),this.form_field_label.length>0?this.form_field_label.bind("click.chosen",function(a){return b.is_multiple?b.container_mousedown(a):b.activate_field()}):void 0},Chosen.prototype.show_search_field_default=function(){return this.is_multiple&&this.choices_count()<1&&!this.active_field?(this.search_field.val(this.default_text),this.search_field.addClass("default")):(this.search_field.val(""),this.search_field.removeClass("default"))},Chosen.prototype.search_results_mouseup=function(b){var c;return c=a(b.target).hasClass("active-result")?a(b.target):a(b.target).parents(".active-result").first(),c.length?(this.result_highlight=c,this.result_select(b),this.search_field.focus()):void 0},Chosen.prototype.search_results_mouseover=function(b){var c;return c=a(b.target).hasClass("active-result")?a(b.target):a(b.target).parents(".active-result").first(),c?this.result_do_highlight(c):void 0},Chosen.prototype.search_results_mouseout=function(b){return a(b.target).hasClass("active-result")?this.result_clear_highlight():void 0},Chosen.prototype.choice_build=function(b){var c,d,e=this;return c=a("
    • ",{"class":"search-choice"}).html(""+this.choice_label(b)+""),b.disabled?c.addClass("search-choice-disabled"):(d=a("",{"class":"search-choice-close","data-option-array-index":b.array_index}),d.bind("click.chosen",function(a){return e.choice_destroy_link_click(a)}),c.append(d)),this.search_container.before(c)},Chosen.prototype.choice_destroy_link_click=function(b){return b.preventDefault(),b.stopPropagation(),this.is_disabled?void 0:this.choice_destroy(a(b.target))},Chosen.prototype.choice_destroy=function(a){return this.result_deselect(a[0].getAttribute("data-option-array-index"))?(this.show_search_field_default(),this.is_multiple&&this.choices_count()>0&&this.search_field.val().length<1&&this.results_hide(),a.parents("li").first().remove(),this.search_field_scale()):void 0},Chosen.prototype.results_reset=function(){return this.reset_single_select_options(),this.form_field.options[0].selected=!0,this.single_set_selected_text(),this.show_search_field_default(),this.results_reset_cleanup(),this.form_field_jq.trigger("change"),this.active_field?this.results_hide():void 0},Chosen.prototype.results_reset_cleanup=function(){return this.current_selectedIndex=this.form_field.selectedIndex,this.selected_item.find("abbr").remove()},Chosen.prototype.result_select=function(a){var b,c;return this.result_highlight?(b=this.result_highlight,this.result_clear_highlight(),this.is_multiple&&this.max_selected_options<=this.choices_count()?(this.form_field_jq.trigger("chosen:maxselected",{chosen:this}),!1):(this.is_multiple?b.removeClass("active-result"):this.reset_single_select_options(),b.addClass("result-selected"),c=this.results_data[b[0].getAttribute("data-option-array-index")],c.selected=!0,this.form_field.options[c.options_index].selected=!0,this.selected_option_count=null,this.is_multiple?this.choice_build(c):this.single_set_selected_text(this.choice_label(c)),(a.metaKey||a.ctrlKey)&&this.is_multiple||this.results_hide(),this.show_search_field_default(),(this.is_multiple||this.form_field.selectedIndex!==this.current_selectedIndex)&&this.form_field_jq.trigger("change",{selected:this.form_field.options[c.options_index].value}),this.current_selectedIndex=this.form_field.selectedIndex,a.preventDefault(),this.search_field_scale())):void 0},Chosen.prototype.single_set_selected_text=function(a){return null==a&&(a=this.default_text),a===this.default_text?this.selected_item.addClass("chosen-default"):(this.single_deselect_control_build(),this.selected_item.removeClass("chosen-default")),this.selected_item.find("span").html(a)},Chosen.prototype.result_deselect=function(a){var b;return b=this.results_data[a],this.form_field.options[b.options_index].disabled?!1:(b.selected=!1,this.form_field.options[b.options_index].selected=!1,this.selected_option_count=null,this.result_clear_highlight(),this.results_showing&&this.winnow_results(),this.form_field_jq.trigger("change",{deselected:this.form_field.options[b.options_index].value}),this.search_field_scale(),!0)},Chosen.prototype.single_deselect_control_build=function(){return this.allow_single_deselect?(this.selected_item.find("abbr").length||this.selected_item.find("span").first().after(''),this.selected_item.addClass("chosen-single-with-deselect")):void 0},Chosen.prototype.get_search_text=function(){return a("
      ").text(a.trim(this.search_field.val())).html()},Chosen.prototype.winnow_results_set_highlight=function(){var a,b;return b=this.is_multiple?[]:this.search_results.find(".result-selected.active-result"),a=b.length?b.first():this.search_results.find(".active-result").first(),null!=a?this.result_do_highlight(a):void 0},Chosen.prototype.no_results=function(b){var c;return c=a('
<li class="no-results">'+this.results_none_found+' "<span></span>"</li>
'),c.find("span").first().html(b),this.search_results.append(c),this.form_field_jq.trigger("chosen:no_results",{chosen:this})},Chosen.prototype.no_results_clear=function(){return this.search_results.find(".no-results").remove()},Chosen.prototype.keydown_arrow=function(){var a;return this.results_showing&&this.result_highlight?(a=this.result_highlight.nextAll("li.active-result").first())?this.result_do_highlight(a):void 0:this.results_show()},Chosen.prototype.keyup_arrow=function(){var a;return this.results_showing||this.is_multiple?this.result_highlight?(a=this.result_highlight.prevAll("li.active-result"),a.length?this.result_do_highlight(a.first()):(this.choices_count()>0&&this.results_hide(),this.result_clear_highlight())):void 0:this.results_show()},Chosen.prototype.keydown_backstroke=function(){var a;return this.pending_backstroke?(this.choice_destroy(this.pending_backstroke.find("a").first()),this.clear_backstroke()):(a=this.search_container.siblings("li.search-choice").last(),a.length&&!a.hasClass("search-choice-disabled")?(this.pending_backstroke=a,this.single_backstroke_delete?this.keydown_backstroke():this.pending_backstroke.addClass("search-choice-focus")):void 0)},Chosen.prototype.clear_backstroke=function(){return this.pending_backstroke&&this.pending_backstroke.removeClass("search-choice-focus"),this.pending_backstroke=null},Chosen.prototype.keydown_checker=function(a){var b,c;switch(b=null!=(c=a.which)?c:a.keyCode,this.search_field_scale(),8!==b&&this.pending_backstroke&&this.clear_backstroke(),b){case 8:this.backstroke_length=this.search_field.val().length;break;case 9:this.results_showing&&!this.is_multiple&&this.result_select(a),this.mouse_on_container=!1;break;case 13:this.results_showing&&a.preventDefault();break;case 32:this.disable_search&&a.preventDefault();break;case 38:a.preventDefault(),this.keyup_arrow();break;case 40:a.preventDefault(),this.keydown_arrow()}},Chosen.prototype.search_field_scale=function(){var b,c,d,e,f,g,h,i,j;if(this.is_multiple){for(d=0,h=0,f="position:absolute; left: -1000px; top: -1000px; display:none;",g=["font-size","font-style","font-weight","font-family","line-height","text-transform","letter-spacing"],i=0,j=g.length;j>i;i++)e=g[i],f+=e+":"+this.search_field.css(e)+";";return b=a("<div />
      ",{style:f}),b.text(this.search_field.val()),a("body").append(b),h=b.width()+25,b.remove(),c=this.container.outerWidth(),h>c-10&&(h=c-10),this.search_field.css({width:h+"px"})}},Chosen}(AbstractChosen)}).call(this); \ No newline at end of file diff --git a/micropsi_server/static/js/chosen.min.css b/micropsi_server/static/js/chosen.min.css new file mode 100755 index 00000000..9484b570 --- /dev/null +++ b/micropsi_server/static/js/chosen.min.css @@ -0,0 +1,3 @@ +/* Chosen v1.5.0 | (c) 2011-2016 by Harvest | MIT License, https://github.com/harvesthq/chosen/blob/master/LICENSE.md */ + +.chosen-container{position:relative;display:inline-block;vertical-align:middle;font-size:13px;-webkit-user-select:none;-moz-user-select:none;user-select:none}.chosen-container *{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}.chosen-container .chosen-drop{position:absolute;top:100%;left:-9999px;z-index:1010;width:100%;border:1px solid #aaa;border-top:0;background:#fff;box-shadow:0 4px 5px rgba(0,0,0,.15)}.chosen-container.chosen-with-drop .chosen-drop{left:0}.chosen-container a{cursor:pointer}.chosen-container .search-choice .group-name,.chosen-container .chosen-single .group-name{margin-right:4px;overflow:hidden;white-space:nowrap;text-overflow:ellipsis;font-weight:400;color:#999}.chosen-container .search-choice .group-name:after,.chosen-container .chosen-single .group-name:after{content:":";padding-left:2px;vertical-align:top}.chosen-container-single .chosen-single{position:relative;display:block;overflow:hidden;padding:0 0 0 8px;height:25px;border:1px solid #aaa;border-radius:5px;background-color:#fff;background:-webkit-gradient(linear,50% 0,50% 100%,color-stop(20%,#fff),color-stop(50%,#f6f6f6),color-stop(52%,#eee),color-stop(100%,#f4f4f4));background:-webkit-linear-gradient(#fff 20%,#f6f6f6 50%,#eee 52%,#f4f4f4 100%);background:-moz-linear-gradient(#fff 20%,#f6f6f6 50%,#eee 52%,#f4f4f4 100%);background:-o-linear-gradient(#fff 20%,#f6f6f6 50%,#eee 52%,#f4f4f4 100%);background:linear-gradient(#fff 20%,#f6f6f6 50%,#eee 52%,#f4f4f4 100%);background-clip:padding-box;box-shadow:0 0 3px #fff inset,0 1px 1px rgba(0,0,0,.1);color:#444;text-decoration:none;white-space:nowrap;line-height:24px}.chosen-container-single .chosen-default{color:#999}.chosen-container-single .chosen-single span{display:block;overflow:hidden;margin-right:26px;text-overflow:ellipsis;white-space:nowrap}.chosen-container-single .chosen-single-with-deselect span{margin-right:38px}.chosen-container-single .chosen-single abbr{position:absolute;top:6px;right:26px;display:block;width:12px;height:12px;background:url(chosen-sprite.png) -42px 1px no-repeat;font-size:1px}.chosen-container-single .chosen-single abbr:hover{background-position:-42px -10px}.chosen-container-single.chosen-disabled .chosen-single abbr:hover{background-position:-42px -10px}.chosen-container-single .chosen-single div{position:absolute;top:0;right:0;display:block;width:18px;height:100%}.chosen-container-single .chosen-single div b{display:block;width:100%;height:100%;background:url(chosen-sprite.png) no-repeat 0 2px}.chosen-container-single .chosen-search{position:relative;z-index:1010;margin:0;padding:3px 4px;white-space:nowrap}.chosen-container-single .chosen-search input[type=text]{margin:1px 0;padding:4px 20px 4px 5px;width:100%;height:auto;outline:0;border:1px solid #aaa;background:#fff url(chosen-sprite.png) no-repeat 100% -20px;background:url(chosen-sprite.png) no-repeat 100% 
-20px;font-size:1em;font-family:sans-serif;line-height:normal;border-radius:0}.chosen-container-single .chosen-drop{margin-top:-1px;border-radius:0 0 4px 4px;background-clip:padding-box}.chosen-container-single.chosen-container-single-nosearch .chosen-search{position:absolute;left:-9999px}.chosen-container .chosen-results{color:#444;position:relative;overflow-x:hidden;overflow-y:auto;margin:0 4px 4px 0;padding:0 0 0 4px;max-height:240px;-webkit-overflow-scrolling:touch}.chosen-container .chosen-results li{display:none;margin:0;padding:5px 6px;list-style:none;line-height:15px;word-wrap:break-word;-webkit-touch-callout:none}.chosen-container .chosen-results li.active-result{display:list-item;cursor:pointer}.chosen-container .chosen-results li.disabled-result{display:list-item;color:#ccc;cursor:default}.chosen-container .chosen-results li.highlighted{background-color:#3875d7;background-image:-webkit-gradient(linear,50% 0,50% 100%,color-stop(20%,#3875d7),color-stop(90%,#2a62bc));background-image:-webkit-linear-gradient(#3875d7 20%,#2a62bc 90%);background-image:-moz-linear-gradient(#3875d7 20%,#2a62bc 90%);background-image:-o-linear-gradient(#3875d7 20%,#2a62bc 90%);background-image:linear-gradient(#3875d7 20%,#2a62bc 90%);color:#fff}.chosen-container .chosen-results li.no-results{color:#777;display:list-item;background:#f4f4f4}.chosen-container .chosen-results li.group-result{display:list-item;font-weight:700;cursor:default}.chosen-container .chosen-results li.group-option{padding-left:15px}.chosen-container .chosen-results li em{font-style:normal;text-decoration:underline}.chosen-container-multi .chosen-choices{position:relative;overflow:hidden;margin:0;padding:0 5px;width:100%;height:auto!important;height:1%;border:1px solid #aaa;background-color:#fff;background-image:-webkit-gradient(linear,50% 0,50% 100%,color-stop(1%,#eee),color-stop(15%,#fff));background-image:-webkit-linear-gradient(#eee 1%,#fff 15%);background-image:-moz-linear-gradient(#eee 1%,#fff 15%);background-image:-o-linear-gradient(#eee 1%,#fff 15%);background-image:linear-gradient(#eee 1%,#fff 15%);cursor:text}.chosen-container-multi .chosen-choices li{float:left;list-style:none}.chosen-container-multi .chosen-choices li.search-field{margin:0;padding:0;white-space:nowrap}.chosen-container-multi .chosen-choices li.search-field input[type=text]{margin:1px 0;padding:0;height:25px;outline:0;border:0!important;background:transparent!important;box-shadow:none;color:#999;font-size:100%;font-family:sans-serif;line-height:normal;border-radius:0}.chosen-container-multi .chosen-choices li.search-choice{position:relative;margin:3px 5px 3px 0;padding:3px 20px 3px 5px;border:1px solid #aaa;max-width:100%;border-radius:3px;background-color:#eee;background-image:-webkit-gradient(linear,50% 0,50% 100%,color-stop(20%,#f4f4f4),color-stop(50%,#f0f0f0),color-stop(52%,#e8e8e8),color-stop(100%,#eee));background-image:-webkit-linear-gradient(#f4f4f4 20%,#f0f0f0 50%,#e8e8e8 52%,#eee 100%);background-image:-moz-linear-gradient(#f4f4f4 20%,#f0f0f0 50%,#e8e8e8 52%,#eee 100%);background-image:-o-linear-gradient(#f4f4f4 20%,#f0f0f0 50%,#e8e8e8 52%,#eee 100%);background-image:linear-gradient(#f4f4f4 20%,#f0f0f0 50%,#e8e8e8 52%,#eee 100%);background-size:100% 19px;background-repeat:repeat-x;background-clip:padding-box;box-shadow:0 0 2px #fff inset,0 1px 0 rgba(0,0,0,.05);color:#333;line-height:13px;cursor:default}.chosen-container-multi .chosen-choices li.search-choice span{word-wrap:break-word}.chosen-container-multi .chosen-choices li.search-choice 
.search-choice-close{position:absolute;top:4px;right:3px;display:block;width:12px;height:12px;background:url(chosen-sprite.png) -42px 1px no-repeat;font-size:1px}.chosen-container-multi .chosen-choices li.search-choice .search-choice-close:hover{background-position:-42px -10px}.chosen-container-multi .chosen-choices li.search-choice-disabled{padding-right:5px;border:1px solid #ccc;background-color:#e4e4e4;background-image:-webkit-gradient(linear,50% 0,50% 100%,color-stop(20%,#f4f4f4),color-stop(50%,#f0f0f0),color-stop(52%,#e8e8e8),color-stop(100%,#eee));background-image:-webkit-linear-gradient(#f4f4f4 20%,#f0f0f0 50%,#e8e8e8 52%,#eee 100%);background-image:-moz-linear-gradient(#f4f4f4 20%,#f0f0f0 50%,#e8e8e8 52%,#eee 100%);background-image:-o-linear-gradient(#f4f4f4 20%,#f0f0f0 50%,#e8e8e8 52%,#eee 100%);background-image:linear-gradient(#f4f4f4 20%,#f0f0f0 50%,#e8e8e8 52%,#eee 100%);color:#666}.chosen-container-multi .chosen-choices li.search-choice-focus{background:#d4d4d4}.chosen-container-multi .chosen-choices li.search-choice-focus .search-choice-close{background-position:-42px -10px}.chosen-container-multi .chosen-results{margin:0;padding:0}.chosen-container-multi .chosen-drop .result-selected{display:list-item;color:#ccc;cursor:default}.chosen-container-active .chosen-single{border:1px solid #5897fb;box-shadow:0 0 5px rgba(0,0,0,.3)}.chosen-container-active.chosen-with-drop .chosen-single{border:1px solid #aaa;-moz-border-radius-bottomright:0;border-bottom-right-radius:0;-moz-border-radius-bottomleft:0;border-bottom-left-radius:0;background-image:-webkit-gradient(linear,50% 0,50% 100%,color-stop(20%,#eee),color-stop(80%,#fff));background-image:-webkit-linear-gradient(#eee 20%,#fff 80%);background-image:-moz-linear-gradient(#eee 20%,#fff 80%);background-image:-o-linear-gradient(#eee 20%,#fff 80%);background-image:linear-gradient(#eee 20%,#fff 80%);box-shadow:0 1px 0 #fff inset}.chosen-container-active.chosen-with-drop .chosen-single div{border-left:0;background:transparent}.chosen-container-active.chosen-with-drop .chosen-single div b{background-position:-18px 2px}.chosen-container-active .chosen-choices{border:1px solid #5897fb;box-shadow:0 0 5px rgba(0,0,0,.3)}.chosen-container-active .chosen-choices li.search-field input[type=text]{color:#222!important}.chosen-disabled{opacity:.5!important;cursor:default}.chosen-disabled .chosen-single{cursor:default}.chosen-disabled .chosen-choices .search-choice .search-choice-close{cursor:default}.chosen-rtl{text-align:right}.chosen-rtl .chosen-single{overflow:visible;padding:0 8px 0 0}.chosen-rtl .chosen-single span{margin-right:0;margin-left:26px;direction:rtl}.chosen-rtl .chosen-single-with-deselect span{margin-left:38px}.chosen-rtl .chosen-single div{right:auto;left:3px}.chosen-rtl .chosen-single abbr{right:auto;left:26px}.chosen-rtl .chosen-choices li{float:right}.chosen-rtl .chosen-choices li.search-field input[type=text]{direction:rtl}.chosen-rtl .chosen-choices li.search-choice{margin:3px 5px 3px 0;padding:3px 5px 3px 19px}.chosen-rtl .chosen-choices li.search-choice .search-choice-close{right:auto;left:4px}.chosen-rtl.chosen-container-single-nosearch .chosen-search,.chosen-rtl .chosen-drop{left:9999px}.chosen-rtl.chosen-container-single .chosen-results{margin:0 0 4px 4px;padding:0 4px 0 0}.chosen-rtl .chosen-results li.group-option{padding-right:15px;padding-left:0}.chosen-rtl.chosen-container-active.chosen-with-drop .chosen-single div{border-right:0}.chosen-rtl .chosen-search input[type=text]{padding:4px 5px 4px 20px;background:#fff 
url(chosen-sprite.png) no-repeat -30px -20px;background:url(chosen-sprite.png) no-repeat -30px -20px;direction:rtl}.chosen-rtl.chosen-container-single .chosen-single div b{background-position:6px 2px}.chosen-rtl.chosen-container-single.chosen-with-drop .chosen-single div b{background-position:-12px 2px}@media only screen and (-webkit-min-device-pixel-ratio:1.5),only screen and (min-resolution:144dpi),only screen and (min-resolution:1.5dppx){.chosen-rtl .chosen-search input[type=text],.chosen-container-single .chosen-single abbr,.chosen-container-single .chosen-single div b,.chosen-container-single .chosen-search input[type=text],.chosen-container-multi .chosen-choices .search-choice .search-choice-close,.chosen-container .chosen-results-scroll-down span,.chosen-container .chosen-results-scroll-up span{background-image:url(chosen-sprite@2x.png)!important;background-size:52px 37px!important;background-repeat:no-repeat!important}} \ No newline at end of file diff --git a/micropsi_server/static/js/dialogs.js b/micropsi_server/static/js/dialogs.js index da3335d5..576951f0 100644 --- a/micropsi_server/static/js/dialogs.js +++ b/micropsi_server/static/js/dialogs.js @@ -106,6 +106,13 @@ var dialogs = { } else { dialogs.setModalForm(data, callback); } + }, + error: function(data, param1){ + if(data.status == 500){ + $('body').html(data.responseText); + } else { + dialogs.notification(data.statusText, "error"); + } } }); }, @@ -138,7 +145,7 @@ var api = { call: function(functionname, params, success_cb, error_cb, method){ var url = '/rpc/'+functionname; - if(method != "post"){ + if(method == "get"){ args = ''; for(var key in params){ args += key+'='+encodeURIComponent(JSON.stringify(params[key]))+','; @@ -147,8 +154,8 @@ var api = { } $.ajax({ url: url, - data: ((method == "post") ? JSON.stringify(params) : null), - type: method || "get", + data: ((method != "get") ? JSON.stringify(params) : null), + type: method || "post", processData: (method == "get"), contentType: "application/json", success: function(response){ @@ -173,14 +180,26 @@ var api = { if(data.status == 0){ msg = "Server not reachable."; } else { - try{ - error = JSON.parse(data.responseText); - var errtext = $('
<div/>').text(error.data).html(); - msg += '<strong>' + errtext + '</strong>'; - if(error.traceback){ - msg += '<pre>'+error.traceback+'</pre>'; + if(data.responseText){ + try{ + error = JSON.parse(data.responseText); + var errtext = $('<div/>').text(error.data).html(); + msg += '<strong>' + errtext + '</strong>'; + if(error.traceback){ + msg += '<pre>'+error.traceback+'</pre>'; + } + } catch (err){} + } else if(data.data) { + if(typeof data.data == 'object'){ + msg = '<ul>'; + for(var i in data.data){ + msg += '<li>'+data.data[i]+'</li>'; + } + msg += '</ul>
      '; + } else { + msg = data } - } catch (err){} + } if(!msg){ msg = type || "serverside exception"; } @@ -201,7 +220,7 @@ $(function() { dialogs.remote_form_dialog($(event.target).attr('href')); } - $('#menu_nodenet').find('a').on('click', function(event){ + $('.navbar .nav .dropdown').find('a').on('click', function(event){ if($(event.target).parent().hasClass("disabled")){ event.stopImmediatePropagation(); event.preventDefault(); @@ -214,6 +233,13 @@ $(function() { $('#menu_nodenet').find('[data=nodenet-needed]').addClass("disabled") } }); + $('a.dropdown-toggle[href=#menu_world]').on('click', function(event){ + if(currentWorld){ + $('#menu_world').find('[data=world-needed]').removeClass("disabled") + } else { + $('#menu_world').find('[data=world-needed]').addClass("disabled") + } + }); $('a.remote_form_dialog').on('click', remote_form); @@ -222,20 +248,17 @@ $(function() { dialogs.remote_form_dialog($(event.target).attr('href'), function(data){ // refreshNodenetList(); -- TODO: does not work yet (due to paperscript missing proper js integration) dialogs.notification('Nodenet created. ID: ' + data.nodenet_uid, 'success'); - $.cookie('selected_nodenet', data.nodenet_uid, { expires: 7, path: '/' }); + $.cookie('selected_nodenet', data.nodenet_uid+"/", { expires: 7, path: '/' }); window.location.reload(); }); }); $('.navbar a.nodenet_delete').on('click', function(){ - if(typeof currentNodenet == 'undefined'){ - return dialogs.notification("there is no current nodenet selected"); - } dialogs.confirm("Do you really want to delete this nodenet?", function(){ api.call('delete_nodenet', {nodenet_uid: currentNodenet}, function(data){ currentNodenet=null; // refreshNodenetList(); -- TODO: does not work yet (due to paperscript missing proper js integration) - $.cookie('selected_nodenet', currentNodenet, { expires: 7, path: '/' }); + $.cookie('selected_nodenet', "", { expires: 7, path: '/' }); dialogs.notification('Nodenet deleted'); window.location.reload(); }); @@ -244,26 +267,17 @@ $(function() { $('.navbar a.nodenet_edit').on('click', function(event){ event.preventDefault(); - if(typeof currentNodenet == 'undefined'){ - return dialogs.notification("there is no current nodenet selected"); - } api.call('edit_nodenet', {nodenet_uid: currentNodenet}); }); $('.navbar a.nodenet_save').on('click', function(event){ event.preventDefault(); - if(typeof currentNodenet == 'undefined'){ - return dialogs.notification("there is no current nodenet selected"); - } $('#loading').show(); api.call('save_nodenet', {nodenet_uid: currentNodenet}); }); $('.navbar a.nodenet_revert').on('click', function(event){ event.preventDefault(); - if(typeof currentNodenet == 'undefined'){ - return dialogs.notification("there is no current nodenet selected"); - } $('#loading').show(); api.call('revert_nodenet', {nodenet_uid: currentNodenet}, function(data){ dialogs.notification("nodenet reverted"); @@ -274,9 +288,6 @@ $(function() { $('.navbar a.reload_native_modules').on('click', function(event){ event.preventDefault(); - if(typeof currentNodenet == 'undefined'){ - return dialogs.notification("there is no current nodenet selected"); - } if($(event.target).hasClass("reload_revert")){ api.call('reload_native_modules', {}, function(){ api.call('revert_nodenet', {nodenet_uid: currentNodenet}, function(){ @@ -300,9 +311,6 @@ $(function() { }); $('.navbar a.nodenet_merge').on('click', function(event){ event.preventDefault(); - if(typeof currentNodenet == 'undefined'){ - return dialogs.notification("there is no current nodenet 
selected"); - } dialogs.remote_form_dialog(event.target.href + '/' + currentNodenet, function(){ window.location.reload(); }); @@ -313,6 +321,7 @@ $(function() { event.preventDefault(); dialogs.remote_form_dialog($(event.target).attr('href'), function(data){ dialogs.notification('World created. ID: ' + data.world_uid, 'success'); + $(document).trigger('new_world_created', data); var url = '/world_list/' + ($.cookie('selected_world') || ''); $.get(url, {}, function(data){ $('#world_list').html(data); @@ -323,9 +332,6 @@ $(function() { $('.navbar a.world_delete').on('click', function(event){ event.preventDefault(); - if(typeof currentWorld == 'undefined'){ - return dialogs.notification("there is no current world selected"); - } dialogs.confirm("Do you really want to delete this world?", function(){ api.call('delete_world', {world_uid: currentWorld}, @@ -340,17 +346,11 @@ $(function() { $('.navbar a.world_save').on('click', function(event){ event.preventDefault(); - if(typeof currentWorld == 'undefined'){ - return dialogs.notification("there is no current world selected"); - } api.call('save_world', {world_uid: currentWorld}); }); $('.navbar a.world_revert').on('click', function(event){ event.preventDefault(); - if(typeof currentWorld == 'undefined'){ - return dialogs.notification("there is no current world selected"); - } api.call('revert_world', {world_uid: currentWorld}, function(){ dialogs.notification("World state reverted"); @@ -383,17 +383,11 @@ $(function() { $('.nodenet_export').on('click', function(event){ event.preventDefault(); - if(typeof currentNodenet == 'undefined'){ - return dialogs.notification("there is no current nodenet selected"); - } window.location.replace(event.target.href + '/' + currentNodenet); }); $('.world_export').on('click', function(event){ event.preventDefault(); - if(typeof currentWorld == 'undefined'){ - return dialogs.notification("there is no current world selected"); - } window.location.replace(event.target.href + '/' + currentWorld); }); @@ -465,8 +459,7 @@ $(function() { }, function(data){ api.defaultErrorCallback(data); $('#monitor_modal').modal('hide'); - }, - method="post"); + }); } $('#monitor_modal .btn-primary').on('click', submitMonitorModal); $('#monitor_modal form').on('submit', submitMonitorModal); @@ -524,15 +517,13 @@ $(function() { $('#recipe_modal .docstring').show(); $('#recipe_modal .docstring').html(recipes[name].docstring); $('#recipe_modal .btn-primary').show(); - $('#recipe_modal form').show(); } else { $('#recipe_modal .default_explanation').show(); $('#recipe_modal .docstring').hide(); $('#recipe_modal .btn-primary').hide(); - $('#recipe_modal form').hide(); } + var html = ''; if(name in recipes){ - var html = ''; for(var i in recipes[name].parameters){ var param = recipes[name].parameters[i]; html += '' + @@ -543,8 +534,8 @@ $(function() { '
      '+ '
      '; } - $('.recipe_param_container').html(html); } + $('.recipe_param_container').html(html); }; var run_recipe = function(event){ @@ -621,14 +612,32 @@ $(function() { $('#recipe_modal button').prop('disabled', false); api.call('get_available_recipes', {}, function(data){ recipes = data; - var options = ''; - var items = Object.values(data); - var sorted = items.sort(sortByName); - for(var idx in sorted){ - options += ''; + var categories = {}; + for(var key in recipes){ + if(!categories[recipes[key].category]){ + categories[recipes[key].category] = []; + } + categories[recipes[key].category].push(recipes[key]); + } + var sorted = Object.keys(categories); + sorted.sort(); + recipe_name_input.chosen('destroy'); + var html = ''; + var cat; + for(var i in sorted){ + cat = sorted[i] + html += ''; + categories[cat].sort(sortByName); + for(var i in categories[cat]){ + html += ''; + } + html += '' } - recipe_name_input.html(options); + recipe_name_input.html(html); + recipe_name_input.val(''); + recipe_name_input.chosen({'search_contains': true}); recipe_name_input.focus(); + recipe_name_input.trigger('chosen:open'); update_parameters_for_recipe(); }); }); @@ -639,16 +648,16 @@ $(function() { updateWorldAdapterSelector = function() { var option = $("#nn_world option:selected"); if (option) { - $("#nn_worldadapter").load("/create_worldadapter_selector/"+option.val()); + $("#nn_worldadapter").parent().load("/create_worldadapter_selector/"+option.val()); } }; var listeners = {} -var simulationRunning = false; +var calculationRunning = false; var currentNodenet; var runner_properties = {}; -var sections = ['nodenet_editor', 'monitor', 'world_editor']; +var sections = ['nodenet_editor', 'netapi_console', 'monitor', 'world_editor']; register_stepping_function = function(type, input, callback){ @@ -667,7 +676,7 @@ fetch_stepping_info = function(){ params[key] = listeners[key].input() } busy = true; - api.call('get_current_state', params, success=function(data){ + api.call('get_calculation_state', params, success=function(data){ busy = false; var start = new Date().getTime(); window.currentSimulationStep = data.current_nodenet_step; @@ -680,36 +689,36 @@ fetch_stepping_info = function(){ $('.nodenet_step').text(data.current_nodenet_step); $('.world_step').text(data.current_world_step); var text = []; - if(data.simulation_condition){ - if(data.simulation_condition.step_amount){ - text.push("run " + data.simulation_condition.step_amount + " steps"); - $('#run_condition_steps').val(data.simulation_condition.step_amount); + if(data.calculation_condition){ + if(data.calculation_condition.step_amount){ + text.push("run " + data.calculation_condition.step_amount + " steps"); + $('#run_condition_steps').val(data.calculation_condition.step_amount); } - if(data.simulation_condition.monitor){ - text.push('monitor = ' + data.simulation_condition.monitor.value + ''); - $('#run_condition_monitor_selector').val(data.simulation_condition.monitor.uid); - $('#run_condition_monitor_value').val(data.simulation_condition.monitor.value); + if(data.calculation_condition.monitor){ + text.push('monitor = ' + data.calculation_condition.monitor.value + ''); + $('#run_condition_monitor_selector').val(data.calculation_condition.monitor.uid); + $('#run_condition_monitor_value').val(data.calculation_condition.monitor.value); } } if(text.length){ - $('#simulation_controls .runner_condition').html(text.join(" or ")); - $('#simulation_controls .running_conditional').show(); + $('#calculation_controls 
.runner_condition').html(text.join(" or ")); + $('#calculation_controls .running_conditional').show(); $('#remove_runner_condition').show(); } else { - $('#simulation_controls .running_conditional').hide(); + $('#calculation_controls .running_conditional').hide(); $('#remove_runner_condition').hide(); $('#set_runner_condition').show(); } var end = new Date().getTime(); - if(data.simulation_running && !busy){ + if(data.calculation_running && !busy){ if(runner_properties.timestep - (end - start) > 0){ window.setTimeout(fetch_stepping_info, runner_properties.timestep - (end - start)); } else { $(document).trigger('runner_stepped'); } } - setButtonStates(data.simulation_running); + setButtonStates(data.calculation_running); if(data.user_prompt){ promptUser(data.user_prompt); } @@ -751,7 +760,7 @@ $(document).on('runner_started', fetch_stepping_info); $(document).on('runner_stepped', fetch_stepping_info); $(document).on('nodenet_changed', function(event, new_uid){ currentNodenet = new_uid; - $.cookie('selected_nodenet', currentNodenet, { expires: 7, path: '/' }); + $.cookie('selected_nodenet', currentNodenet+"/", { expires: 7, path: '/' }); refreshNodenetList(); }) $(document).on('form_submit', function(event, data){ @@ -791,24 +800,24 @@ function setButtonStates(running){ $(document).prop('title', "▶ " + default_title); $('#nodenet_start').addClass('active'); $('#nodenet_stop').removeClass('active'); - $('#simulation_controls .runner_running').show(); - $('#simulation_controls .runner_paused').hide(); + $('#calculation_controls .runner_running').show(); + $('#calculation_controls .runner_paused').hide(); } else { $(document).prop('title', default_title); $('#nodenet_start').removeClass('active'); $('#nodenet_stop').addClass('active'); - $('#simulation_controls .runner_running').hide(); - $('#simulation_controls .runner_paused').show(); + $('#calculation_controls .runner_running').hide(); + $('#calculation_controls .runner_paused').show(); } } function stepNodenet(event){ event.preventDefault(); - if(simulationRunning){ + if(calculationRunning){ stopNodenetrunner(event); } if(currentNodenet){ - api.call("step_simulation", + api.call("step_calculation", {nodenet_uid: currentNodenet}, success=function(data){ $(document).trigger('runner_stepped'); @@ -822,7 +831,7 @@ function startNodenetrunner(event){ event.preventDefault(); nodenetRunning = true; if(currentNodenet){ - api.call('start_simulation', {nodenet_uid: currentNodenet}, function(){ + api.call('start_calculation', {nodenet_uid: currentNodenet}, function(){ $(document).trigger('runner_started'); }); } else { @@ -831,19 +840,19 @@ function startNodenetrunner(event){ } function stopNodenetrunner(event){ event.preventDefault(); - api.call('stop_simulation', {nodenet_uid: currentNodenet}, function(){ + api.call('stop_calculation', {nodenet_uid: currentNodenet}, function(){ $(document).trigger('runner_stopped'); nodenetRunning = false; }); } -function resetNodenet(event){ +function revertAll(event){ event.preventDefault(); nodenetRunning = false; if(currentNodenet){ $('#loading').show(); api.call( - 'revert_nodenet', + 'revert_calculation', {nodenet_uid: currentNodenet}, function(){ window.location.reload(); @@ -857,7 +866,7 @@ function resetNodenet(event){ $(function() { $('#nodenet_start').on('click', startNodenetrunner); $('#nodenet_stop').on('click', stopNodenetrunner); - $('#nodenet_reset').on('click', resetNodenet); + $('#revert_all').on('click', revertAll); $('#nodenet_step_forward').on('click', stepNodenet); }); @@ -868,12 +877,20 @@ 
$.extend( $.fn.dataTableExt.oStdClasses, { } ); $(document).ready(function() { - currentNodenet = $.cookie('selected_nodenet') || ''; + var nodenetcookie = $.cookie('selected_nodenet') || ''; + if (nodenetcookie && nodenetcookie.indexOf('/') > 0){ + nodenetcookie = nodenetcookie.split("/"); + currentNodenet = nodenetcookie[0]; + currentNodeSpace = nodenetcookie[1] || null; + } else { + currentNodenet = ''; + currentNodeSpace = ''; + } currentWorld = $.cookie('selected_world') || ''; $('#nodenet_mgr').dataTable( { "sDom": "<'row'<'span6'l><'span6'f>r>t<'row'<'span6'i><'span6'p>>", "sPaginationType": "bootstrap" - } ); + }); $('textarea.loc').autogrow(); if($('.frontend_section').length == 1){ $('.frontend_section').addClass('in'); @@ -883,7 +900,7 @@ $(document).ready(function() { refreshNodenetList(); setButtonStates(false); if(currentNodenet){ - fetch_stepping_info(); + $(document).trigger('nodenet_changed', currentNodenet); } }); diff --git a/micropsi_server/static/js/highlight.pack.js b/micropsi_server/static/js/highlight.pack.js new file mode 100644 index 00000000..3ebe606a --- /dev/null +++ b/micropsi_server/static/js/highlight.pack.js @@ -0,0 +1,2 @@ +/*! highlight.js v9.2.0 | BSD3 License | git.io/hljslicense */ +!function(e){var n="object"==typeof window&&window||"object"==typeof self&&self;"undefined"!=typeof exports?e(exports):n&&(n.hljs=e({}),"function"==typeof define&&define.amd&&define([],function(){return n.hljs}))}(function(e){function n(e){return e.replace(/&/gm,"&").replace(//gm,">")}function t(e){return e.nodeName.toLowerCase()}function r(e,n){var t=e&&e.exec(n);return t&&0==t.index}function a(e){return/^(no-?highlight|plain|text)$/i.test(e)}function i(e){var n,t,r,i=e.className+" ";if(i+=e.parentNode?e.parentNode.className:"",t=/\blang(?:uage)?-([\w-]+)\b/i.exec(i))return w(t[1])?t[1]:"no-highlight";for(i=i.split(/\s+/),n=0,r=i.length;r>n;n++)if(w(i[n])||a(i[n]))return i[n]}function o(e,n){var t,r={};for(t in e)r[t]=e[t];if(n)for(t in n)r[t]=n[t];return r}function u(e){var n=[];return function r(e,a){for(var i=e.firstChild;i;i=i.nextSibling)3==i.nodeType?a+=i.nodeValue.length:1==i.nodeType&&(n.push({event:"start",offset:a,node:i}),a=r(i,a),t(i).match(/br|hr|img|input/)||n.push({event:"stop",offset:a,node:i}));return a}(e,0),n}function c(e,r,a){function i(){return e.length&&r.length?e[0].offset!=r[0].offset?e[0].offset"}function u(e){f+=""}function c(e){("start"==e.event?o:u)(e.node)}for(var s=0,f="",l=[];e.length||r.length;){var g=i();if(f+=n(a.substr(s,g[0].offset-s)),s=g[0].offset,g==e){l.reverse().forEach(u);do c(g.splice(0,1)[0]),g=i();while(g==e&&g.length&&g[0].offset==s);l.reverse().forEach(o)}else"start"==g[0].event?l.push(g[0].node):l.pop(),c(g.splice(0,1)[0])}return f+n(a.substr(s))}function s(e){function n(e){return e&&e.source||e}function t(t,r){return new RegExp(n(t),"m"+(e.cI?"i":"")+(r?"g":""))}function r(a,i){if(!a.compiled){if(a.compiled=!0,a.k=a.k||a.bK,a.k){var u={},c=function(n,t){e.cI&&(t=t.toLowerCase()),t.split(" ").forEach(function(e){var t=e.split("|");u[t[0]]=[n,t[1]?Number(t[1]):1]})};"string"==typeof a.k?c("keyword",a.k):Object.keys(a.k).forEach(function(e){c(e,a.k[e])}),a.k=u}a.lR=t(a.l||/\b\w+\b/,!0),i&&(a.bK&&(a.b="\\b("+a.bK.split(" ").join("|")+")\\b"),a.b||(a.b=/\B|\b/),a.bR=t(a.b),a.e||a.eW||(a.e=/\B|\b/),a.e&&(a.eR=t(a.e)),a.tE=n(a.e)||"",a.eW&&i.tE&&(a.tE+=(a.e?"|":"")+i.tE)),a.i&&(a.iR=t(a.i)),void 0===a.r&&(a.r=1),a.c||(a.c=[]);var 
s=[];a.c.forEach(function(e){e.v?e.v.forEach(function(n){s.push(o(e,n))}):s.push("self"==e?a:e)}),a.c=s,a.c.forEach(function(e){r(e,a)}),a.starts&&r(a.starts,i);var f=a.c.map(function(e){return e.bK?"\\.?("+e.b+")\\.?":e.b}).concat([a.tE,a.i]).map(n).filter(Boolean);a.t=f.length?t(f.join("|"),!0):{exec:function(){return null}}}}r(e)}function f(e,t,a,i){function o(e,n){for(var t=0;t";return i+=e+'">',i+n+o}function h(){if(!k.k)return n(M);var e="",t=0;k.lR.lastIndex=0;for(var r=k.lR.exec(M);r;){e+=n(M.substr(t,r.index-t));var a=g(k,r);a?(B+=a[1],e+=p(a[0],n(r[0]))):e+=n(r[0]),t=k.lR.lastIndex,r=k.lR.exec(M)}return e+n(M.substr(t))}function d(){var e="string"==typeof k.sL;if(e&&!R[k.sL])return n(M);var t=e?f(k.sL,M,!0,y[k.sL]):l(M,k.sL.length?k.sL:void 0);return k.r>0&&(B+=t.r),e&&(y[k.sL]=t.top),p(t.language,t.value,!1,!0)}function b(){L+=void 0!==k.sL?d():h(),M=""}function v(e,n){L+=e.cN?p(e.cN,"",!0):"",k=Object.create(e,{parent:{value:k}})}function m(e,n){if(M+=e,void 0===n)return b(),0;var t=o(n,k);if(t)return t.skip?M+=n:(t.eB&&(M+=n),b(),t.rB||t.eB||(M=n)),v(t,n),t.rB?0:n.length;var r=u(k,n);if(r){var a=k;a.skip?M+=n:(a.rE||a.eE||(M+=n),b(),a.eE&&(M=n));do k.cN&&(L+=""),k.skip||(B+=k.r),k=k.parent;while(k!=r.parent);return r.starts&&v(r.starts,""),a.rE?0:n.length}if(c(n,k))throw new Error('Illegal lexeme "'+n+'" for mode "'+(k.cN||"")+'"');return M+=n,n.length||1}var N=w(e);if(!N)throw new Error('Unknown language: "'+e+'"');s(N);var x,k=i||N,y={},L="";for(x=k;x!=N;x=x.parent)x.cN&&(L=p(x.cN,"",!0)+L);var M="",B=0;try{for(var C,j,I=0;;){if(k.t.lastIndex=I,C=k.t.exec(t),!C)break;j=m(t.substr(I,C.index-I),C[0]),I=C.index+j}for(m(t.substr(I)),x=k;x.parent;x=x.parent)x.cN&&(L+="");return{r:B,value:L,language:e,top:k}}catch(O){if(-1!=O.message.indexOf("Illegal"))return{r:0,value:n(t)};throw O}}function l(e,t){t=t||E.languages||Object.keys(R);var r={r:0,value:n(e)},a=r;return t.forEach(function(n){if(w(n)){var t=f(n,e,!1);t.language=n,t.r>a.r&&(a=t),t.r>r.r&&(a=r,r=t)}}),a.language&&(r.second_best=a),r}function g(e){return E.tabReplace&&(e=e.replace(/^((<[^>]+>|\t)+)/gm,function(e,n){return n.replace(/\t/g,E.tabReplace)})),E.useBR&&(e=e.replace(/\n/g,"
      ")),e}function p(e,n,t){var r=n?x[n]:t,a=[e.trim()];return e.match(/\bhljs\b/)||a.push("hljs"),-1===e.indexOf(r)&&a.push(r),a.join(" ").trim()}function h(e){var n=i(e);if(!a(n)){var t;E.useBR?(t=document.createElementNS("http://www.w3.org/1999/xhtml","div"),t.innerHTML=e.innerHTML.replace(/\n/g,"").replace(//g,"\n")):t=e;var r=t.textContent,o=n?f(n,r,!0):l(r),s=u(t);if(s.length){var h=document.createElementNS("http://www.w3.org/1999/xhtml","div");h.innerHTML=o.value,o.value=c(s,u(h),r)}o.value=g(o.value),e.innerHTML=o.value,e.className=p(e.className,n,o.language),e.result={language:o.language,re:o.r},o.second_best&&(e.second_best={language:o.second_best.language,re:o.second_best.r})}}function d(e){E=o(E,e)}function b(){if(!b.called){b.called=!0;var e=document.querySelectorAll("pre code");Array.prototype.forEach.call(e,h)}}function v(){addEventListener("DOMContentLoaded",b,!1),addEventListener("load",b,!1)}function m(n,t){var r=R[n]=t(e);r.aliases&&r.aliases.forEach(function(e){x[e]=n})}function N(){return Object.keys(R)}function w(e){return e=(e||"").toLowerCase(),R[e]||R[x[e]]}var E={classPrefix:"hljs-",tabReplace:null,useBR:!1,languages:void 0},R={},x={};return e.highlight=f,e.highlightAuto=l,e.fixMarkup=g,e.highlightBlock=h,e.configure=d,e.initHighlighting=b,e.initHighlightingOnLoad=v,e.registerLanguage=m,e.listLanguages=N,e.getLanguage=w,e.inherit=o,e.IR="[a-zA-Z]\\w*",e.UIR="[a-zA-Z_]\\w*",e.NR="\\b\\d+(\\.\\d+)?",e.CNR="(-?)(\\b0[xX][a-fA-F0-9]+|(\\b\\d+(\\.\\d*)?|\\.\\d+)([eE][-+]?\\d+)?)",e.BNR="\\b(0b[01]+)",e.RSR="!|!=|!==|%|%=|&|&&|&=|\\*|\\*=|\\+|\\+=|,|-|-=|/=|/|:|;|<<|<<=|<=|<|===|==|=|>>>=|>>=|>=|>>>|>>|>|\\?|\\[|\\{|\\(|\\^|\\^=|\\||\\|=|\\|\\||~",e.BE={b:"\\\\[\\s\\S]",r:0},e.ASM={cN:"string",b:"'",e:"'",i:"\\n",c:[e.BE]},e.QSM={cN:"string",b:'"',e:'"',i:"\\n",c:[e.BE]},e.PWM={b:/\b(a|an|the|are|I|I'm|isn't|don't|doesn't|won't|but|just|should|pretty|simply|enough|gonna|going|wtf|so|such|will|you|your|like)\b/},e.C=function(n,t,r){var a=e.inherit({cN:"comment",b:n,e:t,c:[]},r||{});return a.c.push(e.PWM),a.c.push({cN:"doctag",b:"(?:TODO|FIXME|NOTE|BUG|XXX):",r:0}),a},e.CLCM=e.C("//","$"),e.CBCM=e.C("/\\*","\\*/"),e.HCM=e.C("#","$"),e.NM={cN:"number",b:e.NR,r:0},e.CNM={cN:"number",b:e.CNR,r:0},e.BNM={cN:"number",b:e.BNR,r:0},e.CSSNM={cN:"number",b:e.NR+"(%|em|ex|ch|rem|vw|vh|vmin|vmax|cm|mm|in|pt|pc|px|deg|grad|rad|turn|s|ms|Hz|kHz|dpi|dpcm|dppx)?",r:0},e.RM={cN:"regexp",b:/\//,e:/\/[gimuy]*/,i:/\n/,c:[e.BE,{b:/\[/,e:/\]/,r:0,c:[e.BE]}]},e.TM={cN:"title",b:e.IR,r:0},e.UTM={cN:"title",b:e.UIR,r:0},e.METHOD_GUARD={b:"\\.\\s*"+e.UIR,r:0},e});hljs.registerLanguage("python",function(e){var r={cN:"meta",b:/^(>>>|\.\.\.) 
/},b={cN:"string",c:[e.BE],v:[{b:/(u|b)?r?'''/,e:/'''/,c:[r],r:10},{b:/(u|b)?r?"""/,e:/"""/,c:[r],r:10},{b:/(u|r|ur)'/,e:/'/,r:10},{b:/(u|r|ur)"/,e:/"/,r:10},{b:/(b|br)'/,e:/'/},{b:/(b|br)"/,e:/"/},e.ASM,e.QSM]},a={cN:"number",r:0,v:[{b:e.BNR+"[lLjJ]?"},{b:"\\b(0o[0-7]+)[lLjJ]?"},{b:e.CNR+"[lLjJ]?"}]},l={cN:"params",b:/\(/,e:/\)/,c:["self",r,a,b]};return{aliases:["py","gyp"],k:{keyword:"and elif is global as in if from raise for except finally print import pass return exec else break not with class assert yield try while continue del or def lambda async await nonlocal|10 None True False",built_in:"Ellipsis NotImplemented"},i:/(<\/|->|\?)/,c:[r,a,b,e.HCM,{v:[{cN:"function",bK:"def",r:10},{cN:"class",bK:"class"}],e:/:/,i:/[${=;\n,]/,c:[e.UTM,l,{b:/->/,eW:!0,k:"None"}]},{cN:"meta",b:/^[\t ]*@/,e:/$/},{b:/\b(print|exec)\(/}]}}); \ No newline at end of file diff --git a/micropsi_server/static/js/monitor.js b/micropsi_server/static/js/monitor.js index 9176be58..4700a2d9 100644 --- a/micropsi_server/static/js/monitor.js +++ b/micropsi_server/static/js/monitor.js @@ -15,7 +15,11 @@ $(function(){ var currentMonitors = []; var currentSimulationStep = 0; - var currentNodenet = $.cookie('selected_nodenet'); + var currentNodenet = ''; + var cookieval = $.cookie('selected_nodenet'); + if (cookieval && cookieval.indexOf('/')){ + currentNodenet = cookieval.split('/')[0]; + } var capturedLoggers = { 'system': false, @@ -42,6 +46,10 @@ $(function(){ init(); + if(!$('#nodenet_editor').length && currentNodenet){ + refreshMonitors(); + } + $('.layoutbtn').on('click', function(event){ event.preventDefault(); var target = $(event.target); @@ -79,6 +87,11 @@ $(function(){ $(document).on('nodenet_changed', function(data, newNodenet){ currentNodenet = newNodenet; init(); + refreshMonitors(); + }); + $(document).on('nodenet_loaded', function(data, newNodenet){ + currentNodenet = newNodenet; + refreshMonitors(); }); log_container.on('click', '.logentry', function(event){ @@ -101,26 +114,6 @@ $(function(){ function init() { bindEvents(); - if (currentNodenet = $.cookie('selected_nodenet')) { - $('#loading').show(); - api.call('load_nodenet', { - nodenet_uid: currentNodenet, - include_links: false - }, function(data) { - $('#loading').hide(); - refreshMonitors(); - }, - function(data) { - $('#loading').hide(); - if(data.status == 500){ - api.defaultErrorCallback(data); - } else { - currentNodenet = null; - $.cookie('selected_nodenet', '', { expires: -1, path: '/' }); - dialogs.notification(data.data, "Info"); - } - }); - } } function getPollParams(){ diff --git a/micropsi_server/static/js/netapi_console.js b/micropsi_server/static/js/netapi_console.js new file mode 100644 index 00000000..bb161371 --- /dev/null +++ b/micropsi_server/static/js/netapi_console.js @@ -0,0 +1,458 @@ + + +// thanks to Chris Coyier at css-tricks.com +jQuery.fn.putCursorAt = function(index) { + return this.each(function() { + $(this).focus() + // If this function exists... + if (this.setSelectionRange) { + // ... then use it (Doesn't work in IE) + // Double the length because Opera is inconsistent about whether a carriage return is one character or two. Sigh. + var len = index; + if(len < 0){ + len = $(this).val().length * 2; + } + this.setSelectionRange(len, len); + } else if(this.createTextRange){ + var range = this.createTextRange(); + range.move('character', index); + range.select(); + } + else { + // ... 
otherwise replace the contents with itself + // (Doesn't work in Google Chrome) + $(this).val($(this).val()); + } + // Scroll to the bottom, in case we're in a tall textarea + // (Necessary for Firefox and Google Chrome) + this.scrollTop = 999999; + }); +}; + +/*! https://mths.be/startswith v0.2.0 by @mathias */ +if (!String.prototype.startsWith) { + (function() { + 'use strict'; // needed to support `apply`/`call` with `undefined`/`null` + var defineProperty = (function() { + // IE 8 only supports `Object.defineProperty` on DOM elements + try { + var object = {}; + var $defineProperty = Object.defineProperty; + var result = $defineProperty(object, object, object) && $defineProperty; + } catch(error) {} + return result; + }()); + var toString = {}.toString; + var startsWith = function(search) { + if (this == null) { + throw TypeError(); + } + var string = String(this); + if (search && toString.call(search) == '[object RegExp]') { + throw TypeError(); + } + var stringLength = string.length; + var searchString = String(search); + var searchLength = searchString.length; + var position = arguments.length > 1 ? arguments[1] : undefined; + // `ToInteger` + var pos = position ? Number(position) : 0; + if (pos != pos) { // better `isNaN` + pos = 0; + } + var start = Math.min(Math.max(pos, 0), stringLength); + // Avoid the `indexOf` call if no match is possible + if (searchLength + start > stringLength) { + return false; + } + var index = -1; + while (++index < searchLength) { + if (string.charCodeAt(start + index) != searchString.charCodeAt(index)) { + return false; + } + } + return true; + }; + if (defineProperty) { + defineProperty(String.prototype, 'startsWith', { + 'value': startsWith, + 'configurable': true, + 'writable': true + }); + } else { + String.prototype.startsWith = startsWith; + } + }()); +} + +function registerResizeHandler(){ + // resize handler for nodenet viewer: + var isDragging = false; + var container = $('#netapi_console .code_container'); + if($.cookie('netapi_console_height')){ + container.height($.cookie('netapi_console_height')); + } + var startHeight, startPos, newHeight; + $("a#consoleSizeHandle").mousedown(function(event) { + startHeight = container.height(); + startPos = event.pageY; + $(window).mousemove(function(event) { + isDragging = true; + newHeight = startHeight + (event.pageY - startPos); + container.height(newHeight); + }); + }); + $(window).mouseup(function(event) { + if(isDragging){ + $.cookie('netapi_console_height', container.height(), {expires:7, path:'/'}); + } + isDragging = false; + $(window).unbind("mousemove"); + }); +} + +$(function(){ + + var input = $('#console_input'); + var currentNodenet = ''; + var cookieval = $.cookie('selected_nodenet'); + if (cookieval && cookieval.indexOf('/')){ + currentNodenet = cookieval.split('/')[0]; + } + + var history = $('#console_history'); + var container = $('#netapi_console .code_container'); + var autocomplete_container = $('#console_autocomplete'); + + hljs.highlightBlock(history[0]); + + var command_history = []; + var history_pointer = -1; + + registerResizeHandler(); + + var nametypes = {}; + var autocomplete_options = {}; + var autocomplete_open = false; + var autocomplete_pointer = -1; + + if(currentNodenet){ + enable(); + } + + bindEvents(); + + $(document).on('nodenet_changed', function(event, new_uid){ + currentNodenet = new_uid; + if(new_uid) enable(); + else disable(); + }); + + function enable(){ + input.removeAttr("disabled"); + if($.isEmptyObject(autocomplete_options)){ + 
getAutocompleteOptions(); + } + } + + function getAutocompleteOptions(name){ + params = {nodenet_uid: currentNodenet}; + if(name){ + params['name'] = name; + } + api.call('get_netapi_signatures', params, function(data){ + if(name){ + var type = data.types[name] + nametypes[name] = type + autocomplete_options[type] = data.autocomplete_options[type]; + } else { + nametypes = data.types; + autocomplete_options = data.autocomplete_options; + } + }); + + } + + function disable(){ + input.attr('disabled', 'disabled') + } + + function isDisabled(){ + return input.attr('disabled'); + } + + function bindEvents(){ + autocomplete_container.on('click', function(event){ + if(isDisabled()) return; + autocomplete_select(event); + }); + input.keydown(function(event){ + if(isDisabled()) return; + var code = input.val(); + switch(event.keyCode){ + case 38: // arrow up + if(autocomplete_open){ + event.preventDefault(); + autocomplete_prev(); + } else if(code == '' && history_pointer == -1){ + event.preventDefault(); + history_pointer = command_history.length - 1; + input.val(command_history[history_pointer]) + input.putCursorAt(-1) + } else if(history_pointer > 0 && code == command_history[history_pointer]) { + event.preventDefault(); + history_pointer -= 1; + input.val(command_history[history_pointer]) + input.putCursorAt(-1) + } + break; + + case 40: // arrow down + if(autocomplete_open){ + event.preventDefault(); + autocomplete_next(); + } else if(history_pointer < command_history.length - 1 && code == command_history[history_pointer]) { + event.preventDefault(); + history_pointer += 1; + input.val(command_history[history_pointer]) + input.putCursorAt(-1) + } + break; + } + }); + input.keyup(function(event){ + if(isDisabled()) return; + var code = input.val(); + switch(event.keyCode){ + case 13: // Enter + if(autocomplete_open){ + autocomplete_select(event); + } else { + if(code.trim().length){ + submitInput(code); + } + } + break; + case 32: // spacebar + if(event.ctrlKey){ + autocomplete(true); + } + break; + case 27: // escape + stop_autocomplete(); + break; + case 38: // arrow up + case 40: // arrow down + // do nothing. 
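// note: arrow keys are fully handled in the keydown handler above (command
// history and autocomplete navigation); re-running autocomplete() on keyup
// would reset that selection, so this case is intentionally empty.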
+ break; + default: + history_pointer = -1 + autocomplete(); + } + }); + input.blur(function(){ + stop_autocomplete(); + }) + } + + function autocomplete_next(){ + if(autocomplete_pointer < autocomplete_container.children().length - 1){ + autocomplete_pointer += 1; + $('a.selected', autocomplete_container).removeClass('selected') + var child = $(autocomplete_container.children()[autocomplete_pointer]); + $(child.children()).addClass('selected'); + var pos = child.offset().top; + + autocomplete_container.scrollTop( + autocomplete_container.scrollTop() + child.position().top + - autocomplete_container.height()/2 + child.height()/2); + } + } + + function autocomplete_prev(){ + if(autocomplete_pointer > 0){ + autocomplete_pointer -= 1; + $('a.selected', autocomplete_container).removeClass('selected') + var child = $(autocomplete_container.children()[autocomplete_pointer]); + $(child.children()).addClass('selected'); + autocomplete_container.scrollTop( + autocomplete_container.scrollTop() + child.position().top + - autocomplete_container.height()/2 + child.height()/2); + + } + } + + function autocomplete(do_autoselect){ + autocomplete_open = true; + var code = input.val(); + if(code.indexOf('.') > -1){ + var parts = input.val().split('.'); + var last = parts[parts.length - 1]; + var obj = parts[parts.length - 2]; + obj = obj.match(/([a-zA-Z0-9_]+)/g); + if(obj) + obj = obj[obj.length - 1]; + if(!obj || !(obj in nametypes)){ + stop_autocomplete(); + } + autocomplete_properties(obj, last); + } else if(code.match(/ ?([a-z0-9]+)?$/m)){ + var parts = input.val().split(' '); + var last = parts[parts.length - 1]; + autocomplete_names(last); + } + if (do_autoselect && autocomplete_container.children().length == 1){ + autocomplete_select(); + } + } + + function autocomplete_names(start){ + html = []; + for(var key in nametypes){ + if(start.length == 0 || key.startsWith(start)){ + html.push('
<li><a href="#" data-complete="name" data="'+key+'">'+key+'</a></li>')
+            }
+        }
+        if(html.length == 0){
+            return stop_autocomplete();
+        }
+        autocomplete_container.html(html.join(''));
+        autocomplete_container.css({
+            'position': 'absolute',
+            'top': input.offset().top + input.height(),
+            'left': input.offset().left + (input.val().length * 4),
+            'display': 'block'
+        });
+    }
+
+    function autocomplete_properties(obj, last){
+        html = [];
+        var type = nametypes[obj];
+        var sorted = Object.keys(autocomplete_options[type]).sort();
+        for(var i in sorted){
+            var key = sorted[i];
+            if(key && (last == "" || key.startsWith(last))){
+                if(autocomplete_options[type][key] == null){
+                    html.push('<li><a href="#" data="'+key+'">'+key+'</a></li>');
+                } else {
+                    html.push('<li><a href="#" data="'+key+'">'+key+'()</a></li>
    • '); + } + } + } + if (html.length == 0){ + return stop_autocomplete(); + } + autocomplete_container.html(html.join('')); + autocomplete_container.css({ + 'position': 'absolute', + 'top': input.offset().top + input.height(), + 'left': input.offset().left + (input.val().length * 4), + 'display': 'block' + }); + } + + function autocomplete_select(event){ + if(event && $(event.target).attr('id') == 'console_input'){ + var el = $('a.selected', autocomplete_container) + } else { + if(event){ + var el = $(event.target); + } else if (autocomplete_container.children().length == 1) { + var el = $('a', autocomplete_container); + } + } + var val = input.val() + if(el.attr('data-complete') == 'name'){ + if(val[val.length-1] == " "){ + console.log(val); + input.val(val + el.attr('data')); + } else { + var parts = val.split(" "); + parts.pop(); + var pre = ''; + if(parts.length){ + pre = parts.join(" ") +" "; + } + input.val(pre + el.attr('data')); + } + return stop_autocomplete(); + } + var selected = el.attr('data'); + var parts = val.split('.'); + var last = null; + var obj = null; + if(val.indexOf('.') > -1){ + var last = parts[parts.length - 1]; + var obj = parts[parts.length - 2]; + obj = obj.match(/([a-zA-Z0-9_]+)/g); + obj = obj[obj.length - 1]; + } + parts.pop() + val = parts.join('.') + '.' + selected; + var params = []; + var type = nametypes[obj]; + var data = autocomplete_options[type][selected]; + if(data != null){ + for(var i=0; i < data.length; i++){ + if(!('default' in data[i])){ + params.push(data[i].name); + } else { + if(data[i].default == null){ + params.push(data[i].name+'=None') + } else if(isNaN(data[i].default)){ + params.push(data[i].name+'='+'"'+data[i].default+'"') + } else { + params.push(data[i].name+'='+data[i].default) + } + } + } + var length = val.length; + val += '(' + params.join(', ') + ')'; + input.val(val); + input.putCursorAt(length + 1); + + } else { + input.val(val); + input.putCursorAt(-1); + } + stop_autocomplete(); + } + + function stop_autocomplete(){ + autocomplete_open = false; + autocomplete_pointer = -1; + autocomplete_container.html('') + autocomplete_container.hide(); + } + + function submitInput(code){ + api.call('run_netapi_command', {nodenet_uid: currentNodenet, command: code}, function(data){ + getAutocompleteOptions(); + data = data.replace(/\n+/g, '\n') + var hist = history.text(); + hist += "\n" + code; + if(data){ + hist += "\n# " + data.replace(/\n/g, "\n# "); + } + history.text(hist); + input.val(''); + hljs.highlightBlock(history[0]); + container.scrollTop(999999999) + command_history.push(code); + $(document).trigger('runner_stepped'); + }, function(error){ + var hist = history.text(); + if(error.data){ + hist += "\n" + code; + hist += '\n# ERROR: '+error.data.replace(/\n/g, "\n# "); + } + history.text(hist); + input.val(''); + hljs.highlightBlock(history[0]); + container.scrollTop(999999999) + command_history.push(code); + }); + } + +}); + diff --git a/micropsi_server/static/js/nodenet.js b/micropsi_server/static/js/nodenet.js index 89e42d3f..53cd1ee1 100644 --- a/micropsi_server/static/js/nodenet.js +++ b/micropsi_server/static/js/nodenet.js @@ -91,12 +91,25 @@ prerenderLayer.visible = false; viewProperties.zoomFactor = parseFloat($.cookie('zoom_factor')) || viewProperties.zoomFactor; -currentNodenet = $.cookie('selected_nodenet') || ''; -currentNodeSpace = $.cookie('selected_nodespace') || ''; -if(!$.cookie('renderlinks')){ - $.cookie('renderlinks', 'always'); +var nodenetcookie = $.cookie('selected_nodenet') || ''; +if 
(nodenetcookie && nodenetcookie.indexOf('/') > 0){ + nodenetcookie = nodenetcookie.split("/"); + currentNodenet = nodenetcookie[0]; + currentNodeSpace = nodenetcookie[1] || null; +} else { + currentNodenet = ''; + currentNodeSpace = ''; } +nodespaceProperties = {}; + +// compatibility +nodespace_property_defaults = { + 'renderlinks': ($.cookie('renderlinks') || 'always'), + 'activation_display': 'redgreen' +} + + currentWorldadapter = null; var currentSheaf = "default"; @@ -110,6 +123,7 @@ selectionBox.name = "selectionBox"; nodetypes = {}; native_modules = {}; +native_module_categories = {}; available_gatetypes = []; nodespaces = {}; sorted_nodetypes = []; @@ -150,17 +164,14 @@ registerResizeHandler(); globalDataSources = []; globalDataTargets = []; -$(document).on('load_nodenet', function(event, uid){ - ns = 'Root'; - if(uid == currentNodenet){ - ns = currentNodeSpace; - } - setCurrentNodenet(uid, ns); -}); +available_operations = {}; $(document).on('nodenet_changed', function(event, new_nodenet){ setCurrentNodenet(new_nodenet, null, true); }); +$(document).on('new_world_created', function(data){ + get_available_worlds(); +}); function toggleButtons(on){ if(on) @@ -176,25 +187,25 @@ function get_available_worlds(){ html += ''; } $('#nodenet_world').html(html); + if(currentNodenet && nodenet_data){ + $('#nodenet_world').val(nodenet_data.world); + } }); } function get_available_worldadapters(world_uid, callback){ worldadapters = {}; if(world_uid){ - api.call("get_worldadapters", {world_uid: world_uid}, + api.call("get_worldadapters", {world_uid: world_uid, nodenet_uid: currentNodenet}, success=function(data){ worldadapters = data; - currentWorld = world_uid; var str = ''; var name; var keys = Object.keys(worldadapters); keys.sort(); for (var idx in keys){ name = keys[idx]; - worldadapters[name].datasources = worldadapters[name].datasources.sort(); - worldadapters[name].datatargets = worldadapters[name].datatargets.sort(); - str += ''; + str += ''; } $('#nodenet_worldadapter').html(str); if(callback){ @@ -224,7 +235,6 @@ function setNodenetValues(data){ $('#nodenet_uid').val(currentNodenet); $('#nodenet_name').val(data.name); $('#nodenet_snap').attr('checked', data.snap_to_grid); - $('#nodenet_renderlinks').val(nodenet_data.renderlinks); if (!jQuery.isEmptyObject(worldadapters)) { var worldadapter_select = $('#nodenet_worldadapter'); worldadapter_select.val(data.worldadapter); @@ -234,47 +244,69 @@ function setNodenetValues(data){ } } +function buildCategoryTree(item, path, idx){ + if (idx < path.length){ + name = path[idx]; + if (!item[name]){ + item[name] = {}; + } + buildCategoryTree(item[name], path, idx + 1); + } +} + + +api.call("get_available_operations", {}, function(data){ + available_operations = data +}); + + function setCurrentNodenet(uid, nodespace, changed){ if(!nodespace){ - nodespace = "Root"; + nodespace = null; } $('#loading').show(); - api.call('load_nodenet', - {nodenet_uid: uid, - nodespace: nodespace, - include_links: $.cookie('renderlinks') == 'always', - }, + api.call('get_nodenet_metadata', {nodenet_uid: uid}, function(data){ $('#loading').hide(); nodenetscope.activate(); toggleButtons(true); var nodenetChanged = changed || (uid != currentNodenet); - var nodespaceChanged = changed || (nodespace != currentNodeSpace); - + currentNodenet = uid; + currentNodeSpace = data.rootnodespace; + currentWorldadapter = data.worldadapter; + nodespaceProperties = data.nodespace_ui_properties; + for(var key in data.nodespaces){ + if(!(key in nodespaceProperties)){ + 
nodespaceProperties[key] = {}; + } + if(!nodespaceProperties[key].renderlinks){ + nodespaceProperties[key].renderlinks = nodespace_property_defaults.renderlinks; + } + if(!nodespaceProperties[key].activation_display){ + nodespaceProperties[key].activation_display = nodespace_property_defaults.activation_display; + } + } if(nodenetChanged){ - $(document).trigger('nodenetChanged', uid); clipboard = {}; selection = {}; nodespaces = {}; + nodes = {}; + links = {}; + nodeLayer.removeChildren(); + linkLayer.removeChildren(); } + $(document).trigger('nodenet_loaded', uid); nodenet_data = data; - nodenet_data['renderlinks'] = $.cookie('renderlinks') || 'always'; nodenet_data['snap_to_grid'] = $.cookie('snap_to_grid') || viewProperties.snap_to_grid; showDefaultForm(); - currentNodeSpace = data['nodespace']; - currentNodenet = uid; - - nodes = {}; - links = {}; - nodeLayer.removeChildren(); - linkLayer.removeChildren(); - $.cookie('selected_nodenet', currentNodenet, { expires: 7, path: '/' }); + $.cookie('selected_nodenet', currentNodenet+"/", { expires: 7, path: '/' }); if(nodenetChanged || jQuery.isEmptyObject(nodetypes)){ nodetypes = data.nodetypes; + native_modules = data.native_modules; sorted_nodetypes = Object.keys(nodetypes); sorted_nodetypes.sort(function(a, b){ if(a < b) return -1; @@ -288,9 +320,17 @@ function setCurrentNodenet(uid, nodespace, changed){ if(a > b) return 1; return 0; }); + + categories = []; for(var key in native_modules){ nodetypes[key] = native_modules[key]; + categories.push(native_modules[key].category.split('/')); + } + native_module_categories = {} + for(var i =0; i < categories.length; i++){ + buildCategoryTree(native_module_categories, categories[i], 0); } + available_gatetypes = []; for(var key in nodetypes){ $.merge(available_gatetypes, nodetypes[key].gatetypes || []); @@ -301,22 +341,16 @@ function setCurrentNodenet(uid, nodespace, changed){ showDefaultForm(); }); get_available_gatefunctions(); - setNodespaceData(data, true); getNodespaceList(); - } else { - setNodespaceData(data, (nodespaceChanged)); + $(document).trigger('refreshNodenetList'); } - $(document).trigger('refreshNodenetList'); nodenet_loaded = true; + refreshNodespace(nodespace) }, function(data) { - if(data.status == 500 || data.status === 0){ - api.defaultErrorCallback(data); - } else { - currentNodenet = null; - $.cookie('selected_nodenet', '', { expires: -1, path: '/' }); - dialogs.notification(data.data, "Info"); - } + api.defaultErrorCallback(data); + $('#loading').hide(); + $.cookie('selected_nodenet', '', { expires: -1, path: '/' }); }); } @@ -328,6 +362,9 @@ function getNodespaceList(){ for(var i=0; i < sorted.length; i++){ nodespaces[sorted[i].uid] = sorted[i]; html += '
<li><a href="#" data="'+sorted[i].uid+'">'+sorted[i].name+'</a></li>
    • '; + for(var key in sorted[i].properties){ + nodespaceProperties[sorted[i].uid][key] = sorted[i].properties[key]; + } } $('#nodespace_control ul').html(html); $("#current_nodespace_name").text(nodespaces[currentNodeSpace].name); @@ -340,7 +377,7 @@ function setNodespaceData(data, changed){ nodenetscope.activate(); if (data && !jQuery.isEmptyObject(data)){ currentSimulationStep = data.current_step || 0; - currentWorldadapter = data.worldadapter; + nodenetRunning = data.is_active; if(!('selectionBox' in nodeLayer)){ @@ -395,7 +432,7 @@ function setNodespaceData(data, changed){ } } - if(nodenet_data.renderlinks == 'selection'){ + if(nodespaceProperties[currentNodeSpace].renderlinks == 'selection'){ loadLinksForSelection(function(data){ for(var uid in links) { if(!(uid in data)) { @@ -490,18 +527,20 @@ function setNodespaceDiffData(data, changed){ } // activations: for(var uid in data.activations){ - activations = data.activations[uid]; - var gen = 0 - for(var i=0; i < nodes[uid].gateIndexes.length; i++){ - var type = nodes[uid].gateIndexes[i]; - nodes[uid].gates[type].sheaves['default'].activation = activations[i]; - if(type == 'gen'){ - gen = activations[i]; + if (uid in nodes){ + activations = data.activations[uid]; + var gen = 0 + for(var i=0; i < nodes[uid].gateIndexes.length; i++){ + var type = nodes[uid].gateIndexes[i]; + nodes[uid].gates[type].sheaves['default'].activation = activations[i]; + if(type == 'gen'){ + gen = activations[i]; + } } + nodes[uid].sheaves['default'].activation = gen; + setActivation(nodes[uid]); + redrawNodeLinks(nodes[uid]); } - nodes[uid].sheaves['default'].activation = gen; - setActivation(nodes[uid]); - redrawNodeLinks(nodes[uid]); } updateModulators(data.modulators); @@ -550,14 +589,14 @@ function addLinks(link_data){ function get_nodenet_params(){ return { - 'nodespace': currentNodeSpace, + 'nodespaces': [currentNodeSpace], 'step': currentSimulationStep - 1, - 'include_links': $.cookie('renderlinks') == 'always', + 'include_links': nodespaceProperties[currentNodeSpace].renderlinks == 'always', } } function get_nodenet_diff_params(){ return { - 'nodespace': currentNodeSpace, + 'nodespaces': [currentNodeSpace], 'step': window.currentSimulationStep, } } @@ -581,19 +620,20 @@ function refreshNodespace(nodespace, step, callback){ nodespace = nodespace || currentNodeSpace; params = { nodenet_uid: currentNodenet, - nodespace: nodespace, - step: currentSimulationStep + nodespaces: [nodespace], + include_links: true }; - if(step){ - params.step = step; + if(nodespaceProperties[nodespace] && nodespaceProperties[nodespace].renderlinks != 'always'){ + params.include_links = false; } - params.include_links = nodenet_data['renderlinks'] == 'always'; - api.call('get_nodespace', params , success=function(data){ + api.call('get_nodes', params , success=function(data){ var changed = nodespace != currentNodeSpace; if(changed){ currentNodeSpace = nodespace; - $.cookie('selected_nodespace', currentNodeSpace, { expires: 7, path: '/' }); - $("#current_nodespace_name").text(nodespaces[nodespace].name); + $.cookie('selected_nodenet', currentNodenet+"/"+currentNodeSpace, { expires: 7, path: '/' }); + if(!$.isEmptyObject(nodespaces)){ + $("#current_nodespace_name").text(nodespaces[nodespace].name); + } nodeLayer.removeChildren(); linkLayer.removeChildren(); } @@ -616,21 +656,28 @@ function updateModulators(data){ var sorted = []; globalDataSources = []; globalDataTargets = []; - + if($.isEmptyObject(data)){ + return $('.modulators_container').hide(); + } + 
$('.modulators_container').show(); for(key in data){ sorted.push({'name': key, 'value': data[key]}); } sorted.sort(sortByName); + var emo_html = ''; + var base_html = '' // display reversed to get emo_ before base_ - for(var i = sorted.length-1; i >=0; i--){ - html += ''+sorted[i].name+''+sorted[i].value.toFixed(2)+'' + for(var i = 0; i < sorted.length; i++){ + var html = ''+sorted[i].name+''+sorted[i].value.toFixed(2)+''; if(sorted[i].name.substr(0, 3) == "emo"){ + emo_html += html globalDataSources.push(sorted[i].name); } else { + base_html += html globalDataTargets.push(sorted[i].name); } } - table.html(html); + table.html(emo_html + base_html); $('button', table).each(function(idx, button){ $(button).on('click', function(evt){ evt.preventDefault(); @@ -1164,13 +1211,12 @@ function createPlaceholder(node, direction, point){ // draw link function renderLink(link, force) { - if(nodenet_data.renderlinks == 'no' && !force){ + if(nodespaceProperties[currentNodeSpace].renderlinks == 'no'){ return; } - if(nodenet_data.renderlinks == 'selection' && !force){ - var is_hovered = hoverNode && (link.sourceNodeUid == hoverNode.uid || link.targetNodeUid == hoverNode.uid); + if(nodespaceProperties[currentNodeSpace].renderlinks == 'selection'){ var is_selected = selection && (link.sourceNodeUid in selection || link.targetNodeUid in selection); - if(!is_hovered && !is_selected){ + if(!is_selected){ return; } } @@ -1213,7 +1259,13 @@ function renderLink(link, force) { linkItem.name = "link"; var linkContainer = new Group(linkItem); linkContainer.name = link.uid; - + if (nodespaceProperties[currentNodeSpace].activation_display == 'alpha'){ + if(sourceNode){ + linkContainer.opacity = Math.max(0.1, sourceNode.sheaves[currentSheaf].activation) + } else { + linkContainer.opacity = 0.1 + } + } linkLayer.addChild(linkContainer); } @@ -1758,8 +1810,23 @@ function setActivation(node) { } if (node.uid in nodeLayer.children) { var nodeItem = nodeLayer.children[node.uid]; - node.fillColor = nodeItem.children["activation"].children["body"].fillColor = - activationColor(node.sheaves[currentSheaf].activation, viewProperties.nodeColor); + if((nodespaceProperties[currentNodeSpace].activation_display != 'alpha') || node.sheaves[currentSheaf].activation > 0.5){ + node.fillColor = nodeItem.children["activation"].children["body"].fillColor = + activationColor(node.sheaves[currentSheaf].activation, viewProperties.nodeColor); + } + if(nodespaceProperties[currentNodeSpace].activation_display == 'alpha'){ + for(var i in nodeItem.children){ + if(nodeItem.children[i].name == 'labelText'){ + nodeItem.children[i].opacity = 0; + if (node.sheaves[currentSheaf].activation > 0.5){ + nodeItem.children[i].opacity = node.sheaves[currentSheaf].activation; + } + } else { + nodeItem.children[i].opacity = Math.max(0.1, node.sheaves[currentSheaf].activation) + } + } + } + if (!isCompact(node) && (node.slotIndexes.length || node.gateIndexes.length)) { var i=0; var type; @@ -1775,7 +1842,7 @@ function setActivation(node) { viewProperties.nodeColor); } } - } else console.warn ("node "+node.uid+" not found in current view"); + } } // mark node as selected, and add it to the selected nodes @@ -1843,7 +1910,7 @@ function deselectLink(linkUid) { delete selection[linkUid]; if(linkUid in linkLayer.children){ var linkShape = linkLayer.children[linkUid].children["link"]; - if(nodenet_data.renderlinks == 'no' || nodenet_data.renderlinks == 'selection'){ + if(nodespaceProperties[currentNodeSpace].renderlinks == 'no' || 
nodespaceProperties[currentNodeSpace].renderlinks == 'selection'){ linkLayer.children[linkUid].remove(); } linkShape.children["line"].strokeColor = links[linkUid].strokeColor; @@ -2349,7 +2416,7 @@ function onMouseUp(event) { selectionRectangle.width = selectionRectangle.height = 1; selectionBox.setBounds(selectionRectangle); } - if(currentNodenet && nodenet_data && nodenet_data['renderlinks'] == 'selection'){ + if(currentNodenet && nodenet_data && nodespaceProperties[currentNodeSpace].renderlinks == 'selection'){ loadLinksForSelection(); } } @@ -2552,6 +2619,37 @@ function initializeDialogs(){ var clickPosition = null; +function buildRecursiveDropdown(cat, html, current_category, generate_items){ + if(!current_category){ + current_category=''; + } + var catentries = [] + for(var key in cat){ + catentries.push(key); + } + catentries.sort(); + for(var i = 0; i < catentries.length; i++){ + if(catentries[i] == ''){ + continue; + } + var newcategory = current_category || ''; + if(current_category == ''){ + newcategory += catentries[i] + } + else { + newcategory += '/'+catentries[i]; + } + html += '
    • '+catentries[i]+''; + html += '
    • '; + } + + html += generate_items(current_category); + + return html +} + function openContextMenu(menu_id, event) { event.cancelBubble = true; if(!currentNodenet){ @@ -2571,39 +2669,46 @@ function openContextMenu(menu_id, event) { html += '
<li><a href="#" data-create-node="' + sorted_nodetypes[idx] + '">Create ' + sorted_nodetypes[idx] +'</a></li>
    • '; } if(Object.keys(native_modules).length){ - html += '
    • Create Native Module'; + html += '
    • Create Native Module'; html += '
    • '; } - html += '
    • Autoalign Nodes
    • '; html += '
    • a").on('click', function(event){event.stopPropagation();}) } function openMultipleNodesContextMenu(event){ - var typecheck = null; - var sametype = true; var node = null; var compact = false; + var nodetypes = []; + var count = 0 for(var uid in selection){ + if(!node) node = nodes[uid]; if(isCompact(nodes[uid])) { compact = true; } - if(typecheck == null || typecheck == nodes[uid].type){ - typecheck = nodes[uid].type; - node = nodes[uid]; - } else { - sametype = false; + if(nodetypes.indexOf(nodes[uid].type) == -1){ + nodetypes.push(nodes[uid].type); } + count += 1; } var menu = $('#multi_node_menu .nodenet_menu'); var html = ''; @@ -2613,7 +2718,10 @@ function openMultipleNodesContextMenu(event){ html += '
    • Copy nodes
    • '+ '
    • Paste nodes
    • '+ '
    • Delete nodes
    • '; - if(sametype){ + + html += getOperationsDropdownHTML(nodetypes, count); + + if(nodetypes.length == 1){ html += '
    • ' + getNodeLinkageContextMenuHTML(node); } html += '
    • Generate netapi fragment
    • '; @@ -2626,6 +2734,50 @@ function openMultipleNodesContextMenu(event){ openContextMenu('#multi_node_menu', event); } +function getOperationsDropdownHTML(nodetypes, count){ + operation_categories = {}; + sorted_operations = []; + + applicable_operations = {}; + for(var key in available_operations){ + var conditions = available_operations[key].selection; + if((conditions.nodetypes.length == 0 || $(nodetypes).not(conditions.nodetypes).get().length == 0) && + (count >= conditions.mincount) && + (conditions.maxcount < 0 || count <= conditions.maxcount)){ + applicable_operations[key] = available_operations[key] + } + } + + categories = []; + for(var key in applicable_operations){ + categories.push(applicable_operations[key].category.split('/')); + } + operation_categories = {} + for(var i =0; i < categories.length; i++){ + buildCategoryTree(operation_categories, categories[i], 0); + } + sorted_operations = Object.keys(applicable_operations).sort(); + + var html = ''; + if(sorted_operations.length){ + html += '
    • Operations
    • '; + } else { + html += '
    • Operations
    • '; + } + return html; +} + function getNodeLinkageContextMenuHTML(node){ var html = ''; if (node.gateIndexes.length) { @@ -2696,7 +2848,6 @@ function handleContextMenu(event) { switch (clickType) { case null: // create nodes var type = $el.attr("data-create-node"); - var autoalign = $el.attr("data-auto-align"); var callback = function(data){ dialogs.notification('Node created', 'success'); }; @@ -2727,9 +2878,7 @@ function handleContextMenu(event) { }; break; } - if(autoalign){ - autoalignmentHandler(); - } else if(type) { + if(type) { if(nodenet_data.snap_to_grid){ var xpos = Math.round(clickPosition.x / 10) * 10; var ypos = Math.round(clickPosition.y / 10) * 10; @@ -2741,6 +2890,10 @@ function handleContextMenu(event) { ypos/viewProperties.zoomFactor, "", type, null, callback); } else{ + if($el.attr('data-run-operation')){ + selectOperation($el.attr('data-run-operation')); + break; + } return false; } break; @@ -2786,18 +2939,22 @@ function handleContextMenu(event) { } break; default: - var linktype = $(event.target).attr('data-link-type'); - if (linktype) { - var forwardlinktype = linktype; - if(forwardlinktype.indexOf('/')){ - forwardlinktype = forwardlinktype.split('/')[0]; - } - for(var uid in selection){ - clickIndex = nodes[uid].gateIndexes.indexOf(forwardlinktype); - createLinkHandler(uid, clickIndex, linktype); - } + if($el.attr('data-run-operation')){ + selectOperation($el.attr('data-run-operation')); } else { - openLinkCreationDialog(path.name) + var linktype = $(event.target).attr('data-link-type'); + if (linktype) { + var forwardlinktype = linktype; + if(forwardlinktype.indexOf('/')){ + forwardlinktype = forwardlinktype.split('/')[0]; + } + for(var uid in selection){ + clickIndex = nodes[uid].gateIndexes.indexOf(forwardlinktype); + createLinkHandler(uid, clickIndex, linktype); + } + } else { + openLinkCreationDialog(path.name) + } } } break; @@ -2847,6 +3004,79 @@ function handleContextMenu(event) { view.draw(); } +function selectOperation(name){ + var modal = $('#operations-modal'); + if(available_operations[name].parameters.length){ + $('#recipe_modal .docstring').html(available_operations[name].docstring); + var html = ''; + for(var i in available_operations[name].parameters){ + var param = available_operations[name].parameters[i]; + html += '' + + '
      '+ + ''+ + '
      '+ + ''+ + '
      '+ + '
      '; + } + $('fieldset', modal).html(html); + var run = function(){ + data = $('form', modal).serializeArray(); + parameters = {}; + for(var i=0; i < data.length; i++){ + parameters[data[i].name] = data[i].value + } + modal.modal('hide'); + runOperation(name, parameters); + }; + $('form', modal).on('submit', run); + $('.btn-primary', modal).on('click', run); + modal.modal('show'); + } else { + runOperation(name); + } +} + +function runOperation(name, params){ + var selection_uids = Object.keys(selection); + if(selection_uids.length == 0){ + selection_uids = [currentNodeSpace]; + } + api.call('run_operation', { + 'nodenet_uid': currentNodenet, + 'name': $el.attr('data-run-operation'), + 'parameters': params || {}, + 'selection_uids': selection_uids}, function(data){ + refreshNodespace(); + if(!$.isEmptyObject(data)){ + html = ''; + if(data.content_type && data.content_type.indexOf("image") > -1){ + html += '
<img src="data:' + data.content_type + ';base64,' + data.data + '"/>
      '; + delete data.content_type + delete data.data + } + if(Object.keys(data).length){ + html += '
      '; + for(var key in data){ + html += '
      '+key+':
      '; + if(typeof data[key] == 'string'){ + html += '
      '+data[key]+'
      '; + } else { + html += '
      '+JSON.stringify(data[key])+'
      '; + } + } + html += '
      '; + } + if(html){ + $('#recipe_result .modal-body').html(html); + $('#recipe_result').modal('show'); + $('#recipe_result button').off(); + } + } + } + ); +} + function openLinkCreationDialog(nodeUid){ $("#link_target_node").html(''); $('#link_target_slot').html(''); @@ -2867,52 +3097,51 @@ function openLinkCreationDialog(nodeUid){ } function get_datasource_options(worldadapter, value){ - var sources = worldadapters[worldadapter].datasources; - html = ''; - for(var i in sources){ - html += ''; - } - if(value && sources.indexOf(value) < 0) { - html += ''; + var html = ''; + if(worldadapter){ + var sources = worldadapters[worldadapter].datasources; + html += ''; + for(var i in sources){ + html += ''; + } + if(value && sources.indexOf(value) < 0) { + html += ''; + } + html += ''; } - html += ''; - html += ''; - for(var i in globalDataSources){ - html += ''; + if(globalDataSources.length){ + html += ''; + for(var i in globalDataSources){ + html += ''; + } + html += ''; } - html += ''; return html; } function get_datatarget_options(worldadapter, value){ - var targets = worldadapters[worldadapter].datatargets; - html = ''; - for(var i in targets){ - html += ''; - } - if(value && targets.indexOf(value) < 0) { - html += ''; + var html = ''; + if(worldadapter){ + var targets = worldadapters[worldadapter].datatargets; + html += ''; + for(var i in targets){ + html += ''; + } + if(value && targets.indexOf(value) < 0) { + html += ''; + } + html += ''; } - html += ''; - html += ''; - for(var i in globalDataTargets){ - html += ''; + if(globalDataTargets.length){ + html += ''; + for(var i in globalDataTargets){ + html += ''; + } + html += ''; } - html += ''; return html; } -// rearrange nodes in the current nodespace -function autoalignmentHandler() { - api.call("align_nodes", { - nodenet_uid: currentNodenet, - nodespace: currentNodeSpace - }, - function(data){ - setCurrentNodenet(currentNodenet, currentNodeSpace); - }); -} - // let user create a new node function createNodeHandler(x, y, name, type, parameters, callback) { params = {}; @@ -2942,6 +3171,9 @@ function createNodeHandler(x, y, name, type, parameters, callback) { } api.call(method, params, success=function(uid){ + if(type == 'Nodespace'){ + nodespaceProperties[uid] = nodespace_property_defaults + } addNode(new Node(uid, x, y, currentNodeSpace, name || '', type, null, null, parameters)); view.draw(); selectNode(uid); @@ -3023,7 +3255,7 @@ function deleteNodeHandler(nodeUid) { function deleteNodespaceOnServer(nodespace_uid){ var params = { nodenet_uid: currentNodenet, - nodespace_uid: nodespace_uid + nodespace: nodespace_uid } api.call("delete_nodespace", params, success=function(data){ @@ -3301,7 +3533,7 @@ function finalizeLinkHandler(nodeUid, slotIndex) { nodes[link.sourceNodeUid].linksToOutside.push(link.uid); } } - if(nodenet_data.renderlinks == 'always'){ + if(nodespaceProperties[currentNodeSpace].renderlinks == 'always'){ addLink(link); } }); @@ -3474,7 +3706,7 @@ function updateNodeParameters(nodeUid, parameters){ nodenet_uid: currentNodenet, node_uid: nodeUid, parameters: parameters - }, api.defaultSuccessCallback, api.defaultErrorCallback, "post"); + }, api.defaultSuccessCallback, api.defaultErrorCallback); } // handler for renaming the node @@ -3554,8 +3786,6 @@ function handleEditNodenet(event){ if(worldadapter){ params.worldadapter = worldadapter; } - nodenet_data.renderlinks = $('#nodenet_renderlinks').val(); - $.cookie('renderlinks', nodenet_data.renderlinks || '', {path: '/', expires: 7}) nodenet_data.snap_to_grid = 
$('#nodenet_snap').attr('checked'); $.cookie('snap_to_grid', nodenet_data.snap_to_grid || '', {path: '/', expires: 7}) api.call("set_nodenet_properties", params, @@ -3564,7 +3794,8 @@ function handleEditNodenet(event){ if(reload){ window.location.reload(); } else { - setCurrentNodenet(currentNodenet, currentNodeSpace); + // setCurrentNodenet(currentNodenet, currentNodeSpace); + refreshNodespace(); } } ); @@ -3576,6 +3807,27 @@ function handleEditNodespace(event){ if(name != nodespaces[currentNodeSpace].name){ renameNode(currentNodeSpace, name); } + properties = {}; + properties['renderlinks'] = $('#nodespace_renderlinks').val(); + properties['activation_display'] = $('#nodespace_activation_display').val(); + var update = false; + for(var key in properties){ + if(properties[key] != nodespaceProperties[currentNodeSpace][key]){ + update = true; + nodespaceProperties[currentNodeSpace][key] = properties[key]; + } else { + delete properties[key]; + } + } + if(update){ + params = {nodenet_uid: currentNodenet, nodespace_uid: currentNodeSpace, properties: properties} + api.call('set_nodespace_properties', params); + if ('renderlinks' in properties){ + refreshNodespace(); + } else { + redrawNodeNet(); + } + } } @@ -3822,16 +4074,12 @@ function getNodeParameterHTML(parameters, parameter_values){ var i; switch(name){ case "datatarget": - if(currentWorldadapter in worldadapters){ - var opts = get_datatarget_options(currentWorldadapter, value); - input = ""; - } + var opts = get_datatarget_options(currentWorldadapter, value); + input = ""; break; case "datasource": - if(currentWorldadapter in worldadapters){ - var opts = get_datasource_options(currentWorldadapter, value); - input = ""; - } + var opts = get_datasource_options(currentWorldadapter, value); + input = ""; break; default: if(parameter_values && parameter_values[name]){ @@ -3887,12 +4135,8 @@ function updateNodespaceForm(){ } else { $('#nodespace_name').removeAttr('disabled'); } - var nodetypehtml = ''; - for(var idx in sorted_nodetypes){ - if(nodetypes[sorted_nodetypes[idx]].gatetypes && nodetypes[sorted_nodetypes[idx]].gatetypes.length > 0){ - nodetypehtml += ''; - } - } + $('#nodespace_renderlinks').val(nodespaceProperties[currentNodeSpace].renderlinks); + $('#nodespace_activation_display').val(nodespaceProperties[currentNodeSpace].activation_display); } } diff --git a/micropsi_server/static/js/world.js b/micropsi_server/static/js/world.js index 3a5a7d65..7822e6db 100644 --- a/micropsi_server/static/js/world.js +++ b/micropsi_server/static/js/world.js @@ -1,56 +1,58 @@ -var canvas = $('#world'); +$(function(){ + currentWorld = $.cookie('selected_world') || null; + currentWorldSimulationStep = 0; -currentWorld = $.cookie('selected_world') || null; -currentWorldSimulationStep = 0; + worldRunning = false; + wasRunning = false; -worldRunning = false; -wasRunning = false; + registerResizeHandler(); -registerResizeHandler(); - -function get_world_data(){ - return {step: currentWorldSimulationStep}; -} + function get_world_data(){ + return {step: currentWorldSimulationStep}; + } -function set_world_data(data){ - if(!jQuery.isEmptyObject(data)){ - currentWorldSimulationStep = data.current_step; + function set_world_data(data){ + if(!jQuery.isEmptyObject(data)){ + currentWorldSimulationStep = data.current_step; + } } -} - -register_stepping_function('world', get_world_data, set_world_data); - -function updateViewSize() { - view.draw(true); -} - -function registerResizeHandler(){ - // resize handler for nodenet viewer: - var isDragging = false; - var 
container = $('.section.world .editor_field'); - if($.cookie('world_editor_height')){ - container.height($.cookie('world_editor_height')); - try{ - updateViewSize(); - } catch(err){} + + register_stepping_function('world', get_world_data, set_world_data); + + function updateViewSize() { + if(typeof view != 'undefined'){ + view.draw(true); + } } - var startHeight, startPos, newHeight; - $("a#worldSizeHandle").mousedown(function(event) { - startHeight = container.height(); - startPos = event.pageY; - $(window).mousemove(function(event) { - isDragging = true; - newHeight = startHeight + (event.pageY - startPos); - container.height(newHeight); - updateViewSize(); - }); - }); - $(window).mouseup(function(event) { - if(isDragging){ - $.cookie('world_editor_height', container.height(), {expires:7, path:'/'}); + + function registerResizeHandler(){ + // resize handler for nodenet viewer: + var isDragging = false; + var container = $('.section.world .editor_field'); + if($.cookie('world_editor_height')){ + container.height($.cookie('world_editor_height')); + try{ + updateViewSize(); + } catch(err){} } - isDragging = false; - $(window).unbind("mousemove"); - }); -} + var startHeight, startPos, newHeight; + $("a#worldSizeHandle").mousedown(function(event) { + startHeight = container.height(); + startPos = event.pageY; + $(window).mousemove(function(event) { + isDragging = true; + newHeight = startHeight + (event.pageY - startPos); + container.height(newHeight); + updateViewSize(); + }); + }); + $(window).mouseup(function(event) { + if(isDragging){ + $.cookie('world_editor_height', container.height(), {expires:7, path:'/'}); + } + isDragging = false; + $(window).unbind("mousemove"); + }); + } +}); \ No newline at end of file diff --git a/micropsi_server/static/minecraft/minecraft.js b/micropsi_server/static/minecraft/minecraft.js index ab6ae0d1..1bbf4e8b 100644 --- a/micropsi_server/static/minecraft/minecraft.js +++ b/micropsi_server/static/minecraft/minecraft.js @@ -32,41 +32,12 @@ function get_world_data(){ } function set_world_data(data){ currentWorldSimulationStep = data.current_step; + + agent_html = ''; for (var key in data.agents) { - if (data.agents[key].minecraft_vision_pixel) { - - if (current_layer == 1) { - console.log("activating second layer ..."); - secondLayer.activate(); - } - else{ - console.log("activating first layer ..."); - firstLayer.activate(); - } - - var minecraft_pixel = data.agents[key].minecraft_vision_pixel; - for (var x = 0; x < WIDTH; x++) { - for (var y = 0; y < HEIGHT; y++) { - - var raster = new Raster('mc_block_img_' + minecraft_pixel[(y + x * HEIGHT) * 2]); - raster.position = new Point(world.width / WIDTH * x, world.height / HEIGHT * y); - var distance = minecraft_pixel[(y + x * HEIGHT) * 2 + 1]; - raster.scale((world.width / WIDTH) / 64 * (1 / Math.pow(distance, 1 / 5)), (world.height / HEIGHT) / 64 * (1 / Math.pow(distance, 1 / 5))); - } - } - if (current_layer == 1) { - console.log("removing frist layer children ..."); - firstLayer.removeChildren(); - current_layer = 0; - } - else{ - console.log("removing frist layer children ..."); - secondLayer.removeChildren(); - current_layer = 1; - } - break; - } + agent_html += ""+data.agents[key].name+ ' ('+data.agents[key].type+')'; } + $('#world_agents_list table').html(agent_html); updateViewSize(); if (worldRunning) { @@ -112,35 +83,12 @@ function setCurrentWorld(uid) { expires: 7, path: '/' }); - loadWorldInfo(); -} - -function loadWorldInfo() { - - var all_images = "" - - var editor_div = $("#world_forms"); - - 
$.getScript('/static/minecraft/minecraft_struct.js', function () {
-        for (var i = -1; i < 173; i++) {
-
-            var block_name = block_names["" + i];
-            all_images = all_images + '<img id="mc_block_img_' + i + '" … />';
-
-            editor_div.html('…' + all_images + '…');
-        }
-    });
-
-    editor_div.html('…' + all_images + '…');
-
-    firstLayer = project.activeLayer;
-    secondLayer = new Layer();
-    firstLayer.activate();
     api.call('get_world_properties', {
         world_uid: currentWorld
     }, success = function (data) {
         refreshWorldView();
+        $('#world').parent().html('');
     }, error = function (data) {
         $.cookie('selected_world', '', {
             expires: -1,
diff --git a/micropsi_server/static/minecraft/minecraft.tpl b/micropsi_server/static/minecraft/minecraft.tpl
index 81c82886..0c919743 100644
--- a/micropsi_server/static/minecraft/minecraft.tpl
+++ b/micropsi_server/static/minecraft/minecraft.tpl
@@ -8,26 +8,11 @@

      [minecraft.tpl hunk body; markup stripped in extraction. Recoverable
       panel headings: "World Status", "Scene Viewer", "Agents", "World
       Objects"; the hunk shrinks this part of the template from 26 to 11
       lines.]
\ No newline at end of file
diff --git a/micropsi_server/static/timeseries/timeseries.js b/micropsi_server/static/timeseries/timeseries.js
new file mode 100644
index 00000000..e2a80a23
--- /dev/null
+++ b/micropsi_server/static/timeseries/timeseries.js
@@ -0,0 +1,78 @@
+
+$(function(){
+
+    var container = $('#timeseries_controls');
+
+    var slider = $('#timeseries_slider');
+
+    var initialized = false;
+
+    var first, last, total;
+
+    var advance_nodenet = $('#timeseries_controls_nodenet');
+    var nodenet_amount = $('#timeseries_controls_nodenet_amount');
+
+    function get_world_data(){
+        return {step: currentWorldSimulationStep};
+    }
+
+    $('.section.world .editor_field').height('auto');
+
+    function set_world_data(data){
+        if(!initialized){
+            first = new Date(data['first_timestamp']);
+            last = new Date(data['last_timestamp']);
+            total = data['total_timestamps'];
+            slider.slider({
+                'min': 0,
+                'max': total - 1,
+                'width': '100%',
+                'step': 1,
+                'value': data['current_step'],
+                'tooltip': 'show',
+                'handle': 'triangle',
+                'selection': 'none',
+                'formater': function(index){
+                    if (index > 0){
+                        var interval = parseInt((last.getTime() - first.getTime()) / total);
+                        return new Date(first.getTime() + (interval * index)).toLocaleString('de');
+                    } else {
+                        return first.toLocaleString('de');
+                    }
+                }
+
+            });
+            $('.firstval', container).html(first.toLocaleString('de').replace(', ', '<br/>'));
+            $('.lastval', container).html(last.toLocaleString('de').replace(', ', '<br/>'));
+            initialized = true;
+            slider.on('slideStop', set_world_state);
+        }
+        slider.slider('setValue', data['current_step']);
+        $('.world_step').text(data.current_step);
+    }
+
+    function set_world_state(event){
+        var value = parseInt(slider.val());
+        api.call('set_world_data', {world_uid: currentWorld, data: {step: value}}, function(){
+            if(advance_nodenet.attr('checked')){
+                var nn_uid = (currentNodenet) ? currentNodenet : null;
+                api.call('step_nodenets_in_world', {world_uid: currentWorld, nodenet_uid: nn_uid, steps: parseInt(nodenet_amount.val())}, function(){
+                    if(nn_uid){
+                        $(document).trigger('runner_stepped');
+                    }
+                });
+            } else {
+                get_world_state();
+            }
+        });
+    }
+
+    function get_world_state(){
+        api.call('get_world_view', {'world_uid': currentWorld, 'step': 0}, set_world_data);
+    }
+
+    register_stepping_function('world', get_world_data, set_world_data);
+
+    get_world_state();
+
+});
\ No newline at end of file
diff --git a/micropsi_server/static/timeseries/timeseries.tpl b/micropsi_server/static/timeseries/timeseries.tpl
new file mode 100644
index 00000000..e5d3d89c
--- /dev/null
+++ b/micropsi_server/static/timeseries/timeseries.tpl
@@ -0,0 +1,24 @@
+    [timeseries.tpl body; markup stripped in extraction. 24 added lines with
+     the #timeseries_slider and its .firstval/.lastval labels, the world-step
+     display, the advance-nodenet checkbox and step-amount input, and the hint
+     "Drag the slider to change the current point in time of the timeseries".]
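The timeseries frontend above drives the world entirely through JSON-RPC calls (`get_world_view`, `set_world_data`, `step_nodenets_in_world`). A minimal Python sketch of the same round trip, using only the standard library; the server address and the uids are assumptions, not values from this changeset:

    # Sketch of the slider's round trip over JSON-RPC (stdlib only).
    # BASE and the uids below are placeholders, not part of this diff.
    import json
    from urllib.request import Request, urlopen

    BASE = 'http://localhost:6543/rpc'  # assumed local micropsi server

    def rpc(name, **params):
        req = Request('%s/%s' % (BASE, name),
                      data=json.dumps(params).encode('utf-8'),
                      headers={'Content-Type': 'application/json'})
        return json.loads(urlopen(req).read().decode('utf-8'))['data']

    # Jump the timeseries world to an absolute step, then advance the
    # selected nodenet, as the "advance nodenet" checkbox does:
    rpc('set_world_data', world_uid='<world_uid>', data={'step': 400})
    rpc('step_nodenets_in_world', world_uid='<world_uid>',
        nodenet_uid='<nodenet_uid>', steps=2)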
      + \ No newline at end of file diff --git a/micropsi_server/tests/conftest.py b/micropsi_server/tests/conftest.py index a80fe771..91dd3b50 100644 --- a/micropsi_server/tests/conftest.py +++ b/micropsi_server/tests/conftest.py @@ -6,10 +6,11 @@ nn_uid = 'Testnet' from configuration import config -from conftest import user_token from micropsi_server import usermanagement +from micropsi_server.micropsi_app import usermanager -test_path = os.path.join(config['paths']['resource_path'], 'user-test-db.json') +user_token = list(usermanager.users['Pytest User']['sessions'].keys())[0] +test_path = os.path.join(config['paths']['data_directory'], 'user-test-db.json') class MicropsiTestApp(TestApp): @@ -66,6 +67,7 @@ def user_mgr(user_def): @pytest.fixture(scope="function") def eliza(user_mgr): + """ creates a user eliza, and a session. returns eliza's session token """ user_mgr.create_user("eliza", "qwerty", "Full") - user_mgr.start_session("eliza") - return "eliza" + token = user_mgr.start_session("eliza") + return token diff --git a/micropsi_server/tests/test_json_api.py b/micropsi_server/tests/test_json_api.py index e6e3396e..aeb632e8 100644 --- a/micropsi_server/tests/test_json_api.py +++ b/micropsi_server/tests/test_json_api.py @@ -20,22 +20,28 @@ def test_generate_uid(app): assert re.match('[a-f0-9]+', response.json_body['data']) is not None -def test_select_nodenet(app, test_nodenet): - app.set_auth() - response = app.get_json('/rpc/select_nodenet(nodenet_uid="%s")' % test_nodenet) +def test_create_and_invalidate_auth_token(app): + response = app.get_json('/rpc/create_auth_token(user="Pytest User",password="test")') assert_success(response) - data = response.json_body['data'] - assert data == test_nodenet + from micropsi_server.micropsi_app import usermanager + token = response.json_body['data'] + assert token in usermanager.users['Pytest User']['sessions'] + response = app.get_json('/rpc/invalidate_auth_token(token="%s")' % token) + assert_success(response) + assert token not in usermanager.users['Pytest User']['sessions'] -def test_load_nodenet(app, test_nodenet, node): - response = app.get_json('/rpc/load_nodenet(nodenet_uid="%s")' % test_nodenet) +def test_get_nodenet_metadata(app, test_nodenet, node): + response = app.get_json('/rpc/get_nodenet_metadata(nodenet_uid="%s")' % test_nodenet) assert_success(response) data = response.json_body['data'] assert 'nodetypes' in data - assert 'nodes' in data + assert 'native_modules' in data + assert 'engine' in data + assert 'nodespaces' in data + assert 'nodes' not in data assert 'links' not in data - assert len(data['nodes'][node]['links']['gen']) == 1 # genloop + assert data['current_step'] == 0 assert data['uid'] == test_nodenet @@ -48,10 +54,10 @@ def test_new_nodenet(app, engine): assert_success(response) uid = response.json_body['data'] assert uid is not None - response = app.get_json('/rpc/load_nodenet(nodenet_uid="%s")' % uid) + response = app.get_json('/rpc/get_nodenet_metadata(nodenet_uid="%s")' % uid) assert_success(response) assert response.json_body['data']['name'] == 'FooBarTestNet' - assert response.json_body['data']['nodes'] == {} + assert response.json_body['data']['engine'] == engine def test_get_available_nodenets(app, test_nodenet): @@ -71,16 +77,20 @@ def test_set_nodenet_properties(app, test_nodenet, test_world): app.set_auth() response = app.post_json('/rpc/set_nodenet_properties', params=dict(nodenet_uid=test_nodenet, nodenet_name="new_name", worldadapter="Braitenberg", world_uid=test_world)) assert_success(response) - 
response = app.get_json('/rpc/load_nodenet(nodenet_uid="%s")' % test_nodenet) + response = app.get_json('/rpc/get_nodenet_metadata(nodenet_uid="%s")' % test_nodenet) data = response.json_body['data'] assert data['name'] == 'new_name' assert data['worldadapter'] == 'Braitenberg' -def test_set_node_state(app, test_nodenet, nodetype_def, nodefunc_def): +def test_set_node_state(app, test_nodenet, resourcepath): + import os app.set_auth() # create a native module: - with open(nodetype_def, 'w') as fp: + + nodetype_file = os.path.join(resourcepath, 'Test', 'nodetypes.json') + nodefunc_file = os.path.join(resourcepath, 'Test', 'nodefunctions.py') + with open(nodetype_file, 'w') as fp: fp.write('{"Testnode": {\ "name": "Testnode",\ "slottypes": ["gen", "foo", "bar"],\ @@ -88,7 +98,7 @@ def test_set_node_state(app, test_nodenet, nodetype_def, nodefunc_def): "gatetypes": ["gen", "foo", "bar"],\ "symbol": "t"}}') - with open(nodefunc_def, 'w') as fp: + with open(nodefunc_file, 'w') as fp: fp.write("def testnodefunc(netapi, node=None, **prams):\r\n return 17") response = app.get_json('/rpc/reload_native_modules()') @@ -111,7 +121,7 @@ def test_set_node_state(app, test_nodenet, nodetype_def, nodefunc_def): 'state': {'foo': 'bar'} }) assert_success(response) - response = app.get_json('/rpc/load_nodenet(nodenet_uid="%s")' % test_nodenet) + response = app.get_json('/rpc/get_nodes(nodenet_uid="%s")' % test_nodenet) assert response.json_body['data']['nodes'][uid]['state'] == {'foo': 'bar'} @@ -122,38 +132,36 @@ def test_set_node_activation(app, test_nodenet, node): 'activation': '0.734' }) assert_success(response) - response = app.get_json('/rpc/load_nodenet(nodenet_uid="%s")' % test_nodenet) + response = app.get_json('/rpc/get_nodes(nodenet_uid="%s")' % test_nodenet) sheaves = response.json_body['data']['nodes'][node]['sheaves'] assert float("%.3f" % sheaves['default']['activation']) == 0.734 -def test_start_simulation(app, test_nodenet): +def test_start_calculation(app, test_nodenet): app.set_auth() - response = app.get_json('/rpc/load_nodenet(nodenet_uid="%s")' % test_nodenet) - response = app.post_json('/rpc/start_simulation', params=dict(nodenet_uid=test_nodenet)) + response = app.post_json('/rpc/start_calculation', params=dict(nodenet_uid=test_nodenet)) assert_success(response) - response = app.get_json('/rpc/load_nodenet(nodenet_uid="%s")' % test_nodenet) + response = app.get_json('/rpc/get_nodenet_metadata(nodenet_uid="%s")' % test_nodenet) assert response.json_body['data']['is_active'] -def test_start_simulation_with_condition(app, test_nodenet): +def test_start_calculation_with_condition(app, test_nodenet): import time app.set_auth() app.post_json('/rpc/set_runner_properties', params={ 'timestep': 10, 'factor': 1 }) - response = app.get_json('/rpc/load_nodenet(nodenet_uid="%s")' % test_nodenet) response = app.post_json('/rpc/set_runner_condition', params={ 'nodenet_uid': test_nodenet, 'steps': '2' }) assert_success(response) assert response.json_body['data']['step'] == 2 - response = app.post_json('/rpc/start_simulation', params=dict(nodenet_uid=test_nodenet)) + response = app.post_json('/rpc/start_calculation', params=dict(nodenet_uid=test_nodenet)) assert_success(response) time.sleep(1) - response = app.get_json('/rpc/load_nodenet(nodenet_uid="%s")' % test_nodenet) + response = app.get_json('/rpc/get_nodenet_metadata(nodenet_uid="%s")' % test_nodenet) assert not response.json_body['data']['is_active'] assert response.json_body['data']['current_step'] == 2 response = 
app.post_json('/rpc/remove_runner_condition', params=dict(nodenet_uid=test_nodenet)) @@ -178,41 +186,41 @@ def test_set_runner_properties(app): assert response.json_body['data']['factor'] == 1 -def test_get_is_simulation_running(app, test_nodenet): - response = app.get_json('/rpc/get_is_simulation_running(nodenet_uid="%s")' % test_nodenet) +def test_get_is_calculation_running(app, test_nodenet): + response = app.get_json('/rpc/get_is_calculation_running(nodenet_uid="%s")' % test_nodenet) assert_success(response) assert not response.json_body['data'] -def test_stop_simulation(app, test_nodenet): +def test_stop_calculation(app, test_nodenet): app.set_auth() - response = app.post_json('/rpc/start_simulation', params=dict(nodenet_uid=test_nodenet)) + response = app.post_json('/rpc/start_calculation', params=dict(nodenet_uid=test_nodenet)) assert_success(response) - response = app.get_json('/rpc/get_is_simulation_running(nodenet_uid="%s")' % test_nodenet) + response = app.get_json('/rpc/get_is_calculation_running(nodenet_uid="%s")' % test_nodenet) assert_success(response) assert response.json_body['data'] - response = app.post_json('/rpc/stop_simulation', params=dict(nodenet_uid=test_nodenet)) + response = app.post_json('/rpc/stop_calculation', params=dict(nodenet_uid=test_nodenet)) assert_success(response) - response = app.get_json('/rpc/get_is_simulation_running(nodenet_uid="%s")' % test_nodenet) + response = app.get_json('/rpc/get_is_calculation_running(nodenet_uid="%s")' % test_nodenet) assert_success(response) assert not response.json_body['data'] -def test_step_simulation(app, test_nodenet): +def test_step_calculation(app, test_nodenet): app.set_auth() - response = app.get_json('/rpc/load_nodenet(nodenet_uid="%s")' % test_nodenet) + response = app.get_json('/rpc/get_nodenet_metadata(nodenet_uid="%s")' % test_nodenet) assert response.json_body['data']['current_step'] == 0 - response = app.get_json('/rpc/step_simulation(nodenet_uid="%s")' % test_nodenet) + response = app.get_json('/rpc/step_calculation(nodenet_uid="%s")' % test_nodenet) assert_success(response) assert response.json_body['data'] == 1 - response = app.get_json('/rpc/load_nodenet(nodenet_uid="%s")' % test_nodenet) + response = app.get_json('/rpc/get_nodenet_metadata(nodenet_uid="%s")' % test_nodenet) assert response.json_body['data']['current_step'] == 1 -def test_get_current_state(app, test_nodenet, test_world, node): +def test_get_calculation_state(app, test_nodenet, test_world, node): from time import sleep app.set_auth() - response = app.get_json('/rpc/load_nodenet(nodenet_uid="%s")' % test_nodenet) + response = app.get_json('/rpc/get_nodenet_metadata(nodenet_uid="%s")' % test_nodenet) assert response.json_body['data']['current_step'] == 0 response = app.post_json('/rpc/set_nodenet_properties', params=dict(nodenet_uid=test_nodenet, nodenet_name="new_name", worldadapter="Braitenberg", world_uid=test_world)) @@ -223,17 +231,17 @@ def test_get_current_state(app, test_nodenet, test_world, node): }) monitor_uid = response.json_body['data'] - response = app.get_json('/rpc/step_simulation(nodenet_uid="%s")' % test_nodenet) + response = app.get_json('/rpc/step_calculation(nodenet_uid="%s")' % test_nodenet) assert_success(response) - response = app.get_json('/rpc/start_simulation(nodenet_uid="%s")' % test_nodenet) + response = app.get_json('/rpc/start_calculation(nodenet_uid="%s")' % test_nodenet) assert_success(response) sleep(1) - response = app.post_json('/rpc/get_current_state', params={ + response = 
app.post_json('/rpc/get_calculation_state', params={ 'nodenet_uid': test_nodenet, 'nodenet': { - 'nodespace': None, + 'nodespaces': [None], 'step': -1, }, 'monitors': { @@ -251,11 +259,7 @@ def test_get_current_state(app, test_nodenet, test_world, node): assert data['current_nodenet_step'] > 0 assert data['current_world_step'] > 0 - assert data['simulation_running'] - - assert 'nodenet' in data - assert data['nodenet']['current_step'] > 0 - assert data['nodenet']['is_active'] + assert data['calculation_running'] assert 'servertime' in data['monitors']['logs'] assert 'logs' in data['monitors']['logs'] @@ -271,12 +275,26 @@ def test_revert_nodenet(app, test_nodenet, test_world): assert_success(response) response = app.get_json('/rpc/revert_nodenet(nodenet_uid="%s")' % test_nodenet) assert_success(response) - response = app.get_json('/rpc/load_nodenet(nodenet_uid="%s")' % test_nodenet) + response = app.get_json('/rpc/get_nodenet_metadata(nodenet_uid="%s")' % test_nodenet) data = response.json_body['data'] assert data['name'] == 'Testnet' assert data['worldadapter'] is None +def test_revert_both(app, test_nodenet, test_world): + app.set_auth() + app.post_json('/rpc/set_nodenet_properties', params=dict(nodenet_uid=test_nodenet, worldadapter="Braitenberg", world_uid=test_world)) + for i in range(5): + app.get_json('/rpc/step_calculation(nodenet_uid="%s")' % test_nodenet) + res = app.get_json('/rpc/get_calculation_state(nodenet_uid="%s")' % test_nodenet) + assert res.json_body['data']['current_nodenet_step'] > 0 + assert res.json_body['data']['current_world_step'] > 0 + app.get_json('/rpc/revert_calculation(nodenet_uid="%s")' % test_nodenet) + res = app.get_json('/rpc/get_calculation_state(nodenet_uid="%s")' % test_nodenet) + assert res.json_body['data']['current_nodenet_step'] == 0 + assert res.json_body['data']['current_world_step'] == 0 + + def test_save_nodenet(app, test_nodenet, test_world): app.set_auth() response = app.post_json('/rpc/set_nodenet_properties', params=dict(nodenet_uid=test_nodenet, nodenet_name="new_name", worldadapter="Braitenberg", world_uid=test_world)) @@ -285,7 +303,7 @@ def test_save_nodenet(app, test_nodenet, test_world): assert_success(response) response = app.get_json('/rpc/revert_nodenet(nodenet_uid="%s")' % test_nodenet) assert_success(response) - response = app.get_json('/rpc/load_nodenet(nodenet_uid="%s")' % test_nodenet) + response = app.get_json('/rpc/get_nodenet_metadata(nodenet_uid="%s")' % test_nodenet) data = response.json_body['data'] assert data['name'] == 'new_name' assert data['worldadapter'] == 'Braitenberg' @@ -314,9 +332,12 @@ def test_import_nodenet(app, test_nodenet, node): assert_success(response) uid = response.json_body['data'] assert uid is not None - response = app.get_json('/rpc/load_nodenet(nodenet_uid="%s")' % uid) + response = app.get_json('/rpc/get_nodenet_metadata(nodenet_uid="%s")' % uid) + assert response.json_body['data']['name'] == data['name'] + assert response.json_body['data']['world'] == data['world'] + assert response.json_body['data']['worldadapter'] == data['worldadapter'] + response = app.get_json('/rpc/get_nodes(nodenet_uid="%s")' % uid) assert list(response.json_body['data']['nodes'].keys()) == [node] - assert response.json_body['data']['name'] == 'Testnet' response = app.get_json('/rpc/delete_nodenet(nodenet_uid="%s")' % uid) @@ -338,8 +359,9 @@ def test_merge_nodenet(app, test_nodenet, engine, node): 'nodenet_data': json.dumps(data) }) assert_success(response) - response = 
app.get_json('/rpc/load_nodenet(nodenet_uid="%s")' % uid) + response = app.get_json('/rpc/get_nodes(nodenet_uid="%s")' % uid) assert len(list(response.json_body['data']['nodes'].keys())) == 1 + response = app.get_json('/rpc/get_nodenet_metadata(nodenet_uid="%s")' % uid) assert response.json_body['data']['name'] == 'ImporterNet' response = app.get_json('/rpc/delete_nodenet(nodenet_uid="%s")' % uid) @@ -733,21 +755,10 @@ def test_get_nodespace_list(app, test_nodenet, node): assert node in response.json_body['data'][rootid]['nodes'] -def test_get_nodespace(app, test_nodenet, node): - response = app.post_json('/rpc/get_nodespace', params={ - 'nodenet_uid': test_nodenet, - 'nodespace': None, - 'include_links': True, - 'step': -1, - }) - assert_success(response) - assert node in response.json_body['data']['nodes'] - - def test_get_nodespace_activations(app, test_nodenet, node): response = app.post_json('/rpc/get_nodespace_activations', params={ 'nodenet_uid': test_nodenet, - 'nodespace': None, + 'nodespaces': [None], 'last_call_step': -1 }) assert_success(response) @@ -785,7 +796,7 @@ def test_add_nodespace(app, test_nodenet): }) assert_success(response) uid = response.json_body['data'] - response = app.get_json('/rpc/load_nodenet(nodenet_uid="%s")' % (test_nodenet)) + response = app.get_json('/rpc/get_nodes(nodenet_uid="%s")' % (test_nodenet)) assert uid in response.json_body['data']['nodespaces'] assert uid not in response.json_body['data']['nodes'] @@ -836,7 +847,7 @@ def test_delete_node(app, test_nodenet, node): 'node_uids': [node] }) assert_success(response) - response = app.get_json('/rpc/load_nodenet(nodenet_uid="%s")' % test_nodenet) + response = app.get_json('/rpc/get_nodes(nodenet_uid="%s")' % test_nodenet) assert response.json_body['data']['nodes'] == {} @@ -851,10 +862,10 @@ def test_delete_nodespace(app, test_nodenet, node): uid = response.json_body['data'] response = app.post_json('/rpc/delete_nodespace', params={ 'nodenet_uid': test_nodenet, - 'nodespace_uid': uid + 'nodespace': uid }) assert_success(response) - response = app.get_json('/rpc/load_nodenet(nodenet_uid="%s")' % test_nodenet) + response = app.get_json('/rpc/get_nodes(nodenet_uid="%s")' % test_nodenet) assert uid not in response.json_body['data']['nodespaces'] @@ -881,15 +892,18 @@ def test_align_nodes(app, test_nodenet): def test_get_available_node_types(app, test_nodenet): response = app.get_json('/rpc/get_available_node_types(nodenet_uid="%s")' % test_nodenet) assert_success(response) - assert 'Pipe' in response.json_body['data'] - assert 'Register' in response.json_body['data'] - assert 'Sensor' in response.json_body['data'] + assert 'Pipe' in response.json_body['data']['nodetypes'] + assert 'Register' in response.json_body['data']['nodetypes'] + assert 'Sensor' in response.json_body['data']['nodetypes'] -def test_get_available_native_module_types(app, test_nodenet): +def test_get_available_native_module_types(app, test_nodenet, engine): response = app.get_json('/rpc/get_available_native_module_types(nodenet_uid="%s")' % test_nodenet) assert_success(response) - assert response.json_body['data'] == {} + if engine == 'dict_engine': + assert response.json_body['data'] == {} + elif engine == 'theano_engine': + assert "GradientDescent" in response.json_body['data'] def test_set_node_parameters(app, test_nodenet): @@ -979,8 +993,9 @@ def test_get_available_datatargets(app, test_nodenet, test_world): assert 'engine_r' in response.json_body['data'] -def test_bind_datasource_to_sensor(app, test_nodenet): +def 
test_bind_datasource_to_sensor(app, test_nodenet, test_world): app.set_auth() + response = app.post_json('/rpc/set_nodenet_properties', params=dict(nodenet_uid=test_nodenet, world_uid=test_world, worldadapter="Braitenberg")) response = app.post_json('/rpc/add_node', params={ 'nodenet_uid': test_nodenet, 'type': 'Sensor', @@ -998,8 +1013,9 @@ def test_bind_datasource_to_sensor(app, test_nodenet): assert response.json_body['data']['parameters']['datasource'] == 'brightness_l' -def test_bind_datatarget_to_actor(app, test_nodenet): +def test_bind_datatarget_to_actor(app, test_nodenet, test_world): app.set_auth() + response = app.post_json('/rpc/set_nodenet_properties', params=dict(nodenet_uid=test_nodenet, world_uid=test_world, worldadapter="Braitenberg")) response = app.post_json('/rpc/add_node', params={ 'nodenet_uid': test_nodenet, 'type': 'Actor', @@ -1030,7 +1046,7 @@ def test_add_link(app, test_nodenet, node): assert_success(response) uid = response.json_body['data'] assert uid is not None - response = app.get_json('/rpc/load_nodenet(nodenet_uid="%s")' % test_nodenet) + response = app.get_json('/rpc/get_nodes(nodenet_uid="%s")' % test_nodenet) data = response.json_body['data'] assert data['nodes'][node]['links']['sub'][0]['target_node_uid'] == node assert round(data['nodes'][node]['links']['sub'][0]['weight'], 3) == 0.7 @@ -1047,7 +1063,7 @@ def test_set_link_weight(app, test_nodenet, node): 'weight': 0.345 }) assert_success(response) - response = app.get_json('/rpc/load_nodenet(nodenet_uid="%s")' % test_nodenet) + response = app.get_json('/rpc/get_nodes(nodenet_uid="%s")' % test_nodenet) data = response.json_body['data'] assert float("%.3f" % data['nodes'][node]['links']['gen'][0]['weight']) == 0.345 @@ -1072,44 +1088,50 @@ def test_delete_link(app, test_nodenet, node): 'slot_type': "gen" }) assert_success(response) - response = app.get_json('/rpc/load_nodenet(nodenet_uid="%s")' % test_nodenet) + response = app.get_json('/rpc/get_nodes(nodenet_uid="%s")' % test_nodenet) data = response.json_body['data'] data['nodes'][node]['links'] == {} -def test_reload_native_modules(app, test_nodenet, nodetype_def, nodefunc_def): +def test_reload_native_modules(app, test_nodenet, resourcepath): app.set_auth() # create a native module: - with open(nodetype_def, 'w') as fp: + import os + nodetype_file = os.path.join(resourcepath, 'Test', 'nodetypes.json') + nodefunc_file = os.path.join(resourcepath, 'Test', 'nodefunctions.py') + with open(nodetype_file, 'w') as fp: fp.write('{"Testnode": {\ "name": "Testnode",\ "slottypes": ["gen", "foo", "bar"],\ "nodefunction_name": "testnodefunc",\ "gatetypes": ["gen", "foo", "bar"],\ "symbol": "t"}}') - with open(nodefunc_def, 'w') as fp: + with open(nodefunc_file, 'w') as fp: fp.write("def testnodefunc(netapi, node=None, **prams):\r\n return 17") response = app.get_json('/rpc/reload_native_modules()') assert_success(response) response = app.get_json('/rpc/get_available_node_types(nodenet_uid="%s")' % test_nodenet) - data = response.json_body['data']['Testnode'] + data = response.json_body['data']['native_modules']['Testnode'] assert data['nodefunction_name'] == "testnodefunc" assert data['gatetypes'] == ['gen', 'foo', 'bar'] assert data['slottypes'] == ['gen', 'foo', 'bar'] assert data['name'] == 'Testnode' -def test_user_prompt_response(app, test_nodenet, nodetype_def, nodefunc_def): +def test_user_prompt_response(app, test_nodenet, resourcepath): app.set_auth() # create a native module: - with open(nodetype_def, 'w') as fp: + import os + nodetype_file = 
os.path.join(resourcepath, 'Test', 'nodetypes.json') + nodefunc_file = os.path.join(resourcepath, 'Test', 'nodefunctions.py') + with open(nodetype_file, 'w') as fp: fp.write('{"Testnode": {\ "name": "Testnode",\ "slottypes": ["gen", "foo", "bar"],\ "nodefunction_name": "testnodefunc",\ "gatetypes": ["gen", "foo", "bar"],\ "symbol": "t"}}') - with open(nodefunc_def, 'w') as fp: + with open(nodefunc_file, 'w') as fp: fp.write("def testnodefunc(netapi, node=None, **prams):\r\n return 17") response = app.get_json('/rpc/reload_native_modules()') assert_success(response) @@ -1217,9 +1239,11 @@ def test_500(app): assert response.json_body['traceback'] is not None -def test_get_recipes(app, test_nodenet, recipes_def): +def test_get_recipes(app, test_nodenet, resourcepath): app.set_auth() - with open(recipes_def, 'w') as fp: + import os + recipe_file = os.path.join(resourcepath, 'Test', 'recipes.py') + with open(recipe_file, 'w') as fp: fp.write(""" def foobar(netapi, quatsch=23): return {'quatsch': quatsch} @@ -1233,9 +1257,11 @@ def foobar(netapi, quatsch=23): assert data['foobar']['parameters'][0]['default'] == 23 -def test_run_recipes(app, test_nodenet, recipes_def): +def test_run_recipes(app, test_nodenet, resourcepath): app.set_auth() - with open(recipes_def, 'w') as fp: + import os + recipe_file = os.path.join(resourcepath, 'Test', 'recipes.py') + with open(recipe_file, 'w') as fp: fp.write(""" def foobar(netapi, quatsch=23): return {'quatsch': quatsch} @@ -1252,22 +1278,27 @@ def foobar(netapi, quatsch=23): assert data['quatsch'] == 23 -def test_get_agent_dashboard(app, test_nodenet, node): +def test_get_agent_dashboard(app, test_nodenet, node, default_world): + app.set_auth() + response = app.post_json('/rpc/set_nodenet_properties', params=dict(nodenet_uid=test_nodenet, worldadapter="Default", world_uid=default_world)) response = app.get_json('/rpc/get_agent_dashboard(nodenet_uid="%s")' % test_nodenet) data = response.json_body['data'] assert data['count_nodes'] == 1 -def test_nodenet_data_structure(app, test_nodenet, nodetype_def, nodefunc_def, node): +def test_nodenet_data_structure(app, test_nodenet, resourcepath, node): app.set_auth() - with open(nodetype_def, 'w') as fp: + import os + nodetype_file = os.path.join(resourcepath, 'Test', 'nodetypes.json') + nodefunc_file = os.path.join(resourcepath, 'Test', 'nodefunctions.py') + with open(nodetype_file, 'w') as fp: fp.write('{"Testnode": {\ "name": "Testnode",\ "slottypes": ["gen", "foo", "bar"],\ "nodefunction_name": "testnodefunc",\ "gatetypes": ["gen", "foo", "bar"],\ "symbol": "t"}}') - with open(nodefunc_def, 'w') as fp: + with open(nodefunc_file, 'w') as fp: fp.write("def testnodefunc(netapi, node=None, **prams):\r\n return 17") response = app.get_json('/rpc/reload_native_modules()') response = app.post_json('/rpc/add_nodespace', params={ @@ -1294,12 +1325,16 @@ def test_nodenet_data_structure(app, test_nodenet, nodetype_def, nodefunc_def, n }) monitor_uid = response.json_body['data'] - response_1 = app.get_json('/rpc/load_nodenet(nodenet_uid="%s")' % test_nodenet) + response = app.get_json('/rpc/get_nodenet_metadata(nodenet_uid="%s")' % test_nodenet) + metadata = response.json_body['data'] + + response_1 = app.post_json('/rpc/get_calculation_state', params={'nodenet_uid': test_nodenet, 'nodenet': {'nodespaces': [None]}, 'monitors': True}) response = app.get_json('/rpc/save_nodenet(nodenet_uid="%s")' % test_nodenet) response = app.get_json('/rpc/revert_nodenet(nodenet_uid="%s")' % test_nodenet) - response_2 = 
app.get_json('/rpc/load_nodenet(nodenet_uid="%s")' % test_nodenet) + response_2 = app.post_json('/rpc/get_calculation_state', params={'nodenet_uid': test_nodenet, 'nodenet': {'nodespaces': [None]}, 'monitors': True}) - assert response_1.json_body['data'] == response_2.json_body['data'] + assert response_1.json_body['data']['nodenet'] == response_2.json_body['data']['nodenet'] + assert response_1.json_body['data']['monitors']['monitors'] == response_2.json_body['data']['monitors']['monitors'] data = response_2.json_body['data'] @@ -1307,33 +1342,33 @@ def test_nodenet_data_structure(app, test_nodenet, nodetype_def, nodefunc_def, n response = app.get_json('/rpc/export_monitor_data(nodenet_uid="%s", monitor_uid="%s")' % (test_nodenet, monitor_uid)) monitor_data = response.json_body['data'] - assert data['monitors'][monitor_uid]['name'] == 'Testmonitor' - assert data['monitors'][monitor_uid]['node_uid'] == node - assert data['monitors'][monitor_uid]['target'] == 'gen' - assert data['monitors'][monitor_uid]['type'] == 'gate' - assert data['monitors'][monitor_uid]['uid'] == monitor_uid - assert data['monitors'][monitor_uid]['values'] == {} - assert data['monitors'][monitor_uid]['color'] == '#332211' - assert data['monitors'][monitor_uid] == monitor_data + assert data['monitors']['monitors'][monitor_uid]['name'] == 'Testmonitor' + assert data['monitors']['monitors'][monitor_uid]['node_uid'] == node + assert data['monitors']['monitors'][monitor_uid]['target'] == 'gen' + assert data['monitors']['monitors'][monitor_uid]['type'] == 'gate' + assert data['monitors']['monitors'][monitor_uid]['uid'] == monitor_uid + assert data['monitors']['monitors'][monitor_uid]['values'] == {} + assert data['monitors']['monitors'][monitor_uid]['color'] == '#332211' + assert data['monitors']['monitors'][monitor_uid] == monitor_data # Nodes response = app.get_json('/rpc/get_node(nodenet_uid="%s", node_uid="%s")' % (test_nodenet, node)) node_data = response.json_body['data'] - assert node in data['nodes'] - assert n2_uid not in data['nodes'] - assert nodespace_uid not in data['nodes'] + assert node in data['nodenet']['nodes'] + assert n2_uid not in data['nodenet']['nodes'] + assert nodespace_uid not in data['nodenet']['nodes'] # gates for key in ['gen', 'por', 'ret', 'sub', 'sur', 'cat', 'exp']: - assert data['nodes'][node]['gate_activations'][key]['default']['activation'] == 0 - assert key not in data['nodes'][node]['gate_parameters'] - assert data['nodes'][node]['gate_functions'][key] == 'identity' - - assert data['nodes'][node]['parameters']['expectation'] == 1 - assert data['nodes'][node]['parameters']['wait'] == 10 - assert data['nodes'][node]['position'] == [10, 10, 10] - assert data['nodes'][node]['type'] == "Pipe" + assert data['nodenet']['nodes'][node]['gate_activations'][key]['default']['activation'] == 0 + assert key not in data['nodenet']['nodes'][node]['gate_parameters'] + assert data['nodenet']['nodes'][node]['gate_functions'][key] == 'identity' + + assert data['nodenet']['nodes'][node]['parameters']['expectation'] == 1 + assert data['nodenet']['nodes'][node]['parameters']['wait'] == 10 + assert data['nodenet']['nodes'][node]['position'] == [10, 10, 10] + assert data['nodenet']['nodes'][node]['type'] == "Pipe" assert 'links' not in data assert node_data['parameters']['expectation'] == 1 @@ -1342,65 +1377,65 @@ def test_nodenet_data_structure(app, test_nodenet, nodetype_def, nodefunc_def, n assert node_data['type'] == "Pipe" # Links - for link in data['nodes'][node]['links']['gen']: + for link in 
data['nodenet']['nodes'][node]['links']['gen']: assert link['weight'] == 1 assert link['target_node_uid'] == node assert link['target_slot_name'] == 'gen' # Nodespaces - # assert data['nodespaces'][nodespace_uid]['index'] == 3 - assert data['nodespaces'][nodespace_uid]['name'] == 'Test-Node-Space' - # assert data['nodespaces'][nodespace_uid]['parent_nodespace'] == 'Root' - assert data['nodespaces'][nodespace_uid]['position'] == [23, 23, 42] + # assert data['nodenet']['nodespaces'][nodespace_uid]['index'] == 3 + assert data['nodenet']['nodespaces'][nodespace_uid]['name'] == 'Test-Node-Space' + # assert data['nodenet']['nodespaces'][nodespace_uid]['parent_nodespace'] == 'Root' + assert data['nodenet']['nodespaces'][nodespace_uid]['position'] == [23, 23, 42] # Nodetypes response = app.get_json('/rpc/get_available_node_types(nodenet_uid="%s")' % test_nodenet) node_type_data = response.json_body['data'] for key in ['Comment', 'Nodespace']: - assert 'gatetypes' not in data['nodetypes'][key] - assert 'slottypes' not in data['nodetypes'][key] + assert 'gatetypes' not in metadata['nodetypes'][key] + assert 'slottypes' not in metadata['nodetypes'][key] for key in ['Pipe', 'Register', 'Actor']: - assert 'gatetypes' in data['nodetypes'][key] - assert 'slottypes' in data['nodetypes'][key] + assert 'gatetypes' in metadata['nodetypes'][key] + assert 'slottypes' in metadata['nodetypes'][key] - assert 'slottypes' in data['nodetypes']['Activator'] - assert 'gatetypes' not in data['nodetypes']['Activator'] + assert 'slottypes' in metadata['nodetypes']['Activator'] + assert 'gatetypes' not in metadata['nodetypes']['Activator'] - assert 'slottypes' not in data['nodetypes']['Sensor'] - assert 'gatetypes' in data['nodetypes']['Sensor'] + assert 'slottypes' not in metadata['nodetypes']['Sensor'] + assert 'gatetypes' in metadata['nodetypes']['Sensor'] - assert data['nodetypes'] == node_type_data + assert metadata['nodetypes'] == node_type_data['nodetypes'] # Native Modules response = app.get_json('/rpc/get_available_native_module_types(nodenet_uid="%s")' % test_nodenet) native_module_data = response.json_body['data'] - assert data['native_modules']['Testnode']['gatetypes'] == ['gen', 'foo', 'bar'] - assert data['native_modules']['Testnode']['name'] == 'Testnode' - assert data['native_modules']['Testnode']['nodefunction_name'] == 'testnodefunc' - assert data['native_modules']['Testnode']['slottypes'] == ['gen', 'foo', 'bar'] - assert data['native_modules']['Testnode']['symbol'] == 't' + assert metadata['native_modules']['Testnode']['gatetypes'] == ['gen', 'foo', 'bar'] + assert metadata['native_modules']['Testnode']['name'] == 'Testnode' + assert metadata['native_modules']['Testnode']['nodefunction_name'] == 'testnodefunc' + assert metadata['native_modules']['Testnode']['slottypes'] == ['gen', 'foo', 'bar'] + assert metadata['native_modules']['Testnode']['symbol'] == 't' - assert data['native_modules'] == native_module_data + assert metadata['native_modules'] == native_module_data # Nodenet - assert data['current_step'] == 0 # TODO: + assert metadata['current_step'] == 0 # TODO: assert 'step' not in data # current_step && step? 
- assert data['version'] == 1 - assert data['world'] is None - assert data['worldadapter'] is None + assert metadata['version'] == 1 + assert metadata['world'] is None + assert metadata['worldadapter'] is None def test_get_state_diff(app, test_nodenet, node): from micropsi_core import runtime nodenet = runtime.nodenets[test_nodenet] runtime.step_nodenet(test_nodenet) - response = app.post_json('/rpc/get_current_state', params={ + response = app.post_json('/rpc/get_calculation_state', params={ 'nodenet_uid': test_nodenet, 'nodenet_diff': { - 'nodespace': None, + 'nodespaces': [None], 'step': 0, } }) @@ -1410,12 +1445,28 @@ def test_get_state_diff(app, test_nodenet, node): assert node in data['changes']['nodes_dirty'] node2 = nodenet.create_node("Register", None, [10, 10], name="node2") runtime.step_nodenet(test_nodenet) - response = app.post_json('/rpc/get_current_state', params={ + response = app.post_json('/rpc/get_calculation_state', params={ 'nodenet_uid': test_nodenet, 'nodenet_diff': { - 'nodespace': None, + 'nodespaces': [None], 'step': 1, } }) data = response.json_body['data']['nodenet_diff'] assert [node2] == list(data['changes']['nodes_dirty'].keys()) + + +def test_get_operations(app, test_nodenet): + response = app.get_json('/rpc/get_available_operations()') + data = response.json_body['data'] + assert data['autoalign']['selection']['nodetypes'] == [] + + +def test_run_operation(app, test_nodenet, node): + response = app.post_json('/rpc/run_operation', { + 'nodenet_uid': test_nodenet, + 'name': 'autoalign', + 'parameters': {}, + 'selection_uids': [None] + }) + assert response.json_body['status'] == 'success' diff --git a/micropsi_server/tests/test_userManager.py b/micropsi_server/tests/test_userManager.py index 83fa5e51..4e64c2b1 100644 --- a/micropsi_server/tests/test_userManager.py +++ b/micropsi_server/tests/test_userManager.py @@ -70,47 +70,42 @@ def test_set_user_password(user_mgr, eliza): def test_start_session(user_mgr, eliza): - user_mgr.end_session(user_mgr.users["eliza"]["session_token"]) - assert user_mgr.users["eliza"]["session_token"] is None + user_mgr.end_session(eliza) + assert user_mgr.users["eliza"]["sessions"] == {} assert user_mgr.start_session("eliza", password="wrong") is None - assert user_mgr.users["eliza"]["session_token"] is None + assert user_mgr.users["eliza"]["sessions"] == {} token = user_mgr.start_session("eliza", password="qwerty") assert token is not None - assert token == user_mgr.users["eliza"]["session_token"] + assert token in user_mgr.users["eliza"]["sessions"] def test_get_user_id_for_session_token(user_mgr, eliza): - token = user_mgr.start_session("eliza") - assert user_mgr.get_user_id_for_session_token(token) == "eliza" + assert user_mgr.get_user_id_for_session_token(eliza) == "eliza" assert user_mgr.get_user_id_for_session_token("notoken") == "Guest" def test_get_permissions_for_session_token(user_mgr, eliza): - token = user_mgr.users["eliza"]["session_token"] - perms = user_mgr.get_permissions_for_session_token(token) + perms = user_mgr.get_permissions_for_session_token(eliza) assert "manage server" in perms assert "manage users" not in perms user_mgr.set_user_role("eliza", "Guest") - perms = user_mgr.get_permissions_for_session_token(token) + perms = user_mgr.get_permissions_for_session_token(eliza) assert "manage server" not in perms def test_switch_user_for_session_token(user_mgr, eliza): user_mgr.create_user("norbert", "abcd", "Full") - user_mgr.start_session("norbert") - token = user_mgr.users["norbert"]["session_token"] + token = 
user_mgr.start_session("norbert") user_mgr.switch_user_for_session_token("eliza", token) - token1 = user_mgr.users["norbert"]["session_token"] - token2 = user_mgr.users["eliza"]["session_token"] - assert token1 is None - assert token2 == token + assert user_mgr.users["norbert"]["sessions"] == {} + assert token in user_mgr.users["eliza"]["sessions"] + # assert eliza's own session is still valid + assert eliza in user_mgr.users["eliza"]["sessions"] def test_end_session(user_mgr, eliza): - token = user_mgr.users["eliza"]["session_token"] - assert token is not None - user_mgr.end_session(token) - assert user_mgr.users["eliza"]["session_token"] is None + user_mgr.end_session(eliza) + assert user_mgr.users["eliza"]["sessions"] == {} def test_test_password(user_mgr, eliza): @@ -119,14 +114,14 @@ def test_test_password(user_mgr, eliza): def test_end_all_sessions(user_mgr, eliza): - user_mgr.start_session("eliza") user_mgr.create_user("norbert", "abcd", "Full") - user_mgr.start_session("norbert") - assert user_mgr.users["eliza"]["session_token"] is not None - assert user_mgr.users["norbert"]["session_token"] is not None + norbert = user_mgr.start_session("norbert") + assert eliza in user_mgr.users["eliza"]["sessions"] + assert norbert in user_mgr.users["norbert"]["sessions"] user_mgr.end_all_sessions() - assert user_mgr.users["eliza"]["session_token"] is None - assert user_mgr.users["norbert"]["session_token"] is None + assert user_mgr.users["eliza"]["sessions"] == {} + assert user_mgr.users["norbert"]["sessions"] == {} + assert user_mgr.sessions == {} def test_delete_user(user_mgr): @@ -139,19 +134,27 @@ def test_delete_user(user_mgr): def test_check_for_expired_user_sessions(user_mgr, eliza): t = datetime.datetime.now().time().isoformat() user_mgr.create_user("norbert", "abcd", "Full") - user_mgr.start_session("norbert", keep_logged_in_forever=False) - user_mgr.users["norbert"]["session_expires"] = repr(t) - user_mgr.start_session("eliza", keep_logged_in_forever=True) - assert user_mgr.users["eliza"]["session_expires"] is False + norbert = user_mgr.start_session("norbert", keep_logged_in_forever=False) + user_mgr.users["norbert"]["sessions"][norbert]['expires'] = repr(t) + assert user_mgr.users["eliza"]["sessions"][eliza]["expires"] is False user_mgr.check_for_expired_user_sessions() - assert user_mgr.users["norbert"]["session_token"] is None - assert user_mgr.users["eliza"]["session_token"] is not None + assert user_mgr.users["norbert"]["sessions"] == {} + assert user_mgr.users["eliza"]["sessions"] != {} -def test_refresh_session(user_mgr, eliza): +def test_refresh_session(user_mgr): + user_mgr.create_user("norbert", "abcd", "Full") + norbert = user_mgr.start_session("norbert", keep_logged_in_forever=False) t = datetime.datetime.now().isoformat() - token = user_mgr.start_session("eliza", keep_logged_in_forever=False) - user_mgr.users["eliza"]["session_expires"] = t - user_mgr.refresh_session(token) + user_mgr.users["norbert"]["sessions"][norbert]["expires"] = t + user_mgr.refresh_session(norbert) user_mgr.check_for_expired_user_sessions() - assert user_mgr.users["eliza"]["session_token"] is token + assert norbert in user_mgr.users["norbert"]["sessions"] + + +def test_multiple_sessions_for_user(user_mgr, eliza): + eliza2 = user_mgr.start_session("eliza", keep_logged_in_forever=False) + assert user_mgr.get_user_id_for_session_token(eliza) == "eliza" + assert user_mgr.get_user_id_for_session_token(eliza2) == "eliza" + assert not user_mgr.users["eliza"]["sessions"][eliza]['expires'] + assert 
user_mgr.users["eliza"]["sessions"][eliza2]['expires'] diff --git a/micropsi_server/usermanagement.py b/micropsi_server/usermanagement.py index 116483d0..f73170c6 100644 --- a/micropsi_server/usermanagement.py +++ b/micropsi_server/usermanagement.py @@ -52,7 +52,7 @@ ADMIN_USER = "admin" # default name of the admin user DEFAULT_ROLE = "Restricted" # new users can create and edit nodenets, but not create worlds -IDLE_TIME_BEFORE_SESSION_EXPIRES = 360000 # after 100h idle time, expire the user session (but not the simulation) +IDLE_TIME_BEFORE_SESSION_EXPIRES = 360000 # after 100h idle time, expire the user session (but not the calculation) TIME_INTERVAL_BETWEEN_EXPIRATION_CHECKS = 3600 # check every hour if we should log out users USER_ROLES = { # sets of strings; each represents a permission. @@ -89,7 +89,7 @@ def __init__(self, userfile_path=None): # set up persistence if userfile_path is None: userfile_path = cfg['paths']['usermanager_path'] - micropsi_core.tools.mkdir(os.path.dirname(userfile_path)) + os.makedirs(os.path.dirname(userfile_path), exist_ok=True) self.user_file_name = userfile_path # todo: make this work without a file system try: @@ -104,10 +104,16 @@ def __init__(self, userfile_path=None): self.users = {} # set up sessions - for i in self.users: - active_session = self.users[i]["session_token"] - if active_session: - self.sessions[active_session] = i + for name in self.users: + + # compatibility for files before multi-session-feature + if "session_token" in self.users[name] and "sessions" not in self.users[name]: + self.users[name]["sessions"] = { + self.users[name]["session_token"]: {"expires": self.users[name]["session_expires"]} + } + + for token in self.users[name]["sessions"]: + self.sessions[token] = name # set up session cleanup def _session_expiration(): @@ -134,35 +140,35 @@ def create_user(self, user_id, password="", role = DEFAULT_ROLE, uid = None): role: a string corresponding to a user role (such as "Administrator", or "Restricted") uid: a string that acts as a unique, immutable handle (so we can store resources for this user) """ - if user_id and not user_id in self.users: + if user_id and user_id not in self.users: self.users[user_id] = { "uid": uid or user_id, "hashed_password": hashlib.md5(password.encode('utf-8')).hexdigest(), "role": role, - "session_token": None, - "session_expires": False + "sessions": {} } self.save_users() return True - else: return False + else: + return False def save_users(self): """stores the user data to a file""" with open(self.user_file_name, mode='w+') as file: - json.dump(self.users, file, indent = 4) + json.dump(self.users, file, indent=4) def list_users(self): """returns a dictionary with all users currently known to the user manager for display purposes""" - return { i: { - "role": self.users[i]["role"], - "is_active": True if self.users[i]["session_token"] else False } - for i in self.users } + return dict((name, { + "role": self.users[name]["role"], + "is_active": True if self.users[name]["sessions"] else False}) + for name in self.users) def set_user_id(self, user_id_old, user_id_new): """returns the new username if the user has been renamed successfully, the old username if the new one was already in use, and None if the old username did not exist""" if user_id_old in self.users: - if not user_id_new in self.users: + if user_id_new not in self.users: self.users[user_id_new] = self.users[user_id_old] del self.users[user_id_old] self.save_users() @@ -191,13 +197,14 @@ def delete_user(self, user_id): """deletes the 
specified user, returns True if successful"""
         if user_id in self.users:
             # if the user is still active, kill the session
-            if self.users[user_id]["session_token"]: self.end_session(self.users[user_id]["session_token"])
+            for token in list(self.users[user_id]["sessions"].keys()):
+                self.end_session(token)
             del self.users[user_id]
             self.save_users()
             return True
         return False
 
-    def start_session(self, user_id, password = None, keep_logged_in_forever=True):
+    def start_session(self, user_id, password=None, keep_logged_in_forever=True):
         """authenticates the specified user, returns session token if successful, or None if not.
 
         Arguments:
@@ -206,11 +213,12 @@ def start_session(self, user_id, password = None, keep_logged_in_forever=True):
             keep_logged_in_forever (optional): if True, the session will not expire unless manually logging off
         """
         if password is None or self.test_password(user_id, password):
-            session_token = str(uuid.UUID(bytes = os.urandom(16)))
-            self.users[user_id]["session_token"] = session_token
+            session_token = str(uuid.UUID(bytes=os.urandom(16)))
+            self.users[user_id]["sessions"][session_token] = {
+                "expires": not keep_logged_in_forever
+            }
             self.sessions[session_token] = user_id
             if keep_logged_in_forever:
-                self.users[user_id]["session_expires"] = False
                 self.save_users()
             else:
                 self.refresh_session(session_token)
@@ -230,8 +238,11 @@ def switch_user_for_session_token(self, user_id, session_token):
         if session_token in self.sessions and user_id in self.users:
             current_user = self.sessions[session_token]
             if current_user in self.users:
-                self.users[current_user]["session_token"] = None
-                self.users[user_id]["session_token"] = session_token
+                session = self.users[current_user]["sessions"][session_token]
+                del self.users[current_user]["sessions"][session_token]
+                self.users[user_id]["sessions"].update({
+                    session_token: session
+                })
                 self.sessions[session_token] = user_id
                 self.refresh_session(session_token)
                 self.save_users()
@@ -251,7 +262,7 @@ def end_session(self, session_token):
             user_id = self.sessions[session_token]
             del self.sessions[session_token]
             if user_id in self.users:
-                self.users[user_id]["session_token"] = None
+                del self.users[user_id]["sessions"][session_token]
 
     def end_all_sessions(self):
         """useful during a reset of the runtime, because all open user sessions will persist during shutdown"""
@@ -263,22 +274,22 @@ def refresh_session(self, session_token):
         """resets the idle time until a currently active session expires to some point in the future"""
         if session_token in self.sessions:
             user_id = self.sessions[session_token]
-            if self.users[user_id]["session_expires"]:
-                self.users[user_id]["session_expires"] = (datetime.datetime.now() + datetime.timedelta(
+            if self.users[user_id]["sessions"][session_token]["expires"]:
+                self.users[user_id]["sessions"][session_token]["expires"] = (datetime.datetime.now() + datetime.timedelta(
                     seconds=IDLE_TIME_BEFORE_SESSION_EXPIRES)).isoformat()
 
     def check_for_expired_user_sessions(self):
         """removes all user sessions that have been idle for too long"""
         change_flag = False
         now = datetime.datetime.now().isoformat()
         sessions = self.sessions.copy()
         for session_token in sessions:
             user_id = self.sessions[session_token]
-            if self.users[user_id]["session_expires"]:
-                if self.users[user_id]["session_expires"] < now:
-                    self.end_session(session_token)
-                    change_flag = True
+            expires = self.users[user_id]["sessions"][session_token]["expires"]
+            if expires and expires < now:
+                self.end_session(session_token)
+                change_flag = True
         if change_flag:
             self.save_users()
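The hunks above replace each user's single `session_token`/`session_expires` pair with a `sessions` dict mapping token to an `{"expires": ...}` record, so one user can hold several live sessions at once. A self-contained sketch of that bookkeeping; the function names are illustrative, not the UserManager API:

    # Sketch of the new per-user session store: "expires" is False for
    # permanent sessions, else an ISO timestamp.
    import datetime
    import os
    import uuid

    user = {"uid": "eliza", "role": "Full", "sessions": {}}

    def open_session(user, keep_logged_in_forever=True, idle_seconds=360000):
        token = str(uuid.UUID(bytes=os.urandom(16)))
        if keep_logged_in_forever:
            expires = False
        else:
            expires = (datetime.datetime.now() +
                       datetime.timedelta(seconds=idle_seconds)).isoformat()
        user["sessions"][token] = {"expires": expires}
        return token

    def expire_idle_sessions(user):
        # ISO-8601 strings of equal format compare chronologically
        now = datetime.datetime.now().isoformat()
        for token in list(user["sessions"].keys()):
            expires = user["sessions"][token]["expires"]
            if expires and expires < now:
                del user["sessions"][token]

    permanent = open_session(user)
    temporary = open_session(user, keep_logged_in_forever=False)
    expire_idle_sessions(user)
    assert permanent in user["sessions"] and temporary in user["sessions"]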
@@ -289,7 +299,6 @@ def get_permissions_for_session_token(self, session_token):
 
         Example usage:
             if "create nodenets" in usermanager.get_permissions(my_session): ...
         """
-
         if session_token in self.sessions:
             user_id = self.sessions[session_token]
             if user_id in self.users:
@@ -306,4 +315,3 @@ def get_user_id_for_session_token(self, session_token):
             return self.sessions[session_token]
         else:
             return "Guest"
-
diff --git a/micropsi_server/view/about.tpl b/micropsi_server/view/about.tpl
index ab32707a..d6a094ec 100644
--- a/micropsi_server/view/about.tpl
+++ b/micropsi_server/view/about.tpl
@@ -1,4 +1,4 @@
-%include menu.tpl version = version
+%include("menu.tpl", version=version)

      @@ -30,4 +30,4 @@

-%rebase boilerplate title = "About MicroPsi 2"
+%rebase("boilerplate.tpl", title="About MicroPsi 2")
diff --git a/micropsi_server/view/boilerplate.tpl b/micropsi_server/view/boilerplate.tpl
index 2cf3466b..27e1915b 100644
--- a/micropsi_server/view/boilerplate.tpl
+++ b/micropsi_server/view/boilerplate.tpl
@@ -23,6 +23,8 @@
+    [two added asset includes; tag markup stripped in extraction]
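The template edits in this and the remaining .tpl files all make the same move: bottle's legacy `%include`/`%rebase` statement syntax becomes the function-call syntax that newer bottle releases expect. A minimal rendering sketch, where the inline template merely stands in for menu.tpl:

    # Function-call style used by newer bottle releases, e.g.
    #   %include("menu.tpl", version=version)
    from bottle import SimpleTemplate

    tpl = SimpleTemplate('MicroPsi {{version}}')  # stand-in for menu.tpl
    assert tpl.render(version='0.8-alpha6') == 'MicroPsi 0.8-alpha6'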
      diff --git a/micropsi_server/view/nodenet_mgt.tpl b/micropsi_server/view/nodenet_mgt.tpl index 08ef2dc6..2c0a060b 100644 --- a/micropsi_server/view/nodenet_mgt.tpl +++ b/micropsi_server/view/nodenet_mgt.tpl @@ -1,4 +1,4 @@ -%include menu.tpl version = version, permissions = permissions, user_id = user_id +%include("menu.tpl", version=version, permissions=permissions, user_id=user_id)

      @@ -45,4 +45,4 @@ Save all nodenets

      -%rebase boilerplate title = "Manage Nodenets" +%rebase("boilerplate.tpl", title="Manage Nodenets") diff --git a/micropsi_server/view/user_mgt.tpl b/micropsi_server/view/user_mgt.tpl index 0a884a42..87561df8 100644 --- a/micropsi_server/view/user_mgt.tpl +++ b/micropsi_server/view/user_mgt.tpl @@ -1,4 +1,4 @@ -%include menu.tpl version = version, permissions = permissions, user_id = user_id +%include("menu.tpl", version=version, permissions=permissions, user_id=user_id)

      @@ -61,4 +61,4 @@

      -%rebase boilerplate title = "Manage Users" +%rebase("boilerplate.tpl", title="Manage Users") diff --git a/micropsi_server/view/viewer.tpl b/micropsi_server/view/viewer.tpl index 56da4d4c..14db5250 100644 --- a/micropsi_server/view/viewer.tpl +++ b/micropsi_server/view/viewer.tpl @@ -1,5 +1,5 @@ -%include menu.tpl version = version, user_id = user_id, permissions = permissions +%include("menu.tpl", version=version, permissions=permissions, user_id=user_id) @@ -11,25 +11,22 @@

      %if mode == "nodenet": - %include nodenet + %include("nodenet.tpl") % end %if mode == "monitors": - %include monitors logging_levels=logging_levels + %include("monitors.tpl", logging_levels=logging_levels) %end %if mode == "world": - %include world mine=mine,others=others,current=current,world_assets=world_assets -% end -%if mode == "face": - %include face + %include("world.tpl", mine=mine, others=others, current=current, world_assets=world_assets) % end %if mode == "dashboard": - %include dashboard logging_levels=logging_levels + %include("dashboard.tpl", logging_levels=logging_levels) % end %if mode == "all": - %include nodenet - %include monitors logging_levels=logging_levels - %include world mine=mine,others=others,current=current,world_assets=world_assets + %include("nodenet.tpl") + %include("monitors.tpl", logging_levels=logging_levels) + %include("world.tpl", mine=mine, others=others, current=current, world_assets=world_assets) % end %if defined('first_user') and first_user: @@ -41,4 +38,4 @@ %end -%rebase boilerplate title = "MicroPsi Simulator" +%rebase("boilerplate.tpl", title="MicroPsi Simulator") diff --git a/micropsi_server/view/world.tpl b/micropsi_server/view/world.tpl index 72bf71e5..0343e0a1 100644 --- a/micropsi_server/view/world.tpl +++ b/micropsi_server/view/world.tpl @@ -12,7 +12,7 @@
      - %include nodenet_list type="world",mine=mine,others=others,current=current + %include("nodenet_list.tpl", type="world", mine=mine, others=others, current=current)
      @@ -44,8 +44,10 @@ - + -%if world_assets.get('js'): - +%if world_assets.get('paperjs'): + +%elif world_assets.get('js'): + %end diff --git a/micropsi_server/view/world_form.tpl b/micropsi_server/view/world_form.tpl index d189b1d2..1f0fe262 100644 --- a/micropsi_server/view/world_form.tpl +++ b/micropsi_server/view/world_form.tpl @@ -49,9 +49,41 @@ %end %end + % for type in worldtypes: + + %end + %for type in worldtypes: + % for param in worldtypes[type].get_config_options(): + + %end + %end + + %if defined("world"): %end @@ -67,3 +99,13 @@ + + \ No newline at end of file diff --git a/micropsi_server/view/worldadapter_selector.tpl b/micropsi_server/view/worldadapter_selector.tpl index b02f9ff2..850d249c 100644 --- a/micropsi_server/view/worldadapter_selector.tpl +++ b/micropsi_server/view/worldadapter_selector.tpl @@ -1,15 +1,36 @@ - % if not world_uid in worlds: % else: - % for type in worlds[world_uid].supported_worldadapters: + % for type in sorted(worlds[world_uid].supported_worldadapters.keys()): % if defined("nodenet_uid") and nodenet_uid in nodenets and nodenets[nodenet_uid].worldadapter == type: % else: %end %end + +%end + +
      Select a worldadapter to see a description
      + +%if world_uid in worlds: + %end diff --git a/requirements.txt b/requirements.txt index 21b4107a..fdc4a6b8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,17 +2,17 @@ CherryPy==3.6.0 Theano==0.7.0 WebOb==1.4 WebTest==2.0.16 -beautifulsoup4==4.3.2 +beautifulsoup4==4.4.1 cov-core==1.14.0 coverage==3.7.1 matplotlib==1.4.3 mock==1.0.1 numpy==1.9.2 -py==1.4.26 +py==1.4.31 pycrypto==2.6.1 -pytest==2.6.4 +pytest==2.8.7 pytest-cov==1.8.1 -scipy==0.15.1 +scipy==0.17.0 six==1.8.0 -e git+https://github.com/micropsi-industries/spock.git#egg=spock-dev waitress==0.8.9
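A quick way to check that an existing environment matches the pins above; the requirement strings are copied from requirements.txt, and the selection of packages is illustrative:

    # Verify a few of the pinned versions with setuptools' pkg_resources.
    import pkg_resources

    for req in ('pytest==2.8.7', 'scipy==0.17.0', 'beautifulsoup4==4.4.1'):
        try:
            pkg_resources.require(req)
            print(req, 'ok')
        except (pkg_resources.VersionConflict,
                pkg_resources.DistributionNotFound) as exc:
            print(req, '->', exc)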