From 87e5dbb945674f7ab90101be038c7c49bbed51d0 Mon Sep 17 00:00:00 2001 From: John Harwell Date: Fri, 4 Oct 2024 13:42:29 -0500 Subject: [PATCH] refactor(#317): Better design - Continue moving things over to the new PathSet() paradigm - I MAY want to have the batch criteria own its own pathset; not sure if that's a good idea or not. Will revisit. --- .../tutorials/project/template_input_file.rst | 8 +- sierra/core/__init__.py | 2 - sierra/core/batchroot.py | 26 +- sierra/core/experiment/bindings.py | 4 +- sierra/core/experiment/spec.py | 9 +- sierra/core/exproot.py | 29 ++ sierra/core/generators/exp_creator.py | 21 +- sierra/core/generators/exp_generators.py | 25 +- sierra/core/generators/generator_factory.py | 8 +- sierra/core/models/interface.py | 11 +- sierra/core/pipeline/pipeline.py | 9 +- .../core/pipeline/stage1/pipeline_stage1.py | 11 +- sierra/core/pipeline/stage2/exp_runner.py | 24 +- .../core/pipeline/stage2/pipeline_stage2.py | 9 +- sierra/core/pipeline/stage3/collate.py | 6 +- sierra/core/pipeline/stage3/imagize.py | 2 +- .../core/pipeline/stage3/pipeline_stage3.py | 17 +- sierra/core/pipeline/stage3/statistics.py | 2 +- sierra/core/pipeline/stage4/__init__.py | 1 - sierra/core/pipeline/stage4/graphs/collate.py | 49 ++-- .../pipeline/stage4/graphs/inter/generate.py | 8 +- .../pipeline/stage4/graphs/inter/heatmap.py | 2 +- .../core/pipeline/stage4/graphs/inter/line.py | 15 +- .../pipeline/stage4/graphs/intra/generate.py | 54 ++-- sierra/core/pipeline/stage4/model_runner.py | 53 ++-- .../core/pipeline/stage4/pipeline_stage4.py | 30 ++- sierra/core/pipeline/stage4/render.py | 26 +- .../stage5/inter_scenario_comparator.py | 31 +-- .../stage5/intra_scenario_comparator.py | 251 +++++++++--------- sierra/core/pipeline/stage5/leafcalc.py | 24 -- sierra/core/pipeline/stage5/namecalc.py | 29 ++ sierra/core/pipeline/stage5/outputroot.py | 6 +- .../core/pipeline/stage5/pipeline_stage5.py | 28 +- sierra/core/platform.py | 6 +- sierra/core/ros1/callbacks.py | 2 +- 
sierra/core/utils.py | 22 +- sierra/core/variables/batch_criteria.py | 157 ++++++----- sierra/core/variables/population_size.py | 10 +- sierra/main.py | 5 +- sierra/plugins/platform/argos/cmdline.py | 2 +- .../argos/generators/platform_generators.py | 16 +- sierra/plugins/platform/argos/plugin.py | 13 +- .../argos/variables/physics_engines.py | 8 +- .../variables/population_constant_density.py | 24 +- .../argos/variables/population_size.py | 7 +- .../variables/population_variable_density.py | 22 +- sierra/plugins/platform/ros1gazebo/plugin.py | 15 +- .../ros1gazebo/variables/population_size.py | 7 +- .../generators/platform_generators.py | 4 +- sierra/plugins/platform/ros1robot/plugin.py | 33 +-- .../ros1robot/variables/population_size.py | 9 +- 51 files changed, 642 insertions(+), 550 deletions(-) mode change 100755 => 100644 sierra/core/__init__.py create mode 100644 sierra/core/exproot.py delete mode 100644 sierra/core/pipeline/stage5/leafcalc.py create mode 100644 sierra/core/pipeline/stage5/namecalc.py diff --git a/docs/src/tutorials/project/template_input_file.rst b/docs/src/tutorials/project/template_input_file.rst index 16231306..8ba8801d 100644 --- a/docs/src/tutorials/project/template_input_file.rst +++ b/docs/src/tutorials/project/template_input_file.rst @@ -165,7 +165,7 @@ Any of the following may be inserted: - + ... @@ -189,7 +189,7 @@ Any of the following may be inserted: - + ... @@ -224,7 +224,7 @@ Any of the following may be inserted: - + ... @@ -247,7 +247,7 @@ Any of the following may be inserted: - + ... diff --git a/sierra/core/__init__.py b/sierra/core/__init__.py old mode 100755 new mode 100644 index 800c311b..38573038 --- a/sierra/core/__init__.py +++ b/sierra/core/__init__.py @@ -7,5 +7,3 @@ # 3rd party packages # Project packages -from . import cmdline -from . 
import config diff --git a/sierra/core/batchroot.py b/sierra/core/batchroot.py index 0cc7a800..d5640904 100644 --- a/sierra/core/batchroot.py +++ b/sierra/core/batchroot.py @@ -94,18 +94,18 @@ def to_path(self) -> pathlib.Path: class PathSet(): def __init__(self, root: ExpRoot) -> None: - self.root = root - self.input_root = self.root.to_path() / "exp-inputs" - self.output_root = self.root.to_path() / "exp-outputs" - self.graph_root = self.root.to_path() / "graphs" - self.model_root = self.root.to_path() / "models" - self.stat_root = self.root.to_path() / "statistics" + self.input_root = root.to_path() / "exp-inputs" + self.output_root = root.to_path() / "exp-outputs" + self.graph_root = root.to_path() / "graphs" + self.model_root = root.to_path() / "models" + self.stat_root = root.to_path() / "statistics" self.stat_exec_root = self.stat_root.to_path() / "exec" - self.imagize_root = self.root.to_path() / "imagize" - self.video_root = self.root.to_path() / "videos" - self.stat_collate = self.stat_root.to_path() / "collated" - self.graph_collate = self.graph_root.to_path() / "collated" - self.scratch_root = self.root.to_path() / "scratch" + self.imagize_root = root.to_path() / "imagize" + self.video_root = root.to_path() / "videos" + self.stat_collate_root = self.stat_root / "collated" + self.graph_collate_root = self.graph_root / "collated" + self.scratch_root = root.to_path() / "scratch" + self.root = root.to_path() def from_cmdline(args: argparse.Namespace) -> PathSet: @@ -128,7 +128,7 @@ def from_cmdline(args: argparse.Namespace) -> PathSet: args.controller) -def from_exp(sierra_rpath: str, +def from_exp(sierra_root: str, project: str, batch_leaf: ExpRootLeaf, controller: str) -> PathSet: @@ -150,7 +150,7 @@ def from_exp(sierra_rpath: str, controller: The name of the controller used.
""" - root = ExpRoot(sierra_rpath, + root = ExpRoot(sierra_root, project, controller, batch_leaf) diff --git a/sierra/core/experiment/bindings.py b/sierra/core/experiment/bindings.py index 997a8057..122e678c 100644 --- a/sierra/core/experiment/bindings.py +++ b/sierra/core/experiment/bindings.py @@ -135,7 +135,7 @@ class IExpRunShellCmdsGenerator(implements.Interface): cmdopts: Dictionary of parsed cmdline options. - n_robots: The configured # of robots for the experimental run. + n_agents: The configured # of robots for the experimental run. exp_num: The 0-based index of the experiment in the batch. @@ -144,7 +144,7 @@ class IExpRunShellCmdsGenerator(implements.Interface): def __init__(self, cmdopts: types.Cmdopts, criteria: bc.BatchCriteria, - n_robots: int, + n_agents: int, exp_num: int) -> None: raise NotImplementedError diff --git a/sierra/core/experiment/spec.py b/sierra/core/experiment/spec.py index 5b1dc1ac..021e5b8d 100755 --- a/sierra/core/experiment/spec.py +++ b/sierra/core/experiment/spec.py @@ -33,7 +33,7 @@ def __init__(self, self.mods = [] self.is_compound = False - assert len(self.rms) == 0,\ + assert len(self.rms) == 0, \ "Batch criteria cannot remove XML tags" if self.chgs: @@ -79,7 +79,7 @@ def __init__(self, self.is_compound = True self.mods = [] - assert len(self.rms) == 0,\ + assert len(self.rms) == 0, \ "Batch criteria cannot remove XML tags" if self.chgs and self.adds: @@ -125,12 +125,13 @@ class ExperimentSpec(): def __init__(self, criteria: bc.IConcreteBatchCriteria, + batch_input_root: pathlib.Path, exp_num: int, cmdopts: types.Cmdopts) -> None: self.exp_num = exp_num - exp_name = criteria.gen_exp_names(cmdopts)[exp_num] + exp_name = criteria.gen_exp_names()[exp_num] - self.exp_input_root = pathlib.Path(cmdopts['batch_input_root'], exp_name) + self.exp_input_root = batch_input_root / exp_name self.exp_def_fpath = self.exp_input_root / config.kPickleLeaf self.logger = logging.getLogger(__name__) diff --git a/sierra/core/exproot.py 
b/sierra/core/exproot.py new file mode 100644 index 00000000..305f724b --- /dev/null +++ b/sierra/core/exproot.py @@ -0,0 +1,29 @@ +# +# Copyright 2024 John Harwell, All rights reserved. +# +# SPDX-License-Identifier: MIT +# + +# Core packages +import typing as tp + +# 3rd party packages + +# Project packages +from sierra.core import batchroot + + +class PathSet(): + def __init__(self, + batch: batchroot.PathSet, + exp_name: str, + exp0_name: tp.Optional[str] = None) -> None: + self.input_root = batch.input_root / exp_name + self.output_root = batch.output_root / exp_name + self.graph_root = batch.graph_root / exp_name + self.model_root = batch.model_root / exp_name + self.stat_root = batch.stat_root / exp_name + + if exp0_name: + self.exp0_output_root = batch.output_root / exp0_name + self.exp0_stat_root = batch.stat_root / exp0_name diff --git a/sierra/core/generators/exp_creator.py b/sierra/core/generators/exp_creator.py index 94e3ced7..4274f4ef 100755 --- a/sierra/core/generators/exp_creator.py +++ b/sierra/core/generators/exp_creator.py @@ -21,7 +21,7 @@ # Project packages from sierra.core.variables import batch_criteria as bc -from sierra.core import config, utils, types, platform +from sierra.core import config, utils, types, platform, batchroot import sierra.core.plugin_manager as pm from sierra.core.generators.exp_generators import BatchExpDefGenerator from sierra.core.experiment import bindings, definition @@ -118,13 +118,13 @@ def from_def(self, exp_def: definition.XMLExpDef): if configurer.cmdfile_paradigm() == 'per-exp' and utils.path_exists(commands_fpath): commands_fpath.unlink() - n_robots = utils.get_n_robots(self.criteria.main_config, + n_agents = utils.get_n_agents(self.criteria.main_config, self.cmdopts, self.exp_input_root, exp_def) generator = platform.ExpRunShellCmdsGenerator(self.cmdopts, self.criteria, - n_robots, + n_agents, self.exp_num) # Create all experimental runs
@@ -229,7 +229,7 @@ def _update_cmds_file(self, pre_specs = cmds_generator.pre_run_cmds(for_host, launch_stem_path, run_num) - assert all(spec.shell for spec in pre_specs),\ + assert all(spec.shell for spec in pre_specs), \ "All pre-exp commands are run in a shell" pre_cmds = [spec.cmd for spec in pre_specs] self.logger.trace("Pre-experiment cmds: %s", pre_cmds) # type: ignore @@ -237,13 +237,13 @@ def _update_cmds_file(self, exec_specs = cmds_generator.exec_run_cmds(for_host, launch_stem_path, run_num) - assert all(spec.shell for spec in exec_specs),\ + assert all(spec.shell for spec in exec_specs), \ "All exec-exp commands are run in a shell" exec_cmds = [spec.cmd for spec in exec_specs] self.logger.trace("Exec-experiment cmds: %s", exec_cmds) # type: ignore post_specs = cmds_generator.post_run_cmds(for_host) - assert all(spec.shell for spec in post_specs),\ + assert all(spec.shell for spec in post_specs), \ "All post-exp commands are run in a shell" post_cmds = [spec.cmd for spec in post_specs] self.logger.trace("Post-experiment cmds: %s", post_cmds) # type: ignore @@ -298,11 +298,12 @@ class BatchExpCreator: def __init__(self, criteria: bc.BatchCriteria, - cmdopts: types.Cmdopts) -> None: + cmdopts: types.Cmdopts, + pathset: batchroot.PathSet) -> None: self.batch_config_template = pathlib.Path(cmdopts['template_input_file']) - self.batch_input_root = pathlib.Path(cmdopts['batch_input_root']) - self.batch_output_root = pathlib.Path(cmdopts['batch_output_root']) + self.batch_input_root = pathset.input_root + self.batch_output_root = pathset.output_root self.criteria = criteria self.cmdopts = cmdopts self.logger = logging.getLogger(__name__) @@ -333,7 +334,7 @@ def create(self, generator: BatchExpDefGenerator) -> None: self.logger.debug( "Applying generated scenario+controller changes to exp%s", i) - expi = self.criteria.gen_exp_names(self.cmdopts)[i] + expi = self.criteria.gen_exp_names()[i] exp_output_root = self.batch_output_root / expi exp_input_root = 
self.batch_input_root / expi diff --git a/sierra/core/generators/exp_generators.py b/sierra/core/generators/exp_generators.py index e35c2bd0..db929a3a 100755 --- a/sierra/core/generators/exp_generators.py +++ b/sierra/core/generators/exp_generators.py @@ -27,7 +27,7 @@ # Project packages import sierra.core.generators.generator_factory as gf from sierra.core.experiment import spec, definition -from sierra.core import types +from sierra.core import types, batchroot import sierra.core.variables.batch_criteria as bc @@ -41,20 +41,6 @@ class BatchExpDefGenerator: batch_config_template: Absolute path to the root template XML configuration file. - batch_input_root: Root directory for all generated XML input files all - experiments should be stored (relative to current - dir or absolute). Each experiment will get a - directory within this root to store the xml input - files for the set of :term:`Experimental Runs - ` comprising an - :term:`Experiment`; directory name determined by - the batch criteria used. - - batch_output_root: Root directory for all experiment outputs (relative - to current dir or absolute). Each experiment will get - a directory 'exp' in this directory for its - outputs. - criteria: :class:`~sierra.core.variables.batch_criteria.BatchCriteria` derived object instance created from cmdline definition. 
@@ -66,6 +52,7 @@ class BatchExpDefGenerator: def __init__(self, criteria: bc.IConcreteBatchCriteria, + pathset: batchroot.PathSet, controller_name: str, scenario_basename: str, cmdopts: types.Cmdopts) -> None: @@ -77,8 +64,7 @@ def __init__(self, self.exp_template_stem = self.batch_config_template.stem self.batch_config_extension = None - self.batch_input_root = pathlib.Path(cmdopts['batch_input_root']) - self.batch_output_root = pathlib.Path(cmdopts['batch_output_root']) + self.pathset = pathset self.controller_name = controller_name self.scenario_basename = scenario_basename @@ -118,7 +104,10 @@ def _create_exp_generator(self, exp_num: int): exp_num: Experiment number in the batch """ - exp_spec = spec.ExperimentSpec(self.criteria, exp_num, self.cmdopts) + exp_spec = spec.ExperimentSpec(self.criteria, + self.pathset.input_root, + exp_num, + self.cmdopts) template_fpath = exp_spec.exp_input_root / self.exp_template_stem config_root = pathlib.Path(self.cmdopts['project_config_root']) scenario = gf.scenario_generator_create(controller=self.controller_name, diff --git a/sierra/core/generators/generator_factory.py b/sierra/core/generators/generator_factory.py index 275c4f59..86ecf891 100755 --- a/sierra/core/generators/generator_factory.py +++ b/sierra/core/generators/generator_factory.py @@ -82,15 +82,15 @@ def _do_tag_add(self, # the platform relies on added tags to calculate population sizes, # then this won't work. controllers = config.kYAML.controllers - assert hasattr(self.spec.criteria, 'n_robots'),\ + assert hasattr(self.spec.criteria, 'n_agents'), \ (f"When using __UUID__ and tag_add in {controllers}, the batch " "criteria must implement bc.IQueryableBatchCriteria") - n_robots = self.spec.criteria.n_robots(self.spec.exp_num) + n_agents = self.spec.criteria.n_agents(self.spec.exp_num) - assert n_robots > 0,\ + assert n_agents > 0, \ "Batch criteria {self.spec.criteria} returned 0 robots?" 
- for robot_id in range(0, n_robots): + for robot_id in range(0, n_agents): to_pp = copy.deepcopy(add) pp_add = self._pp_for_tag_add(to_pp, robot_id) exp_def.tag_add(pp_add.path, diff --git a/sierra/core/models/interface.py b/sierra/core/models/interface.py index 01dc3d32..3ab058a2 100755 --- a/sierra/core/models/interface.py +++ b/sierra/core/models/interface.py @@ -16,7 +16,7 @@ # Project packages from sierra.core.variables import batch_criteria as bc -from sierra.core import types +from sierra.core import types, exproot, batchroot class IConcreteIntraExpModel1D(implements.Interface): @@ -34,7 +34,8 @@ class IConcreteIntraExpModel1D(implements.Interface): def run(self, criteria: bc.IConcreteBatchCriteria, exp_num: int, - cmdopts: types.Cmdopts) -> tp.List[pd.DataFrame]: + cmdopts: types.Cmdopts, + pathset: exproot.PathSet) -> tp.List[pd.DataFrame]: """Run the model and generate a list of dataframes. Each dataframe can (potentially) target different graphs. All dataframes @@ -97,7 +98,8 @@ class IConcreteIntraExpModel2D(implements.Interface): def run(self, criteria: bc.IConcreteBatchCriteria, exp_num: int, - cmdopts: types.Cmdopts) -> tp.List[pd.DataFrame]: + cmdopts: types.Cmdopts, + pathset: exproot.PathSet) -> tp.List[pd.DataFrame]: """Run the model and generate a list of dataframes. Each dataframe can (potentially) target a different graph. Each @@ -150,7 +152,8 @@ class IConcreteInterExpModel1D(implements.Interface): def run(self, criteria: bc.IConcreteBatchCriteria, - cmdopts: types.Cmdopts) -> tp.List[pd.DataFrame]: + cmdopts: types.Cmdopts, + pathset: batchroot.PathSet) -> tp.List[pd.DataFrame]: """Run the model and generate list of dataframes. Each dataframe can (potentially) target a different graph. 
Each diff --git a/sierra/core/pipeline/pipeline.py b/sierra/core/pipeline/pipeline.py index 1baba66c..3aa5cc73 100755 --- a/sierra/core/pipeline/pipeline.py +++ b/sierra/core/pipeline/pipeline.py @@ -33,7 +33,7 @@ class Pipeline: def __init__(self, args: argparse.Namespace, controller: tp.Optional[str], - pathset: batchroot.PathSet) -> None: + pathset: tp.Optional[batchroot.PathSet] = None) -> None: self.args = args self.logger = logging.getLogger(__name__) @@ -145,11 +145,11 @@ def run(self) -> None: self.batch_criteria).run() if 2 in self.args.pipeline: - PipelineStage2(self.cmdopts).run(self.batch_criteria) + PipelineStage2(self.cmdopts, self.pathset).run(self.batch_criteria) if 3 in self.args.pipeline: PipelineStage3(self.main_config, - self.cmdopts).run(self.batch_criteria) + self.cmdopts, self.pathset).run(self.batch_criteria) if 4 in self.args.pipeline: PipelineStage4(self.main_config, @@ -158,7 +158,8 @@ def run(self) -> None: # not part of default pipeline if 5 in self.args.pipeline: PipelineStage5(self.main_config, - self.cmdopts).run(self.args) + self.cmdopts, + self.pathset).run(self.args) def _load_config(self) -> None: self.logger.debug("Loading project config from '%s'", diff --git a/sierra/core/pipeline/stage1/pipeline_stage1.py b/sierra/core/pipeline/stage1/pipeline_stage1.py index 9b38d783..a0321c70 100755 --- a/sierra/core/pipeline/stage1/pipeline_stage1.py +++ b/sierra/core/pipeline/stage1/pipeline_stage1.py @@ -15,7 +15,7 @@ from sierra.core.generators.exp_generators import BatchExpDefGenerator from sierra.core.generators.exp_creator import BatchExpCreator import sierra.core.variables.batch_criteria as bc -from sierra.core import types +from sierra.core import types, batchroot class PipelineStage1: @@ -30,13 +30,18 @@ class PipelineStage1: def __init__(self, cmdopts: types.Cmdopts, + pathset: batchroot.PathSet, controller: str, criteria: bc.IConcreteBatchCriteria) -> None: self.generator = BatchExpDefGenerator(controller_name=controller, 
scenario_basename=cmdopts['scenario'], criteria=criteria, + pathset=pathset, cmdopts=cmdopts) - self.creator = BatchExpCreator(criteria=criteria, cmdopts=cmdopts) + self.creator = BatchExpCreator(criteria=criteria, + cmdopts=cmdopts, + pathset=pathset) + self.pathset = pathset self.cmdopts = cmdopts self.criteria = criteria @@ -69,7 +74,7 @@ def run(self) -> None: """ self.logger.info("Generating input files for batch experiment in %s...", - self.cmdopts['batch_root']) + self.pathset.root) self.creator.create(self.generator) n_exp_in_batch = len(self.criteria.gen_attr_changelist()) + \ diff --git a/sierra/core/pipeline/stage2/exp_runner.py b/sierra/core/pipeline/stage2/exp_runner.py index 954620c2..733bb663 100755 --- a/sierra/core/pipeline/stage2/exp_runner.py +++ b/sierra/core/pipeline/stage2/exp_runner.py @@ -165,9 +165,9 @@ def __call__(self) -> None: module.pre_exp_diagnostics(self.cmdopts, self.logger) exp_all = [self.batch_exp_root / d - for d in self.criteria.gen_exp_names(self.cmdopts)] + for d in self.criteria.gen_exp_names()] - exp_to_run = utils.exp_range_calc(self.cmdopts, + exp_to_run = utils.exp_range_calc(self.cmdopts["exp_range"], self.batch_exp_root, self.criteria) @@ -180,7 +180,8 @@ def __call__(self) -> None: # Calculate path for to file for logging execution times now = datetime.datetime.now() - exec_times_fpath = self.batch_stat_exec_root / now.strftime("%Y-%m-%e-%H:%M") + exec_times_fpath = self.pathset.stat_exec_root / \ + now.strftime("%Y-%m-%e-%H:%M") # Start a new process for the experiment shell so pre-run commands have # an effect (if they set environment variables, etc.). 
@@ -197,11 +198,12 @@ def __call__(self) -> None: for spec in generator.pre_exp_cmds(): shell.run_from_spec(spec) - runner = ExpRunner(self.cmdopts, + runner = ExpRunner(self.pathset, + self.cmdopts, exec_times_fpath, generator, shell) - runner(exp, exp_num) + runner(exp.name, exp_num) # Run cmds to cleanup platform-specific things now that the experiment # is done (if needed). @@ -219,6 +221,7 @@ class ExpRunner: """ def __init__(self, + pathset: batchroot.PathSet, cmdopts: types.Cmdopts, exec_times_fpath: pathlib.Path, generator: platform.ExpShellCmdsGenerator, @@ -228,14 +231,16 @@ def __init__(self, self.shell = shell self.generator = generator self.cmdopts = cmdopts + self.pathset = pathset self.logger = logging.getLogger(__name__) def __call__(self, - exp_input_root: pathlib.Path, + exp_name: str, exp_num: int) -> None: """Execute experimental runs for a single experiment. """ - + exp_input_root = self.pathset.input_root / exp_name + exp_scratch_root = self.pathset.scratch_root / exp_name self.logger.info("Running exp%s in '%s'", exp_num, exp_input_root) @@ -244,8 +249,7 @@ def __call__(self, wd = exp_input_root.relative_to(pathlib.Path().home()) start = time.time() - scratch_root = self.cmdopts['batch_scratch_root'] / exp_input_root.name - utils.dir_create_checked(scratch_root, exist_ok=True) + utils.dir_create_checked(exp_scratch_root, exist_ok=True) assert self.cmdopts['exec_jobs_per_node'] is not None, \ "# parallel jobs can't be None" @@ -253,7 +257,7 @@ def __call__(self, exec_opts = { 'exp_input_root': str(exp_input_root), 'work_dir': str(wd), - 'scratch_dir': str(scratch_root), + 'scratch_dir': str(exp_scratch_root), 'cmdfile_stem_path': str(exp_input_root / config.kGNUParallel['cmdfile_stem']), 'cmdfile_ext': config.kGNUParallel['cmdfile_ext'], 'exec_resume': self.cmdopts['exec_resume'], diff --git a/sierra/core/pipeline/stage2/pipeline_stage2.py b/sierra/core/pipeline/stage2/pipeline_stage2.py index 3ba61a42..9646d0a7 100755 --- 
a/sierra/core/pipeline/stage2/pipeline_stage2.py +++ b/sierra/core/pipeline/stage2/pipeline_stage2.py @@ -16,7 +16,7 @@ # Project packages from sierra.core.variables import batch_criteria as bc from sierra.core.pipeline.stage2.exp_runner import BatchExpRunner -from sierra.core import types +from sierra.core import types, batchroot class PipelineStage2: @@ -30,13 +30,16 @@ class PipelineStage2: """ - def __init__(self, cmdopts: types.Cmdopts) -> None: + def __init__(self, + cmdopts: types.Cmdopts, + pathset: batchroot.PathSet) -> None: self.logger = logging.getLogger(__name__) self.cmdopts = cmdopts + self.pathset = pathset def run(self, criteria: bc.BatchCriteria) -> None: start = time.time() - BatchExpRunner(self.cmdopts, criteria)() + BatchExpRunner(self.cmdopts, self.pathset, criteria)() elapsed = int(time.time() - start) sec = datetime.timedelta(seconds=elapsed) self.logger.info("Execution complete in %s", str(sec)) diff --git a/sierra/core/pipeline/stage3/collate.py b/sierra/core/pipeline/stage3/collate.py index c517e20e..fb920fac 100755 --- a/sierra/core/pipeline/stage3/collate.py +++ b/sierra/core/pipeline/stage3/collate.py @@ -31,7 +31,7 @@ # Project packages import sierra.core.variables.batch_criteria as bc import sierra.core.plugin_manager as pm -from sierra.core import types, storage, utils, config, pathset +from sierra.core import types, storage, utils, config, batchroot class ExpParallelCollator: @@ -69,12 +69,12 @@ def __call__(self, criteria: bc.IConcreteBatchCriteria) -> None: gatherq = m.Queue() processq = m.Queue() - exp_to_proc = utils.exp_range_calc(self.cmdopts, + exp_to_proc = utils.exp_range_calc(self.cmdopts["exp_range"], self.pathset.output_root, criteria) for exp in exp_to_proc: - gatherq.put((self.cmdopts['batch_output_root'], exp.name)) + gatherq.put((self.pathset.output_root, exp.name)) self.logger.debug("Starting %d gatherers, method=%s", n_gatherers, diff --git a/sierra/core/pipeline/stage3/imagize.py 
b/sierra/core/pipeline/stage3/imagize.py index c65a6846..4e7a21c0 100755 --- a/sierra/core/pipeline/stage3/imagize.py +++ b/sierra/core/pipeline/stage3/imagize.py @@ -38,7 +38,7 @@ def proc_batch_exp(main_config: types.YAMLDict, to serial if memory on the SIERRA host machine is limited via ``--processing-serial``. """ - exp_to_imagize = utils.exp_range_calc(cmdopts, + exp_to_imagize = utils.exp_range_calc(cmdopts["exp_range"], pathset.output_root, criteria) diff --git a/sierra/core/pipeline/stage3/pipeline_stage3.py b/sierra/core/pipeline/stage3/pipeline_stage3.py index 965d3928..cbe394aa 100755 --- a/sierra/core/pipeline/stage3/pipeline_stage3.py +++ b/sierra/core/pipeline/stage3/pipeline_stage3.py @@ -78,7 +78,9 @@ def _run_statistics(self, criteria: bc.IConcreteBatchCriteria): self.logger.info("Generating statistics from experiment outputs in %s...", self.pathset.output_root) start = time.time() - statistics.BatchExpCalculator(self.main_config, self.cmdopts)(criteria) + statistics.BatchExpCalculator(self.main_config, + self.cmdopts, + self.pathset)(criteria) elapsed = int(time.time() - start) sec = datetime.timedelta(seconds=elapsed) self.logger.info("Statistics generation complete in %s", str(sec)) @@ -89,7 +91,9 @@ def _run_run_collation(self, self.logger.info("Collating experiment run outputs into %s...", self.pathset.stat_collate_root) start = time.time() - collate.ExpParallelCollator(self.main_config, self.cmdopts)(criteria) + collate.ExpParallelCollator(self.main_config, + self.cmdopts, + self.pathset)(criteria) elapsed = int(time.time() - start) sec = datetime.timedelta(seconds=elapsed) self.logger.info( @@ -100,10 +104,13 @@ def _run_imagizing(self, intra_HM_config: dict, cmdopts: types.Cmdopts, criteria: bc.IConcreteBatchCriteria): - self.logger.info("Imagizing .csvs in %s...", - cmdopts['batch_output_root']) + self.logger.info("Imagizing .csvs in %s...", self.pathset.output_root) start = time.time() - imagize.proc_batch_exp(main_config, cmdopts, 
intra_HM_config, criteria) + imagize.proc_batch_exp(main_config, + cmdopts, + self.pathset, + intra_HM_config, + criteria) elapsed = int(time.time() - start) sec = datetime.timedelta(seconds=elapsed) self.logger.info("Imagizing complete: %s", str(sec)) diff --git a/sierra/core/pipeline/stage3/statistics.py b/sierra/core/pipeline/stage3/statistics.py index 839d2b84..4231e326 100755 --- a/sierra/core/pipeline/stage3/statistics.py +++ b/sierra/core/pipeline/stage3/statistics.py @@ -56,7 +56,7 @@ def __init__(self, main_config: dict, cmdopts: types.Cmdopts, pathset: batchroot def __call__(self, criteria: bc.IConcreteBatchCriteria) -> None: - exp_to_avg = utils.exp_range_calc(self.cmdopts, + exp_to_avg = utils.exp_range_calc(self.cmdopts["exp_range"], self.pathset.output_root, criteria) diff --git a/sierra/core/pipeline/stage4/__init__.py b/sierra/core/pipeline/stage4/__init__.py index 695200db..e69de29b 100755 --- a/sierra/core/pipeline/stage4/__init__.py +++ b/sierra/core/pipeline/stage4/__init__.py @@ -1 +0,0 @@ -from . 
import graphs diff --git a/sierra/core/pipeline/stage4/graphs/collate.py b/sierra/core/pipeline/stage4/graphs/collate.py index 3a8f13b4..839d3d11 100755 --- a/sierra/core/pipeline/stage4/graphs/collate.py +++ b/sierra/core/pipeline/stage4/graphs/collate.py @@ -60,22 +60,23 @@ class UnivarGraphCollator: def __init__(self, main_config: types.YAMLDict, - cmdopts: types.Cmdopts) -> None: + cmdopts: types.Cmdopts, + pathset: batchroot.PathSet) -> None: self.main_config = main_config self.cmdopts = cmdopts + self.pathset = pathset self.logger = logging.getLogger(__name__) def __call__(self, criteria, - target: dict, - stat_collate_root: pathlib.Path) -> None: + target: dict) -> None: self.logger.info("Univariate files from batch in %s for graph '%s'...", - self.cmdopts['batch_output_root'], + self.pathset.output_root, target['src_stem']) self.logger.trace(json.dumps(target, indent=4)) # type: ignore - exp_dirs = utils.exp_range_calc(self.cmdopts, - self.cmdopts['batch_output_root'], + exp_dirs = utils.exp_range_calc(self.cmdopts["exp_range"], + self.pathset.output_root, criteria) # Always do the mean, even if stats are disabled @@ -98,12 +99,13 @@ def __call__(self, for stat in stats: if stat.all_srcs_exist: writer(stat.df, - stat_collate_root / (target['dest_stem'] + stat.df_ext), + self.pathset.stat_collate_root / + (target['dest_stem'] + stat.df_ext), index=False) elif not stat.all_srcs_exist and stat.some_srcs_exist: self.logger.warning("Not all experiments in '%s' produced '%s%s'", - self.cmdopts['batch_output_root'], + self.pathset.output_root, target['src_stem'], stat.df_ext) @@ -144,22 +146,23 @@ class BivarGraphCollator: def __init__(self, main_config: types.YAMLDict, - cmdopts: types.Cmdopts) -> None: + cmdopts: types.Cmdopts, + pathset: batchroot.PathSet) -> None: self.main_config = main_config self.cmdopts = cmdopts + self.pathset = pathset self.logger = logging.getLogger(__name__) def __call__(self, criteria: bc.IConcreteBatchCriteria, - target: dict, - 
stat_collate_root: pathlib.Path) -> None: + target: dict) -> None: self.logger.info("Bivariate files from batch in %s for graph '%s'...", - self.cmdopts['batch_output_root'], + self.pathset.output_root, target['src_stem']) self.logger.trace(json.dumps(target, indent=4)) # type: ignore - exp_dirs = utils.exp_range_calc(self.cmdopts, - self.cmdopts['batch_output_root'], + exp_dirs = utils.exp_range_calc(self.cmdopts["exp_range"], + self.pathset.output_root, criteria) xlabels, ylabels = utils.bivar_exp_labels_calc(exp_dirs) @@ -189,7 +192,7 @@ def __call__(self, row, stat.df_ext) writer(df, - stat_collate_root / name, + self.pathset.stat_collate_root / name, index=False) # TODO: Don't write this for now, until I find a better way of @@ -200,7 +203,7 @@ def __call__(self, elif stat.some_srcs_exist: self.logger.warning("Not all experiments in '%s' produced '%s%s'", - self.cmdopts['batch_output_root'], + self.pathset.output_root, target['src_stem'], stat.df_ext) @@ -254,8 +257,8 @@ class ParallelCollator(): def __init__(self, main_config: types.YAMLDict, - pathset: batchroot.PathSet, - cmdopts: types.Cmdopts) -> None: + cmdopts: types.Cmdopts, + pathset: batchroot.PathSet) -> None: self.main_config = main_config self.cmdopts = cmdopts @@ -282,7 +285,7 @@ def __call__(self, args=(q, self.main_config, self.cmdopts, - self.pathset.stat_collate_root, + self.pathset, criteria)) p.start() @@ -292,21 +295,21 @@ def __call__(self, def _thread_worker(q: mp.Queue, main_config: types.YAMLDict, cmdopts: types.Cmdopts, - stat_collate_root: pathlib.Path, + pathset: batchroot.PathSet, criteria) -> None: collator: tp.Union[UnivarGraphCollator, BivarGraphCollator] if criteria.is_univar(): - collator = UnivarGraphCollator(main_config, cmdopts) + collator = UnivarGraphCollator(main_config, cmdopts, pathset) else: - collator = BivarGraphCollator(main_config, cmdopts) + collator = BivarGraphCollator(main_config, cmdopts, pathset) while True: # Wait for 3 seconds after the queue is empty 
before bailing try: graph = q.get(True, 3) - collator(criteria, graph, stat_collate_root) + collator(criteria, graph) q.task_done() except queue.Empty: break diff --git a/sierra/core/pipeline/stage4/graphs/inter/generate.py b/sierra/core/pipeline/stage4/graphs/inter/generate.py index 77f64031..89938a11 100755 --- a/sierra/core/pipeline/stage4/graphs/inter/generate.py +++ b/sierra/core/pipeline/stage4/graphs/inter/generate.py @@ -14,12 +14,13 @@ # Project packages from sierra.core.variables import batch_criteria as bc -from sierra.core import types, utils +from sierra.core import types, utils, batchroot from sierra.core.pipeline.stage4.graphs.inter import line, heatmap def generate(main_config: types.YAMLDict, + pathset: batchroot.PathSet, cmdopts: types.Cmdopts, LN_targets: tp.List[types.YAMLDict], HM_targets: tp.List[types.YAMLDict], @@ -58,17 +59,18 @@ def generate(main_config: types.YAMLDict, to get logging messages have unique logger names between this class and your derived class , in order to reduce confusion. 
""" - utils.dir_create_checked(cmdopts['batch_graph_collate_root'], - exist_ok=True) + utils.dir_create_checked(pathset.graph_collate_root, exist_ok=True) if criteria.is_univar(): if not cmdopts['project_no_LN']: line.generate(cmdopts, + pathset, LN_targets, criteria) else: if not cmdopts['project_no_HM']: heatmap.generate(cmdopts, + pathset, HM_targets, criteria) diff --git a/sierra/core/pipeline/stage4/graphs/inter/heatmap.py b/sierra/core/pipeline/stage4/graphs/inter/heatmap.py index 8ecc1afb..3b99083b 100644 --- a/sierra/core/pipeline/stage4/graphs/inter/heatmap.py +++ b/sierra/core/pipeline/stage4/graphs/inter/heatmap.py @@ -15,7 +15,7 @@ import json # Project packages -from sierra.core import types, config, +from sierra.core import types, config, batchroot from sierra.core.variables import batch_criteria as bc from sierra.core.graphs.heatmap import Heatmap diff --git a/sierra/core/pipeline/stage4/graphs/inter/line.py b/sierra/core/pipeline/stage4/graphs/inter/line.py index 3bab1c25..9fbb29e9 100644 --- a/sierra/core/pipeline/stage4/graphs/inter/line.py +++ b/sierra/core/pipeline/stage4/graphs/inter/line.py @@ -44,14 +44,12 @@ def generate( _gen_summary_linegraph(graph, pathset, cmdopts, - criteria, - pathset.graph_collate_root) + criteria) else: _gen_stacked_linegraph(graph, - pathset.stat_collate_root, + pathset, cmdopts, - criteria, - pathset.graph_collate_root) + criteria) def _gen_summary_linegraph(graph: types.YAMLDict, @@ -68,9 +66,10 @@ def _gen_summary_linegraph(graph: types.YAMLDict, title=graph['title'], xlabel=criteria.graph_xlabel(cmdopts), ylabel=graph.get('ylabel', None), - xticks=criteria.graph_xticks(cmdopts), - xtick_labels=criteria.graph_xticklabels( - cmdopts), + xticks=criteria.graph_xticks(cmdopts, + pathset.output_root), + xtick_labels=criteria.graph_xticklabels(cmdopts, + pathset.output_root), logyscale=cmdopts['plot_log_yscale'], large_text=cmdopts['plot_large_text']) ln.generate() diff --git 
a/sierra/core/pipeline/stage4/graphs/intra/generate.py b/sierra/core/pipeline/stage4/graphs/intra/generate.py index 035beb6c..783c7b70 100755 --- a/sierra/core/pipeline/stage4/graphs/intra/generate.py +++ b/sierra/core/pipeline/stage4/graphs/intra/generate.py @@ -19,7 +19,7 @@ import sierra.core.variables.batch_criteria as bc import sierra.core.plugin_manager as pm -from sierra.core import types, utils +from sierra.core import types, utils, batchroot, exproot from sierra.core.pipeline.stage4.graphs.intra import line, heatmap _logger = logging.getLogger(__name__) @@ -27,6 +27,7 @@ def generate(main_config: types.YAMLDict, cmdopts: types.Cmdopts, + pathset: batchroot.PathSet, controller_config: types.YAMLDict, LN_config: types.YAMLDict, HM_config: types.YAMLDict, @@ -50,34 +51,29 @@ def generate(main_config: types.YAMLDict, criteria: The :term:`Batch Criteria` used for the batch experiment. """ - exp_to_gen = utils.exp_range_calc(cmdopts, - cmdopts['batch_output_root'], + exp_to_gen = utils.exp_range_calc(cmdopts["exp_range"], + pathset.output_root, criteria) + if not exp_to_gen: + return + + module = pm.module_load_tiered(project=cmdopts['project'], + path='pipeline.stage4.graphs.intra.generate') + + generator = module.IntraExpGraphGenerator(main_config, + controller_config, + LN_config, + HM_config, + cmdopts) for exp in exp_to_gen: - batch_output_root = pathlib.Path(cmdopts["batch_output_root"]) - batch_stat_root = pathlib.Path(cmdopts["batch_stat_root"]) - batch_input_root = pathlib.Path(cmdopts["batch_input_root"]) - batch_graph_root = pathlib.Path(cmdopts["batch_graph_root"]) - batch_model_root = pathlib.Path(cmdopts["batch_model_root"]) - - cmdopts = copy.deepcopy(cmdopts) - cmdopts["exp_input_root"] = str(batch_input_root / exp.name) - cmdopts["exp_output_root"] = str(batch_output_root / exp.name) - cmdopts["exp_graph_root"] = str(batch_graph_root / exp.name) - cmdopts["exp_model_root"] = str(batch_model_root / exp.name) - cmdopts["exp_stat_root"] = 
str(batch_stat_root / exp.name) - - if os.path.isdir(cmdopts["exp_stat_root"]): - generator = pm.module_load_tiered(project=cmdopts['project'], - path='pipeline.stage4.graphs.intra.generate') - generator.IntraExpGraphGenerator(main_config, - controller_config, - LN_config, - HM_config, - cmdopts)(criteria) + exproots = exproot.PathSet(pathset, exp.name) + + if os.path.isdir(exproots.stat_root): + generator(exproots, criteria) else: - _logger.warning("Skipping experiment '%s': %s does not exist", + _logger.warning("Skipping experiment '%s': %s does not exist, or " + "isn't a directory", exp, exproots.stat_root) @@ -132,9 +128,9 @@ def __init__(self, self.controller_config = controller_config self.logger = logging.getLogger(__name__) - utils.dir_create_checked(self.cmdopts["exp_graph_root"], exist_ok=True) - - def __call__(self, criteria: bc.IConcreteBatchCriteria) -> None: + def __call__(self, + pathset: exproot.PathSet, + criteria: bc.IConcreteBatchCriteria) -> None: """ Generate graphs. @@ -146,6 +142,8 @@ def __call__(self, criteria: bc.IConcreteBatchCriteria) -> None: #. :func:`~sierra.core.pipeline.stage4.graphs.intra.heatmap.generate()` to generate heatmaps for each experiment in the batch. 
""" + utils.dir_create_checked(self.pathset.graph_root, exist_ok=True) + LN_targets, HM_targets = self.calc_targets() if not self.cmdopts['project_no_LN']: diff --git a/sierra/core/pipeline/stage4/model_runner.py b/sierra/core/pipeline/stage4/model_runner.py index 7bbee84d..d48b038d 100755 --- a/sierra/core/pipeline/stage4/model_runner.py +++ b/sierra/core/pipeline/stage4/model_runner.py @@ -15,7 +15,7 @@ # Project packages import sierra.core.variables.batch_criteria as bc -from sierra.core import models, types, utils, storage, config +from sierra.core import models, types, utils, storage, config, batchroot, exproot class IntraExpModelRunner: @@ -25,19 +25,20 @@ class IntraExpModelRunner: def __init__(self, cmdopts: types.Cmdopts, + pathset: batchroot.PathSet, to_run: tp.List[tp.Union[models.interface.IConcreteIntraExpModel1D, models.interface.IConcreteIntraExpModel2D]]) -> None: self.cmdopts = cmdopts self.models = to_run + self.pathset = pathset self.logger = logging.getLogger(__name__) def __call__(self, - main_config: types.YAMLDict, criteria: bc.IConcreteBatchCriteria) -> None: - exp_to_run = utils.exp_range_calc(self.cmdopts, - self.cmdopts['batch_output_root'], + exp_to_run = utils.exp_range_calc(self.cmdopts["exp_range"], + self.pathset.output_root, criteria) - exp_dirnames = criteria.gen_exp_names(self.cmdopts) + exp_dirnames = criteria.gen_exp_names() for exp in exp_to_run: self._run_models_in_exp(criteria, exp_dirnames, exp) @@ -48,30 +49,21 @@ def _run_models_in_exp(self, exp: pathlib.Path) -> None: exp_index = exp_dirnames.index(exp) - cmdopts = copy.deepcopy(self.cmdopts) - batch_output_root = pathlib.Path(self.cmdopts["batch_output_root"]) - batch_stat_root = pathlib.Path(self.cmdopts["batch_stat_root"]) - batch_input_root = pathlib.Path(self.cmdopts["batch_input_root"]) - batch_graph_root = pathlib.Path(self.cmdopts["batch_graph_root"]) - batch_model_root = pathlib.Path(self.cmdopts["batch_model_root"]) - - cmdopts["exp0_output_root"] = 
str(batch_output_root / exp_dirnames[0].name) - cmdopts["exp0_stat_root"] = str(batch_stat_root / exp_dirnames[0].name) - - cmdopts["exp_input_root"] = str(batch_input_root / exp.name) - cmdopts["exp_output_root"] = str(batch_output_root / exp.name) - cmdopts["exp_graph_root"] = str(batch_graph_root / exp.name) - cmdopts["exp_stat_root"] = str(batch_stat_root / exp.name) - cmdopts["exp_model_root"] = str(batch_model_root / exp.name) + exproots = exproot.PathSet(self.pathset, exp.name, exp_dirnames[0].name) - utils.dir_create_checked(cmdopts['exp_model_root'], exist_ok=True) + utils.dir_create_checked(exproots.model_root, exist_ok=True) for model in self.models: - self._run_model_in_exp(criteria, cmdopts, exp_index, model) + self._run_model_in_exp(criteria, + self.cmdopts, + exproots, + exp_index, + model) def _run_model_in_exp(self, criteria: bc.IConcreteBatchCriteria, cmdopts: types.Cmdopts, + pathset: exproot.PathSet, exp_index: int, model: tp.Union[models.interface.IConcreteIntraExpModel1D, models.interface.IConcreteIntraExpModel2D]) -> None: @@ -85,11 +77,11 @@ def _run_model_in_exp(self, self.logger.debug("Run intra-experiment model '%s' for exp%s", str(model), exp_index) - dfs = model.run(criteria, exp_index, cmdopts) + dfs = model.run(criteria, exp_index, cmdopts, pathset) writer = storage.DataFrameWriter('storage.csv') for df, csv_stem in zip(dfs, model.target_csv_stems()): - path_stem = pathlib.Path(cmdopts['exp_model_root']) / csv_stem + path_stem = pathset.model_root / csv_stem # Write model legend file so the generated graph can find it with utils.utf8open(path_stem.with_suffix(config.kModelsExt['legend']), @@ -113,21 +105,20 @@ class InterExpModelRunner: def __init__(self, cmdopts: types.Cmdopts, + pathset: batchroot.PathSet, to_run: tp.List[models.interface.IConcreteInterExpModel1D]) -> None: + self.pathset = pathset self.cmdopts = cmdopts self.models = to_run self.logger = logging.getLogger(__name__) def __call__(self, - main_config: types.YAMLDict, 
criteria: bc.IConcreteBatchCriteria) -> None: cmdopts = copy.deepcopy(self.cmdopts) - utils.dir_create_checked( - cmdopts['batch_model_root'], exist_ok=True) - utils.dir_create_checked( - cmdopts['batch_graph_collate_root'], exist_ok=True) + utils.dir_create_checked(self.pathset.model_root, exist_ok=True) + utils.dir_create_checked(self.pathset.graph_collate_root, exist_ok=True) for model in self.models: if not model.run_for_batch(criteria, cmdopts): @@ -138,10 +129,10 @@ def __call__(self, # Run the model self.logger.debug("Run inter-experiment model '%s'", str(model)) - dfs = model.run(criteria, cmdopts) + dfs = model.run(criteria, cmdopts, self.pathset) for df, csv_stem in zip(dfs, model.target_csv_stems()): - path_stem = pathlib.Path(cmdopts['batch_model_root']) / csv_stem + path_stem = self.pathset.model_root / csv_stem # Write model .csv file writer = storage.DataFrameWriter('storage.csv') diff --git a/sierra/core/pipeline/stage4/pipeline_stage4.py b/sierra/core/pipeline/stage4/pipeline_stage4.py index 567f6f78..a097d534 100755 --- a/sierra/core/pipeline/stage4/pipeline_stage4.py +++ b/sierra/core/pipeline/stage4/pipeline_stage4.py @@ -295,19 +295,28 @@ def _run_rendering(self, criteria: bc.IConcreteBatchCriteria) -> None: start = time.time() if self.cmdopts['platform_vc']: - render.from_platform(self.main_config, self.cmdopts, criteria) + render.from_platform(self.main_config, + self.cmdopts, + self.pathset, + criteria) else: self.logger.debug(("--platform-vc not passed--skipping rendering " "frames captured by the platform")) if self.cmdopts['project_rendering']: - render.from_project_imagized(self.main_config, self.cmdopts, criteria) + render.from_project_imagized(self.main_config, + self.cmdopts, + self.pathset, + criteria) else: self.logger.debug(("--project-rendering not passed--skipping " "rendering frames captured by the project")) if criteria.is_bivar() and self.cmdopts['bc_rendering']: - 
render.from_bivar_heatmaps(self.main_config, + self.cmdopts, + self.pathset, + criteria) else: self.logger.debug(("--bc-rendering not passed or univariate batch " "criteria--skipping rendering generated graphs")) @@ -321,8 +330,8 @@ def _run_intra_models(self, criteria: bc.IConcreteBatchCriteria) -> None: len(self.models_intra)) start = time.time() IntraExpModelRunner(self.cmdopts, - self.models_intra)(self.main_config, - criteria) + self.pathset, + self.models_intra)(criteria) elapsed = int(time.time() - start) sec = datetime.timedelta(seconds=elapsed) self.logger.info("Intra-experiment models finished in %s", str(sec)) @@ -332,8 +341,9 @@ def _run_inter_models(self, criteria: bc.IConcreteBatchCriteria) -> None: len(self.models_inter)) start = time.time() - runner = InterExpModelRunner(self.cmdopts, self.models_inter) - runner(self.main_config, criteria) + InterExpModelRunner(self.cmdopts, + self.pathset, + self.models_inter)(criteria) elapsed = int(time.time() - start) sec = datetime.timedelta(seconds=elapsed) @@ -347,6 +357,7 @@ def _run_intra_graph_generation(self, criteria: bc.IConcreteBatchCriteria) -> No start = time.time() graphs.intra.generate.generate(self.main_config, self.cmdopts, + self.pathset, self.controller_config, self.intra_LN_config, self.intra_HM_config, @@ -367,8 +378,9 @@ def _run_collation(self, criteria: bc.IConcreteBatchCriteria) -> None: if not self.cmdopts['skip_collate']: self.logger.info("Collating inter-experiment CSV files...") start = time.time() - collator = graphs.collate.ParallelCollator( - self.main_config, self.cmdopts) + collator = graphs.collate.ParallelCollator(self.main_config, + self.cmdopts, + self.pathset) collator(criteria, LN_targets) collator(criteria, HM_targets) elapsed = int(time.time() - start) diff --git a/sierra/core/pipeline/stage4/render.py b/sierra/core/pipeline/stage4/render.py index a9e870b1..e77b8fd7 100755 --- a/sierra/core/pipeline/stage4/render.py +++ b/sierra/core/pipeline/stage4/render.py @@ -29,7 
+29,7 @@ # Project packages import sierra.core.variables.batch_criteria as bc -from sierra.core import types, config, utils +from sierra.core import types, config, utils, batchroot _logger = logging.getLogger(__name__) @@ -119,6 +119,7 @@ def _worker(q: mp.Queue, main_config: types.YAMLDict) -> None: def from_platform(main_config: types.YAMLDict, cmdopts: types.Cmdopts, + pathset: batchroot.PathSet, criteria: bc.IConcreteBatchCriteria) -> None: """Render frames (images) captured in by a platform into videos. @@ -138,13 +139,13 @@ def from_platform(main_config: types.YAMLDict, .. note:: This currently only works with PNG images. """ - exp_to_render = utils.exp_range_calc(cmdopts, - cmdopts['batch_output_root'], + exp_to_render = utils.exp_range_calc(cmdopts["exp_range"], + pathset.output_root, criteria) inputs = [] for exp in exp_to_render: - output_dir = pathlib.Path(cmdopts['batch_video_root'], exp.name) + output_dir = pathset.video_root / exp.name for run in exp.iterdir(): platform = cmdopts['platform'].split('.')[1] @@ -162,6 +163,7 @@ def from_platform(main_config: types.YAMLDict, def from_project_imagized(main_config: types.YAMLDict, cmdopts: types.Cmdopts, + pathset: batchroot.PathSet, criteria: bc.IConcreteBatchCriteria) -> None: """Render THINGS previously imagized in a project in stage 3 into videos. @@ -181,18 +183,17 @@ def from_project_imagized(main_config: types.YAMLDict, .. note:: This currently only works with PNG images. 
""" - exp_to_render = utils.exp_range_calc(cmdopts, - cmdopts['batch_output_root'], + exp_to_render = utils.exp_range_calc(cmdopts["exp_range"], + pathset.output_root, criteria) inputs = [] for exp in exp_to_render: - exp_imagize_root = pathlib.Path(cmdopts['batch_imagize_root'], - exp.name) + exp_imagize_root = pathset.imagize_root / exp.name if not exp_imagize_root.exists(): continue - output_dir = pathlib.Path(cmdopts['batch_video_root'], exp.name) + output_dir = pathset.video_root / exp.name for candidate in exp_imagize_root.iterdir(): if candidate.is_dir(): @@ -209,6 +210,7 @@ def from_project_imagized(main_config: types.YAMLDict, def from_bivar_heatmaps(main_config: types.YAMLDict, cmdopts: types.Cmdopts, + pathset: batchroot.PathSet, criteria: bc.IConcreteBatchCriteria) -> None: """Render inter-experiment heatmaps into videos. @@ -227,14 +229,12 @@ def from_bivar_heatmaps(main_config: types.YAMLDict, versionadded:: 1.2.20 """ - graph_root = pathlib.Path(cmdopts['batch_graph_collate_root']) inputs = [] - for candidate in graph_root.iterdir(): + for candidate in pathset.graph_collate_root.iterdir(): if "HM-" in candidate.name and candidate.is_dir(): - output_dir = pathlib.Path(cmdopts['batch_video_root'], - candidate.name) + output_dir = pathset.video_root / candidate.name opts = { 'input_dir': str(candidate), diff --git a/sierra/core/pipeline/stage5/inter_scenario_comparator.py b/sierra/core/pipeline/stage5/inter_scenario_comparator.py index a933cc04..ab02b43b 100755 --- a/sierra/core/pipeline/stage5/inter_scenario_comparator.py +++ b/sierra/core/pipeline/stage5/inter_scenario_comparator.py @@ -25,7 +25,7 @@ from sierra.core.variables import batch_criteria as bc import sierra.core.plugin_manager as pm from sierra.core import types, utils, config, storage, batchroot -from sierra.core.pip.line.stage5 import outputroot +from sierra.core.pipeline.stage5 import outputroot class UnivarInterScenarioComparator: @@ -106,8 +106,9 @@ def __call__(self, # using data from 
all scenarios cmdopts = copy.deepcopy(self.cmdopts) for graph in graphs: - for leaf in batch_leaves: - if self._leaf_select(leaf): + for l in batch_leaves: + if self._leaf_select(l): + leaf = batchroot.ExpRootLeaf.from_name(l) self._compare_across_scenarios(cmdopts=cmdopts, graph=graph, batch_leaf=leaf, @@ -133,7 +134,7 @@ def _leaf_select(self, candidate: str) -> bool: def _compare_across_scenarios(self, cmdopts: types.Cmdopts, graph: types.YAMLDict, - batch_leaf: str, + batch_leaf: batchroot.ExpRootLeaf, legend: tp.List[str]) -> None: # We need to generate the root directory paths for each batch experiment @@ -149,17 +150,19 @@ def _compare_across_scenarios(self, # because they are all different. criteria = bc.factory(self.main_config, cmdopts, + pathset.input_root, self.cli_args, self.scenarios[0]) self._gen_csvs(pathset=pathset, - proect=self.cli_args.project, + project=self.cli_args.project, batch_leaf=batch_leaf, src_stem=graph['src_stem'], dest_stem=graph['dest_stem']) self._gen_graph(criteria=criteria, cmdopts=cmdopts, + batch_output_root=pathset.output_root, dest_stem=graph['dest_stem'], inc_exps=graph.get('include_exp', None), title=graph.get('title', None), @@ -169,6 +172,7 @@ def _compare_across_scenarios(self, def _gen_graph(self, criteria: bc.IConcreteBatchCriteria, cmdopts: types.Cmdopts, + batch_output_root: pathlib.Path, dest_stem: str, inc_exps: tp.Optional[str], title: str, @@ -181,8 +185,8 @@ def _gen_graph(self, img_opath = pathlib.Path(self.stage5_roots.graph_root, dest_stem + '-' + self.controller + config.kImageExt) - xticks = criteria.graph_xticks(cmdopts) - xtick_labels = criteria.graph_xticklabels(cmdopts) + xticks = criteria.graph_xticks(cmdopts, batch_output_root) + xtick_labels = criteria.graph_xticklabels(cmdopts, batch_output_root) if inc_exps is not None: xtick_labels = utils.exp_include_filter(inc_exps, @@ -208,7 +212,7 @@ def _gen_graph(self, def _gen_csvs(self, pathset: batchroot.PathSet, project: str, - batch_leaf: str, + 
batch_leaf: batchroot.ExpRootLeaf, src_stem: str, dest_stem: str) -> None: """Generate a set of CSV files for use in inter-scenario graph generation. @@ -231,9 +235,7 @@ def _gen_csvs(self, """ - csv_ipath_stem = pathlib.Path(pathset.output_root, - pathset.stat_collate_root, - src_stem) + csv_ipath_stem = pathset.stat_collate_root / src_stem # Some experiments might not generate the necessary performance measure # CSVs for graph generation, which is OK. @@ -269,8 +271,8 @@ def _gen_csvs(self, # Can't use with_suffix() for opath, because that path contains the # controller, which already has a '.' in it. model_istem = pathlib.Path(pathset.model_root, src_stem) - model_ostem = pathlib.Path(self.stage5_roots.model_root, - dest_stem + "-" + self.controller) + model_ostem = self.stage5_roots.model_root / \ + (dest_stem + "-" + self.controller) model_ipath = model_istem.with_suffix(config.kModelsExt['model']) model_opath = model_ostem.with_name( @@ -283,10 +285,9 @@ def _gen_csvs(self, writer(model_df, model_opath, index=False) with utils.utf8open(legend_opath, 'a') as f: - scenario = batchroot.ExpRootLeaf.from_name(batch_leaf).scenario sgp = pm.module_load_tiered(project=project, path='generators.scenario_generator_parser') - kw = sgp.ScenarioGeneratorParser().to_dict(scenario) + kw = sgp.ScenarioGeneratorParser().to_dict(batch_leaf.scenario) f.write("{0} Prediction\n".format(kw['scenario_tag'])) def _accum_df(self, diff --git a/sierra/core/pipeline/stage5/intra_scenario_comparator.py b/sierra/core/pipeline/stage5/intra_scenario_comparator.py index 7a93c4e0..d4393bd9 100755 --- a/sierra/core/pipeline/stage5/intra_scenario_comparator.py +++ b/sierra/core/pipeline/stage5/intra_scenario_comparator.py @@ -26,7 +26,7 @@ from sierra.core.graphs.heatmap import Heatmap, DualHeatmap from sierra.core.variables import batch_criteria as bc from sierra.core import types, utils, config, storage, batchroot -from sierra.core.pipeline.stage5 import leafcalc, preprocess +from 
sierra.core.pipeline.stage5 import namecalc, preprocess, outputroot class UnivarIntraScenarioComparator: @@ -42,11 +42,7 @@ class UnivarIntraScenarioComparator: controllers: List of controller names to compare. - cc_csv_root: Absolute directory path to the location controller CSV - files should be output to. - - cc_graph_root: Absolute directory path to the location the generated - graphs should be output to. + stage5_roots: Set of directory paths for stage 5 file generation. cmdopts: Dictionary of parsed cmdline parameters. @@ -63,15 +59,14 @@ class UnivarIntraScenarioComparator: def __init__(self, controllers: tp.List[str], - pathset: batchroot.PathSet, - cc_csv_root: pathlib.Path, - cc_graph_root: pathlib.Path, + batch_roots: batchroot.PathSet, + stage5_roots: outputroot.PathSet, cmdopts: types.Cmdopts, cli_args, main_config: types.YAMLDict) -> None: self.controllers = controllers - self.cc_graph_root = cc_graph_root - self.cc_csv_root = cc_csv_root + self.batch_roots = batch_roots + self.stage5_roots = stage5_roots self.cmdopts = cmdopts self.cli_args = cli_args @@ -94,11 +89,12 @@ def __call__(self, cmdopts = copy.deepcopy(self.cmdopts) for graph in graphs: found = False - for leaf in batch_leaves: - if self._leaf_select(leaf): + for l in batch_leaves: + if self._leaf_select(l): self.logger.debug("Generating graph %s from scenario '%s'", graph, - leaf) + l) + leaf = batchroot.ExpRootLeaf.from_name(l) self._compare_in_scenario(cmdopts=cmdopts, graph=graph, batch_leaf=leaf, @@ -123,7 +119,7 @@ def _leaf_select(self, candidate: str) -> bool: def _compare_in_scenario(self, cmdopts: types.Cmdopts, graph: types.YAMLDict, - batch_leaf: str, + batch_leaf: batchroot.ExpRootLeaf, legend: tp.List[str]) -> None: for controller in self.controllers: @@ -141,23 +137,22 @@ def _compare_in_scenario(self, # experiment (which # lives inside of the scenario dir), because # they are all different. 
We need generate these paths for EACH # controller, because the controller is part of the batch root path. - paths = batchroot.from_exp(sierra_rpath=self.cli_args.sierra_root, - project=self.cli_args.project, - batch_leaf=batch_leaf, - controller=controller) - cmdopts.update(paths) + pathset = batchroot.from_exp(sierra_root=self.cli_args.sierra_root, + project=self.cli_args.project, + batch_leaf=batch_leaf, + controller=controller) # For each scenario, we have to create the batch criteria for it, # because they are all different. - criteria = bc.factory(self.main_config, cmdopts, + pathset.input_root, self.cli_args, batch_leaf.scenario) - self._gen_csv(batch_leaf=batch_leaf.to_path(), + self._gen_csv(batch_leaf=batch_leaf, criteria=criteria, - cmdopts=cmdopts, + pathset=pathset, controller=controller, src_stem=graph['src_stem'], dest_stem=graph['dest_stem'], @@ -166,6 +161,7 @@ def _compare_in_scenario(self, self._gen_graph(batch_leaf=batch_leaf, criteria=criteria, cmdopts=cmdopts, + batch_output_root=pathset.output_root, dest_stem=graph['dest_stem'], title=graph.get('title', ''), label=graph.get('label', ''), @@ -173,9 +169,9 @@ def _compare_in_scenario(self, legend=legend) def _gen_csv(self, - batch_leaf: str, + batch_leaf: batchroot.ExpRootLeaf, criteria: bc.IConcreteBatchCriteria, - cmdopts: types.Cmdopts, + pathset: batchroot.PathSet, controller: str, src_stem: str, dest_stem: str, @@ -187,8 +183,8 @@ def _gen_csv(self, """ self.logger.debug("Gathering data for '%s' from %s -> %s", controller, src_stem, dest_stem) - ipath = pathlib.Path(cmdopts['batch_stat_collate_root'], - src_stem + config.kStats['mean'].exts['mean']) + ipath = pathset.stat_collate_root / \ + (src_stem + config.kStats['mean'].exts['mean']) # Some experiments might not generate the necessary performance measure # .csvs for graph generation, which is OK. 
@@ -198,17 +194,18 @@ def _gen_csv(self, controller) return - preparer = preprocess.IntraExpPreparer(ipath_stem=cmdopts['batch_stat_collate_root'], + preparer = preprocess.IntraExpPreparer(ipath_stem=pathset.stat_collate_root, ipath_leaf=src_stem, - opath_stem=self.cc_csv_root, + opath_stem=self.stage5_roots.csv_root, n_exp=criteria.n_exp()) - opath_leaf = leafcalc.from_batch_leaf(batch_leaf, dest_stem, None) + opath_leaf = namecalc.for_output(batch_leaf.name, dest_stem, None) preparer.across_rows(opath_leaf=opath_leaf, index=0, inc_exps=inc_exps) def _gen_graph(self, - batch_leaf: str, + batch_leaf: batchroot.ExpRootLeaf, criteria: bc.IConcreteBatchCriteria, cmdopts: types.Cmdopts, + batch_output_root: pathlib.Path, dest_stem: str, title: str, label: str, @@ -216,10 +213,12 @@ def _gen_graph(self, legend: tp.List[str]) -> None: """Generate a graph comparing the specified controllers within a scenario. """ - opath_leaf = leafcalc.from_batch_leaf(batch_leaf, dest_stem, None) + opath_leaf = namecalc.for_output(batch_leaf, + dest_stem, + None) - xticks = criteria.graph_xticks(cmdopts) - xtick_labels = criteria.graph_xticklabels(cmdopts) + xticks = criteria.graph_xticks(cmdopts, batch_output_root) + xtick_labels = criteria.graph_xticklabels(cmdopts, batch_output_root) if inc_exps is not None: xtick_labels = utils.exp_include_filter( @@ -227,9 +226,9 @@ def _gen_graph(self, xticks = utils.exp_include_filter( inc_exps, xticks, criteria.n_exp()) - opath = self.cc_graph_root / (opath_leaf + config.kImageExt) + opath = self.stage5_roots.graph_root / (opath_leaf + config.kImageExt) - SummaryLineGraph(stats_root=self.cc_csv_root, + SummaryLineGraph(stats_root=self.stage5_roots.csv_root, input_stem=opath_leaf, output_fpath=opath, stats=cmdopts['dist_stats'], @@ -256,12 +255,6 @@ class BivarIntraScenarioComparator: controllers: List of controller names to compare. - cc_csv_root: Absolute directory path to the location controller CSV - files should be output to. 
- - cc_graph_root: Absolute directory path to the location the generated - graphs should be output to. - cmdopts: Dictionary of parsed cmdline parameters. cli_args: :class:`argparse` object containing the cmdline @@ -276,21 +269,19 @@ class BivarIntraScenarioComparator: def __init__(self, controllers: tp.List[str], - cc_csv_root: pathlib.Path, - cc_graph_root: pathlib.Path, + stage5_roots: outputroot.PathSet, cmdopts: types.Cmdopts, cli_args: argparse.Namespace, main_config: types.YAMLDict) -> None: self.controllers = controllers - self.cc_csv_root = cc_csv_root - self.cc_graph_root = cc_graph_root + self.stage5_roots = stage5_roots self.cmdopts = cmdopts self.cli_args = cli_args self.main_config = main_config self.logger = logging.getLogger(__name__) - self.logger.debug("csv_root=%s", str(self.cc_csv_root)) - self.logger.debug("graph_root=%s", str(self.cc_graph_root)) + self.logger.debug("csv_root=%s", str(self.stage5_roots.csv_root)) + self.logger.debug("graph_root=%s", str(self.stage5_roots.graph_root)) self.project_root = pathlib.Path(self.cmdopts['sierra_root'], self.cmdopts['project']) @@ -308,8 +299,9 @@ def __call__(self, cmdopts = copy.deepcopy(self.cmdopts) for graph in graphs: found = False - for leaf in batch_leaves: - if self._leaf_select(leaf): + for l in batch_leaves: + if self._leaf_select(l): + leaf = batchroot.ExpRootLeaf.from_name(l) self.logger.debug("Generating graph %s from scenario '%s'", graph, leaf) @@ -338,7 +330,7 @@ def _leaf_select(self, candidate: str) -> bool: def _compare_in_scenario(self, cmdopts: types.Cmdopts, graph: types.YAMLDict, - batch_leaf: str, + batch_leaf: batchroot.ExpRootLeaf, legend: tp.List[str], comp_type: str) -> None: """Compare all controllers within the specified scenario. 
@@ -355,30 +347,28 @@ def _compare_in_scenario(self, batch_leaf) continue - batch_leaf = dirs[0] - scenario = batchroot.ExpRootLeaf.from_name(batch_leaf).scenario + batch_leaf = batchroot.ExpRootLeaf.from_name(dirs[0]) # We need to generate the root directory paths for each batch # experiment (which # lives inside of the scenario dir), because # they are all different. We need generate these paths for EACH # controller, because the controller is part of the batch root path. - paths = batchroot.from_exp(sierra_root=self.cli_args.sierra_root, - project=self.cli_args.project, - batch_leaf=batch_leaf, - controller=controller) - - cmdopts.update(paths) + pathset = batchroot.from_exp(sierra_root=self.cli_args.sierra_root, + project=self.cli_args.project, + batch_leaf=batch_leaf, + controller=controller) # For each scenario, we have to create the batch criteria for it, # because they are all different. criteria = bc.factory(self.main_config, cmdopts, + pathset.input_root, self.cli_args, - scenario) - + batch_leaf.scenario) if comp_type == 'LNraw': self._gen_csvs_for_1D(cmdopts=cmdopts, criteria=criteria, + pathset=pathset, controller=controller, batch_leaf=batch_leaf, src_stem=graph['src_stem'], @@ -387,9 +377,9 @@ def _compare_in_scenario(self, inc_exps=graph.get('include_exp', None)) elif 'HM' in comp_type or 'SU' in comp_type: - self._gen_csvs_for_2D_or_3D(cmdopts=cmdopts, - controller=controller, + self._gen_csvs_for_2D_or_3D(controller=controller, batch_leaf=batch_leaf, + pathset=pathset, src_stem=graph['src_stem'], dest_stem=graph['dest_stem']) @@ -397,6 +387,7 @@ def _compare_in_scenario(self, self._gen_graphs1D(batch_leaf=batch_leaf, criteria=criteria, cmdopts=cmdopts, + pathset=pathset, dest_stem=graph['dest_stem'], title=graph.get('title', ''), label=graph.get('label', ''), @@ -424,8 +415,8 @@ def _compare_in_scenario(self, comp_type=comp_type) def _gen_csvs_for_2D_or_3D(self, - cmdopts: types.Cmdopts, - batch_leaf: str, + pathset: batchroot.PathSet, + 
batch_leaf: batchroot.ExpRootLeaf, controller: str, src_stem: str, dest_stem: str) -> None: @@ -434,7 +425,7 @@ def _gen_csvs_for_2D_or_3D(self, 1 CSV per controller, for 2D/3D comparison types only. Because each CSV file corresponding to performance measures are 2D arrays, we actually just copy and rename the performance measure CSV files for each - controllers into :attr:`cc_csv_root`. + controllers into :attr:`stage5_roots.csv_root`. :class:`~sierra.core.graphs.stacked_surface_graph.StackedSurfaceGraph` expects an ``_[0-9]+.csv`` pattern for each 2D surfaces to graph in @@ -449,8 +440,8 @@ def _gen_csvs_for_2D_or_3D(self, self.logger.debug("Gathering data for '%s' from %s -> %s", controller, src_stem, dest_stem) - csv_ipath = pathlib.Path(cmdopts['batch_stat_collate_root'], - src_stem + config.kStats['mean'].exts['mean']) + csv_ipath = pathset.stat_collate_root / \ + (src_stem + config.kStats['mean'].exts['mean']) # Some experiments might not generate the necessary performance measure # .csvs for graph generation, which is OK. 
@@ -462,11 +453,11 @@ def _gen_csvs_for_2D_or_3D(self,
 
         df = storage.DataFrameReader('storage.csv')(csv_ipath)
 
-        opath_leaf = leafcalc.from_batch_leaf(batch_leaf,
-                                              dest_stem,
-                                              [self.controllers.index(controller)])
+        opath_leaf = namecalc.for_output(batch_leaf,
+                                         dest_stem,
+                                         [self.controllers.index(controller)])
 
-        opath_stem = self.cc_csv_root / opath_leaf
+        opath_stem = self.stage5_roots.csv_root / opath_leaf
         opath = opath_stem.with_name(
             opath_stem.name + config.kStats['mean'].exts['mean'])
         writer = storage.DataFrameWriter('storage.csv')
@@ -474,8 +465,9 @@ def _gen_csvs_for_1D(self,
                          cmdopts: types.Cmdopts,
-                         criteria: bc.IConcreteBatchCriteria,
-                         batch_leaf: str,
+                         pathset: batchroot.PathSet,
+                         criteria: bc.BivarBatchCriteria,
+                         batch_leaf: batchroot.ExpRootLeaf,
                          controller: str,
                          src_stem: str,
                          dest_stem: str,
@@ -492,8 +484,8 @@ def _gen_csvs_for_1D(self,
         self.logger.debug("Gathering data for '%s' from %s -> %s",
                          controller, src_stem, dest_stem)
 
-        csv_ipath = pathlib.Path(cmdopts['batch_stat_collate_root'],
-                                 src_stem + config.kStats['mean'].exts['mean'])
+        csv_ipath = pathset.stat_collate_root / \
+            (src_stem + config.kStats['mean'].exts['mean'])
 
         # Some experiments might not generate the necessary performance measure
         # .csvs for graph generation, which is OK.
@@ -509,46 +501,48 @@ def _gen_csvs_for_1D(self, "generation: no stats will be included")) if primary_axis == 0: - preparer = preprocess.IntraExpPreparer(ipath_stem=cmdopts['batch_stat_collate_root'], + preparer = preprocess.IntraExpPreparer(ipath_stem=pathset.stat_collate_root, ipath_leaf=src_stem, - opath_stem=self.cc_csv_root, + opath_stem=self.stage5_roots.csv_root, n_exp=criteria.criteria2.n_exp()) reader = storage.DataFrameReader('storage.csv') - ipath = pathlib.Path(cmdopts['batch_stat_collate_root'], - src_stem + config.kStats['mean'].exts['mean']) + ipath = pathset.stat_collate_root / \ + (src_stem + config.kStats['mean'].exts['mean']) n_rows = len(reader(ipath).index) for i in range(0, n_rows): - opath_leaf = leafcalc.from_batch_leaf(batch_leaf, - dest_stem, - [i]) + opath_leaf = namecalc.for_output(batch_leaf, + dest_stem, + [i]) preparer.across_rows(opath_leaf=opath_leaf, index=i, inc_exps=inc_exps) else: - preparer = preprocess.IntraExpPreparer(ipath_stem=cmdopts['batch_stat_collate_root'], + preparer = preprocess.IntraExpPreparer(ipath_stem=pathset.stat_collate_root, ipath_leaf=src_stem, - opath_stem=self.cc_csv_root, + opath_stem=self.stage5_roots.csv_root, n_exp=criteria.criteria1.n_exp()) - exp_dirs = criteria.gen_exp_names(cmdopts) + exp_dirs = criteria.gen_exp_names() xlabels, ylabels = utils.bivar_exp_labels_calc(exp_dirs) xlabels = utils.exp_include_filter( inc_exps, xlabels, criteria.criteria1.n_exp()) for col in ylabels: col_index = ylabels.index(col) - opath_leaf = leafcalc.from_batch_leaf( - batch_leaf, dest_stem, [col_index]) + opath_leaf = namecalc.for_output(batch_leaf, + dest_stem, + [col_index]) preparer.across_cols(opath_leaf=opath_leaf, col_index=col_index, all_cols=xlabels, inc_exps=inc_exps) def _gen_graphs1D(self, - batch_leaf: str, + batch_leaf: batchroot.ExpRootLeaf, criteria: bc.BivarBatchCriteria, + pathset: batchroot.PathSet, cmdopts: types.Cmdopts, dest_stem: str, title: str, @@ -556,15 +550,19 @@ def _gen_graphs1D(self, 
primary_axis: int, inc_exps: tp.Optional[str], legend: tp.List[str]) -> None: - oleaf = leafcalc.from_batch_leaf(batch_leaf, dest_stem, None) - csv_stem_root = self.cc_csv_root / oleaf + oleaf = namecalc.for_output(batch_leaf, + dest_stem, + None) + csv_stem_root = self.stage5_roots.csv_root / oleaf pattern = str(csv_stem_root) + '*' + config.kStats['mean'].exts['mean'] paths = [f for f in glob.glob(pattern) if re.search('_[0-9]+', f)] for i in range(0, len(paths)): - opath_leaf = leafcalc.from_batch_leaf( - batch_leaf, dest_stem, [i]) - img_opath = self.cc_graph_root / (opath_leaf + config.kImageExt) + opath_leaf = namecalc.for_output(batch_leaf, + dest_stem, + [i]) + img_opath = self.stage5_roots.graph_root / \ + (opath_leaf + config.kImageExt) if primary_axis == 0: n_exp = criteria.criteria1.n_exp() @@ -590,11 +588,11 @@ def _gen_graphs1D(self, xlabel = criteria.graph_xlabel(cmdopts) # TODO: Fix no statistics support for these graphs - SummaryLineGraph(stats_root=self.cc_csv_root, + SummaryLineGraph(stats_root=self.stage5_roots.csv_root, input_stem=opath_leaf, stats='none', output_fpath=img_opath, - model_root=cmdopts['batch_model_root'], + model_root=pathset.model_root, title=title, xlabel=xlabel, ylabel=label, @@ -605,7 +603,7 @@ def _gen_graphs1D(self, large_text=cmdopts['plot_large_text']).generate() def _gen_graphs2D(self, - batch_leaf: str, + batch_leaf: batchroot.ExpRootLeaf, criteria: bc.BivarBatchCriteria, cmdopts: types.Cmdopts, dest_stem: str, @@ -632,7 +630,7 @@ def _gen_graphs2D(self, comp_type) def _gen_paired_heatmaps(self, - batch_leaf: str, + batch_leaf: batchroot.ExpRootLeaf, criteria: bc.BivarBatchCriteria, cmdopts: types.Cmdopts, dest_stem: str, @@ -643,20 +641,22 @@ def _gen_paired_heatmaps(self, Uses a configured controller of primary interest against all other controllers (one graph per pairing), after input files have been - gathered from each controller into :attr:`cc_csv_root`. 
+ gathered from each controller into :attr:`stage5_roots.csv_root`. """ - opath_leaf = leafcalc.from_batch_leaf(batch_leaf, dest_stem, None) - opath = self.cc_graph_root / (opath_leaf + config.kImageExt) - pattern = self.cc_csv_root / (opath_leaf + '*' + - config.kStats['mean'].exts['mean']) + opath_leaf = namecalc.for_output(batch_leaf, + dest_stem, + None) + opath = self.stage5_roots.graph_root / (opath_leaf + config.kImageExt) + pattern = self.stage5_roots.csv_root / (opath_leaf + '*' + + config.kStats['mean'].exts['mean']) paths = [pathlib.Path(f) for f in glob.glob(str(pattern)) if re.search(r'_[0-9]+\.', f)] self.logger.debug("Generating paired heatmaps in %s -> %s", pattern, - [str(f.relative_to(self.cc_csv_root)) for f in paths]) + [str(f.relative_to(self.stage5_roots.csv_root)) for f in paths]) if len(paths) < 2: self.logger.warning(("Not enough matches from pattern='%s'--" @@ -677,11 +677,12 @@ def _gen_paired_heatmaps(self, # Have to add something before the .mean to ensure that the diff CSV # does not get picked up by the regex above as each controller is # treated in turn as the primary. 
- leaf = leafcalc.from_batch_leaf(batch_leaf, - dest_stem, - [0, i]) + '_paired' - ipath = self.cc_csv_root / (leaf + config.kStats['mean'].exts['mean']) - opath = self.cc_graph_root / (leaf + config.kImageExt) + leaf = namecalc.for_output(batch_leaf, + dest_stem, + [0, i]) + '_paired' + ipath = self.stage5_roots.csv_root / \ + (leaf + config.kStats['mean'].exts['mean']) + opath = self.stage5_roots.graph_root / (leaf + config.kImageExt) writer = storage.DataFrameWriter('storage.csv') writer(plot_df, ipath, index=False) @@ -697,7 +698,7 @@ def _gen_paired_heatmaps(self, ytick_labels=criteria.graph_yticklabels(cmdopts)).generate() def _gen_dual_heatmaps(self, - batch_leaf: str, + batch_leaf: batchroot.ExpRootLeaf, criteria: bc.BivarBatchCriteria, cmdopts: types.Cmdopts, dest_stem: str, @@ -709,21 +710,23 @@ def _gen_dual_heatmaps(self, Graphs contain all pairings of (primary controller, other), one per graph, within the specified scenario after input files have been - gathered from each controller into :attr:`cc_csv_root`. Only valid if + gathered from each controller into :attr:`stage5_roots.csv_root`. Only valid if the comparison type is ``HMraw``. 
""" - opath_leaf = leafcalc.from_batch_leaf(batch_leaf, dest_stem, None) - opath = self.cc_graph_root / (opath_leaf + config.kImageExt) - pattern = self.cc_csv_root / (opath_leaf + '*' + - config.kStats['mean'].exts['mean']) + opath_leaf = namecalc.for_output(batch_leaf, + dest_stem, + None) + opath = self.stage5_roots.graph_root / (opath_leaf + config.kImageExt) + pattern = self.stage5_roots.csv_root / (opath_leaf + '*' + + config.kStats['mean'].exts['mean']) paths = [pathlib.Path(f) for f in glob.glob(str(pattern)) if re.search('_[0-9]+', f)] self.logger.debug("Generating dual heatmaps in %s -> %s", pattern, - [str(f.relative_to(self.cc_csv_root)) for f in paths]) + [str(f.relative_to(self.stage5_roots.csv_root)) for f in paths]) DualHeatmap(ipaths=paths, output_fpath=opath, @@ -737,7 +740,7 @@ def _gen_dual_heatmaps(self, ytick_labels=criteria.graph_yticklabels(cmdopts)).generate() def _gen_graph3D(self, - batch_leaf: str, + batch_leaf: batchroot.ExpRootLeaf, criteria: bc.BivarBatchCriteria, cmdopts: types.Cmdopts, dest_stem: str, @@ -749,20 +752,22 @@ def _gen_graph3D(self, Graph contains the specified controllers within thes pecified scenario after input files have been gathered from each controllers into - :attr:`cc_csv_root`. + :attr:`stage5_roots.csv_root`. 
""" - opath_leaf = leafcalc.from_batch_leaf(batch_leaf, dest_stem, None) - opath = self.cc_graph_root / (opath_leaf + config.kImageExt) - pattern = self.cc_csv_root / (opath_leaf + '*' + - config.kStats['mean'].exts['mean']) + opath_leaf = namecalc.for_output(batch_leaf, + dest_stem, + None) + opath = self.stage5_roots.graph_root / (opath_leaf + config.kImageExt) + pattern = self.stage5_roots.csv_root / (opath_leaf + '*' + + config.kStats['mean'].exts['mean']) paths = [pathlib.Path(f) for f in glob.glob( str(pattern)) if re.search('_[0-9]+', f)] self.logger.debug("Generating stacked surface graphs in %s -> %s", pattern, - [str(f.relative_to(self.cc_csv_root)) for f in paths]) + [str(f.relative_to(self.stage5_roots.csv_root)) for f in paths]) StackedSurfaceGraph(ipaths=paths, output_fpath=opath, diff --git a/sierra/core/pipeline/stage5/leafcalc.py b/sierra/core/pipeline/stage5/leafcalc.py deleted file mode 100644 index 161d53fa..00000000 --- a/sierra/core/pipeline/stage5/leafcalc.py +++ /dev/null @@ -1,24 +0,0 @@ -# -# Copyright 2024 John Harwell, All rights reserved. -# -# SPDX-License Identifier: MIT -# - -# Core packages -import pathlib -import typing as tp - -# 3rd party packages - -# Project packages - - -def from_batch_leaf(batch_leaf: str, - graph_stem: str, - indices: tp.Union[tp.List[int], None]): - leaf = graph_stem + "-" + batch_leaf - - if indices is not None: - leaf += '_' + ''.join([str(i) for i in indices]) - - return leaf diff --git a/sierra/core/pipeline/stage5/namecalc.py b/sierra/core/pipeline/stage5/namecalc.py new file mode 100644 index 00000000..4e4f506d --- /dev/null +++ b/sierra/core/pipeline/stage5/namecalc.py @@ -0,0 +1,29 @@ +# +# Copyright 2024 John Harwell, All rights reserved. 
+# +# SPDX-License Identifier: MIT +# + +# Core packages +import typing as tp + +# 3rd party packages + +# Project packages +from sierra.core import batchroot + + +def for_output(leaf: batchroot.ExpRootLeaf, + new_stem: str, + indices: tp.Union[tp.List[int], None]) -> str: + """ + Create a new name given an existing leaf and a new component. + + "Name" here is in pathlib terminology. + """ + name = new_stem + "-" + leaf.to_path().name + + if indices is not None: + name += '_' + ''.join([str(i) for i in indices]) + + return name diff --git a/sierra/core/pipeline/stage5/outputroot.py b/sierra/core/pipeline/stage5/outputroot.py index 5c4ed936..4637e944 100644 --- a/sierra/core/pipeline/stage5/outputroot.py +++ b/sierra/core/pipeline/stage5/outputroot.py @@ -43,10 +43,10 @@ def __init__(self, # stage5 idempotent). self.graph_root = pathlib.Path(cmdopts['sierra_root'], cmdopts['project'], - '+'.join(scenarios) + "-sc-graphs"), + '+'.join(scenarios) + "-sc-graphs") self.csv_root = pathlib.Path(cmdopts['sierra_root'], cmdopts['project'], - '+'.join(scenarios) + "-sc-csvs"), + '+'.join(scenarios) + "-sc-csvs") self.model_root = pathlib.Path(cmdopts['sierra_root'], cmdopts['project'], - '+'.join(scenarios) + "-sc-models"), + '+'.join(scenarios) + "-sc-models") diff --git a/sierra/core/pipeline/stage5/pipeline_stage5.py b/sierra/core/pipeline/stage5/pipeline_stage5.py index 6cba93bb..b70ffd4b 100755 --- a/sierra/core/pipeline/stage5/pipeline_stage5.py +++ b/sierra/core/pipeline/stage5/pipeline_stage5.py @@ -50,9 +50,11 @@ class PipelineStage5: def __init__(self, main_config: types.YAMLDict, - cmdopts: types.Cmdopts) -> None: + cmdopts: types.Cmdopts, + batch_roots: batchroot.PathSet) -> None: self.cmdopts = cmdopts self.main_config = main_config + self.batch_roots = batch_roots path = pathlib.Path(self.cmdopts['project_config_root'], config.kYAML.stage5) @@ -72,7 +74,7 @@ def __init__(self, else: self.scenarios = [] - self.output_roots = outputroot.PathSet(cmdopts, + 
self.stage5_roots = outputroot.PathSet(cmdopts, self.controllers, self.scenarios) @@ -98,11 +100,11 @@ def run(self, cli_args) -> None: """ # Create directories for .csv files and graphs - utils.dir_create_checked(self.output_roots.graph_root, True) - utils.dir_create_checked(self.output_roots.csv_root, True) + utils.dir_create_checked(self.stage5_roots.graph_root, True) + utils.dir_create_checked(self.stage5_roots.csv_root, True) - if self.output_roots.model_root is not None: - utils.dir_create_checked(self.output_roots.model_root, True) + if self.stage5_roots.model_root is not None: + utils.dir_create_checked(self.stage5_roots.model_root, True) if self.cmdopts['controller_comparison']: self._run_cc(cli_args) @@ -123,8 +125,8 @@ def _run_cc(self, cli_args): if cli_args.bc_univar: univar = intrasc.UnivarIntraScenarioComparator(self.controllers, - self.output_roots.csv_root, - self.output_roots.graph_root, + self.batch_roots, + self.stage5_roots, self.cmdopts, cli_args, self.main_config) @@ -133,8 +135,7 @@ def _run_cc(self, cli_args): comp_type=self.cmdopts['comparison_type']) else: bivar = intrasc.BivarIntraScenarioComparator(self.controllers, - self.output_roots.csv_root, - self.output_roots.graph_root, + self.stage5_roots, self.cmdopts, cli_args, self.main_config) @@ -160,7 +161,8 @@ def _run_sc(self, cli_args): comparator = intersc.UnivarInterScenarioComparator(self.cmdopts['controller'], self.scenarios, - self.output_roots, + self.batch_roots, + self.stage5_roots, self.cmdopts, cli_args, self.main_config) @@ -182,14 +184,14 @@ def _verify_comparability(self, controllers, cli_args): """ for c1 in controllers: for item in (self.project_root / c1).iterdir(): - leaf = batchroot.ExpRootLeaf.from_name(item.name).to_path() + leaf = batchroot.ExpRootLeaf.from_name(item.name) for c2 in controllers: opts1 = batchroot.from_exp(sierra_root=self.cmdopts['sierra_root'], project=self.cmdopts['project'], batch_leaf=leaf, controller=c1) - opts2 = 
batchroot.from_exp(sierra_rot=self.cmdopts['sierra_root'], + opts2 = batchroot.from_exp(sierra_root=self.cmdopts['sierra_root'], project=self.cmdopts['project'], batch_leaf=leaf, controller=c2) diff --git a/sierra/core/platform.py b/sierra/core/platform.py index 60f044f9..2cd70bec 100644 --- a/sierra/core/platform.py +++ b/sierra/core/platform.py @@ -61,7 +61,7 @@ class ExpRunShellCmdsGenerator(): def __init__(self, cmdopts: types.Cmdopts, criteria: bc.BatchCriteria, - n_robots: int, + n_agents: int, exp_num: int) -> None: self.cmdopts = cmdopts self.criteria = criteria @@ -70,7 +70,7 @@ def __init__(self, if hasattr(module, 'ExpRunShellCmdsGenerator'): self.platform = module.ExpRunShellCmdsGenerator(self.cmdopts, self.criteria, - n_robots, + n_agents, exp_num) else: self.platform = None @@ -79,7 +79,7 @@ def __init__(self, if hasattr(module, 'ExpRunShellCmdsGenerator'): self.env = module.ExpRunShellCmdsGenerator(self.cmdopts, self.criteria, - n_robots, + n_agents, exp_num) else: self.env = None diff --git a/sierra/core/ros1/callbacks.py b/sierra/core/ros1/callbacks.py index f0461f59..38ceaf41 100644 --- a/sierra/core/ros1/callbacks.py +++ b/sierra/core/ros1/callbacks.py @@ -21,7 +21,7 @@ def population_size_from_pickle(adds_def: tp.Union[xml.AttrChangeSet, main_config: types.YAMLDict, cmdopts: types.Cmdopts) -> int: for add in adds_def: - if 'name' in add.attr and 'n_robots' in add.attr['name']: + if 'name' in add.attr and 'n_agents' in add.attr['name']: return int(add.attr['value']) return 0 diff --git a/sierra/core/utils.py b/sierra/core/utils.py index 6d536554..a5903b0c 100755 --- a/sierra/core/utils.py +++ b/sierra/core/utils.py @@ -179,17 +179,15 @@ def get_primary_axis(criteria, return 1 -def exp_range_calc(cmdopts: types.Cmdopts, +def exp_range_calc(exp_range: str, root_dir: pathlib.Path, criteria) -> types.PathList: """ Get the range of experiments to run/do stuff with. SUPER USEFUL. 
""" - exp_all = [root_dir / d for d in criteria.gen_exp_names(cmdopts)] + exp_all = [root_dir / d for d in criteria.gen_exp_names()] - exp_range = cmdopts['exp_range'] - - if cmdopts['exp_range'] is not None: + if exp_range is not None: min_exp = int(exp_range.split(':')[0]) max_exp = int(exp_range.split(':')[1]) assert min_exp <= max_exp, \ @@ -319,7 +317,7 @@ def exp_template_path(cmdopts: types.Cmdopts, return batch_input_root / dirname / template.stem -def get_n_robots(main_config: types.YAMLDict, +def get_n_agents(main_config: types.YAMLDict, cmdopts: types.Cmdopts, exp_input_root: pathlib.Path, exp_def: definition.XMLExpDef) -> int: @@ -336,18 +334,18 @@ def get_n_robots(main_config: types.YAMLDict, # # 2. Getting it from the pickled experiment definition (i.e., from the # batch criteria which was used for this experiment). - n_robots = module.population_size_from_def(exp_def, + n_agents = module.population_size_from_def(exp_def, main_config, cmdopts) - if n_robots <= 0: + if n_agents <= 0: pkl_def = definition.unpickle(exp_input_root / config.kPickleLeaf) - n_robots = module.population_size_from_pickle(pkl_def, + n_agents = module.population_size_from_pickle(pkl_def, main_config, cmdopts) - assert n_robots > 0, "n_robots must be > 0" + assert n_agents > 0, "n_agents must be > 0" - return n_robots + return n_agents def df_fill(df: pd.DataFrame, policy: str) -> pd.DataFrame: @@ -413,7 +411,7 @@ def sphinx_ref(ref: str) -> str: 'apply_to_expdef', 'pickle_modifications', 'exp_template_path', - 'get_n_robots', + 'get_n_agents', 'df_fill', 'utf8open', ] diff --git a/sierra/core/variables/batch_criteria.py b/sierra/core/variables/batch_criteria.py index f9cc03ae..6f7648de 100755 --- a/sierra/core/variables/batch_criteria.py +++ b/sierra/core/variables/batch_criteria.py @@ -31,9 +31,9 @@ class IQueryableBatchCriteria(implements.Interface): """ - def n_robots(self, exp_num: int) -> int: + def n_agents(self, exp_num: int) -> int: """ - Return the # of robots used for 
a given :term:`Experiment`. + Return the # of agents used for a given :term:`Experiment`. """ raise NotImplementedError @@ -45,16 +45,22 @@ class IConcreteBatchCriteria(implements.Interface): def graph_xticks(self, cmdopts: types.Cmdopts, + batch_output_root: pathlib.Path, exp_names: tp.Optional[tp.List[str]] = None) -> tp.List[float]: """Calculate X axis ticks for graph generation. Arguments: - cmdopts: Dictionary of parsed command line options. + cmdopts: Dictionary of parsed command line options. Most batch + criteria will not need this, BUT it is available. + + batch_output_root: Root directory for all experimental output in the + batch. Needed in calculating graphs for batch + criteria when ``--exp-range`` is used. exp_names: If not None, then this list of directories will be used to calculate the ticks, rather than the results of - gen_exp_names(). + :ref:`gen_exp_names()`. """ @@ -62,16 +68,23 @@ def graph_xticks(self, def graph_xticklabels(self, cmdopts: types.Cmdopts, + batch_output_root: pathlib.Path, exp_names: tp.Optional[tp.List[str]] = None) -> tp.List[str]: """Calculate X axis tick labels for graph generation. Arguments: - cmdopts: Dictionary of parsed command line options. + cmdopts: Dictionary of parsed command line options. Most batch + criteria will not need this, BUT it is available. + + batch_output_root: Root directory for all experimental output in the + batch. Needed in calculating graphs for batch + criteria when ``--exp-range`` is used. + batch. exp_names: If not None, then these directories will be used to calculate the labels, rather than the results of - gen_exp_names(). + :ref:`gen_exp_names()`. """ raise NotImplementedError @@ -95,34 +108,45 @@ class IBivarBatchCriteria(implements.Interface): def graph_yticks(self, cmdopts: types.Cmdopts, + batch_output_root: pathlib.Path, exp_names: tp.Optional[tp.List[str]] = None) -> tp.List[float]: """ Calculate Y axis ticks for graph generation. 
Arguments: - cmdopts: Dictionary of parsed command line options. + cmdopts: Dictionary of parsed command line options. Most batch + criteria will not need this, BUT it is available. - exp_names: If not None, then these directories will be used to - calculate the ticks, rather than the results of - gen_exp_names(). + batch_output_root: Root directory for all experimental output in the + batch. Needed in calculating graphs for batch + criteria when ``--exp-range`` is used. + exp_names: If not None, then these directories will be used to + calculate the labels, rather than the results of + :ref:`gen_exp_names()`. """ raise NotImplementedError def graph_yticklabels(self, cmdopts: types.Cmdopts, + batch_output_root: pathlib.Path, exp_names: tp.Optional[tp.List[str]] = None) -> tp.List[str]: """ Calculate X axis ticks for graph generation. Arguments: - cmdopts: Dictionary of parsed command line options. + cmdopts: Dictionary of parsed command line options. Most batch + criteria will not need this, BUT it is available. + + batch_output_root: Root directory for all experimental output in the + batch. Needed in calculating graphs for batch + criteria when ``--exp-range`` is used. exp_names: If not None, then these directories will be used to calculate the labels, rather than the results of - gen_exp_names(). + :ref:`gen_exp_names()`. """ raise NotImplementedError @@ -208,7 +232,7 @@ def gen_tag_addlist(self) -> tp.List[xml.TagAddList]: def gen_files(self) -> None: pass - def gen_exp_names(self, cmdopts: types.Cmdopts) -> tp.List[str]: + def gen_exp_names(self) -> tp.List[str]: """ Generate list of experiment names from the criteria. 
@@ -247,7 +271,7 @@ def pickle_exp_defs(self, cmdopts: types.Cmdopts) -> None: scaffold_spec = spec.scaffold_spec_factory(self) for exp in range(0, scaffold_spec.n_exps): - exp_dirname = self.gen_exp_names(cmdopts)[exp] + exp_dirname = self.gen_exp_names()[exp] # Pickling of batch criteria experiment definitions is the FIRST set # of changes to be pickled--all other changes come after. We append # to the pickle file by default, which allows any number of @@ -307,7 +331,7 @@ def _scaffold_expi(self, is_compound: bool, i: int, cmdopts: types.Cmdopts) -> None: - exp_dirname = self.gen_exp_names(cmdopts)[i] + exp_dirname = self.gen_exp_names()[i] exp_input_root = self.batch_input_root / exp_dirname utils.dir_create_checked(exp_input_root, @@ -402,7 +426,7 @@ def populations(self, if exp_names is not None: names = exp_names else: - names = self.gen_exp_names(cmdopts) + names = self.gen_exp_names() module = pm.pipeline.get_plugin_module(cmdopts['platform']) for d in names: @@ -489,7 +513,7 @@ def gen_tag_rmlist(self) -> tp.List[xml.TagRmList]: ret.extend(self.criteria2.gen_tag_rmlist()) return ret - def gen_exp_names(self, cmdopts: types.Cmdopts) -> tp.List[str]: + def gen_exp_names(self) -> tp.List[str]: """ Generate a SORTED list of strings for all experiment names. @@ -497,8 +521,8 @@ def gen_exp_names(self, cmdopts: types.Cmdopts) -> tp.List[str]: parents. """ - list1 = self.criteria1.gen_exp_names(cmdopts) - list2 = self.criteria2.gen_exp_names(cmdopts) + list1 = self.criteria1.gen_exp_names() + list2 = self.criteria2.gen_exp_names() ret = [] for l1 in list1: @@ -514,15 +538,16 @@ def populations(self, cmdopts: types.Cmdopts) -> tp.List[tp.List[int]]: `gen_exp_names()` for each criteria along each axis. 
""" - names = self.gen_exp_names(cmdopts) + names = self.gen_exp_names() - sizes = [[0 for col in self.criteria2.gen_exp_names( - cmdopts)] for row in self.criteria1.gen_exp_names(cmdopts)] + sizes = [[0 for col in self.criteria2.gen_exp_names()] + for row in self.criteria1.gen_exp_names()] n_chgs2 = len(self.criteria2.gen_attr_changelist()) n_adds2 = len(self.criteria2.gen_tag_addlist()) module = pm.pipeline.get_plugin_module(cmdopts['platform']) + for d in names: pkl_path = self.batch_input_root / d / config.kPickleLeaf exp_def = definition.unpickle(pkl_path) @@ -557,71 +582,58 @@ def exp_scenario_name(self, exp_num: int) -> str: def graph_xticks(self, cmdopts: types.Cmdopts, + batch_output_root: pathlib.Path, exp_names: tp.Optional[tp.List[str]] = None) -> tp.List[float]: names = [] - all_dirs = utils.exp_range_calc(cmdopts, - cmdopts['batch_output_root'], + all_dirs = utils.exp_range_calc(cmdopts["exp_range"], + batch_output_root, self) - for c1 in self.criteria1.gen_exp_names(cmdopts): + ynames = exp_names if exp_names else self.criteria1.gen_exp_names(cmdopts) + + for c1 in ynames: for x in all_dirs: leaf = x.name if c1 in leaf.split('+')[0]: names.append(leaf) break - return self.criteria1.graph_xticks(cmdopts, names) + return self.criteria1.graph_xticks(cmdopts, batch_output_root, names) def graph_yticks(self, cmdopts: types.Cmdopts, + batch_output_root: pathlib.Path, exp_names: tp.Optional[tp.List[str]] = None) -> tp.List[float]: names = [] - all_dirs = utils.exp_range_calc(cmdopts, - cmdopts['batch_output_root'], + all_dirs = utils.exp_range_calc(cmdopts["exp_range"], + batch_output_root, self) + xnames = exp_names if exp_names else self.criteria2.gen_exp_names(cmdopts) - for c2 in self.criteria2.gen_exp_names(cmdopts): + for c2 in xnames: for y in all_dirs: leaf = y.name if c2 in leaf.split('+')[1]: names.append(leaf) break - return self.criteria2.graph_xticks(cmdopts, names) + return self.criteria2.graph_xticks(cmdopts, batch_output_root, names) def 
graph_xticklabels(self, cmdopts: types.Cmdopts, + batch_output_root: pathlib.Path, exp_names: tp.Optional[tp.List[str]] = None) -> tp.List[str]: - names = [] - all_dirs = utils.exp_range_calc(cmdopts, - cmdopts['batch_output_root'], - self) - - for c1 in self.criteria1.gen_exp_names(cmdopts): - for x in all_dirs: - leaf = x.name - if c1 in leaf.split('+')[0]: - names.append(leaf) - break - - return self.criteria1.graph_xticklabels(cmdopts, names) + return self.criteria1.graph_xticklabels(cmdopts, + batch_output_root, + exp_names) def graph_yticklabels(self, cmdopts: types.Cmdopts, + batch_output_root: pathlib.Path, exp_names: tp.Optional[tp.List[str]] = None) -> tp.List[str]: - names = [] - all_dirs = utils.exp_range_calc(cmdopts, - cmdopts['batch_output_root'], - self) - - for c2 in self.criteria2.gen_exp_names(cmdopts): - for y in all_dirs: - leaf = y.name - if c2 in leaf.split('+')[1]: - names.append(leaf) - break - - return self.criteria2.graph_xticklabels(cmdopts, names) + return self.criteria2.graph_xticklabels(cmdopts, + batch_output_root, + exp_names) def graph_xlabel(self, cmdopts: types.Cmdopts) -> str: return self.criteria1.graph_xlabel(cmdopts) @@ -634,22 +646,23 @@ def set_batch_input_root(self, root: pathlib.Path) -> None: self.criteria1.batch_input_root = root self.criteria2.batch_input_root = root - def n_robots(self, exp_num: int) -> int: + def n_agents(self, exp_num: int) -> int: n_chgs2 = len(self.criteria2.gen_attr_changelist()) n_adds2 = len(self.criteria2.gen_tag_addlist()) i = int(exp_num / (n_chgs2 + n_adds2)) j = exp_num % (n_chgs2 + n_adds2) - if hasattr(self.criteria1, 'n_robots'): - return self.criteria1.n_robots(i) - elif hasattr(self.criteria2, 'n_robots'): - return self.criteria2.n_robots(j) + if hasattr(self.criteria1, 'n_agents'): + return self.criteria1.n_agents(i) + elif hasattr(self.criteria2, 'n_agents'): + return self.criteria2.n_agents(j) raise NotImplementedError def factory(main_config: types.YAMLDict, cmdopts: 
types.Cmdopts, + batch_input_root: pathlib.Path, args: argparse.Namespace, scenario: tp.Optional[str] = None) -> IConcreteBatchCriteria: if scenario is None: @@ -658,13 +671,15 @@ def factory(main_config: types.YAMLDict, if len(args.batch_criteria) == 1: return __univar_factory(main_config, cmdopts, + batch_input_root, args.batch_criteria[0], scenario) elif len(args.batch_criteria) == 2: - assert args.batch_criteria[0] != args.batch_criteria[1],\ + assert args.batch_criteria[0] != args.batch_criteria[1], \ "Duplicate batch criteria passed" return __bivar_factory(main_config, cmdopts, + batch_input_root, args.batch_criteria, scenario) else: @@ -674,6 +689,7 @@ def factory(main_config: types.YAMLDict, def __univar_factory(main_config: types.YAMLDict, cmdopts: types.Cmdopts, + batch_input_root: pathlib.Path, cli_arg: str, scenario) -> IConcreteBatchCriteria: """ @@ -689,9 +705,10 @@ def __univar_factory(main_config: types.YAMLDict, ret = bcfactory(cli_arg, main_config, cmdopts, + batch_input_root, scenario=scenario)() else: - ret = bcfactory(cli_arg, main_config, cmdopts)() + ret = bcfactory(cli_arg, main_config, cmdopts, batch_input_root)() logging.info("Create univariate batch criteria '%s' from '%s'", ret.__class__.__name__, @@ -701,10 +718,20 @@ def __univar_factory(main_config: types.YAMLDict, def __bivar_factory(main_config: types.YAMLDict, cmdopts: types.Cmdopts, + batch_input_root: pathlib.Path, cli_arg: tp.List[str], scenario: str) -> IConcreteBatchCriteria: - criteria1 = __univar_factory(main_config, cmdopts, cli_arg[0], scenario) - criteria2 = __univar_factory(main_config, cmdopts, cli_arg[1], scenario) + criteria1 = __univar_factory(main_config, + cmdopts, + batch_input_root, + cli_arg[0], + scenario) + + criteria2 = __univar_factory(main_config, + cmdopts, + batch_input_root, + cli_arg[1], + scenario) # Project hook bc = pm.module_load_tiered(project=cmdopts['project'], diff --git a/sierra/core/variables/population_size.py 
b/sierra/core/variables/population_size.py index ef1f360a..aba62218 100644 --- a/sierra/core/variables/population_size.py +++ b/sierra/core/variables/population_size.py @@ -29,7 +29,7 @@ def graph_xticks(self, exp_names: tp.Optional[tp.List[str]] = None) -> tp.List[float]: if exp_names is None: - exp_names = self.gen_exp_names(cmdopts) + exp_names = self.gen_exp_names() ret = list(map(float, self.populations(cmdopts, exp_names))) @@ -45,7 +45,7 @@ def graph_xticklabels(self, exp_names: tp.Optional[tp.List[str]] = None) -> tp.List[str]: if exp_names is None: - exp_names = self.gen_exp_names(cmdopts) + exp_names = self.gen_exp_names() ret = map(float, self.populations(cmdopts, exp_names)) @@ -59,9 +59,7 @@ def graph_xlabel(self, cmdopts: types.Cmdopts) -> str: class Parser(): - """A base parser for use in changing the # robots/agents. - - """ + """A base parser for use in changing the # robots/agents.""" def __call__(self, arg: str) -> types.CLIArgSpec: ret = { @@ -74,7 +72,7 @@ def __call__(self, arg: str) -> types.CLIArgSpec: # remove batch criteria variable name, leaving only the spec sections = sections[1:] - assert len(sections) >= 1 and len(sections) <= 2,\ + assert len(sections) >= 1 and len(sections) <= 2, \ ("Spec must have 1 or 2 sections separated by '.'; " f"have {len(sections)} from '{arg}'") diff --git a/sierra/main.py b/sierra/main.py index 1576385e..2c8e2ba0 100755 --- a/sierra/main.py +++ b/sierra/main.py @@ -16,7 +16,8 @@ # Project packages import sierra.core.cmdline as cmd -from sierra.core import platform, plugin, startup, version, batchroot +from sierra import version +from sierra.core import platform, plugin, startup, batchroot from sierra.core.pipeline.pipeline import Pipeline from sierra.core.generators.controller_generator_parser import ControllerGeneratorParser import sierra.core.plugin_manager as pm @@ -116,7 +117,7 @@ def __call__(self) -> None: pathset = batchroot.from_cmdline(self.args) pipeline = Pipeline(self.args, controller, pathset) 
else: - pipeline = Pipeline(self.args, None, None) + pipeline = Pipeline(self.args, None) try: pipeline.run() diff --git a/sierra/plugins/platform/argos/cmdline.py b/sierra/plugins/platform/argos/cmdline.py index dbf99efd..80085daf 100755 --- a/sierra/plugins/platform/argos/cmdline.py +++ b/sierra/plugins/platform/argos/cmdline.py @@ -290,7 +290,7 @@ def cmdopts_update(cli_args, cmdopts: types.Cmdopts) -> None: updates = { # Stage 1 - 'n_robots': cli_args.n_robots, + 'n_agents': cli_args.n_agents, 'exp_setup': cli_args.exp_setup, diff --git a/sierra/plugins/platform/argos/generators/platform_generators.py b/sierra/plugins/platform/argos/generators/platform_generators.py index 71daa2b0..8ebd06e6 100644 --- a/sierra/plugins/platform/argos/generators/platform_generators.py +++ b/sierra/plugins/platform/argos/generators/platform_generators.py @@ -69,7 +69,7 @@ def generate(self) -> definition.XMLExpDef: write_config=wr_config) # Generate # robots - self._generate_n_robots(exp_def) + self._generate_n_agents(exp_def) # Setup library self._generate_library(exp_def) @@ -118,18 +118,18 @@ def generate_physics(self, n_engines, engine_type) if cmdopts['physics_spatial_hash2D']: - assert hasattr(self.spec.criteria, 'n_robots'),\ + assert hasattr(self.spec.criteria, 'n_agents'), \ ("When using the 2D spatial hash, the batch " "criteria must implement bc.IQueryableBatchCriteria") - n_robots = self.spec.criteria.n_robots(self.spec.exp_num) + n_agents = self.spec.criteria.n_agents(self.spec.exp_num) else: - n_robots = None + n_agents = None module = pm.pipeline.get_plugin_module(cmdopts['platform']) robot_type = module.robot_type_from_def(exp_def) pe = physics_engines.factory(engine_type, n_engines, - n_robots, + n_agents, robot_type, cmdopts, extents) @@ -149,19 +149,19 @@ def generate_arena_shape(self, _, adds, chgs = utils.apply_to_expdef(shape, exp_def) utils.pickle_modifications(adds, chgs, self.spec.exp_def_fpath) - def _generate_n_robots(self, exp_def: definition.XMLExpDef) 
-> None: + def _generate_n_agents(self, exp_def: definition.XMLExpDef) -> None: """ Generate XML changes to setup # robots (if specified on cmdline). Writes generated changes to the simulation definition pickle file. """ - if self.cmdopts['n_robots'] is None: + if self.cmdopts['n_agents'] is None: return self.logger.trace(("Generating changes for # robots " # type: ignore "(all runs)")) chgs = population_size.PopulationSize.gen_attr_changelist_from_list( - [self.cmdopts['n_robots']]) + [self.cmdopts['n_agents']]) for a in chgs[0]: exp_def.attr_change(a.path, a.attr, a.value, True) diff --git a/sierra/plugins/platform/argos/plugin.py b/sierra/plugins/platform/argos/plugin.py index e3f28f9d..099f86c4 100644 --- a/sierra/plugins/platform/argos/plugin.py +++ b/sierra/plugins/platform/argos/plugin.py @@ -20,7 +20,7 @@ # Project packages from sierra.plugins.platform.argos import cmdline -from sierra.core import hpc, config, types, utils, platform +from sierra.core import hpc, config, types, utils, platform, batchroot from sierra.core.experiment import bindings, definition, xml import sierra.core.variables.batch_criteria as bc @@ -102,7 +102,7 @@ def _hpc_slurm(self, args: argparse.Namespace) -> None: def _hpc_local(self, args: argparse.Namespace) -> None: self.logger.debug("Configuring ARGoS for LOCAL execution") if any(stage in args.pipeline for stage in [1, 2]): - assert args.physics_n_engines is not None,\ + assert args.physics_n_engines is not None, \ '--physics-n-engines is required for --exec-env=hpc.local when running stage{1,2}' ppn_per_run_req = args.physics_n_engines @@ -153,7 +153,7 @@ class ExpRunShellCmdsGenerator(): def __init__(self, cmdopts: types.Cmdopts, criteria: bc.BatchCriteria, - n_robots: int, + n_agents: int, exp_num: int) -> None: self.cmdopts = cmdopts self.display_port = -1 @@ -276,7 +276,7 @@ def __call__(self) -> None: version = packaging.version.parse(res.group(0)) min_version = config.kARGoS['min_version'] - assert version >= min_version,\ 
+ assert version >= min_version, \ f"ARGoS version {version} < min required {min_version}" if self.cmdopts['platform_vc']: @@ -302,7 +302,7 @@ def arena_dims_from_criteria(criteria: bc.BatchCriteria) -> tp.List[utils.ArenaE d = utils.Vector3D.from_str(c.value) dims.append(utils.ArenaExtent(d)) - assert len(dims) > 0,\ + assert len(dims) > 0, \ "Scenario dimensions not contained in batch criteria" return dims @@ -328,10 +328,11 @@ def population_size_from_def(exp_def: definition.XMLExpDef, def pre_exp_diagnostics(cmdopts: types.Cmdopts, + pathset: batchroot.PathSet, logger: logging.Logger) -> None: s = "batch_exp_root='%s',runs/exp=%s,threads/job=%s,n_jobs=%s" logger.info(s, - cmdopts['batch_root'], + pathset.root, cmdopts['n_runs'], cmdopts['physics_n_threads'], cmdopts['exec_jobs_per_node']) diff --git a/sierra/plugins/platform/argos/variables/physics_engines.py b/sierra/plugins/platform/argos/variables/physics_engines.py index c3f43912..01d0a000 100755 --- a/sierra/plugins/platform/argos/variables/physics_engines.py +++ b/sierra/plugins/platform/argos/variables/physics_engines.py @@ -62,7 +62,7 @@ def __init__(self, # If we are given multiple extents to map, we need to divide the # specified # of engines among them. self.n_engines = int(self.n_engines / float(len(self.extents))) - assert self.layout == 'uniform_grid2D',\ + assert self.layout == 'uniform_grid2D', \ "Only uniform_grid2D physics engine layout currently supported" self.logger = logging.getLogger(__name__) @@ -473,7 +473,7 @@ def __init__(self, def factory(engine_type: str, n_engines: int, - n_robots: tp.Optional[int], + n_agents: tp.Optional[int], robot_type: str, cmdopts: types.Cmdopts, extents: tp.List[ArenaExtent]) -> PhysicsEngines: @@ -484,11 +484,11 @@ def factory(engine_type: str, # remain so in the future, so we employ a factory function to make # implementation of diverging functionality easier later. 
if '2d' in engine_type: - if n_robots and cmdopts['physics_spatial_hash2D']: + if n_agents and cmdopts['physics_spatial_hash2D']: spatial_hash = { # Per ARGoS documentation in 'argos3 -q dynamics2d' 'cell_size': config.kARGoS['spatial_hash2D'][robot_type], - 'cell_num': n_robots / float(n_engines) * 10 + 'cell_num': n_agents / float(n_engines) * 10 } logging.debug(("Using 2D spatial hash for physics engines: " "cell_size=%f,cell_num=%d"), diff --git a/sierra/plugins/platform/argos/variables/population_constant_density.py b/sierra/plugins/platform/argos/variables/population_constant_density.py index 9a06300a..9928965a 100755 --- a/sierra/plugins/platform/argos/variables/population_constant_density.py +++ b/sierra/plugins/platform/argos/variables/population_constant_density.py @@ -13,6 +13,7 @@ import typing as tp import logging import math +import pathlib # 3rd party packages import implements @@ -58,11 +59,11 @@ def gen_attr_changelist(self) -> tp.List[xml.AttrChangeSet]: Vector3D(x, y, z)) # ARGoS won't start if there are 0 robots, so you always # need to put at least 1. 
- n_robots = int(extent.area() * + n_agents = int(extent.area() * (self.target_density / 100.0)) - if n_robots == 0: - n_robots = 1 - self.logger.warning(("n_robots set to 1 even though " + if n_agents == 0: + n_agents = 1 + self.logger.warning(("n_agents set to 1 even though " "calculated as 0 for area=%s," "density=%s"), str(extent.area()), @@ -70,9 +71,9 @@ def gen_attr_changelist(self) -> tp.List[xml.AttrChangeSet]: changeset.add(xml.AttrChange(".//arena/distribute/entity", "quantity", - str(n_robots))) + str(n_agents))) self.logger.debug("Calculated population size=%d for extent=%s,density=%s", - n_robots, + n_agents, str(extent), self.target_density) break @@ -80,7 +81,7 @@ def gen_attr_changelist(self) -> tp.List[xml.AttrChangeSet]: return self.attr_changes - def gen_exp_names(self, cmdopts: types.Cmdopts) -> tp.List[str]: + def gen_exp_names(self) -> tp.List[str]: changes = self.gen_attr_changelist() return ['exp' + str(x) for x in range(0, len(changes))] @@ -89,7 +90,7 @@ def graph_xticks(self, exp_names: tp.Optional[tp.List[str]] = None) -> tp.List[float]: if exp_names is None: - exp_names = self.gen_exp_names(cmdopts) + exp_names = self.gen_exp_names() ret = list(map(float, self.populations(cmdopts, exp_names))) @@ -104,7 +105,7 @@ def graph_xticklabels(self, cmdopts: types.Cmdopts, exp_names: tp.Optional[tp.List[str]] = None) -> tp.List[str]: if exp_names is None: - exp_names = self.gen_exp_names(cmdopts) + exp_names = self.gen_exp_names() ret = map(float, self.populations(cmdopts, exp_names)) @@ -116,7 +117,7 @@ def graph_xlabel(self, cmdopts: types.Cmdopts) -> str: return r"Population Size" - def n_robots(self, exp_num: int) -> int: + def n_agents(self, exp_num: int) -> int: return int(self.target_density / 100.0 * self.dimensions[exp_num].area()) @@ -148,6 +149,7 @@ def calc_dims(cmdopts: types.Cmdopts, def factory(cli_arg: str, main_config: types.YAMLDict, cmdopts: types.Cmdopts, + batch_input_root: pathlib.Path, **kwargs) -> 
PopulationConstantDensity: """Create a :class:`PopulationConstantDensity` derived class. @@ -160,7 +162,7 @@ def __init__(self) -> None: PopulationConstantDensity.__init__(self, cli_arg, main_config, - cmdopts['batch_input_root'], + batch_input_root, attr["target_density"], dims, kw['scenario_tag']) diff --git a/sierra/plugins/platform/argos/variables/population_size.py b/sierra/plugins/platform/argos/variables/population_size.py index cc394daf..8df2feb0 100755 --- a/sierra/plugins/platform/argos/variables/population_size.py +++ b/sierra/plugins/platform/argos/variables/population_size.py @@ -67,17 +67,18 @@ def gen_attr_changelist(self) -> tp.List[xml.AttrChangeSet]: self.size_list) return self.attr_changes - def gen_exp_names(self, cmdopts: types.Cmdopts) -> tp.List[str]: + def gen_exp_names(self) -> tp.List[str]: changes = self.gen_attr_changelist() return ['exp' + str(x) for x in range(0, len(changes))] - def n_robots(self, exp_num: int) -> int: + def n_agents(self, exp_num: int) -> int: return self.size_list[exp_num] def factory(cli_arg: str, main_config: types.YAMLDict, cmdopts: types.Cmdopts, + batch_input_root: pathlib.Path, **kwargs) -> PopulationSize: """Create a :class:`PopulationSize` derived class from the cmdline definition. 
@@ -89,7 +90,7 @@ def __init__(self) -> None: PopulationSize.__init__(self, cli_arg, main_config, - cmdopts['batch_input_root'], + batch_input_root, max_sizes) return type(cli_arg, # type: ignore diff --git a/sierra/plugins/platform/argos/variables/population_variable_density.py b/sierra/plugins/platform/argos/variables/population_variable_density.py index 80435537..4e68fd30 100755 --- a/sierra/plugins/platform/argos/variables/population_variable_density.py +++ b/sierra/plugins/platform/argos/variables/population_variable_density.py @@ -13,6 +13,7 @@ import typing as tp import logging import numpy as np +import pathlib # 3rd party packages import implements @@ -50,19 +51,19 @@ def gen_attr_changelist(self) -> tp.List[xml.AttrChangeSet]: for density in self.densities: # ARGoS won't start if there are 0 robots, so you always # need to put at least 1. - n_robots = int(self.extent.area() * (density / 100.0)) - if n_robots == 0: - n_robots = 1 - self.logger.warning("n_robots set to 1 even though \ + n_agents = int(self.extent.area() * (density / 100.0)) + if n_agents == 0: + n_agents = 1 + self.logger.warning("n_agents set to 1 even though \ calculated as 0 for area=%d,density=%s", self.extent.area(), density) changeset = xml.AttrChangeSet(xml.AttrChange(".//arena/distribute/entity", "quantity", - str(n_robots))) + str(n_agents))) self.attr_changes.append(changeset) self.logger.debug("Calculated swarm size=%d for extent=%s,density=%s", - n_robots, + n_agents, str(self.extent), density) @@ -70,7 +71,7 @@ def gen_attr_changelist(self) -> tp.List[xml.AttrChangeSet]: return self.attr_changes - def gen_exp_names(self, cmdopts: types.Cmdopts) -> tp.List[str]: + def gen_exp_names(self) -> tp.List[str]: changes = self.gen_attr_changelist() return ['exp' + str(x) for x in range(0, len(changes))] @@ -79,7 +80,7 @@ def graph_xticks(self, exp_names: tp.Optional[tp.List[str]] = None) -> tp.List[float]: if exp_names is None: - exp_names = self.gen_exp_names(cmdopts) + exp_names = 
self.gen_exp_names() return [p / self.extent.area() for p in self.populations(cmdopts, exp_names)] @@ -92,13 +93,14 @@ def graph_xticklabels(self, def graph_xlabel(self, cmdopts: types.Cmdopts) -> str: return r"Population Density" - def n_robots(self, exp_num: int) -> int: + def n_agents(self, exp_num: int) -> int: return int(self.extent.area() * self.densities[exp_num] / 100.0) def factory(cli_arg: str, main_config: types.YAMLDict, cmdopts: types.Cmdopts, + batch_input_root: pathlib.Path, **kwargs) -> PopulationVariableDensity: """ Create a :class:`PopulationVariableDensity` derived class. @@ -118,7 +120,7 @@ def __init__(self) -> None: PopulationVariableDensity.__init__(self, cli_arg, main_config, - cmdopts['batch_input_root'], + batch_input_root, densities, extent) diff --git a/sierra/plugins/platform/ros1gazebo/plugin.py b/sierra/plugins/platform/ros1gazebo/plugin.py index e82502cd..a6a632c3 100644 --- a/sierra/plugins/platform/ros1gazebo/plugin.py +++ b/sierra/plugins/platform/ros1gazebo/plugin.py @@ -20,7 +20,7 @@ # Project packages from sierra.plugins.platform.ros1gazebo import cmdline -from sierra.core import hpc, platform, config, ros1, types +from sierra.core import hpc, platform, config, ros1, types, batchroot from sierra.core.experiment import bindings, definition, xml import sierra.core.variables.batch_criteria as bc @@ -119,7 +119,7 @@ class ExpRunShellCmdsGenerator(): def __init__(self, cmdopts: types.Cmdopts, criteria: bc.BatchCriteria, - n_robots: int, + n_agents: int, exp_num: int) -> None: self.cmdopts = cmdopts self.gazebo_port = -1 @@ -278,15 +278,15 @@ def __call__(self) -> None: keys = ['ROS_DISTRO', 'ROS_VERSION'] for k in keys: - assert k in os.environ,\ + assert k in os.environ, \ f"Non-ROS+Gazebo environment detected: '{k}' not found" # Check ROS distro - assert os.environ['ROS_DISTRO'] in ['kinetic', 'noetic'],\ + assert os.environ['ROS_DISTRO'] in ['kinetic', 'noetic'], \ "SIERRA only supports ROS1 kinetic,noetic" # Check ROS version - 
assert os.environ['ROS_VERSION'] == "1",\ + assert os.environ['ROS_VERSION'] == "1", \ "Wrong ROS version: this plugin is for ROS1" # Check we can find Gazebo @@ -299,7 +299,7 @@ def __call__(self) -> None: version = packaging.version.parse(res.group(0)) min_version = packaging.version.parse(config.kGazebo['min_version']) - assert version >= min_version,\ + assert version >= min_version, \ f"Gazebo version {version} < min required {min_version}" @@ -326,10 +326,11 @@ def robot_prefix_extract(main_config: types.YAMLDict, def pre_exp_diagnostics(cmdopts: types.Cmdopts, + pathset: batchroot.PathSet, logger: logging.Logger) -> None: s = "batch_exp_root='%s',runs/exp=%s,threads/job=%s,n_jobs=%s" logger.info(s, - cmdopts['batch_root'], + pathset.root, cmdopts['n_runs'], cmdopts['physics_n_threads'], cmdopts['exec_jobs_per_node']) diff --git a/sierra/plugins/platform/ros1gazebo/variables/population_size.py b/sierra/plugins/platform/ros1gazebo/variables/population_size.py index b3405bc6..0da37b89 100755 --- a/sierra/plugins/platform/ros1gazebo/variables/population_size.py +++ b/sierra/plugins/platform/ros1gazebo/variables/population_size.py @@ -153,7 +153,7 @@ def gen_tag_addlist(self) -> tp.List[xml.TagAddList]: return self.tag_adds - def gen_exp_names(self, cmdopts: types.Cmdopts) -> tp.List[str]: + def gen_exp_names(self) -> tp.List[str]: adds = self.gen_tag_addlist() return ['exp' + str(x) for x in range(0, len(adds))] @@ -164,7 +164,8 @@ def n_robots(self, exp_num: int) -> int: def factory(cli_arg: str, main_config: types.YAMLDict, cmdopts: types.Cmdopts, - **kwargs) -> PopulationSize: + batch_input_root: pathlib.Path, + **kwargs) -> PopulationSize: """Create a :class:`PopulationSize` derived class from the cmdline definition. 
""" @@ -189,7 +190,7 @@ def __init__(self) -> None: PopulationSize.__init__(self, cli_arg, main_config, - cmdopts['batch_input_root'], + batch_input_root, cmdopts['robot'], max_sizes, positions) diff --git a/sierra/plugins/platform/ros1robot/generators/platform_generators.py b/sierra/plugins/platform/ros1robot/generators/platform_generators.py index de0f5e26..3d633108 100644 --- a/sierra/plugins/platform/ros1robot/generators/platform_generators.py +++ b/sierra/plugins/platform/ros1robot/generators/platform_generators.py @@ -81,12 +81,12 @@ def generate(self, exp_def: definition.XMLExpDef): with utils.utf8open(main_path) as f: main_config = yaml.load(f, yaml.FullLoader) - n_robots = utils.get_n_robots(main_config, + n_agents = utils.get_n_agents(main_config, self.cmdopts, self.launch_stem_path.parent, exp_def) - for i in range(0, n_robots): + for i in range(0, n_agents): prefix = main_config['ros']['robots'][self.cmdopts['robot']]['prefix'] exp_def.write_config.add({ 'src_parent': "./robot", diff --git a/sierra/plugins/platform/ros1robot/plugin.py b/sierra/plugins/platform/ros1robot/plugin.py index 4254f2c4..2005c545 100644 --- a/sierra/plugins/platform/ros1robot/plugin.py +++ b/sierra/plugins/platform/ros1robot/plugin.py @@ -17,7 +17,7 @@ # Project packages from sierra.plugins.platform.ros1robot import cmdline -from sierra.core import platform, config, ros1, types, utils +from sierra.core import platform, config, ros1, types, utils, batchroot from sierra.core.experiment import bindings, definition, xml import sierra.core.variables.batch_criteria as bc @@ -37,7 +37,7 @@ def __init__(self, exec_env: str) -> None: def __call__(self, args: argparse.Namespace) -> None: if args.nodefile is None: - assert 'SIERRA_NODEFILE' in os.environ,\ + assert 'SIERRA_NODEFILE' in os.environ, \ ("Non-ros1robot environment detected: --nodefile not " "passed and 'SIERRA_NODEFILE' not found") args.nodefile = os.environ['SIERRA_NODEFILE'] @@ -46,7 +46,7 @@ def __call__(self, args: 
argparse.Namespace) -> None: f"SIERRA_NODEFILE '{args.nodefile}' does not exist" self.logger.info("Using '%s' as robot hostnames file", args.nodefile) - assert not args.platform_vc,\ + assert not args.platform_vc, \ "Platform visual capture not supported on ros1robot" @@ -55,10 +55,10 @@ class ExpRunShellCmdsGenerator(): def __init__(self, cmdopts: types.Cmdopts, criteria: bc.BatchCriteria, - n_robots: int, + n_agents: int, exp_num: int) -> None: self.cmdopts = cmdopts - self.n_robots = n_robots + self.n_agents = n_agents self.exp_num = exp_num self.criteria = criteria self.logger = logging.getLogger('platform.ros1robot') @@ -90,7 +90,7 @@ def pre_run_cmds(self, self.logger.debug("Generating pre-exec cmds for run%s slaves: %d robots", run_num, - self.n_robots) + self.n_agents) script_yaml = main_config['ros']['robots'][self.cmdopts['robot']] script_file = script_yaml.get('setup_script', "$HOME/.bashrc") @@ -123,7 +123,7 @@ def _exec_run_cmds_master(self, run_num) # ROS master node - exp_dirname = self.criteria.gen_exp_names(self.cmdopts)[self.exp_num] + exp_dirname = self.criteria.gen_exp_names()[self.exp_num] exp_template_path = utils.exp_template_path(self.cmdopts, self.criteria.batch_input_root, exp_dirname) @@ -152,20 +152,20 @@ def _exec_run_cmds_slave(self, self.logger.debug("Generating exec cmds for run%s slaves: %d robots", run_num, - self.n_robots) + self.n_agents) nodes = platform.ExecEnvChecker.parse_nodefile(self.cmdopts['nodefile']) - if len(nodes) < self.n_robots: + if len(nodes) < self.n_agents: self.logger.critical(("Need %d hosts to correctly generate launch " "cmds for run%s with %d robots; %d available"), - self.n_robots, + self.n_agents, run_num, - self.n_robots, + self.n_agents, len(nodes)) ret = [] # type: tp.List[types.ShellCmdSpec] - for i in range(0, self.n_robots): + for i in range(0, self.n_agents): # --wait tells roslaunch to wait for the configured master to # come up before launch the robot code. 
cmd = '{0} --wait {1}_robot{2}{3} ' @@ -207,7 +207,7 @@ def pre_exp_cmds(self) -> tp.List[types.ShellCmdSpec]: self.logger.info("Using ROS_MASTER_URI=%s", master_uri) - return[ + return [ types.ShellCmdSpec( # roscore will run on the SIERRA host machine. cmd=f'export ROS_MASTER_URI={master_uri}', @@ -335,11 +335,11 @@ def __call__(self) -> None: k) # Check ROS distro - assert os.environ['ROS_DISTRO'] in ['kinetic', 'noetic'],\ + assert os.environ['ROS_DISTRO'] in ['kinetic', 'noetic'], \ "SIERRA only supports ROS1 kinetic,noetic" # Check ROS version - assert os.environ['ROS_VERSION'] == "1",\ + assert os.environ['ROS_VERSION'] == "1", \ "Wrong ROS version: This plugin is for ROS1" @@ -366,8 +366,9 @@ def robot_prefix_extract(main_config: types.YAMLDict, def pre_exp_diagnostics(cmdopts: types.Cmdopts, + pathset: batchroot.PathSet, logger: logging.Logger) -> None: s = "batch_exp_root='%s',runs/exp=%s" logger.info(s, - cmdopts['batch_root'], + pathset.root, cmdopts['n_runs']) diff --git a/sierra/plugins/platform/ros1robot/variables/population_size.py b/sierra/plugins/platform/ros1robot/variables/population_size.py index 8ab3ebf2..7280438a 100755 --- a/sierra/plugins/platform/ros1robot/variables/population_size.py +++ b/sierra/plugins/platform/ros1robot/variables/population_size.py @@ -79,7 +79,7 @@ def gen_tag_addlist(self) -> tp.List[xml.TagAddList]: per_robot.append(xml.TagAdd("./master/group/[@ns='sierra']", "param", { - 'name': 'experiment/n_robots', + 'name': 'experiment/n_agents', 'value': str(s) }, False)) @@ -109,17 +109,18 @@ def gen_tag_addlist(self) -> tp.List[xml.TagAddList]: return self.tag_adds - def gen_exp_names(self, cmdopts: types.Cmdopts) -> tp.List[str]: + def gen_exp_names(self) -> tp.List[str]: adds = self.gen_tag_addlist() return ['exp' + str(x) for x in range(0, len(adds))] - def n_robots(self, exp_num: int) -> int: + def n_agents(self, exp_num: int) -> int: return self.sizes[exp_num] def factory(cli_arg: str, main_config: types.YAMLDict, 
cmdopts: types.Cmdopts, + batch_input_root: pathlib.Path, **kwargs) -> PopulationSize: """Create a :class:`PopulationSize` derived class from the cmdline definition. @@ -132,7 +133,7 @@ def __init__(self) -> None: PopulationSize.__init__(self, cli_arg, main_config, - cmdopts['batch_input_root'], + batch_input_root, cmdopts['robot'], max_sizes)