From 7a5774ba01536f45c00850188b1b40b42de35eb2 Mon Sep 17 00:00:00 2001
From: Henri Casanova
Date: Wed, 16 Sep 2020 21:48:18 -1000
Subject: [PATCH] Documentation fixes

---
 include/wrench/logging/TerminalOutput.h      |  1 +
 .../simgrid_S4U_util/S4U_VirtualMachine.h    |  3 +
 .../tools/pegasus/PegasusWorkflowParser.h    | 64 +++++++++++++++++++
 include/wrench/workflow/DagOfTasks.h         | 11 +++-
 .../parallel_model/AmdahlParallelModel.h     |  3 +
 .../ConstantEfficiencyParallelModel.h        |  3 +
 .../parallel_model/CustomParallelModel.h     |  3 +
 .../workflow/parallel_model/ParallelModel.h  | 13 ++++
 .../batsched/BatschedBatchScheduler.h        |  6 +-
 .../TraceFileLoader.cpp                      |  7 ++
 .../simgrid_S4U_util/S4U_Simulation.cpp      |  2 +-
 src/wrench/simulation/SimulationOutput.cpp   |  1 +
 .../simulation/SimulationTimestampTypes.cpp  |  1 +
 tools/pegasus/src/PegasusWorkflowParser.cpp  | 64 ++-----------------
 14 files changed, 120 insertions(+), 62 deletions(-)

diff --git a/include/wrench/logging/TerminalOutput.h b/include/wrench/logging/TerminalOutput.h
index 738304a9bf..30dcf99b76 100644
--- a/include/wrench/logging/TerminalOutput.h
+++ b/include/wrench/logging/TerminalOutput.h
@@ -91,6 +91,7 @@ namespace wrench {
         static void endThisProcessColor();
 
         static void disableColor();
+        static void disableLog();
 
         /***********************/
 
diff --git a/include/wrench/simgrid_S4U_util/S4U_VirtualMachine.h b/include/wrench/simgrid_S4U_util/S4U_VirtualMachine.h
index b14c7ccc75..595896fcf9 100644
--- a/include/wrench/simgrid_S4U_util/S4U_VirtualMachine.h
+++ b/include/wrench/simgrid_S4U_util/S4U_VirtualMachine.h
@@ -27,6 +27,9 @@ namespace wrench {
 
     public:
 
+        /**
+         * @brief A map to keep track of the VM-to-PM mapping
+         */
         static std::unordered_map<std::string, std::string> vm_to_pm_map;
 
         /** @brief VM state enum */
 
diff --git a/include/wrench/tools/pegasus/PegasusWorkflowParser.h b/include/wrench/tools/pegasus/PegasusWorkflowParser.h
index 57f7e004c7..1c4ea9130b 100644
--- a/include/wrench/tools/pegasus/PegasusWorkflowParser.h
+++ b/include/wrench/tools/pegasus/PegasusWorkflowParser.h
@@ -24,10 +24,74 @@ namespace wrench {
 
     public:
 
+        /**
+         * @brief Create an abstract workflow based on a DAX file
+         *
+         * @param filename: the path to the DAX file
+         * @param reference_flop_rate: a reference compute speed (in flops/sec), assuming a task's computation is purely flops.
+         *        This is needed because DAX files specify task execution times in seconds,
+         *        but the WRENCH simulation needs some notion of "amount of computation" to
+         *        apply reasonable scaling (because the XML platform description specifies host
+         *        compute speeds in flops/sec). The times in the DAX file are thus assumed to have been
+         *        obtained on a machine with flop rate reference_flop_rate.
+         * @param redundant_dependencies: Workflows provided by Pegasus
+         *        sometimes include control/data dependencies between tasks that are already induced by
+         *        other control/data dependencies (i.e., they correspond to transitive
+         *        closures or existing edges in the workflow graphs). Passing redundant_dependencies=true
+         *        forces these "redundant" dependencies to be added as edges in the workflow. Passing
+         *        redundant_dependencies=false will ignore these "redundant" dependencies. Most users
+         *        would likely pass "false".
+         *
+         * @return a workflow
+         *
+         * @throw std::invalid_argument
+         */
         static Workflow *createWorkflowFromDAX(const std::string &filename, const std::string &reference_flop_rate, bool redundant_dependencies = false);
 
+        /**
+         * @brief Create an abstract workflow based on a JSON file
+         *
+         * @param filename: the path to the JSON file
+         * @param reference_flop_rate: a reference compute speed (in flops/sec), assuming a task's computation is purely flops.
+         *        This is needed because JSON files specify task execution times in seconds,
+         *        but the WRENCH simulation needs some notion of "amount of computation" to
+         *        apply reasonable scaling (because the XML platform description specifies host
+         *        compute speeds in flops/sec). The times in the JSON file are thus assumed to have been
+         *        obtained on a machine with flop rate reference_flop_rate.
+         * @param redundant_dependencies: Workflows provided by Pegasus
+         *        sometimes include control/data dependencies between tasks that are already induced by
+         *        other control/data dependencies (i.e., they correspond to transitive
+         *        closures or existing edges in the workflow graphs). Passing redundant_dependencies=true
+         *        forces these "redundant" dependencies to be added as edges in the workflow. Passing
+         *        redundant_dependencies=false will ignore these "redundant" dependencies. Most users
+         *        would likely pass "false".
+         * @return a workflow
+         *
+         * @throw std::invalid_argument
+         *
+         */
         static Workflow *createWorkflowFromJSON(const std::string &filename, const std::string &reference_flop_rate, bool redundant_dependencies = false);
 
+        /**
+         * @brief Create a NON-abstract workflow based on a JSON file
+         *
+         * @param filename: the path to the JSON file
+         * @param reference_flop_rate: a reference compute speed (in flops/sec), assuming a task's computation is purely flops.
+         *        This is needed because JSON files specify task execution times in seconds,
+         *        but the WRENCH simulation needs some notion of "amount of computation" to
+         *        apply reasonable scaling (because the XML platform description specifies host
+         *        compute speeds in flops/sec). The times in the JSON file are thus assumed to have been
+         *        obtained on a machine with flop rate reference_flop_rate.
+         * @param redundant_dependencies: Workflows provided by Pegasus
+         *        sometimes include control/data dependencies between tasks that are already induced by
+         *        other control/data dependencies (i.e., they correspond to transitive
+         *        closures or existing edges in the workflow graphs). Passing redundant_dependencies=true
+         *        forces these "redundant" dependencies to be added as edges in the workflow. Passing
+         *        redundant_dependencies=false will ignore these "redundant" dependencies. Most users
+         *        would likely pass "false".
+         * @return a workflow
+         * @throw std::invalid_argument
+         */
         static Workflow *createExecutableWorkflowFromJSON(const std::string &filename, const std::string &reference_flop_rate, bool redundant_dependencies = false);
 
     };
 
diff --git a/include/wrench/workflow/DagOfTasks.h b/include/wrench/workflow/DagOfTasks.h
index 750aebc5fd..3f35111ca0 100644
--- a/include/wrench/workflow/DagOfTasks.h
+++ b/include/wrench/workflow/DagOfTasks.h
@@ -24,15 +24,24 @@ namespace wrench {
 
     class WorkflowTask;
 
+    /**
+     * @brief Data structure to store vertex properties
+     */
     struct VertexProperties {
 //        std::size_t index;
 //        boost::default_color_type color;
         const WorkflowTask *task;
     };
 
+    /**
+     * @brief Convenient DAG typedef
+     */
     typedef boost::adjacency_list DAG;
 
-    typedef unsigned long vertex_t; // To clean up later...
+    /**
+     * @brief Convenient vertex_t typedef
+     */
+    typedef unsigned long vertex_t; // To clean up some day...
 
     /**
      * @brief An internal class that uses the Boost Graph Library to implement a DAG of WorkflowTask objects
 
diff --git a/include/wrench/workflow/parallel_model/AmdahlParallelModel.h b/include/wrench/workflow/parallel_model/AmdahlParallelModel.h
index 5fda1db390..7b8002b648 100644
--- a/include/wrench/workflow/parallel_model/AmdahlParallelModel.h
+++ b/include/wrench/workflow/parallel_model/AmdahlParallelModel.h
@@ -16,6 +16,9 @@ namespace wrench {
 
+    /**
+     * @brief A class that defines an Amdahl's Law-based parallel task performance model.
+     */
     class AmdahlParallelModel : public ParallelModel {
 
     public:
 
diff --git a/include/wrench/workflow/parallel_model/ConstantEfficiencyParallelModel.h b/include/wrench/workflow/parallel_model/ConstantEfficiencyParallelModel.h
index 5332aae204..2724fe6e68 100644
--- a/include/wrench/workflow/parallel_model/ConstantEfficiencyParallelModel.h
+++ b/include/wrench/workflow/parallel_model/ConstantEfficiencyParallelModel.h
@@ -16,6 +16,9 @@ namespace wrench {
 
+    /**
+     * @brief A class that implements a constant-efficiency parallel task performance model
+     */
     class ConstantEfficiencyParallelModel : public ParallelModel {
 
     public:
 
diff --git a/include/wrench/workflow/parallel_model/CustomParallelModel.h b/include/wrench/workflow/parallel_model/CustomParallelModel.h
index 60ebd0e206..529b3af893 100644
--- a/include/wrench/workflow/parallel_model/CustomParallelModel.h
+++ b/include/wrench/workflow/parallel_model/CustomParallelModel.h
@@ -17,6 +17,9 @@ namespace wrench {
 
+    /**
+     * @brief A class that defines a custom parallel task performance model.
+     */
     class CustomParallelModel : public ParallelModel {
 
     public:
 
diff --git a/include/wrench/workflow/parallel_model/ParallelModel.h b/include/wrench/workflow/parallel_model/ParallelModel.h
index e8de7ac332..9d6fe890a1 100644
--- a/include/wrench/workflow/parallel_model/ParallelModel.h
+++ b/include/wrench/workflow/parallel_model/ParallelModel.h
@@ -16,6 +16,10 @@ namespace wrench {
 
+    /**
+     * @brief A virtual class (with convenient static methods) to define
+     *        parallel task performance models
+     */
     class ParallelModel {
 
     public:
 
@@ -28,6 +32,15 @@ namespace wrench {
         /** \cond INTERNAL **/
         /***********************/
 
+        /**
+         * @brief A method that, for this parallel model, computes how much work each thread that is
+         *        part of a parallel task should do
+         *
+         * @param total_work: the total amount of work (in flops)
+         * @param num_threads: the number of threads
+         *
+         * @return an amount of work (in flops) per thread
+         */
         virtual std::vector<double> getWorkPerThread(double total_work, unsigned long num_threads) = 0;
 
         virtual ~ParallelModel() {};
 
diff --git a/src/wrench/services/compute/batch/batch_schedulers/batsched/BatschedBatchScheduler.h b/src/wrench/services/compute/batch/batch_schedulers/batsched/BatschedBatchScheduler.h
index d3fd8d47f8..e6859a5f72 100644
--- a/src/wrench/services/compute/batch/batch_schedulers/batsched/BatschedBatchScheduler.h
+++ b/src/wrench/services/compute/batch/batch_schedulers/batsched/BatschedBatchScheduler.h
@@ -77,7 +77,11 @@ namespace wrench {
 
         /**
          * @brief Method to get start time estimates
-         * @param set_of_jobs: the set of job configurations whose start times should be estimated
+         * @param set_of_jobs: the set of job configurations whose start times should
+         *        be estimated.
+         *        Each job configuration is: (i) a name; (ii) a number of nodes;
+         *        (iii) a number of cores per node; and (iv) an execution duration in seconds.
+         *
+         * @return a map of estimated start dates (in seconds), indexed by job configuration name
          */
         std::map<std::string, double> getStartTimeEstimates(std::set<std::tuple<std::string, unsigned long, unsigned long, double>> set_of_jobs) override;
 
diff --git a/src/wrench/services/compute/batch/workload_helper_classes/TraceFileLoader.cpp b/src/wrench/services/compute/batch/workload_helper_classes/TraceFileLoader.cpp
index 469879c952..b8a3d24c73 100644
--- a/src/wrench/services/compute/batch/workload_helper_classes/TraceFileLoader.cpp
+++ b/src/wrench/services/compute/batch/workload_helper_classes/TraceFileLoader.cpp
@@ -18,6 +18,13 @@ WRENCH_LOG_CATEGORY(wrench_core_trace_file_loader, "Log category for Trace File
 
 namespace wrench {
 
+    /**
+     * @brief A method to generate a random username, so that generated workload
+     *        traces look more realistic
+     *
+     * @param userid: numerical userid
+     * @return a generated alphabetical username
+     */
     std::string generateRandomUsername(unsigned long userid) {
         //Type of random number distribution
         const char charset[] =
diff --git a/src/wrench/simgrid_S4U_util/S4U_Simulation.cpp b/src/wrench/simgrid_S4U_util/S4U_Simulation.cpp
index 5e033344ef..572cc4aee9 100644
--- a/src/wrench/simgrid_S4U_util/S4U_Simulation.cpp
+++ b/src/wrench/simgrid_S4U_util/S4U_Simulation.cpp
@@ -218,7 +218,7 @@ namespace wrench {
 
     /**
      * @brief Determines whether a link exists for a given linkname
-     * @param link_id: the name of the link
+     * @param linkname: the name of the link
      * @return true or false
      */
     bool S4U_Simulation::linkExists(std::string linkname) {
diff --git a/src/wrench/simulation/SimulationOutput.cpp b/src/wrench/simulation/SimulationOutput.cpp
index 337305e972..64393718b9 100644
--- a/src/wrench/simulation/SimulationOutput.cpp
+++ b/src/wrench/simulation/SimulationOutput.cpp
@@ -145,6 +145,7 @@ namespace wrench {
      * @param generate_host_utilization_layout: boolean specifying whether or not you would like a possible host utilization
      *        layout to be generated
      * @param include_disk: boolean specifying whether to include disk operation in JSON (disk timestamps must be enabled)
+     * @param include_bandwidth: boolean specifying whether to include link bandwidth measurements in JSON
      */
     void SimulationOutput::dumpUnifiedJSON(Workflow *workflow, std::string file_path,
                                            bool include_platform,
diff --git a/src/wrench/simulation/SimulationTimestampTypes.cpp b/src/wrench/simulation/SimulationTimestampTypes.cpp
index cf10f04951..644b195a1a 100644
--- a/src/wrench/simulation/SimulationTimestampTypes.cpp
+++ b/src/wrench/simulation/SimulationTimestampTypes.cpp
@@ -667,6 +667,7 @@ namespace wrench {
      * @param hostname: hostname being read from
      * @param mount: mountpoint of disk
      * @param bytes: number of bytes read
+     * @param counter: an integer ID
      */
     SimulationTimestampDiskRead::SimulationTimestampDiskRead(std::string hostname,
                                                              std::string mount,
diff --git a/tools/pegasus/src/PegasusWorkflowParser.cpp b/tools/pegasus/src/PegasusWorkflowParser.cpp
index 96127a4abb..78d6ea938d 100644
--- a/tools/pegasus/src/PegasusWorkflowParser.cpp
+++ b/tools/pegasus/src/PegasusWorkflowParser.cpp
@@ -23,26 +23,7 @@ WRENCH_LOG_CATEGORY(pegasus_workflow_parser, "Log category for PegasusWorkflowPa
 
 namespace wrench {
 
     /**
-     * @brief Create an abstract workflow based on a JSON file
-     *
-     * @param filename: the path to the JSON file
-     * @param reference_flop_rate: a reference compute speed (in flops/sec), assuming a task's computation is purely flops.
-     * This is needed because JSON files specify task execution times in seconds,
-     * but the WRENCH simulation needs some notion of "amount of computation" to
-     * apply reasonable scaling. (Because the XML platform description specifies host
-     * compute speeds in flops/sec). The times in the JSON file are thus assumed to be
-     * obtained on an machine with flop rate reference_flop_rate.
-     * @param redundant_dependencies: Workflows provided by Pegasus
-     * sometimes include control/data dependencies between tasks that are already induced by
-     * other control/data dependencies (i.e., they correspond to transitive
-     * closures or existing edges in the workflow graphs). Passing redundant_dependencies=true
-     * force these "redundant" dependencies to be added as edges in the workflow. Passing
-     * redundant_dependencies=false will ignore these "redundant" dependencies. Most users
-     * would likely pass "false".
-     * @return a workflow
-     *
-     * @throw std::invalid_argument
-     *
+     * Documentation in .h file
      */
     Workflow *PegasusWorkflowParser::createWorkflowFromJSON(const std::string &filename,
                                                             const std::string &reference_flop_rate,
@@ -194,25 +175,9 @@ namespace wrench {
         return workflow;
     }
 
+
     /**
-     * @brief Create an NON-abstract workflow based on a JSON file
-     *
-     * @param filename: the path to the JSON file
-     * @param reference_flop_rate: a reference compute speed (in flops/sec), assuming a task's computation is purely flops.
-     * This is needed because JSON files specify task execution times in seconds,
-     * but the WRENCH simulation needs some notion of "amount of computation" to
-     * apply reasonable scaling. (Because the XML platform description specifies host
-     * compute speeds in flops/sec). The times in the JSON file are thus assumed to be
-     * obtained on an machine with flop rate reference_flop_rate.
-     * @param redundant_dependencies: Workflows provided by Pegasus
-     * sometimes include control/data dependencies between tasks that are already induced by
-     * other control/data dependencies (i.e., they correspond to transitive
-     * closures or existing edges in the workflow graphs). Passing redundant_dependencies=true
-     * force these "redundant" dependencies to be added as edges in the workflow. Passing
-     * redundant_dependencies=false will ignore these "redundant" dependencies. Most users
-     * woudl likely pass "false".
-     * @return a workflow
-     * @throw std::invalid_argument
+     * Documentation in .h file
      */
     Workflow *PegasusWorkflowParser::createExecutableWorkflowFromJSON(const std::string &filename, const std::string &reference_flop_rate, bool redundant_dependencies) {
     }
 
     /**
-     * @brief Create an abstract workflow based on a DAX file
-     *
-     * @param filename: the path to the DAX file
-     * @param reference_flop_rate: a reference compute speed (in flops/sec), assuming a task's computation is purely flops.
-     * This is needed because DAX files specify task execution times in seconds,
-     * but the WRENCH simulation needs some notion of "amount of computation" to
-     * apply reasonable scaling. (Because the XML platform description specifies host
-     * compute speeds in flops/sec). The times in the DAX file are thus assumed to be
-     * obtained on an machine with flop rate reference_flop_rate.
-     * @param redundant_dependencies: Workflows provided by Pegasus
-     * sometimes include control/data dependencies between tasks that are already induced by
-     * other control/data dependencies (i.e., they correspond to transitive
-     * closures or existing edges in the workflow graphs).
-     * Passing redundant_dependencies=true
-     * force these "redundant" dependencies to be added as edges in the workflow. Passing
-     * redundant_dependencies=false will ignore these "redundant" dependencies. Most users
-     * would likely pass "false".
-     *
-     * @return a workflow
-     *
-     * @throw std::invalid_argument
-     */
+     * Documentation in .h file
+     */
     Workflow *PegasusWorkflowParser::createWorkflowFromDAX(const std::string &filename, const std::string &reference_flop_rate, bool redundant_dependencies) {
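As a point of reference for the parser API documented in this patch, the entry point can be used roughly as sketched below. This is a minimal, hedged illustration rather than official example code: the "montage.dax" file name and the "100Gf" reference flop rate are made-up example values, the wrench-dev.h umbrella include and the getNumberOfTasks() sanity check are assumed to be available in the reader's WRENCH version, and handling of the std::invalid_argument that the parser may throw is omitted.

    #include <iostream>
    #include <wrench-dev.h>  // assumed WRENCH developer umbrella header

    int main() {
        // Parse a Pegasus DAX file into an abstract wrench::Workflow. Task runtimes in
        // the DAX are interpreted as having been measured on a 100 Gflop/sec machine.
        wrench::Workflow *workflow = wrench::PegasusWorkflowParser::createWorkflowFromDAX(
                "montage.dax",  // hypothetical input DAX file
                "100Gf",        // reference flop rate (flops/sec)
                false);         // do not add redundant dependencies as edges

        std::cerr << "Parsed a workflow with " << workflow->getNumberOfTasks() << " tasks\n";
        return 0;
    }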
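Similarly, the ParallelModel::getWorkPerThread() contract documented earlier in this patch (total work in, per-thread work out) can be pictured with the stand-alone sketch below. The even split across threads is an assumption made purely for illustration; it is not WRENCH's actual Amdahl or constant-efficiency implementation.

    #include <vector>

    // Illustration of the getWorkPerThread() contract: given a total amount of work
    // (in flops) and a thread count, return the amount of work each thread should do.
    // Here the work is simply split evenly across all threads (assumes num_threads > 0).
    std::vector<double> exampleWorkPerThread(double total_work, unsigned long num_threads) {
        return std::vector<double>(num_threads, total_work / static_cast<double>(num_threads));
    }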