
Commit

Documentation fixes

henricasanova committed Sep 17, 2020
1 parent 6718537 commit 7a5774b
Showing 14 changed files with 120 additions and 62 deletions.
1 change: 1 addition & 0 deletions include/wrench/logging/TerminalOutput.h
@@ -91,6 +91,7 @@ namespace wrench {
static void endThisProcessColor();

static void disableColor();

static void disableLog();

/***********************/
3 changes: 3 additions & 0 deletions include/wrench/simgrid_S4U_util/S4U_VirtualMachine.h
@@ -27,6 +27,9 @@ namespace wrench {

public:

/**
* @brief A map to keep track of VM-to-PM mapping
*/
static std::unordered_map<std::string, std::string> vm_to_pm_map;

/** @brief VM state enum */
64 changes: 64 additions & 0 deletions include/wrench/tools/pegasus/PegasusWorkflowParser.h
@@ -24,10 +24,74 @@ namespace wrench {

public:

/**
* @brief Create an abstract workflow based on a DAX file
*
* @param filename: the path to the DAX file
* @param reference_flop_rate: a reference compute speed (in flops/sec), assuming a task's computation is purely flops.
* This is needed because DAX files specify task execution times in seconds,
* but the WRENCH simulation needs some notion of "amount of computation" to
* apply reasonable scaling (the XML platform description specifies host
* compute speeds in flops/sec). The times in the DAX file are thus assumed to have been
* obtained on a machine with flop rate reference_flop_rate.
* @param redundant_dependencies: Workflows provided by Pegasus
* sometimes include control/data dependencies between tasks that are already induced by
* other control/data dependencies (i.e., they correspond to transitive
* closures or existing edges in the workflow graph). Passing redundant_dependencies=true
* forces these "redundant" dependencies to be added as edges in the workflow; passing
* redundant_dependencies=false ignores them. Most users would likely pass "false".
*
* @return a workflow
*
* @throw std::invalid_argument
*/
static Workflow *createWorkflowFromDAX(const std::string &filename, const std::string &reference_flop_rate, bool redundant_dependencies = false);
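As a rough illustration of the documented scaling (a sketch, not code from the parser; all variable names and numbers are made up for the example), a task listed in the DAX with a 100-second runtime and a reference flop rate of 100 Gflop/sec is treated as 1e13 flop of work:

```cpp
// Sketch of the scaling described above, under assumed example values.
double dax_runtime = 100.0;             // seconds, as recorded in the DAX file
double reference_flop_rate = 100.0e9;   // flop/sec, i.e., a "100Gf" reference machine
double task_work = dax_runtime * reference_flop_rate;   // 1e13 flop of work

// On a simulated host whose platform XML declares 200 Gflop/sec,
// this task would then take 1e13 / 200e9 = 50 simulated seconds.
double simulated_runtime = task_work / 200.0e9;
```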

/**
* @brief Create an abstract workflow based on a JSON file
*
* @param filename: the path to the JSON file
* @param reference_flop_rate: a reference compute speed (in flops/sec), assuming a task's computation is purely flops.
* This is needed because JSON files specify task execution times in seconds,
* but the WRENCH simulation needs some notion of "amount of computation" to
* apply reasonable scaling (the XML platform description specifies host
* compute speeds in flops/sec). The times in the JSON file are thus assumed to have been
* obtained on a machine with flop rate reference_flop_rate.
* @param redundant_dependencies: Workflows provided by Pegasus
* sometimes include control/data dependencies between tasks that are already induced by
* other control/data dependencies (i.e., they correspond to transitive
* closures or existing edges in the workflow graph). Passing redundant_dependencies=true
* forces these "redundant" dependencies to be added as edges in the workflow; passing
* redundant_dependencies=false ignores them. Most users would likely pass "false".
*
* @return a workflow
*
* @throw std::invalid_argument
*/
static Workflow *createWorkflowFromJSON(const std::string &filename, const std::string &reference_flop_rate, bool redundant_dependencies = false);

/**
* @brief Create a non-abstract (i.e., executable) workflow based on a JSON file
*
* @param filename: the path to the JSON file
* @param reference_flop_rate: a reference compute speed (in flops/sec), assuming a task's computation is purely flops.
* This is needed because JSON files specify task execution times in seconds,
* but the WRENCH simulation needs some notion of "amount of computation" to
* apply reasonable scaling (the XML platform description specifies host
* compute speeds in flops/sec). The times in the JSON file are thus assumed to have been
* obtained on a machine with flop rate reference_flop_rate.
* @param redundant_dependencies: Workflows provided by Pegasus
* sometimes include control/data dependencies between tasks that are already induced by
* other control/data dependencies (i.e., they correspond to transitive
* closures or existing edges in the workflow graph). Passing redundant_dependencies=true
* forces these "redundant" dependencies to be added as edges in the workflow; passing
* redundant_dependencies=false ignores them. Most users would likely pass "false".
*
* @return a workflow
*
* @throw std::invalid_argument
*/
static Workflow *createExecutableWorkflowFromJSON(const std::string &filename, const std::string &reference_flop_rate, bool redundant_dependencies = false);

};
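For orientation, here is a minimal usage sketch of these parsers. The file names, the "1Gf" reference rate, and the umbrella header are assumptions made for the example, not part of this commit:

```cpp
#include <wrench-dev.h>

int main() {
    // Abstract workflow from a Pegasus DAX trace, assuming its runtimes were
    // measured on a 1 Gflop/sec machine; redundant dependencies are ignored.
    wrench::Workflow *dax_workflow =
        wrench::PegasusWorkflowParser::createWorkflowFromDAX("montage.dax", "1Gf", false);

    // Same idea for a JSON trace; only the input format differs.
    wrench::Workflow *json_workflow =
        wrench::PegasusWorkflowParser::createWorkflowFromJSON("montage.json", "1Gf", false);

    return 0;
}
```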
11 changes: 10 additions & 1 deletion include/wrench/workflow/DagOfTasks.h
@@ -24,15 +24,24 @@ namespace wrench {

class WorkflowTask;

/**
* @brief Data structure to store vertex properties
*/
struct VertexProperties {
// std::size_t index;
// boost::default_color_type color;
const WorkflowTask *task;
};

/**
* @brief Convenient DAG typedef
*/
typedef boost::adjacency_list<boost::listS, boost::vecS, boost::bidirectionalS, VertexProperties> DAG;

typedef unsigned long vertex_t; // To clean up later...
/**
* @brief Convenient vertex_t typedef
*/
typedef unsigned long vertex_t; // To clean up some day...
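The typedefs above are standard Boost Graph Library bundled-property machinery. As a hedged sketch of how such a DAG is typically manipulated with plain BGL calls (assuming the DAG, vertex_t, VertexProperties, and WorkflowTask types from this header; the task pointers are placeholders, and these are not the member functions of the class below):

```cpp
#include <boost/graph/adjacency_list.hpp>

// Placeholder tasks; in WRENCH these would be real WorkflowTask objects.
const WorkflowTask *parent_task = nullptr;
const WorkflowTask *child_task = nullptr;

DAG dag;

// add_vertex() with a bundled VertexProperties attaches a task to each vertex.
vertex_t u = boost::add_vertex(VertexProperties{parent_task}, dag);
vertex_t v = boost::add_vertex(VertexProperties{child_task}, dag);

// A directed edge u -> v encodes a control/data dependency between the tasks.
boost::add_edge(u, v, dag);

// The bundled property is retrieved with operator[] on the graph.
const WorkflowTask *t = dag[v].task;
```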

/**
* @brief An internal class that uses the Boost Graph Library to implement a DAG of WorkflowTask objects
3 changes: 3 additions & 0 deletions include/wrench/workflow/parallel_model/AmdahlParallelModel.h
@@ -16,6 +16,9 @@

namespace wrench {

/**
* @brief A class that defines an Amdahl's Law-based parallel task performance model.
*/
class AmdahlParallelModel : public ParallelModel {

public:
@@ -16,6 +16,9 @@

namespace wrench {

/**
* @brief A class that implements a constant-efficiency parallel task performance model.
*/
class ConstantEfficiencyParallelModel : public ParallelModel {

public:
3 changes: 3 additions & 0 deletions include/wrench/workflow/parallel_model/CustomParallelModel.h
@@ -17,6 +17,9 @@

namespace wrench {

/**
* @brief A class that defines a custom parallel task performance model.
*/
class CustomParallelModel : public ParallelModel {

public:
13 changes: 13 additions & 0 deletions include/wrench/workflow/parallel_model/ParallelModel.h
@@ -16,6 +16,10 @@

namespace wrench {

/**
* @brief A virtual class (with convenient static methods) to define
* parallel task performance models
*/
class ParallelModel {

public:
@@ -28,6 +32,15 @@ namespace wrench {
/** \cond INTERNAL **/
/***********************/

/**
* @brief A method that, for this parallel model, computes how much work each thread
* of a parallel task should perform
*
* @param total_work: the total amount of work (in flops)
* @param num_threads: the number of threads
*
* @return the amount of work (in flops) assigned to each thread
*/
virtual std::vector<double> getWorkPerThread(double total_work, unsigned long num_threads) = 0;
virtual ~ParallelModel() {};
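A minimal sketch of what an implementation of this contract could look like, using an Amdahl-style split in which an assumed sequential fraction is charged to one thread and the rest is divided evenly (illustrative only, not the code of the concrete model classes shipped with WRENCH):

```cpp
#include <vector>

// Hypothetical Amdahl-style work split: a sequential fraction alpha of the
// work goes to thread 0 on top of its share; the remaining (1 - alpha)
// fraction is divided evenly across all threads. Entries sum to total_work.
std::vector<double> amdahlWorkPerThread(double total_work, unsigned long num_threads,
                                        double alpha /* assumed sequential fraction */) {
    double parallel_share = (total_work * (1.0 - alpha)) / (double)num_threads;
    std::vector<double> work(num_threads, parallel_share);
    work[0] += total_work * alpha;  // the sequential portion runs on a single thread
    return work;
}
```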

@@ -77,7 +77,11 @@ namespace wrench {

/**
* @brief Method to get start time estimates
* @param set_of_jobs: the set of job configurations whose start times should be estimated
* @param set_of_jobs: the set of job configurations whose start times should
* be estimated. Each job configuration is: (i) a name; (ii) a number of nodes;
* (iii) a number of cores per node; and (iv) an execution duration in seconds.
*
* @return a map of estimated start dates (in seconds), indexed by job configuration name
*/
std::map<std::string, double> getStartTimeEstimates(std::set<std::tuple<std::string, unsigned long, unsigned long, double>> set_of_jobs) override;
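For illustration, the job-configuration set could be assembled as follows; the job names, sizes, and durations are made up, and the commented-out call assumes a scheduler object exposing this method:

```cpp
#include <map>
#include <set>
#include <string>
#include <tuple>

int main() {
    // Each entry: (name, number of nodes, cores per node, duration in seconds).
    std::set<std::tuple<std::string, unsigned long, unsigned long, double>> set_of_jobs;
    set_of_jobs.insert(std::make_tuple(std::string("job_small"), 1UL, 8UL, 600.0));
    set_of_jobs.insert(std::make_tuple(std::string("job_large"), 16UL, 32UL, 3600.0));

    // A scheduler implementing this interface would return estimated start
    // dates (in seconds) keyed by job name, e.g. {"job_small": 0.0, "job_large": 1820.5}:
    // std::map<std::string, double> estimates = scheduler->getStartTimeEstimates(set_of_jobs);
    return 0;
}
```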

@@ -18,6 +18,13 @@ WRENCH_LOG_CATEGORY(wrench_core_trace_file_loader, "Log category for Trace File

namespace wrench {

/**
* @brief A method to generate a random username, so that generated workload
* traces look more realistic
*
* @param userid: numerical userid
* @return a generated alphabetic username
*/
std::string generateRandomUsername(unsigned long userid) {
//Type of random number distribution
const char charset[] =
2 changes: 1 addition & 1 deletion src/wrench/simgrid_S4U_util/S4U_Simulation.cpp
@@ -218,7 +218,7 @@ namespace wrench {

/**
* @brief Determines whether a link exists for a given linkname
* @param link_id: the name of the link
* @param linkname: the name of the link
* @return true or false
*/
bool S4U_Simulation::linkExists(std::string linkname) {
1 change: 1 addition & 0 deletions src/wrench/simulation/SimulationOutput.cpp
@@ -145,6 +145,7 @@ namespace wrench {
* @param generate_host_utilization_layout: boolean specifying whether or not you would like a possible host utilization
* layout to be generated
* @param include_disk: boolean specifying whether to include disk operations in the JSON (disk timestamps must be enabled)
* @param include_bandwidth: boolean specifying whether to include link bandwidth measurements in JSON
*/
void SimulationOutput::dumpUnifiedJSON(Workflow *workflow, std::string file_path,
bool include_platform,
1 change: 1 addition & 0 deletions src/wrench/simulation/SimulationTimestampTypes.cpp
@@ -667,6 +667,7 @@ namespace wrench {
* @param hostname: hostname being read from
* @param mount: mountpoint of disk
* @param bytes: number of bytes read
* @param counter: An integer ID
*/
SimulationTimestampDiskRead::SimulationTimestampDiskRead(std::string hostname,
std::string mount,
64 changes: 5 additions & 59 deletions tools/pegasus/src/PegasusWorkflowParser.cpp
@@ -23,26 +23,7 @@ WRENCH_LOG_CATEGORY(pegasus_workflow_parser, "Log category for PegasusWorkflowPa
namespace wrench {

/**
* @brief Create an abstract workflow based on a JSON file
*
* @param filename: the path to the JSON file
* @param reference_flop_rate: a reference compute speed (in flops/sec), assuming a task's computation is purely flops.
* This is needed because JSON files specify task execution times in seconds,
* but the WRENCH simulation needs some notion of "amount of computation" to
* apply reasonable scaling. (Because the XML platform description specifies host
* compute speeds in flops/sec). The times in the JSON file are thus assumed to be
* obtained on an machine with flop rate reference_flop_rate.
* @param redundant_dependencies: Workflows provided by Pegasus
* sometimes include control/data dependencies between tasks that are already induced by
* other control/data dependencies (i.e., they correspond to transitive
* closures or existing edges in the workflow graphs). Passing redundant_dependencies=true
* force these "redundant" dependencies to be added as edges in the workflow. Passing
* redundant_dependencies=false will ignore these "redundant" dependencies. Most users
* would likely pass "false".
* @return a workflow
*
* @throw std::invalid_argument
*
* Documentation in .h file
*/
Workflow *PegasusWorkflowParser::createWorkflowFromJSON(const std::string &filename,
const std::string &reference_flop_rate,
@@ -194,53 +175,18 @@ namespace wrench {
return workflow;
}


/**
* @brief Create an NON-abstract workflow based on a JSON file
*
* @param filename: the path to the JSON file
* @param reference_flop_rate: a reference compute speed (in flops/sec), assuming a task's computation is purely flops.
* This is needed because JSON files specify task execution times in seconds,
* but the WRENCH simulation needs some notion of "amount of computation" to
* apply reasonable scaling. (Because the XML platform description specifies host
* compute speeds in flops/sec). The times in the JSON file are thus assumed to be
* obtained on an machine with flop rate reference_flop_rate.
* @param redundant_dependencies: Workflows provided by Pegasus
* sometimes include control/data dependencies between tasks that are already induced by
* other control/data dependencies (i.e., they correspond to transitive
* closures or existing edges in the workflow graphs). Passing redundant_dependencies=true
* force these "redundant" dependencies to be added as edges in the workflow. Passing
* redundant_dependencies=false will ignore these "redundant" dependencies. Most users
* woudl likely pass "false".
* @return a workflow
* @throw std::invalid_argument
* Documentation in .h file
*/
Workflow *PegasusWorkflowParser::createExecutableWorkflowFromJSON(const std::string &filename, const std::string &reference_flop_rate,
bool redundant_dependencies) {
throw std::runtime_error("PegasusWorkflowParser::createExecutableWorkflowFromJSON(): not implemented yet");
}

/**
* @brief Create an abstract workflow based on a DAX file
*
* @param filename: the path to the DAX file
* @param reference_flop_rate: a reference compute speed (in flops/sec), assuming a task's computation is purely flops.
* This is needed because DAX files specify task execution times in seconds,
* but the WRENCH simulation needs some notion of "amount of computation" to
* apply reasonable scaling. (Because the XML platform description specifies host
* compute speeds in flops/sec). The times in the DAX file are thus assumed to be
* obtained on an machine with flop rate reference_flop_rate.
* @param redundant_dependencies: Workflows provided by Pegasus
* sometimes include control/data dependencies between tasks that are already induced by
* other control/data dependencies (i.e., they correspond to transitive
* closures or existing edges in the workflow graphs). Passing redundant_dependencies=true
* force these "redundant" dependencies to be added as edges in the workflow. Passing
* redundant_dependencies=false will ignore these "redundant" dependencies. Most users
* would likely pass "false".
*
* @return a workflow
*
* @throw std::invalid_argument
*/
* Documentation in .h file
*/
Workflow *PegasusWorkflowParser::createWorkflowFromDAX(const std::string &filename, const std::string &reference_flop_rate,
bool redundant_dependencies) {
