From 7ea84a46c8a65083225fde42e9e14585cf87297b Mon Sep 17 00:00:00 2001 From: maxpaik16 Date: Thu, 15 Feb 2024 00:20:47 -0500 Subject: [PATCH 1/9] Add doxygen style comments --- src/polysolve/linear/Solver.hpp | 42 ++++++++-------- src/polysolve/nonlinear/Problem.hpp | 45 ++++++++++++++++- src/polysolve/nonlinear/Solver.hpp | 48 +++++++++++++++---- .../descent_strategies/DescentStrategy.hpp | 14 ++++++ .../nonlinear/line_search/LineSearch.hpp | 38 ++++++++++++++- 5 files changed, 156 insertions(+), 31 deletions(-) diff --git a/src/polysolve/linear/Solver.hpp b/src/polysolve/linear/Solver.hpp index deaae018..0c977804 100644 --- a/src/polysolve/linear/Solver.hpp +++ b/src/polysolve/linear/Solver.hpp @@ -51,22 +51,22 @@ namespace polysolve::linear /// Selects the correct solver based on params using the fallback or the list of solvers if necessary static void select_valid_solver(json ¶ms, spdlog::logger &logger); - // Static constructor - // - // @param[in] params Parameter of the solver, including name and preconditioner - // @param[in] logger Logger used for error - // @param[in] strict_validation strict validation of the input paraams - // @param[out] a pointer to a linear solver + /// @brief Static constructor + /// + /// @param[in] params Parameter of the solver, including name and preconditioner + /// @param[in] logger Logger used for error + /// @param[in] strict_validation strict validation of the input paraams + /// @return a pointer to a linear solver // static std::unique_ptr create(const json ¶ms, spdlog::logger &logger, const bool strict_validation = true); - // Static constructor - // - // @param[in] solver Solver type - // @param[in] precond Preconditioner for iterative solvers - // + /// @brief Static constructor + /// + /// @param[in] solver Solver type + /// @param[in] precond Preconditioner for iterative solvers + /// static std::unique_ptr create(const std::string &solver, const std::string &precond); // List available solvers @@ -107,15 +107,15 @@ namespace polysolve::linear // If solver uses dense matrices virtual bool is_dense() const { return false; } - // - // @brief { Solve the linear system Ax = b } - // - // @param[in] b { Right-hand side. } - // @param[in,out] x { Unknown to compute. When using an iterative - // solver, the input unknown vector is used as an - // initial guess, and must thus be properly allocated - // and initialized. } - // + /// + /// @brief { Solve the linear system Ax = b } + /// + /// @param[in] b { Right-hand side. } + /// @param[in,out] x { Unknown to compute. When using an iterative + /// solver, the input unknown vector is used as an + /// initial guess, and must thus be properly allocated + /// and initialized. } + /// virtual void solve(const Ref b, Ref x) = 0; public: @@ -123,7 +123,7 @@ namespace polysolve::linear // Debug // /////////// - // Name of the solver type (for debugging purposes) + /// @brief Name of the solver type (for debugging purposes) virtual std::string name() const { return ""; } }; diff --git a/src/polysolve/nonlinear/Problem.hpp b/src/polysolve/nonlinear/Problem.hpp index 93fa0db6..f4ed35d2 100644 --- a/src/polysolve/nonlinear/Problem.hpp +++ b/src/polysolve/nonlinear/Problem.hpp @@ -11,6 +11,7 @@ namespace polysolve::nonlinear { + /// @brief Class defining optimization problem to be solved. 
To be defined by user code class Problem : public cppoptlib::Problem { public: @@ -26,23 +27,65 @@ namespace polysolve::nonlinear virtual void init(const TVector &x0) {} + /// @brief Defines function to be optimized + /// @param x input vector (n x 1) + /// @return value of function virtual double value(const TVector &x) override = 0; + + /// @brief Defines gradient of objective function + /// @param[in] x input vector (n x 1) + /// @param[out] gradv gradient vector (n x 1) virtual void gradient(const TVector &x, TVector &gradv) override = 0; + + /// @brief Defines Hessian of objective function + /// @param[in] x input vector (n x 1) + /// @param[out] hessian hessian matrix (n x n) virtual void hessian(const TVector &x, THessian &hessian) = 0; - + + /// @brief Determine if a step from solution x0 to solution x1 is allowed + /// @param x0 Current solution + /// @param x1 Proposed next solution + /// @return True if the step is allowed virtual bool is_step_valid(const TVector &x0, const TVector &x1) { return true; } + + /// @param x0 Current solution (step size = 0) + /// @param x1 Next solution (step size = 1) + /// @return Maximum allowable step size virtual double max_step_size(const TVector &x0, const TVector &x1) { return 1; } + /// @brief Initialize variables used during the line search + /// @param x0 Current solution + /// @param x1 Next solution virtual void line_search_begin(const TVector &x0, const TVector &x1) {} + + /// @brief Clear variables used during the line search virtual void line_search_end() {} + + /// @brief Update fields after a step in the optimization + /// @param iter_num Optimization iteration number + /// @param x Current solution + /// @param data Data containing info about the current iteration virtual void post_step(const PostStepData &data) {} + /// @brief Set project to psd + /// @param val If true, the form's second derivative is projected to be positive semidefinite virtual void set_project_to_psd(bool val) {} + /// @brief Update cached fields upon a change in the solution + /// @param new_x New solution virtual void solution_changed(const TVector &new_x) {} virtual bool stop(const TVector &x) { return false; } + /// @brief Sample function along a given direction + /// @param[in] x starting input value (n x 1) + /// @param[in] direction direction along which to sample (n x 1) + /// @param[in] start starting step size + /// @param[in] end ending step size + /// @param[in] num_samples total number of samples + /// @param[out] alphas step sizes (num_samples x 1) + /// @param[out] fs function values along the given direction (num_samples x 1) + /// @param[out] valid whether or not the given step is valid (num_samples x 1) void sample_along_direction( const Problem::TVector &x, const Problem::TVector &direction, diff --git a/src/polysolve/nonlinear/Solver.hpp b/src/polysolve/nonlinear/Solver.hpp index c66a8568..1b4b2366 100644 --- a/src/polysolve/nonlinear/Solver.hpp +++ b/src/polysolve/nonlinear/Solver.hpp @@ -31,11 +31,11 @@ namespace polysolve::nonlinear class Solver : public cppoptlib::ISolver { public: - // Static constructor - // - // @param[in] solver Solver type - // @param[in] precond Preconditioner for iterative solvers - // + /// @brief Static constructor + /// @param solver_params JSON of general solver parameters + /// @param linear_solver_params JSON of linear solver parameters + /// @param logger + /// @param strict_validation whether or not to strictly validate input JSON static std::unique_ptr create( const json &solver_params, const json 
&linear_solver_params, @@ -43,7 +43,7 @@ namespace polysolve::nonlinear spdlog::logger &logger, const bool strict_validation = true); - // List available solvers + /// @brief List available solvers static std::vector available_solvers(); using Superclass = ISolver; @@ -59,12 +59,23 @@ namespace polysolve::nonlinear const double characteristic_length, spdlog::logger &logger); + /// @brief Set number of iterations array + /// @param solver_params JSON of solver parameters (incuding itereations_per_strategy) void set_strategies_iterations(const json &solver_params); + /// @brief Return norm of gradient + /// @param x why is this included? + /// @param grad gradient (n x 1) vector + /// @return norm of given gradient virtual double compute_grad_norm(const Eigen::VectorXd &x, const Eigen::VectorXd &grad) const; + /// @brief Create LineSearch object to be used by solver + /// @param params JSON of solver parameters void set_line_search(const json ¶ms); + /// @brief Find argument that minimizes the objective function (loops through all strategies and terminates according to stopping criteria) + /// @param objFunc Problem to be minimized + /// @param x initial value for function input (n x 1) void minimize(Problem &objFunc, TVector &x) override; const json &get_info() const { return solver_info; } @@ -74,6 +85,8 @@ namespace polysolve::nonlinear const typename Superclass::TCriteria &getStopCriteria() { return this->m_stop; } // setStopCriteria already in ISolver + /// @brief Checks whether or not the solver has converged based on x delta, f delta, and grad norm + /// @return Boolean representing whether or not the solver has converged. bool converged() const { return this->m_status == cppoptlib::Status::XDeltaTolerance @@ -85,11 +98,19 @@ namespace polysolve::nonlinear size_t &max_iterations() { return this->m_stop.iterations; } bool allow_out_of_iterations = false; + /// @brief Add given descent strategy to solver + /// @param s strategy to be added void add_strategy(const std::shared_ptr &s) { m_strategies.push_back(s); } const std::shared_ptr &line_search() const { return m_line_search; }; protected: + /// @brief Compute direction in which the argument should be updated + /// @param objFunc Problem to be minimized + /// @param x Current input (n x 1) + /// @param grad Gradient at current step (n x 1) + /// @param[out] direction Current update direction (n x 1) + /// @return True if update direction was found, False otherwises virtual bool compute_update_direction( Problem &objFunc, const TVector &x, @@ -109,6 +130,10 @@ namespace polysolve::nonlinear FiniteDiffStrategy gradient_fd_strategy = FiniteDiffStrategy::NONE; double gradient_fd_eps = 1e-7; + /// @brief Check gradient versus finite difference results + /// @param objFunc Problem defining relevant objective function + /// @param x Current input (n x 1) + /// @param grad Current gradient (n x 1) virtual void verify_gradient(Problem &objFunc, const TVector &x, const TVector &grad) final; private: @@ -126,8 +151,9 @@ namespace polysolve::nonlinear // ==================================================================== // Solver state // ==================================================================== - - // Reset the solver at the start of a minimization + + /// @brief Reset the solver at the start of a minimization + /// @param ndof number of degrees of freedom void reset(const int ndof); std::string descent_strategy_name() const { return m_strategies[m_descent_strategy]->name(); }; @@ -141,8 +167,14 @@ namespace polysolve::nonlinear // 
Solver info // ==================================================================== + /// @brief Update solver info JSON object + /// @param energy void update_solver_info(const double energy); + + /// @brief Reset timing members to 0 void reset_times(); + + /// @brief Log time taken in different phases of the solve void log_times(); json solver_info; diff --git a/src/polysolve/nonlinear/descent_strategies/DescentStrategy.hpp b/src/polysolve/nonlinear/descent_strategies/DescentStrategy.hpp index 6e3f8579..c7a8928b 100644 --- a/src/polysolve/nonlinear/descent_strategies/DescentStrategy.hpp +++ b/src/polysolve/nonlinear/descent_strategies/DescentStrategy.hpp @@ -13,6 +13,10 @@ namespace polysolve::nonlinear using TVector = Problem::TVector; using Scalar = Problem::Scalar; + /// @brief Constructor + /// @param solver_params_ JSON of solver parameters + /// @param characteristic_length + /// @param logger DescentStrategy(const json &solver_params_, const double characteristic_length, spdlog::logger &logger) @@ -24,11 +28,21 @@ namespace polysolve::nonlinear virtual void reset(const int ndof) {} virtual void reset_times() {} + + /// @brief Update solver info after finding descent direction + /// @param solver_info JSON of solver parameters + /// @param per_iteration Number of iterations (used to normalize timings) virtual void update_solver_info(json &solver_info, const double per_iteration) {} virtual bool is_direction_descent() { return true; } virtual bool handle_error() { return false; } + /// @brief Compute descent direction along which to do line search + /// @param objFunc Problem to be minimized + /// @param x Current input (n x 1) + /// @param grad Current gradient (n x 1) + /// @param direction Current descent direction (n x 1) + /// @return True if a descent direction was successfully found virtual bool compute_update_direction( Problem &objFunc, const TVector &x, diff --git a/src/polysolve/nonlinear/line_search/LineSearch.hpp b/src/polysolve/nonlinear/line_search/LineSearch.hpp index e195955b..80327b5f 100644 --- a/src/polysolve/nonlinear/line_search/LineSearch.hpp +++ b/src/polysolve/nonlinear/line_search/LineSearch.hpp @@ -15,18 +15,32 @@ namespace polysolve::nonlinear::line_search using Scalar = typename Problem::Scalar; using TVector = typename Problem::TVector; + /// @brief Constructor for creating new LineSearch Object + /// @param params JSON of solver parameters + /// @param m_logger LineSearch(const json ¶ms, spdlog::logger &m_logger); virtual ~LineSearch() = default; + /// @brief + /// @param x Current input vector (n x 1) + /// @param x_delta Current descent direction (n x 1) + /// @param objFunc Objective function + /// @return double line_search( const TVector &x, - const TVector &grad, + const TVector &x_delta, Problem &objFunc); + /// @brief Dispatch function for creating appropriate subclass + /// @param params JSON of solver parameters + /// @param logger + /// @return Pointer to object of the specified subclass static std::shared_ptr create( const json ¶ms, spdlog::logger &logger); + /// @brief Get list of available method names + /// @return Vector of names of available methods static std::vector available_methods(); void reset_times() @@ -81,6 +95,15 @@ namespace polysolve::nonlinear::line_search double step_ratio; + /// @brief Compute step size to use during line search + /// @param x Current input (n x 1) + /// @param delta_x Current step direction (n x 1) + /// @param objFunc Problem to be minimized + /// @param use_grad_norm Whether to compare grad norm or 
energy norm in stopping criteria + /// @param old_energy Previous energy (scalar) + /// @param old_grad Previous gradient (n x 1) + /// @param starting_step_size Initial step size + /// @return Step size to use in line search virtual double compute_descent_step_size( const TVector &x, const TVector &delta_x, @@ -91,12 +114,25 @@ namespace polysolve::nonlinear::line_search const double starting_step_size) = 0; private: + /// @brief Compute step size that avoids nan/infinite energy + /// @param x Current input (n x 1) + /// @param delta_x Current step direction (n x 1) + /// @param objFunc Problem to be minimized + /// @param starting_step_size Initial step size + /// @param rate Rate at which to decrease step size if too large + /// @return Nan free step size double compute_nan_free_step_size( const TVector &x, const TVector &delta_x, Problem &objFunc, const double starting_step_size, const double rate); + /// @brief Compute step size that is collision free + /// @param x Current input (n x 1) + /// @param delta_x Current step direction (n x 1) + /// @param objFunc Problem to be minimized + /// @param starting_step_size Initial step size + /// @return Collision free step size (NaN if it is 0) double compute_collision_free_step_size( const TVector &x, const TVector &delta_x, From 118d4b3a8fc9b5579be51e7416fb67b69a279af4 Mon Sep 17 00:00:00 2001 From: maxpaik16 Date: Thu, 15 Feb 2024 13:48:20 -0500 Subject: [PATCH 2/9] rename function --- src/polysolve/nonlinear/line_search/LineSearch.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/polysolve/nonlinear/line_search/LineSearch.cpp b/src/polysolve/nonlinear/line_search/LineSearch.cpp index e3f9cb7f..45ac714d 100644 --- a/src/polysolve/nonlinear/line_search/LineSearch.cpp +++ b/src/polysolve/nonlinear/line_search/LineSearch.cpp @@ -139,7 +139,7 @@ namespace polysolve::nonlinear::line_search { POLYSOLVE_SCOPED_STOPWATCH("CCD narrow-phase", ccd_time, m_logger); m_logger.trace("Performing narrow-phase CCD"); - step_size = compute_collision_free_step_size(x, delta_x, objFunc, step_size); + step_size = compute_max_step_size(x, delta_x, objFunc, step_size); if (std::isnan(step_size)) return NaN; } @@ -237,7 +237,8 @@ namespace polysolve::nonlinear::line_search return step_size; } - double LineSearch::compute_collision_free_step_size( + // change to max_step_size + double LineSearch::compute_max_step_size( const TVector &x, const TVector &delta_x, Problem &objFunc, From 7ecaca3874a26e391360a50020cf388119add93b Mon Sep 17 00:00:00 2001 From: maxpaik16 Date: Thu, 15 Feb 2024 13:48:44 -0500 Subject: [PATCH 3/9] rename function --- src/polysolve/nonlinear/line_search/LineSearch.hpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/polysolve/nonlinear/line_search/LineSearch.hpp b/src/polysolve/nonlinear/line_search/LineSearch.hpp index 80327b5f..9440c623 100644 --- a/src/polysolve/nonlinear/line_search/LineSearch.hpp +++ b/src/polysolve/nonlinear/line_search/LineSearch.hpp @@ -127,13 +127,13 @@ namespace polysolve::nonlinear::line_search Problem &objFunc, const double starting_step_size, const double rate); - /// @brief Compute step size that is collision free + /// @brief Compute maximum valid step size /// @param x Current input (n x 1) /// @param delta_x Current step direction (n x 1) /// @param objFunc Problem to be minimized /// @param starting_step_size Initial step size - /// @return Collision free step size (NaN if it is 0) - double compute_collision_free_step_size( + /// @return Maximum valid 
step size (NaN if it is 0) + double compute_max_step_size( const TVector &x, const TVector &delta_x, Problem &objFunc, From 1b142d3ae2a91522bea964b7f9f63753dec8739b Mon Sep 17 00:00:00 2001 From: Zachary Ferguson Date: Sat, 24 Feb 2024 01:31:16 -0500 Subject: [PATCH 4/9] Fix nonlinear solver convergence check (#72) * Refactor print function and improve logging messages * Fix nonlinear solver --- src/polysolve/nonlinear/Criteria.cpp | 47 +++++++-------- src/polysolve/nonlinear/Criteria.hpp | 7 ++- src/polysolve/nonlinear/Solver.cpp | 85 ++++++++++++++-------------- src/polysolve/nonlinear/Solver.hpp | 2 +- 4 files changed, 73 insertions(+), 68 deletions(-) diff --git a/src/polysolve/nonlinear/Criteria.cpp b/src/polysolve/nonlinear/Criteria.cpp index 37fed882..97ef6020 100644 --- a/src/polysolve/nonlinear/Criteria.cpp +++ b/src/polysolve/nonlinear/Criteria.cpp @@ -2,6 +2,8 @@ // License: MIT #include "Criteria.hpp" +#include + namespace polysolve::nonlinear { bool is_converged_status(const Status s) @@ -27,13 +29,9 @@ namespace polysolve::nonlinear void Criteria::print(std::ostream &os) const { - os << "Iterations: " << iterations << std::endl; - os << "xDelta: " << xDelta << std::endl; - os << "fDelta: " << fDelta << std::endl; - os << "GradNorm: " << gradNorm << std::endl; - os << "xDeltaDotGrad: " << xDeltaDotGrad << std::endl; - os << "FirstGradNorm: " << firstGradNorm << std::endl; - os << "fDeltaCount: " << fDeltaCount << std::endl; + os << fmt::format( + "iters={:d} Δf={:g} ‖∇f‖={:g} ‖Δx‖={:g} Δx⋅∇f(x)={:g}", + iterations, fDelta, gradNorm, xDelta, xDeltaDotGrad); } Status checkConvergence(const Criteria &stop, const Criteria ¤t) @@ -42,6 +40,11 @@ namespace polysolve::nonlinear { return Status::IterationLimit; } + const double stopGradNorm = current.iterations == 0 ? stop.firstGradNorm : stop.gradNorm; + if (stopGradNorm > 0 && current.gradNorm < stopGradNorm) + { + return Status::GradNormTolerance; + } if (stop.xDelta > 0 && current.xDelta < stop.xDelta) { return Status::XDeltaTolerance; @@ -50,11 +53,6 @@ namespace polysolve::nonlinear { return Status::FDeltaTolerance; } - const double stopGradNorm = current.iterations == 0 ? 
stop.firstGradNorm : stop.gradNorm; - if (stopGradNorm > 0 && current.gradNorm < stopGradNorm) - { - return Status::GradNormTolerance; - } // Δx⋅∇f ≥ 0 means that the search direction is not a descent direction if (stop.xDeltaDotGrad < 0 && current.xDeltaDotGrad > stop.xDeltaDotGrad) { @@ -68,37 +66,40 @@ namespace polysolve::nonlinear switch (s) { case Status::NotStarted: - os << "Solver not started."; + os << "Solver not started"; break; case Status::Continue: - os << "Convergence criteria not reached."; + os << "Convergence criteria not reached"; break; case Status::IterationLimit: - os << "Iteration limit reached."; + os << "Iteration limit reached"; break; case Status::XDeltaTolerance: - os << "Change in parameter vector too small."; + os << "Change in parameter vector too small"; break; case Status::FDeltaTolerance: - os << "Change in cost function value too small."; + os << "Change in cost function value too small"; break; case Status::GradNormTolerance: - os << "Gradient vector norm too small."; + os << "Gradient vector norm too small"; break; case Status::ObjectiveCustomStop: - os << "Objective function specified to stop."; + os << "Objective function specified to stop"; break; case Status::NanEncountered: - os << "Objective or gradient function returned NaN."; + os << "Objective or gradient function returned NaN"; break; case Status::NotDescentDirection: - os << "Search direction not a descent direction."; + os << "Search direction not a descent direction"; break; case Status::LineSearchFailed: - os << "Line search failed."; + os << "Line search failed"; + break; + case Status::UpdateDirectionFailed: + os << "Update direction could not be computed"; break; default: - os << "Unknown status."; + os << "Unknown status"; break; } return os; diff --git a/src/polysolve/nonlinear/Criteria.hpp b/src/polysolve/nonlinear/Criteria.hpp index f318ed5c..86a89e17 100644 --- a/src/polysolve/nonlinear/Criteria.hpp +++ b/src/polysolve/nonlinear/Criteria.hpp @@ -19,9 +19,10 @@ namespace polysolve::nonlinear GradNormTolerance, ///< The norm of the gradient vector is below the tolerance ObjectiveCustomStop, ///< The objective function specified to stop // Failure cases - NanEncountered, ///< The objective function returned NaN - NotDescentDirection, ///< The search direction is not a descent direction - LineSearchFailed, ///< The line search failed + NanEncountered, ///< The objective function returned NaN + NotDescentDirection, ///< The search direction is not a descent direction + LineSearchFailed, ///< The line search failed + UpdateDirectionFailed, ///< The update direction could not be computed }; bool is_converged_status(const Status s); diff --git a/src/polysolve/nonlinear/Solver.cpp b/src/polysolve/nonlinear/Solver.cpp index 976115e6..5bc42d25 100644 --- a/src/polysolve/nonlinear/Solver.cpp +++ b/src/polysolve/nonlinear/Solver.cpp @@ -205,6 +205,7 @@ namespace polysolve::nonlinear m_stop.xDeltaDotGrad = -solver_params["advanced"]["derivative_along_delta_x_tol"].get(); // Make these relative to the characteristic length + m_logger.trace("Using a characteristic length of {:g}", characteristic_length); m_stop.xDelta *= characteristic_length; m_stop.fDelta *= characteristic_length; m_stop.gradNorm *= characteristic_length; @@ -277,11 +278,8 @@ namespace polysolve::nonlinear stop_watch.start(); m_logger.debug( - "Starting {} with {} solve f₀={:g} " - "(stopping criteria: max_iters={:d} Δf={:g} ‖∇f‖={:g} ‖Δx‖={:g})", - descent_strategy_name(), m_line_search->name(), - objFunc(x), m_stop.iterations, 
- m_stop.fDelta, m_stop.gradNorm, m_stop.xDelta); + "Starting {} with {} solve f₀={:g} (stopping criteria: {})", + descent_strategy_name(), m_line_search->name(), objFunc(x), m_stop); update_solver_info(objFunc(x)); objFunc.post_step(PostStepData(m_current.iterations, solver_info, x, grad)); @@ -325,6 +323,9 @@ namespace polysolve::nonlinear log_and_throw_error(m_logger, "[{}][{}] Gradient is nan; stopping", descent_strategy_name(), m_line_search->name()); } + // Check convergence without these values to avoid impossible linear solves. + m_current.xDelta = NaN; + m_current.xDeltaDotGrad = NaN; m_status = checkConvergence(m_stop, m_current); if (m_status != Status::Continue) break; @@ -332,49 +333,55 @@ namespace polysolve::nonlinear // --- Update direction -------------------------------------------- const bool ok = compute_update_direction(objFunc, x, grad, delta_x); - m_current.xDeltaDotGrad = delta_x.dot(grad); + m_current.xDelta = delta_x.norm(); - if (!ok || (m_strategies[m_descent_strategy]->is_direction_descent() && m_current.gradNorm != 0 && m_current.xDeltaDotGrad >= 0)) + if (!ok || std::isnan(m_current.xDelta)) { - const std::string current_name = descent_strategy_name(); - + const auto current_name = descent_strategy_name(); if (!m_strategies[m_descent_strategy]->handle_error()) ++m_descent_strategy; if (m_descent_strategy >= m_strategies.size()) { - m_status = Status::NotDescentDirection; + m_status = Status::UpdateDirectionFailed; log_and_throw_error( - m_logger, "[{}][{}] direction is not a descent direction on last strategy (‖Δx‖={:g}; ‖g‖={:g}; Δx⋅g={:g}≥0); stopping", - current_name, m_line_search->name(), delta_x.norm(), compute_grad_norm(x, grad), m_current.xDeltaDotGrad); - } - else - { - m_status = Status::Continue; - m_logger.debug( - "[{}][{}] direction is not a descent direction (‖Δx‖={:g}; ‖g‖={:g}; Δx⋅g={:g}≥0); reverting to {}", - current_name, m_line_search->name(), delta_x.norm(), compute_grad_norm(x, grad), m_current.xDeltaDotGrad, - descent_strategy_name()); + m_logger, "[{}][{}] {} on last strategy; stopping", + current_name, m_line_search->name(), m_status); } + + m_logger.debug( + "[{}][{}] {}; reverting to {}", current_name, m_line_search->name(), + Status::UpdateDirectionFailed, descent_strategy_name()); + m_status = Status::Continue; continue; } - m_current.xDelta = delta_x.norm(); - if (std::isnan(m_current.xDelta)) + m_current.xDeltaDotGrad = delta_x.dot(grad); + + if (m_strategies[m_descent_strategy]->is_direction_descent() && m_current.gradNorm != 0 && m_current.xDeltaDotGrad >= 0) { - const auto current_name = descent_strategy_name(); + const std::string current_name = descent_strategy_name(); + if (!m_strategies[m_descent_strategy]->handle_error()) ++m_descent_strategy; if (m_descent_strategy >= m_strategies.size()) { - m_status = Status::NanEncountered; - log_and_throw_error(m_logger, "[{}][{}] Δx is nan on last strategy; stopping", - current_name, m_line_search->name()); + m_status = Status::NotDescentDirection; + log_and_throw_error( + m_logger, "[{}][{}] {} on last strategy (‖Δx‖={:g}; ‖g‖={:g}; Δx⋅g={:g}≥0); stopping", + current_name, m_line_search->name(), m_status, delta_x.norm(), compute_grad_norm(x, grad), + m_current.xDeltaDotGrad); + } + else + { + m_status = Status::Continue; + m_logger.debug( + "[{}][{}] {} (‖Δx‖={:g}; ‖g‖={:g}; Δx⋅g={:g}≥0); reverting to {}", + current_name, m_line_search->name(), Status::NotDescentDirection, + delta_x.norm(), compute_grad_norm(x, grad), m_current.xDeltaDotGrad, + descent_strategy_name()); } - - 
m_logger.debug("[{}][{}] Δx is nan; reverting to {}", current_name, m_line_search->name(), descent_strategy_name()); - m_status = Status::Continue; continue; } @@ -442,6 +449,9 @@ namespace polysolve::nonlinear // ----------- const double step = (rate * delta_x).norm(); + // m_logger.debug("[{}][{}] rate={:g} ‖step‖={:g}", + // descent_strategy_name(), m_line_search->name(), rate, step); + update_solver_info(energy); objFunc.post_step(PostStepData(m_current.iterations, solver_info, x, grad)); @@ -454,12 +464,8 @@ namespace polysolve::nonlinear m_current.fDeltaCount = (m_current.fDelta < m_stop.fDelta) ? (m_current.fDeltaCount + 1) : 0; m_logger.debug( - "[{}][{}] iter={:d} f={:g} Δf={:g} ‖∇f‖={:g} ‖Δx‖={:g} Δx⋅∇f(x)={:g} rate={:g} ‖step‖={:g}" - " (stopping criteria: max_iters={:d} Δf={:g} ‖∇f‖={:g} ‖Δx‖={:g})", - descent_strategy_name(), m_line_search->name(), - m_current.iterations, energy, m_current.fDelta, - m_current.gradNorm, m_current.xDelta, delta_x.dot(grad), rate, step, - m_stop.iterations, m_stop.fDelta, m_stop.gradNorm, m_stop.xDelta); + "[{}][{}] {} (stopping criteria: {})", + descent_strategy_name(), m_line_search->name(), m_current, m_stop); if (++m_current.iterations >= m_stop.iterations) m_status = Status::IterationLimit; @@ -480,12 +486,9 @@ namespace polysolve::nonlinear const bool succeeded = m_status == Status::GradNormTolerance; m_logger.log( succeeded ? spdlog::level::info : spdlog::level::err, - "[{}][{}] Finished: {} Took {:g}s (niters={:d} f={:g} Δf={:g} ‖∇f‖={:g} ‖Δx‖={:g})" - " (stopping criteria: max_iters={:d} Δf={:g} ‖∇f‖={:g} ‖Δx‖={:g})", - descent_strategy_name(), m_line_search->name(), - m_status, tot_time, m_current.iterations, - old_energy, m_current.fDelta, m_current.gradNorm, m_current.xDelta, - m_stop.iterations, m_stop.fDelta, m_stop.gradNorm, m_stop.xDelta); + "[{}][{}] Finished: {} took {:g}s ({}) (stopping criteria: {})", + descent_strategy_name(), m_line_search->name(), m_status, tot_time, + m_current, m_stop); log_times(); update_solver_info(objFunc(x)); diff --git a/src/polysolve/nonlinear/Solver.hpp b/src/polysolve/nonlinear/Solver.hpp index c00f7397..97c80bb3 100644 --- a/src/polysolve/nonlinear/Solver.hpp +++ b/src/polysolve/nonlinear/Solver.hpp @@ -73,7 +73,7 @@ namespace polysolve::nonlinear Criteria &stop_criteria() { return m_stop; } const Criteria &stop_criteria() const { return m_stop; } const Criteria ¤t_criteria() const { return m_current; } - const Status &status() const { return m_status; } + Status status() const { return m_status; } void set_strategies_iterations(const json &solver_params); void set_line_search(const json ¶ms); From 9f8a5e18d064a38f12a9abf5bcf1ff87a7432219 Mon Sep 17 00:00:00 2001 From: Zachary Ferguson Date: Tue, 12 Mar 2024 16:58:12 -0400 Subject: [PATCH 5/9] Improve timers (#73) * Improve timers - Move timers into DescentStrategy - Move timers into LineSearch * Skip Newton log_times if not used --- src/polysolve/nonlinear/Solver.cpp | 64 ++++++++----------- src/polysolve/nonlinear/Solver.hpp | 5 +- .../descent_strategies/DescentStrategy.hpp | 1 + .../nonlinear/descent_strategies/Newton.cpp | 30 +++++++-- .../nonlinear/descent_strategies/Newton.hpp | 12 ++-- .../nonlinear/line_search/Armijo.hpp | 2 +- .../nonlinear/line_search/Backtracking.hpp | 2 +- .../nonlinear/line_search/LineSearch.cpp | 33 +++++++++- .../nonlinear/line_search/LineSearch.hpp | 54 +++++++--------- .../nonlinear/line_search/NoLineSearch.hpp | 2 +- .../nonlinear/line_search/RobustArmijo.hpp | 2 +- 11 files changed, 117 insertions(+), 90 
deletions(-) diff --git a/src/polysolve/nonlinear/Solver.cpp b/src/polysolve/nonlinear/Solver.cpp index 5bc42d25..990c7b91 100644 --- a/src/polysolve/nonlinear/Solver.cpp +++ b/src/polysolve/nonlinear/Solver.cpp @@ -332,10 +332,14 @@ namespace polysolve::nonlinear // --- Update direction -------------------------------------------- - const bool ok = compute_update_direction(objFunc, x, grad, delta_x); - m_current.xDelta = delta_x.norm(); + bool update_direction_successful; + { + POLYSOLVE_SCOPED_STOPWATCH("compute update direction", update_direction_time, m_logger); + update_direction_successful = compute_update_direction(objFunc, x, grad, delta_x); + } - if (!ok || std::isnan(m_current.xDelta)) + m_current.xDelta = delta_x.norm(); + if (!update_direction_successful || std::isnan(m_current.xDelta)) { const auto current_name = descent_strategy_name(); if (!m_strategies[m_descent_strategy]->handle_error()) @@ -400,7 +404,12 @@ namespace polysolve::nonlinear m_current.iterations, energy, m_current.gradNorm); // Perform a line_search to compute step scale - double rate = m_line_search->line_search(x, delta_x, objFunc); + double rate; + { + POLYSOLVE_SCOPED_STOPWATCH("line search", line_search_time, m_logger); + rate = m_line_search->line_search(x, delta_x, objFunc); + } + if (std::isnan(rate)) { const auto current_name = descent_strategy_name(); @@ -428,7 +437,6 @@ namespace polysolve::nonlinear // if we did enough lower strategy, we revert back to normal if (m_descent_strategy != 0 && current_strategy_iter >= m_iter_per_strategy[m_descent_strategy]) { - const auto current_name = descent_strategy_name(); const std::string prev_strategy_name = descent_strategy_name(); @@ -514,14 +522,13 @@ namespace polysolve::nonlinear void Solver::reset_times() { total_time = 0; + obj_fun_time = 0; grad_time = 0; + update_direction_time = 0; line_search_time = 0; - obj_fun_time = 0; constraint_set_update_time = 0; if (m_line_search) - { m_line_search->reset_times(); - } for (auto &s : m_strategies) s->reset_times(); } @@ -538,47 +545,30 @@ namespace polysolve::nonlinear double per_iteration = m_current.iterations ? 
m_current.iterations : 1; solver_info["total_time"] = total_time; + solver_info["time_obj_fun"] = obj_fun_time / per_iteration; solver_info["time_grad"] = grad_time / per_iteration; + // Do not save update_direction_time as it is redundant with the strategies solver_info["time_line_search"] = line_search_time / per_iteration; solver_info["time_constraint_set_update"] = constraint_set_update_time / per_iteration; - solver_info["time_obj_fun"] = obj_fun_time / per_iteration; for (auto &s : m_strategies) s->update_solver_info(solver_info, per_iteration); - if (m_line_search) - { - solver_info["line_search_iterations"] = m_line_search->iterations(); - - solver_info["time_checking_for_nan_inf"] = - m_line_search->checking_for_nan_inf_time / per_iteration; - solver_info["time_broad_phase_ccd"] = - m_line_search->broad_phase_ccd_time / per_iteration; - solver_info["time_ccd"] = m_line_search->ccd_time / per_iteration; - // Remove double counting - solver_info["time_classical_line_search"] = - (m_line_search->classical_line_search_time - - m_line_search->constraint_set_update_time) - / per_iteration; - solver_info["time_line_search_constraint_set_update"] = - m_line_search->constraint_set_update_time / per_iteration; - } + m_line_search->update_solver_info(solver_info, per_iteration); } - void Solver::log_times() + void Solver::log_times() const { m_logger.debug( - "[{}] grad {:.3g}s, " - "line_search {:.3g}s, constraint_set_update {:.3g}s, " - "obj_fun {:.3g}s, checking_for_nan_inf {:.3g}s, " - "broad_phase_ccd {:.3g}s, ccd {:.3g}s, " - "classical_line_search {:.3g}s", + "[{}] f: {:.2e}s, grad_f: {:.2e}s, update_direction: {:.2e}s, " + "line_search: {:.2e}s, constraint_set_update: {:.2e}s", fmt::format(fmt::fg(fmt::terminal_color::magenta), "timing"), - grad_time, line_search_time, - constraint_set_update_time + (m_line_search ? m_line_search->constraint_set_update_time : 0), - obj_fun_time, m_line_search ? m_line_search->checking_for_nan_inf_time : 0, - m_line_search ? m_line_search->broad_phase_ccd_time : 0, m_line_search ? m_line_search->ccd_time : 0, - m_line_search ? 
m_line_search->classical_line_search_time : 0); + obj_fun_time, grad_time, update_direction_time, line_search_time, + constraint_set_update_time); + for (auto &s : m_strategies) + s->log_times(); + if (m_line_search) + m_line_search->log_times(); } void Solver::verify_gradient(Problem &objFunc, const TVector &x, const TVector &grad) diff --git a/src/polysolve/nonlinear/Solver.hpp b/src/polysolve/nonlinear/Solver.hpp index 97c80bb3..9eda521e 100644 --- a/src/polysolve/nonlinear/Solver.hpp +++ b/src/polysolve/nonlinear/Solver.hpp @@ -145,16 +145,17 @@ namespace polysolve::nonlinear void update_solver_info(const double energy); void reset_times(); - void log_times(); + void log_times() const; json solver_info; // Timers double total_time; + double obj_fun_time; double grad_time; + double update_direction_time; double line_search_time; double constraint_set_update_time; - double obj_fun_time; // ==================================================================== // END diff --git a/src/polysolve/nonlinear/descent_strategies/DescentStrategy.hpp b/src/polysolve/nonlinear/descent_strategies/DescentStrategy.hpp index 6e3f8579..62cbb167 100644 --- a/src/polysolve/nonlinear/descent_strategies/DescentStrategy.hpp +++ b/src/polysolve/nonlinear/descent_strategies/DescentStrategy.hpp @@ -25,6 +25,7 @@ namespace polysolve::nonlinear virtual void reset(const int ndof) {} virtual void reset_times() {} virtual void update_solver_info(json &solver_info, const double per_iteration) {} + virtual void log_times() const {} virtual bool is_direction_descent() { return true; } virtual bool handle_error() { return false; } diff --git a/src/polysolve/nonlinear/descent_strategies/Newton.cpp b/src/polysolve/nonlinear/descent_strategies/Newton.cpp index 2fea6d67..e32eb69f 100644 --- a/src/polysolve/nonlinear/descent_strategies/Newton.cpp +++ b/src/polysolve/nonlinear/descent_strategies/Newton.cpp @@ -2,6 +2,8 @@ #include +#include + namespace polysolve::nonlinear { @@ -57,14 +59,14 @@ namespace polysolve::nonlinear const double characteristic_length, spdlog::logger &logger) : Superclass(solver_params, characteristic_length, logger), - is_sparse(sparse), m_characteristic_length(characteristic_length), m_residual_tolerance(residual_tolerance) + is_sparse(sparse), characteristic_length(characteristic_length), residual_tolerance(residual_tolerance) { linear_solver = polysolve::linear::Solver::create(linear_solver_params, logger); if (linear_solver->is_dense() == sparse) log_and_throw_error(logger, "Newton linear solver must be {}, instead got {}", sparse ? "sparse" : "dense", linear_solver->name()); - if (m_residual_tolerance <= 0) - log_and_throw_error(logger, "Newton residual_tolerance must be > 0, instead got {}", m_residual_tolerance); + if (residual_tolerance <= 0) + log_and_throw_error(logger, "Newton residual_tolerance must be > 0, instead got {}", residual_tolerance); } Newton::Newton( @@ -139,10 +141,10 @@ namespace polysolve::nonlinear is_sparse ? 
solve_sparse_linear_system(objFunc, x, grad, direction) : solve_dense_linear_system(objFunc, x, grad, direction); - if (std::isnan(residual) || residual > m_residual_tolerance * m_characteristic_length) + if (std::isnan(residual) || residual > residual_tolerance * characteristic_length) { m_logger.debug("[{}] large (or nan) linear solve residual {}>{} (||∇f||={})", - name(), residual, m_residual_tolerance * m_characteristic_length, grad.norm()); + name(), residual, residual_tolerance * characteristic_length, grad.norm()); return false; } @@ -156,8 +158,6 @@ namespace polysolve::nonlinear // ======================================================================= - // ======================================================================= - double Newton::solve_sparse_linear_system(Problem &objFunc, const TVector &x, const TVector &grad, @@ -326,6 +326,22 @@ namespace polysolve::nonlinear solver_info["time_inverting"] = inverting_time / per_iteration; } + void Newton::reset_times() + { + assembly_time = 0; + inverting_time = 0; + } + + void Newton::log_times() const + { + if (assembly_time <= 0 && inverting_time <= 0) + return; // nothing to log + m_logger.debug( + "[{}][{}] assembly: {:.2e}s; linear_solve: {:.2e}s", + fmt::format(fmt::fg(fmt::terminal_color::magenta), "timing"), + name(), assembly_time, inverting_time); + } + // ======================================================================= } // namespace polysolve::nonlinear diff --git a/src/polysolve/nonlinear/descent_strategies/Newton.hpp b/src/polysolve/nonlinear/descent_strategies/Newton.hpp index b50718a5..56cc5f11 100644 --- a/src/polysolve/nonlinear/descent_strategies/Newton.hpp +++ b/src/polysolve/nonlinear/descent_strategies/Newton.hpp @@ -47,8 +47,8 @@ namespace polysolve::nonlinear json internal_solver_info = json::array(); const bool is_sparse; - const double m_characteristic_length; - double m_residual_tolerance; + const double characteristic_length; + double residual_tolerance; std::unique_ptr linear_solver; ///< Linear solver used to solve the linear system @@ -71,12 +71,8 @@ namespace polysolve::nonlinear void reset(const int ndof) override; void update_solver_info(json &solver_info, const double per_iteration) override; - - void reset_times() override - { - assembly_time = 0; - inverting_time = 0; - } + void reset_times() override; + void log_times() const override; }; class ProjectedNewton : public Newton diff --git a/src/polysolve/nonlinear/line_search/Armijo.hpp b/src/polysolve/nonlinear/line_search/Armijo.hpp index 14ff81f4..c66fc266 100644 --- a/src/polysolve/nonlinear/line_search/Armijo.hpp +++ b/src/polysolve/nonlinear/line_search/Armijo.hpp @@ -13,7 +13,7 @@ namespace polysolve::nonlinear::line_search Armijo(const json ¶ms, spdlog::logger &logger); - virtual std::string name() override { return "Armijo"; } + virtual std::string name() const override { return "Armijo"; } protected: virtual void init_compute_descent_step_size( diff --git a/src/polysolve/nonlinear/line_search/Backtracking.hpp b/src/polysolve/nonlinear/line_search/Backtracking.hpp index 1fdebd44..78663a02 100644 --- a/src/polysolve/nonlinear/line_search/Backtracking.hpp +++ b/src/polysolve/nonlinear/line_search/Backtracking.hpp @@ -13,7 +13,7 @@ namespace polysolve::nonlinear::line_search Backtracking(const json ¶ms, spdlog::logger &logger); - virtual std::string name() override { return "Backtracking"; } + virtual std::string name() const override { return "Backtracking"; } double compute_descent_step_size( const TVector &x, diff --git 
a/src/polysolve/nonlinear/line_search/LineSearch.cpp b/src/polysolve/nonlinear/line_search/LineSearch.cpp index f30e5a24..119d1105 100644 --- a/src/polysolve/nonlinear/line_search/LineSearch.cpp +++ b/src/polysolve/nonlinear/line_search/LineSearch.cpp @@ -9,7 +9,7 @@ #include -#include +#include #include @@ -117,7 +117,7 @@ namespace polysolve::nonlinear::line_search } { - POLYSOLVE_SCOPED_STOPWATCH("CCD narrow-phase", ccd_time, m_logger); + POLYSOLVE_SCOPED_STOPWATCH("CCD narrow-phase", narrow_phase_ccd_time, m_logger); m_logger.trace("Performing narrow-phase CCD"); step_size = compute_collision_free_step_size(x, delta_x, objFunc, step_size); if (std::isnan(step_size)) @@ -246,4 +246,33 @@ namespace polysolve::nonlinear::line_search return step_size; } + + void LineSearch::update_solver_info(json &solver_info, const double per_iteration) + { + solver_info["line_search_iterations"] = iterations(); + solver_info["time_checking_for_nan_inf"] = checking_for_nan_inf_time / per_iteration; + solver_info["time_broad_phase_ccd"] = broad_phase_ccd_time / per_iteration; + solver_info["time_ccd"] = narrow_phase_ccd_time / per_iteration; + solver_info["time_classical_line_search"] = (classical_line_search_time - constraint_set_update_time) / per_iteration; // Remove double counting + solver_info["time_line_search_constraint_set_update"] = constraint_set_update_time / per_iteration; + } + + void LineSearch::reset_times() + { + checking_for_nan_inf_time = 0; + broad_phase_ccd_time = 0; + narrow_phase_ccd_time = 0; + constraint_set_update_time = 0; + classical_line_search_time = 0; + } + + void LineSearch::log_times() const + { + m_logger.debug( + "[{}][{}] constraint_set_update {:.2e}s, checking_for_nan_inf {:.2e}s, " + "broad_phase_ccd {:.2e}s, narrow_phase_ccd {:.2e}s, classical_line_search {:.2e}s", + fmt::format(fmt::fg(fmt::terminal_color::magenta), "timing"), + name(), constraint_set_update_time, checking_for_nan_inf_time, + broad_phase_ccd_time, narrow_phase_ccd_time, classical_line_search_time); + } } // namespace polysolve::nonlinear::line_search diff --git a/src/polysolve/nonlinear/line_search/LineSearch.hpp b/src/polysolve/nonlinear/line_search/LineSearch.hpp index e195955b..6541692b 100644 --- a/src/polysolve/nonlinear/line_search/LineSearch.hpp +++ b/src/polysolve/nonlinear/line_search/LineSearch.hpp @@ -15,6 +15,7 @@ namespace polysolve::nonlinear::line_search using Scalar = typename Problem::Scalar; using TVector = typename Problem::TVector; + public: LineSearch(const json ¶ms, spdlog::logger &m_logger); virtual ~LineSearch() = default; @@ -29,58 +30,38 @@ namespace polysolve::nonlinear::line_search static std::vector available_methods(); - void reset_times() - { - checking_for_nan_inf_time = 0; - broad_phase_ccd_time = 0; - ccd_time = 0; - constraint_set_update_time = 0; - classical_line_search_time = 0; - } + virtual std::string name() const = 0; + + void update_solver_info(json &solver_info, const double per_iteration); + void reset_times(); + void log_times() const; void set_is_final_strategy(const bool val) { is_final_strategy = val; } - inline double current_min_step_size() const + double current_min_step_size() const { return is_final_strategy ? min_step_size_final : min_step_size; } - inline int current_max_step_size_iter() const + int current_max_step_size_iter() const { return is_final_strategy ? 
max_step_size_iter_final : max_step_size_iter; } + int iterations() const { return cur_iter; } + double checking_for_nan_inf_time; double broad_phase_ccd_time; - double ccd_time; + double narrow_phase_ccd_time; double constraint_set_update_time; double classical_line_search_time; double use_grad_norm_tol = -1; - virtual std::string name() = 0; - - inline int iterations() const { return cur_iter; } - - private: - double min_step_size; - int max_step_size_iter; - double min_step_size_final; - int max_step_size_iter_final; - - bool is_final_strategy; - - double default_init_step_size; - protected: - int cur_iter; - spdlog::logger &m_logger; - - double step_ratio; - virtual double compute_descent_step_size( const TVector &x, const TVector &delta_x, @@ -90,6 +71,10 @@ namespace polysolve::nonlinear::line_search const TVector &old_grad, const double starting_step_size) = 0; + spdlog::logger &m_logger; + double step_ratio; + int cur_iter; + private: double compute_nan_free_step_size( const TVector &x, @@ -102,5 +87,14 @@ namespace polysolve::nonlinear::line_search const TVector &delta_x, Problem &objFunc, const double starting_step_size); + + double min_step_size; + int max_step_size_iter; + double min_step_size_final; + int max_step_size_iter_final; + + bool is_final_strategy; + + double default_init_step_size; }; } // namespace polysolve::nonlinear::line_search diff --git a/src/polysolve/nonlinear/line_search/NoLineSearch.hpp b/src/polysolve/nonlinear/line_search/NoLineSearch.hpp index f7eb1011..5f257f2e 100644 --- a/src/polysolve/nonlinear/line_search/NoLineSearch.hpp +++ b/src/polysolve/nonlinear/line_search/NoLineSearch.hpp @@ -12,7 +12,7 @@ namespace polysolve::nonlinear::line_search NoLineSearch(const json ¶ms, spdlog::logger &logger); - virtual std::string name() override { return "None"; } + virtual std::string name() const override { return "None"; } protected: double compute_descent_step_size( diff --git a/src/polysolve/nonlinear/line_search/RobustArmijo.hpp b/src/polysolve/nonlinear/line_search/RobustArmijo.hpp index 9b573165..97937fb8 100644 --- a/src/polysolve/nonlinear/line_search/RobustArmijo.hpp +++ b/src/polysolve/nonlinear/line_search/RobustArmijo.hpp @@ -14,7 +14,7 @@ namespace polysolve::nonlinear::line_search RobustArmijo(const json ¶ms, spdlog::logger &logger); - virtual std::string name() override { return "RobustArmijo"; } + virtual std::string name() const override { return "RobustArmijo"; } protected: bool criteria( From 83dc1c855b787de71e026c1dd3f857208dfb3f0a Mon Sep 17 00:00:00 2001 From: xDarkLemon Date: Tue, 12 Mar 2024 19:56:04 -0400 Subject: [PATCH 6/9] Add set_block_size and set_is_nullspace for linear sovler --- src/polysolve/linear/Solver.hpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/polysolve/linear/Solver.hpp b/src/polysolve/linear/Solver.hpp index deaae018..4775639e 100644 --- a/src/polysolve/linear/Solver.hpp +++ b/src/polysolve/linear/Solver.hpp @@ -107,6 +107,12 @@ namespace polysolve::linear // If solver uses dense matrices virtual bool is_dense() const { return false; } + // Set block size for multigrid solvers + virtual void set_block_size(int block_size) {} + + // If the problem is nullspace for multigrid solvers + virtual void set_is_nullspace(bool is_nullspace) {} + // // @brief { Solve the linear system Ax = b } // From 7536eb3c78804419e422906d012cc3d3eede70da Mon Sep 17 00:00:00 2001 From: xDarkLemon Date: Wed, 13 Mar 2024 20:45:49 -0400 Subject: [PATCH 7/9] Update set_is_nullspace for linear sovler --- 
src/polysolve/linear/Solver.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/polysolve/linear/Solver.hpp b/src/polysolve/linear/Solver.hpp index 4775639e..13fdedaa 100644 --- a/src/polysolve/linear/Solver.hpp +++ b/src/polysolve/linear/Solver.hpp @@ -111,7 +111,7 @@ namespace polysolve::linear virtual void set_block_size(int block_size) {} // If the problem is nullspace for multigrid solvers - virtual void set_is_nullspace(bool is_nullspace) {} + virtual void set_is_nullspace(const VectorXd &x) {} // // @brief { Solve the linear system Ax = b } From deb95e4f2128a0c58e58e2e9d6bfea189975085a Mon Sep 17 00:00:00 2001 From: Zachary Ferguson Date: Thu, 2 May 2024 10:14:20 -0400 Subject: [PATCH 8/9] Fix log string --- src/polysolve/nonlinear/descent_strategies/Newton.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/polysolve/nonlinear/descent_strategies/Newton.cpp b/src/polysolve/nonlinear/descent_strategies/Newton.cpp index e32eb69f..4dce8f5b 100644 --- a/src/polysolve/nonlinear/descent_strategies/Newton.cpp +++ b/src/polysolve/nonlinear/descent_strategies/Newton.cpp @@ -143,7 +143,7 @@ namespace polysolve::nonlinear if (std::isnan(residual) || residual > residual_tolerance * characteristic_length) { - m_logger.debug("[{}] large (or nan) linear solve residual {}>{} (||∇f||={})", + m_logger.debug("[{}] large (or nan) linear solve residual {}>{} (‖∇f‖={})", name(), residual, residual_tolerance * characteristic_length, grad.norm()); return false; From fdd346e6e4ef03d8d03c2a1be6270add190e7433 Mon Sep 17 00:00:00 2001 From: teseoch Date: Wed, 17 Jul 2024 10:15:35 -0700 Subject: [PATCH 9/9] mkl link (#77) fix mkl link --- cmake/recipes/mkl.cmake | 86 +++++++++++++---------------------------- 1 file changed, 27 insertions(+), 59 deletions(-) diff --git a/cmake/recipes/mkl.cmake b/cmake/recipes/mkl.cmake index 999b096c..814b3516 100644 --- a/cmake/recipes/mkl.cmake +++ b/cmake/recipes/mkl.cmake @@ -34,8 +34,8 @@ set_property(CACHE MKL_LINKING PROPERTY STRINGS ${MKL_LINKINK_CHOICES}) message(STATUS "MKL linking strategy: ${MKL_LINKING}") # MKL version -set(MKL_VERSION "2021.3.0" CACHE STRING "MKL version to use (2020.4 or 2021.3.0)") -set(MKL_VERSION_CHOICES 2020.4 2021.3.0) +set(MKL_VERSION "2022.2.1" CACHE STRING "MKL version to use (2022.2.1)") +set(MKL_VERSION_CHOICES 2022.2.1) set_property(CACHE MKL_VERSION PROPERTY STRINGS ${MKL_VERSION_CHOICES}) message(STATUS "MKL version: ${MKL_VERSION}") @@ -78,66 +78,34 @@ elseif(UNIX) set(MKL_PLATFORM linux-64) endif() -if(MKL_VERSION VERSION_EQUAL 2020.4) - # To compute the md5 checksums for each lib, use the following bash script (replace the target version number): - # for f in mkl mkl-include mkl-static mkl-devel; do for os in linux osx win; do cat <(printf "$f-$os-64-md5") <(conda search --override-channel --channel intel $f=2020.4 --platform $os-64 -i | grep md5 | cut -d : -f 2); done; done - set(mkl-linux-64-md5 f85891f97a04c7b2fbf619d1b903d7f5) - set(mkl-osx-64-md5 735d7f93c7fbcffe658f1ccf67418cb3) - set(mkl-win-64-md5 37ade09cace5cd73053b16574a3ee3c3) - set(mkl-include-linux-64-md5 8b2bf0e42bd95dd700d9877add1ca6de) - set(mkl-include-osx-64-md5 26043328553952cdb064c5aab8b50c78) - set(mkl-include-win-64-md5 87e9a73a6e6757a8ed0dbc87d50d7f60) - set(mkl-static-linux-64-md5 9f589a1508fb083c3e73427db459ca4c) - set(mkl-static-osx-64-md5 2f9e1b8b6d6b0903e81a573084e4494f) - set(mkl-static-win-64-md5 5ae780c06edd0be62966c6d8ab47d5fb) - set(mkl-devel-linux-64-md5 b571698ef237c0e61abe15b7d300f157) - 
set(mkl-devel-osx-64-md5 ee58da0463676d910eeab9aec0470f0e) - set(mkl-devel-win-64-md5 8a7736b81b9bc2d5c044b88d6ac8af6e) - - # To compute file names, we use the following bash script: - # for f in mkl mkl-include mkl-static mkl-devel; do for os in linux osx win; do cat <(printf "$f-$os-64-file") <(conda search --override-channel --channel intel $f=2020.4 --platform $os-64 -i | grep file | cut -d : -f 2); done; done - set(mkl-linux-64-file mkl-2020.4-intel_304.tar.bz2) - set(mkl-osx-64-file mkl-2020.4-intel_301.tar.bz2) - set(mkl-win-64-file mkl-2020.4-intel_311.tar.bz2) - set(mkl-include-linux-64-file mkl-include-2020.4-intel_304.tar.bz2) - set(mkl-include-osx-64-file mkl-include-2020.4-intel_301.tar.bz2) - set(mkl-include-win-64-file mkl-include-2020.4-intel_311.tar.bz2) - set(mkl-static-linux-64-file mkl-static-2020.4-intel_304.tar.bz2) - set(mkl-static-osx-64-file mkl-static-2020.4-intel_301.tar.bz2) - set(mkl-static-win-64-file mkl-static-2020.4-intel_311.tar.bz2) - set(mkl-devel-linux-64-file mkl-devel-2020.4-intel_304.tar.bz2) - set(mkl-devel-osx-64-file mkl-devel-2020.4-intel_301.tar.bz2) - set(mkl-devel-win-64-file mkl-devel-2020.4-intel_311.tar.bz2) -elseif(MKL_VERSION VERSION_EQUAL 2021.3.0) +if(MKL_VERSION VERSION_EQUAL 2022.2.1) # To compute the md5 checksums for each lib, use the following bash script (replace the target version number): # for f in mkl mkl-include mkl-static mkl-devel; do for os in linux osx win; do cat <(printf "$f-$os-64-md5") <(conda search --override-channel --channel intel $f=2021.3.0 --platform $os-64 -i | grep md5 | cut -d : -f 2); done; done - set(mkl-linux-64-md5 2501643729c00b24fddb9530b339aea7) - set(mkl-osx-64-md5 d6129ae9dfba58671667a65c160d0776) - set(mkl-win-64-md5 264213ea4c5cb6b6d81ea97f59e757ab) - set(mkl-include-linux-64-md5 70b4f9a53401a3d11ce27d7ddb0e2511) - set(mkl-include-osx-64-md5 6da50c06992b78c4127a1881d39c1804) - set(mkl-include-win-64-md5 28d785eb22d28512d4e40e5890a817dc) - set(mkl-static-linux-64-md5 1469ad60a34269d4d0c5666bc131b82a) - set(mkl-static-osx-64-md5 4a099581ba95cc50bb538598b26389e4) - set(mkl-static-win-64-md5 69aef10428893314bc486e81397e1b25) - set(mkl-devel-linux-64-md5 2432ad963e3f7e4619ffc7f896178fbe) - set(mkl-devel-osx-64-md5 61b84a60715a3855a2097a3b619a00c8) - set(mkl-devel-win-64-md5 6128dee67d2b20ff534cf54757f623e0) + # set(mkl-linux-64-md5 2501643729c00b24fddb9530b339aea7) + # set(mkl-win-64-md5 264213ea4c5cb6b6d81ea97f59e757ab) + + # set(mkl-include-linux-64-md5 70b4f9a53401a3d11ce27d7ddb0e2511) + # set(mkl-include-win-64-md5 28d785eb22d28512d4e40e5890a817dc) + + # set(mkl-static-linux-64-md5 1469ad60a34269d4d0c5666bc131b82a) + # set(mkl-static-win-64-md5 69aef10428893314bc486e81397e1b25) + + # set(mkl-devel-linux-64-md5 2432ad963e3f7e4619ffc7f896178fbe) + # set(mkl-devel-win-64-md5 6128dee67d2b20ff534cf54757f623e0) # To compute file names, we use the following bash script: # for f in mkl mkl-include mkl-static mkl-devel; do for os in linux osx win; do cat <(printf "$f-$os-64-file") <(conda search --override-channel --channel intel $f=2021.3.0 --platform $os-64 -i | grep file | cut -d : -f 2); done; done - set(mkl-linux-64-file mkl-2021.3.0-intel_520.tar.bz2) - set(mkl-osx-64-file mkl-2021.3.0-intel_517.tar.bz2) - set(mkl-win-64-file mkl-2021.3.0-intel_524.tar.bz2) - set(mkl-include-linux-64-file mkl-include-2021.3.0-intel_520.tar.bz2) - set(mkl-include-osx-64-file mkl-include-2021.3.0-intel_517.tar.bz2) - set(mkl-include-win-64-file mkl-include-2021.3.0-intel_524.tar.bz2) - set(mkl-static-linux-64-file 
mkl-static-2021.3.0-intel_520.tar.bz2) - set(mkl-static-osx-64-file mkl-static-2021.3.0-intel_517.tar.bz2) - set(mkl-static-win-64-file mkl-static-2021.3.0-intel_524.tar.bz2) - set(mkl-devel-linux-64-file mkl-devel-2021.3.0-intel_520.tar.bz2) - set(mkl-devel-osx-64-file mkl-devel-2021.3.0-intel_517.tar.bz2) - set(mkl-devel-win-64-file mkl-devel-2021.3.0-intel_524.tar.bz2) + set(mkl-linux-64-file mkl-2022.2.1-h6508926_16999.tar.bz2) + set(mkl-win-64-file mkl-2022.2.1-h4060db9_19760.tar.bz2) + + set(mkl-include-linux-64-file mkl-include-2022.2.1-ha957f24_16999.tar.bz2) + set(mkl-include-win-64-file mkl-include-2022.2.1-h66d3029_19760.tar.bz2) + + set(mkl-static-linux-64-file mkl-static-2022.2.1-h6508926_16999.tar.bz2) + set(mkl-static-win-64-file mkl-static-2022.2.1-h4060db9_19760.tar.bz2) + + set(mkl-devel-linux-64-file mkl-devel-2022.2.1-ha957f24_16999.tar.bz2) + set(mkl-devel-win-64-file mkl-devel-2022.2.1-h66d3029_19760.tar.bz2) endif() # On Windows, `mkl-devel` contains the .lib files (needed at link time), @@ -153,8 +121,8 @@ include(CPM) foreach(name IN ITEMS ${MKL_REMOTES}) CPMAddPackage( NAME ${name} - URL https://anaconda.org/intel/${name}/${MKL_VERSION}/download/${MKL_PLATFORM}/${${name}-${MKL_PLATFORM}-file} - URL_MD5 ${${name}-${MKL_PLATFORM}-md5} + URL https://anaconda.org/conda-forge/${name}/${MKL_VERSION}/download/${MKL_PLATFORM}/${${name}-${MKL_PLATFORM}-file} + # URL_MD5 ${${name}-${MKL_PLATFORM}-md5} ) endforeach()
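
A minimal usage sketch of the nonlinear Problem/Solver API documented in patch 1, assuming illustrative JSON field names, installed header paths, and a unit characteristic length; the exact parameter schema and Solver::create() argument list are assumptions and should be checked against the library headers, not taken as the documented interface.

    #include <polysolve/nonlinear/Problem.hpp> // assumed installed header path
    #include <polysolve/nonlinear/Solver.hpp>  // assumed installed header path
    #include <nlohmann/json.hpp>
    #include <spdlog/spdlog.h>
    #include <Eigen/Dense>

    // f(x) = 0.5 * ||x||^2, minimized at x = 0; implements the three pure
    // virtuals (value, gradient, hessian) documented in Problem.hpp.
    class Quadratic : public polysolve::nonlinear::Problem
    {
    public:
        double value(const TVector &x) override { return 0.5 * x.squaredNorm(); }
        void gradient(const TVector &x, TVector &gradv) override { gradv = x; }
        void hessian(const TVector &x, THessian &hessian) override
        {
            hessian.resize(x.size(), x.size());
            hessian.setIdentity();
        }
    };

    int main()
    {
        // Illustrative parameters: these field names are assumptions,
        // not the library's documented input schema.
        nlohmann::json solver_params = {
            {"solver", "Newton"},
            {"line_search", {{"method", "Backtracking"}}}};
        nlohmann::json linear_solver_params = {{"solver", "Eigen::SimplicialLDLT"}};

        auto logger = spdlog::default_logger();

        // The characteristic-length argument is assumed here; see the
        // Solver::create declaration in Solver.hpp for the exact signature.
        auto solver = polysolve::nonlinear::Solver::create(
            solver_params, linear_solver_params,
            /*characteristic_length=*/1.0, *logger);

        Quadratic problem;
        Eigen::VectorXd x = Eigen::VectorXd::Ones(10); // initial guess
        solver->minimize(problem, x);                  // x is updated in place toward the minimizer
        return 0;
    }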