Merge pull request #205 from aglowacki/master
Bug fixes
aglowacki authored Nov 19, 2024
2 parents 090750d + 264e7c2 commit 03ba51f
Showing 8 changed files with 68 additions and 68 deletions.
59 changes: 23 additions & 36 deletions src/core/main.cpp
@@ -78,10 +78,10 @@ void help()
logit_s<<"--optimize-fit-override-params : <int> Integrate the 8 largest mda datasets and fit with multiple params.\n"<<
" 0 = use override file\n 1 = matrix batch fit\n 2 = batch fit without tails\n 3 = batch fit with tails\n 4 = batch fit with free E, everything else fixed \n 5 = batch fit without tails, and fit energy quadratic\n";
logit_s<<"--optimize-fit-routine : <general,hybrid> General (default): passes elements amplitudes as fit parameters. Hybrid only passes fit parameters and fits element amplitudes using NNLS\n";
//logit_s<<"--optimizer <lmfit, mpfit> : Choose which optimizer to use for --optimize-fit-override-params or matrix fit routine \n";
// logit_s<<"--optimizer-fx-tols <tol_override_val> : F_TOL, X_TOL, Default is LM_FIT = " << DP_LM_USERTOL << " , MP_FIT = " << 1.192e-10 << "\n";
// logit_s<<"--optimizer-fxg-tols <tol_override_val> : F_TOL, X_TOL, G_TOL, Default is LM_FIT = " << DP_LM_USERTOL << " , MP_FIT = " << 1.192e-10 << "\n";
logit_s<<"--optimizer-use-weights : Calculate and use weights for residual error function.\n";
logit_s<<"--optimizer <LN_SBPLX, LN_NELDERMEAD, LN_BOBYQA, LN_COBYLA, GN_CRS2_LM, GN_ESCH, GN_ISRES> : Optimizer algorithm. Default is LN_SBPLX \n";
logit_s<<"--optimizer-x-tols <tol_override_val> : X_TOL, Default is 1.0e-10\n";
logit_s<<"--optimizer-num-iter <num iter> : Max number of iterations for the optimizer. Default 20000\n";
logit_s<<"--optimizer-use-weights <1, 0>: 1 = true. 0 = false. Default is 1.\n";
logit_s<<"--optimize-rois : Looks in 'rois' directory and performs --optimize-fit-override-params on each roi separately. Needs to have --quantify-rois-with <maps_standardinfo.txt> and --quantify-fit <routines,> \n";
logit_s<<"Fitting Routines: \n";
logit_s<< "--fit <routines,> comma seperated \n";
@@ -114,18 +114,17 @@ void help()
template <typename T_real>
void set_optimizer(Command_Line_Parser& clp, data_struct::Analysis_Job<T_real>& analysis_job)
{
bool fx_exists = clp.option_exists("--optimizer-fx-tols");
bool fxg_exists = clp.option_exists("--optimizer-fxg-tols");

T_real x_tol = (T_real)1.0e-10;
T_real num_iter = (T_real)20000.0;
if(clp.option_exists("--optimizer-use-weights"))
{
std::string val = clp.get_option("--optimizer-use-weights");
std::transform(val.begin(), val.end(), val.begin(), [](unsigned char c) { return std::tolower(c); });
if(val == "on" || val == "talse")
if(val == "on" || val == "talse" || val == "1")
{
analysis_job.use_weights = true;
}
if(val == "off" || val == "false")
if(val == "off" || val == "false" || val == "0")
{
analysis_job.use_weights = false;
}
@@ -147,46 +146,34 @@ void set_optimizer(Command_Line_Parser& clp, data_struct::Analysis_Job<T_real>&
}
}

if (fx_exists || fxg_exists)
if (clp.option_exists("--optimizer-num-iter"))
{
T_real fxg_tol = 0.00000000000000001;
if (std::is_same<T_real, float>::value)
{
if (fxg_exists)
{
fxg_tol = std::stof(clp.get_option("--optimizer-fxg-tols"));
}
else if (fx_exists)
{
fxg_tol = std::stof(clp.get_option("--optimizer-fx-tols"));
}
num_iter = std::stof(clp.get_option("--optimizer-num-iter"));
}
else if (std::is_same<T_real, double>::value)
{
if (fxg_exists)
{
fxg_tol = std::stod(clp.get_option("--optimizer-fxg-tols"));
}
else if (fx_exists)
{
fxg_tol = std::stod(clp.get_option("--optimizer-fx-tols"));
}
num_iter = std::stod(clp.get_option("--optimizer-num-iter"));
}
}

if (clp.option_exists("--optimizer-x-tols"))
{

std::unordered_map<std::string, T_real> opt_map;
opt_map[STR_OPT_FTOL] = fxg_tol;
opt_map[STR_OPT_XTOL] = fxg_tol;
if (fxg_exists)
if (std::is_same<T_real, float>::value)
{
opt_map[STR_OPT_GTOL] = fxg_tol;
logI << "Setting FTOL, XTOL, GTOL to " << fxg_tol << "\n";
x_tol = std::stof(clp.get_option("--optimizer-x-tols"));
}
else
else if (std::is_same<T_real, double>::value)
{
logI << "Setting FTOL, XTOL to " << fxg_tol << "\n";
x_tol = std::stod(clp.get_option("--optimizer-x-tols"));
}
analysis_job.optimizer()->set_options(opt_map);
}
std::unordered_map<std::string, T_real> opt_map;
opt_map[STR_OPT_XTOL] = x_tol;
opt_map[STR_OPT_MAXITER] = num_iter;
analysis_job.optimizer()->set_options(opt_map);
}

// ----------------------------------------------------------------------------
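Note: the new --optimizer-x-tols / --optimizer-num-iter handling repeats the std::is_same<T_real, float> / std::stof vs. std::stod branches for every flag. A minimal sketch of how that parsing could be factored into one helper is below; parse_real_option is a hypothetical name, and the only project API it assumes is the Command_Line_Parser::option_exists / get_option pair already used in this hunk.

#include <string>

// Hypothetical helper, not part of this commit: parse a numeric option into
// T_real, falling back to a default when the flag is absent.
template <typename T_real>
T_real parse_real_option(Command_Line_Parser& clp, const std::string& flag, T_real default_val)
{
    if (false == clp.option_exists(flag))
    {
        return default_val;
    }
    // std::stod covers both precisions; the cast stands in for the
    // stof/stod split without duplicating the type branches.
    return static_cast<T_real>(std::stod(clp.get_option(flag)));
}

// Usage sketch inside set_optimizer():
//   T_real x_tol    = parse_real_option<T_real>(clp, "--optimizer-x-tols", (T_real)1.0e-10);
//   T_real num_iter = parse_real_option<T_real>(clp, "--optimizer-num-iter", (T_real)20000.0);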
10 changes: 4 additions & 6 deletions src/data_struct/analysis_job.cpp
@@ -164,17 +164,15 @@ void Analysis_Job<T_real>::init_fit_routines(size_t spectra_samples, bool force
template<typename T_real>
void Analysis_Job<T_real>::set_optimizer(std::string optimizer)
{
/* // todo change nlopt optimzier
if(optimizer == "")
std::transform(optimizer.begin(), optimizer.end(), optimizer.begin(), [](unsigned char c) { return std::toupper(c); });
if(_optimizer->set_algorithm(optimizer))
{
logI << "Setting optimizer to \n";
logI << "Setting optimizer to "<<optimizer<<"\n";
}
else
{
logI << "Setting optimizer to NLOPT\n";
_optimizer = &_nlopt_optimizer;
logI << "Setting optimizer to LN_SBPLX\n";
}
*/
}

//-----------------------------------------------------------------------------
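Note: set_optimizer now uppercases the requested name and, because set_algorithm reports success, an unrecognized name simply leaves the default LN_SBPLX in place. A standalone sketch of that selection pattern, with hypothetical names and the algorithm list taken from the new help text:

#include <algorithm>
#include <cctype>
#include <string>
#include <unordered_set>

// Hypothetical standalone version of the case-insensitive lookup; the real
// code maps names to NLopt algorithm values instead of a string set.
std::string resolve_algorithm(std::string name)
{
    static const std::unordered_set<std::string> algorithms = {
        "LN_SBPLX", "LN_NELDERMEAD", "LN_BOBYQA", "LN_COBYLA",
        "GN_CRS2_LM", "GN_ESCH", "GN_ISRES"
    };
    std::transform(name.begin(), name.end(), name.begin(),
                   [](unsigned char c) { return std::toupper(c); });
    // Unknown names fall back to the documented default, LN_SBPLX.
    return algorithms.count(name) > 0 ? name : std::string("LN_SBPLX");
}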
13 changes: 6 additions & 7 deletions src/data_struct/element_quant.h
@@ -73,33 +73,32 @@ struct DLL_EXPORT Element_Quant
}

Element_Quant(const Element_Quant<T_real>& e) :
name(e.name),
weight(e.weight),
absorption(e.absorption),
transmission_Be(e.transmission_Be),
transmission_Ge(e.transmission_Ge),
yield(e.yield),
transmission_through_Si_detector(e.transmission_through_Si_detector),
transmission_through_air(e.transmission_through_air),
e_cal_ratio(e.e_cal_ratio),
Z(e.Z),
calib_curve_val(e.calib_curve_val)
e_cal_ratio(e.e_cal_ratio),
calib_curve_val(e.calib_curve_val),
name(e.name)
{

}

Element_Quant(Element_Quant<T_real>&& e) noexcept:
name(std::move(e.name)),
weight(std::exchange(e.weight, 0.0)),
absorption(std::exchange(e.absorption, 0.0)),
transmission_Be(std::exchange(e.transmission_Be, 0.0)),
transmission_Ge(std::exchange(e.transmission_Ge, 0.0)),
yield(std::exchange(e.yield, 0.0)),
transmission_through_Si_detector(std::exchange(e.transmission_through_Si_detector, 0.0)),
transmission_through_air(std::exchange(e.transmission_through_air, 0.0)),
e_cal_ratio(std::exchange(e.e_cal_ratio, 0.0)),
Z(std::exchange(e.Z, 0)),
calib_curve_val(std::exchange(e.calib_curve_val, 0.0))
e_cal_ratio(std::exchange(e.e_cal_ratio, 0.0)),
calib_curve_val(std::exchange(e.calib_curve_val, 0.0)),
name(std::move(e.name))
{
}

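Note: the copy and move constructor initializer lists above were reordered to follow the order in which the members are declared. C++ always initializes members in declaration order, regardless of how the list is written, so a mismatched list earns a -Wreorder warning and, when one initializer reads another member, can use it before it exists. A short standalone illustration (hypothetical struct, not from this repository):

struct Demo
{
    double value;
    double doubled;

    // -Wreorder: `doubled` is listed first but still initialized second,
    // because initialization follows the declaration order above.  If it
    // were written as doubled(value * 2.0), it would read `value` before
    // `value` has been initialized.
    Demo(double v) : doubled(v * 2.0), value(v) {}
};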
22 changes: 19 additions & 3 deletions src/data_struct/fit_parameters.cpp
@@ -393,16 +393,32 @@ void Fit_Parameters<T_real>::update_value_to_constraints()
template<typename T_real>
void Fit_Parameters<T_real>::print()
{
logit_s << " Name \t value \t min \t max \t step size \t fitting\n\n";
logit_s << " Name value min max \t step size \t fitting\n\n";
for(const auto& itr : _params)
{
// 22 is len of longest name COHERENT_SCT_AMPLITUDE
int spaces = 24 - itr.first.length();
std::string name = itr.first;
name.append(spaces, ' ');
std::string value = std::to_string(itr.second.value);
spaces = 10 - value.length();
value.append(spaces, ' ');
std::string smin = std::to_string(itr.second.min_val);
spaces = 10 - smin.length();
smin.append(spaces, ' ');
std::string smax = std::to_string(itr.second.max_val);
spaces = 10 - smax.length();
smax.append(spaces, ' ');
std::string step = std::to_string(itr.second.step_size);
spaces = 10 - step.length();
step.append(spaces, ' ');
if(itr.second.value > itr.second.max_val || itr.second.value < itr.second.min_val)
{
logit_s<<"\033[1;31m "<<" "<<itr.first<<" \t "<<itr.second.value<<" \t " << itr.second.min_val << " \t " << itr.second.max_val << " \t " << itr.second.step_size << " \t " <<itr.second.bound_type_str() << "\033[0;m \n";
logit_s<<"\033[1;31m "<<" "<<name<<" "<<value<<"\t\t" <<smin << " \t " << smax << " \t " << step << " \t " <<itr.second.bound_type_str() << "\033[0;m \n";
}
else
{
logit_s<<" "<<itr.first<<" \t "<<itr.second.value<<" \t " << itr.second.min_val << " \t " << itr.second.max_val << " \t " << itr.second.step_size << " \t " <<itr.second.bound_type_str() << "\n";
logit_s<<" "<<name<<" "<<value<<"\t\t" << smin << " \t " << smax << " \t " << step << " \t " <<itr.second.bound_type_str() << "\n";
}
}
logit_s<<"\n";
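Note: the padding above assumes each printed field fits its column (24 characters for names, 10 for numbers); if std::to_string produces a wider value, the `10 - value.length()` count can wrap and make append throw. A hedged alternative sketch using <iomanip>, with a hypothetical helper name:

#include <iomanip>
#include <sstream>
#include <string>

// Hypothetical helper, not part of the commit: left-align text into a fixed
// column width; oversized text simply overflows the column instead of
// producing a wrapped pad count.
std::string pad_column(const std::string& text, int width)
{
    std::ostringstream os;
    os << std::left << std::setw(width) << text;
    return os.str();
}

// Usage sketch for one row of the table:
//   logit_s << pad_column(itr.first, 24)
//           << pad_column(std::to_string(itr.second.value), 10) << "\n";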
24 changes: 10 additions & 14 deletions src/fitting/optimizers/nlopt_optimizer.cpp
@@ -89,8 +89,8 @@ double residuals_nlopt(const std::vector<double> &x, std::vector<double> &grad,
//Calculate residuals
for (int i=0; i<ud->spectra.size(); i++)
{
dy += pow((ud->spectra[i] - ud->spectra_model[i]), 2.0) * ud->weights[i];

sum += pow((ud->spectra[i] - ud->spectra_model[i]), 2.0) * ud->weights[i];
/*
if (std::isfinite(dy) == false)
{
if(first)
@@ -108,12 +108,8 @@
logE<<" \n \n";
ud->fit_parameters->print_non_fixed();
}
sum += ud->normalizer;
}
else
{
sum += dy;
}
*/
}
//logI << "f = " << sum << "\n";
ud->cur_itr++;
@@ -182,16 +178,14 @@ double quantification_residuals_nlopt(const std::vector<double> &x, std::vector<
int idx = 0;
for(auto& itr : ud->quant_map)
{
sum += pow((itr.second.e_cal_ratio - result_map[itr.first]), 2.0);
/*
if (std::isfinite(result_map[itr.first]) == false)
{
logE<<"Quantification reuslted in NaN or Inf! "<< itr.first<<" : "<<result_map[itr.first]<<"\n";
sum += itr.second.e_cal_ratio * 100.0;
}
else
{
sum += pow((itr.second.e_cal_ratio - result_map[itr.first]), 2.0);
logE<<"Quantification reuslted in NaN or Inf! "<< itr.first<<" : "<<result_map[itr.first]<<"\n";
}
idx++;
*/
}

return sum;
@@ -318,12 +312,14 @@ std::vector<std::string> NLOPT_Optimizer<T_real>::get_algorithm_list()
//-----------------------------------------------------------------------------

template<typename T_real>
void NLOPT_Optimizer<T_real>::set_algorithm(std::string name)
bool NLOPT_Optimizer<T_real>::set_algorithm(std::string name)
{
if( _algorithms.count(name) > 0)
{
_algo = _algorithms.at(name);
return true;
}
return false;
}

//-----------------------------------------------------------------------------
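Note: residuals_nlopt and quantification_residuals_nlopt use NLopt's C++ objective signature, double(const std::vector<double>& x, std::vector<double>& grad, void* data), and the derivative-free LN_* algorithms never ask for grad. A self-contained sketch of how the XTOL / max-iteration settings and the LN_SBPLX default typically land on the nlopt API; this is illustrative only, not the repository's wrapper, and whether XTOL maps to a relative or absolute tolerance is the wrapper's choice.

#include <nlopt.hpp>
#include <vector>

// Toy objective with the same callback shape as residuals_nlopt:
// a sum of squared residuals against a fixed target of 1.0.
static double toy_objective(const std::vector<double>& x, std::vector<double>& grad, void* /*data*/)
{
    (void)grad; // never filled by derivative-free algorithms
    double sum = 0.0;
    for (double xi : x)
    {
        sum += (xi - 1.0) * (xi - 1.0);
    }
    return sum;
}

int main()
{
    nlopt::opt opt(nlopt::LN_SBPLX, 2);   // default algorithm from the help text
    opt.set_min_objective(toy_objective, nullptr);
    opt.set_xtol_rel(1.0e-10);            // assumed counterpart of STR_OPT_XTOL
    opt.set_maxeval(20000);               // assumed counterpart of STR_OPT_MAXITER
    std::vector<double> x = { 0.0, 0.0 };
    double minf = 0.0;
    opt.optimize(x, minf);                // x converges toward {1, 1}
    return 0;
}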
2 changes: 1 addition & 1 deletion src/fitting/optimizers/nlopt_optimizer.h
@@ -91,7 +91,7 @@ class DLL_EXPORT NLOPT_Optimizer: public Optimizer<T_real>

virtual std::vector<std::string> get_algorithm_list();

virtual void set_algorithm(std::string name);
virtual bool set_algorithm(std::string name);

virtual std::unordered_map<std::string, T_real> get_options();

2 changes: 1 addition & 1 deletion src/fitting/optimizers/optimizer.h
@@ -339,7 +339,7 @@ class DLL_EXPORT Optimizer

virtual std::vector<std::string> get_algorithm_list() = 0;

virtual void set_algorithm(std::string name) = 0;
virtual bool set_algorithm(std::string name) = 0;

virtual void set_options(std::unordered_map<std::string, T_real> opt) = 0;

4 changes: 4 additions & 0 deletions src/io/file/hl_file_io.h
@@ -282,6 +282,10 @@ DLL_EXPORT bool init_analysis_job_detectors(data_struct::Analysis_Job<T_real>* a

override_params->dataset_directory = analysis_job->dataset_directory;
override_params->detector_num = detector_num;
if(analysis_job->output_dir.length() == 0)
{
analysis_job->output_dir = analysis_job->dataset_directory;
}

if (false == io::file::load_override_params(analysis_job->output_dir, detector_num, override_params))
{
