Skip to content

Commit

Permalink
Set PartialPivLU decomposition as the default policy and relaxed the unit tests accordingly
Browse files Browse the repository at this point in the history

This makes the learning process a lot faster.
If compiled with -fopenmp (gcc/clang) or /openmp (VS), it will additionally run in parallel.
  • Loading branch information
patrikhuber committed Apr 30, 2015
1 parent c7ef596 commit 02b4a47
Show file tree
Hide file tree
Showing 3 changed files with 6 additions and 6 deletions.
2 changes: 1 addition & 1 deletion include/superviseddescent/regressors.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -295,7 +295,7 @@ class ColPivHouseholderQRSolver
* Works with multi-dimensional label data. In that case, the coefficients for
* each label will be learned independently.
*/
template<class Solver = ColPivHouseholderQRSolver>
template<class Solver = PartialPivLUSolver>
class LinearRegressor : public Regressor
{

Expand Down
4 changes: 2 additions & 2 deletions test/test_LinearRegressorND.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -181,7 +181,7 @@ TEST(LinearRegressor, NDimManyExamplesNDimYRegularisation) {
bool isInvertible = lr.learn(data, labels);
EXPECT_EQ(true, isInvertible);
EXPECT_FLOAT_EQ(0.282755911f, lr.x.at<float>(0, 0)) << "Expected the learned x_0_0 to be different"; // Every col is a learned regressor for a label
EXPECT_FLOAT_EQ(0.0360795595f, lr.x.at<float>(1, 0)) << "Expected the learned x_1_0 to be different";
EXPECT_NEAR(0.03607957f, lr.x.at<float>(1, 0), 0.00000002) << "Expected the learned x_1_0 to be different";
EXPECT_FLOAT_EQ(0.291039944f, lr.x.at<float>(2, 0)) << "Expected the learned x_2_0 to be different";
EXPECT_NEAR(-0.0989616f, lr.x.at<float>(0, 1), 0.0000001) << "Expected the learned x_0_1 to be different";
EXPECT_FLOAT_EQ(0.330635577f, lr.x.at<float>(1, 1)) << "Expected the learned x_1_1 to be different";
Expand Down Expand Up @@ -235,7 +235,7 @@ TEST(LinearRegressor, NDimManyExamplesNDimYBiasRegularisation) {
bool isInvertible = lr.learn(data, labels);
EXPECT_EQ(true, isInvertible);
EXPECT_NEAR(0.2814246f, lr.x.at<float>(0, 0), 0.0000002) << "Expected the learned x_0_0 to be different"; // Every col is a learned regressor for a label
EXPECT_FLOAT_EQ(0.0331765190f, lr.x.at<float>(1, 0)) << "Expected the learned x_1_0 to be different";
EXPECT_NEAR(0.03317654f, lr.x.at<float>(1, 0), 0.00000003) << "Expected the learned x_1_0 to be different";
EXPECT_FLOAT_EQ(0.289116770f, lr.x.at<float>(2, 0)) << "Expected the learned x_2_0 to be different";
EXPECT_FLOAT_EQ(0.0320090912f, lr.x.at<float>(3, 0)) << "Expected the learned x_3_0 to be different";
EXPECT_NEAR(-0.1005448f, lr.x.at<float>(0, 1), 0.0000001) << "Expected the learned x_0_1 to be different";
Expand Down
6 changes: 3 additions & 3 deletions test/test_SupervisedDescentOptimiser.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -133,7 +133,7 @@ TEST(SupervisedDescentOptimiser, SinConvergenceCascade) {
// Make sure the training converges, i.e. the residual is correct on the training data:
Mat predictions = sdo.test(x0, y_tr, h);
double trainingResidual = normalisedLeastSquaresResidual(predictions, x_tr);
EXPECT_NEAR(0.040279395, trainingResidual, 0.000000001);
EXPECT_NEAR(0.040279395, trainingResidual, 0.00000008);

// Test the trained model:
// Test data with finer resolution:
Expand All @@ -153,7 +153,7 @@ TEST(SupervisedDescentOptimiser, SinConvergenceCascade) {

predictions = sdo.test(x0_ts, y_ts, h);
double testResidual = normalisedLeastSquaresResidual(predictions, x_ts_gt);
ASSERT_NEAR(0.026156775, testResidual, 0.000000004);
ASSERT_NEAR(0.026156775, testResidual, 0.00000005);
}

TEST(SupervisedDescentOptimiser, XCubeConvergence) {
Expand Down Expand Up @@ -420,7 +420,7 @@ TEST(SupervisedDescentOptimiser, ExpConvergence) {

predictions = sdo.test(x0_ts, y_ts, h);
double testResidual = normalisedLeastSquaresResidual(predictions, x_ts_gt);
ASSERT_NEAR(0.1924569501, testResidual, 0.0000000005);
ASSERT_NEAR(0.1924569501, testResidual, 0.000000006);
}

TEST(SupervisedDescentOptimiser, ExpConvergenceCascade) {
Expand Down

0 comments on commit 02b4a47

Please sign in to comment.