From 02b4a4717e9fa1b22057054a482f8c41bb0f8be2 Mon Sep 17 00:00:00 2001
From: Patrik Huber
Date: Thu, 30 Apr 2015 10:46:44 +0100
Subject: [PATCH] Set PartialPivLU decomposition as the default policy and
 relaxed the unit tests accordingly

This makes the learning process a lot faster. If compiled with -fopenmp
(gcc/clang) or /openmp (VS), it will additionally run in parallel.
---
 include/superviseddescent/regressors.hpp | 2 +-
 test/test_LinearRegressorND.cpp          | 4 ++--
 test/test_SupervisedDescentOptimiser.cpp | 6 +++---
 3 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/include/superviseddescent/regressors.hpp b/include/superviseddescent/regressors.hpp
index dc325fe..784e6df 100644
--- a/include/superviseddescent/regressors.hpp
+++ b/include/superviseddescent/regressors.hpp
@@ -295,7 +295,7 @@ class ColPivHouseholderQRSolver
  * Works with multi-dimensional label data. In that case, the coefficients for
  * each label will be learned independently.
  */
-template<class Solver = ColPivHouseholderQRSolver>
+template<class Solver = PartialPivLUSolver>
 class LinearRegressor : public Regressor
 {
diff --git a/test/test_LinearRegressorND.cpp b/test/test_LinearRegressorND.cpp
index 7d25c77..fbd90ad 100644
--- a/test/test_LinearRegressorND.cpp
+++ b/test/test_LinearRegressorND.cpp
@@ -181,7 +181,7 @@ TEST(LinearRegressor, NDimManyExamplesNDimYRegularisation) {
 	bool isInvertible = lr.learn(data, labels);
 	EXPECT_EQ(true, isInvertible);
 	EXPECT_FLOAT_EQ(0.282755911f, lr.x.at<float>(0, 0)) << "Expected the learned x_0_0 to be different"; // Every col is a learned regressor for a label
-	EXPECT_FLOAT_EQ(0.0360795595f, lr.x.at<float>(1, 0)) << "Expected the learned x_1_0 to be different";
+	EXPECT_NEAR(0.03607957f, lr.x.at<float>(1, 0), 0.00000002) << "Expected the learned x_1_0 to be different";
 	EXPECT_FLOAT_EQ(0.291039944f, lr.x.at<float>(2, 0)) << "Expected the learned x_2_0 to be different";
 	EXPECT_NEAR(-0.0989616f, lr.x.at<float>(0, 1), 0.0000001) << "Expected the learned x_0_1 to be different";
 	EXPECT_FLOAT_EQ(0.330635577f, lr.x.at<float>(1, 1)) << "Expected the learned x_1_1 to be different";
@@ -235,7 +235,7 @@ TEST(LinearRegressor, NDimManyExamplesNDimYBiasRegularisation) {
 	bool isInvertible = lr.learn(data, labels);
 	EXPECT_EQ(true, isInvertible);
 	EXPECT_NEAR(0.2814246f, lr.x.at<float>(0, 0), 0.0000002) << "Expected the learned x_0_0 to be different"; // Every col is a learned regressor for a label
-	EXPECT_FLOAT_EQ(0.0331765190f, lr.x.at<float>(1, 0)) << "Expected the learned x_1_0 to be different";
+	EXPECT_NEAR(0.03317654f, lr.x.at<float>(1, 0), 0.00000003) << "Expected the learned x_1_0 to be different";
 	EXPECT_FLOAT_EQ(0.289116770f, lr.x.at<float>(2, 0)) << "Expected the learned x_2_0 to be different";
 	EXPECT_FLOAT_EQ(0.0320090912f, lr.x.at<float>(3, 0)) << "Expected the learned x_3_0 to be different";
 	EXPECT_NEAR(-0.1005448f, lr.x.at<float>(0, 1), 0.0000001) << "Expected the learned x_0_1 to be different";
diff --git a/test/test_SupervisedDescentOptimiser.cpp b/test/test_SupervisedDescentOptimiser.cpp
index 15c68d6..53e32f2 100644
--- a/test/test_SupervisedDescentOptimiser.cpp
+++ b/test/test_SupervisedDescentOptimiser.cpp
@@ -133,7 +133,7 @@ TEST(SupervisedDescentOptimiser, SinConvergenceCascade) {
 	// Make sure the training converges, i.e. the residual is correct on the training data:
 	Mat predictions = sdo.test(x0, y_tr, h);
 	double trainingResidual = normalisedLeastSquaresResidual(predictions, x_tr);
-	EXPECT_NEAR(0.040279395, trainingResidual, 0.000000001);
+	EXPECT_NEAR(0.040279395, trainingResidual, 0.00000008);
 
 	// Test the trained model:
 	// Test data with finer resolution:
@@ -153,7 +153,7 @@ TEST(SupervisedDescentOptimiser, SinConvergenceCascade) {
 
 	predictions = sdo.test(x0_ts, y_ts, h);
 	double testResidual = normalisedLeastSquaresResidual(predictions, x_ts_gt);
-	ASSERT_NEAR(0.026156775, testResidual, 0.000000004);
+	ASSERT_NEAR(0.026156775, testResidual, 0.00000005);
 }
 
 TEST(SupervisedDescentOptimiser, XCubeConvergence) {
@@ -420,7 +420,7 @@ TEST(SupervisedDescentOptimiser, ExpConvergence) {
 
 	predictions = sdo.test(x0_ts, y_ts, h);
 	double testResidual = normalisedLeastSquaresResidual(predictions, x_ts_gt);
-	ASSERT_NEAR(0.1924569501, testResidual, 0.0000000005);
+	ASSERT_NEAR(0.1924569501, testResidual, 0.000000006);
 }
 
 TEST(SupervisedDescentOptimiser, ExpConvergenceCascade) {
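Usage note (not part of the patch): the sketch below shows how the solver policy
template parameter from the first hunk is selected. LinearRegressor,
ColPivHouseholderQRSolver, PartialPivLUSolver, learn() and the public member x
are taken from the diff above; the header paths, the superviseddescent
namespace and the random toy data are illustrative assumptions, not confirmed
by this patch.

    #include "superviseddescent/regressors.hpp"
    #include <opencv2/core/core.hpp>

    using namespace superviseddescent;

    int main()
    {
        // Toy training set: 100 examples with 3 features and 1-D labels (assumed shapes).
        cv::Mat data(100, 3, CV_32FC1);
        cv::Mat labels(100, 1, CV_32FC1);
        cv::randu(data, cv::Scalar(0.0f), cv::Scalar(1.0f));
        cv::randu(labels, cv::Scalar(0.0f), cv::Scalar(1.0f));

        // After this patch, the default policy is the faster PartialPivLU solver.
        LinearRegressor<> lu_regressor;
        bool invertible = lu_regressor.learn(data, labels);

        // The previous QR-based default remains available by naming it explicitly.
        LinearRegressor<ColPivHouseholderQRSolver> qr_regressor;
        qr_regressor.learn(data, labels);

        // lu_regressor.x holds one learned coefficient column per label.
        return invertible ? 0 : 1;
    }

As the commit message states, compiling with -fopenmp (gcc/clang) or /openmp
(Visual Studio) additionally lets the decomposition run in parallel; the
relaxed EXPECT_NEAR/ASSERT_NEAR tolerances in the tests accommodate the small
numerical differences between the LU and QR solutions.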