diff --git a/.flake8 b/.flake8
index 8b71cc2e..fe17d85f 100644
--- a/.flake8
+++ b/.flake8
@@ -1,3 +1,3 @@
 [flake8]
 ignore = E401,W503,W504
-max-line-length = 80
+max-line-length = 100
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 2d9953f8..cb129cf7 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -89,7 +89,7 @@
 #
 # This is also used if you do content translation via gettext catalogs.
 # Usually you set "language" from the command line for these cases.
-language = None
+language = 'en'

 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
diff --git a/examples/plot_swimmer.py b/examples/plot_swimmer.py
index e1ace097..1c39eccb 100644
--- a/examples/plot_swimmer.py
+++ b/examples/plot_swimmer.py
@@ -73,7 +73,7 @@
 # ``min_pts`` should be half of the number of bootstraps.

 nboot = 20
-min_pts = nboot / 2
+min_pts = max(nboot // 2, 1)
 ranks = [16]

 shape = corrupted.shape
diff --git a/src/pyuoi/decomposition/NMF.py b/src/pyuoi/decomposition/NMF.py
index 27a62285..3b49e339 100644
--- a/src/pyuoi/decomposition/NMF.py
+++ b/src/pyuoi/decomposition/NMF.py
@@ -119,7 +119,7 @@ def __initialize(self, **kwargs):
                 raise ValueError('dbscan must be an instance, not a class.')
             self.cluster = cluster
         else:
-            self.cluster = DBSCAN(min_samples=self.n_boots / 2)
+            self.cluster = DBSCAN(min_samples=max(self.n_boots // 2, 1))

         # initialize non-negative regression solver
         if nnreg is None:
@@ -423,7 +423,7 @@ def __init__(

         # create DBSCAN solver
         if db_min_samples is None:
-            db_min_samples = n_boots / 2
+            db_min_samples = max(n_boots // 2, 1)
         dbscan = DBSCAN(eps=db_eps,
                         min_samples=db_min_samples,
                         metric=db_metric,
diff --git a/tests/test_mpi/test_mpi_utils.py b/tests/test_mpi/test_mpi_utils.py
index 59e79857..2116b71b 100644
--- a/tests/test_mpi/test_mpi_utils.py
+++ b/tests/test_mpi/test_mpi_utils.py
@@ -128,4 +128,4 @@ def test_Gatherv_random_rows():

     data = Gatherv_rows(data, comm, root)
     if rank == root:
-        assert(data.shape[0] == np.sum(sizes))
+        assert data.shape[0] == np.sum(sizes)
diff --git a/tests/test_nmf.py b/tests/test_nmf.py
index 31132fa7..56bcbce7 100644
--- a/tests/test_nmf.py
+++ b/tests/test_nmf.py
@@ -39,7 +39,7 @@ def test_UoI_NMF_Base_initialization():
     assert_array_equal(uoi.ranks, np.arange(2, ranks + 1))
     assert uoi.nmf.solver == 'mu'
     assert uoi.nmf.beta_loss == 'kullback-leibler'
-    assert uoi.cluster.min_samples == n_boots / 2
+    assert uoi.cluster.min_samples == max(n_boots // 2, 1)


 @pytest.mark.fast
@@ -51,7 +51,7 @@ def test_UoI_NMF_initialization():
     assert_array_equal(uoi.ranks, np.arange(2, ranks + 1))
     assert uoi.nmf.solver == 'mu'
     assert uoi.nmf.beta_loss == 'kullback-leibler'
-    assert uoi.cluster.min_samples == n_boots / 2
+    assert uoi.cluster.min_samples == max(n_boots // 2, 1)
     assert uoi.cons_meth == np.mean
diff --git a/tests/test_scores.py b/tests/test_scores.py
index e316b320..9c4b8684 100644
--- a/tests/test_scores.py
+++ b/tests/test_scores.py
@@ -101,7 +101,7 @@ def test_LinearRegressor_scoring_defaults():
     support = np.ones(X.shape[1]).astype(bool)

     # r2 - must use test data
     uoi = UoI_Lasso(estimation_score='r2')
-    assert(uoi._estimation_target == 1)
+    assert uoi._estimation_target == 1
     score = uoi._score_predictions('r2', fitter, X, y, support,
                                    (train_idxs, test_idxs))
@@ -111,14 +111,14 @@
                             fitter.predict(X_train[:, support]))
     # BIC - must use train data
     uoi = UoI_Lasso(estimation_score='BIC')
-    assert(uoi._estimation_target == 0)
+    assert uoi._estimation_target == 0
     score = -1 * uoi._score_predictions('BIC', fitter, X, y, support,
                                         (train_idxs, test_idxs))
     assert_equal(BIC(ll, *X_train.T.shape), score)

     # AIC - must use train data
     uoi = UoI_Lasso(estimation_score='AIC')
-    assert(uoi._estimation_target == 0)
+    assert uoi._estimation_target == 0
     score = -1 * uoi._score_predictions('AIC', fitter, X, y, support,
                                         (train_idxs, test_idxs))
@@ -126,7 +126,7 @@

     # AICc - must use train data
     uoi = UoI_Lasso(estimation_score='AICc')
-    assert(uoi._estimation_target == 0)
+    assert uoi._estimation_target == 0
     score = -1 * uoi._score_predictions('AICc', fitter, X, y, support,
                                         (train_idxs, test_idxs))
@@ -158,7 +158,7 @@ def test_GeneralizedLinearRegressor_scoring_defaults():

     # acc - must use test data
     uoi = UoI_L1Logistic(estimation_score='acc')
-    assert(uoi._estimation_target == 1)
+    assert uoi._estimation_target == 1
     uoi.classes_ = np.unique(y)
     score = uoi._score_predictions('acc', fitter, X, y, support,
                                    (train_idxs, test_idxs))
@@ -166,7 +166,7 @@

     # log - must use test data. Note the sign difference
     uoi = UoI_L1Logistic(estimation_score='log')
-    assert(uoi._estimation_target == 1)
+    assert uoi._estimation_target == 1
     uoi.classes_ = np.unique(y)
     score = uoi._score_predictions('log', fitter, X, y, support,
                                    (train_idxs, test_idxs))
@@ -180,7 +180,7 @@
     total_ll = ll * X_train.shape[0]
     # BIC - must use train data
     uoi = UoI_L1Logistic(estimation_score='BIC')
-    assert(uoi._estimation_target == 0)
+    assert uoi._estimation_target == 0
     uoi.classes_ = np.unique(y)
     score = -1 * uoi._score_predictions('BIC', fitter, X, y, support,
                                         (train_idxs, test_idxs))
@@ -188,7 +188,7 @@

     # AIC
     uoi = UoI_L1Logistic(estimation_score='AIC')
-    assert(uoi._estimation_target == 0)
+    assert uoi._estimation_target == 0
     uoi.classes_ = np.unique(y)
     score = -1 * uoi._score_predictions('AIC', fitter, X, y, support,
                                         (train_idxs, test_idxs))
@@ -196,7 +196,7 @@

     # AICc
     uoi = UoI_L1Logistic(estimation_score='AICc')
-    assert(uoi._estimation_target == 0)
+    assert uoi._estimation_target == 0
     uoi.classes_ = np.unique(y)
     score = -1 * uoi._score_predictions('AICc', fitter, X, y, support,
                                         (train_idxs, test_idxs))
@@ -210,7 +210,7 @@ def test_estimation_target():

     uoi = UoI_Lasso(estimation_score='r2', estimation_target='train')
     # train gets converted to the index 0
-    assert(uoi._estimation_target == 0)
+    assert uoi._estimation_target == 0
     # Assess BIC on test data
     uoi = UoI_Lasso(estimation_score='BIC', estimation_target='test')
@@ -219,25 +219,25 @@
     uoi = UoI_ElasticNet(estimation_score='r2', estimation_target='train')
     # train gets converted to the index 0
-    assert(uoi._estimation_target == 0)
+    assert uoi._estimation_target == 0
     # Assess BIC on test data
     uoi = UoI_ElasticNet(estimation_score='BIC', estimation_target='test')
-    assert(uoi._estimation_target == 1)
+    assert uoi._estimation_target == 1

     uoi = UoI_L1Logistic(estimation_score='acc', estimation_target='train')
-    assert(uoi._estimation_target == 0)
+    assert uoi._estimation_target == 0

     uoi = UoI_L1Logistic(estimation_score='BIC', estimation_target='test')
-    assert(uoi._estimation_target == 1)
+    assert uoi._estimation_target == 1

     uoi = UoI_Poisson(estimation_score='acc', estimation_target='train')
-    assert(uoi._estimation_target == 0)
+    assert uoi._estimation_target == 0

     uoi = UoI_Poisson(estimation_score='BIC', estimation_target='test')
-    assert(uoi._estimation_target == 1)
+    assert uoi._estimation_target == 1
diff --git a/tests/test_uoi_lasso.py b/tests/test_uoi_lasso.py
index b9f54c3b..7ab60b44 100644
--- a/tests/test_uoi_lasso.py
+++ b/tests/test_uoi_lasso.py
@@ -262,10 +262,10 @@ def test_choice_of_solver():
     '''Tests whether one can correctly switch between solvers in UoI Lasso'''

     uoi1 = UoI_Lasso(solver='cd')
-    assert(isinstance(uoi1._selection_lm, Lasso))
+    assert isinstance(uoi1._selection_lm, Lasso)

     uoi2 = UoI_Lasso(solver='pyc')
-    assert(isinstance(uoi2._selection_lm, PycLasso))
+    assert isinstance(uoi2._selection_lm, PycLasso)


 @pytest.mark.skipif(pycasso is not None, reason='pycasso is installed')
@@ -275,7 +275,7 @@ def test_pycasso_error():

     with pytest.raises(ImportError):
         uoi2 = UoI_Lasso(solver='pyc')
-        assert(isinstance(uoi2._selection_lm, PycLasso))
+        assert isinstance(uoi2._selection_lm, PycLasso)


 @pytest.mark.skipif(pycasso is None, reason='pycasso not installed')
@@ -285,11 +285,11 @@ def test_pyclasso():
     pyclasso = PycLasso()
     # Test that we can set params correctly
     pyclasso.set_params(fit_intercept=True)
-    assert(pyclasso.fit_intercept)
+    assert pyclasso.fit_intercept
     pyclasso.set_params(max_iter=500)
-    assert(pyclasso.max_iter == 500)
+    assert pyclasso.max_iter == 500
     pyclasso.set_params(alphas=np.arange(100))
-    assert(np.array_equal(pyclasso.alphas, np.arange(100)))
+    assert np.array_equal(pyclasso.alphas, np.arange(100))

     # Test that spurious parameters are rejected
     try:
@@ -313,10 +313,10 @@ def test_pyclasso():
     alphas = _alpha_grid(X, y)
     pyclasso.set_params(alphas=alphas)
     pyclasso.fit(X, y)
-    assert(np.array_equal(pyclasso.coef_.shape, (100, 3)))
+    assert np.array_equal(pyclasso.coef_.shape, (100, 3))
     y_pred = pyclasso.predict(X)
     scores = np.array([r2_score(y, y_pred[:, j]) for j in range(100)])
-    assert(np.allclose(1, max(scores)))
+    assert np.allclose(1, max(scores))


 @pytest.mark.skipif(pycasso is None, reason='pycasso not installed')