.travis.yml
language: python
# Use container-based infrastructure
sudo: false
env:
# Enable python 2 and python 3 builds
# DEPS=full: build optional dependencies: pandas, nitime, statsmodels,
# scikit-learn, patsy, nibabel, pillow;
# in the case of Python 2, also mayavi, traits, pysurfer
# DEPS=minimal: don't build optional dependencies; tests that require those
# dependencies are supposed to be skipped
#
# Note that we don't run coverage on Py3k anyway because it slows our tests
# by a factor of 2 (!), so we make this our "from install dir" run.
#
# If we change the old-version run to a Python version other than 2.6, then we
# need to update mne.utils.clean_warning_registry.
#
# Run one build (3.5) with a non-default stim channel to make sure our
# tests are explicit about channels.
#
# Must force the libpng version to avoid a libpng.so.15 error (MPL 1.1 needs it)
#
# Conda currently has a packaging bug with mayavi/traits/numpy where 1.10 can't be used
# but breaks sklearn on install; hopefully the NUMPY=1.9 pin on the 2.7 full build can eventually be removed
- PYTHON=2.7 DEPS=full TEST_LOCATION=src NUMPY="=1.9" SCIPY="=0.17"
- PYTHON=2.7 DEPS=nodata TEST_LOCATION=src MNE_DONTWRITE_HOME=true MNE_FORCE_SERIAL=true MNE_SKIP_NETWORK_TEST=1 # also runs flake8
- PYTHON=3.5 DEPS=full TEST_LOCATION=install MNE_STIM_CHANNEL=STI101
- PYTHON=2.6 DEPS=full TEST_LOCATION=src NUMPY="=1.7" SCIPY="=0.11" MPL="=1.1" LIBPNG="=1.5" SKLEARN="=0.11" PANDAS="=0.8"
- PYTHON=2.7 DEPS=minimal TEST_LOCATION=src
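# Each line above defines one build in the Travis matrix; the version pins are
# consumed by the conda calls in the install step, e.g. NUMPY="=1.9" makes
# "numpy$NUMPY" expand to "numpy=1.9" so conda installs that exact minor series.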
# Set up anaconda
before_install:
- wget -q http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh -O miniconda.sh
- chmod +x miniconda.sh
- ./miniconda.sh -b -p /home/travis/miniconda
- export PATH=/home/travis/miniconda/bin:$PATH
- conda update --yes --quiet conda
# We need to create a (fake) display on Travis (allows Mayavi tests to run)
- export DISPLAY=:99.0
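# Xvfb provides a virtual X server on display :99 with a single 1400x900, 24-bit
# screen; "+extension GLX" enables OpenGL so Mayavi/VTK can render offscreen.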
- /sbin/start-stop-daemon --start --quiet --pidfile /tmp/custom_xvfb_99.pid --make-pidfile --background --exec /usr/bin/Xvfb -- :99 -screen 0 1400x900x24 -ac +extension GLX +render -noreset
install:
- conda create -n testenv --yes pip python=$PYTHON
- source activate testenv
- ENSURE_PACKAGES="numpy$NUMPY scipy$SCIPY matplotlib$MPL libpng$LIBPNG"
- conda install --yes --quiet $ENSURE_PACKAGES nose coverage
# We have to repeat e.g. numpy$NUMPY so that the recommended (higher) versions
# are not automatically installed by the later "conda install" calls!
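# For example, "conda install scikit-learn" on its own could pull in a newer
# numpy; repeating "numpy$NUMPY" in every call keeps the pinned version in place.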
- if [ "${DEPS}" == "full" ]; then
curl http://lester.ilabs.uw.edu/files/minimal_cmds.tar.gz | tar xz;
export MNE_ROOT="${PWD}/minimal_cmds";
export NEUROMAG2FT_ROOT="${PWD}/minimal_cmds/bin";
source ${MNE_ROOT}/bin/mne_setup_sh;
conda install --yes --quiet $ENSURE_PACKAGES pandas$PANDAS scikit-learn$SKLEARN patsy h5py pillow;
pip install -q joblib nibabel;
if [ "${PYTHON}" == "3.5" ]; then
conda install --yes --quiet $ENSURE_PACKAGES ipython;
else
conda install --yes --quiet $ENSURE_PACKAGES ipython==1.1.0 statsmodels pandas$PANDAS;
pip install -q nitime;
if [ "${PYTHON}" == "2.7" ]; then
conda install --yes --quiet $ENSURE_PACKAGES mayavi traits;
pip install -q pysurfer faulthandler;
fi;
fi;
fi;
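# (The minimal_cmds tarball above appears to provide a stripped-down MNE-C
# install; MNE_ROOT/NEUROMAG2FT_ROOT and mne_setup_sh expose its command-line
# tools to the tests that shell out to them.)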
- if [ "${DEPS}" == "nodata" ]; then
pip install -q flake8;
wget -q https://github.com/lucasdemarchi/codespell/archive/v1.8.tar.gz;
tar xzf v1.8.tar.gz;
cp codespell-1.8/codespell.py ~/miniconda/envs/testenv/bin;
rm v1.8.tar.gz;
rm -r codespell-1.8;
fi;
- pip install -q coveralls nose-timer
# Check that the installed versions of the major packages match the requested pins
- NP_VERSION=`python -c 'import numpy; print(numpy.__version__)'`
- if [ -n "$NUMPY" ] && [ "${NUMPY:(-3)}" != "${NP_VERSION::3}" ]; then
echo "Incorrect numpy version $NP_VERSION";
exit 1;
fi;
- SP_VERSION=`python -c 'import scipy; print(scipy.__version__)'`
- if [ -n "$SCIPY" ] && [ "${SCIPY:(-4)}" != "${SP_VERSION::4}" ]; then
echo "Incorrect scipy version $SP_VERSION";
exit 1;
fi;
- MPL_VERSION=`python -c 'import matplotlib; print(matplotlib.__version__)'`
- if [ -n "$MPL" ] && [ "${MPL:(-3)}" != "${MPL_VERSION::3}" ]; then
echo "Incorrect matplotlib version $MPL_VERSION";
exit 1;
fi;
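# The checks above compare the pins against installed versions via bash substring
# expansion, e.g. with NUMPY="=1.9" and numpy 1.9.3 installed:
#   ${NUMPY:(-3)}    -> "1.9"  (last three characters of "=1.9")
#   ${NP_VERSION::3} -> "1.9"  (first three characters of "1.9.3")
# so a build fails fast if conda resolved a different minor version.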
# Suppress the parallel outputs for logging cleanliness
- export MNE_LOGGING_LEVEL=warning
- python setup.py build
- python setup.py install
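# Smoke-test a few of the installed "mne" console scripts by asking each for --help.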
- myscripts='browse_raw bti2fiff surf2bem'
- for script in $myscripts; do mne $script --help; done;
- SRC_DIR=$(pwd)
- cd ~
# Trigger download of testing data. Note that
# the testing dataset has been constructed to contain the necessary
# files to act as a FREESURFER_HOME for the coreg tests
- if [ "${DEPS}" != "nodata" ]; then
python -c 'import mne; mne.datasets.testing.data_path(verbose=True)';
if [ "${DEPS}" == "full" ]; then
export FREESURFER_HOME=$(python -c 'import mne; print(mne.datasets.testing.data_path())');
fi;
else
export MNE_SKIP_TESTING_DATASET_TESTS=true;
fi;
- MNE_DIR=$(python -c 'import mne;print(mne.__path__[0])')
# We run two versions: one out of the source directory (that makes
# coveralls coverage work), and one out of the install directory (that
# ensures we have included all necessary files).
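# For the install run, the large test data directories are presumably not part
# of the installed package, so they are symlinked in from the source checkout.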
- if [ "${TEST_LOCATION}" == "install" ]; then
ln -s ${SRC_DIR}/mne/io/tests/data ${MNE_DIR}/io/tests/data;
ln -s ${SRC_DIR}/mne/io/bti/tests/data ${MNE_DIR}/io/bti/tests/data;
ln -s ${SRC_DIR}/mne/io/edf/tests/data ${MNE_DIR}/io/edf/tests/data;
ln -s ${SRC_DIR}/mne/io/kit/tests/data ${MNE_DIR}/io/kit/tests/data;
ln -s ${SRC_DIR}/mne/io/brainvision/tests/data ${MNE_DIR}/io/brainvision/tests/data;
ln -s ${SRC_DIR}/mne/io/egi/tests/data ${MNE_DIR}/io/egi/tests/data;
ln -s ${SRC_DIR}/mne/io/nicolet/tests/data ${MNE_DIR}/io/nicolet/tests/data;
ln -s ${SRC_DIR}/mne/preprocessing/tests/data ${MNE_DIR}/preprocessing/tests/data;
ln -s ${SRC_DIR}/setup.cfg ${MNE_DIR}/../setup.cfg;
ln -s ${SRC_DIR}/.coveragerc ${MNE_DIR}/../.coveragerc;
cd ${MNE_DIR}/../;
else
cd ${SRC_DIR};
fi;
- if [ "${PYTHON}" != "3.5" ]; then
COVERAGE=--with-coverage;
else
COVERAGE=;
fi;
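# COVERAGE stays empty on 3.5, so the nosetests call below runs without the
# coverage plugin there.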
script:
- nosetests -a '!ultra_slow_test' --with-timer --timer-top-n 30 --verbosity=2 $COVERAGE
- if [ "${DEPS}" == "nodata" ]; then
make flake;
fi;
- if [ "${DEPS}" == "nodata" ]; then
make codespell-error;
fi;
after_success:
# Need to run from the source dir to execute "git" commands
# Coverage not collected for 3.5, so don't report it
- if [ "${TEST_LOCATION}" == "src" ] && [ "${PYTHON}" != "3.5" ]; then
echo "Running coveralls";
cd ${SRC_DIR};
coveralls;
fi;