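"""Pytest configuration for yt's answer tests.

This conftest registers the answer-testing command-line and ini options,
skips tests based on those options, and provides the ``hashing`` fixture
that stores or compares answer hashes, along with small class-scoped
fixtures used with ``indirect`` parametrization.
"""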

import os
import shutil
import tempfile
from pathlib import Path

import pytest
import yaml

from yt.config import ytcfg
from yt.utilities.answer_testing.testing_utilities import (
    _compare_raw_arrays,
    _hash_results,
    _save_raw_arrays,
    _save_result,
    _streamline_for_io,
    data_dir_load,
)


def pytest_addoption(parser):
    """
    Registers the command-line and ini-file options used by the answer tests.
    """
parser.addoption(
"--with-answer-testing",
action="store_true",
)
    parser.addoption(
        "--answer-store",
        action="store_true",
    )
    parser.addoption(
        "--answer-big-data",
        action="store_true",
    )
parser.addoption(
"--answer-raw-arrays",
action="store_true",
)
parser.addoption(
"--raw-answer-store",
action="store_true",
)
parser.addoption(
"--force-overwrite",
action="store_true",
)
parser.addoption(
"--no-hash",
action="store_true",
)
parser.addoption("--local-dir", default=None, help="Where answers are saved.")
# Tell pytest about the local-dir option in the ini files. This
# option is used for creating the answer directory on CI
parser.addini(
"local-dir",
default=str(Path(__file__).parent / "answer-store"),
help="answer directory.",
)
parser.addini(
"test_data_dir",
default=ytcfg.get("yt", "test_data_dir"),
help="Directory where data for tests is stored.",
)
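

# Example invocations (illustrative; the answers directory is hypothetical):
#
#   pytest --with-answer-testing --answer-store --local-dir=~/yt_answers
#   pytest --with-answer-testing --answer-raw-arrays --raw-answer-store
#   pytest --with-answer-testing --answer-big-data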


def pytest_configure(config):
    r"""
    Marks yt as running within pytest and registers the custom marks
    used by the answer tests.
    """
ytcfg["yt", "internals", "within_pytest"] = True
# Register custom marks for answer tests and big data
config.addinivalue_line("markers", "answer_test: Run the answer tests.")
    config.addinivalue_line(
        "markers", "big_data: Run answer tests that require large data files."
    )


def pytest_collection_modifyitems(config, items):
    r"""
    Decide which tests to skip based on the command-line options.
    """
# Set up the skip marks
skip_answer = pytest.mark.skip(reason="--with-answer-testing not set.")
skip_unit = pytest.mark.skip(reason="Running answer tests, so skipping unit tests.")
skip_big = pytest.mark.skip(reason="--answer-big-data not set.")
# Loop over every collected test function
    for item in items:
        # If it's an answer test and --with-answer-testing hasn't been set,
        # skip it
        if "answer_test" in item.keywords and not config.getoption(
            "--with-answer-testing"
        ):
            item.add_marker(skip_answer)
        # If it's an answer test that requires big data and --answer-big-data
        # hasn't been set, skip it
        if (
            "big_data" in item.keywords
            and config.getoption("--with-answer-testing")
            and not config.getoption("--answer-big-data")
        ):
            item.add_marker(skip_big)
if "answer_test" not in item.keywords and config.getoption(
"--with-answer-testing"
):
item.add_marker(skip_unit)
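

# Tests opt in to these marks as in the following sketch (the test name and
# its ds argument are hypothetical):
#
#     @pytest.mark.answer_test
#     @pytest.mark.big_data
#     def test_big_dataset(ds):
#         ...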


def _param_list(request):
    r"""
    Collects the non-ds, non-fixture function arguments so they can be
    saved to the answer file.
    """
    # pytest treats parametrized arguments as fixtures, so there's no
    # clean way to separate them from the other fixtures (that I know
    # of), so we do it explicitly
blacklist = [
"hashing",
"answer_file",
"request",
"answer_compare",
"temp_dir",
"orbit_traj",
"etc_traj",
]
test_params = {}
for key, val in request.node.funcargs.items():
if key not in blacklist:
# For plotwindow, the callback arg is a tuple and the second
# element contains a memory address, so we need to drop it.
# The first element is the callback name, which is all that's
# needed
if key == "callback":
val = val[0]
test_params[key] = str(val)
# Convert python-specific data objects (such as tuples) to a more
# io-friendly format (in order to not have python-specific anchors
# in the answer yaml file)
test_params = _streamline_for_io(test_params)
return test_params


def _get_answer_files(request):
    """
    Gets the paths where the hashed and raw answers are saved.
    """
answer_file = f"{request.cls.__name__}_{request.cls.answer_version}.yaml"
raw_answer_file = f"{request.cls.__name__}_{request.cls.answer_version}.h5"
    # Add the local-dir portion of the path. A command-line value overrides
    # the ini-file value
    local_dir = request.config.getoption("--local-dir")
    if local_dir is None:
        local_dir = request.config.getini("local-dir")
    local_dir = os.path.expanduser(local_dir)
    answer_file = os.path.join(local_dir, answer_file)
    raw_answer_file = os.path.join(local_dir, raw_answer_file)
# Make sure we don't overwrite unless we mean to
overwrite = request.config.getoption("--force-overwrite")
storing = request.config.getoption("--answer-store")
raw_storing = request.config.getoption("--raw-answer-store")
raw = request.config.getoption("--answer-raw-arrays")
if os.path.exists(answer_file) and storing and not overwrite:
raise FileExistsError(
"Use `--force-overwrite` to overwrite an existing answer file."
)
if os.path.exists(raw_answer_file) and raw_storing and raw and not overwrite:
raise FileExistsError(
"Use `--force-overwrite` to overwrite an existing raw answer file."
)
# If we do mean to overwrite, do so here by deleting the original file
if os.path.exists(answer_file) and storing and overwrite:
os.remove(answer_file)
if os.path.exists(raw_answer_file) and raw_storing and raw and overwrite:
os.remove(raw_answer_file)
return answer_file, raw_answer_file
@pytest.fixture(scope="function")
def hashing(request):
r"""
Handles initialization, generation, and saving of answer test
result hashes.
"""
no_hash = request.config.getoption("--no-hash")
store_hash = request.config.getoption("--answer-store")
raw = request.config.getoption("--answer-raw-arrays")
raw_store = request.config.getoption("--raw-answer-store")
    # Caching the file paths on the test class means _get_answer_files
    # (and its overwrite check) runs only once per class, even though
    # this fixture has function scope
if request.cls.answer_file is None:
request.cls.answer_file, request.cls.raw_answer_file = _get_answer_files(
request
)
    # Load the saved answers if we're comparing. We don't do this for the
    # raw answers because those are huge
    if not no_hash and not store_hash and request.cls.saved_hashes is None:
        try:
            with open(request.cls.answer_file) as fd:
                request.cls.saved_hashes = yaml.safe_load(fd)
        except FileNotFoundError:
            # On Travis and AppVeyor only a minimal set of answer tests is
            # run, which means that, for most answer tests, there won't be
            # an existing answer file when comparing. There is currently no
            # list of the minimal answer tests, so they can't be marked.
            # As such, if we're comparing and the file of saved hashes isn't
            # found, we just skip the test. We skip before the test runs to
            # save time
            pytest.skip("Answer file not found.")
    request.cls.hashes = {}
    yield
# Get arguments and their values passed to the test (e.g., axis, field, etc.)
params = _param_list(request)
    # Hash the test results into a local variable so request.cls.hashes
    # still holds the raw data, in case we want to work with it
hashes = _hash_results(request.cls.hashes)
# Add the other test parameters
hashes.update(params)
# Add the function name as the "master" key to the hashes dict
hashes = {request.node.name: hashes}
# Save hashes
if not no_hash and store_hash:
_save_result(hashes, request.cls.answer_file)
# Compare hashes
elif not no_hash and not store_hash:
try:
assert hashes == request.cls.saved_hashes
except AssertionError:
pytest.fail(f"Comparison failure: {request.node.name}", pytrace=False)
# Save raw data
if raw and raw_store:
_save_raw_arrays(
request.cls.hashes, request.cls.raw_answer_file, request.node.name
)
# Compare raw data. This is done one test at a time because the
# arrays can get quite large and storing everything in memory would
# be bad
if raw and not raw_store:
_compare_raw_arrays(
request.cls.hashes, request.cls.raw_answer_file, request.node.name
)
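

# Illustrative sketch of a test class that uses the ``hashing`` fixture; the
# class name, test, and stored array are hypothetical. The fixture expects
# the class attributes shown (it reads answer_file, saved_hashes, and
# answer_version, and fills in hashes and raw_answer_file itself):
#
#     @pytest.mark.answer_test
#     @pytest.mark.usefixtures("hashing")
#     class TestExample:
#         answer_file = None
#         saved_hashes = None
#         answer_version = "000"
#
#         def test_something(self, ds):
#             self.hashes.update({"field_values": some_array})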
@pytest.fixture(scope="function")
def temp_dir():
r"""
Creates a temporary directory needed by certain tests.
"""
curdir = os.getcwd()
if int(os.environ.get("GENERATE_YTDATA", 0)):
tmpdir = os.getcwd()
else:
tmpdir = tempfile.mkdtemp()
os.chdir(tmpdir)
yield tmpdir
os.chdir(curdir)
if tmpdir != curdir:
shutil.rmtree(tmpdir)
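

# Illustrative use (the test name is hypothetical); the fixture chdirs into
# the temporary directory before the test runs:
#
#     def test_save_output(temp_dir):
#         with open("scratch.txt", "w") as f:
#             f.write("data")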
@pytest.fixture(scope="class")
def ds(request):
# data_dir_load can take the cls, args, and kwargs. These optional
# arguments, if present, are given in a dictionary as the second
# element of the list
if isinstance(request.param, typing.Sequence):
ds_fn, opts = request.param
else:
ds_fn = request.param
opts = {}
try:
return data_dir_load(
ds_fn, cls=opts.get("cls"), args=opts.get("args"), kwargs=opts.get("kwargs")
)
except FileNotFoundError:
return pytest.skip(f"Data file: `{request.param}` not found.")
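

# Illustrative indirect parametrization of the ``ds`` fixture (the dataset
# path is hypothetical and must live under test_data_dir):
#
#     @pytest.mark.parametrize("ds", ["MyDataset/data_0001"], indirect=True)
#     def test_load(ds):
#         assert ds is not None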
@pytest.fixture(scope="class")
def field(request):
"""
Fixture for returning the field. Needed because indirect=True is
used for loading the datasets.
"""
return request.param
@pytest.fixture(scope="class")
def dobj(request):
"""
Fixture for returning the ds_obj. Needed because indirect=True is
used for loading the datasets.
"""
return request.param
@pytest.fixture(scope="class")
def axis(request):
"""
Fixture for returning the axis. Needed because indirect=True is
used for loading the datasets.
"""
return request.param
@pytest.fixture(scope="class")
def weight(request):
"""
Fixture for returning the weight_field. Needed because
indirect=True is used for loading the datasets.
"""
return request.param
@pytest.fixture(scope="class")
def ds_repr(request):
"""
Fixture for returning the string representation of a dataset.
Needed because indirect=True is used for loading the datasets.
"""
return request.param
@pytest.fixture(scope="class")
def Npart(request):
"""
Fixture for returning the number of particles in a dataset.
Needed because indirect=True is used for loading the datasets.
"""
return request.param