# Copyright (c) 2015, Thomas P. Robitaille
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

# The code below includes code adapted from WCSAxes, which is released
# under a 3-clause BSD license and can be found here:
#
#   https://github.com/astrofrog/wcsaxes

import io
import os
import json
import shutil
import hashlib
import inspect
import tempfile
import warnings
import contextlib
from pathlib import Path
from functools import wraps
from urllib.request import urlopen

import pytest

from pytest_mpl.summary.html import generate_summary_basic_html, generate_summary_html

SUPPORTED_FORMATS = {'html', 'json', 'basic-html'}
SHAPE_MISMATCH_ERROR = """Error: Image dimensions did not match.
Expected shape: {expected_shape}
{expected_path}
Actual shape: {actual_shape}
{actual_path}"""


def _download_file(baseline, filename):
    # Note that baseline can be a comma-separated list of URLs that we can
    # then treat as mirrors
    for base_url in baseline.split(','):
        try:
            u = urlopen(base_url + filename)
            content = u.read()
        except Exception as e:
            warnings.warn('Downloading {0} failed: {1}'.format(base_url + filename, e))
        else:
            break
    else:
        raise Exception("Could not download baseline image from any of the "
                        "available URLs")
    result_dir = Path(tempfile.mkdtemp())
    filename = result_dir / 'downloaded'
    with open(str(filename), 'wb') as tmpfile:
        tmpfile.write(content)
    return Path(filename)
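
# Illustrative call (hypothetical URLs): with two comma-separated mirrors the
# second base URL is tried only if the first download fails. Note the bases
# are joined to the filename by plain concatenation, so they need a trailing
# slash:
#     _download_file('https://example.org/baseline/,https://mirror.example.org/baseline/',
#                    'test_image.png')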


def _hash_file(in_stream):
    """
    Hashes an already opened file.
    """
    in_stream.seek(0)
    buf = in_stream.read()
    hasher = hashlib.sha256()
    hasher.update(buf)
    return hasher.hexdigest()
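
# Example (illustrative): any seekable binary stream can be hashed, e.g.
#     digest = _hash_file(io.BytesIO(b'\x89PNG...'))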


def pathify(path):
    """
    Remove non-path safe characters.
    """
    path = Path(path)
    ext = ''
    if path.suffixes[-1] == '.png':
        ext = '.png'
        path = str(path).split(ext)[0]
    path = str(path)
    path = path.replace('[', '_').replace(']', '_')
    path = path.replace('/', '_')
    if path.endswith('_'):
        path = path[:-1]
    return Path(path + ext)
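
# Example (illustrative): parametrized test IDs become path-safe file names:
#     pathify('test_line[color/red].png') -> Path('test_line_color_red.png')
# Note this assumes the name has a suffix; `path.suffixes[-1]` raises
# IndexError for a bare name with no extension.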


def pytest_report_header(config, startdir):
    import matplotlib
    import matplotlib.ft2font
    return ["Matplotlib: {0}".format(matplotlib.__version__),
            "Freetype: {0}".format(matplotlib.ft2font.__freetype_version__)]


def pytest_addoption(parser):
    group = parser.getgroup("matplotlib image comparison")
    group.addoption('--mpl', action='store_true',
                    help="Enable comparison of matplotlib figures to reference files")
    group.addoption('--mpl-generate-path',
                    help="directory to generate reference images in, relative "
                         "to location where py.test is run", action='store')
    group.addoption('--mpl-generate-hash-library',
                    help="filepath to save a generated hash library, relative "
                         "to location where py.test is run", action='store')
    group.addoption('--mpl-baseline-path',
                    help="directory containing baseline images, relative to "
                         "location where py.test is run unless --mpl-baseline-relative is given. "
                         "This can also be a URL or a set of comma-separated URLs (in case "
                         "mirrors are specified)", action='store')
    group.addoption("--mpl-baseline-relative", help="interpret the baseline directory as "
                    "relative to the test location.", action="store_true")
    group.addoption('--mpl-hash-library',
                    help="json library of image hashes, relative to "
                         "location where py.test is run", action='store')
    group.addoption('--mpl-generate-summary', action='store',
                    help="Generate a summary report of any failed tests, "
                         "in --mpl-results-path. The type of the report should be "
                         "specified. Supported types are `html`, `json` and `basic-html`. "
                         "Multiple types can be specified separated by commas.")

    results_path_help = "directory for test results, relative to location where py.test is run"
    group.addoption('--mpl-results-path', help=results_path_help, action='store')
    parser.addini('mpl-results-path', help=results_path_help)

    results_always_help = ("Always generate result images, not just for failed tests. "
                           "This option is automatically applied when generating an HTML summary.")
    group.addoption('--mpl-results-always', action='store_true',
                    help=results_always_help)
    parser.addini('mpl-results-always', help=results_always_help)

    parser.addini('mpl-use-full-test-name', help="use fully qualified test name as the filename.",
                  type='bool')
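
# Typical invocations (illustrative; paths are hypothetical):
#     pytest --mpl                                # compare figures to baseline images
#     pytest --mpl-generate-path=tests/baseline   # (re)generate baseline images
#     pytest --mpl --mpl-generate-summary=html    # also write an HTML report of failures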


def pytest_configure(config):
    config.addinivalue_line('markers',
                            "mpl_image_compare: Compares matplotlib figures "
                            "against a baseline image")

    if (config.getoption("--mpl") or
            config.getoption("--mpl-generate-path") is not None or
            config.getoption("--mpl-generate-hash-library") is not None):

        baseline_dir = config.getoption("--mpl-baseline-path")
        generate_dir = config.getoption("--mpl-generate-path")
        generate_hash_lib = config.getoption("--mpl-generate-hash-library")
        results_dir = config.getoption("--mpl-results-path") or config.getini("mpl-results-path")
        hash_library = config.getoption("--mpl-hash-library")
        generate_summary = config.getoption("--mpl-generate-summary")
        results_always = (config.getoption("--mpl-results-always") or
                          config.getini("mpl-results-always"))

        if config.getoption("--mpl-baseline-relative"):
            baseline_relative_dir = config.getoption("--mpl-baseline-path")
        else:
            baseline_relative_dir = None

        # Note that results_dir is an empty string if not specified
        if not results_dir:
            results_dir = None

        if generate_dir is not None:
            if baseline_dir is not None:
                warnings.warn("Ignoring --mpl-baseline-path since --mpl-generate-path is set")
            if results_dir is not None:
                warnings.warn("Ignoring --mpl-results-path since --mpl-generate-path is set")

        if baseline_dir is not None and not baseline_dir.startswith(("https", "http")):
            baseline_dir = os.path.abspath(baseline_dir)
        if generate_dir is not None:
            baseline_dir = os.path.abspath(generate_dir)
        if results_dir is not None:
            results_dir = os.path.abspath(results_dir)

        config.pluginmanager.register(ImageComparison(config,
                                                      baseline_dir=baseline_dir,
                                                      baseline_relative_dir=baseline_relative_dir,
                                                      generate_dir=generate_dir,
                                                      results_dir=results_dir,
                                                      hash_library=hash_library,
                                                      generate_hash_library=generate_hash_lib,
                                                      generate_summary=generate_summary,
                                                      results_always=results_always))
    else:
        config.pluginmanager.register(FigureCloser(config))


@contextlib.contextmanager
def switch_backend(backend):
    import matplotlib
    import matplotlib.pyplot as plt
    prev_backend = matplotlib.get_backend().lower()
    if prev_backend != backend.lower():
        plt.switch_backend(backend)
        yield
        plt.switch_backend(prev_backend)
    else:
        yield
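
# Usage sketch (illustrative): render with a non-interactive backend and
# restore the previous one afterwards:
#     with switch_backend('agg'):
#         fig.savefig('out.png')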


def close_mpl_figure(fig):
    "Close a given matplotlib Figure. Any other type of figure is ignored"
    import matplotlib.pyplot as plt
    from matplotlib.figure import Figure

    # We only need to close actual Matplotlib figure objects. If
    # we are dealing with a figure-like object that provides
    # savefig but is not a real Matplotlib object, we shouldn't
    # try closing it here.
    if isinstance(fig, Figure):
        plt.close(fig)


def get_marker(item, marker_name):
    if hasattr(item, 'get_closest_marker'):
        return item.get_closest_marker(marker_name)
    else:
        # "item.keywords.get" was deprecated in pytest 3.6
        # See https://docs.pytest.org/en/latest/mark.html#updating-code
        return item.keywords.get(marker_name)


def path_is_not_none(apath):
    return Path(apath) if apath is not None else apath


class ImageComparison:

    def __init__(self,
                 config,
                 baseline_dir=None,
                 baseline_relative_dir=None,
                 generate_dir=None,
                 results_dir=None,
                 hash_library=None,
                 generate_hash_library=None,
                 generate_summary=None,
                 results_always=False
                 ):
        self.config = config
        self.baseline_dir = baseline_dir
        self.baseline_relative_dir = path_is_not_none(baseline_relative_dir)
        self.generate_dir = path_is_not_none(generate_dir)
        self.results_dir = path_is_not_none(results_dir)
        self.hash_library = path_is_not_none(hash_library)
        self.generate_hash_library = path_is_not_none(generate_hash_library)
        if generate_summary:
            generate_summary = {i.lower() for i in generate_summary.split(',')}
            unsupported_formats = generate_summary - SUPPORTED_FORMATS
            if len(unsupported_formats) > 0:
                raise ValueError(f"The mpl summary type(s) '{sorted(unsupported_formats)}' "
                                 "are not supported.")
            # Ignore `results_always` and always save result images for HTML output
            if generate_summary & {'html', 'basic-html'}:
                results_always = True
        self.generate_summary = generate_summary
        self.results_always = results_always

        # Generate the containing dir for all test results
        if not self.results_dir:
            self.results_dir = Path(tempfile.mkdtemp(dir=self.results_dir))
        self.results_dir.mkdir(parents=True, exist_ok=True)

        # We need global state to store all the hashes generated over the run
        self._generated_hash_library = {}
        self._test_results = {}
        self._test_stats = None

    def get_compare(self, item):
        """
        Return the mpl_image_compare marker for the given item.
        """
        return get_marker(item, 'mpl_image_compare')
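
    # Tests opt in with this marker (illustrative):
    #     @pytest.mark.mpl_image_compare(baseline_dir='baseline', tolerance=5)
    #     def test_plot():
    #         ...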

    def generate_filename(self, item):
        """
        Given a pytest item, generate the figure filename.
        """
        if self.config.getini('mpl-use-full-test-name'):
            filename = self.generate_test_name(item) + '.png'
        else:
            compare = self.get_compare(item)
            # Find test name to use as plot name
            filename = compare.kwargs.get('filename', None)
            if filename is None:
                filename = item.name + '.png'

        filename = str(pathify(filename))
        return filename

    def generate_test_name(self, item):
        """
        Generate a unique name for the hash for this test.
        """
        return f"{item.module.__name__}.{item.name}"
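
    # e.g. a test ``test_line`` in module ``test_plots`` parametrized with
    # ``red`` is named 'test_plots.test_line[red]' (illustrative names).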

    def make_test_results_dir(self, item):
        """
        Generate the directory to put the results in.
        """
        test_name = pathify(self.generate_test_name(item))
        results_dir = self.results_dir / test_name
        results_dir.mkdir(exist_ok=True, parents=True)
        return results_dir

    def baseline_directory_specified(self, item):
        """
        Returns `True` if a non-default baseline directory is specified.
        """
        compare = self.get_compare(item)
        item_baseline_dir = compare.kwargs.get('baseline_dir', None)
        return item_baseline_dir or self.baseline_dir or self.baseline_relative_dir

    def get_baseline_directory(self, item):
        """
        Return a full path to the baseline directory, either local or remote.

        Based on the global and per-test configuration, this returns the
        absolute baseline directory if the baseline is local, or the base
        URL if it is remote.
        """
        compare = self.get_compare(item)
        baseline_dir = compare.kwargs.get('baseline_dir', None)
        if baseline_dir is None:
            if self.baseline_dir is None:
                baseline_dir = Path(item.fspath).parent / 'baseline'
            else:
                if self.baseline_relative_dir:
                    # baseline dir is relative to the current test
                    baseline_dir = Path(item.fspath).parent / self.baseline_relative_dir
                else:
                    # baseline dir is relative to where pytest was run
                    baseline_dir = self.baseline_dir

        baseline_remote = (isinstance(baseline_dir, str) and  # noqa
                           baseline_dir.startswith(('http://', 'https://')))
        if not baseline_remote:
            return Path(item.fspath).parent / baseline_dir

        return baseline_dir

    def obtain_baseline_image(self, item, target_dir):
        """
        Copy the baseline image to our working directory.

        If the image is remote it is downloaded; if it is local it is copied
        to ensure it is kept in the event of a test failure.
        """
        filename = self.generate_filename(item)
        baseline_dir = self.get_baseline_directory(item)
        baseline_remote = (isinstance(baseline_dir, str) and  # noqa
                           baseline_dir.startswith(('http://', 'https://')))
        if baseline_remote:
            # baseline_dir can be a list of URLs when remote, so we have to
            # pass base and filename to download
            baseline_image = _download_file(baseline_dir, filename)
        else:
            baseline_image = (baseline_dir / filename).absolute()

        return baseline_image

    def generate_baseline_image(self, item, fig):
        """
        Generate reference figures.
        """
        compare = self.get_compare(item)
        savefig_kwargs = compare.kwargs.get('savefig_kwargs', {})

        if not os.path.exists(self.generate_dir):
            os.makedirs(self.generate_dir)

        fig.savefig(str((self.generate_dir / self.generate_filename(item)).absolute()),
                    **savefig_kwargs)

        close_mpl_figure(fig)

    def generate_image_hash(self, item, fig):
        """
        For a `matplotlib.figure.Figure`, returns the SHA256 hash as a
        hexadecimal string.
        """
        compare = self.get_compare(item)
        savefig_kwargs = compare.kwargs.get('savefig_kwargs', {})

        imgdata = io.BytesIO()
        fig.savefig(imgdata, **savefig_kwargs)

        out = _hash_file(imgdata)
        imgdata.close()

        close_mpl_figure(fig)
        return out
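
    # Note: the hash is computed from the rendered image bytes, so it is only
    # reproducible when the Matplotlib and FreeType versions (reported in
    # pytest_report_header above) match those used to build the hash library.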

    def compare_image_to_baseline(self, item, fig, result_dir, summary=None):
        """
        Compare a test image to a baseline image.
        """
        from matplotlib.image import imread
        from matplotlib.testing.compare import compare_images

        if summary is None:
            summary = {}

        compare = self.get_compare(item)
        tolerance = compare.kwargs.get('tolerance', 2)
        savefig_kwargs = compare.kwargs.get('savefig_kwargs', {})

        baseline_image_ref = self.obtain_baseline_image(item, result_dir)

        test_image = (result_dir / "result.png").absolute()
        fig.savefig(str(test_image), **savefig_kwargs)
        summary['result_image'] = '%EXISTS%'

        if not os.path.exists(baseline_image_ref):
            summary['status'] = 'failed'
            error_message = ("Image file not found for comparison test in: \n\t"
                             f"{self.get_baseline_directory(item)}\n"
                             "(This is expected for new tests.)\n"
                             "Generated Image: \n\t"
                             f"{test_image}")
            summary['status_msg'] = error_message
            return error_message

        # setuptools may put the baseline images in non-accessible places,
        # copy to our tmpdir to be sure to keep them in case of failure
        baseline_image = (result_dir / "baseline.png").absolute()
        shutil.copyfile(baseline_image_ref, baseline_image)
        summary['baseline_image'] = '%EXISTS%'

        # Compare image size ourselves since the Matplotlib
        # exception is a bit cryptic in this case and doesn't show
        # the filenames
        expected_shape = imread(str(baseline_image)).shape[:2]
        actual_shape = imread(str(test_image)).shape[:2]
        if expected_shape != actual_shape:
            summary['status'] = 'failed'
            error_message = SHAPE_MISMATCH_ERROR.format(expected_path=baseline_image,
                                                        expected_shape=expected_shape,
                                                        actual_path=test_image,
                                                        actual_shape=actual_shape)
            summary['status_msg'] = error_message
            return error_message

        results = compare_images(str(baseline_image), str(test_image),
                                 tol=tolerance, in_decorator=True)
        summary['tolerance'] = tolerance
        if results is None:
            summary['status'] = 'passed'
            summary['status_msg'] = 'Image comparison passed.'
            return None
        else:
            summary['status'] = 'failed'
            summary['rms'] = results['rms']
            summary['diff_image'] = '%EXISTS%'
            template = ['Error: Image files did not match.',
                        'RMS Value: {rms}',
                        'Expected: \n  {expected}',
                        'Actual: \n  {actual}',
                        'Difference:\n  {diff}',
                        'Tolerance: \n  {tol}', ]
            error_message = '\n  '.join([line.format(**results) for line in template])
            summary['status_msg'] = error_message
            return error_message

    def load_hash_library(self, library_path):
        with open(str(library_path)) as fp:
            return json.load(fp)
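
    # A hash library is a flat JSON object mapping test names (as produced by
    # generate_test_name) to SHA256 hex digests, e.g. (illustrative):
    #     {"test_plots.test_line": "<sha256 hexdigest>"}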

    def compare_image_to_hash_library(self, item, fig, result_dir, summary=None):
        new_test = False
        hash_comparison_pass = False
        baseline_image_path = None
        if summary is None:
            summary = {}

        compare = self.get_compare(item)
        savefig_kwargs = compare.kwargs.get('savefig_kwargs', {})

        hash_library_filename = self.hash_library or compare.kwargs.get('hash_library', None)
        hash_library_filename = (Path(item.fspath).parent / hash_library_filename).absolute()

        if not Path(hash_library_filename).exists():
            pytest.fail(f"Can't find hash library at path {hash_library_filename}")

        hash_library = self.load_hash_library(hash_library_filename)
        hash_name = self.generate_test_name(item)

        test_hash = self.generate_image_hash(item, fig)
        summary['result_hash'] = test_hash

        if hash_name not in hash_library:
            new_test = True
            summary['status'] = 'failed'
            error_message = (f"Hash for test '{hash_name}' not found in {hash_library_filename}. "
                             f"Generated hash is {test_hash}.")
            summary['status_msg'] = error_message
        else:
            summary['baseline_hash'] = hash_library[hash_name]

        # Save the figure for later summary (will be removed later if not needed)
        test_image = (result_dir / "result.png").absolute()
        fig.savefig(str(test_image), **savefig_kwargs)
        summary['result_image'] = '%EXISTS%'

        if not new_test:
            if test_hash == hash_library[hash_name]:
                hash_comparison_pass = True
                summary['status'] = 'passed'
                summary['status_msg'] = 'Test hash matches baseline hash.'
            else:
                error_message = (f"Hash {test_hash} doesn't match hash "
                                 f"{hash_library[hash_name]} in library "
                                 f"{hash_library_filename} for test {hash_name}.")
                summary['status'] = 'failed'
                summary['status_msg'] = 'Test hash does not match baseline hash.'

        # If the compare has only been specified with hash and not baseline
        # dir, don't attempt to find a baseline image at the default path.
        if (not hash_comparison_pass and not self.baseline_directory_specified(item)) or new_test:
            return error_message

        # If this is not a new test, try to get the baseline image.
        if not new_test:
            baseline_error = None
            baseline_summary = {}
            # Ignore errors here as it's possible the reference image doesn't exist yet.
            try:
                baseline_image_path = self.obtain_baseline_image(item, result_dir)
                baseline_image = baseline_image_path
                if baseline_image and not baseline_image.exists():
                    baseline_image = None
                # Get the baseline and generate a diff image, always, so that
                # --mpl-results-always can be respected.
                baseline_comparison = self.compare_image_to_baseline(item, fig, result_dir,
                                                                     summary=baseline_summary)
            except Exception as e:
                baseline_image = None
                baseline_error = e
            for k in ['baseline_image', 'diff_image', 'rms', 'tolerance', 'result_image']:
                summary[k] = summary[k] or baseline_summary.get(k)

            # If the hash comparison passes then return
            if hash_comparison_pass:
                return

            if baseline_image is None:
                error_message += f"\nUnable to find baseline image for {item}."
                if baseline_error:
                    error_message += f"\n{baseline_error}"
                summary['status'] = 'failed'
                summary['status_msg'] = error_message
                return error_message

            summary['baseline_image'] = '%EXISTS%'

            # Override the tolerance (if not explicitly set) to 0 as the hashes are not forgiving
            tolerance = compare.kwargs.get('tolerance', None)
            if not tolerance:
                compare.kwargs['tolerance'] = 0

            comparison_error = (baseline_comparison or
                                "\nHowever, the comparison to the baseline image succeeded.")

            error_message = f"{error_message}\n{comparison_error}"
            summary['status'] = 'failed'
            summary['status_msg'] = error_message
            return error_message

    def pytest_runtest_setup(self, item):  # noqa

        compare = self.get_compare(item)

        if compare is None:
            return

        import matplotlib.pyplot as plt
        try:
            from matplotlib.testing.decorators import remove_ticks_and_titles
        except ImportError:
            from matplotlib.testing.decorators import ImageComparisonTest as MplImageComparisonTest
            remove_ticks_and_titles = MplImageComparisonTest.remove_text

        style = compare.kwargs.get('style', 'classic')
        remove_text = compare.kwargs.get('remove_text', False)
        backend = compare.kwargs.get('backend', 'agg')

        original = item.function

        @wraps(item.function)
        def item_function_wrapper(*args, **kwargs):

            with plt.style.context(style, after_reset=True), switch_backend(backend):

                # Run test and get figure object
                if inspect.ismethod(original):  # method
                    # In some cases, for example if setup_method is used,
                    # original appears to belong to an instance of the test
                    # class that is not the same as args[0], and args[0] is the
                    # one that has the correct attributes set up from setup_method
                    # so we ignore original.__self__ and use args[0] instead.
                    fig = original.__func__(*args, **kwargs)
                else:  # function
                    fig = original(*args, **kwargs)

                if remove_text:
                    remove_ticks_and_titles(fig)

                test_name = self.generate_test_name(item)

                summary = {
                    'status': None,
                    'status_msg': None,
                    'baseline_image': None,
                    'diff_image': None,
                    'rms': None,
                    'tolerance': None,
                    'result_image': None,
                    'baseline_hash': None,
                    'result_hash': None,
                }

                # What we do now depends on whether we are generating the
                # reference images or simply running the test.
                if self.generate_dir is not None:
                    summary['status'] = 'skipped'
                    summary['status_msg'] = 'Skipped test, since generating image.'
                    self.generate_baseline_image(item, fig)
                    if self.generate_hash_library is None:
                        self._test_results[str(pathify(test_name))] = summary
                        pytest.skip("Skipping test, since generating image.")

                if self.generate_hash_library is not None:
                    image_hash = self.generate_image_hash(item, fig)
                    self._generated_hash_library[test_name] = image_hash
                    summary['result_hash'] = image_hash

                # Only test figures if not generating images
                if self.generate_dir is None:
                    result_dir = self.make_test_results_dir(item)

                    # Compare to hash library
                    if self.hash_library or compare.kwargs.get('hash_library', None):
                        msg = self.compare_image_to_hash_library(item, fig, result_dir,
                                                                 summary=summary)

                    # Compare against a baseline if specified
                    else:
                        msg = self.compare_image_to_baseline(item, fig, result_dir,
                                                             summary=summary)

                    close_mpl_figure(fig)

                    if msg is None:
                        if not self.results_always:
                            shutil.rmtree(result_dir)
                            for image_type in ['baseline_image', 'diff_image', 'result_image']:
                                summary[image_type] = None  # image no longer %EXISTS%
                    else:
                        self._test_results[str(pathify(test_name))] = summary
                        pytest.fail(msg, pytrace=False)

                close_mpl_figure(fig)

                self._test_results[str(pathify(test_name))] = summary

        if item.cls is not None:
            setattr(item.cls, item.function.__name__, item_function_wrapper)
        else:
            item.obj = item_function_wrapper

    def generate_summary_json(self):
        json_file = self.results_dir / 'results.json'
        with open(json_file, 'w') as f:
            json.dump(self._test_results, f, indent=2)
        return json_file

    def pytest_unconfigure(self, config):
        """
        Save out the hash library at the end of the run.
        """
        if self.generate_hash_library is not None:
            hash_library_path = Path(config.rootdir) / self.generate_hash_library
            hash_library_path.parent.mkdir(parents=True, exist_ok=True)
            with open(hash_library_path, "w") as fp:
                json.dump(self._generated_hash_library, fp, indent=2)

        if self.generate_summary:
            # Generate a list of test directories
            dir_list = [p.relative_to(self.results_dir)
                        for p in self.results_dir.iterdir() if p.is_dir()]

            # Resolve image paths
            for directory in dir_list:
                test_name = directory.parts[-1]
                for image_type, filename in [
                    ('baseline_image', 'baseline.png'),
                    ('diff_image', 'result-failed-diff.png'),
                    ('result_image', 'result.png'),
                ]:
                    if self._test_results[test_name][image_type] == '%EXISTS%':
                        self._test_results[test_name][image_type] = str(directory / filename)

            if 'json' in self.generate_summary:
                summary = self.generate_summary_json()
                print(f"A JSON report can be found at: {summary}")
            if 'html' in self.generate_summary:
                summary = generate_summary_html(self._test_results, self.results_dir)
                print(f"A summary of the failed tests can be found at: {summary}")
            if 'basic-html' in self.generate_summary:
                summary = generate_summary_basic_html(self._test_results, self.results_dir)
                print(f"A summary of the failed tests can be found at: {summary}")


class FigureCloser:
    """
    This is used in place of ImageComparison when the --mpl option is not
    used, to make sure that we still close figures returned by tests.
    """

    def __init__(self, config):
        self.config = config

    def pytest_runtest_setup(self, item):
        compare = get_marker(item, 'mpl_image_compare')
        if compare is None:
            return

        original = item.function

        @wraps(item.function)
        def item_function_wrapper(*args, **kwargs):
            if inspect.ismethod(original):  # method
                fig = original.__func__(*args, **kwargs)
            else:  # function
                fig = original(*args, **kwargs)

            close_mpl_figure(fig)

        if item.cls is not None:
            setattr(item.cls, item.function.__name__, item_function_wrapper)
        else:
            item.obj = item_function_wrapper
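

# Illustrative end-to-end usage (a hypothetical test module, not part of this
# plugin): a test opts in with the marker and returns the figure to compare.
#
#     import matplotlib.pyplot as plt
#     import pytest
#
#     @pytest.mark.mpl_image_compare
#     def test_line():
#         fig, ax = plt.subplots()
#         ax.plot([1, 2, 3])
#         return fig
#
# Run `pytest --mpl-generate-path=baseline` once to create the reference
# image, then `pytest --mpl` to compare later runs against it.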