File: ipnbdoctest.py

package info (click to toggle)
seaborn 0.4.0-3
  • links: PTS, VCS
  • area: main
  • in suites: jessie, jessie-kfreebsd
  • size: 3,896 kB
  • ctags: 1,183
  • sloc: python: 8,372; makefile: 181
file content (342 lines) | stat: -rw-r--r-- 10,534 bytes parent folder | download
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
#!/usr/bin/env python
"""
simple example script for running and testing notebooks.

Usage: `ipnbdoctest.py foo.ipynb [bar.ipynb [...]]`

Each cell is submitted to the kernel, and the outputs are compared with those
stored in the notebook.

From https://gist.github.com/minrk/2620735

"""
from __future__ import print_function
import os
import sys
import re
import difflib
import base64

from collections import defaultdict
from io import StringIO, BytesIO
from six.moves import queue
from six import string_types

from IPython.kernel import KernelManager
from IPython.nbformat.current import reads, NotebookNode

SKIP_COMPARE = ('traceback', 'latex', 'prompt_number')
IMAGE_OUTPUTS = ('png', 'svg', 'jpeg')


def sanitize(s):
    """Sanitize a string for comparison.

    - Fix universal newlines
    - Strip trailing newlines
    - Normalize likely random values (memory addresses and UUIDs)

    Non-string inputs are returned unchanged.
    """
    if not isinstance(s, string_types):
        return s

    # Normalize newlines, then drop trailing ones (but keep other
    # trailing whitespace).
    s = s.replace('\r\n', '\n').rstrip('\n')

    # Replace run-to-run varying values with fixed placeholders.
    normalizations = [
        # Memory addresses, e.g. "<object at 0x7f...>"
        (r'0x[a-f0-9]+', '0xFFFFFFFFF'),
        # UUIDs, e.g. widget/model ids
        (r'[a-f0-9]{8}(\-[a-f0-9]{4}){3}\-[a-f0-9]{12}', 'U-U-I-D'),
    ]
    for pattern, placeholder in normalizations:
        s = re.sub(pattern, placeholder, s)

    return s


def consolidate_outputs(outputs):
    """Consolidate outputs into a summary dict (incomplete).

    Parameters
    ----------
    outputs : list of NotebookNode
        Cell outputs as produced by ``run_cell`` (each carries an
        ``output_type`` attribute).

    Returns
    -------
    defaultdict
        'stdout'/'stderr' map to concatenated stream text, 'pyerr' to a
        dict with the error name and value, and each rich-display key
        ('png', 'svg', ...) to a list of data payloads.

    """
    data = defaultdict(list)
    data['stdout'] = ''
    data['stderr'] = ''

    for out in outputs:
        # Outputs built by run_cell store the message type under
        # ``output_type``; the previous ``out.type`` lookup raised on
        # every output.
        if out.output_type == 'stream':
            data[out.stream] += out.text
        elif out.output_type == 'pyerr':
            data['pyerr'] = dict(ename=out.ename, evalue=out.evalue)
        else:
            for key in ('png', 'svg', 'latex', 'html',
                        'javascript', 'text', 'jpeg',):
                if key in out:
                    data[key].append(out[key])
    return data


def base64_to_array(data):
    """Convert a base64-encoded image to an array.

    Parameters
    ----------
    data : str
        Base64-encoded image payload (e.g. the "png" field of a
        notebook output).

    Returns
    -------
    numpy.ndarray
        Pixel data scaled from [0, 255] into [0, 1].

    Raises
    ------
    ImportError
        If numpy or PIL is not installed (callers catch this).

    """
    import numpy as np
    from PIL import Image
    # b64decode returns bytes, which io.StringIO rejects on both
    # Python 2 and 3 -- the old StringIO-then-BytesIO fallback always
    # took the BytesIO branch, so use it directly.
    buff = BytesIO(base64.b64decode(data))
    return np.array(Image.open(buff)) / 255.


def image_diff(test, ref, key="image", prompt_num=None):
    """Diff two base64-encoded images.

    Returns a ``(match, message)`` pair.  Identical strings match
    trivially; otherwise both images are decoded and their mean absolute
    pixel difference is held to a hard-coded 5% tolerance.  When
    decoding support (numpy/PIL) is missing, the pair is reported as a
    mismatch with no detail.
    """
    if test == ref:
        return True, ""

    message = "Mismatch in %s output" % key
    if prompt_num is not None:
        message += " (#%d)" % prompt_num

    try:
        test_arr = base64_to_array(test)
        ref_arr = base64_to_array(ref)
        if test_arr.shape != ref_arr.shape:
            # Different sizes can never match; report both shapes.
            message += ": Test image (%dx%d)" % test_arr.shape[:2]
            message += "; Ref image (%dx%d)" % ref_arr.shape[:2]
        else:
            import numpy as np
            pct_diff = np.abs(test_arr - ref_arr).mean() * 100
            # TODO hardcode tol, make configurable later
            if pct_diff < 5:
                return True, ""
            message += ": %.3g%% difference" % pct_diff
    except ImportError:
        pass
    return False, message


def compare_outputs(test, ref, prompt_num=None, skip_compare=SKIP_COMPARE):
    """Test whether the stored outputs match the execution outputs.

    Parameters
    ----------
    test : dict-like
        Output produced by executing the cell.
    ref : dict-like
        Reference output stored in the notebook.
    prompt_num : int, optional
        Cell prompt number, used to label mismatch messages.
    skip_compare : sequence of str
        Output fields that are never compared.

    Returns
    -------
    match : bool
        True when every compared field agrees.
    message : str
        Human-readable description of all mismatches (empty on success).

    """
    match, message = True, ""

    # Iterate through the reference output fields
    for key in ref:

        # Don't check everything
        if key in skip_compare:
            continue

        # Report when test output is missing a field
        if key not in test:
            match = False
            msg = "Mismatch: '%s' field not in test output" % key
            if prompt_num is not None:
                msg += " (#%d)" % prompt_num
            message += msg + "\n"
            continue

        # Obtain the field values
        test_value = test[key]
        ref_value = ref[key]

        # Images would be diffed separately with image_diff(), but as of
        # June 2014 changes in IPython have broken the tests vs. the
        # reference notebooks, and testing on IPython 1 doesn't work as
        # conda doesn't package it for Python 3.4.  To avoid rerunning
        # the reference notebooks, skip image comparison entirely for
        # now.  (The unreachable image_diff call that used to sit after
        # this continue has been removed.)
        if key in IMAGE_OUTPUTS:
            continue

        # Clean up some randomness and check the match
        test_value = sanitize(test_value)
        ref_value = sanitize(ref_value)
        if test_value == ref_value:
            continue

        # Build a textual diff report
        match = False
        diff = difflib.context_diff(test_value.split("\n"),
                                    ref_value.split("\n"),
                                    "Test output",
                                    "Reference output",
                                    n=1, lineterm="")
        message += "Mismatch in textual output"
        if prompt_num is not None:
            message += " (#%d)" % prompt_num
        # Always terminate the header line; previously the diff was
        # glued onto it whenever prompt_num was None.
        message += "\n"
        message += "  " + "\n  ".join(diff) + "\n"

    return match, message


def run_cell(shell, iopub, cell):
    """Execute a single code cell on the kernel and collect its outputs.

    Submits ``cell.input`` over the shell channel, waits (up to 30s) for
    the execute reply, then drains the iopub channel, converting each
    output-bearing message into a NotebookNode.
    """
    shell.execute(cell.input)
    # Block until the execute reply arrives, up to 30 seconds.
    shell.get_msg(timeout=30)

    outputs = []
    while True:
        # A short timeout on the iopub channel marks the end of output.
        try:
            message = iopub.get_msg(timeout=0.2)
        except queue.Empty:
            break

        message_type = message['msg_type']
        # Kernel-status and input-echo messages carry no cell output.
        if message_type in ('status', 'pyin'):
            continue
        # A clear_output request discards everything collected so far.
        if message_type == 'clear_output':
            outputs = []
            continue

        content = message['content']
        node = NotebookNode(output_type=message_type)

        if message_type == 'stream':
            node.stream = content['name']
            node.text = content['data']
        elif message_type == 'pyerr':
            node.ename = content['ename']
            node.evalue = content['evalue']
            node.traceback = content['traceback']
        elif message_type in ('display_data', 'pyout'):
            node['metadata'] = content['metadata']
            for mime, data in content['data'].items():
                # Map a mimetype such as "image/png" onto the field name
                # the notebook format uses.  This gets most right, but
                # "svg+xml" and "text/plain" need fixing up by hand.
                attr = mime.split('/')[-1].lower()
                attr = attr.replace('+xml', '').replace('plain', 'text')
                setattr(node, attr, data)
            if message_type == 'pyout':
                node.prompt_number = content['execution_count']
        else:
            print("unhandled iopub msg:", message_type)

        outputs.append(node)

    return outputs


def test_notebook(nb):
    """Main function to run tests at the level of one notebook.

    Executes every code cell of ``nb`` on a fresh kernel, compares each
    output against the one stored in the notebook, prints a nose-style
    progress line plus a summary, and returns 0 when everything
    replicated or 1 when any cell mismatched or errored.
    """
    # Boot up the kernel, assume inline plotting
    km = KernelManager()
    km.start_kernel(extra_arguments=["--matplotlib=inline",
                                     "--colors=NoColor"],
                    stderr=open(os.devnull, 'w'))

    # Connect, allowing for older IPythons
    try:
        kc = km.client()
        kc.start_channels()
        iopub = kc.iopub_channel
    except AttributeError:
        # IPython 0.13
        kc = km
        kc.start_channels()
        iopub = kc.sub_channel
    shell = kc.shell_channel

    # Initialize the result tracking
    successes = 0
    failures = 0
    errors = 0
    fail_messages = []
    err_messages = []

    # Iterate the notebook, testing only code cells
    for ws in nb.worksheets:
        for cell in ws.cells:
            if cell.cell_type != 'code':
                continue

            # Try and get the prompt number for easier reference
            try:
                prompt_num = cell.prompt_number
            except AttributeError:
                prompt_num = None

            # Try to execute the cell, catch errors from test execution
            try:
                outs = run_cell(shell, iopub, cell)
            except Exception as e:
                message = "Error while running cell:\n%s" % repr(e)
                err_messages.append(message)
                errors += 1
                sys.stdout.write("E")
                continue

            errored = False
            failed = False

            # NOTE(review): zip() stops at the shorter sequence, so any
            # extra outputs on either side are silently ignored here.
            for out, ref in zip(outs, cell.outputs):

                # Now check for an error in the cell execution itself;
                # a pyerr only counts as an error when the reference
                # output did not also store a pyerr.
                bad_error = (out.output_type == "pyerr"
                             and not ref.output_type == "pyerr")
                if bad_error:
                    message = "\nError in code cell"
                    if prompt_num is not None:
                        message = " %s (#%d)" % (message, prompt_num)
                    message = "%s:\n%s" % (message, "".join(out.traceback))
                    err_messages.append(message)
                    errored = True

                # Otherwise check whether the stored and archived outputs match
                else:
                    try:
                        match, message = compare_outputs(out, ref, prompt_num)
                        if not match:
                            failed = True
                            fail_messages.append(message)

                    except Exception as e:
                        message = "Error while comparing output:\n%s" % repr(e)
                        err_messages.append(message)
                        # NOTE(review): this bumps ``errors`` and prints
                        # an "E" here, yet the cell still reaches the
                        # failed/errored accounting below and gets a
                        # second progress character (and possibly a
                        # second count) — confirm this double counting
                        # is intended.
                        errors += 1
                        sys.stdout.write("E")
                        continue

            # One progress character per cell: failures take precedence
            # over errors, which take precedence over success.
            if failed:
                failures += 1
                dot = "F"
            elif errored:
                errors += 1
                dot = "E"
            else:
                successes += 1
                dot = "."
            print(dot, end="")

    # Summary: counts first, then full failure/error reports.
    print()
    print("    %3i cells successfully replicated" % successes)
    if failures:
        print("    %3i cells mismatched output" % failures)
    if errors:
        print("    %3i cells failed to complete" % errors)
    if failures:
        print("Failures:")
        print("-" * 20)
        print("\n" + "\n".join(fail_messages) + "\n")
    if errors:
        print("Errors:")
        print("-" * 20)
        print("\n" + "\n".join(err_messages) + "\n")
    # Tear the kernel down before reporting the exit status.
    kc.stop_channels()
    km.shutdown_kernel()
    del km

    return int(bool(failures + errors))

if __name__ == '__main__':

    # Run every notebook named on the command line; the exit status is
    # the number of notebooks that had failures or errors.
    status = 0
    for nb_path in sys.argv[1:]:
        print("testing %s" % nb_path)
        with open(nb_path) as nb_file:
            notebook = reads(nb_file.read(), 'json')
        status += test_notebook(notebook)
    sys.exit(status)