File: run_tests.py

Package: python-biopython 1.68+dfsg-3~bpo8+1 (Debian jessie-backports, area: main)

#!/usr/bin/env python
# This code is part of the Biopython distribution and governed by its
# license.  Please see the LICENSE file that should have been included
# as part of this package.
"""Run a set of PyUnit-based regression tests.

This will find all modules whose names match "test_*.py" in the test
directory, and run them.  Various command line options provide
additional facilities.

Command line options:

--help        -- show usage info
--offline     -- skip tests which require internet access
-g;--generate -- write the output file for a test instead of comparing it.
                 The name of the test to write the output for must be
                 specified.
-v;--verbose  -- run tests with higher verbosity (does not affect our
                 print-and-compare style unit tests).
<test_name>   -- supply the name of one (or more) tests to be run.
                 The .py file extension is optional.
doctest       -- run the docstring tests.

By default, all tests are run.
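
For example, to run two individual test files followed by the docstring
tests (the test names here are only illustrative):

    python run_tests.py test_SeqIO test_AlignIO doctest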
"""

from __future__ import print_function

# standard modules
import sys
import os
import re
import getopt
import time
import traceback
import unittest
import doctest
import distutils.util
import gc
from io import BytesIO

# Note, we want to be able to call run_tests.py BEFORE
# Biopython is installed, so we can't use this:
# from Bio._py3k import StringIO
try:
    from StringIO import StringIO  # Python 2 (byte strings)
except ImportError:
    from io import StringIO  # Python 3 (unicode strings)


def is_pypy():
    import platform
    try:
        if platform.python_implementation() == 'PyPy':
            return True
    except AttributeError:
        # New in Python 2.6, not in Jython yet either
        pass
    return False


def is_numpy():
    if is_pypy():
        return False
    try:
        import numpy
        del numpy
        return True
    except ImportError:
        return False

# The default verbosity (not verbose)
VERBOSITY = 0

# This is the list of modules containing docstring tests.
# If you develop docstring tests for other modules, please add
# those modules here. Please sort names alphabetically.
DOCTEST_MODULES = [
    "Bio.Align",
    "Bio.Align.Generic",
    "Bio.Align.Applications._Clustalw",
    "Bio.Align.Applications._ClustalOmega",
    "Bio.Align.Applications._Dialign",
    "Bio.Align.Applications._MSAProbs",
    "Bio.Align.Applications._Mafft",
    "Bio.Align.Applications._Muscle",
    "Bio.Align.Applications._Probcons",
    "Bio.Align.Applications._Prank",
    "Bio.Align.Applications._TCoffee",
    "Bio.AlignIO",
    "Bio.AlignIO.StockholmIO",
    "Bio.Alphabet",
    "Bio.Application",
    "Bio.bgzf",
    "Bio.codonalign",
    "Bio.codonalign.codonalignment",
    "Bio.codonalign.codonalphabet",
    "Bio.codonalign.codonseq",
    "Bio.Blast.Applications",
    "Bio.Emboss.Applications",
    "Bio.GenBank",
    "Bio.KEGG.Compound",
    "Bio.KEGG.Enzyme",
    "Bio.NMR.xpktools",
    "Bio.motifs",
    "Bio.motifs.applications._xxmotif",
    "Bio.pairwise2",
    "Bio.Phylo.Applications._Raxml",
    "Bio.Phylo.Consensus",
    "Bio.Phylo.BaseTree",
    "Bio.SearchIO",
    "Bio.SearchIO._model",
    "Bio.SearchIO._model.query",
    "Bio.SearchIO._model.hit",
    "Bio.SearchIO._model.hsp",
    "Bio.SearchIO.BlastIO",
    "Bio.SearchIO.HmmerIO",
    "Bio.SearchIO.FastaIO",
    "Bio.SearchIO.BlatIO",
    "Bio.SearchIO.ExonerateIO",
    "Bio.Seq",
    "Bio.SeqIO",
    "Bio.SeqIO.AceIO",
    "Bio.SeqIO.FastaIO",
    "Bio.SeqIO.IgIO",
    "Bio.SeqIO.InsdcIO",
    "Bio.SeqIO.PhdIO",
    "Bio.SeqIO.PirIO",
    "Bio.SeqIO.QualityIO",
    "Bio.SeqIO.SffIO",
    "Bio.SeqIO.TabIO",
    "Bio.SeqFeature",
    "Bio.SeqRecord",
    "Bio.SeqUtils",
    "Bio.SeqUtils.CheckSum",
    "Bio.SeqUtils.MeltingTemp",
    "Bio.Sequencing.Applications._Novoalign",
    "Bio.Sequencing.Applications._bwa",
    "Bio.Sequencing.Applications._samtools",
    "Bio.Wise",
    "Bio.Wise.psw",
]
# Silently ignore any doctests for modules requiring numpy!
if is_numpy():
    DOCTEST_MODULES.extend(["Bio.Affy.CelFile",
                            "Bio.Statistics.lowess",
                            "Bio.PDB.Polypeptide",
                            "Bio.PDB.Selection",
                            "Bio.SeqIO.PdbIO",
                            ])


try:
    import sqlite3
    del sqlite3
except ImportError:
    # Missing on Jython or Python 2.4
    DOCTEST_MODULES.remove("Bio.SeqIO")
    DOCTEST_MODULES.remove("Bio.SearchIO")

# Skip Bio.Seq doctest under Python 3, see http://bugs.python.org/issue7490
if sys.version_info[0] == 3:
    DOCTEST_MODULES.remove("Bio.Seq")


# Skip Bio.bgzf doctest for broken gzip, see http://bugs.python.org/issue17666
def _have_bug17666():
    """Debug function to check if Python's gzip is broken (PRIVATE).

    Checks for http://bugs.python.org/issue17666 expected in Python 2.7.4,
    3.2.4 and 3.3.1 only.
    """
    if os.name == 'java':
        # Jython not affected
        return False
    import gzip
    # Would like to use byte literal here:
    bgzf_eof = "\x1f\x8b\x08\x04\x00\x00\x00\x00\x00\xff\x06\x00BC" + \
               "\x02\x00\x1b\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00"
    if sys.version_info[0] >= 3:
        import codecs
        bgzf_eof = codecs.latin_1_encode(bgzf_eof)[0]
    h = gzip.GzipFile(fileobj=BytesIO(bgzf_eof))
    try:
        data = h.read()
        h.close()
        assert not data, "Should be zero length, not %i" % len(data)
        return False
    except TypeError as err:
        # TypeError: integer argument expected, got 'tuple'
        h.close()
        return True
if _have_bug17666():
    DOCTEST_MODULES.remove("Bio.bgzf")

system_lang = os.environ.get('LANG', 'C')  # Cache this


def main(argv):
    """Run tests, return number of failures (integer)."""
    # insert our paths in sys.path:
    # ../build/lib.*
    # ..
    # Q. Why this order?
    # A. To find the C modules (which are in ../build/lib.*/Bio)
    # Q. Then, why ".."?
    # A. Because Martel may not be in ../build/lib.*
    test_path = sys.path[0] or "."
    source_path = os.path.abspath("%s/.." % test_path)
    sys.path.insert(1, source_path)
    build_path = os.path.abspath("%s/../build/lib.%s-%s" % (
        test_path, distutils.util.get_platform(), sys.version[:3]))
    if os.access(build_path, os.F_OK):
        sys.path.insert(1, build_path)

    # Using "export LANG=C" (which should work on Linux and similar) can
    # avoid problems detecting optional command line tools on
    # non-English OS (we may want 'command not found' in English).
    # HOWEVER, we do not want to change the default encoding which is
    # rather important on Python 3 with unicode.
    # lang = os.environ['LANG']

    # get the command line options
    try:
        opts, args = getopt.getopt(argv, 'gv', ["generate", "verbose",
                                                "doctest", "help", "offline"])
    except getopt.error as msg:
        print(msg)
        print(__doc__)
        return 2

    verbosity = VERBOSITY

    # deal with the options
    for o, a in opts:
        if o == "--help":
            print(__doc__)
            return 0
        if o == "--offline":
            print("Skipping any tests requiring internet access")
            # This is a bit of a hack...
            import requires_internet
            requires_internet.check.available = False
            # The check() function should now report internet not available
        if o == "-g" or o == "--generate":
            if len(args) > 1:
                print("Only one argument (the test name) needed for generate")
                print(__doc__)
                return 2
            elif len(args) == 0:
                print("No test name specified to generate output for.")
                print(__doc__)
                return 2
            # strip off .py if it was included
            if args[0][-3:] == ".py":
                args[0] = args[0][:-3]

            test = ComparisonTestCase(args[0])
            test.generate_output()
            return 0

        if o == "-v" or o == "--verbose":
            verbosity = 2

    # deal with the arguments, which should be names of tests to run
    for arg_num in range(len(args)):
        # strip off the .py if it was included
        if args[arg_num][-3:] == ".py":
            args[arg_num] = args[arg_num][:-3]

    print("Python version: %s" % sys.version)
    print("Operating system: %s %s" % (os.name, sys.platform))

    # run the tests
    runner = TestRunner(args, verbosity)
    return runner.run()


class ComparisonTestCase(unittest.TestCase):
    """Run a print-and-compare test and check it against expected output."""

    def __init__(self, name, output=None):
        """Initialize with the test to run.

        Arguments:
        o name - The name of the test. The expected output should be
          stored in the file output/name.
        o output - The output that was generated when this test was run.
        """
        unittest.TestCase.__init__(self)
        self.name = name
        self.output = output

    def shortDescription(self):
        return self.name

    def runTest(self):
        # check the expected output to be consistent with what
        # we generated
        outputdir = os.path.join(TestRunner.testdir, "output")
        outputfile = os.path.join(outputdir, self.name)
        try:
            if sys.version_info[0] >= 3:
                # Python 3 problem: Can't use utf8 on output/test_geo
                # due to micro (\xb5) and degrees (\xb0) symbols
                # Also universal new lines mode deprecated on Python 3
                expected = open(outputfile, encoding="latin")
            else:
                expected = open(outputfile, "rU")
        except IOError:
            self.fail("Warning: Can't open %s for test %s" %
                      (outputfile, self.name))

        self.output.seek(0)
        # first check that we are dealing with the right output
        # the first line of the output file is the test name
        expected_test = expected.readline().strip()

        if expected_test != self.name:
            expected.close()
            raise ValueError("\nOutput:   %s\nExpected: %s"
                             % (self.name, expected_test))

        # now loop through the output and compare it to the expected file
        while True:
            expected_line = expected.readline()
            output_line = self.output.readline()

            # stop looping if either of the info handles reach the end
            if not expected_line or not output_line:
                # make sure both have no information left
                assert expected_line == '', "Unread: %s" % expected_line
                assert output_line == '', "Extra output: %s" % output_line
                break

            # normalize the newlines in the two lines
            expected_line = expected_line.strip("\r\n")
            output_line = output_line.strip("\r\n")

            # if the line is a doctest or PyUnit time output like:
            # Ran 2 tests in 0.285s
            # ignore it, so we don't have problems with different running times
            if re.compile("^Ran [0-9]+ tests? in ").match(expected_line):
                pass
            # otherwise make sure the two lines are the same
            elif expected_line != output_line:
                expected.close()
                raise ValueError("\nOutput  : %s\nExpected: %s"
                                 % (repr(output_line), repr(expected_line)))
        expected.close()

    def generate_output(self):
        """Generate the golden output for the specified test.
        """
        outputdir = os.path.join(TestRunner.testdir, "output")
        outputfile = os.path.join(outputdir, self.name)

        output_handle = open(outputfile, 'w')

        # write the test name as the first line of the output
        output_handle.write(self.name + "\n")

        # remember standard out so we can reset it after we are done
        save_stdout = sys.stdout
        try:
            # write the output from the test into a string
            sys.stdout = output_handle
            __import__(self.name)
        finally:
            output_handle.close()
            # return standard out to its normal setting
            sys.stdout = save_stdout


class TestRunner(unittest.TextTestRunner):
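    """Run the requested tests and report the results.

    Handles unittest-based tests, print-and-compare tests and doctests.
    Each test writes a one-line status (ok / FAIL / skipping) to stderr,
    and run() returns the number of failing tests.  The class attribute
    testdir is the Tests/ directory containing this script.
    """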

    if __name__ == '__main__':
        file = sys.argv[0]
    else:
        file = __file__
    testdir = os.path.abspath(os.path.dirname(file) or os.curdir)

    def __init__(self, tests=(), verbosity=0):
        # if no tests were specified to run, we run them all
        # including the doctests
        # Copy into a list so that neither the default () nor the caller's
        # argument list is mutated when doctest modules are added below.
        self.tests = list(tests)
        if not self.tests:
            # Make a list of all applicable test modules.
            names = os.listdir(TestRunner.testdir)
            for name in names:
                if name[:5] == "test_" and name[-3:] == ".py":
                    self.tests.append(name[:-3])
            self.tests.sort()
            self.tests.append("doctest")
        if "doctest" in self.tests:
            self.tests.remove("doctest")
            self.tests.extend(DOCTEST_MODULES)
        stream = StringIO()
        unittest.TextTestRunner.__init__(self, stream,
                                         verbosity=verbosity)

    def runTest(self, name):
        from Bio import MissingExternalDependencyError
        result = self._makeResult()
        output = StringIO()
        # Restore the language and thus default encoding (in case a prior
        # test changed this, e.g. to help with detecting command line tools)
        global system_lang
        os.environ['LANG'] = system_lang
        # Always run tests from the Tests/ folder where run_tests.py
        # should be located (as we assume this with relative paths etc)
        os.chdir(self.testdir)
        try:
            stdout = sys.stdout
            sys.stdout = output
            if name.startswith("test_"):
                sys.stderr.write("%s ... " % name)
                # It's either a unittest or a print-and-compare test
                loader = unittest.TestLoader()
                suite = loader.loadTestsFromName(name)
                if hasattr(loader, "errors") and loader.errors:
                    # New in Python 3.5, don't always get an exception anymore
                    # Instead this is a list of error messages as strings
                    for msg in loader.errors:
                        if "Bio.MissingExternalDependencyError: " in msg or \
                                "Bio.MissingPythonDependencyError: " in msg:
                            # Remove the traceback etc
                            msg = msg[msg.find("Bio.Missing"):]
                            msg = msg[msg.find("Error: "):]
                            sys.stderr.write("skipping. %s\n" % msg)
                            return True
                    # Looks like a real failure
                    sys.stderr.write("loading tests failed:\n")
                    for msg in loader.errors:
                        sys.stderr.write("%s\n" % msg)
                    return False
                if suite.countTestCases() == 0:
                    # This is a print-and-compare test instead of a
                    # unittest-type test.
                    test = ComparisonTestCase(name, output)
                    suite = unittest.TestSuite([test])
            else:
                # It's a doc test
                sys.stderr.write("%s docstring test ... " % name)
                module = __import__(name, fromlist=name.split("."))
                suite = doctest.DocTestSuite(module,
                                             optionflags=doctest.ELLIPSIS)
                del module
            suite.run(result)
            if self.testdir != os.path.abspath("."):
                sys.stderr.write("FAIL\n")
                result.stream.write(result.separator1 + "\n")
                result.stream.write("ERROR: %s\n" % name)
                result.stream.write(result.separator2 + "\n")
                result.stream.write("Current directory changed\n")
                result.stream.write("Was: %s\n" % self.testdir)
                result.stream.write("Now: %s\n" % os.path.abspath("."))
                os.chdir(self.testdir)
                if not result.wasSuccessful():
                    result.printErrors()
                return False
            elif result.wasSuccessful():
                sys.stderr.write("ok\n")
                return True
            else:
                sys.stderr.write("FAIL\n")
                result.printErrors()
            return False
        except MissingExternalDependencyError as msg:
            # Seems this isn't always triggered on Python 3.5,
            # exception messages can be in loader.errors instead.
            sys.stderr.write("skipping. %s\n" % msg)
            return True
        except Exception as msg:
            # This happened during the import
            sys.stderr.write("ERROR\n")
            result.stream.write(result.separator1 + "\n")
            result.stream.write("ERROR: %s\n" % name)
            result.stream.write(result.separator2 + "\n")
            result.stream.write(traceback.format_exc())
            return False
        except KeyboardInterrupt as err:
            # Want to allow this, and abort the test
            # (see below for special case)
            raise err
        except:
            # This happens in Jython with java.lang.ClassFormatError:
            # Invalid method Code length ...
            sys.stderr.write("ERROR\n")
            result.stream.write(result.separator1 + "\n")
            result.stream.write("ERROR: %s\n" % name)
            result.stream.write(result.separator2 + "\n")
            result.stream.write(traceback.format_exc())
            return False
        finally:
            sys.stdout = stdout
            # Running under PyPy we were leaking file handles...
            gc.collect()

    def run(self):
        """Run tests, return number of failures (integer)."""
        failures = 0
        startTime = time.time()
        for test in self.tests:
            ok = self.runTest(test)
            if not ok:
                failures += 1
        total = len(self.tests)
        stopTime = time.time()
        timeTaken = stopTime - startTime
        sys.stderr.write(self.stream.getvalue())
        sys.stderr.write('-' * 70 + "\n")
        sys.stderr.write("Ran %d test%s in %.3f seconds\n" %
                         (total, total != 1 and "s" or "", timeTaken))
        sys.stderr.write("\n")
        if failures:
            sys.stderr.write("FAILED (failures = %d)\n" % failures)
        return failures


if __name__ == "__main__":
    errors = main(sys.argv[1:])
    if errors:
        # Doing a sys.exit(...) isn't nice if run from IDLE...
        sys.exit(1)