File: tests.py

package info (click to toggle)
roc-toolkit 0.4.0%2Bdfsg-6
  • links: PTS, VCS
  • area: main
  • in suites: forky, sid
  • size: 9,700 kB
  • sloc: cpp: 102,987; ansic: 8,959; python: 6,125; sh: 942; makefile: 19; javascript: 9
file content (89 lines) | stat: -rw-r--r-- 2,565 bytes parent folder | download | duplicates (2)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
import SCons.Script
import os
import re

def _is_test_enabled(kind, testname):
    """Check whether this test was requested on the scons command line.

    A test is enabled if either its kind (e.g. 'test' or 'bench') or its
    full name (e.g. 'test/foo') appears in COMMAND_LINE_TARGETS.

    Returns an explicit bool; the original fell off the end and returned
    None for the disabled case, which callers only used for truthiness.
    """
    return any(
        target in SCons.Script.COMMAND_LINE_TARGETS
        for target in (kind, testname))

def _get_non_test_targets(env):
    """Yield the build targets that test targets must wait for.

    When no explicit targets were given on the command line, yields the
    project root directory. Otherwise yields each requested target that
    is not itself a test/bench target; the bare 'test' and 'bench'
    targets are mapped to the project root.
    """
    cli_targets = SCons.Script.COMMAND_LINE_TARGETS
    if not cli_targets:
        # Default build: everything under the project root.
        yield env.Dir('#')
        return
    for tgt in cli_targets:
        if tgt in ('test', 'bench'):
            yield env.Dir('#')
        elif not re.match('^(test|bench)/.+', tgt):
            yield tgt

def _run_with_timeout(env, cmd, timeout):
    return '{} scripts/scons_helpers/timeout-run.py {} {}'.format(
        env.GetPythonExecutable(),
        timeout,
        cmd)

def _add_test(env, kind, name, exe, cmd, timeout):
    """Register a pseudo-target '<kind>/<name>' that runs a test executable.

    Parameters:
      env     -- SCons environment
      kind    -- 'test' or 'bench'; selects target list, color, and ordering
      name    -- short test name; full target name becomes '<kind>/<name>'
      exe     -- path to the test executable this target depends on
      cmd     -- command line to run; defaults to running `exe` directly
      timeout -- seconds before the run is killed, or None for no limit

    Does nothing unless the test was requested on the command line.
    Side effect: appends the new target name to env['_<KIND>_TARGETS'],
    which serializes test runs (each test is ordered after all previously
    registered ones).
    """
    # e.g. '_TEST_TARGETS' or '_BENCH_TARGETS' (initialized in init()).
    varname = '_{}_TARGETS'.format(kind.upper())
    testname = '{}/{}'.format(kind, name)

    if not _is_test_enabled(kind, testname):
        return

    if not cmd:
        # Default command: run the executable itself.
        cmd = env.File(exe).path

    if timeout is not None:
        cmd = _run_with_timeout(env, cmd, timeout)

    if kind == 'test':
        # NOTE(review): '-b' is an option of the test binary itself —
        # presumably a runner flag (e.g. backtrace/batch mode); confirm
        # against the C++ test harness.
        cmd += ' -b'

    # Tests print in green, benchmarks in cyan.
    comstr = env.PrettyCommand(kind.upper(), name, 'green' if kind == 'test' else 'cyan')
    target = env.Alias(testname, [], env.Action(cmd, comstr))

    # This target produces no files.
    env.AlwaysBuild(target)

    # This target depends on test executable that it should run.
    env.Depends(target, env.File(exe))

    # This target should be run after all build targets.
    for t in _get_non_test_targets(env):
        env.Requires(target, t)

    # This target should be run after all previous tests.
    for t in env[varname]:
        env.Requires(target, t)

    # Benchmarks should be run after all tests.
    if kind == 'bench':
        for t in env['_TEST_TARGETS']:
            env.Requires(target, t)
    else:
        for t in env['_BENCH_TARGETS']:
            env.Requires(t, target)

    # Add target to test list.
    env[varname] += [testname]

    # 'test' target depends on this target.
    env.Depends(kind, target)

def AddTest(env, name, exe, cmd=None, timeout=5*60):
    """Register a unit-test target 'test/<name>' that runs `exe`.

    `cmd` overrides the command line (defaults to invoking `exe`);
    `timeout` is the run-time limit in seconds (default: 5 minutes).
    """
    _add_test(env, kind='test', name=name, exe=exe, cmd=cmd, timeout=timeout)

def AddBench(env, name, exe, cmd=None, timeout=None):
    """Register a benchmark target 'bench/<name>' that runs `exe`.

    `cmd` overrides the command line (defaults to invoking `exe`);
    `timeout` is the run-time limit in seconds (default: unlimited).
    """
    _add_test(env, kind='bench', name=name, exe=exe, cmd=cmd, timeout=timeout)

def init(env):
    """Set up the 'test' and 'bench' umbrella targets and env methods.

    Creates empty per-kind target lists, registers the two always-built
    alias targets, orders benchmarks after tests, and attaches AddTest /
    AddBench as environment methods.
    """
    for kind in ('test', 'bench'):
        # Accumulates registered target names; consumed by _add_test().
        env['_{}_TARGETS'.format(kind.upper())] = []
        # Umbrella alias with a no-op action so it always "builds".
        env.AlwaysBuild(env.Alias(kind, [], env.Action('')))

    # All benchmarks run after all tests.
    env.Requires('bench', 'test')

    env.AddMethod(AddTest, 'AddTest')
    env.AddMethod(AddBench, 'AddBench')