File: dummy_format.py

Package: llvm-toolchain-11 1:11.0.1-2 (Debian bullseye)
try:
    import ConfigParser  # Python 2 name
except ImportError:
    import configparser as ConfigParser  # renamed to configparser in Python 3

import lit.formats
import lit.Test

class DummyFormat(lit.formats.FileBasedTest):
    def execute(self, test, lit_config):
        # In this dummy format, expect that each test file is actually just a
        # .ini format dump of the results to report.
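        # A hypothetical test file for this format might look like:
        #
        #   [global]
        #   result_code = PASS
        #   result_output = the test passed
        #   required_feature = some-feature
        #
        #   [results]
        #   value0 = 1
        #   value1 = 2.3456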

        source_path = test.getSourcePath()

        cfg = ConfigParser.ConfigParser()
        cfg.read(source_path)

        # Create the basic test result. The result_code key names a lit.Test
        # result code such as PASS, FAIL, or XFAIL.
        result_code = cfg.get('global', 'result_code')
        result_output = cfg.get('global', 'result_output')
        result = lit.Test.Result(getattr(lit.Test, result_code),
                                 result_output)

        # Honor an optional feature requirement.
        if cfg.has_option('global', 'required_feature'):
            required_feature = cfg.get('global', 'required_feature')
            if required_feature:
                test.requires.append(required_feature)

        # Load additional metrics. Each value is evaluated as a Python
        # expression (the inputs are trusted test files); only int and float
        # metrics are supported.
        for key, value_str in cfg.items('results'):
            value = eval(value_str)
            if isinstance(value, int):
                metric = lit.Test.IntMetricValue(value)
            elif isinstance(value, float):
                metric = lit.Test.RealMetricValue(value)
            else:
                raise RuntimeError("unsupported result type")
            result.addMetric(key, metric)

        return result
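
For context, a lit test suite selects this format from its configuration file. Below is a minimal, hypothetical lit.cfg sketch (the suite name and path layout are assumptions) that wires DummyFormat up to .ini test files living next to the config:

    import os
    import site

    # Make dummy_format.py importable from the directory containing this config.
    site.addsitedir(os.path.dirname(__file__))
    import dummy_format

    # 'config' is injected by lit when it executes this file.
    config.name = 'dummy-suite'           # hypothetical suite name
    config.suffixes = ['.ini']            # treat .ini files as tests
    config.test_format = dummy_format.DummyFormat()
    config.test_source_root = os.path.dirname(__file__)
    config.test_exec_root = config.test_source_root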