File: dummy_format.py

package info (click to toggle)
llvm-toolchain-20 1%3A20.1.6-1~exp1
  • links: PTS, VCS
  • area: main
  • in suites: experimental
  • size: 2,111,304 kB
  • sloc: cpp: 7,438,677; ansic: 1,393,822; asm: 1,012,926; python: 241,650; f90: 86,635; objc: 75,479; lisp: 42,144; pascal: 17,286; sh: 10,027; ml: 5,082; perl: 4,730; awk: 3,523; makefile: 3,349; javascript: 2,251; xml: 892; fortran: 672
file content (53 lines) | stat: -rw-r--r-- 1,907 bytes parent folder | download | duplicates (10)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
import ast
import os

try:
    import ConfigParser
except ImportError:
    import configparser as ConfigParser

import lit.formats
import lit.Test


class DummyFormat(lit.formats.FileBasedTest):
    """A lit test format whose "tests" are .ini dumps of the results to report.

    Each test file is parsed with ConfigParser and must contain:
      [global]        -- result_code (a lit.Test result-code attribute name,
                         e.g. PASS/FAIL) and result_output (the output string).
      [results]       -- metric key/value pairs attached to the main result.
      [micro-tests]   -- key/name pairs; one micro result is created per name.
      [micro-results] -- metric key/value pairs attached to EVERY micro result.
    """

    @staticmethod
    def _add_metrics(cfg, section, result):
        """Parse every (key, value) pair in *section* and attach it to *result*.

        Values must be int or float literals; anything else raises
        RuntimeError. Raises configparser errors if *section* is missing.
        """
        for key, value_str in cfg.items(section):
            # ast.literal_eval replaces the original eval(): it accepts the
            # int/float literals this format supports but cannot execute
            # arbitrary code from the test file.
            value = ast.literal_eval(value_str)
            if isinstance(value, int):
                metric = lit.Test.IntMetricValue(value)
            elif isinstance(value, float):
                metric = lit.Test.RealMetricValue(value)
            else:
                raise RuntimeError("unsupported result type")
            result.addMetric(key, metric)

    def execute(self, test, lit_config):
        # In this dummy format, expect that each test file is actually just a
        # .ini format dump of the results to report.

        source_path = test.getSourcePath()

        cfg = ConfigParser.ConfigParser()
        cfg.read(source_path)

        # Create the basic test result from the [global] section.
        result_code = cfg.get("global", "result_code")
        result_output = cfg.get("global", "result_output")
        result = lit.Test.Result(getattr(lit.Test, result_code), result_output)

        # Load additional metrics from [results].
        self._add_metrics(cfg, "results", result)

        # Create micro test results. Note that every micro test shares the
        # same result code and the same [micro-results] metrics, mirroring
        # the original behavior of this dummy format.
        for _key, micro_name in cfg.items("micro-tests"):
            micro_result = lit.Test.Result(getattr(lit.Test, result_code, ""))
            self._add_metrics(cfg, "micro-results", micro_result)
            result.addMicroResult(micro_name, micro_result)

        return result