File: parse-junit-results.py

package info (click to toggle)
firefox 147.0-1
  • links: PTS, VCS
  • area: main
  • in suites: sid
  • size: 4,683,324 kB
  • sloc: cpp: 7,607,156; javascript: 6,532,492; ansic: 3,775,158; python: 1,415,368; xml: 634,556; asm: 438,949; java: 186,241; sh: 62,751; makefile: 18,079; objc: 13,092; perl: 12,808; yacc: 4,583; cs: 3,846; pascal: 3,448; lex: 1,720; ruby: 1,003; php: 436; lisp: 258; awk: 247; sql: 66; sed: 54; csh: 10; exp: 6
file content (168 lines) | stat: -rw-r--r-- 6,222 bytes parent folder | download | duplicates (2)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
#!/usr/bin/python3

# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

"""
Script to parse and print UI test JUnit results from a FullJUnitReport.xml file.

This script processes JUnit test results generated by Flank and Firebase Test Lab.
It reads test results from a specified directory containing a full JUnit report,
parses the results to identify test failures and flaky tests, and prints a formatted
table summarizing these results to the console.

- Parses JUnit XML test result files, including custom 'flaky' attributes.
- Identifies and displays unique test failures and flaky tests.
- Prints the results in a readable table format using BeautifulTable.
- Provides detailed failure messages and test case information.
- Designed for use in Taskcluster following a Firebase Test Lab test execution.

Flank: https://flank.github.io/flank/

Usage:
    python3 parse-junit-results.py --results <path_to_results_directory>
"""

import argparse
import sys
import xml
import xml.etree.ElementTree
from pathlib import Path

from beautifultable import BeautifulTable
from junitparser import Attr, Failure, JUnitXml, TestCase, TestSuite


def parse_args(cmdln_args):
    """Parse command-line arguments for this script.

    Args:
        cmdln_args: Sequence of argument strings, typically sys.argv[1:].

    Returns:
        argparse.Namespace: Parsed arguments with a single ``results``
        attribute holding the artifact directory as a Path.
    """
    arg_parser = argparse.ArgumentParser(
        description="Parse and print UI test JUnit results"
    )
    arg_parser.add_argument(
        "--results",
        required=True,
        type=Path,
        help="Directory containing task artifact results",
    )
    return arg_parser.parse_args(args=cmdln_args)


class test_suite(TestSuite):
    # Extends junitparser's TestSuite to expose the non-standard 'flakes'
    # attribute that Flank writes on <testsuite> elements (see the example
    # XML in parse_print_failure_results).
    flakes = Attr()


class test_case(TestCase):
    # Extends junitparser's TestCase to expose the non-standard 'flaky'
    # attribute ("true"/"false" string) that Flank writes on <testcase>
    # elements when a test passed only after retries.
    flaky = Attr()


def parse_print_failure_results(results):
    """
    Parses the given JUnit test results and prints a formatted table of failures and flaky tests.

    Args:
        results (JUnitXml): Parsed JUnit XML results.

    Returns:
        int: The number of test failures (flaky tests are displayed but not counted).

    The function processes each test suite and each test case within the suite.
    If a test case has a result that is an instance of Failure, it is added to the table.
    The test case is marked as 'Flaky' if the flaky attribute is set to "true", otherwise it is marked as 'Failure'.
    Only non-flaky failures emit a TEST-UNEXPECTED-FAIL line and increment the
    returned failure count. Consecutive duplicate failure details for the same
    test case are collapsed into a single table row.

    Example of possible JUnit XML (FullJUnitReport.xml):
    <testsuites>
        <testsuite name="ExampleSuite" tests="2" failures="1" flakes="1" time="0.003">
            <testcase classname="example.TestClass" name="testSuccess" flaky="true" time="0.001">
                <failure message="Assertion failed">Expected true but was false</failure>
            </testcase>
            <testcase classname="example.TestClass" name="testFailure" time="0.002">
                <failure message="Assertion failed">Expected true but was false</failure>
                <failure message="Assertion failed">Expected true but was false</failure>
            </testcase>
        </testsuite>
    </testsuites>
    """

    table = BeautifulTable(maxwidth=256)
    table.columns.header = ["UI Test", "Outcome", "Details"]
    table.columns.alignment = BeautifulTable.ALIGN_LEFT
    table.set_style(BeautifulTable.STYLE_GRID)

    failure_count = 0

    # Dictionary to store the last seen failure details for each test case,
    # used to suppress repeated identical <failure> entries.
    last_seen_failures = {}

    for suite in results:
        cur_suite = test_suite.fromelem(suite)
        for case in cur_suite:
            cur_case = test_case.fromelem(case)
            if not cur_case.result:
                continue
            for entry in cur_case.result:
                if not isinstance(entry, Failure):
                    continue
                is_flaky = getattr(cur_case, "flaky", "false") == "true"
                test_id = "%s#%s" % (case.classname, case.name)
                # Flatten tabs so the failure text renders cleanly in the table.
                details = entry.text.replace("\t", " ") if entry.text else ""
                # Only add a row if these details differ from the last seen
                # failure of the same test case (duplicate suppression).
                if details != last_seen_failures.get(test_id, ""):
                    table.rows.append(
                        [
                            test_id,
                            "Flaky" if is_flaky else "Failure",
                            details,
                        ]
                    )
                    if not is_flaky:
                        # Treeherder-parsable failure line; flaky tests are
                        # reported in the table only and do not fail the task.
                        print(f"TEST-UNEXPECTED-FAIL | {test_id} | {details}")
                        failure_count += 1
                # Update the last seen failure details for this test case
                last_seen_failures[test_id] = details

    print(table)
    return failure_count


def load_results_file(filename):
    """Load and parse a JUnit XML results file.

    Args:
        filename: Path to the JUnit XML report (str or Path).

    Returns:
        JUnitXml: Parsed results, or None if the file could not be opened
        or contained malformed XML (the error is printed in either case).
    """
    ret = None
    try:
        # Context manager guarantees the handle is closed even if parsing
        # raises (replaces the manual open/close with try/finally).
        with open(filename) as f:
            ret = JUnitXml.fromfile(f)
    except xml.etree.ElementTree.ParseError as e:
        # Report the actual file name instead of a hard-coded placeholder.
        print(f"Error parsing {filename}: {e}")
    except OSError as e:
        print(e)

    return ret


def main():
    """Entry point: locate the report, parse it, and report failures.

    Returns:
        int: Number of unexpected (non-flaky) failures; used as the
        process exit status, so 0 means success.
    """
    args = parse_args(sys.argv[1:])

    failures = 0
    report = load_results_file(args.results / "FullJUnitReport.xml")
    if report:
        failures = parse_print_failure_results(report)
    return failures


if __name__ == "__main__":
    sys.exit(main())