File: automated_test.py

#!/usr/bin/env python3
# pyOCD debugger
# Copyright (c) 2015-2020 Arm Limited
# Copyright (c) 2021-2022 Chris Reed
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys
import logging
from time import time
import argparse
from xml.etree import ElementTree
import multiprocessing as mp
import io
from dataclasses import dataclass
from typing import (IO, List, Optional)

from pyocd.core.session import Session
from pyocd.core.helpers import ConnectHelper
from pyocd.probe.aggregator import DebugProbeAggregator

from test_util import (
    get_env_file_name,
    TestResult,
    Test,
    IOTee,
    RecordingLogHandler,
    get_session_options,
    ensure_output_dir,
    TEST_OUTPUT_DIR,
    )

from basic_test import BasicTest
from speed_test import SpeedTest
from cortex_test import CortexTest
from flash_test import FlashTest
from flash_loader_test import FlashLoaderTest
from gdb_test import GdbTest
from json_lists_test import JsonListsTest
from connect_test import ConnectTest
from debug_context_test import DebugContextTest
from concurrency_test import ConcurrencyTest
from commands_test import CommandsTest
from commander_test import CommanderTest
from probeserver_test import ProbeserverTest
from user_script_test import UserScriptTest

XML_RESULTS_TEMPLATE = "test_results{}.xml"
LOG_FILE_TEMPLATE = "automated_test_result{}.txt"
SUMMARY_FILE_TEMPLATE = "automated_test_summary{}.txt"

LOG_FORMAT = "%(relativeCreated)07d:%(levelname)s:%(module)s:%(message)s"

JOB_TIMEOUT = 30 * 60 # 30 minutes
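# JOB_TIMEOUT bounds how long the parent process waits for a single board's results when
# running with multiple jobs (used as the timeout for the pool result .get() below).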

# Put together list of all tests.
all_tests = [
             BasicTest(),
             JsonListsTest(),
             ConnectTest(),
             SpeedTest(),
             CortexTest(),
             ConcurrencyTest(),
             FlashTest(),
             FlashLoaderTest(),
             DebugContextTest(),
             GdbTest(),
             CommandsTest(),
             CommanderTest(),
             ProbeserverTest(),
             UserScriptTest(),
             ]

# Actual list used at runtime, filtered by command line args.
test_list = []

# Tests that can fail without causing a non-zero exit code.
IGNORE_FAILURE_TESTS = [
            "Connect Test",
            "Gdb Test",
            ]

def print_summary(test_list, result_list, test_time, output_file=None):
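    """@brief Print per-test performance info, overall pass/fail results, and total test time."""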
    for test in test_list:
        test.print_perf_info(result_list, output_file=output_file)

    Test.print_results(result_list, output_file=output_file, ignored=IGNORE_FAILURE_TESTS)
    print("", file=output_file)
    print("Test Time: %.3f" % test_time, file=output_file)
    if Test.all_tests_pass(result_list):
        print("All tests passed", file=output_file)
    else:
        print("One or more tests has failed!", file=output_file)

def split_results_by_board(result_list):
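    """@brief Group TestResult objects into a dict keyed by board name."""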
    boards = {}
    for result in result_list:
        if result.board_name in boards:
            boards[result.board_name].append(result)
        else:
            boards[result.board_name] = [result]
    return boards

def generate_xml_results(result_list):
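    """@brief Write JUnit-style XML results, grouping test cases into one testsuite per board."""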
    board_results = split_results_by_board(result_list)

    suite_id = 0
    total_failures = 0
    total_tests = 0
    total_time = 0

    root = ElementTree.Element('testsuites',
            name="pyocd"
            )
    root.text = "\n"

    for board_name, results in board_results.items():
        total = 0
        failures = 0
        suite_time = 0
        suite = ElementTree.SubElement(root, 'testsuite',
                    name=board_name,
                    id=str(suite_id))
        suite.text = "\n"
        suite.tail = "\n"
        suite_id += 1

        for result in results:

            total += 1
            if not result.passed:
                failures += 1
            case = result.get_test_case()
            suite.append(case)
            suite_time += result.time

        suite.set('tests', str(total))
        suite.set('failures', str(failures))
        suite.set('time', "%.3f" % suite_time)
        total_tests += total
        total_failures += failures
        total_time += suite_time

    root.set('tests', str(total_tests))
    root.set('failures', str(total_failures))
    root.set('time', "%.3f" % total_time)

    xml_results = os.path.join(TEST_OUTPUT_DIR, XML_RESULTS_TEMPLATE.format(get_env_file_name()))
    ElementTree.ElementTree(root).write(xml_results, encoding="UTF-8", xml_declaration=True)

def print_board_header(outputFile, board, n, includeDividers=True, includeLeadingNewline=False):
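    """@brief Print a "TESTING BOARD" header, optionally surrounded by divider lines."""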
    header = "TESTING BOARD {name} [{target}] [{uid}] #{n}".format(
        name=board.name, target=board.target_type, uid=board.unique_id, n=n)
    if includeDividers:
        divider = "=" * len(header)
        if includeLeadingNewline:
            print("\n" + divider, file=outputFile)
        else:
            print(divider, file=outputFile)
    print(header, file=outputFile)
    if includeDividers:
        print(divider + "\n", file=outputFile)

def print_test_header(output_file, board, test):
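    """@brief Print a divider-delimited header naming the test and board."""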
    header = f"Test: {test.name} | {board.name} #{test.n}"
    divider = "-" * 80
    print("\n" + divider, file=output_file)
    print(header, file=output_file)
    print(divider, file=output_file)

def clean_board_name(name: str) -> str:
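    """@brief Sanitize a board name for use in file names by replacing non-alphanumeric characters with underscores."""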
    return "".join((c if c.isalnum() else "_") for c in name)

@dataclass
class BoardTestConfig:
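    """@brief Parameters for one board's test run, passed as a single argument to test_board()."""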
    board_id: str
    n: int
    loglevel: int
    log_to_console: bool
    common_log_file: Optional[IO[str]]
    test_list: List[Test]

def test_board(config: BoardTestConfig):
    """@brief Run all tests on a given board.

    When multiple test jobs are being used, this function is the entry point executed in
    child processes.

    Always writes both stdout and log messages of tests to a board-specific log file, and saves
    the output for each test to a string that is stored in the TestResult object. Depending on
    the logToConsole and commonLogFile parameters, output may also be copied to the console
    (sys.stdout) and/or a common log file for all boards.

    @param board_id Unique ID of the board to test.
    @param n Unique index of the test run.
    @param loglevel Log level passed to logger instance. Usually INFO or DEBUG.
    @param logToConsole Boolean indicating whether output should be copied to sys.stdout.
    @param commonLogFile If not None, an open file object to which output should be copied.
    """
    board_id = config.board_id
    n = config.n
    loglevel = config.loglevel
    logToConsole = config.log_to_console
    commonLogFile = config.common_log_file

    probe = DebugProbeAggregator.get_probe_with_id(board_id)
    assert probe is not None
    session = Session(probe, **get_session_options())
    board = session.board

    originalStdout = sys.stdout
    originalStderr = sys.stderr

    # Set up board-specific output file. A previously existing file is removed.
    env_name = (("_" + os.environ['TOX_ENV_NAME']) if ('TOX_ENV_NAME' in os.environ) else '')
    name_info = "{}_{}_{}".format(env_name, clean_board_name(board.name), n)
    log_filename = os.path.join(TEST_OUTPUT_DIR, LOG_FILE_TEMPLATE.format(name_info))
    if os.path.exists(log_filename):
        os.remove(log_filename)

    # Skip board if specified in the config.
    if session.options['skip_test']:
        print("Skipping board %s due as specified in config" % board.unique_id)
        return []
    # Skip this board if we don't have a test binary.
    if board.test_binary is None:
        print("Skipping board %s due to missing test binary" % board.unique_id)
        return []

    # Open board-specific output file. This is done after skipping so a skipped board doesn't have a
    # log file created for it (but a previous log file will be removed, above).
    # buffering=1 selects line buffering.
    log_file = open(log_filename, "w", buffering=1, encoding='utf-8', errors='backslashreplace')

    # Setup logging.
    log_handler = RecordingLogHandler(None)
    log_handler.setFormatter(logging.Formatter(LOG_FORMAT))
    root_logger = logging.getLogger()
    root_logger.setLevel(loglevel)
    root_logger.addHandler(log_handler)

    result_list = []
    try:
        # Write board header to board log file, common log file, and console.
        print_board_header(log_file, board, n)
        if commonLogFile:
            print_board_header(commonLogFile, board, n, includeLeadingNewline=(n != 0))
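        # The console header is always printed; includeDividers is set to logToConsole so the
        # divider lines appear only when test output is mirrored to the console.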
        print_board_header(originalStdout, board, n, logToConsole, includeLeadingNewline=(n != 0))

        # Run all tests on this board.
        for test in config.test_list:
            print("{} #{}: starting {}...".format(board.name, n, test.name), file=originalStdout)

            # Set the test number on the test object. Used to get a unique port for the GdbTest.
            test.n = n

            # Print header and divider at the start of every test into the log files.
            print_test_header(log_file, board, test)
            if commonLogFile is not None:
                print_test_header(commonLogFile, board, test)

            # Create a StringIO object to record the test's output, an IOTee to copy
            # output to both the log file and StringIO, then set the log handler and
            # stdio to write to the tee.
            testOutput = io.StringIO()
            tee = IOTee(log_file, testOutput)
            if logToConsole:
                tee.add(originalStdout)
            if commonLogFile is not None:
                tee.add(commonLogFile)
            log_handler.stream = tee
            sys.stdout = tee
            sys.stderr = tee

            test_start = time()
            result = test.run(board)
            test_stop = time()
            result.time = test_stop - test_start
            tee.flush()
            result.output = testOutput.getvalue()
            result_list.append(result)

            passFail = "PASSED" if result.passed else "FAILED"
            print("{} #{}: finished {}... {} ({:.3f} s)".format(
                board.name, n, test.name, passFail, result.time),
                file=originalStdout)
    finally:
        # Restore stdout/stderr in case we're running in the parent process (1 job).
        sys.stdout = originalStdout
        sys.stderr = originalStderr

        root_logger.removeHandler(log_handler)
        log_handler.flush()
        log_handler.close()
    return result_list

def filter_tests(args):
    """@brief Generate the list of tests to run based on arguments."""
    if args.exclude_tests and args.include_tests:
        print("Please only include or exclude tests, not both simultaneously.")
        sys.exit(1)
    excludes = [t.strip().lower() for t in args.exclude_tests.split(',')] if args.exclude_tests else []
    includes = [t.strip().lower() for t in args.include_tests.split(',')] if args.include_tests else []

    for test in all_tests:
        if excludes:
            include_it = (test.name.lower() not in excludes)
        elif includes:
            include_it = (test.name.lower() in includes)
        else:
            include_it = True

        if include_it:
            test_list.append(test)

def main():
    parser = argparse.ArgumentParser(description='pyOCD automated testing')
    parser.add_argument('-d', '--debug', action="store_true", help='Enable debug logging')
    parser.add_argument('-q', '--quiet', action="store_true", help='Hide test progress for 1 job')
    parser.add_argument('-j', '--jobs', action="store", default=1, type=int, metavar="JOBS",
        help='Set number of concurrent board tests (default is 1)')
    parser.add_argument('-b', '--board', action="append", metavar="ID", help="Limit testing to boards with specified unique IDs. Multiple boards can be listed.")
    parser.add_argument('-l', '--list-tests', action="store_true", help="Print a list of tests that will be run.")
    parser.add_argument('-x', '--exclude-tests', metavar="TESTS", default="", help="Comma-separated list of tests to exclude.")
    parser.add_argument('-i', '--include-tests', metavar="TESTS", default="", help="Comma-separated list of tests to include.")
    args = parser.parse_args()
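    # Typical invocations (board ID and test names below are illustrative):
    #   ./automated_test.py --quiet --jobs 4
    #   ./automated_test.py --board 0240000012345678 --exclude-tests "Gdb Test,Connect Test"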

    # Allow CI to override the number of concurrent jobs.
    if 'CI_JOBS' in os.environ:
        args.jobs = int(os.environ['CI_JOBS'])

    filter_tests(args)

    if args.list_tests:
        for test in test_list:
            print(test.name)
        return

    # Disable multiple jobs on macOS prior to Python 3.4. By default, multiprocessing uses
    # fork() on Unix, which doesn't work on the Mac because CoreFoundation requires exec()
    # to be used in order to init correctly (CoreFoundation is used in hidapi). Only on Python
    # version 3.4+ is the multiprocessing.set_start_method() API available that lets us
    # switch to the 'spawn' method, i.e. exec().
    if args.jobs > 1 and sys.platform.startswith('darwin') and sys.version_info[0:2] < (3, 4):
        print("WARNING: Cannot support multiple jobs on macOS prior to Python 3.4. Forcing 1 job.")
        args.jobs = 1

    ensure_output_dir()

    # Setup logging based on concurrency and quiet option.
    level = logging.DEBUG if args.debug else logging.INFO
    if args.jobs == 1 and not args.quiet:
        log_file = os.path.join(TEST_OUTPUT_DIR, LOG_FILE_TEMPLATE.format(get_env_file_name()))
        # Create common log file.
        if os.path.exists(log_file):
            os.remove(log_file)
        logToConsole = True
        commonLogFile = open(log_file, "a")
    else:
        logToConsole = False
        commonLogFile = None

    board_list = []
    result_list = []

    # Put together list of boards to test
    board_list = ConnectHelper.get_all_connected_probes(blocking=False)
    board_id_list = sorted(b.unique_id for b in board_list)

    # Filter boards.
    if args.board:
        # Get the full unique ID of any matching probes.
        board_id_list = [b for b in board_id_list if any(c for c in args.board if c.lower() in b.lower())]
        # Add in any requested remotes.
        board_id_list += [a for a in args.board if a.startswith('remote:')]

    # Generate board test configs.
    test_configs = [
                BoardTestConfig(
                    board_id=board_id,
                    n=n,
                    loglevel=level,
                    log_to_console=logToConsole,
                    common_log_file=commonLogFile,
                    test_list=test_list,
                )
                for n, board_id in enumerate(board_id_list)
            ]

    # If only 1 job was requested, don't bother spawning processes.
    start = time()
    if args.jobs == 1:
        for config in test_configs:
            result_list += test_board(config)
    else:
        # Create a pool of processes to run tests.
        # Create the pool before the try block so the finally clause can reference it even if
        # pool creation fails.
        pool = mp.Pool(args.jobs)
        try:

            # Issue board test job to process pool.
            async_results = [pool.apply_async(test_board, (config,))
                             for config in test_configs]

            # Gather results.
            for r in async_results:
                result_list += r.get(timeout=JOB_TIMEOUT)
        finally:
            pool.close()
            pool.join()
    stop = time()
    test_time = (stop - start)

    print_summary(test_list, result_list, test_time)
    summary_file = os.path.join(TEST_OUTPUT_DIR, SUMMARY_FILE_TEMPLATE.format(get_env_file_name()))
    with open(summary_file, "w") as output_file:
        print_summary(test_list, result_list, test_time, output_file)
    generate_xml_results(result_list)

    exit_val = 0 if Test.all_tests_pass(result_list, ignored=IGNORE_FAILURE_TESTS) else -1
    exit(exit_val)

    #TODO - check if any threads are still running?

if __name__ == "__main__":
    # set_start_method is only available in Python 3.4+.
    if sys.version_info[0:2] >= (3, 4):
        mp.set_start_method('spawn')
    main()